1 //====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
17 //===----------------------------------------------------------------------===//
18 // SSE 1 & 2 Instruction Classes
19 //===----------------------------------------------------------------------===//
21 /// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
22 multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
23 RegisterClass RC, X86MemOperand x86memop,
25 let isCommutable = 1 in {
26 def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
28 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
29 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
30 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
32 def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
34 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
35 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
36 [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
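// Illustrative only (hypothetical instantiation, not code generated at this
// point in the file): a scalar binary op is typically pulled in through a
// wrapper with something like
//   defm ADDSS : sse12_fp_scalar<0x58, "addss", fadd, FR32, f32mem>, XS;
// which expands into ADDSSrr (reg/reg, marked commutable) and ADDSSrm
// (reg/mem) scalar instructions.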
39 /// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
40 multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
41 string asm, string SSEVer, string FPSizeStr,
42 Operand memopr, ComplexPattern mem_cpat,
44 def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
46 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
47 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
48 [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
49 !strconcat(SSEVer, !strconcat("_",
50 !strconcat(OpcodeStr, FPSizeStr))))
51 RC:$src1, RC:$src2))]>;
52 def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
54 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
55 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
56 [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse",
57 !strconcat(SSEVer, !strconcat("_",
58 !strconcat(OpcodeStr, FPSizeStr))))
59 RC:$src1, mem_cpat:$src2))]>;
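// For example (hypothetical arguments): with OpcodeStr = "add", SSEVer = "2"
// and FPSizeStr = "_sd", the !nameconcat above resolves to the intrinsic
// record int_x86_sse2_add_sd, so the rr_Int/rm_Int defs select directly on
// that intrinsic call.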
62 /// sse12_fp_packed - SSE 1 & 2 packed instructions class
63 multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
64 RegisterClass RC, ValueType vt,
65 X86MemOperand x86memop, PatFrag mem_frag,
66 Domain d, bit Is2Addr = 1> {
67 let isCommutable = 1 in
68 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
70 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
71 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
72 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
74 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
76 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
77 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
78 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
81 /// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
82 multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
83 string OpcodeStr, X86MemOperand x86memop,
84 list<dag> pat_rr, list<dag> pat_rm,
86 let isCommutable = 1 in
87 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
89 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
90 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
92 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
94 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
95 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
99 /// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
100 multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
101 string asm, string SSEVer, string FPSizeStr,
102 X86MemOperand x86memop, PatFrag mem_frag,
103 Domain d, bit Is2Addr = 1> {
104 def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
106 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
107 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
108 [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_",
109 !strconcat(SSEVer, !strconcat("_",
110 !strconcat(OpcodeStr, FPSizeStr))))
111 RC:$src1, RC:$src2))], d>;
112 def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
114 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
115 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
116 [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_",
117 !strconcat(SSEVer, !strconcat("_",
118 !strconcat(OpcodeStr, FPSizeStr))))
119 RC:$src1, (mem_frag addr:$src2)))], d>;
122 //===----------------------------------------------------------------------===//
123 // SSE 1 & 2 - Move Instructions
124 //===----------------------------------------------------------------------===//
126 class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
127 SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
128 [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;
130 // Loading from memory automatically zeroes the upper bits.
131 class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
132 PatFrag mem_pat, string OpcodeStr> :
133 SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
134 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
135 [(set RC:$dst, (mem_pat addr:$src))]>;
137 // Move Instructions. Register-to-register movss/movsd is not used for FR32/64
138 // register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
139 // is used instead. Register-to-register movss/movsd is not modeled as an
140 // INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
141 // in terms of a copy, and, as just mentioned, we don't use movss/movsd for copies.
142 let isAsmParserOnly = 1 in {
143 def VMOVSSrr : sse12_move_rr<FR32, v4f32,
144 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
145 def VMOVSDrr : sse12_move_rr<FR64, v2f64,
146 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;
148 let canFoldAsLoad = 1, isReMaterializable = 1 in {
149 def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;
151 let AddedComplexity = 20 in
152 def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
156 let Constraints = "$src1 = $dst" in {
157 def MOVSSrr : sse12_move_rr<FR32, v4f32,
158 "movss\t{$src2, $dst|$dst, $src2}">, XS;
159 def MOVSDrr : sse12_move_rr<FR64, v2f64,
160 "movsd\t{$src2, $dst|$dst, $src2}">, XD;
163 let canFoldAsLoad = 1, isReMaterializable = 1 in {
164 def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;
166 let AddedComplexity = 20 in
167 def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
170 let AddedComplexity = 15 in {
171 // Extract the low 32-bit value from one vector and insert it into another.
172 def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
173 (MOVSSrr (v4f32 VR128:$src1),
174 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
175 // Extract the low 64-bit value from one vector and insert it into another.
176 def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
177 (MOVSDrr (v2f64 VR128:$src1),
178 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
181 // Implicitly promote a 32-bit scalar to a vector.
182 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
183 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
184 // Implicitly promote a 64-bit scalar to a vector.
185 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
186 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
188 let AddedComplexity = 20 in {
189 // MOVSSrm zeros the high parts of the register; represent this
190 // with SUBREG_TO_REG.
191 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
192 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
193 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
194 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
195 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
196 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
197 // MOVSDrm zeros the high parts of the register; represent this
198 // with SUBREG_TO_REG.
199 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
200 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
201 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
202 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
203 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
204 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
205 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
206 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
207 def : Pat<(v2f64 (X86vzload addr:$src)),
208 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
211 // Store scalar value to memory.
212 def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
213 "movss\t{$src, $dst|$dst, $src}",
214 [(store FR32:$src, addr:$dst)]>;
215 def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
216 "movsd\t{$src, $dst|$dst, $src}",
217 [(store FR64:$src, addr:$dst)]>;
219 let isAsmParserOnly = 1 in {
220 def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
221 "movss\t{$src, $dst|$dst, $src}",
222 [(store FR32:$src, addr:$dst)]>, XS, VEX;
223 def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
224 "movsd\t{$src, $dst|$dst, $src}",
225 [(store FR64:$src, addr:$dst)]>, XD, VEX;
228 // Extract and store.
229 def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
232 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
233 def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
236 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
238 // Move Aligned/Unaligned floating point values
239 multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
240 X86MemOperand x86memop, PatFrag ld_frag,
241 string asm, Domain d,
242 bit IsReMaterializable = 1> {
243 let neverHasSideEffects = 1 in
244 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
245 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
246 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
247 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
248 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
249 [(set RC:$dst, (ld_frag addr:$src))], d>;
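// Note (descriptive): the aligned/unaligned split comes from the load
// fragment. The movaps/movapd variants below are instantiated with the
// alignedloadv* fragments, so they only match loads the DAG can prove are
// 16-byte aligned; the movups/movupd variants use the plain loadv* fragments
// and accept any alignment.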
252 let isAsmParserOnly = 1 in {
253 defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
254 "movaps", SSEPackedSingle>, VEX;
255 defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
256 "movapd", SSEPackedDouble>, OpSize, VEX;
257 defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
258 "movups", SSEPackedSingle>, VEX;
259 defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
260 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
262 defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
263 "movaps", SSEPackedSingle>, VEX;
264 defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
265 "movapd", SSEPackedDouble>, OpSize, VEX;
266 defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
267 "movups", SSEPackedSingle>, VEX;
268 defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
269 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
271 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
272 "movaps", SSEPackedSingle>, TB;
273 defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
274 "movapd", SSEPackedDouble>, TB, OpSize;
275 defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
276 "movups", SSEPackedSingle>, TB;
277 defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
278 "movupd", SSEPackedDouble, 0>, TB, OpSize;
280 let isAsmParserOnly = 1 in {
281 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
282 "movaps\t{$src, $dst|$dst, $src}",
283 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
284 def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
285 "movapd\t{$src, $dst|$dst, $src}",
286 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
287 def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
288 "movups\t{$src, $dst|$dst, $src}",
289 [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
290 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
291 "movupd\t{$src, $dst|$dst, $src}",
292 [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
293 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
294 "movaps\t{$src, $dst|$dst, $src}",
295 [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
296 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
297 "movapd\t{$src, $dst|$dst, $src}",
298 [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
299 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
300 "movups\t{$src, $dst|$dst, $src}",
301 [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
302 def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
303 "movupd\t{$src, $dst|$dst, $src}",
304 [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
307 def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
308 def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
309 (VMOVUPSYmr addr:$dst, VR256:$src)>;
311 def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
312 def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
313 (VMOVUPDYmr addr:$dst, VR256:$src)>;
315 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
316 "movaps\t{$src, $dst|$dst, $src}",
317 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
318 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
319 "movapd\t{$src, $dst|$dst, $src}",
320 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
321 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
322 "movups\t{$src, $dst|$dst, $src}",
323 [(store (v4f32 VR128:$src), addr:$dst)]>;
324 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
325 "movupd\t{$src, $dst|$dst, $src}",
326 [(store (v2f64 VR128:$src), addr:$dst)]>;
328 // Intrinsic forms of MOVUPS/D load and store
329 let isAsmParserOnly = 1 in {
330 let canFoldAsLoad = 1, isReMaterializable = 1 in
331 def VMOVUPSrm_Int : VPSI<0x10, MRMSrcMem, (outs VR128:$dst),
333 "movups\t{$src, $dst|$dst, $src}",
334 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>, VEX;
335 def VMOVUPDrm_Int : VPDI<0x10, MRMSrcMem, (outs VR128:$dst),
337 "movupd\t{$src, $dst|$dst, $src}",
338 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>, VEX;
339 def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
340 (ins f128mem:$dst, VR128:$src),
341 "movups\t{$src, $dst|$dst, $src}",
342 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
343 def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
344 (ins f128mem:$dst, VR128:$src),
345 "movupd\t{$src, $dst|$dst, $src}",
346 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
348 let canFoldAsLoad = 1, isReMaterializable = 1 in
349 def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
350 "movups\t{$src, $dst|$dst, $src}",
351 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
352 def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
353 "movupd\t{$src, $dst|$dst, $src}",
354 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
356 def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
357 "movups\t{$src, $dst|$dst, $src}",
358 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
359 def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
360 "movupd\t{$src, $dst|$dst, $src}",
361 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
363 // Move Low/High packed floating point values
364 multiclass sse12_mov_hilo_packed<bits<8> opc, RegisterClass RC,
365 PatFrag mov_frag, string base_opc,
367 def PSrm : PI<opc, MRMSrcMem,
368 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
369 !strconcat(!strconcat(base_opc,"s"), asm_opr),
372 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
373 SSEPackedSingle>, TB;
375 def PDrm : PI<opc, MRMSrcMem,
376 (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
377 !strconcat(!strconcat(base_opc,"d"), asm_opr),
378 [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
379 (scalar_to_vector (loadf64 addr:$src2)))))],
380 SSEPackedDouble>, TB, OpSize;
383 let isAsmParserOnly = 1, AddedComplexity = 20 in {
384 defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
385 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
386 defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
387 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
389 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
390 defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
391 "\t{$src2, $dst|$dst, $src2}">;
392 defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
393 "\t{$src2, $dst|$dst, $src2}">;
396 let isAsmParserOnly = 1 in {
397 def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
398 "movlps\t{$src, $dst|$dst, $src}",
399 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
400 (iPTR 0))), addr:$dst)]>, VEX;
401 def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
402 "movlpd\t{$src, $dst|$dst, $src}",
403 [(store (f64 (vector_extract (v2f64 VR128:$src),
404 (iPTR 0))), addr:$dst)]>, VEX;
406 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
407 "movlps\t{$src, $dst|$dst, $src}",
408 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
409 (iPTR 0))), addr:$dst)]>;
410 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
411 "movlpd\t{$src, $dst|$dst, $src}",
412 [(store (f64 (vector_extract (v2f64 VR128:$src),
413 (iPTR 0))), addr:$dst)]>;
415 // v2f64 extract element 1 is always custom lowered to unpack high to low
416 // and extract element 0 so the non-store version isn't too horrible.
417 let isAsmParserOnly = 1 in {
418 def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
419 "movhps\t{$src, $dst|$dst, $src}",
420 [(store (f64 (vector_extract
421 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
422 (undef)), (iPTR 0))), addr:$dst)]>,
424 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
425 "movhpd\t{$src, $dst|$dst, $src}",
426 [(store (f64 (vector_extract
427 (v2f64 (unpckh VR128:$src, (undef))),
428 (iPTR 0))), addr:$dst)]>,
431 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
432 "movhps\t{$src, $dst|$dst, $src}",
433 [(store (f64 (vector_extract
434 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
435 (undef)), (iPTR 0))), addr:$dst)]>;
436 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
437 "movhpd\t{$src, $dst|$dst, $src}",
438 [(store (f64 (vector_extract
439 (v2f64 (unpckh VR128:$src, (undef))),
440 (iPTR 0))), addr:$dst)]>;
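// For instance (illustrative): storing the high f64 of a v2f64 selects
// MOVHPDmr directly through the pattern above, while a non-store extract of
// element 1 is custom lowered to an unpckhpd of the value with itself
// followed by a use of element 0.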
442 let isAsmParserOnly = 1, AddedComplexity = 20 in {
443 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
444 (ins VR128:$src1, VR128:$src2),
445 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
447 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
449 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
450 (ins VR128:$src1, VR128:$src2),
451 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
453 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
456 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
457 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
458 (ins VR128:$src1, VR128:$src2),
459 "movlhps\t{$src2, $dst|$dst, $src2}",
461 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
462 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
463 (ins VR128:$src1, VR128:$src2),
464 "movhlps\t{$src2, $dst|$dst, $src2}",
466 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
469 def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
470 (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
471 let AddedComplexity = 20 in {
472 def : Pat<(v4f32 (movddup VR128:$src, (undef))),
473 (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
474 def : Pat<(v2i64 (movddup VR128:$src, (undef))),
475 (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
478 //===----------------------------------------------------------------------===//
479 // SSE 1 & 2 - Conversion Instructions
480 //===----------------------------------------------------------------------===//
482 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
483 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
485 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
486 [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
487 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
488 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
491 multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
492 X86MemOperand x86memop, string asm> {
493 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
495 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
499 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
500 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
501 string asm, Domain d> {
502 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
503 [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
504 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
505 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
508 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
509 X86MemOperand x86memop, string asm> {
510 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
511 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
512 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
513 (ins DstRC:$src1, x86memop:$src),
514 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
517 let isAsmParserOnly = 1 in {
518 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
519 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
520 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
521 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
523 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
524 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
525 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
526 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
529 // The assembler can recognize rr 64-bit instructions by seeing an rxx
530 // register, but the same isn't true when only using memory operands;
531 // provide other assembly "l" and "q" forms to address this explicitly
532 // where appropriate to do so.
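// For example (illustrative): with a register source the assembler can infer
// the width from %eax vs. %rax, but "cvtsi2sd (%rax), %xmm0" is ambiguous, so
// the suffixed cvtsi2sd{l}/cvtsi2sd{q} forms below spell the size out.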
533 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
535 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
537 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
539 defm VCVTSI2SDL : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
541 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
545 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
546 "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
547 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
548 "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
549 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
550 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
551 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
552 "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
553 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
554 "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
555 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
556 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
557 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
558 "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
559 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
560 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
562 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
563 // and/or XMM operand(s).
565 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
566 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
568 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
569 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
570 [(set DstRC:$dst, (Int SrcRC:$src))]>;
571 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
572 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
573 [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
576 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
577 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
578 PatFrag ld_frag, string asm, bit Is2Addr = 1> {
579 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
581 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
582 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
583 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
584 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
585 (ins DstRC:$src1, x86memop:$src2),
587 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
588 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
589 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
592 let isAsmParserOnly = 1 in {
593 defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
594 f32mem, load, "cvtss2si">, XS, VEX;
595 defm Int_VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
596 int_x86_sse_cvtss2si64, f32mem, load, "cvtss2si">,
598 defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
599 f128mem, load, "cvtsd2si">, XD, VEX;
600 defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
601 int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
604 // FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_
605 // Get rid of this hack or rename the intrinsics; there are several
606 // instructions that only match with the intrinsic form, so why create
607 // duplicates just to let them be recognized by the assembler?
608 defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
609 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
610 defm VCVTSD2SI64 : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
611 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
613 defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
614 f32mem, load, "cvtss2si">, XS;
615 defm Int_CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
616 f32mem, load, "cvtss2si{q}">, XS, REX_W;
617 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
618 f128mem, load, "cvtsd2si{l}">, XD;
619 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
620 f128mem, load, "cvtsd2si{q}">, XD, REX_W;
623 let isAsmParserOnly = 1 in {
624 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
625 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
626 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
627 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
629 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
630 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
631 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
632 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
636 let Constraints = "$src1 = $dst" in {
637 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
638 int_x86_sse_cvtsi2ss, i32mem, loadi32,
640 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
641 int_x86_sse_cvtsi642ss, i64mem, loadi64,
642 "cvtsi2ss{q}">, XS, REX_W;
643 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
644 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
646 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
647 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
648 "cvtsi2sd">, XD, REX_W;
653 // Aliases for intrinsics
654 let isAsmParserOnly = 1 in {
655 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
656 f32mem, load, "cvttss2si">, XS, VEX;
657 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
658 int_x86_sse_cvttss2si64, f32mem, load,
659 "cvttss2si">, XS, VEX, VEX_W;
660 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
661 f128mem, load, "cvttsd2si">, XD, VEX;
662 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
663 int_x86_sse2_cvttsd2si64, f128mem, load,
664 "cvttss2si">, XD, VEX, VEX_W;
666 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
667 f32mem, load, "cvttss2si">, XS;
668 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
669 int_x86_sse_cvttss2si64, f32mem, load,
670 "cvttss2si{q}">, XS, REX_W;
671 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
672 f128mem, load, "cvttsd2si">, XD;
673 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
674 int_x86_sse2_cvttsd2si64, f128mem, load,
675 "cvttss2si{q}">, XD, REX_W;
677 let isAsmParserOnly = 1, Pattern = []<dag> in {
678 defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
679 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
680 defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
681 "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
683 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
684 "cvtdq2ps\t{$src, $dst|$dst, $src}",
685 SSEPackedSingle>, TB, VEX;
686 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
687 "cvtdq2ps\t{$src, $dst|$dst, $src}",
688 SSEPackedSingle>, TB, VEX;
690 let Pattern = []<dag> in {
691 defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
692 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
693 defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
694 "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
695 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
696 "cvtdq2ps\t{$src, $dst|$dst, $src}",
697 SSEPackedSingle>, TB; /* PD SSE3 form is available */
702 // Convert scalar double to scalar single
703 let isAsmParserOnly = 1 in {
704 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
705 (ins FR64:$src1, FR64:$src2),
706 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
708 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
709 (ins FR64:$src1, f64mem:$src2),
710 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
711 []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
713 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
714 "cvtsd2ss\t{$src, $dst|$dst, $src}",
715 [(set FR32:$dst, (fround FR64:$src))]>;
716 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
717 "cvtsd2ss\t{$src, $dst|$dst, $src}",
718 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
719 Requires<[HasSSE2, OptForSize]>;
721 let isAsmParserOnly = 1 in
722 defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
723 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
725 let Constraints = "$src1 = $dst" in
726 defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
727 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;
729 // Convert scalar single to scalar double
730 let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
731 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
732 (ins FR32:$src1, FR32:$src2),
733 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
734 []>, XS, Requires<[HasAVX]>, VEX_4V;
735 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
736 (ins FR32:$src1, f32mem:$src2),
737 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
738 []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;
740 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
741 "cvtss2sd\t{$src, $dst|$dst, $src}",
742 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
744 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
745 "cvtss2sd\t{$src, $dst|$dst, $src}",
746 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
747 Requires<[HasSSE2, OptForSize]>;
749 let isAsmParserOnly = 1 in {
750 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
751 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
752 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
753 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
754 VR128:$src2))]>, XS, VEX_4V,
756 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
757 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
758 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
759 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
760 (load addr:$src2)))]>, XS, VEX_4V,
763 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
764 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
765 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
766 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
767 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
770 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
771 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
772 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
773 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
774 (load addr:$src2)))]>, XS,
778 def : Pat<(extloadf32 addr:$src),
779 (CVTSS2SDrr (MOVSSrm addr:$src))>,
780 Requires<[HasSSE2, OptForSpeed]>;
782 // Convert doubleword to packed single/double fp
783 let isAsmParserOnly = 1 in { // SSE2 instructions without OpSize prefix
784 def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
785 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
786 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
787 TB, VEX, Requires<[HasAVX]>;
788 def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
789 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
790 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
791 (bitconvert (memopv2i64 addr:$src))))]>,
792 TB, VEX, Requires<[HasAVX]>;
794 def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
795 "cvtdq2ps\t{$src, $dst|$dst, $src}",
796 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
797 TB, Requires<[HasSSE2]>;
798 def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
799 "cvtdq2ps\t{$src, $dst|$dst, $src}",
800 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
801 (bitconvert (memopv2i64 addr:$src))))]>,
802 TB, Requires<[HasSSE2]>;
804 // FIXME: why is the non-intrinsic version described as SSE3?
805 let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix
806 def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
807 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
808 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
809 XS, VEX, Requires<[HasAVX]>;
810 def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
811 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
812 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
813 (bitconvert (memopv2i64 addr:$src))))]>,
814 XS, VEX, Requires<[HasAVX]>;
816 def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
817 "cvtdq2pd\t{$src, $dst|$dst, $src}",
818 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
819 XS, Requires<[HasSSE2]>;
820 def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
821 "cvtdq2pd\t{$src, $dst|$dst, $src}",
822 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
823 (bitconvert (memopv2i64 addr:$src))))]>,
824 XS, Requires<[HasSSE2]>;
827 // Convert packed single/double fp to doubleword
828 let isAsmParserOnly = 1 in {
829 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
830 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
831 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
832 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
833 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
834 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
835 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
836 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
838 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
839 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
840 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
841 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
843 let isAsmParserOnly = 1 in {
844 def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
845 "cvtps2dq\t{$src, $dst|$dst, $src}",
846 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
848 def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
850 "cvtps2dq\t{$src, $dst|$dst, $src}",
851 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
852 (memop addr:$src)))]>, VEX;
854 def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
855 "cvtps2dq\t{$src, $dst|$dst, $src}",
856 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
857 def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
858 "cvtps2dq\t{$src, $dst|$dst, $src}",
859 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
860 (memop addr:$src)))]>;
862 let isAsmParserOnly = 1 in { // SSE2 packed instructions with XD prefix
863 def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
864 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
865 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
866 XD, VEX, Requires<[HasAVX]>;
867 def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
868 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
869 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
870 (memop addr:$src)))]>,
871 XD, VEX, Requires<[HasAVX]>;
873 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
874 "cvtpd2dq\t{$src, $dst|$dst, $src}",
875 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
876 XD, Requires<[HasSSE2]>;
877 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
878 "cvtpd2dq\t{$src, $dst|$dst, $src}",
879 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
880 (memop addr:$src)))]>,
881 XD, Requires<[HasSSE2]>;
884 // Convert packed single/double fp to doubleword, with truncation
885 let isAsmParserOnly = 1 in { // SSE2 packed instructions with XS prefix
886 def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
887 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
888 def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
889 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
890 def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
891 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
892 def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
893 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
895 def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
896 "cvttps2dq\t{$src, $dst|$dst, $src}",
898 (int_x86_sse2_cvttps2dq VR128:$src))]>;
899 def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
900 "cvttps2dq\t{$src, $dst|$dst, $src}",
902 (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;
905 let isAsmParserOnly = 1 in {
906 def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
907 "vcvttps2dq\t{$src, $dst|$dst, $src}",
909 (int_x86_sse2_cvttps2dq VR128:$src))]>,
910 XS, VEX, Requires<[HasAVX]>;
911 def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
912 "vcvttps2dq\t{$src, $dst|$dst, $src}",
913 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
914 (memop addr:$src)))]>,
915 XS, VEX, Requires<[HasAVX]>;
918 let isAsmParserOnly = 1 in {
919 def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
921 "cvttpd2dq\t{$src, $dst|$dst, $src}",
922 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
924 def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
926 "cvttpd2dq\t{$src, $dst|$dst, $src}",
927 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
928 (memop addr:$src)))]>, VEX;
930 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
931 "cvttpd2dq\t{$src, $dst|$dst, $src}",
932 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
933 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
934 "cvttpd2dq\t{$src, $dst|$dst, $src}",
935 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
936 (memop addr:$src)))]>;
938 let isAsmParserOnly = 1 in {
939 // The assembler can recognize rr 256-bit instructions by seeing a ymm
940 // register, but the same isn't true when using memory operands instead.
941 // Provide other assembly rr and rm forms to address this explicitly.
942 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
943 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
944 def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
945 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
948 def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
949 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
950 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
951 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
954 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
955 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
956 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
957 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
960 // Convert packed single to packed double
961 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
962 // SSE2 instructions without OpSize prefix
963 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
964 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
965 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
966 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
967 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
968 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
969 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
970 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
972 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
973 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
974 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
975 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
977 let isAsmParserOnly = 1 in {
978 def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
979 "vcvtps2pd\t{$src, $dst|$dst, $src}",
980 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
981 VEX, Requires<[HasAVX]>;
982 def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
983 "vcvtps2pd\t{$src, $dst|$dst, $src}",
984 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
985 (load addr:$src)))]>,
986 VEX, Requires<[HasAVX]>;
988 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
989 "cvtps2pd\t{$src, $dst|$dst, $src}",
990 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
991 TB, Requires<[HasSSE2]>;
992 def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
993 "cvtps2pd\t{$src, $dst|$dst, $src}",
994 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
995 (load addr:$src)))]>,
996 TB, Requires<[HasSSE2]>;
998 // Convert packed double to packed single
999 let isAsmParserOnly = 1 in {
1000 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1001 // register, but the same isn't true when using memory operands instead.
1002 // Provide other assembly rr and rm forms to address this explicitly.
1003 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1004 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1005 def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1006 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1009 def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1010 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1011 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1012 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1015 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1016 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
1017 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1018 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
1020 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1021 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1022 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1023 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1026 let isAsmParserOnly = 1 in {
1027 def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1028 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1029 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1030 def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
1032 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1033 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1034 (memop addr:$src)))]>;
1036 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1037 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1038 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1039 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1040 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1041 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1042 (memop addr:$src)))]>;
1044 // AVX 256-bit register conversion intrinsics
1045 // FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
1046 // whenever possible to avoid declaring two versions of each one.
1047 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
1048 (VCVTDQ2PSYrr VR256:$src)>;
1049 def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
1050 (VCVTDQ2PSYrm addr:$src)>;
1052 def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
1053 (VCVTPD2PSYrr VR256:$src)>;
1054 def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
1055 (VCVTPD2PSYrm addr:$src)>;
1057 def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
1058 (VCVTPS2DQYrr VR256:$src)>;
1059 def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
1060 (VCVTPS2DQYrm addr:$src)>;
1062 def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
1063 (VCVTPS2PDYrr VR128:$src)>;
1064 def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
1065 (VCVTPS2PDYrm addr:$src)>;
1067 def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
1068 (VCVTTPD2DQYrr VR256:$src)>;
1069 def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
1070 (VCVTTPD2DQYrm addr:$src)>;
1072 def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
1073 (VCVTTPS2DQYrr VR256:$src)>;
1074 def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
1075 (VCVTTPS2DQYrm addr:$src)>;
1077 //===----------------------------------------------------------------------===//
1078 // SSE 1 & 2 - Compare Instructions
1079 //===----------------------------------------------------------------------===//
1081 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
1082 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
1083 string asm, string asm_alt> {
1084 def rr : SIi8<0xC2, MRMSrcReg,
1085 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
1088 def rm : SIi8<0xC2, MRMSrcMem,
1089 (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
1091 // Accept explicit immediate argument form instead of comparison code.
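// For example (illustrative): the parser accepts both "cmpeqss %xmm1, %xmm0"
// (condition-code pseudo form, $cc = 0) and "cmpss $0, %xmm1, %xmm0"
// (explicit immediate form); both select the same 0xC2 encoding.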
1092 let isAsmParserOnly = 1 in {
1093 def rr_alt : SIi8<0xC2, MRMSrcReg,
1094 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1097 def rm_alt : SIi8<0xC2, MRMSrcMem,
1098 (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
1103 let neverHasSideEffects = 1, isAsmParserOnly = 1 in {
1104 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
1105 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1106 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1108 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
1109 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1110 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1114 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1115 defm CMPSS : sse12_cmp_scalar<FR32, f32mem,
1116 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
1117 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}">, XS;
1118 defm CMPSD : sse12_cmp_scalar<FR64, f64mem,
1119 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
1120 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}">, XD;
1123 multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
1124 Intrinsic Int, string asm> {
1125 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1126 (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
1127 [(set VR128:$dst, (Int VR128:$src1,
1128 VR128:$src, imm:$cc))]>;
1129 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1130 (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
1131 [(set VR128:$dst, (Int VR128:$src1,
1132 (load addr:$src), imm:$cc))]>;
1135 // Aliases to match intrinsics which expect XMM operand(s).
1136 let isAsmParserOnly = 1 in {
1137 defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1138 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
1140 defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1141 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
1144 let Constraints = "$src1 = $dst" in {
1145 defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1146 "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
1147 defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1148 "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
1152 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
1153 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1154 ValueType vt, X86MemOperand x86memop,
1155 PatFrag ld_frag, string OpcodeStr, Domain d> {
1156 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1157 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1158 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
1159 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1160 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1161 [(set EFLAGS, (OpNode (vt RC:$src1),
1162 (ld_frag addr:$src2)))], d>;
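// Note (descriptive): ucomiss/ucomisd set ZF, PF and CF (and clear OF, SF,
// AF) from the unordered compare; the instantiations below are wrapped in
// Defs = [EFLAGS] so compare-and-branch patterns can consume the flags.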
1165 let Defs = [EFLAGS] in {
1166 let isAsmParserOnly = 1 in {
1167 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1168 "ucomiss", SSEPackedSingle>, VEX;
1169 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1170 "ucomisd", SSEPackedDouble>, OpSize, VEX;
1171 let Pattern = []<dag> in {
1172 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1173 "comiss", SSEPackedSingle>, VEX;
1174 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1175 "comisd", SSEPackedDouble>, OpSize, VEX;
1178 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1179 load, "ucomiss", SSEPackedSingle>, VEX;
1180 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1181 load, "ucomisd", SSEPackedDouble>, OpSize, VEX;
1183 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
1184 load, "comiss", SSEPackedSingle>, VEX;
1185 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
1186 load, "comisd", SSEPackedDouble>, OpSize, VEX;
1188 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1189 "ucomiss", SSEPackedSingle>, TB;
1190 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1191 "ucomisd", SSEPackedDouble>, TB, OpSize;
1193 let Pattern = []<dag> in {
1194 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1195 "comiss", SSEPackedSingle>, TB;
1196 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1197 "comisd", SSEPackedDouble>, TB, OpSize;
1200 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1201 load, "ucomiss", SSEPackedSingle>, TB;
1202 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1203 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
1205 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
1206 "comiss", SSEPackedSingle>, TB;
1207 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
1208 "comisd", SSEPackedDouble>, TB, OpSize;
1209 } // Defs = [EFLAGS]
1211 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
1212 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1213 Intrinsic Int, string asm, string asm_alt,
1215 def rri : PIi8<0xC2, MRMSrcReg,
1216 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1217 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1218 def rmi : PIi8<0xC2, MRMSrcMem,
1219 (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
1220 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1221 // Accept explicit immediate argument form instead of comparison code.
1222 let isAsmParserOnly = 1 in {
1223 def rri_alt : PIi8<0xC2, MRMSrcReg,
1224 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1226 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1227 (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
1232 let isAsmParserOnly = 1 in {
1233 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1234 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1235 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1236 SSEPackedSingle>, VEX_4V;
1237 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1238 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1239 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1240 SSEPackedDouble>, OpSize, VEX_4V;
1241 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
1242 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1243 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1244 SSEPackedSingle>, VEX_4V;
1245 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
1246 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1247 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1248 SSEPackedDouble>, OpSize, VEX_4V;
1250 let Constraints = "$src1 = $dst" in {
1251 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1252 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1253 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1254 SSEPackedSingle>, TB;
1255 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1256 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1257 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1258 SSEPackedDouble>, TB, OpSize;
1261 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1262 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1263 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1264 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1265 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1266 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1267 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1268 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1270 //===----------------------------------------------------------------------===//
1271 // SSE 1 & 2 - Shuffle Instructions
1272 //===----------------------------------------------------------------------===//
1274 /// sse12_shuffle - sse 1 & 2 shuffle instructions
1275 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1276 ValueType vt, string asm, PatFrag mem_frag,
1277 Domain d, bit IsConvertibleToThreeAddress = 0> {
1278 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
1279 (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
1280 [(set RC:$dst, (vt (shufp:$src3
1281 RC:$src1, (mem_frag addr:$src2))))], d>;
1282 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1283 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
1284 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
1286 (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
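// For example (illustrative): "shufps $0x1b, %xmm0, %xmm0" reverses the four
// single-precision lanes, since imm bits [1:0] and [3:2] pick elements of
// $src1 while bits [5:4] and [7:6] pick elements of $src2.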
1289 let isAsmParserOnly = 1 in {
1290 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1291 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1292 memopv4f32, SSEPackedSingle>, VEX_4V;
1293 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
1294 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1295 memopv8f32, SSEPackedSingle>, VEX_4V;
1296 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1297 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1298 memopv2f64, SSEPackedDouble>, OpSize, VEX_4V;
1299 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
1300 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1301 memopv4f64, SSEPackedDouble>, OpSize, VEX_4V;
1304 let Constraints = "$src1 = $dst" in {
1305 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1306 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1307 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1308 TB;
1309 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1310 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1311 memopv2f64, SSEPackedDouble>, TB, OpSize;
1314 //===----------------------------------------------------------------------===//
1315 // SSE 1 & 2 - Unpack Instructions
1316 //===----------------------------------------------------------------------===//
1318 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1319 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1320 PatFrag mem_frag, RegisterClass RC,
1321 X86MemOperand x86memop, string asm,
1322 Domain d> {
1323 def rr : PI<opc, MRMSrcReg,
1324 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1325 asm, [(set RC:$dst,
1326 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1327 def rm : PI<opc, MRMSrcMem,
1328 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1329 asm, [(set RC:$dst,
1330 (vt (OpNode RC:$src1,
1331 (mem_frag addr:$src2))))], d>;
1332 }
1334 let AddedComplexity = 10 in {
1335 let isAsmParserOnly = 1 in {
1336 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1337 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1338 SSEPackedSingle>, VEX_4V;
1339 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1340 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1341 SSEPackedDouble>, OpSize, VEX_4V;
1342 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1343 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1344 SSEPackedSingle>, VEX_4V;
1345 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1346 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1347 SSEPackedDouble>, OpSize, VEX_4V;
1349 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1350 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1351 SSEPackedSingle>, VEX_4V;
1352 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1353 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1354 SSEPackedDouble>, OpSize, VEX_4V;
1355 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1356 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1357 SSEPackedSingle>, VEX_4V;
1358 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1359 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1360 SSEPackedDouble>, OpSize, VEX_4V;
1363 let Constraints = "$src1 = $dst" in {
1364 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1365 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1366 SSEPackedSingle>, TB;
1367 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1368 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1369 SSEPackedDouble>, TB, OpSize;
1370 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1371 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1372 SSEPackedSingle>, TB;
1373 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1374 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1375 SSEPackedDouble>, TB, OpSize;
1376 } // Constraints = "$src1 = $dst"
1377 } // AddedComplexity
1379 //===----------------------------------------------------------------------===//
1380 // SSE 1 & 2 - Extract Floating-Point Sign mask
1381 //===----------------------------------------------------------------------===//
1383 /// sse12_extr_sign_mask - sse 1 & 2 unpack and interleave
1384 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1385 Domain d> {
1386 def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1387 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1388 [(set GR32:$dst, (Int RC:$src))], d>;
1389 def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
1390 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
1394 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1395 SSEPackedSingle>, TB;
1396 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1397 SSEPackedDouble>, TB, OpSize;
1399 let isAsmParserOnly = 1 in {
1400 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1401 "movmskps", SSEPackedSingle>, VEX;
1402 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1403 "movmskpd", SSEPackedDouble>, OpSize,
1405 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
1406 "movmskps", SSEPackedSingle>, VEX;
1407 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
1408 "movmskpd", SSEPackedDouble>, OpSize,
1412 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1413 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1414 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1415 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1417 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1418 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1419 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1420 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1424 //===----------------------------------------------------------------------===//
1425 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1426 //===----------------------------------------------------------------------===//
1428 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1429 // names that start with 'Fs'.
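// Added illustrative note (assumed, not from the original source): scalar
// f32/f64 values live in XMM registers, so a DAG node such as X86fand on an
// f32 (e.g. from fabs/copysign lowering through a sign-bit mask constant)
// can be selected straight to the packed encoding via the Fs aliases below:
//   andps   LCPI0_0(%rip), %xmm0   ; FsANDPSrm: clear the sign bit of lane 0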
1431 // Alias instructions that map fld0 to pxor for sse.
1432 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1433 canFoldAsLoad = 1 in {
1434 // FIXME: Set encoding to pseudo!
1435 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1436 [(set FR32:$dst, fp32imm0)]>,
1437 Requires<[HasSSE1]>, TB, OpSize;
1438 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1439 [(set FR64:$dst, fpimm0)]>,
1440 Requires<[HasSSE2]>, TB, OpSize;
1443 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
1444 // bits are disregarded.
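// Rationale (assumed, not stated here): movaps writes the whole register, so
// unlike movss/movsd it carries no dependence on the previous contents of
// $dst; whatever lands in the upper lanes is never read for FR32/FR64 values.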
1445 let neverHasSideEffects = 1 in {
1446 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1447 "movaps\t{$src, $dst|$dst, $src}", []>;
1448 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1449 "movapd\t{$src, $dst|$dst, $src}", []>;
1452 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1453 // bits are disregarded.
1454 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1455 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1456 "movaps\t{$src, $dst|$dst, $src}",
1457 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
1458 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1459 "movapd\t{$src, $dst|$dst, $src}",
1460 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1463 //===----------------------------------------------------------------------===//
1464 // SSE 1 & 2 - Logical Instructions
1465 //===----------------------------------------------------------------------===//
1467 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
1469 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
1470 SDNode OpNode> {
1471 let isAsmParserOnly = 1 in {
1472 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
1473 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, VEX_4V;
1475 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
1476 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, OpSize, VEX_4V;
1479 let Constraints = "$src1 = $dst" in {
1480 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
1481 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
1483 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
1484 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
1488 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1489 let mayLoad = 0 in {
1490 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
1491 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
1492 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
1495 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
1496 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
1498 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
1500 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
1501 SDNode OpNode, int HasPat = 0,
1502 list<list<dag>> Pattern = []> {
1503 let isAsmParserOnly = 1, Pattern = []<dag> in {
1504 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1505 !strconcat(OpcodeStr, "ps"), f128mem,
1506 !if(HasPat, Pattern[0], // rr
1507 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1508 VR128:$src2)))]),
1509 !if(HasPat, Pattern[2], // rm
1510 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1511 (memopv2i64 addr:$src2)))]), 0>,
1512 VEX_4V;
1514 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1515 !strconcat(OpcodeStr, "pd"), f128mem,
1516 !if(HasPat, Pattern[1], // rr
1517 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1518 (bc_v2i64 (v2f64 VR128:$src2))))]),
1520 !if(HasPat, Pattern[3], // rm
1521 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1522 (memopv2i64 addr:$src2)))]), 0>,
1523 OpSize, VEX_4V;
1525 let Constraints = "$src1 = $dst" in {
1526 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1527 !strconcat(OpcodeStr, "ps"), f128mem,
1528 !if(HasPat, Pattern[0], // rr
1529 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1530 VR128:$src2)))]),
1531 !if(HasPat, Pattern[2], // rm
1532 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1533 (memopv2i64 addr:$src2)))])>, TB;
1535 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1536 !strconcat(OpcodeStr, "pd"), f128mem,
1537 !if(HasPat, Pattern[1], // rr
1538 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1539 (bc_v2i64 (v2f64 VR128:$src2))))]),
1541 !if(HasPat, Pattern[3], // rm
1542 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1543 (memopv2i64 addr:$src2)))])>,
1544 TB, OpSize;
1545 }
1546 }
1548 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
1550 let isAsmParserOnly = 1 in {
1551 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr> {
1552 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
1553 !strconcat(OpcodeStr, "ps"), f256mem, [], [], 0>, VEX_4V;
1555 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
1556 !strconcat(OpcodeStr, "pd"), f256mem, [], [], 0>, OpSize, VEX_4V;
1560 // AVX 256-bit packed logical ops forms
1561 defm VAND : sse12_fp_packed_logical_y<0x54, "and">;
1562 defm VOR : sse12_fp_packed_logical_y<0x56, "or">;
1563 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor">;
1564 let isCommutable = 0 in
1565 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn">;
1567 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
1568 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
1569 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
1570 let isCommutable = 0 in
1571 defm ANDN : sse12_fp_packed_logical<0x55, "andn", undef /* dummy */, 1, [
1573 [(set VR128:$dst, (v2i64 (and (xor VR128:$src1,
1574 (bc_v2i64 (v4i32 immAllOnesV))),
1575 VR128:$src2)))],
1577 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1578 (bc_v2i64 (v2f64 VR128:$src2))))],
1580 [(set VR128:$dst, (v2i64 (and (xor (bc_v2i64 (v4f32 VR128:$src1)),
1581 (bc_v2i64 (v4i32 immAllOnesV))),
1582 (memopv2i64 addr:$src2))))],
1584 [(set VR128:$dst, (and (vnot (bc_v2i64 (v2f64 VR128:$src1))),
1585 (memopv2i64 addr:$src2)))]]>;
1587 //===----------------------------------------------------------------------===//
1588 // SSE 1 & 2 - Arithmetic Instructions
1589 //===----------------------------------------------------------------------===//
1591 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
1594 /// In addition, we also have a special variant of the scalar form here to
1595 /// represent the associated intrinsic operation. This form is unlike the
1596 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1597 /// and leaves the top elements unmodified (therefore these cannot be commuted).
1599 /// These three forms can each be reg+reg or reg+mem.
1602 /// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
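// Illustrative expansion (names assumed from the ADD instantiation below):
// for opcode 0x58 these multiclasses produce, among others,
//   ADDSSrr / ADDSSrm         - scalar FR32 form, matches (fadd f32, f32)
//   ADDPSrr / ADDPSrm         - packed VR128 form, matches (fadd v4f32, v4f32)
//   ADDSSrr_Int / ADDSSrm_Int - whole-vector form matching int_x86_sse_add_ss,
//                               which updates only the low element.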
1604 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
1605 bit Is2Addr = 1> {
1606 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
1607 OpNode, FR32, f32mem, Is2Addr>, XS;
1608 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
1609 OpNode, FR64, f64mem, Is2Addr>, XD;
1610 }
1612 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
1613 bit Is2Addr = 1> {
1614 let mayLoad = 0 in {
1615 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
1616 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
1617 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
1618 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
1619 }
1620 }
1622 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
1623 SDNode OpNode> {
1624 let mayLoad = 0 in {
1625 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
1626 v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
1627 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
1628 v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
1629 }
1630 }
1632 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
1633 bit Is2Addr = 1> {
1634 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1635 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
1636 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1637 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
1640 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
1642 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1643 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
1644 SSEPackedSingle, Is2Addr>, TB;
1646 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1647 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
1648 SSEPackedDouble, Is2Addr>, TB, OpSize;
1651 multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
1652 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1653 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
1654 SSEPackedSingle, 0>, TB;
1656 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1657 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
1658 SSEPackedDouble, 0>, TB, OpSize;
1661 // Binary Arithmetic instructions
1662 let isAsmParserOnly = 1 in {
1663 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
1664 basic_sse12_fp_binop_s_int<0x58, "add", 0>,
1665 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
1666 basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
1667 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
1668 basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
1669 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
1670 basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
1672 let isCommutable = 0 in {
1673 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
1674 basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
1675 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
1676 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
1677 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
1678 basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
1679 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
1680 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
1681 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
1682 basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
1683 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
1684 basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
1685 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
1686 basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
1687 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
1688 basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
1689 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
1690 basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
1691 basic_sse12_fp_binop_p_y_int<0x5D, "min">,
1692 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
1696 let Constraints = "$src1 = $dst" in {
1697 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
1698 basic_sse12_fp_binop_p<0x58, "add", fadd>,
1699 basic_sse12_fp_binop_s_int<0x58, "add">;
1700 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
1701 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
1702 basic_sse12_fp_binop_s_int<0x59, "mul">;
1704 let isCommutable = 0 in {
1705 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
1706 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
1707 basic_sse12_fp_binop_s_int<0x5C, "sub">;
1708 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
1709 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
1710 basic_sse12_fp_binop_s_int<0x5E, "div">;
1711 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
1712 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
1713 basic_sse12_fp_binop_s_int<0x5F, "max">,
1714 basic_sse12_fp_binop_p_int<0x5F, "max">;
1715 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
1716 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
1717 basic_sse12_fp_binop_s_int<0x5D, "min">,
1718 basic_sse12_fp_binop_p_int<0x5D, "min">;
1723 /// In addition, we also have a special variant of the scalar form here to
1724 /// represent the associated intrinsic operation. This form is unlike the
1725 /// plain scalar form, in that it takes an entire vector (instead of a
1726 /// scalar) and leaves the top elements undefined.
1728 /// And we also have a special variant for the full-vector intrinsic form.
1730 /// sse1_fp_unop_s - SSE1 unops in scalar form.
1731 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
1732 SDNode OpNode, Intrinsic F32Int> {
1733 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1734 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1735 [(set FR32:$dst, (OpNode FR32:$src))]>;
1736 // For scalar unary operations, fold a load into the operation
1737 // only in OptForSize mode. It eliminates an instruction, but it also
1738 // eliminates a whole-register clobber (the load), so it introduces a
1739 // partial register update condition.
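// A sketch of the trade-off (illustrative assembly, not generated here):
//   sqrtss (%rax), %xmm0       ; folded load: one instruction, but the result
//                              ; merges into the stale upper bits of %xmm0,
//                              ; creating a partial-register (false) dependence
//   movss  (%rax), %xmm0       ; default: the load writes the whole register,
//   sqrtss %xmm0, %xmm0        ; so no false dependence, at the cost of an
//                              ; extra instruction.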
1740 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
1741 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1742 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
1743 Requires<[HasSSE1, OptForSize]>;
1744 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1745 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1746 [(set VR128:$dst, (F32Int VR128:$src))]>;
1747 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1748 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1749 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1752 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
1753 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1754 SDNode OpNode, Intrinsic F32Int> {
1755 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
1756 !strconcat(OpcodeStr,
1757 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1758 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
1759 !strconcat(OpcodeStr,
1760 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1761 []>, XS, Requires<[HasAVX, OptForSize]>;
1762 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1763 !strconcat(OpcodeStr,
1764 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1765 [(set VR128:$dst, (F32Int VR128:$src))]>;
1766 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1767 !strconcat(OpcodeStr,
1768 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1769 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1772 /// sse1_fp_unop_p - SSE1 unops in packed form.
1773 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1774 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1775 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1776 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
1777 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1778 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1779 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
1782 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
1783 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1784 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1785 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1786 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
1787 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1788 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1789 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
1792 /// sse1_fp_unop_p_int - SSE1 intrinsic unops in packed forms.
1793 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1794 Intrinsic V4F32Int> {
1795 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1796 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1797 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
1798 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1799 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1800 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
1803 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsic unops in packed forms.
1804 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1805 Intrinsic V4F32Int> {
1806 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1807 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1808 [(set VR256:$dst, (V4F32Int VR256:$src))]>;
1809 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1810 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1811 [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
1814 /// sse2_fp_unop_s - SSE2 unops in scalar form.
1815 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
1816 SDNode OpNode, Intrinsic F64Int> {
1817 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1818 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1819 [(set FR64:$dst, (OpNode FR64:$src))]>;
1820 // See the comments in sse1_fp_unop_s for why this is OptForSize.
1821 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1822 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1823 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
1824 Requires<[HasSSE2, OptForSize]>;
1825 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1826 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1827 [(set VR128:$dst, (F64Int VR128:$src))]>;
1828 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1829 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1830 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1833 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
1834 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1835 SDNode OpNode, Intrinsic F64Int> {
1836 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1837 !strconcat(OpcodeStr,
1838 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1839 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
1840 (ins FR64:$src1, f64mem:$src2),
1841 !strconcat(OpcodeStr,
1842 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1843 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1844 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1845 [(set VR128:$dst, (F64Int VR128:$src))]>;
1846 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1847 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1848 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1851 /// sse2_fp_unop_p - SSE2 unops in vector forms.
1852 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
1853 SDNode OpNode> {
1854 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1855 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1856 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
1857 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1858 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1859 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
1862 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
1863 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1864 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1865 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1866 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
1867 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1868 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1869 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
1872 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
1873 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1874 Intrinsic V2F64Int> {
1875 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1876 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1877 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
1878 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1879 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1880 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
1883 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
1884 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1885 Intrinsic V2F64Int> {
1886 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1887 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1888 [(set VR256:$dst, (V2F64Int VR256:$src))]>;
1889 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1890 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1891 [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
1894 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
1896 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse_sqrt_ss>,
1897 sse2_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1900 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
1901 sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
1902 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1903 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1904 sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
1905 sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
1906 sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
1907 sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
1910 // Reciprocal approximations. Note that these typically require refinement
1911 // in order to obtain suitable precision.
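// Illustrative refinement (standard Newton-Raphson step, not part of this
// file): starting from the roughly 12-bit hardware estimate x0,
//   rcp:    x1 = x0 * (2.0 - d * x0)
//   rsqrt:  x1 = x0 * (1.5 - 0.5 * d * x0 * x0)
// approximately doubles the number of correct bits.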
1912 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt", X86frsqrt,
1913 int_x86_sse_rsqrt_ss>, VEX_4V;
1914 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
1915 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
1916 sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
1917 sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
1919 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp", X86frcp, int_x86_sse_rcp_ss>,
1921 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
1922 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
1923 sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
1924 sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
1928 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
1929 sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
1930 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
1931 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1932 sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
1933 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
1935 // Reciprocal approximations. Note that these typically require refinement
1936 // in order to obtain suitable precision.
1937 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
1938 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
1939 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
1940 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
1941 sse1_fp_unop_p<0x53, "rcp", X86frcp>,
1942 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
1944 // There is no f64 version of the reciprocal approximation instructions.
1946 //===----------------------------------------------------------------------===//
1947 // SSE 1 & 2 - Non-temporal stores
1948 //===----------------------------------------------------------------------===//
1950 let isAsmParserOnly = 1 in {
1951 def VMOVNTPSmr_Int : VPSI<0x2B, MRMDestMem, (outs),
1952 (ins i128mem:$dst, VR128:$src),
1953 "movntps\t{$src, $dst|$dst, $src}",
1954 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>, VEX;
1955 def VMOVNTPDmr_Int : VPDI<0x2B, MRMDestMem, (outs),
1956 (ins i128mem:$dst, VR128:$src),
1957 "movntpd\t{$src, $dst|$dst, $src}",
1958 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>, VEX;
1960 let ExeDomain = SSEPackedInt in
1961 def VMOVNTDQmr_Int : VPDI<0xE7, MRMDestMem, (outs),
1962 (ins f128mem:$dst, VR128:$src),
1963 "movntdq\t{$src, $dst|$dst, $src}",
1964 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>, VEX;
1966 let AddedComplexity = 400 in { // Prefer non-temporal versions
1967 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
1968 (ins f128mem:$dst, VR128:$src),
1969 "movntps\t{$src, $dst|$dst, $src}",
1970 [(alignednontemporalstore (v4f32 VR128:$src),
1971 addr:$dst)]>, VEX;
1972 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
1973 (ins f128mem:$dst, VR128:$src),
1974 "movntpd\t{$src, $dst|$dst, $src}",
1975 [(alignednontemporalstore (v2f64 VR128:$src),
1976 addr:$dst)]>, VEX;
1977 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
1978 (ins f128mem:$dst, VR128:$src),
1979 "movntdq\t{$src, $dst|$dst, $src}",
1980 [(alignednontemporalstore (v2f64 VR128:$src),
1981 addr:$dst)]>, VEX;
1982 let ExeDomain = SSEPackedInt in
1983 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
1984 (ins f128mem:$dst, VR128:$src),
1985 "movntdq\t{$src, $dst|$dst, $src}",
1986 [(alignednontemporalstore (v4f32 VR128:$src),
1987 addr:$dst)]>, VEX;
1989 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
1990 (ins f256mem:$dst, VR256:$src),
1991 "movntps\t{$src, $dst|$dst, $src}",
1992 [(alignednontemporalstore (v8f32 VR256:$src),
1993 addr:$dst)]>, VEX;
1994 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
1995 (ins f256mem:$dst, VR256:$src),
1996 "movntpd\t{$src, $dst|$dst, $src}",
1997 [(alignednontemporalstore (v4f64 VR256:$src),
1998 addr:$dst)]>, VEX;
1999 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
2000 (ins f256mem:$dst, VR256:$src),
2001 "movntdq\t{$src, $dst|$dst, $src}",
2002 [(alignednontemporalstore (v4f64 VR256:$src),
2003 addr:$dst)]>, VEX;
2004 let ExeDomain = SSEPackedInt in
2005 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
2006 (ins f256mem:$dst, VR256:$src),
2007 "movntdq\t{$src, $dst|$dst, $src}",
2008 [(alignednontemporalstore (v8f32 VR256:$src),
2009 addr:$dst)]>, VEX;
2010 }
2011 }
2013 def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
2014 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
2015 def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
2016 (VMOVNTPDYmr addr:$dst, VR256:$src)>;
2017 def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
2018 (VMOVNTPSYmr addr:$dst, VR256:$src)>;
2020 def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2021 "movntps\t{$src, $dst|$dst, $src}",
2022 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
2023 def MOVNTPDmr_Int : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2024 "movntpd\t{$src, $dst|$dst, $src}",
2025 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
2027 let ExeDomain = SSEPackedInt in
2028 def MOVNTDQmr_Int : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2029 "movntdq\t{$src, $dst|$dst, $src}",
2030 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
2032 let AddedComplexity = 400 in { // Prefer non-temporal versions
2033 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2034 "movntps\t{$src, $dst|$dst, $src}",
2035 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2036 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2037 "movntpd\t{$src, $dst|$dst, $src}",
2038 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
2040 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2041 "movntdq\t{$src, $dst|$dst, $src}",
2042 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2044 let ExeDomain = SSEPackedInt in
2045 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2046 "movntdq\t{$src, $dst|$dst, $src}",
2047 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2049 // There is no AVX form for instructions below this point
2050 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2051 "movnti\t{$src, $dst|$dst, $src}",
2052 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2053 TB, Requires<[HasSSE2]>;
2055 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2056 "movnti\t{$src, $dst|$dst, $src}",
2057 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2058 TB, Requires<[HasSSE2]>;
2061 def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2062 "movnti\t{$src, $dst|$dst, $src}",
2063 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
2064 TB, Requires<[HasSSE2]>;
2066 //===----------------------------------------------------------------------===//
2067 // SSE 1 & 2 - Misc Instructions (No AVX form)
2068 //===----------------------------------------------------------------------===//
2070 // Prefetch intrinsic.
2071 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2072 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
2073 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2074 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
2075 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2076 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
2077 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2078 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
2080 // Load, store, and memory fence
2081 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2082 TB, Requires<[HasSSE1]>;
2083 def : Pat<(X86SFence), (SFENCE)>;
2085 // Alias instructions that map zero vector to pxor / xorp* for sse.
2086 // We set canFoldAsLoad because this can be converted to a constant-pool
2087 // load of an all-zeros value if folding it would be beneficial.
2088 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2089 // JIT implementation; it does not expand the instructions below like
2090 // X86MCInstLower does.
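// Illustrative note (assumed): these defs materialize as the usual zeroing
// idioms, e.g. "xorps %xmm0, %xmm0" / "pxor %xmm0, %xmm0", which hardware
// recognizes as dependency-breaking; that is why the block below marks them
// isReMaterializable and isAsCheapAsAMove.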
2091 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2092 isCodeGenOnly = 1 in {
2093 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2094 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
2095 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2096 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
2097 let ExeDomain = SSEPackedInt in
2098 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2099 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2102 // The same as done above but for AVX. The 128-bit versions are the
2103 // same, but re-encoded. The 256-bit versions do not support a PI form.
2104 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2105 // JIT implementation; it does not expand the instructions below like
2106 // X86MCInstLower does.
2107 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2108 isCodeGenOnly = 1, Predicates = [HasAVX] in {
2109 def AVX_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2110 [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
2111 def AVX_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2112 [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
2113 def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2114 [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
2115 def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2116 [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
2117 let ExeDomain = SSEPackedInt in
2118 def AVX_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2119 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2122 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
2123 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
2124 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
2126 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2127 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
2129 //===----------------------------------------------------------------------===//
2130 // SSE 1 & 2 - Load/Store XCSR register
2131 //===----------------------------------------------------------------------===//
2133 let isAsmParserOnly = 1 in {
2134 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2135 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2136 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2137 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2140 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2141 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2142 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2143 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2145 //===---------------------------------------------------------------------===//
2146 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2147 //===---------------------------------------------------------------------===//
2149 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2151 let isAsmParserOnly = 1 in {
2152 let neverHasSideEffects = 1 in {
2153 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2154 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2155 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2156 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2158 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2159 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2160 def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2161 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2163 let canFoldAsLoad = 1, mayLoad = 1 in {
2164 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2165 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2166 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2167 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2168 let Predicates = [HasAVX] in {
2169 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2170 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2171 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2172 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2176 let mayStore = 1 in {
2177 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2178 (ins i128mem:$dst, VR128:$src),
2179 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2180 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
2181 (ins i256mem:$dst, VR256:$src),
2182 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2183 let Predicates = [HasAVX] in {
2184 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2185 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2186 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
2187 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2192 let neverHasSideEffects = 1 in
2193 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2194 "movdqa\t{$src, $dst|$dst, $src}", []>;
2196 let canFoldAsLoad = 1, mayLoad = 1 in {
2197 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2198 "movdqa\t{$src, $dst|$dst, $src}",
2199 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2200 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2201 "movdqu\t{$src, $dst|$dst, $src}",
2202 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2203 XS, Requires<[HasSSE2]>;
2206 let mayStore = 1 in {
2207 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2208 "movdqa\t{$src, $dst|$dst, $src}",
2209 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2210 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2211 "movdqu\t{$src, $dst|$dst, $src}",
2212 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2213 XS, Requires<[HasSSE2]>;
2216 // Intrinsic forms of MOVDQU load and store
2217 let isAsmParserOnly = 1 in {
2218 let canFoldAsLoad = 1 in
2219 def VMOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2220 "vmovdqu\t{$src, $dst|$dst, $src}",
2221 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2222 XS, VEX, Requires<[HasAVX]>;
2223 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2224 "vmovdqu\t{$src, $dst|$dst, $src}",
2225 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2226 XS, VEX, Requires<[HasAVX]>;
2229 let canFoldAsLoad = 1 in
2230 def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2231 "movdqu\t{$src, $dst|$dst, $src}",
2232 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2233 XS, Requires<[HasSSE2]>;
2234 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2235 "movdqu\t{$src, $dst|$dst, $src}",
2236 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2237 XS, Requires<[HasSSE2]>;
2239 } // ExeDomain = SSEPackedInt
2241 def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
2242 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
2243 (VMOVDQUYmr addr:$dst, VR256:$src)>;
2245 //===---------------------------------------------------------------------===//
2246 // SSE2 - Packed Integer Arithmetic Instructions
2247 //===---------------------------------------------------------------------===//
2249 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2251 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
2252 bit IsCommutable = 0, bit Is2Addr = 1> {
2253 let isCommutable = IsCommutable in
2254 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2255 (ins VR128:$src1, VR128:$src2),
2257 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2258 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2259 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2260 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2261 (ins VR128:$src1, i128mem:$src2),
2263 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2264 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2265 [(set VR128:$dst, (IntId VR128:$src1,
2266 (bitconvert (memopv2i64 addr:$src2))))]>;
2269 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
2270 string OpcodeStr, Intrinsic IntId,
2271 Intrinsic IntId2, bit Is2Addr = 1> {
2272 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2273 (ins VR128:$src1, VR128:$src2),
2275 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2276 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2277 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2278 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2279 (ins VR128:$src1, i128mem:$src2),
2281 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2282 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2283 [(set VR128:$dst, (IntId VR128:$src1,
2284 (bitconvert (memopv2i64 addr:$src2))))]>;
2285 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
2286 (ins VR128:$src1, i32i8imm:$src2),
2288 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2289 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2290 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
2293 /// PDI_binop_rm - Simple SSE2 binary operator.
2294 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2295 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
2296 let isCommutable = IsCommutable in
2297 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2298 (ins VR128:$src1, VR128:$src2),
2300 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2301 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2302 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
2303 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2304 (ins VR128:$src1, i128mem:$src2),
2306 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2307 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2308 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
2309 (bitconvert (memopv2i64 addr:$src2)))))]>;
2312 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
2314 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
2315 /// to collapse (bitconvert VT to VT) into its operand.
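// Illustrative sketch of the problem (assumed): instantiating PDI_binop_rm
// with OpVT = v2i64 would yield a source pattern of the form
//   (set VR128:$dst, (v2i64 (OpNode VR128:$src1,
//                             (bitconvert (memopv2i64 addr:$src2)))))
// and tblgen does not fold the no-op (bitconvert v2i64 -> v2i64), so the
// pattern would not match a plain (memopv2i64 ...) operand.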
2317 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
2318 bit IsCommutable = 0, bit Is2Addr = 1> {
2319 let isCommutable = IsCommutable in
2320 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2321 (ins VR128:$src1, VR128:$src2),
2323 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2324 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2325 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
2326 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2327 (ins VR128:$src1, i128mem:$src2),
2329 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2330 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2331 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
2334 } // ExeDomain = SSEPackedInt
2336 // 128-bit Integer Arithmetic
2338 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2339 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
2340 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
2341 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
2342 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
2343 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
2344 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
2345 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
2346 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
2347 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
2350 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
2352 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
2354 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
2356 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
2358 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
2360 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
2362 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
2364 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
2366 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
2368 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
2370 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
2372 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
2374 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
2376 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
2378 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
2380 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
2382 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
2384 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
2386 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
2390 let Constraints = "$src1 = $dst" in {
2391 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
2392 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
2393 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
2394 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
2395 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
2396 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
2397 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
2398 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
2399 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
2402 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
2403 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
2404 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
2405 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
2406 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
2407 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
2408 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
2409 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
2410 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
2411 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
2412 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
2413 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
2414 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
2415 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
2416 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
2417 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
2418 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
2419 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
2420 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
2422 } // Constraints = "$src1 = $dst"
2424 //===---------------------------------------------------------------------===//
2425 // SSE2 - Packed Integer Logical Instructions
2426 //===---------------------------------------------------------------------===//
2428 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2429 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
2430 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
2432 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
2433 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
2435 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
2436 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
2439 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
2440 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
2442 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
2443 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
2445 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
2446 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
2449 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
2450 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
2452 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
2453 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
2456 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
2457 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
2458 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
2460 let ExeDomain = SSEPackedInt in {
2461 let neverHasSideEffects = 1 in {
2462 // 128-bit logical shifts.
2463 def VPSLLDQri : PDIi8<0x73, MRM7r,
2464 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2465 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2467 def VPSRLDQri : PDIi8<0x73, MRM3r,
2468 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2469 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2471 // PSRADQri doesn't exist in SSE[1-3].
2473 def VPANDNrr : PDI<0xDF, MRMSrcReg,
2474 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2475 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2476 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2477 VR128:$src2)))]>, VEX_4V;
2479 def VPANDNrm : PDI<0xDF, MRMSrcMem,
2480 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2481 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2482 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2483 (memopv2i64 addr:$src2))))]>,
2484 VEX_4V;
2485 }
2486 }
2488 let Constraints = "$src1 = $dst" in {
2489 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
2490 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
2491 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
2492 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
2493 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
2494 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
2496 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
2497 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
2498 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
2499 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
2500 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
2501 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
2503 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
2504 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
2505 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
2506 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
2508 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
2509 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
2510 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
2512 let ExeDomain = SSEPackedInt in {
2513 let neverHasSideEffects = 1 in {
2514 // 128-bit logical shifts.
2515 def PSLLDQri : PDIi8<0x73, MRM7r,
2516 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2517 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
2518 def PSRLDQri : PDIi8<0x73, MRM3r,
2519 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2520 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
2521 // PSRADQri doesn't exist in SSE[1-3].
2523 def PANDNrr : PDI<0xDF, MRMSrcReg,
2524 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2525 "pandn\t{$src2, $dst|$dst, $src2}",
2526 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2527 VR128:$src2)))]>;
2529 def PANDNrm : PDI<0xDF, MRMSrcMem,
2530 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2531 "pandn\t{$src2, $dst|$dst, $src2}",
2532 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2533 (memopv2i64 addr:$src2))))]>;
2535 } // Constraints = "$src1 = $dst"
2537 let Predicates = [HasAVX] in {
2538 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2539 (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2540 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2541 (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2542 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2543 (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
2544 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2545 (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
2546 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2547 (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2549 // Shift up / down and insert zeros.
2550 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2551 (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2552 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2553 (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
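// Note (assuming BYTE_imm is the bits-to-bytes (>> 3) transform defined with
// the X86 instruction fragments): X86vshl/X86vshr carry the shift amount in
// bits while PSLLDQ/PSRLDQ shift whole bytes, so e.g. a 16-bit shift of the
// full vector becomes "vpslldq $2, %xmm0, %xmm0".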
2556 let Predicates = [HasSSE2] in {
2557 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2558 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2559 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2560 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2561 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2562 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
2563 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2564 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
2565 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2566 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2568 // Shift up / down and insert zeros.
2569 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2570 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2571 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2572 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2575 //===---------------------------------------------------------------------===//
2576 // SSE2 - Packed Integer Comparison Instructions
2577 //===---------------------------------------------------------------------===//
2579 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2580 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
2581 0>, VEX_4V;
2582 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
2583 0>, VEX_4V;
2584 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
2585 0>, VEX_4V;
2586 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
2587 0>, VEX_4V;
2588 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
2589 0>, VEX_4V;
2590 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
2591 0>, VEX_4V;
2592 }
2594 let Constraints = "$src1 = $dst" in {
2595 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
2596 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
2597 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
2598 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
2599 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
2600 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
2601 } // Constraints = "$src1 = $dst"
2603 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2604 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
2605 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2606 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2607 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2608 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2609 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2610 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2611 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2612 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2613 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2614 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2616 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2617 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2618 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2619 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2620 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2621 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2622 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2623 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2624 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2625 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2626 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2627 (PCMPGTDrm VR128:$src1, addr:$src2)>;
2629 //===---------------------------------------------------------------------===//
2630 // SSE2 - Packed Integer Pack Instructions
2631 //===---------------------------------------------------------------------===//
2633 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2634 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
2635 0, 0>, VEX_4V;
2636 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
2637 0, 0>, VEX_4V;
2638 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
2639 0, 0>, VEX_4V;
2640 }
2642 let Constraints = "$src1 = $dst" in {
2643 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2644 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2645 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2646 } // Constraints = "$src1 = $dst"
2648 //===---------------------------------------------------------------------===//
2649 // SSE2 - Packed Integer Shuffle Instructions
2650 //===---------------------------------------------------------------------===//
2652 let ExeDomain = SSEPackedInt in {
2653 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
2655 def ri : Ii8<0x70, MRMSrcReg,
2656 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2657 !strconcat(OpcodeStr,
2658 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2659 [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
2661 def mi : Ii8<0x70, MRMSrcMem,
2662 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2663 !strconcat(OpcodeStr,
2664 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2665 [(set VR128:$dst, (vt (pshuf_frag:$src2
2666 (bc_frag (memopv2i64 addr:$src1)),
2669 } // ExeDomain = SSEPackedInt
2671 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2672 let AddedComplexity = 5 in
2673 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
2676 // SSE2 with ImmT == Imm8 and XS prefix.
2677 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
2680 // SSE2 with ImmT == Imm8 and XD prefix.
2681 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
2685 let Predicates = [HasSSE2] in {
2686 let AddedComplexity = 5 in
2687 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
2689 // SSE2 with ImmT == Imm8 and XS prefix.
2690 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
2692 // SSE2 with ImmT == Imm8 and XD prefix.
2693 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
2696 //===---------------------------------------------------------------------===//
2697 // SSE2 - Packed Integer Unpack Instructions
2698 //===---------------------------------------------------------------------===//
2700 let ExeDomain = SSEPackedInt in {
2701 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
2702 PatFrag unp_frag, PatFrag bc_frag, bit Is2Addr = 1> {
2703 def rr : PDI<opc, MRMSrcReg,
2704 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2706 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2707 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2708 [(set VR128:$dst, (vt (unp_frag VR128:$src1, VR128:$src2)))]>;
2709 def rm : PDI<opc, MRMSrcMem,
2710 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2712 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2713 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2714 [(set VR128:$dst, (unp_frag VR128:$src1,
2715 (bc_frag (memopv2i64
2719 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2720 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, unpckl, bc_v16i8,
2722 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, unpckl, bc_v8i16,
2724 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, unpckl, bc_v4i32,
2727 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2728 /// knew to collapse (bitconvert VT to VT) into its operand.
2729 def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2730 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2731 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2733 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>, VEX_4V;
2734 def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2735 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2736 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2738 (v2i64 (unpckl VR128:$src1,
2739 (memopv2i64 addr:$src2))))]>, VEX_4V;
2741 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, unpckh, bc_v16i8,
2743 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, unpckh, bc_v8i16,
2745 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, unpckh, bc_v4i32,
2748 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2749 /// knew to collapse (bitconvert VT to VT) into its operand.
2750 def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2751 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2752 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2754 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>, VEX_4V;
2755 def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2756 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2757 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2759 (v2i64 (unpckh VR128:$src1,
2760 (memopv2i64 addr:$src2))))]>, VEX_4V;
2763 let Constraints = "$src1 = $dst" in {
2764 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, unpckl, bc_v16i8>;
2765 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, unpckl, bc_v8i16>;
2766 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, unpckl, bc_v4i32>;
2768 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2769 /// knew to collapse (bitconvert VT to VT) into its operand.
2770 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2771 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2772 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2774 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>;
2775 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2776 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2777 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2779 (v2i64 (unpckl VR128:$src1,
2780 (memopv2i64 addr:$src2))))]>;
2782 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, unpckh, bc_v16i8>;
2783 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, unpckh, bc_v8i16>;
2784 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, unpckh, bc_v4i32>;
2786 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2787 /// knew to collapse (bitconvert VT to VT) into its operand.
2788 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2789 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2790 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2792 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>;
2793 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2794 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2795 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2797 (v2i64 (unpckh VR128:$src1,
2798 (memopv2i64 addr:$src2))))]>;
2801 } // ExeDomain = SSEPackedInt
2803 //===---------------------------------------------------------------------===//
2804 // SSE2 - Packed Integer Extract and Insert
2805 //===---------------------------------------------------------------------===//
2807 let ExeDomain = SSEPackedInt in {
2808 multiclass sse2_pinsrw<bit Is2Addr = 1> {
2809 def rri : Ii8<0xC4, MRMSrcReg,
2810 (outs VR128:$dst), (ins VR128:$src1,
2811 GR32:$src2, i32i8imm:$src3),
2813 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2814 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2816 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
2817 def rmi : Ii8<0xC4, MRMSrcMem,
2818 (outs VR128:$dst), (ins VR128:$src1,
2819 i16mem:$src2, i32i8imm:$src3),
2821 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2822 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2824 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2829 let isAsmParserOnly = 1, Predicates = [HasAVX] in
2830 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
2831 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2832 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2833 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2834 imm:$src2))]>, OpSize, VEX;
2835 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2836 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2837 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2838 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2842 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
2843 defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
2844 def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
2845 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
2846 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2847 []>, OpSize, VEX_4V;
2850 let Constraints = "$src1 = $dst" in
2851 defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
2853 } // ExeDomain = SSEPackedInt
2855 //===---------------------------------------------------------------------===//
2856 // SSE2 - Packed Mask Creation
2857 //===---------------------------------------------------------------------===//
2859 let ExeDomain = SSEPackedInt in {
2861 let isAsmParserOnly = 1 in {
2862 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2863 "pmovmskb\t{$src, $dst|$dst, $src}",
2864 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
2865 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2866 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
2868 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2869 "pmovmskb\t{$src, $dst|$dst, $src}",
2870 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2872 } // ExeDomain = SSEPackedInt
2874 //===---------------------------------------------------------------------===//
2875 // SSE2 - Conditional Store
2876 //===---------------------------------------------------------------------===//
2878 let ExeDomain = SSEPackedInt in {
2880 let isAsmParserOnly = 1 in {
2882 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
2883 (ins VR128:$src, VR128:$mask),
2884 "maskmovdqu\t{$mask, $src|$src, $mask}",
2885 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
2887 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
2888 (ins VR128:$src, VR128:$mask),
2889 "maskmovdqu\t{$mask, $src|$src, $mask}",
2890 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
2894 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2895 "maskmovdqu\t{$mask, $src|$src, $mask}",
2896 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
2898 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2899 "maskmovdqu\t{$mask, $src|$src, $mask}",
2900 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
2902 } // ExeDomain = SSEPackedInt
2904 //===---------------------------------------------------------------------===//
2905 // SSE2 - Move Doubleword
2906 //===---------------------------------------------------------------------===//
2908 // Move Int Doubleword to Packed Doubleword Int
2909 let isAsmParserOnly = 1 in {
2910 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2911 "movd\t{$src, $dst|$dst, $src}",
2913 (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
2914 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2915 "movd\t{$src, $dst|$dst, $src}",
2917 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
2920 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2921 "movd\t{$src, $dst|$dst, $src}",
2923 (v4i32 (scalar_to_vector GR32:$src)))]>;
2924 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2925 "movd\t{$src, $dst|$dst, $src}",
2927 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2928 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2929 "mov{d|q}\t{$src, $dst|$dst, $src}",
2931 (v2i64 (scalar_to_vector GR64:$src)))]>;
2932 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
2933 "mov{d|q}\t{$src, $dst|$dst, $src}",
2934 [(set FR64:$dst, (bitconvert GR64:$src))]>;
2937 // Move Int Doubleword to Single Scalar
2938 let isAsmParserOnly = 1 in {
2939 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2940 "movd\t{$src, $dst|$dst, $src}",
2941 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
2943 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2944 "movd\t{$src, $dst|$dst, $src}",
2945 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
2948 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2949 "movd\t{$src, $dst|$dst, $src}",
2950 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2952 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2953 "movd\t{$src, $dst|$dst, $src}",
2954 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2956 // Move low Doubleword Int out of Packed Doubleword Int
2957 let isAsmParserOnly = 1 in {
2958 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2959 "movd\t{$src, $dst|$dst, $src}",
2960 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2962 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
2963 (ins i32mem:$dst, VR128:$src),
2964 "movd\t{$src, $dst|$dst, $src}",
2965 [(store (i32 (vector_extract (v4i32 VR128:$src),
2966 (iPTR 0))), addr:$dst)]>, VEX;
2968 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2969 "movd\t{$src, $dst|$dst, $src}",
2970 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2972 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
2973 "movd\t{$src, $dst|$dst, $src}",
2974 [(store (i32 (vector_extract (v4i32 VR128:$src),
2975 (iPTR 0))), addr:$dst)]>;
2977 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
2978 "mov{d|q}\t{$src, $dst|$dst, $src}",
2979 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
2981 def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
2982 "movq\t{$src, $dst|$dst, $src}",
2983 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
2985 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
2986 "mov{d|q}\t{$src, $dst|$dst, $src}",
2987 [(set GR64:$dst, (bitconvert FR64:$src))]>;
2988 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
2989 "movq\t{$src, $dst|$dst, $src}",
2990 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
2992 // Move Scalar Single FP to Doubleword Int
2993 let isAsmParserOnly = 1 in {
2994 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
2995 "movd\t{$src, $dst|$dst, $src}",
2996 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
2997 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
2998 "movd\t{$src, $dst|$dst, $src}",
2999 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
3001 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3002 "movd\t{$src, $dst|$dst, $src}",
3003 [(set GR32:$dst, (bitconvert FR32:$src))]>;
3004 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3005 "movd\t{$src, $dst|$dst, $src}",
3006 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
3008 // movd / movq to XMM register zero-extends
3009 let AddedComplexity = 15, isAsmParserOnly = 1 in {
3010 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3011 "movd\t{$src, $dst|$dst, $src}",
3012 [(set VR128:$dst, (v4i32 (X86vzmovl
3013 (v4i32 (scalar_to_vector GR32:$src)))))]>,
3015 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3016 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3017 [(set VR128:$dst, (v2i64 (X86vzmovl
3018 (v2i64 (scalar_to_vector GR64:$src)))))]>,
3021 let AddedComplexity = 15 in {
3022 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3023 "movd\t{$src, $dst|$dst, $src}",
3024 [(set VR128:$dst, (v4i32 (X86vzmovl
3025 (v4i32 (scalar_to_vector GR32:$src)))))]>;
3026 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3027 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3028 [(set VR128:$dst, (v2i64 (X86vzmovl
3029 (v2i64 (scalar_to_vector GR64:$src)))))]>;
3032 let AddedComplexity = 20 in {
3033 let isAsmParserOnly = 1 in
3034 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3035 "movd\t{$src, $dst|$dst, $src}",
3037 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3038 (loadi32 addr:$src))))))]>,
3040 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3041 "movd\t{$src, $dst|$dst, $src}",
3043 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3044 (loadi32 addr:$src))))))]>;
3046 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
3047 (MOVZDI2PDIrm addr:$src)>;
3048 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3049 (MOVZDI2PDIrm addr:$src)>;
3050 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3051 (MOVZDI2PDIrm addr:$src)>;
3054 //===---------------------------------------------------------------------===//
3055 // SSE2 - Move Quadword
3056 //===---------------------------------------------------------------------===//
3058 // Move Quadword Int to Packed Quadword Int
3059 let isAsmParserOnly = 1 in
3060 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3061 "vmovq\t{$src, $dst|$dst, $src}",
3063 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3064 VEX, Requires<[HasAVX]>;
3065 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3066 "movq\t{$src, $dst|$dst, $src}",
3068 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3069 Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
3071 // Move Packed Quadword Int to Quadword Int
3072 let isAsmParserOnly = 1 in
3073 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3074 "movq\t{$src, $dst|$dst, $src}",
3075 [(store (i64 (vector_extract (v2i64 VR128:$src),
3076 (iPTR 0))), addr:$dst)]>, VEX;
3077 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3078 "movq\t{$src, $dst|$dst, $src}",
3079 [(store (i64 (vector_extract (v2i64 VR128:$src),
3080 (iPTR 0))), addr:$dst)]>;
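// Extracting the low f64 of a v2f64 needs no instruction; it is just a
// subregister copy, expressed here with EXTRACT_SUBREG.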
3082 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
3083 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
3085 // Store / copy the lower 64 bits of an XMM register.
3086 let isAsmParserOnly = 1 in
3087 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3088 "movq\t{$src, $dst|$dst, $src}",
3089 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
3090 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3091 "movq\t{$src, $dst|$dst, $src}",
3092 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
3094 let AddedComplexity = 20, isAsmParserOnly = 1 in
3095 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3096 "vmovq\t{$src, $dst|$dst, $src}",
3098 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3099 (loadi64 addr:$src))))))]>,
3100 XS, VEX, Requires<[HasAVX]>;
3102 let AddedComplexity = 20 in {
3103 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3104 "movq\t{$src, $dst|$dst, $src}",
3106 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3107 (loadi64 addr:$src))))))]>,
3108 XS, Requires<[HasSSE2]>;
3110 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3111 (MOVZQI2PQIrm addr:$src)>;
3112 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
3113 (MOVZQI2PQIrm addr:$src)>;
3114 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
3117 // Move from XMM to XMM, clearing the upper 64 bits. Note: the IA-32
3118 // documentation is wrong here; movq xmm1, xmm2 does clear the high bits.
3119 let isAsmParserOnly = 1, AddedComplexity = 15 in
3120 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3121 "vmovq\t{$src, $dst|$dst, $src}",
3122 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3123 XS, VEX, Requires<[HasAVX]>;
3124 let AddedComplexity = 15 in
3125 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3126 "movq\t{$src, $dst|$dst, $src}",
3127 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3128 XS, Requires<[HasSSE2]>;
3130 let AddedComplexity = 20, isAsmParserOnly = 1 in
3131 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3132 "vmovq\t{$src, $dst|$dst, $src}",
3133 [(set VR128:$dst, (v2i64 (X86vzmovl
3134 (loadv2i64 addr:$src))))]>,
3135 XS, VEX, Requires<[HasAVX]>;
3136 let AddedComplexity = 20 in {
3137 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3138 "movq\t{$src, $dst|$dst, $src}",
3139 [(set VR128:$dst, (v2i64 (X86vzmovl
3140 (loadv2i64 addr:$src))))]>,
3141 XS, Requires<[HasSSE2]>;
3143 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
3144 (MOVZPQILo2PQIrm addr:$src)>;
3147 // Instructions to match in the assembler
3148 let isAsmParserOnly = 1 in {
3149 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3150 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3151 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3152 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3153 // Recognize "movd" with GR64 destination, but encode as a "movq"
3154 def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3155 "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3158 // Instructions for the disassembler
3159 // xr = XMM register
3160 // xm = mem64
3162 let isAsmParserOnly = 1, Predicates = [HasAVX] in
3163 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3164 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3165 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3166 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3168 //===---------------------------------------------------------------------===//
3169 // SSE2 - Misc Instructions
3170 //===---------------------------------------------------------------------===//
3173 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3174 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3175 TB, Requires<[HasSSE2]>;
3177 // Load, store, and memory fence
3178 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3179 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3180 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3181 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3182 def : Pat<(X86LFence), (LFENCE)>;
3183 def : Pat<(X86MFence), (MFENCE)>;
3186 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3187 // was introduced with SSE2, it's backward compatible.
3188 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
3190 // Alias instruction that maps an all-ones vector to pcmpeqd of a register
3191 // with itself. We set canFoldAsLoad because this can be converted to a
3192 // constant-pool load of an all-ones value if folding it would be beneficial.
3193 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3194 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3195 // FIXME: Change encoding to pseudo.
3196 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3197 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
3199 //===---------------------------------------------------------------------===//
3200 // SSE3 - Conversion Instructions
3201 //===---------------------------------------------------------------------===//
3203 // Convert Packed Double FP to Packed DW Integers
3204 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3205 // The assembler can recognize rr 256-bit instructions by seeing a ymm
3206 // register, but the same isn't true when using memory operands instead.
3207 // Provide other assembly rr and rm forms to address this explicitly.
3208 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3209 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3210 def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3211 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3214 def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3215 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3216 def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3217 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3220 def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3221 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
3222 def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
3223 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
3226 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3227 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3228 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3229 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3231 // Convert Packed DW Integers to Packed Double FP
3232 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3233 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3234 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3235 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3236 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3237 def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
3238 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3239 def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
3240 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3243 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3244 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3245 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3246 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3248 // AVX 256-bit register conversion intrinsics
3249 def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
3250 (VCVTDQ2PDYrr VR128:$src)>;
3251 def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
3252 (VCVTDQ2PDYrm addr:$src)>;
3254 def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
3255 (VCVTPD2DQYrr VR256:$src)>;
3256 def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
3257 (VCVTPD2DQYrm addr:$src)>;
3259 //===---------------------------------------------------------------------===//
3260 // SSE3 - Move Instructions
3261 //===---------------------------------------------------------------------===//
3263 // Replicate Single FP
3264 multiclass sse3_replicate_sfp<bits<8> op, PatFrag rep_frag, string OpcodeStr> {
3265 def rr : S3SI<op, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3266 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3267 [(set VR128:$dst, (v4f32 (rep_frag
3268 VR128:$src, (undef))))]>;
3269 def rm : S3SI<op, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3270 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3271 [(set VR128:$dst, (rep_frag
3272 (memopv4f32 addr:$src), (undef)))]>;
3275 multiclass sse3_replicate_sfp_y<bits<8> op, PatFrag rep_frag,
3277 def rr : S3SI<op, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3278 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3279 def rm : S3SI<op, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3280 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3283 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3284 // FIXME: Merge above classes when we have patterns for the ymm version
3285 defm VMOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "vmovshdup">, VEX;
3286 defm VMOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "vmovsldup">, VEX;
3287 defm VMOVSHDUPY : sse3_replicate_sfp_y<0x16, movshdup, "vmovshdup">, VEX;
3288 defm VMOVSLDUPY : sse3_replicate_sfp_y<0x12, movsldup, "vmovsldup">, VEX;
3290 defm MOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "movshdup">;
3291 defm MOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "movsldup">;
3293 // Replicate Double FP
3294 multiclass sse3_replicate_dfp<string OpcodeStr> {
3295 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3296 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3297 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
3298 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
3299 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3301 (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
3305 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
3306 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3307 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3309 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3310 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3314 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3315 // FIXME: Merge above classes when we have patterns for the ymm version
3316 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
3317 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
3319 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
3321 // Move Unaligned Integer
3322 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3323 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3324 "vlddqu\t{$src, $dst|$dst, $src}",
3325 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
3326 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3327 "vlddqu\t{$src, $dst|$dst, $src}",
3328 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
3330 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3331 "lddqu\t{$src, $dst|$dst, $src}",
3332 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
3334 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
3336 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3338 // Additional MOVDDUP patterns that fold loads.
3339 let AddedComplexity = 5 in {
3340 def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
3341 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3342 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3343 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3344 def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
3345 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3346 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
3347 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3350 // vector_shuffle v1, <undef> <1, 1, 3, 3>
3351 let AddedComplexity = 15 in
3352 def : Pat<(v4i32 (movshdup VR128:$src, (undef))),
3353 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3354 let AddedComplexity = 20 in
3355 def : Pat<(v4i32 (movshdup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3356 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
3358 // vector_shuffle v1, <undef> <0, 0, 2, 2>
3359 let AddedComplexity = 15 in
3360 def : Pat<(v4i32 (movsldup VR128:$src, (undef))),
3361 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3362 let AddedComplexity = 20 in
3363 def : Pat<(v4i32 (movsldup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3364 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
3366 //===---------------------------------------------------------------------===//
3367 // SSE3 - Arithmetic
3368 //===---------------------------------------------------------------------===//
3370 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
3371 X86MemOperand x86memop, bit Is2Addr = 1> {
3372 def rr : I<0xD0, MRMSrcReg,
3373 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3375 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3376 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3377 [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
3378 def rm : I<0xD0, MRMSrcMem,
3379 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3381 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3382 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3383 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
3386 let isAsmParserOnly = 1, Predicates = [HasAVX],
3387 ExeDomain = SSEPackedDouble in {
3388 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
3389 f128mem, 0>, XD, VEX_4V;
3390 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
3391 f128mem, 0>, OpSize, VEX_4V;
3392 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
3393 f256mem, 0>, XD, VEX_4V;
3394 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
3395 f256mem, 0>, OpSize, VEX_4V;
3397 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
3398 ExeDomain = SSEPackedDouble in {
3399 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
3401 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
3402 f128mem>, TB, OpSize;
3405 //===---------------------------------------------------------------------===//
3406 // SSE3 Instructions
3407 //===---------------------------------------------------------------------===//
3410 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3411 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3412 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3414 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3415 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3416 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3418 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3420 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3421 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3422 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3424 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3425 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3426 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3428 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3429 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3430 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3432 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3434 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3435 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3436 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3439 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3440 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
3441 int_x86_sse3_hadd_ps, 0>, VEX_4V;
3442 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
3443 int_x86_sse3_hadd_pd, 0>, VEX_4V;
3444 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
3445 int_x86_sse3_hsub_ps, 0>, VEX_4V;
3446 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
3447 int_x86_sse3_hsub_pd, 0>, VEX_4V;
3448 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
3449 int_x86_avx_hadd_ps_256, 0>, VEX_4V;
3450 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
3451 int_x86_avx_hadd_pd_256, 0>, VEX_4V;
3452 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
3453 int_x86_avx_hsub_ps_256, 0>, VEX_4V;
3454 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
3455 int_x86_avx_hsub_pd_256, 0>, VEX_4V;
3458 let Constraints = "$src1 = $dst" in {
3459 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
3460 int_x86_sse3_hadd_ps>;
3461 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
3462 int_x86_sse3_hadd_pd>;
3463 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
3464 int_x86_sse3_hsub_ps>;
3465 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
3466 int_x86_sse3_hsub_pd>;
3469 //===---------------------------------------------------------------------===//
3470 // SSSE3 - Packed Absolute Instructions
3471 //===---------------------------------------------------------------------===//
3474 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
3475 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
3476 PatFrag mem_frag128, Intrinsic IntId128> {
3477 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3479 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3480 [(set VR128:$dst, (IntId128 VR128:$src))]>,
3483 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3485 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3488 (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
3491 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3492 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
3493 int_x86_ssse3_pabs_b_128>, VEX;
3494 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
3495 int_x86_ssse3_pabs_w_128>, VEX;
3496 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
3497 int_x86_ssse3_pabs_d_128>, VEX;
3500 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
3501 int_x86_ssse3_pabs_b_128>;
3502 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
3503 int_x86_ssse3_pabs_w_128>;
3504 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
3505 int_x86_ssse3_pabs_d_128>;
3507 //===---------------------------------------------------------------------===//
3508 // SSSE3 - Packed Binary Operator Instructions
3509 //===---------------------------------------------------------------------===//
3511 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
3512 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
3513 PatFrag mem_frag128, Intrinsic IntId128,
3515 let isCommutable = 1 in
3516 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3517 (ins VR128:$src1, VR128:$src2),
3519 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3520 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3521 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3523 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3524 (ins VR128:$src1, i128mem:$src2),
3526 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3527 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3529 (IntId128 VR128:$src1,
3530 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
3533 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3534 let isCommutable = 0 in {
3535 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
3536 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
3537 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
3538 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
3539 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
3540 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
3541 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
3542 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
3543 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
3544 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
3545 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
3546 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
3547 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
3548 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
3549 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
3550 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
3551 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
3552 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
3553 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
3554 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
3555 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
3556 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
3558 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
3559 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
3562 // None of these have i8 immediate fields.
3563 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
3564 let isCommutable = 0 in {
3565 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
3566 int_x86_ssse3_phadd_w_128>;
3567 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
3568 int_x86_ssse3_phadd_d_128>;
3569 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
3570 int_x86_ssse3_phadd_sw_128>;
3571 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
3572 int_x86_ssse3_phsub_w_128>;
3573 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
3574 int_x86_ssse3_phsub_d_128>;
3575 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
3576 int_x86_ssse3_phsub_sw_128>;
3577 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
3578 int_x86_ssse3_pmadd_ub_sw_128>;
3579 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
3580 int_x86_ssse3_pshuf_b_128>;
3581 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
3582 int_x86_ssse3_psign_b_128>;
3583 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
3584 int_x86_ssse3_psign_w_128>;
3585 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
3586 int_x86_ssse3_psign_d_128>;
3588 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
3589 int_x86_ssse3_pmul_hr_sw_128>;
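// Lower X86pshufb shuffle nodes to PSHUFB, folding the shuffle mask into the
// memory operand when the mask comes from a load.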
3592 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
3593 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
3594 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
3595 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
3597 //===---------------------------------------------------------------------===//
3598 // SSSE3 - Packed Align Instruction Patterns
3599 //===---------------------------------------------------------------------===//
3601 multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
3602 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
3603 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
3605 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3607 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3609 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
3610 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
3612 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3614 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3618 let isAsmParserOnly = 1, Predicates = [HasAVX] in
3619 defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
3620 let Constraints = "$src1 = $dst" in
3621 defm PALIGN : ssse3_palign<"palignr">;
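// Note that the source operands are intentionally swapped when mapping the
// palign shuffle to PALIGNR128rr, and the shuffle mask is converted to a byte
// shift count with SHUFFLE_get_palign_imm.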
3623 let AddedComplexity = 5 in {
3624 def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
3625 (PALIGNR128rr VR128:$src2, VR128:$src1,
3626 (SHUFFLE_get_palign_imm VR128:$src3))>,
3627 Requires<[HasSSSE3]>;
3628 def : Pat<(v4f32 (palign:$src3 VR128:$src1, VR128:$src2)),
3629 (PALIGNR128rr VR128:$src2, VR128:$src1,
3630 (SHUFFLE_get_palign_imm VR128:$src3))>,
3631 Requires<[HasSSSE3]>;
3632 def : Pat<(v8i16 (palign:$src3 VR128:$src1, VR128:$src2)),
3633 (PALIGNR128rr VR128:$src2, VR128:$src1,
3634 (SHUFFLE_get_palign_imm VR128:$src3))>,
3635 Requires<[HasSSSE3]>;
3636 def : Pat<(v16i8 (palign:$src3 VR128:$src1, VR128:$src2)),
3637 (PALIGNR128rr VR128:$src2, VR128:$src1,
3638 (SHUFFLE_get_palign_imm VR128:$src3))>,
3639 Requires<[HasSSSE3]>;
3642 //===---------------------------------------------------------------------===//
3643 // SSSE3 Misc Instructions
3644 //===---------------------------------------------------------------------===//
3646 // Thread synchronization
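// monitor and mwait use fixed registers; the intrinsic operands below are the
// implicit EAX/ECX/EDX and ECX/EAX uses respectively.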
3647 def MONITOR : I<0x01, MRM_C8, (outs), (ins), "monitor",
3648 [(int_x86_sse3_monitor EAX, ECX, EDX)]>,TB, Requires<[HasSSE3]>;
3649 def MWAIT : I<0x01, MRM_C9, (outs), (ins), "mwait",
3650 [(int_x86_sse3_mwait ECX, EAX)]>, TB, Requires<[HasSSE3]>;
3652 //===---------------------------------------------------------------------===//
3653 // Non-Instruction Patterns
3654 //===---------------------------------------------------------------------===//
3656 // extload f32 -> f64. This matches load+fextend because we have a hack in
3657 // the isel (PreprocessForFPConvert) that can introduce loads after dag
3658 // combine.
3659 // Since these loads aren't folded into the fextend, we have to match it
3660 // explicitly here.
3661 let Predicates = [HasSSE2] in
3662 def : Pat<(fextend (loadf32 addr:$src)),
3663 (CVTSS2SDrm addr:$src)>;
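// Bitcasts between the 128-bit vector types are free: every pair of types
// below lives in the same VR128 register, so the patterns are no-ops.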
3666 let Predicates = [HasSSE2] in {
3667 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
3668 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
3669 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
3670 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
3671 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
3672 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
3673 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
3674 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
3675 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
3676 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
3677 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
3678 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
3679 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
3680 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
3681 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
3682 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
3683 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
3684 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
3685 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
3686 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
3687 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
3688 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
3689 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
3690 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
3691 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
3692 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
3693 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
3694 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
3695 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
3696 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
3699 // Move scalar to XMM zero-extended
3700 // movd to XMM register zero-extends
3701 let AddedComplexity = 15 in {
3702 // Zero a VR128, then do a MOVS{S|D} to the lower bits.
3703 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
3704 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
3705 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
3706 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
3707 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
3708 (MOVSSrr (v4f32 (V_SET0PS)),
3709 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
3710 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
3711 (MOVSSrr (v4i32 (V_SET0PI)),
3712 (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
3715 // Splat v2f64 / v2i64
3716 let AddedComplexity = 10 in {
3717 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
3718 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3719 def : Pat<(unpckh (v2f64 VR128:$src), (undef)),
3720 (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3721 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
3722 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3723 def : Pat<(unpckh (v2i64 VR128:$src), (undef)),
3724 (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3727 // Special unary SHUFPSrri case.
3728 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
3729 (SHUFPSrri VR128:$src1, VR128:$src1,
3730 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3731 let AddedComplexity = 5 in
3732 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3733 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3734 Requires<[HasSSE2]>;
3735 // Special unary SHUFPDrri case.
3736 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
3737 (SHUFPDrri VR128:$src1, VR128:$src1,
3738 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3739 Requires<[HasSSE2]>;
3740 // Special unary SHUFPDrri case.
3741 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
3742 (SHUFPDrri VR128:$src1, VR128:$src1,
3743 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3744 Requires<[HasSSE2]>;
3745 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3746 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3747 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3748 Requires<[HasSSE2]>;
3750 // Special binary v4i32 shuffle cases with SHUFPS.
3751 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
3752 (SHUFPSrri VR128:$src1, VR128:$src2,
3753 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3754 Requires<[HasSSE2]>;
3755 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)))),
3756 (SHUFPSrmi VR128:$src1, addr:$src2,
3757 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3758 Requires<[HasSSE2]>;
3759 // Special binary v2i64 shuffle cases using SHUFPDrri.
3760 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
3761 (SHUFPDrri VR128:$src1, VR128:$src2,
3762 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3763 Requires<[HasSSE2]>;
3765 // vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
3766 let AddedComplexity = 15 in {
3767 def : Pat<(v4i32 (unpckl_undef:$src2 VR128:$src, (undef))),
3768 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3769 Requires<[OptForSpeed, HasSSE2]>;
3770 def : Pat<(v4f32 (unpckl_undef:$src2 VR128:$src, (undef))),
3771 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3772 Requires<[OptForSpeed, HasSSE2]>;
3774 let AddedComplexity = 10 in {
3775 def : Pat<(v4f32 (unpckl_undef VR128:$src, (undef))),
3776 (UNPCKLPSrr VR128:$src, VR128:$src)>;
3777 def : Pat<(v16i8 (unpckl_undef VR128:$src, (undef))),
3778 (PUNPCKLBWrr VR128:$src, VR128:$src)>;
3779 def : Pat<(v8i16 (unpckl_undef VR128:$src, (undef))),
3780 (PUNPCKLWDrr VR128:$src, VR128:$src)>;
3781 def : Pat<(v4i32 (unpckl_undef VR128:$src, (undef))),
3782 (PUNPCKLDQrr VR128:$src, VR128:$src)>;
3785 // vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
3786 let AddedComplexity = 15 in {
3787 def : Pat<(v4i32 (unpckh_undef:$src2 VR128:$src, (undef))),
3788 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3789 Requires<[OptForSpeed, HasSSE2]>;
3790 def : Pat<(v4f32 (unpckh_undef:$src2 VR128:$src, (undef))),
3791 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3792 Requires<[OptForSpeed, HasSSE2]>;
3794 let AddedComplexity = 10 in {
3795 def : Pat<(v4f32 (unpckh_undef VR128:$src, (undef))),
3796 (UNPCKHPSrr VR128:$src, VR128:$src)>;
3797 def : Pat<(v16i8 (unpckh_undef VR128:$src, (undef))),
3798 (PUNPCKHBWrr VR128:$src, VR128:$src)>;
3799 def : Pat<(v8i16 (unpckh_undef VR128:$src, (undef))),
3800 (PUNPCKHWDrr VR128:$src, VR128:$src)>;
3801 def : Pat<(v4i32 (unpckh_undef VR128:$src, (undef))),
3802 (PUNPCKHDQrr VR128:$src, VR128:$src)>;
3805 let AddedComplexity = 20 in {
3806 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
3807 def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
3808 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
3810 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
3811 def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
3812 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
3814 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
3815 def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
3816 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3817 def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
3818 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3821 let AddedComplexity = 20 in {
3822 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
3823 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
3824 (MOVLPSrm VR128:$src1, addr:$src2)>;
3825 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
3826 (MOVLPDrm VR128:$src1, addr:$src2)>;
3827 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
3828 (MOVLPSrm VR128:$src1, addr:$src2)>;
3829 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
3830 (MOVLPDrm VR128:$src1, addr:$src2)>;
3833 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
3834 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3835 (MOVLPSmr addr:$src1, VR128:$src2)>;
3836 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3837 (MOVLPDmr addr:$src1, VR128:$src2)>;
3838 def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
3840 (MOVLPSmr addr:$src1, VR128:$src2)>;
3841 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3842 (MOVLPDmr addr:$src1, VR128:$src2)>;
3844 let AddedComplexity = 15 in {
3845 // Setting the lowest element in the vector.
3846 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
3847 (MOVSSrr (v4i32 VR128:$src1),
3848 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
3849 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
3850 (MOVSDrr (v2i64 VR128:$src1),
3851 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
3853 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
3854 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
3855 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3856 Requires<[HasSSE2]>;
3857 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
3858 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3859 Requires<[HasSSE2]>;
3862 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
3863 // fall back to this for SSE1)
3864 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
3865 (SHUFPSrri VR128:$src2, VR128:$src1,
3866 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3868 // Set lowest element and zero upper elements.
3869 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
3870 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
3872 // Some special case pandn patterns.
3873 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3875 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3876 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3878 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3879 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3881 (PANDNrr VR128:$src1, VR128:$src2)>, Requires<[HasSSE2]>;
3883 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v4i32 immAllOnesV))),
3884 (memop addr:$src2))),
3885 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3886 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v8i16 immAllOnesV))),
3887 (memop addr:$src2))),
3888 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3889 def : Pat<(v2i64 (and (xor VR128:$src1, (bc_v2i64 (v16i8 immAllOnesV))),
3890 (memop addr:$src2))),
3891 (PANDNrm VR128:$src1, addr:$src2)>, Requires<[HasSSE2]>;
3893 // vector -> vector casts
3894 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
3895 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
3896 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
3897 (CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
3899 // Use movaps / movups for SSE integer load / store (one byte shorter; no 0x66 prefix byte).
3900 let Predicates = [HasSSE1] in {
3901 def : Pat<(alignedloadv4i32 addr:$src),
3902 (MOVAPSrm addr:$src)>;
3903 def : Pat<(loadv4i32 addr:$src),
3904 (MOVUPSrm addr:$src)>;
3905 def : Pat<(alignedloadv2i64 addr:$src),
3906 (MOVAPSrm addr:$src)>;
3907 def : Pat<(loadv2i64 addr:$src),
3908 (MOVUPSrm addr:$src)>;
3910 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3911 (MOVAPSmr addr:$dst, VR128:$src)>;
3912 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3913 (MOVAPSmr addr:$dst, VR128:$src)>;
3914 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3915 (MOVAPSmr addr:$dst, VR128:$src)>;
3916 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3917 (MOVAPSmr addr:$dst, VR128:$src)>;
3918 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3919 (MOVUPSmr addr:$dst, VR128:$src)>;
3920 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3921 (MOVUPSmr addr:$dst, VR128:$src)>;
3922 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3923 (MOVUPSmr addr:$dst, VR128:$src)>;
3924 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3925 (MOVUPSmr addr:$dst, VR128:$src)>;
3928 // Use vmovaps/vmovups for AVX 128-bit integer load/store (one byte shorter).
3929 let Predicates = [HasAVX] in {
3930 def : Pat<(alignedloadv4i32 addr:$src),
3931 (VMOVAPSrm addr:$src)>;
3932 def : Pat<(loadv4i32 addr:$src),
3933 (VMOVUPSrm addr:$src)>;
3934 def : Pat<(alignedloadv2i64 addr:$src),
3935 (VMOVAPSrm addr:$src)>;
3936 def : Pat<(loadv2i64 addr:$src),
3937 (VMOVUPSrm addr:$src)>;
3939 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3940 (VMOVAPSmr addr:$dst, VR128:$src)>;
3941 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3942 (VMOVAPSmr addr:$dst, VR128:$src)>;
3943 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3944 (VMOVAPSmr addr:$dst, VR128:$src)>;
3945 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3946 (VMOVAPSmr addr:$dst, VR128:$src)>;
3947 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3948 (VMOVUPSmr addr:$dst, VR128:$src)>;
3949 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3950 (VMOVUPSmr addr:$dst, VR128:$src)>;
3951 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3952 (VMOVUPSmr addr:$dst, VR128:$src)>;
3953 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3954 (VMOVUPSmr addr:$dst, VR128:$src)>;
3957 //===----------------------------------------------------------------------===//
3958 // SSE4.1 - Packed Move with Sign/Zero Extend
3959 //===----------------------------------------------------------------------===//
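// An informal sketch of the semantics (not a normative description): the
// pmovsx* / pmovzx* forms widen the low elements of the source, e.g.
// pmovsxbw sign-extends the low 8 bytes to 8 words and pmovzxdq
// zero-extends the low 2 dwords to 2 qwords. The memory forms below
// therefore only need to load 8, 4, or 2 bytes rather than a full vector.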
3961 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3962 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3963 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3964 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3966 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3967 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3969 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
3973 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
3974 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
3976 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
3978 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
3980 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
3982 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
3984 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
3988 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
3989 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
3990 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
3991 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
3992 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
3993 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
3995 // Common patterns involving scalar load.
3996 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
3997 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
3998 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
3999 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4001 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
4002 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4003 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
4004 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4006 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
4007 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4008 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
4009 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4011 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
4012 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4013 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
4014 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4016 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
4017 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4018 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
4019 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4021 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
4022 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4023 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
4024 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4027 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4028 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4029 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4030 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4032 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4033 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4035 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
4039 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4040 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
4042 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
4044 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
4046 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
4050 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
4051 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
4052 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
4053 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
4055 // Common patterns involving scalar load
4056 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
4057 (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
4058 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
4059 (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
4061 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
4062 (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
4063 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
4064 (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
4067 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4068 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4069 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4070 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4072 // Expecting an i16 load, any-extended to an i32 value.
4073 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
4074 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4075 [(set VR128:$dst, (IntId (bitconvert
4076 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
4080 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4081 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
4083 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
4086 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
4087 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
4089 // Common patterns involving scalar load
4090 def : Pat<(int_x86_sse41_pmovsxbq
4091 (bitconvert (v4i32 (X86vzmovl
4092 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4093 (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
4095 def : Pat<(int_x86_sse41_pmovzxbq
4096 (bitconvert (v4i32 (X86vzmovl
4097 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4098 (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
4100 //===----------------------------------------------------------------------===//
4101 // SSE4.1 - Extract Instructions
4102 //===----------------------------------------------------------------------===//
4104 /// SS41I_extract8 - SSE 4.1 extract 8 bits to a 32-bit reg or 8-bit mem
4105 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
4106 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4107 (ins VR128:$src1, i32i8imm:$src2),
4108 !strconcat(OpcodeStr,
4109 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4110 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
4112 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4113 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
4114 !strconcat(OpcodeStr,
4115 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4118 // There's an AssertZext in the way of writing the store pattern
4119 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4122 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4123 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
4124 def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
4125 (ins VR128:$src1, i32i8imm:$src2),
4126 "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
4129 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
4132 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
4133 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
4134 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4135 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
4136 !strconcat(OpcodeStr,
4137 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4140 // There's an AssertZext in the way of writing the store pattern
4141 // (store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))), addr:$dst)
4144 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4145 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
4147 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
4150 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4151 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
4152 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4153 (ins VR128:$src1, i32i8imm:$src2),
4154 !strconcat(OpcodeStr,
4155 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4157 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
4158 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4159 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
4160 !strconcat(OpcodeStr,
4161 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4162 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
4163 addr:$dst)]>, OpSize;
4166 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4167 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
4169 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
4171 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
4172 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
4173 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
4174 (ins VR128:$src1, i32i8imm:$src2),
4175 !strconcat(OpcodeStr,
4176 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4178 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
4179 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4180 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
4181 !strconcat(OpcodeStr,
4182 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4183 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
4184 addr:$dst)]>, OpSize, REX_W;
4187 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4188 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
4190 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
4192 /// SS41I_extractf32 - SSE 4.1 extract a 32-bit fp value to an int reg or memory
4194 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
4195 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4196 (ins VR128:$src1, i32i8imm:$src2),
4197 !strconcat(OpcodeStr,
4198 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4200 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
4202 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4203 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
4204 !strconcat(OpcodeStr,
4205 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4206 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
4207 addr:$dst)]>, OpSize;
4210 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4211 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
4212 def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
4213 (ins VR128:$src1, i32i8imm:$src2),
4214 "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
4217 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
4219 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
4220 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
4223 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
4224 Requires<[HasSSE41]>;
4226 //===----------------------------------------------------------------------===//
4227 // SSE4.1 - Insert Instructions
4228 //===----------------------------------------------------------------------===//
4230 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
4231 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4232 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4234 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4236 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4238 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
4239 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4240 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
4242 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4244 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4246 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
4247 imm:$src3))]>, OpSize;
4250 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4251 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
4252 let Constraints = "$src1 = $dst" in
4253 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
4255 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
4256 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4257 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4259 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4261 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4263 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
4265 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4266 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
4268 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4270 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4272 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
4273 imm:$src3)))]>, OpSize;
4276 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4277 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
4278 let Constraints = "$src1 = $dst" in
4279 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
4281 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
4282 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4283 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4285 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4287 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4289 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
4291 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4292 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
4294 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4296 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4298 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
4299 imm:$src3)))]>, OpSize;
4302 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4303 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
4304 let Constraints = "$src1 = $dst" in
4305 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
4307 // insertps has a few different modes. The first two below are optimized
4308 // inserts that won't zero arbitrary elements in the destination vector; the
4309 // one matching the intrinsic may also zero arbitrary elements in the target
4310 // vector.
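// As an informal reminder of the immediate layout (the Intel SDM is the
// authoritative reference): imm[7:6] selects the source element (register
// form only), imm[5:4] selects the destination element, and imm[3:0] is a
// zero mask applied to the result.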
4311 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
4312 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4313 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4315 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4317 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4319 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
4321 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4322 (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
4324 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4326 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4328 (X86insrtps VR128:$src1,
4329 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
4330 imm:$src3))]>, OpSize;
4333 let Constraints = "$src1 = $dst" in
4334 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
4335 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4336 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
4338 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4339 (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4341 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4342 (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4343 Requires<[HasSSE41]>;
4345 //===----------------------------------------------------------------------===//
4346 // SSE4.1 - Round Instructions
4347 //===----------------------------------------------------------------------===//
4349 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
4350 X86MemOperand x86memop, RegisterClass RC,
4351 PatFrag mem_frag32, PatFrag mem_frag64,
4352 Intrinsic V4F32Int, Intrinsic V2F64Int> {
4354 // Vector intrinsic operation, reg
4355 def PSr : SS4AIi8<opcps, MRMSrcReg,
4356 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4357 !strconcat(OpcodeStr,
4358 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4359 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
4362 // Vector intrinsic operation, mem
4363 def PSm : Ii8<opcps, MRMSrcMem,
4364 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4365 !strconcat(OpcodeStr,
4366 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4368 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
4370 Requires<[HasSSE41]>;
4372 // Vector intrinsic operation, reg
4373 def PDr : SS4AIi8<opcpd, MRMSrcReg,
4374 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4375 !strconcat(OpcodeStr,
4376 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4377 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
4380 // Vector intrinsic operation, mem
4381 def PDm : SS4AIi8<opcpd, MRMSrcMem,
4382 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4383 !strconcat(OpcodeStr,
4384 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4386 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
4390 multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
4391 RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
4393 // Vector intrinsic operation, reg
4394 def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
4395 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4396 !strconcat(OpcodeStr,
4397 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4400 // Vector intrinsic operation, mem
4401 def PSm_AVX : Ii8<opcps, MRMSrcMem,
4402 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4403 !strconcat(OpcodeStr,
4404 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4405 []>, TA, OpSize, Requires<[HasSSE41]>;
4407 // Vector intrinsic operation, reg
4408 def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
4409 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4410 !strconcat(OpcodeStr,
4411 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4414 // Vector intrinsic operation, mem
4415 def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
4416 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4417 !strconcat(OpcodeStr,
4418 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4422 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4425 Intrinsic F64Int, bit Is2Addr = 1> {
4426 // Intrinsic operation, reg.
4427 def SSr : SS4AIi8<opcss, MRMSrcReg,
4428 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4430 !strconcat(OpcodeStr,
4431 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4432 !strconcat(OpcodeStr,
4433 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4434 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4437 // Intrinsic operation, mem.
4438 def SSm : SS4AIi8<opcss, MRMSrcMem,
4439 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4441 !strconcat(OpcodeStr,
4442 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4443 !strconcat(OpcodeStr,
4444 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4446 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
4449 // Intrinsic operation, reg.
4450 def SDr : SS4AIi8<opcsd, MRMSrcReg,
4451 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4453 !strconcat(OpcodeStr,
4454 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4455 !strconcat(OpcodeStr,
4456 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4457 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4460 // Intrinsic operation, mem.
4461 def SDm : SS4AIi8<opcsd, MRMSrcMem,
4462 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4464 !strconcat(OpcodeStr,
4465 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4466 !strconcat(OpcodeStr,
4467 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4469 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
4473 multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
4475 // Intrinsic operation, reg.
4476 def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
4477 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4478 !strconcat(OpcodeStr,
4479 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4482 // Intrinsic operation, mem.
4483 def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
4484 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4485 !strconcat(OpcodeStr,
4486 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4489 // Intrinsic operation, reg.
4490 def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
4491 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4492 !strconcat(OpcodeStr,
4493 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4496 // Intrinsic operation, mem.
4497 def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
4498 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4499 !strconcat(OpcodeStr,
4500 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4504 // FP round - roundss, roundps, roundsd, roundpd
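// Informal sketch of the rounding-control immediate (see the Intel SDM):
// imm[1:0] selects the mode (00 nearest, 01 down, 10 up, 11 truncate),
// imm[2] set means use MXCSR.RC instead of imm[1:0], and imm[3] suppresses
// the precision (inexact) exception.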
4505 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4507 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
4508 memopv4f32, memopv2f64,
4509 int_x86_sse41_round_ps,
4510 int_x86_sse41_round_pd>, VEX;
4511 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
4512 memopv8f32, memopv4f64,
4513 int_x86_avx_round_ps_256,
4514 int_x86_avx_round_pd_256>, VEX;
4515 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
4516 int_x86_sse41_round_ss,
4517 int_x86_sse41_round_sd, 0>, VEX_4V;
4519 // Instructions for the assembler
4520 defm VROUND : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
4522 defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
4524 defm VROUND : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
4527 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
4528 memopv4f32, memopv2f64,
4529 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
4530 let Constraints = "$src1 = $dst" in
4531 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
4532 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
4534 //===----------------------------------------------------------------------===//
4535 // SSE4.1 - Packed Bit Test
4536 //===----------------------------------------------------------------------===//
4538 // PTEST - we lower to this in X86ISelLowering, primarily from the Intel
4539 // intrinsic that corresponds to it.
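// For example, a C-level _mm_testz_si128(a, b) is typically lowered to an
// X86ISD::PTEST node whose EFLAGS result feeds a setcc of ZF; the defs
// below are what that node eventually matches (a sketch of the usual path,
// not the only one).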
4540 let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in {
4541 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4542 "vptest\t{$src2, $src1|$src1, $src2}",
4543 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4545 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4546 "vptest\t{$src2, $src1|$src1, $src2}",
4547 [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4550 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
4551 "vptest\t{$src2, $src1|$src1, $src2}",
4552 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
4554 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
4555 "vptest\t{$src2, $src1|$src1, $src2}",
4556 [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
4560 let Defs = [EFLAGS] in {
4561 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4562 "ptest \t{$src2, $src1|$src1, $src2}",
4563 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4565 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4566 "ptest \t{$src2, $src1|$src1, $src2}",
4567 [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4571 // The bit test instructions below are AVX-only
4572 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
4573 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
4574 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
4575 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4576 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
4577 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
4578 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4579 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
4583 let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in {
4584 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
4585 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
4586 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
4587 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
4590 //===----------------------------------------------------------------------===//
4591 // SSE4.1 - Misc Instructions
4592 //===----------------------------------------------------------------------===//
4594 def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
4595 "popcnt{w}\t{$src, $dst|$dst, $src}", []>, OpSize, XS;
4597 def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
4598 "popcnt{w}\t{$src, $dst|$dst, $src}", []>, OpSize, XS;
4600 def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
4601 "popcnt{l}\t{$src, $dst|$dst, $src}", []>, XS;
4603 def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
4604 "popcnt{l}\t{$src, $dst|$dst, $src}", []>, XS;
4606 def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
4607 "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;
4609 def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
4610 "popcnt{q}\t{$src, $dst|$dst, $src}", []>, XS;
4614 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
4615 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
4616 Intrinsic IntId128> {
4617 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4619 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4620 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
4621 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4623 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4626 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
4629 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4630 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
4631 int_x86_sse41_phminposuw>, VEX;
4632 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
4633 int_x86_sse41_phminposuw>;
4635 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
4636 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
4637 Intrinsic IntId128, bit Is2Addr = 1> {
4638 let isCommutable = 1 in
4639 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4640 (ins VR128:$src1, VR128:$src2),
4642 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4643 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4644 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
4645 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4646 (ins VR128:$src1, i128mem:$src2),
4648 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4649 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4651 (IntId128 VR128:$src1,
4652 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4655 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4656 let isCommutable = 0 in
4657 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
4659 defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
4661 defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
4663 defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
4665 defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
4667 defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
4669 defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
4671 defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
4673 defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
4675 defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
4677 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
4681 let Constraints = "$src1 = $dst" in {
4682 let isCommutable = 0 in
4683 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
4684 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
4685 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
4686 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
4687 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
4688 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
4689 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
4690 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
4691 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
4692 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
4693 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
4696 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
4697 (PCMPEQQrr VR128:$src1, VR128:$src2)>;
4698 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
4699 (PCMPEQQrm VR128:$src1, addr:$src2)>;
4701 /// SS48I_binop_rm - Simple SSE41 binary operator.
4702 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4703 ValueType OpVT, bit Is2Addr = 1> {
4704 let isCommutable = 1 in
4705 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4706 (ins VR128:$src1, VR128:$src2),
4708 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4709 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4710 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
4712 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4713 (ins VR128:$src1, i128mem:$src2),
4715 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4716 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4717 [(set VR128:$dst, (OpNode VR128:$src1,
4718 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
4722 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4723 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
4724 let Constraints = "$src1 = $dst" in
4725 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
4727 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
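// Informal note on the immediate for the blend users of this class: bit i
// set takes result element i from $src2, clear keeps it from $src1. Other
// users (dpps, dppd, mpsadbw) interpret the immediate differently; see the
// Intel SDM.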
4728 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
4729 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
4730 X86MemOperand x86memop, bit Is2Addr = 1> {
4731 let isCommutable = 1 in
4732 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
4733 (ins RC:$src1, RC:$src2, i32i8imm:$src3),
4735 !strconcat(OpcodeStr,
4736 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4737 !strconcat(OpcodeStr,
4738 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4739 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
4741 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
4742 (ins RC:$src1, x86memop:$src2, i32i8imm:$src3),
4744 !strconcat(OpcodeStr,
4745 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4746 !strconcat(OpcodeStr,
4747 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4750 (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
4754 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4755 let isCommutable = 0 in {
4756 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
4757 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4758 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
4759 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4760 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
4761 int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4762 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
4763 int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4764 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
4765 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4766 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
4767 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4769 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
4770 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4771 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
4772 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4773 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
4774 VR256, memopv32i8, i256mem, 0>, VEX_4V;
4777 let Constraints = "$src1 = $dst" in {
4778 let isCommutable = 0 in {
4779 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
4780 VR128, memopv16i8, i128mem>;
4781 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
4782 VR128, memopv16i8, i128mem>;
4783 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
4784 VR128, memopv16i8, i128mem>;
4785 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
4786 VR128, memopv16i8, i128mem>;
4788 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
4789 VR128, memopv16i8, i128mem>;
4790 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
4791 VR128, memopv16i8, i128mem>;
4794 /// SS41I_quaternary_int_avx - AVX SSE 4.1 operations with 4 operands
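// Informal note: in the VEX encoding the fourth (mask) register operand is
// carried in the upper bits of an immediate byte, which is what the
// VEX_I8IMM modifier on the defs below requests.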
4795 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4796 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
4797 RegisterClass RC, X86MemOperand x86memop,
4798 PatFrag mem_frag, Intrinsic IntId> {
4799 def rr : I<opc, MRMSrcReg, (outs RC:$dst),
4800 (ins RC:$src1, RC:$src2, RC:$src3),
4801 !strconcat(OpcodeStr,
4802 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4803 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
4804 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4806 def rm : I<opc, MRMSrcMem, (outs RC:$dst),
4807 (ins RC:$src1, x86memop:$src2, RC:$src3),
4808 !strconcat(OpcodeStr,
4809 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4811 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
4813 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4817 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
4818 memopv16i8, int_x86_sse41_blendvpd>;
4819 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
4820 memopv16i8, int_x86_sse41_blendvps>;
4821 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
4822 memopv16i8, int_x86_sse41_pblendvb>;
4823 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
4824 memopv32i8, int_x86_avx_blendv_pd_256>;
4825 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
4826 memopv32i8, int_x86_avx_blendv_ps_256>;
4828 /// SS41I_ternary_int - SSE 4.1 ternary operator
4829 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
4830 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4831 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4832 (ins VR128:$src1, VR128:$src2),
4833 !strconcat(OpcodeStr,
4834 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4835 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
4838 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4839 (ins VR128:$src1, i128mem:$src2),
4840 !strconcat(OpcodeStr,
4841 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4844 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
4848 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
4849 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
4850 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
4852 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4853 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4854 "vmovntdqa\t{$src, $dst|$dst, $src}",
4855 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4857 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4858 "movntdqa\t{$src, $dst|$dst, $src}",
4859 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4862 //===----------------------------------------------------------------------===//
4863 // SSE4.2 - Compare Instructions
4864 //===----------------------------------------------------------------------===//
4866 /// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
4867 multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
4868 Intrinsic IntId128, bit Is2Addr = 1> {
4869 def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
4870 (ins VR128:$src1, VR128:$src2),
4872 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4873 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4874 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
4876 def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
4877 (ins VR128:$src1, i128mem:$src2),
4879 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4880 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4882 (IntId128 VR128:$src1,
4883 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4886 let isAsmParserOnly = 1, Predicates = [HasAVX] in
4887 defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
4889 let Constraints = "$src1 = $dst" in
4890 defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
4892 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
4893 (PCMPGTQrr VR128:$src1, VR128:$src2)>;
4894 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
4895 (PCMPGTQrm VR128:$src1, addr:$src2)>;
4897 //===----------------------------------------------------------------------===//
4898 // SSE4.2 - String/text Processing Instructions
4899 //===----------------------------------------------------------------------===//
4901 // Packed Compare Implicit Length Strings, Return Mask
4902 multiclass pseudo_pcmpistrm<string asm> {
4903 def REG : Ii8<0, Pseudo, (outs VR128:$dst),
4904 (ins VR128:$src1, VR128:$src2, i8imm:$src3), !strconcat(asm, "rr PSEUDO"),
4905 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
4907 def MEM : Ii8<0, Pseudo, (outs VR128:$dst),
4908 (ins VR128:$src1, i128mem:$src2, i8imm:$src3), !strconcat(asm, "rm PSEUDO"),
4909 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
4910 VR128:$src1, (load addr:$src2), imm:$src3))]>;
4913 let Defs = [EFLAGS], usesCustomInserter = 1 in {
4914 defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
4915 defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
4918 let Defs = [XMM0, EFLAGS], isAsmParserOnly = 1,
4919 Predicates = [HasAVX] in {
4920 def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4921 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4922 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4923 def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4924 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4925 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4928 let Defs = [XMM0, EFLAGS] in {
4929 def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4930 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4931 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4932 def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4933 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4934 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4937 // Packed Compare Explicit Length Strings, Return Mask
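// Informal reminder of the ISA contract for the explicit-length forms:
// EAX holds the length of the first operand and EDX the length of the
// second, which is why EAX and EDX appear as implicit uses below.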
4938 multiclass pseudo_pcmpestrm<string asm> {
4939 def REG : Ii8<0, Pseudo, (outs VR128:$dst),
4940 (ins VR128:$src1, VR128:$src3, i8imm:$src5), !strconcat(asm, "rr PSEUDO"),
4941 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4942 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
4943 def MEM : Ii8<0, Pseudo, (outs VR128:$dst),
4944 (ins VR128:$src1, i128mem:$src3, i8imm:$src5), !strconcat(asm, "rm PSEUDO"),
4945 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4946 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
4949 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
4950 defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
4951 defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
4954 let isAsmParserOnly = 1, Predicates = [HasAVX],
4955 Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4956 def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4957 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4958 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4959 def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4960 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4961 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4964 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4965 def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4966 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4967 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4968 def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4969 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4970 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4973 // Packed Compare Implicit Length Strings, Return Index
4974 let Defs = [ECX, EFLAGS] in {
4975 multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
4976 def rr : SS42AI<0x63, MRMSrcReg, (outs),
4977 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4978 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4979 [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
4980 (implicit EFLAGS)]>, OpSize;
4981 def rm : SS42AI<0x63, MRMSrcMem, (outs),
4982 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4983 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
4984 [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
4985 (implicit EFLAGS)]>, OpSize;
4989 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
4990 defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
4992 defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
4994 defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
4996 defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
4998 defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
5000 defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
5004 defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
5005 defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
5006 defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
5007 defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
5008 defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
5009 defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
5011 // Packed Compare Explicit Length Strings, Return Index
5012 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
5013 multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
5014 def rr : SS42AI<0x61, MRMSrcReg, (outs),
5015 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5016 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5017 [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
5018 (implicit EFLAGS)]>, OpSize;
5019 def rm : SS42AI<0x61, MRMSrcMem, (outs),
5020 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5021 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5023 (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
5024 (implicit EFLAGS)]>, OpSize;
5028 let isAsmParserOnly = 1, Predicates = [HasAVX] in {
5029 defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
5031 defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
5033 defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
5035 defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
5037 defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
5039 defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
5043 defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
5044 defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
5045 defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
5046 defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
5047 defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
5048 defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
5050 //===----------------------------------------------------------------------===//
5051 // SSE4.2 - CRC Instructions
5052 //===----------------------------------------------------------------------===//
5054 // No CRC instructions have AVX equivalents
5056 // CRC intrinsic instructions.
5057 // These come only in rr / rm forms; the variants differ only in operand size.
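// A minimal usage sketch in C (assuming <nmmintrin.h> and -msse4.2); the
// intrinsics map onto the rr / rm defs below:
//   uint32_t crc = 0;
//   crc = _mm_crc32_u8(crc, byte);    // crc32{b} -> CRC32r8 / CRC32m8
//   crc = _mm_crc32_u32(crc, dword);  // crc32{l} -> CRC32r32 / CRC32m32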
5059 let Constraints = "$src1 = $dst" in {
5060 def CRC32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
5061 (ins GR32:$src1, i8mem:$src2),
5062 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5064 (int_x86_sse42_crc32_8 GR32:$src1,
5065 (load addr:$src2)))]>;
5066 def CRC32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
5067 (ins GR32:$src1, GR8:$src2),
5068 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5070 (int_x86_sse42_crc32_8 GR32:$src1, GR8:$src2))]>;
5071 def CRC32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5072 (ins GR32:$src1, i16mem:$src2),
5073 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5075 (int_x86_sse42_crc32_16 GR32:$src1,
5076 (load addr:$src2)))]>,
5078 def CRC32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5079 (ins GR32:$src1, GR16:$src2),
5080 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5082 (int_x86_sse42_crc32_16 GR32:$src1, GR16:$src2))]>,
5084 def CRC32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5085 (ins GR32:$src1, i32mem:$src2),
5086 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5088 (int_x86_sse42_crc32_32 GR32:$src1,
5089 (load addr:$src2)))]>;
5090 def CRC32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5091 (ins GR32:$src1, GR32:$src2),
5092 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5094 (int_x86_sse42_crc32_32 GR32:$src1, GR32:$src2))]>;
5095 def CRC64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
5096 (ins GR64:$src1, i8mem:$src2),
5097 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5099 (int_x86_sse42_crc64_8 GR64:$src1,
5100 (load addr:$src2)))]>,
5102 def CRC64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
5103 (ins GR64:$src1, GR8:$src2),
5104 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5106 (int_x86_sse42_crc64_8 GR64:$src1, GR8:$src2))]>,
5108 def CRC64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
5109 (ins GR64:$src1, i64mem:$src2),
5110 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5112 (int_x86_sse42_crc64_64 GR64:$src1,
5113 (load addr:$src2)))]>,
5115 def CRC64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
5116 (ins GR64:$src1, GR64:$src2),
5117 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5119 (int_x86_sse42_crc64_64 GR64:$src1, GR64:$src2))]>,
5123 //===----------------------------------------------------------------------===//
5124 // AES-NI Instructions
5125 //===----------------------------------------------------------------------===//
5127 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
5128 Intrinsic IntId128, bit Is2Addr = 1> {
5129 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
5130 (ins VR128:$src1, VR128:$src2),
5132 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5133 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5134 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5136 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
5137 (ins VR128:$src1, i128mem:$src2),
5139 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5140 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5142 (IntId128 VR128:$src1,
5143 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5146 // Perform One Round of an AES Encryption/Decryption Flow
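// Informal summary (see the Intel SDM): aesenc computes
// MixColumns(ShiftRows(SubBytes(state))) xor roundkey, aesenclast is the
// same without MixColumns, and aesdec / aesdeclast are the corresponding
// inverse-round operations.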
5147 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5148 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
5149 int_x86_aesni_aesenc, 0>, VEX_4V;
5150 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
5151 int_x86_aesni_aesenclast, 0>, VEX_4V;
5152 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
5153 int_x86_aesni_aesdec, 0>, VEX_4V;
5154 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
5155 int_x86_aesni_aesdeclast, 0>, VEX_4V;
5158 let Constraints = "$src1 = $dst" in {
5159 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
5160 int_x86_aesni_aesenc>;
5161 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
5162 int_x86_aesni_aesenclast>;
5163 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
5164 int_x86_aesni_aesdec>;
5165 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
5166 int_x86_aesni_aesdeclast>;
5169 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
5170 (AESENCrr VR128:$src1, VR128:$src2)>;
5171 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
5172 (AESENCrm VR128:$src1, addr:$src2)>;
5173 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
5174 (AESENCLASTrr VR128:$src1, VR128:$src2)>;
5175 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
5176 (AESENCLASTrm VR128:$src1, addr:$src2)>;
5177 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
5178 (AESDECrr VR128:$src1, VR128:$src2)>;
5179 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
5180 (AESDECrm VR128:$src1, addr:$src2)>;
5181 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
5182 (AESDECLASTrr VR128:$src1, VR128:$src2)>;
5183 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
5184 (AESDECLASTrm VR128:$src1, addr:$src2)>;
5186 // Perform the AES InvMixColumn Transformation
5187 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5188 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5190 "vaesimc\t{$src1, $dst|$dst, $src1}",
5192 (int_x86_aesni_aesimc VR128:$src1))]>,
5194 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5195 (ins i128mem:$src1),
5196 "vaesimc\t{$src1, $dst|$dst, $src1}",
5198 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5201 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5203 "aesimc\t{$src1, $dst|$dst, $src1}",
5205 (int_x86_aesni_aesimc VR128:$src1))]>,
5207 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5208 (ins i128mem:$src1),
5209 "aesimc\t{$src1, $dst|$dst, $src1}",
5211 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5214 // AES Round Key Generation Assist
5215 let isAsmParserOnly = 1, Predicates = [HasAVX, HasAES] in {
5216 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5217 (ins VR128:$src1, i8imm:$src2),
5218 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5220 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5222 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5223 (ins i128mem:$src1, i8imm:$src2),
5224 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5226 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5230 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5231 (ins VR128:$src1, i8imm:$src2),
5232 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5234 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5236 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5237 (ins i128mem:$src1, i8imm:$src2),
5238 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5240 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5244 //===----------------------------------------------------------------------===//
5245 // CLMUL Instructions
5246 //===----------------------------------------------------------------------===//
5248 // Only the AVX versions of the CLMUL instructions are described here.
5250 // Carry-less Multiplication instructions
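// Informal note: vpclmulqdq carry-lessly multiplies (as GF(2) polynomials)
// one 64-bit half of each source, producing a 128-bit product; one imm8 bit
// per source selects which half, and the pseudo mnemonics further below
// spell out the four combinations.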
5251 let isAsmParserOnly = 1 in {
5252 def VPCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
5253 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5254 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5257 def VPCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
5258 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5259 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5263 multiclass avx_vpclmul<string asm> {
5264 def rr : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
5265 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5268 def rm : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
5269 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5272 defm VPCLMULHQHQDQ : avx_vpclmul<"vpclmulhqhqdq">;
5273 defm VPCLMULHQLQDQ : avx_vpclmul<"vpclmulhqlqdq">;
5274 defm VPCLMULLQHQDQ : avx_vpclmul<"vpclmullqhqdq">;
5275 defm VPCLMULLQLQDQ : avx_vpclmul<"vpclmullqlqdq">;
5277 } // isAsmParserOnly
5279 //===----------------------------------------------------------------------===//
5280 // AVX Instructions
5281 //===----------------------------------------------------------------------===//
5283 let isAsmParserOnly = 1 in {
5285 // Load from memory and broadcast to all elements of the destination operand
5286 class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
5287 X86MemOperand x86memop, Intrinsic Int> :
5288 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
5289 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5290 [(set RC:$dst, (Int addr:$src))]>, VEX;
5292 def VBROADCASTSS : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
5293 int_x86_avx_vbroadcastss>;
5294 def VBROADCASTSSY : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
5295 int_x86_avx_vbroadcastss_256>;
5296 def VBROADCASTSD : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
5297 int_x86_avx_vbroadcast_sd_256>;
5298 def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
5299 int_x86_avx_vbroadcastf128_pd_256>;
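
// Added note: vbroadcastss replicates a single f32 load into every element of
// the destination (four elements for VR128, eight for VR256); vbroadcastsd
// and vbroadcastf128 do the same for an f64 and a 128-bit value. Only the AVX
// intrinsic forms are matched here; an illustrative (redundant) pattern would
// look like
//   def : Pat<(int_x86_avx_vbroadcastss addr:$src), (VBROADCASTSS addr:$src)>;
// but avx_broadcast already embeds that pattern in each instruction def.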

// Insert packed floating-point values
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;

// Extract packed floating-point values
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;

// Conditional SIMD Packed Loads and Stores
multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
                          Intrinsic IntLd, Intrinsic IntLd256,
                          Intrinsic IntSt, Intrinsic IntSt256,
                          PatFrag pf128, PatFrag pf256> {
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, f128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
             VEX_4V;
  def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, f256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V;
  def mr  : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
}

defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
                                 int_x86_avx_maskload_ps,
                                 int_x86_avx_maskload_ps_256,
                                 int_x86_avx_maskstore_ps,
                                 int_x86_avx_maskstore_ps_256,
                                 memopv4f32, memopv8f32>;
defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
                                 int_x86_avx_maskload_pd,
                                 int_x86_avx_maskload_pd_256,
                                 int_x86_avx_maskstore_pd,
                                 int_x86_avx_maskstore_pd_256,
                                 memopv2f64, memopv4f64>;
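
// Added note: for vmaskmovps/vmaskmovpd the mask operand supplies one mask
// bit per element, taken from that element's sign (most significant) bit;
// masked-off elements of a load are zeroed and masked-off elements of a store
// leave memory untouched. Only the intrinsic forms above are matched; the
// pf128/pf256 PatFrag parameters are currently unused by the patterns.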

// Permute Floating-Point Values
multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
                      RegisterClass RC, X86MemOperand x86memop_f,
                      X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
                      Intrinsic IntVar, Intrinsic IntImm> {
  def rr  : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop_i:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;

  def ri  : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
  def mi  : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
             (ins x86memop_f:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
}

defm VPERMILPS  : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
                             memopv4f32, memopv4i32,
                             int_x86_avx_vpermilvar_ps,
                             int_x86_avx_vpermil_ps>;
defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
                             memopv8f32, memopv8i32,
                             int_x86_avx_vpermilvar_ps_256,
                             int_x86_avx_vpermil_ps_256>;
defm VPERMILPD  : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
                             memopv2f64, memopv2i64,
                             int_x86_avx_vpermilvar_pd,
                             int_x86_avx_vpermil_pd>;
defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
                             memopv4f64, memopv4i64,
                             int_x86_avx_vpermilvar_pd_256,
                             int_x86_avx_vpermil_pd_256>;

def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;

// Zero All YMM registers
def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
                 [(int_x86_avx_vzeroall)]>, VEX, VEX_L, Requires<[HasAVX]>;

// Zero Upper bits of YMM registers
def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
                   [(int_x86_avx_vzeroupper)]>, VEX, Requires<[HasAVX]>;
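
// Added note: vzeroall and vzeroupper share the 0x77 opcode; they are
// distinguished by the VEX.L bit (VEX_L above encodes L = 1, i.e. vzeroall,
// while vzeroupper is encoded with L = 0).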

} // isAsmParserOnly

def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;

def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;

def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
          (VBROADCASTF128 addr:$src)>;

def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;

def : Pat<(int_x86_avx_vperm2f128_ps_256
                  VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256
                  VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256
                  VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;

//===----------------------------------------------------------------------===//
// SSE Shuffle pattern fragments
//===----------------------------------------------------------------------===//

// This is part of a "work in progress" refactoring. The idea is that all
// vector shuffles are going to be translated into target-specific nodes and
// directly matched by the patterns below (which can be changed along the way).
// The AVX versions of some, but not all, of them are described here, and more
// should come in the near future.
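
// Added illustrative sketch (not an additional pattern): an IR shuffle such as
//   shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> <3, 2, 1, 0>
// may be lowered to a target node such as (v4f32 (X86PShufd %a, (i8 0x1B)))
// and is then matched directly by the PSHUFD patterns below, instead of going
// through the generic vector_shuffle matching.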

// Shuffle with PSHUFD instruction folding loads. The first two patterns match
// SSE2 loads, which are always promoted to v2i64. The last one should match
// the SSE1 case, where the only legal load is v4f32, but there is no PSHUFD
// in SSE1, so how did this ever work? Anyway, the pattern will remain here
// until we investigate further.
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
                            (i8 imm:$imm))),
          (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
                            (i8 imm:$imm))),
          (PSHUFDmi addr:$src1, imm:$imm)>;
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
                            (i8 imm:$imm))),
          (PSHUFDmi addr:$src1, imm:$imm)>; // FIXME: has this ever worked?

// Shuffle with PSHUFD instruction.
def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (PSHUFDri VR128:$src1, imm:$imm)>;

def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (PSHUFDri VR128:$src1, imm:$imm)>;

// Shuffle with SHUFPD instruction.
def : Pat<(v2f64 (X86Shufps VR128:$src1,
                            (memopv2f64 addr:$src2), (i8 imm:$imm))),
          (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Shufps VR128:$src1,
                            (memopv2f64 addr:$src2), (i8 imm:$imm))),
          (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;

def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;

// Shuffle with SHUFPS instruction.
def : Pat<(v4f32 (X86Shufps VR128:$src1,
                            (memopv4f32 addr:$src2), (i8 imm:$imm))),
          (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Shufps VR128:$src1,
                            (memopv4f32 addr:$src2), (i8 imm:$imm))),
          (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;

def : Pat<(v4i32 (X86Shufps VR128:$src1,
                            (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
          (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86Shufps VR128:$src1,
                            (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
          (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;

// Shuffle with MOVHLPS instruction
def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with MOVDDUP instruction
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v4f32 (memopv2f64 addr:$src))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v4f32 (memopv2f64 addr:$src))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (memopv2i64 addr:$src)),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (memopv2i64 addr:$src)),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v4i32 (memopv2i64 addr:$src))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v4i32 (memopv2i64 addr:$src))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v2f64
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
          (MOVDDUPrm addr:$src)>;

// Shuffle with UNPCKLPS
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
          (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
          (UNPCKLPSrm VR128:$src1, addr:$src2)>;

def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
          (VUNPCKLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
          (UNPCKLPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with UNPCKHPS
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
          (VUNPCKHPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
          (UNPCKHPSrm VR128:$src1, addr:$src2)>;

def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
          (VUNPCKHPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
          (UNPCKHPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with UNPCKLPD
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
          (VUNPCKLPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
          (UNPCKLPDrm VR128:$src1, addr:$src2)>;

def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
          (VUNPCKLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
          (UNPCKLPDrr VR128:$src1, VR128:$src2)>;

// Shuffle with UNPCKHPD
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
          (VUNPCKHPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
          (UNPCKHPDrm VR128:$src1, addr:$src2)>;

def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
          (VUNPCKHPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
          (UNPCKHPDrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKLBW
def : Pat<(v16i8 (X86Punpcklbw VR128:$src1,
                  (bc_v16i8 (memopv2i64 addr:$src2)))),
          (PUNPCKLBWrm VR128:$src1, addr:$src2)>;
def : Pat<(v16i8 (X86Punpcklbw VR128:$src1, VR128:$src2)),
          (PUNPCKLBWrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKLWD
def : Pat<(v8i16 (X86Punpcklwd VR128:$src1,
                  (bc_v8i16 (memopv2i64 addr:$src2)))),
          (PUNPCKLWDrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (X86Punpcklwd VR128:$src1, VR128:$src2)),
          (PUNPCKLWDrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKLDQ
def : Pat<(v4i32 (X86Punpckldq VR128:$src1,
                  (bc_v4i32 (memopv2i64 addr:$src2)))),
          (PUNPCKLDQrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86Punpckldq VR128:$src1, VR128:$src2)),
          (PUNPCKLDQrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKLQDQ
def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, (memopv2i64 addr:$src2))),
          (PUNPCKLQDQrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)),
          (PUNPCKLQDQrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKHBW
def : Pat<(v16i8 (X86Punpckhbw VR128:$src1,
                  (bc_v16i8 (memopv2i64 addr:$src2)))),
          (PUNPCKHBWrm VR128:$src1, addr:$src2)>;
def : Pat<(v16i8 (X86Punpckhbw VR128:$src1, VR128:$src2)),
          (PUNPCKHBWrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKHWD
def : Pat<(v8i16 (X86Punpckhwd VR128:$src1,
                  (bc_v8i16 (memopv2i64 addr:$src2)))),
          (PUNPCKHWDrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (X86Punpckhwd VR128:$src1, VR128:$src2)),
          (PUNPCKHWDrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKHDQ
def : Pat<(v4i32 (X86Punpckhdq VR128:$src1,
                  (bc_v4i32 (memopv2i64 addr:$src2)))),
          (PUNPCKHDQrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86Punpckhdq VR128:$src1, VR128:$src2)),
          (PUNPCKHDQrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKHQDQ
def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, (memopv2i64 addr:$src2))),
          (PUNPCKHQDQrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)),
          (PUNPCKHQDQrr VR128:$src1, VR128:$src2)>;

// Shuffle with MOVLHPS
def : Pat<(X86Movlhps VR128:$src1,
                  (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
          (MOVHPSrm VR128:$src1, addr:$src2)>;
def : Pat<(X86Movlhps VR128:$src1,
                  (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
          (MOVHPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
          (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;

// FIXME: Instead of X86Movddup, there should be an X86Movlhps here; the
// problem is during lowering, where it is not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(v2i64 (X86Movddup VR128:$src)),
          (MOVLHPSrr VR128:$src, VR128:$src)>;
def : Pat<(v4f32 (X86Movddup VR128:$src)),
          (MOVLHPSrr VR128:$src, VR128:$src)>;
def : Pat<(v2f64 (X86Movddup VR128:$src)),
          (UNPCKLPDrr VR128:$src, VR128:$src)>;

// Shuffle with MOVLHPD
def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
                  (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVHPDrm VR128:$src1, addr:$src2)>;

// FIXME: Instead of X86Unpcklpd, there should be an X86Movlhpd here; the
// problem is during lowering, where it is not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
                  (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVHPDrm VR128:$src1, addr:$src2)>;

// Shuffle with MOVSS
def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
          (MOVSSrr VR128:$src1, FR32:$src2)>;
def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4i32 VR128:$src1),
                   (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4f32 VR128:$src1),
                   (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;

// FIXME: Instead of an X86Movss there should be an X86Movlps here; the
// problem is during lowering, where it is not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(X86Movss VR128:$src1,
                  (bc_v4i32 (v2i64 (load addr:$src2)))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;

// Shuffle with MOVSD
def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
          (MOVSDrr VR128:$src1, FR64:$src2)>;
def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2i64 VR128:$src1),
                   (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2f64 VR128:$src1),
                   (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
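
// Added note: in the register-register MOVSS/MOVSD patterns above,
// EXTRACT_SUBREG with sub_ss/sub_sd re-interprets the low 32/64 bits of the
// second XMM operand as an FR32/FR64 value, since MOVSSrr/MOVSDrr take a
// scalar register as their second source operand.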

// Shuffle with MOVSHDUP
def : Pat<(v4i32 (X86Movshdup VR128:$src)),
          (MOVSHDUPrr VR128:$src)>;
def : Pat<(X86Movshdup (bc_v4i32 (memopv2i64 addr:$src))),
          (MOVSHDUPrm addr:$src)>;

def : Pat<(v4f32 (X86Movshdup VR128:$src)),
          (MOVSHDUPrr VR128:$src)>;
def : Pat<(X86Movshdup (memopv4f32 addr:$src)),
          (MOVSHDUPrm addr:$src)>;

// Shuffle with MOVSLDUP
def : Pat<(v4i32 (X86Movsldup VR128:$src)),
          (MOVSLDUPrr VR128:$src)>;
def : Pat<(X86Movsldup (bc_v4i32 (memopv2i64 addr:$src))),
          (MOVSLDUPrm addr:$src)>;

def : Pat<(v4f32 (X86Movsldup VR128:$src)),
          (MOVSLDUPrr VR128:$src)>;
def : Pat<(X86Movsldup (memopv4f32 addr:$src)),
          (MOVSLDUPrm addr:$src)>;

// Shuffle with PSHUFHW
def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
          (PSHUFHWri VR128:$src, imm:$imm)>;
def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
          (PSHUFHWmi addr:$src, imm:$imm)>;

// Shuffle with PSHUFLW
def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
          (PSHUFLWri VR128:$src, imm:$imm)>;
def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
          (PSHUFLWmi addr:$src, imm:$imm)>;

// Shuffle with PALIGN
def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;

// Shuffle with MOVLPS
def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(X86Movlps VR128:$src1,
                  (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;

// Shuffle with MOVLPD
def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2f64 (X86Movlpd VR128:$src1,
                  (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;

// Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD
def : Pat<(store (f64 (vector_extract
          (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))), addr:$dst),
          (MOVHPSmr addr:$dst, VR128:$src)>;
def : Pat<(store (f64 (vector_extract
          (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))), addr:$dst),
          (MOVHPDmr addr:$dst, VR128:$src)>;

def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v4i32 (X86Movlps
                  (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;

def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;