1 //====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions
11 // and the instruction properties that are needed for code generation, machine
12 // code emission, and analysis.
14 //===----------------------------------------------------------------------===//
17 //===----------------------------------------------------------------------===//
18 // SSE 1 & 2 Instruction Classes
19 //===----------------------------------------------------------------------===//
21 /// sse12_fp_scalar - SSE 1 & 2 scalar instruction class
22 multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
23 RegisterClass RC, X86MemOperand x86memop,
25 let isCommutable = 1 in {
26 def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
28 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
29 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
30 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
32 def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
34 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
35 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
36 [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
39 /// sse12_fp_scalar_int - SSE 1 & 2 scalar intrinsic instruction class
40 multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
41 string asm, string SSEVer, string FPSizeStr,
42 Operand memopr, ComplexPattern mem_cpat,
44 def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
46 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
47 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
48 [(set RC:$dst, (!cast<Intrinsic>(
49 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
50 RC:$src1, RC:$src2))]>;
51 def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
53 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
54 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
55 [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
56 SSEVer, "_", OpcodeStr, FPSizeStr))
57 RC:$src1, mem_cpat:$src2))]>;
60 /// sse12_fp_packed - SSE 1 & 2 packed instruction class
61 multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
62 RegisterClass RC, ValueType vt,
63 X86MemOperand x86memop, PatFrag mem_frag,
64 Domain d, bit Is2Addr = 1> {
65 let isCommutable = 1 in
66 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
68 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
69 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
70 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
72 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
74 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
75 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
76 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
79 /// sse12_fp_packed_logical_rm - SSE 1 & 2 packed logical instruction class
80 multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
81 string OpcodeStr, X86MemOperand x86memop,
82 list<dag> pat_rr, list<dag> pat_rm,
84 let isCommutable = 1 in
85 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
87 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
88 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
90 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
92 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
93 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
97 /// sse12_fp_packed_int - SSE 1 & 2 packed intrinsic instruction class
98 multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
99 string asm, string SSEVer, string FPSizeStr,
100 X86MemOperand x86memop, PatFrag mem_frag,
101 Domain d, bit Is2Addr = 1> {
102 def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
104 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
105 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
106 [(set RC:$dst, (!cast<Intrinsic>(
107 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
108 RC:$src1, RC:$src2))], d>;
109 def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
111 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
112 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
113 [(set RC:$dst, (!cast<Intrinsic>(
114 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
115 RC:$src1, (mem_frag addr:$src2)))], d>;
118 //===----------------------------------------------------------------------===//
119 // SSE 1 & 2 - Move Instructions
120 //===----------------------------------------------------------------------===//
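// sse12_move_rr - Insert a scalar into the low element of a 128-bit vector;
// the movl pattern keeps the remaining elements of $src1.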
122 class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
123 SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
124 [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;
126 // Loading from memory automatically zeros the upper bits.
127 class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
128 PatFrag mem_pat, string OpcodeStr> :
129 SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
130 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
131 [(set RC:$dst, (mem_pat addr:$src))]>;
133 // Move Instructions. Register-to-register movss/movsd is not used for FR32/64
134 // register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
135 // is used instead. Register-to-register movss/movsd is not modeled as an
136 // INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
137 // in terms of a copy, and, as just mentioned, we don't use movss/movsd for copies.
138 let isAsmParserOnly = 0 in {
139 def VMOVSSrr : sse12_move_rr<FR32, v4f32,
140 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
141 def VMOVSDrr : sse12_move_rr<FR64, v2f64,
142 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;
144 let canFoldAsLoad = 1, isReMaterializable = 1 in {
145 def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;
147 let AddedComplexity = 20 in
148 def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
152 let Constraints = "$src1 = $dst" in {
153 def MOVSSrr : sse12_move_rr<FR32, v4f32,
154 "movss\t{$src2, $dst|$dst, $src2}">, XS;
155 def MOVSDrr : sse12_move_rr<FR64, v2f64,
156 "movsd\t{$src2, $dst|$dst, $src2}">, XD;
159 let canFoldAsLoad = 1, isReMaterializable = 1 in {
160 def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;
162 let AddedComplexity = 20 in
163 def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
166 let AddedComplexity = 15 in {
167 // Extract the low 32-bit value from one vector and insert it into another.
168 def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
169 (MOVSSrr (v4f32 VR128:$src1),
170 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
171 // Extract the low 64-bit value from one vector and insert it into another.
172 def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
173 (MOVSDrr (v2f64 VR128:$src1),
174 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
177 // Implicitly promote a 32-bit scalar to a vector.
178 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
179 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
180 // Implicitly promote a 64-bit scalar to a vector.
181 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
182 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
183 // Implicitly promote a 32-bit scalar to a vector.
184 def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
185 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
186 // Implicitly promote a 64-bit scalar to a vector.
187 def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
188 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
190 let AddedComplexity = 20 in {
191 // MOVSSrm zeros the high parts of the register; represent this
192 // with SUBREG_TO_REG.
193 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
194 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
195 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
196 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
197 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
198 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
199 // MOVSDrm zeros the high parts of the register; represent this
200 // with SUBREG_TO_REG.
201 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
202 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
203 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
204 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
205 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
206 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
207 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
208 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
209 def : Pat<(v2f64 (X86vzload addr:$src)),
210 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
213 // Store scalar value to memory.
214 def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
215 "movss\t{$src, $dst|$dst, $src}",
216 [(store FR32:$src, addr:$dst)]>;
217 def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
218 "movsd\t{$src, $dst|$dst, $src}",
219 [(store FR64:$src, addr:$dst)]>;
221 let isAsmParserOnly = 0 in {
222 def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
223 "movss\t{$src, $dst|$dst, $src}",
224 [(store FR32:$src, addr:$dst)]>, XS, VEX;
225 def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
226 "movsd\t{$src, $dst|$dst, $src}",
227 [(store FR64:$src, addr:$dst)]>, XD, VEX;
230 // Extract and store.
231 def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
232 addr:$dst),
233 (MOVSSmr addr:$dst,
234 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
235 def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
236 addr:$dst),
237 (MOVSDmr addr:$dst,
238 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
240 // Move Aligned/Unaligned floating point values
241 multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
242 X86MemOperand x86memop, PatFrag ld_frag,
243 string asm, Domain d,
244 bit IsReMaterializable = 1> {
245 let neverHasSideEffects = 1 in
246 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
247 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
248 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
249 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
250 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
251 [(set RC:$dst, (ld_frag addr:$src))], d>;
254 let isAsmParserOnly = 0 in {
255 defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
256 "movaps", SSEPackedSingle>, VEX;
257 defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
258 "movapd", SSEPackedDouble>, OpSize, VEX;
259 defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
260 "movups", SSEPackedSingle>, VEX;
261 defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
262 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
264 defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
265 "movaps", SSEPackedSingle>, VEX;
266 defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
267 "movapd", SSEPackedDouble>, OpSize, VEX;
268 defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
269 "movups", SSEPackedSingle>, VEX;
270 defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
271 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
273 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
274 "movaps", SSEPackedSingle>, TB;
275 defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
276 "movapd", SSEPackedDouble>, TB, OpSize;
277 defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
278 "movups", SSEPackedSingle>, TB;
279 defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
280 "movupd", SSEPackedDouble, 0>, TB, OpSize;
282 let isAsmParserOnly = 0 in {
283 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
284 "movaps\t{$src, $dst|$dst, $src}",
285 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
286 def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
287 "movapd\t{$src, $dst|$dst, $src}",
288 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
289 def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
290 "movups\t{$src, $dst|$dst, $src}",
291 [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
292 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
293 "movupd\t{$src, $dst|$dst, $src}",
294 [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
295 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
296 "movaps\t{$src, $dst|$dst, $src}",
297 [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
298 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
299 "movapd\t{$src, $dst|$dst, $src}",
300 [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
301 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
302 "movups\t{$src, $dst|$dst, $src}",
303 [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
304 def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
305 "movupd\t{$src, $dst|$dst, $src}",
306 [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
309 def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
310 def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
311 (VMOVUPSYmr addr:$dst, VR256:$src)>;
313 def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
314 def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
315 (VMOVUPDYmr addr:$dst, VR256:$src)>;
317 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
318 "movaps\t{$src, $dst|$dst, $src}",
319 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
320 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
321 "movapd\t{$src, $dst|$dst, $src}",
322 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
323 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
324 "movups\t{$src, $dst|$dst, $src}",
325 [(store (v4f32 VR128:$src), addr:$dst)]>;
326 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
327 "movupd\t{$src, $dst|$dst, $src}",
328 [(store (v2f64 VR128:$src), addr:$dst)]>;
330 // Intrinsic forms of MOVUPS/D load and store
331 let isAsmParserOnly = 0 in {
332 let canFoldAsLoad = 1, isReMaterializable = 1 in
333 def VMOVUPSrm_Int : VPSI<0x10, MRMSrcMem, (outs VR128:$dst),
335 "movups\t{$src, $dst|$dst, $src}",
336 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>, VEX;
337 def VMOVUPDrm_Int : VPDI<0x10, MRMSrcMem, (outs VR128:$dst),
339 "movupd\t{$src, $dst|$dst, $src}",
340 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>, VEX;
341 def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
342 (ins f128mem:$dst, VR128:$src),
343 "movups\t{$src, $dst|$dst, $src}",
344 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
345 def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
346 (ins f128mem:$dst, VR128:$src),
347 "movupd\t{$src, $dst|$dst, $src}",
348 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
350 let canFoldAsLoad = 1, isReMaterializable = 1 in
351 def MOVUPSrm_Int : PSI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
352 "movups\t{$src, $dst|$dst, $src}",
353 [(set VR128:$dst, (int_x86_sse_loadu_ps addr:$src))]>;
354 def MOVUPDrm_Int : PDI<0x10, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
355 "movupd\t{$src, $dst|$dst, $src}",
356 [(set VR128:$dst, (int_x86_sse2_loadu_pd addr:$src))]>;
358 def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
359 "movups\t{$src, $dst|$dst, $src}",
360 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
361 def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
362 "movupd\t{$src, $dst|$dst, $src}",
363 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
365 // Move Low/High packed floating point values
366 multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
367 PatFrag mov_frag, string base_opc,
369 def PSrm : PI<opc, MRMSrcMem,
370 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
371 !strconcat(base_opc, "s", asm_opr),
374 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
375 SSEPackedSingle>, TB;
377 def PDrm : PI<opc, MRMSrcMem,
378 (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
379 !strconcat(base_opc, "d", asm_opr),
380 [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
381 (scalar_to_vector (loadf64 addr:$src2)))))],
382 SSEPackedDouble>, TB, OpSize;
385 let isAsmParserOnly = 0, AddedComplexity = 20 in {
386 defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
387 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
388 defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
389 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
391 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
392 defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
393 "\t{$src2, $dst|$dst, $src2}">;
394 defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
395 "\t{$src2, $dst|$dst, $src2}">;
398 let isAsmParserOnly = 0 in {
399 def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
400 "movlps\t{$src, $dst|$dst, $src}",
401 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
402 (iPTR 0))), addr:$dst)]>, VEX;
403 def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
404 "movlpd\t{$src, $dst|$dst, $src}",
405 [(store (f64 (vector_extract (v2f64 VR128:$src),
406 (iPTR 0))), addr:$dst)]>, VEX;
408 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
409 "movlps\t{$src, $dst|$dst, $src}",
410 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
411 (iPTR 0))), addr:$dst)]>;
412 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
413 "movlpd\t{$src, $dst|$dst, $src}",
414 [(store (f64 (vector_extract (v2f64 VR128:$src),
415 (iPTR 0))), addr:$dst)]>;
417 // v2f64 extract element 1 is always custom lowered to unpack high to low
418 // and extract element 0, so the non-store version isn't too horrible.
419 let isAsmParserOnly = 0 in {
420 def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
421 "movhps\t{$src, $dst|$dst, $src}",
422 [(store (f64 (vector_extract
423 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
424 (undef)), (iPTR 0))), addr:$dst)]>,
426 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
427 "movhpd\t{$src, $dst|$dst, $src}",
428 [(store (f64 (vector_extract
429 (v2f64 (unpckh VR128:$src, (undef))),
430 (iPTR 0))), addr:$dst)]>,
433 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
434 "movhps\t{$src, $dst|$dst, $src}",
435 [(store (f64 (vector_extract
436 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
437 (undef)), (iPTR 0))), addr:$dst)]>;
438 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
439 "movhpd\t{$src, $dst|$dst, $src}",
440 [(store (f64 (vector_extract
441 (v2f64 (unpckh VR128:$src, (undef))),
442 (iPTR 0))), addr:$dst)]>;
444 let isAsmParserOnly = 0, AddedComplexity = 20 in {
445 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
446 (ins VR128:$src1, VR128:$src2),
447 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
449 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
451 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
452 (ins VR128:$src1, VR128:$src2),
453 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
455 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
458 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
459 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
460 (ins VR128:$src1, VR128:$src2),
461 "movlhps\t{$src2, $dst|$dst, $src2}",
463 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
464 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
465 (ins VR128:$src1, VR128:$src2),
466 "movhlps\t{$src2, $dst|$dst, $src2}",
468 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
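// A movlhps whose second operand is a zero-extended 64-bit load is simply a
// MOVHPS load of that memory operand.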
471 def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
472 (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
473 let AddedComplexity = 20 in {
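// movddup of a register duplicates the low half into the high half, which
// MOVLHPS does when both of its sources are the same register.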
474 def : Pat<(v4f32 (movddup VR128:$src, (undef))),
475 (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
476 def : Pat<(v2i64 (movddup VR128:$src, (undef))),
477 (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
480 //===----------------------------------------------------------------------===//
481 // SSE 1 & 2 - Conversion Instructions
482 //===----------------------------------------------------------------------===//
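/// sse12_cvt_s - SSE 1 & 2 scalar converts (register and folded-load forms).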
484 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
485 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
487 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
488 [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
489 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
490 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
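/// sse12_cvt_s_np - like sse12_cvt_s, but with no result patterns attached.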
493 multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
494 X86MemOperand x86memop, string asm> {
495 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
497 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
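/// sse12_cvt_p - SSE 1 & 2 packed converts, parameterized on the execution domain.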
501 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
502 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
503 string asm, Domain d> {
504 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
505 [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
506 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
507 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
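/// sse12_vcvt_avx - AVX scalar converts in the three-operand VEX form (no
/// patterns attached).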
510 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
511 X86MemOperand x86memop, string asm> {
512 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
513 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
514 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
515 (ins DstRC:$src1, x86memop:$src),
516 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
519 let isAsmParserOnly = 0 in {
520 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
521 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
522 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
523 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
525 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
526 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
527 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
528 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
531 // The assembler can recognize rr 64-bit instructions by seeing an rxx
532 // register, but the same isn't true when only memory operands are used, so
533 // provide other assembly "l" and "q" forms to address this explicitly
534 // where appropriate.
535 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
537 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
539 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
541 defm VCVTSI2SDL : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
543 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
547 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
548 "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
549 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
550 "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
551 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
552 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
553 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
554 "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
555 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
556 "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
557 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
558 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
559 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
560 "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
561 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
562 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
564 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
565 // and/or XMM operand(s).
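/// sse12_cvt_sint - intrinsic scalar converts (register and folded-load forms).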
567 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
568 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
570 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
571 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
572 [(set DstRC:$dst, (Int SrcRC:$src))]>;
573 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
574 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
575 [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
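/// sse12_cvt_sint_3addr - intrinsic scalar converts that also take the
/// destination-class register as an additional first source.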
578 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
579 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
580 PatFrag ld_frag, string asm, bit Is2Addr = 1> {
581 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
583 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
584 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
585 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
586 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
587 (ins DstRC:$src1, x86memop:$src2),
589 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
590 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
591 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
594 let isAsmParserOnly = 0 in {
595 defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
596 f32mem, load, "cvtss2si">, XS, VEX;
597 defm Int_VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
598 int_x86_sse_cvtss2si64, f32mem, load, "cvtss2si">,
600 defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
601 f128mem, load, "cvtsd2si">, XD, VEX;
602 defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
603 int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
606 // FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_
607 // in their names. Get rid of this hack or rename the intrinsics; there are
608 // several instructions that only match with the intrinsic form, so why create
609 // duplicates just to let them be recognized by the assembler?
610 defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
611 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
612 defm VCVTSD2SI64 : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
613 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
615 defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
616 f32mem, load, "cvtss2si">, XS;
617 defm Int_CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
618 f32mem, load, "cvtss2si{q}">, XS, REX_W;
619 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
620 f128mem, load, "cvtsd2si{l}">, XD;
621 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
622 f128mem, load, "cvtsd2si{q}">, XD, REX_W;
625 let isAsmParserOnly = 0 in {
626 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
627 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
628 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
629 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
631 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
632 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
633 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
634 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
638 let Constraints = "$src1 = $dst" in {
639 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
640 int_x86_sse_cvtsi2ss, i32mem, loadi32,
642 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
643 int_x86_sse_cvtsi642ss, i64mem, loadi64,
644 "cvtsi2ss{q}">, XS, REX_W;
645 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
646 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
648 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
649 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
650 "cvtsi2sd">, XD, REX_W;
655 // Aliases for intrinsics
656 let isAsmParserOnly = 0 in {
657 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
658 f32mem, load, "cvttss2si">, XS, VEX;
659 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
660 int_x86_sse_cvttss2si64, f32mem, load,
661 "cvttss2si">, XS, VEX, VEX_W;
662 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
663 f128mem, load, "cvttsd2si">, XD, VEX;
664 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
665 int_x86_sse2_cvttsd2si64, f128mem, load,
666 "cvttsd2si">, XD, VEX, VEX_W;
668 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
669 f32mem, load, "cvttss2si">, XS;
670 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
671 int_x86_sse_cvttss2si64, f32mem, load,
672 "cvttss2si{q}">, XS, REX_W;
673 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
674 f128mem, load, "cvttsd2si">, XD;
675 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
676 int_x86_sse2_cvttsd2si64, f128mem, load,
677 "cvttsd2si{q}">, XD, REX_W;
679 let isAsmParserOnly = 0, Pattern = []<dag> in {
680 defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
681 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
682 defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
683 "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
685 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
686 "cvtdq2ps\t{$src, $dst|$dst, $src}",
687 SSEPackedSingle>, TB, VEX;
688 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
689 "cvtdq2ps\t{$src, $dst|$dst, $src}",
690 SSEPackedSingle>, TB, VEX;
692 let Pattern = []<dag> in {
693 defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
694 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
695 defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
696 "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
697 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
698 "cvtdq2ps\t{$src, $dst|$dst, $src}",
699 SSEPackedSingle>, TB; /* PD SSE3 form is available */
704 // Convert scalar double to scalar single
705 let isAsmParserOnly = 0 in {
706 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
707 (ins FR64:$src1, FR64:$src2),
708 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
710 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
711 (ins FR64:$src1, f64mem:$src2),
712 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
713 []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
715 def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
718 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
719 "cvtsd2ss\t{$src, $dst|$dst, $src}",
720 [(set FR32:$dst, (fround FR64:$src))]>;
721 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
722 "cvtsd2ss\t{$src, $dst|$dst, $src}",
723 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
724 Requires<[HasSSE2, OptForSize]>;
726 let isAsmParserOnly = 0 in
727 defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
728 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
730 let Constraints = "$src1 = $dst" in
731 defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
732 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;
734 // Convert scalar single to scalar double
735 let isAsmParserOnly = 0 in { // SSE2 instructions with XS prefix
736 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
737 (ins FR32:$src1, FR32:$src2),
738 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
739 []>, XS, Requires<[HasAVX]>, VEX_4V;
740 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
741 (ins FR32:$src1, f32mem:$src2),
742 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
743 []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;
745 def : Pat<(f64 (fextend FR32:$src)), (VCVTSS2SDrr FR32:$src, FR32:$src)>,
748 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
749 "cvtss2sd\t{$src, $dst|$dst, $src}",
750 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
752 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
753 "cvtss2sd\t{$src, $dst|$dst, $src}",
754 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
755 Requires<[HasSSE2, OptForSize]>;
757 let isAsmParserOnly = 0 in {
758 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
759 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
760 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
761 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
762 VR128:$src2))]>, XS, VEX_4V,
764 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
765 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
766 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
767 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
768 (load addr:$src2)))]>, XS, VEX_4V,
771 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
772 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
773 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
774 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
775 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
778 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
779 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
780 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
781 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
782 (load addr:$src2)))]>, XS,
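// With OptForSpeed, expand an extending f32 load into a MOVSS load followed by
// a register-to-register convert rather than folding the load into CVTSS2SDrm
// (which is selected only under OptForSize above).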
786 def : Pat<(extloadf32 addr:$src),
787 (CVTSS2SDrr (MOVSSrm addr:$src))>,
788 Requires<[HasSSE2, OptForSpeed]>;
790 // Convert doubleword to packed single/double fp
791 let isAsmParserOnly = 0 in { // SSE2 instructions without OpSize prefix
792 def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
793 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
794 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
795 TB, VEX, Requires<[HasAVX]>;
796 def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
797 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
798 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
799 (bitconvert (memopv2i64 addr:$src))))]>,
800 TB, VEX, Requires<[HasAVX]>;
802 def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
803 "cvtdq2ps\t{$src, $dst|$dst, $src}",
804 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
805 TB, Requires<[HasSSE2]>;
806 def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
807 "cvtdq2ps\t{$src, $dst|$dst, $src}",
808 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
809 (bitconvert (memopv2i64 addr:$src))))]>,
810 TB, Requires<[HasSSE2]>;
812 // FIXME: why is the non-intrinsic version described as SSE3?
813 let isAsmParserOnly = 0 in { // SSE2 instructions with XS prefix
814 def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
815 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
816 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
817 XS, VEX, Requires<[HasAVX]>;
818 def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
819 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
820 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
821 (bitconvert (memopv2i64 addr:$src))))]>,
822 XS, VEX, Requires<[HasAVX]>;
824 def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
825 "cvtdq2pd\t{$src, $dst|$dst, $src}",
826 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
827 XS, Requires<[HasSSE2]>;
828 def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
829 "cvtdq2pd\t{$src, $dst|$dst, $src}",
830 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
831 (bitconvert (memopv2i64 addr:$src))))]>,
832 XS, Requires<[HasSSE2]>;
835 // Convert packed single/double fp to doubleword
836 let isAsmParserOnly = 0 in {
837 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
838 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
839 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
840 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
841 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
842 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
843 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
844 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
846 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
847 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
848 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
849 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
851 let isAsmParserOnly = 0 in {
852 def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
853 "cvtps2dq\t{$src, $dst|$dst, $src}",
854 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
856 def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
858 "cvtps2dq\t{$src, $dst|$dst, $src}",
859 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
860 (memop addr:$src)))]>, VEX;
862 def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
863 "cvtps2dq\t{$src, $dst|$dst, $src}",
864 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
865 def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
866 "cvtps2dq\t{$src, $dst|$dst, $src}",
867 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
868 (memop addr:$src)))]>;
870 let isAsmParserOnly = 0 in { // SSE2 packed instructions with XD prefix
871 def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
872 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
873 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
874 XD, VEX, Requires<[HasAVX]>;
875 def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
876 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
877 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
878 (memop addr:$src)))]>,
879 XD, VEX, Requires<[HasAVX]>;
881 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
882 "cvtpd2dq\t{$src, $dst|$dst, $src}",
883 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
884 XD, Requires<[HasSSE2]>;
885 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
886 "cvtpd2dq\t{$src, $dst|$dst, $src}",
887 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
888 (memop addr:$src)))]>,
889 XD, Requires<[HasSSE2]>;
892 // Convert packed single/double fp to doubleword with truncation
893 let isAsmParserOnly = 0 in { // SSE2 packed instructions with XS prefix
894 def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
895 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
896 def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
897 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
898 def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
899 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
900 def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
901 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
903 def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
904 "cvttps2dq\t{$src, $dst|$dst, $src}",
906 (int_x86_sse2_cvttps2dq VR128:$src))]>;
907 def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
908 "cvttps2dq\t{$src, $dst|$dst, $src}",
910 (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;
913 let isAsmParserOnly = 0 in {
914 def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
915 "vcvttps2dq\t{$src, $dst|$dst, $src}",
917 (int_x86_sse2_cvttps2dq VR128:$src))]>,
918 XS, VEX, Requires<[HasAVX]>;
919 def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
920 "vcvttps2dq\t{$src, $dst|$dst, $src}",
921 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
922 (memop addr:$src)))]>,
923 XS, VEX, Requires<[HasAVX]>;
926 let isAsmParserOnly = 0 in {
927 def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
929 "cvttpd2dq\t{$src, $dst|$dst, $src}",
930 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
932 def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
934 "cvttpd2dq\t{$src, $dst|$dst, $src}",
935 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
936 (memop addr:$src)))]>, VEX;
938 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
939 "cvttpd2dq\t{$src, $dst|$dst, $src}",
940 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
941 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
942 "cvttpd2dq\t{$src, $dst|$dst, $src}",
943 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
944 (memop addr:$src)))]>;
946 let isAsmParserOnly = 0 in {
947 // The assembler can recognize rr 256-bit instructions by seeing a ymm
948 // register, but the same isn't true when using memory operands instead.
949 // Provide other assembly rr and rm forms to address this explicitly.
950 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
951 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
952 def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
953 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
956 def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
957 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
958 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
959 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
962 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
963 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
964 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
965 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
968 // Convert packed single to packed double
969 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
970 // SSE2 instructions without OpSize prefix
971 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
972 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
973 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
974 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
975 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
976 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
977 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
978 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
980 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
981 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
982 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
983 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
985 let isAsmParserOnly = 0 in {
986 def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
987 "vcvtps2pd\t{$src, $dst|$dst, $src}",
988 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
989 VEX, Requires<[HasAVX]>;
990 def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
991 "vcvtps2pd\t{$src, $dst|$dst, $src}",
992 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
993 (load addr:$src)))]>,
994 VEX, Requires<[HasAVX]>;
996 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
997 "cvtps2pd\t{$src, $dst|$dst, $src}",
998 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
999 TB, Requires<[HasSSE2]>;
1000 def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1001 "cvtps2pd\t{$src, $dst|$dst, $src}",
1002 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1003 (load addr:$src)))]>,
1004 TB, Requires<[HasSSE2]>;
1006 // Convert packed double to packed single
1007 let isAsmParserOnly = 0 in {
1008 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1009 // register, but the same isn't true when using memory operands instead.
1010 // Provide other assembly rr and rm forms to address this explicitly.
1011 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1012 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1013 def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1014 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1017 def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1018 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1019 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1020 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1023 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1024 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
1025 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1026 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
1028 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1029 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1030 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1031 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1034 let isAsmParserOnly = 0 in {
1035 def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1036 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1037 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1038 def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
1040 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1041 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1042 (memop addr:$src)))]>;
1044 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1045 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1046 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1047 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1048 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1049 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1050 (memop addr:$src)))]>;
1052 // AVX 256-bit register conversion intrinsics
1053 // FIXME: Migrate SSE conversion intrinsic matching to use patterns like those below
1054 // whenever possible to avoid declaring two versions of each one.
1055 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
1056 (VCVTDQ2PSYrr VR256:$src)>;
1057 def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
1058 (VCVTDQ2PSYrm addr:$src)>;
1060 def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
1061 (VCVTPD2PSYrr VR256:$src)>;
1062 def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
1063 (VCVTPD2PSYrm addr:$src)>;
1065 def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
1066 (VCVTPS2DQYrr VR256:$src)>;
1067 def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
1068 (VCVTPS2DQYrm addr:$src)>;
1070 def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
1071 (VCVTPS2PDYrr VR128:$src)>;
1072 def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
1073 (VCVTPS2PDYrm addr:$src)>;
1075 def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
1076 (VCVTTPD2DQYrr VR256:$src)>;
1077 def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
1078 (VCVTTPD2DQYrm addr:$src)>;
1080 def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
1081 (VCVTTPS2DQYrr VR256:$src)>;
1082 def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
1083 (VCVTTPS2DQYrm addr:$src)>;
1085 //===----------------------------------------------------------------------===//
1086 // SSE 1 & 2 - Compare Instructions
1087 //===----------------------------------------------------------------------===//
1089 // sse12_cmp_scalar - SSE 1 & 2 compare scalar instructions
1090 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
1091 string asm, string asm_alt> {
1092 let isAsmParserOnly = 1 in {
1093 def rr : SIi8<0xC2, MRMSrcReg,
1094 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
1097 def rm : SIi8<0xC2, MRMSrcMem,
1098 (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
1102 // Accept explicit immediate argument form instead of comparison code.
1103 def rr_alt : SIi8<0xC2, MRMSrcReg,
1104 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1107 def rm_alt : SIi8<0xC2, MRMSrcMem,
1108 (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
1112 let neverHasSideEffects = 1, isAsmParserOnly = 0 in {
1113 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
1114 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1115 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1117 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
1118 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1119 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1123 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1124 defm CMPSS : sse12_cmp_scalar<FR32, f32mem,
1125 "cmp${cc}ss\t{$src, $dst|$dst, $src}",
1126 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}">, XS;
1127 defm CMPSD : sse12_cmp_scalar<FR64, f64mem,
1128 "cmp${cc}sd\t{$src, $dst|$dst, $src}",
1129 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}">, XD;
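// sse12_cmp_scalar_int - intrinsic forms of the scalar compares; the condition
// code is passed through as an immediate operand.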
1132 multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
1133 Intrinsic Int, string asm> {
1134 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1135 (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
1136 [(set VR128:$dst, (Int VR128:$src1,
1137 VR128:$src, imm:$cc))]>;
1138 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1139 (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
1140 [(set VR128:$dst, (Int VR128:$src1,
1141 (load addr:$src), imm:$cc))]>;
1144 // Aliases to match intrinsics which expect XMM operand(s).
1145 let isAsmParserOnly = 0 in {
1146 defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1147 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
1149 defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1150 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
1153 let Constraints = "$src1 = $dst" in {
1154 defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1155 "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
1156 defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1157 "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
1161 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
1162 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1163 ValueType vt, X86MemOperand x86memop,
1164 PatFrag ld_frag, string OpcodeStr, Domain d> {
1165 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1166 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1167 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
1168 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1169 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1170 [(set EFLAGS, (OpNode (vt RC:$src1),
1171 (ld_frag addr:$src2)))], d>;
1174 let Defs = [EFLAGS] in {
1175 let isAsmParserOnly = 0 in {
1176 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1177 "ucomiss", SSEPackedSingle>, VEX;
1178 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1179 "ucomisd", SSEPackedDouble>, OpSize, VEX;
1180 let Pattern = []<dag> in {
1181 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1182 "comiss", SSEPackedSingle>, VEX;
1183 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1184 "comisd", SSEPackedDouble>, OpSize, VEX;
1187 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1188 load, "ucomiss", SSEPackedSingle>, VEX;
1189 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1190 load, "ucomisd", SSEPackedDouble>, OpSize, VEX;
1192 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
1193 load, "comiss", SSEPackedSingle>, VEX;
1194 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
1195 load, "comisd", SSEPackedDouble>, OpSize, VEX;
1197 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1198 "ucomiss", SSEPackedSingle>, TB;
1199 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1200 "ucomisd", SSEPackedDouble>, TB, OpSize;
1202 let Pattern = []<dag> in {
1203 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1204 "comiss", SSEPackedSingle>, TB;
1205 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1206 "comisd", SSEPackedDouble>, TB, OpSize;
1209 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1210 load, "ucomiss", SSEPackedSingle>, TB;
1211 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1212 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
1214 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
1215 "comiss", SSEPackedSingle>, TB;
1216 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
1217 "comisd", SSEPackedDouble>, TB, OpSize;
1218 } // Defs = [EFLAGS]
1220 // sse12_cmp_packed - SSE 1 & 2 compare packed instructions
1221 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1222 Intrinsic Int, string asm, string asm_alt,
1224 let isAsmParserOnly = 1 in {
1225 def rri : PIi8<0xC2, MRMSrcReg,
1226 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1227 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1228 def rmi : PIi8<0xC2, MRMSrcMem,
1229 (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
1230 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1233 // Accept explicit immediate argument form instead of comparison code.
1234 def rri_alt : PIi8<0xC2, MRMSrcReg,
1235 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1237 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1238 (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
1242 let isAsmParserOnly = 0 in {
1243 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1244 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1245 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1246 SSEPackedSingle>, VEX_4V;
1247 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1248 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1249 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1250 SSEPackedDouble>, OpSize, VEX_4V;
1251 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
1252 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1253 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1254 SSEPackedSingle>, VEX_4V;
1255 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
1256 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1257 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1258 SSEPackedDouble>, OpSize, VEX_4V;
1260 let Constraints = "$src1 = $dst" in {
1261 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1262 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1263 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1264 SSEPackedSingle>, TB;
1265 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1266 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1267 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1268 SSEPackedDouble>, TB, OpSize;
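// Select the X86cmpps/X86cmppd nodes, which produce integer vector masks, onto
// the packed compare instructions.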
1271 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1272 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1273 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1274 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1275 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1276 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1277 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1278 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1280 //===----------------------------------------------------------------------===//
1281 // SSE 1 & 2 - Shuffle Instructions
1282 //===----------------------------------------------------------------------===//
1284 /// sse12_shuffle - SSE 1 & 2 shuffle instructions
1285 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1286 ValueType vt, string asm, PatFrag mem_frag,
1287 Domain d, bit IsConvertibleToThreeAddress = 0> {
1288 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
1289 (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
1290 [(set RC:$dst, (vt (shufp:$src3
1291 RC:$src1, (mem_frag addr:$src2))))], d>;
1292 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1293 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
1294 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
1296 (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
1299 let isAsmParserOnly = 0 in {
1300 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1301 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1302 memopv4f32, SSEPackedSingle>, TB, VEX_4V;
1303 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
1304 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1305 memopv8f32, SSEPackedSingle>, TB, VEX_4V;
1306 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1307 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1308 memopv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
1309 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
1310 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1311 memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
1314 let Constraints = "$src1 = $dst" in {
1315 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1316 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1317 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1319 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1320 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1321 memopv2f64, SSEPackedDouble>, TB, OpSize;
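// Illustration (informal sketch): the SHUFPS immediate picks two elements of
// $src1 for the low half of the result and two elements of $src2 for the high
// half.  For the 128-bit single-precision form:
//
//   dst[0] = src1[(imm >> 0) & 3];
//   dst[1] = src1[(imm >> 2) & 3];
//   dst[2] = src2[(imm >> 4) & 3];
//   dst[3] = src2[(imm >> 6) & 3];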
1324 //===----------------------------------------------------------------------===//
1325 // SSE 1 & 2 - Unpack Instructions
1326 //===----------------------------------------------------------------------===//
1328 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1329 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1330 PatFrag mem_frag, RegisterClass RC,
1331 X86MemOperand x86memop, string asm,
1333 def rr : PI<opc, MRMSrcReg,
1334 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1336 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1337 def rm : PI<opc, MRMSrcMem,
1338 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1340 (vt (OpNode RC:$src1,
1341 (mem_frag addr:$src2))))], d>;
1344 let AddedComplexity = 10 in {
1345 let isAsmParserOnly = 0 in {
1346 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1347 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1348 SSEPackedSingle>, VEX_4V;
1349 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1350 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1351 SSEPackedDouble>, OpSize, VEX_4V;
1352 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1353 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1354 SSEPackedSingle>, VEX_4V;
1355 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1356 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1357 SSEPackedDouble>, OpSize, VEX_4V;
1359 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1360 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1361 SSEPackedSingle>, VEX_4V;
1362 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1363 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1364 SSEPackedDouble>, OpSize, VEX_4V;
1365 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1366 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1367 SSEPackedSingle>, VEX_4V;
1368 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1369 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1370 SSEPackedDouble>, OpSize, VEX_4V;
1373 let Constraints = "$src1 = $dst" in {
1374 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1375 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1376 SSEPackedSingle>, TB;
1377 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1378 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1379 SSEPackedDouble>, TB, OpSize;
1380 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1381 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1382 SSEPackedSingle>, TB;
1383 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1384 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1385 SSEPackedDouble>, TB, OpSize;
1386 } // Constraints = "$src1 = $dst"
1387 } // AddedComplexity
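// Illustration (informal sketch): UNPCKLPS interleaves the low halves of its
// two sources and UNPCKHPS the high halves.  For the v4f32 forms:
//
//   // unpcklps                      // unpckhps
//   dst = { src1[0], src2[0],        dst = { src1[2], src2[2],
//           src1[1], src2[1] };              src1[3], src2[3] };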
1389 //===----------------------------------------------------------------------===//
1390 // SSE 1 & 2 - Extract Floating-Point Sign Mask
1391 //===----------------------------------------------------------------------===//
1393 /// sse12_extr_sign_mask - sse 1 & 2 packed FP sign-mask extraction
1394 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1396 def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1397 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1398 [(set GR32:$dst, (Int RC:$src))], d>;
1399 def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
1400 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
1404 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1405 SSEPackedSingle>, TB;
1406 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1407 SSEPackedDouble>, TB, OpSize;
1409 let isAsmParserOnly = 0 in {
1410 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1411 "movmskps", SSEPackedSingle>, VEX;
1412 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1413 "movmskpd", SSEPackedDouble>, OpSize,
1415 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
1416 "movmskps", SSEPackedSingle>, VEX;
1417 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
1418 "movmskpd", SSEPackedDouble>, OpSize,
1422 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1423 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1424 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1425 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1427 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1428 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1429 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1430 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1434 //===----------------------------------------------------------------------===//
1435 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1436 //===----------------------------------------------------------------------===//
1438 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1439 // names that start with 'Fs'.
1441 // Alias instructions that map fld0 to pxor for sse.
1442 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1443 canFoldAsLoad = 1 in {
1444 // FIXME: Set encoding to pseudo!
1445 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1446 [(set FR32:$dst, fp32imm0)]>,
1447 Requires<[HasSSE1]>, TB, OpSize;
1448 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1449 [(set FR64:$dst, fpimm0)]>,
1450 Requires<[HasSSE2]>, TB, OpSize;
1451 def VFsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1452 [(set FR32:$dst, fp32imm0)]>,
1453 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
1454 def VFsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1455 [(set FR64:$dst, fpimm0)]>,
1456 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
1459 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
1460 // bits are disregarded.
1461 let neverHasSideEffects = 1 in {
1462 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1463 "movaps\t{$src, $dst|$dst, $src}", []>;
1464 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1465 "movapd\t{$src, $dst|$dst, $src}", []>;
1468 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1469 // bits are disregarded.
1470 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1471 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1472 "movaps\t{$src, $dst|$dst, $src}",
1473 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
1474 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1475 "movapd\t{$src, $dst|$dst, $src}",
1476 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1479 //===----------------------------------------------------------------------===//
1480 // SSE 1 & 2 - Logical Instructions
1481 //===----------------------------------------------------------------------===//
1483 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
1485 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
1487 let isAsmParserOnly = 0 in {
1488 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
1489 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, VEX_4V;
1491 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
1492 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, OpSize, VEX_4V;
1495 let Constraints = "$src1 = $dst" in {
1496 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
1497 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
1499 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
1500 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
1504 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1505 let mayLoad = 0 in {
1506 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
1507 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
1508 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
1511 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
1512 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
1514 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
1516 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
1517 SDNode OpNode, int HasPat = 0,
1518 list<list<dag>> Pattern = []> {
1519 let isAsmParserOnly = 0, Pattern = []<dag> in {
1520 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1521 !strconcat(OpcodeStr, "ps"), f128mem,
1522 !if(HasPat, Pattern[0], // rr
1523 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1525 !if(HasPat, Pattern[2], // rm
1526 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1527 (memopv2i64 addr:$src2)))]), 0>,
1530 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1531 !strconcat(OpcodeStr, "pd"), f128mem,
1532 !if(HasPat, Pattern[1], // rr
1533 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1536 !if(HasPat, Pattern[3], // rm
1537 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1538 (memopv2i64 addr:$src2)))]), 0>,
1541 let Constraints = "$src1 = $dst" in {
1542 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1543 !strconcat(OpcodeStr, "ps"), f128mem,
1544 !if(HasPat, Pattern[0], // rr
1545 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1,
1547 !if(HasPat, Pattern[2], // rm
1548 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1549 (memopv2i64 addr:$src2)))])>, TB;
1551 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1552 !strconcat(OpcodeStr, "pd"), f128mem,
1553 !if(HasPat, Pattern[1], // rr
1554 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1557 !if(HasPat, Pattern[3], // rm
1558 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1559 (memopv2i64 addr:$src2)))])>,
1564 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 packed FP logical op forms
1566 let isAsmParserOnly = 0 in {
1567 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr> {
1568 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
1569 !strconcat(OpcodeStr, "ps"), f256mem, [], [], 0>, VEX_4V;
1571 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
1572 !strconcat(OpcodeStr, "pd"), f256mem, [], [], 0>, OpSize, VEX_4V;
1576 // AVX 256-bit packed logical ops forms
1577 defm VAND : sse12_fp_packed_logical_y<0x54, "and">;
1578 defm VOR : sse12_fp_packed_logical_y<0x56, "or">;
1579 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor">;
1580 let isCommutable = 0 in
1581 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn">;
1583 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
1584 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
1585 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
1586 let isCommutable = 0 in
1587 defm ANDN : sse12_fp_packed_logical<0x55, "andn", undef /* dummy */, 1, [
1589 [(set VR128:$dst, (X86pandn VR128:$src1, VR128:$src2))],
1593 [(set VR128:$dst, (X86pandn VR128:$src1, (memopv2i64 addr:$src2)))],
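// Note (informal): ANDN computes (NOT $src1) AND $src2; the first operand is
// the inverted one, which is why these forms are not commutable.  Roughly:
// dst = ~src1 & src2.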
1597 //===----------------------------------------------------------------------===//
1598 // SSE 1 & 2 - Arithmetic Instructions
1599 //===----------------------------------------------------------------------===//
1601 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and vector forms.
1604 /// In addition, we also have a special variant of the scalar form here to
1605 /// represent the associated intrinsic operation. This form is unlike the
1606 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1607 /// and leaves the top elements unmodified (therefore these cannot be commuted).
1609 /// These three forms can each be reg+reg or reg+mem.
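/// Illustration (informal, not part of the original comment): the plain
/// scalar form operates on an FR32/FR64 value, while the intrinsic form takes
/// a whole XMM register and only updates element 0, passing the remaining
/// elements of the first source through unchanged.  Roughly, for "addss":
///
///   float add_ss(float a, float b) { return a + b; }   // plain scalar
///   // intrinsic form: dst = a, except dst[0] = a[0] + b[0]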
1612 /// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
1614 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
1616 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
1617 OpNode, FR32, f32mem, Is2Addr>, XS;
1618 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
1619 OpNode, FR64, f64mem, Is2Addr>, XD;
1622 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
1624 let mayLoad = 0 in {
1625 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
1626 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
1627 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
1628 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
1632 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
1634 let mayLoad = 0 in {
1635 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
1636 v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
1637 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
1638 v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
1642 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
1644 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1645 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
1646 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1647 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
1650 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
1652 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1653 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
1654 SSEPackedSingle, Is2Addr>, TB;
1656 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1657 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
1658 SSEPackedDouble, Is2Addr>, TB, OpSize;
1661 multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
1662 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1663 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
1664 SSEPackedSingle, 0>, TB;
1666 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1667 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
1668 SSEPackedDouble, 0>, TB, OpSize;
1671 // Binary Arithmetic instructions
1672 let isAsmParserOnly = 0 in {
1673 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
1674 basic_sse12_fp_binop_s_int<0x58, "add", 0>,
1675 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
1676 basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
1677 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
1678 basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
1679 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
1680 basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
1682 let isCommutable = 0 in {
1683 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
1684 basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
1685 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
1686 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
1687 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
1688 basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
1689 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
1690 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
1691 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
1692 basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
1693 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
1694 basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
1695 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
1696 basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
1697 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
1698 basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
1699 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
1700 basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
1701 basic_sse12_fp_binop_p_y_int<0x5D, "min">,
1702 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
1706 let Constraints = "$src1 = $dst" in {
1707 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
1708 basic_sse12_fp_binop_p<0x58, "add", fadd>,
1709 basic_sse12_fp_binop_s_int<0x58, "add">;
1710 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
1711 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
1712 basic_sse12_fp_binop_s_int<0x59, "mul">;
1714 let isCommutable = 0 in {
1715 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
1716 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
1717 basic_sse12_fp_binop_s_int<0x5C, "sub">;
1718 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
1719 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
1720 basic_sse12_fp_binop_s_int<0x5E, "div">;
1721 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
1722 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
1723 basic_sse12_fp_binop_s_int<0x5F, "max">,
1724 basic_sse12_fp_binop_p_int<0x5F, "max">;
1725 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
1726 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
1727 basic_sse12_fp_binop_s_int<0x5D, "min">,
1728 basic_sse12_fp_binop_p_int<0x5D, "min">;
1733 /// In addition, we also have a special variant of the scalar form here to
1734 /// represent the associated intrinsic operation. This form is unlike the
1735 /// plain scalar form, in that it takes an entire vector (instead of a
1736 /// scalar) and leaves the top elements undefined.
1738 /// And, we have a special variant form for a full-vector intrinsic form.
1740 /// sse1_fp_unop_s - SSE1 unops in scalar form.
1741 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
1742 SDNode OpNode, Intrinsic F32Int> {
1743 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1744 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1745 [(set FR32:$dst, (OpNode FR32:$src))]>;
1746 // For scalar unary operations, fold a load into the operation
1747 // only in OptForSize mode. It eliminates an instruction, but it also
1748 // eliminates a whole-register clobber (the load), so it introduces a
1749 // partial register update condition.
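// (Informal example: folding the load into "sqrtss" leaves the upper 96 bits
// of the destination dependent on its previous contents, whereas a separate
// "movss" load from memory would have zeroed them and broken that
// dependency.)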
1750 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
1751 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1752 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
1753 Requires<[HasSSE1, OptForSize]>;
1754 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1755 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1756 [(set VR128:$dst, (F32Int VR128:$src))]>;
1757 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1758 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1759 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1762 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
1763 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1764 SDNode OpNode, Intrinsic F32Int> {
1765 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
1766 !strconcat(OpcodeStr,
1767 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1768 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
1769 !strconcat(OpcodeStr,
1770 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1771 []>, XS, Requires<[HasAVX, OptForSize]>;
1772 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1773 !strconcat(OpcodeStr,
1774 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1775 [(set VR128:$dst, (F32Int VR128:$src))]>;
1776 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1777 !strconcat(OpcodeStr,
1778 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1779 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1782 /// sse1_fp_unop_p - SSE1 unops in packed form.
1783 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1784 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1785 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1786 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
1787 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1788 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1789 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
1792 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
1793 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1794 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1795 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1796 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
1797 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1798 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1799 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
1802 /// sse1_fp_unop_p_int - SSE1 intrinsic unops in packed forms.
1803 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1804 Intrinsic V4F32Int> {
1805 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1806 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1807 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
1808 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1809 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1810 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
1813 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsic unops in packed forms.
1814 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1815 Intrinsic V4F32Int> {
1816 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1817 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1818 [(set VR256:$dst, (V4F32Int VR256:$src))]>;
1819 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1820 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1821 [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
1824 /// sse2_fp_unop_s - SSE2 unops in scalar form.
1825 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
1826 SDNode OpNode, Intrinsic F64Int> {
1827 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1828 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1829 [(set FR64:$dst, (OpNode FR64:$src))]>;
1830 // See the comments in sse1_fp_unop_s for why this is OptForSize.
1831 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1832 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1833 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
1834 Requires<[HasSSE2, OptForSize]>;
1835 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1836 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1837 [(set VR128:$dst, (F64Int VR128:$src))]>;
1838 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1839 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1840 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1843 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
1844 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1845 SDNode OpNode, Intrinsic F64Int> {
1846 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1847 !strconcat(OpcodeStr,
1848 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1849 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
1850 (ins FR64:$src1, f64mem:$src2),
1851 !strconcat(OpcodeStr,
1852 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1853 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1854 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1855 [(set VR128:$dst, (F64Int VR128:$src))]>;
1856 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1857 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1858 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1861 /// sse2_fp_unop_p - SSE2 unops in vector forms.
1862 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
1864 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1865 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1866 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
1867 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1868 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1869 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
1872 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
1873 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1874 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1875 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1876 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
1877 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1878 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1879 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
1882 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
1883 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1884 Intrinsic V2F64Int> {
1885 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1886 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1887 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
1888 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1889 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1890 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
1893 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
1894 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1895 Intrinsic V2F64Int> {
1896 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1897 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1898 [(set VR256:$dst, (V2F64Int VR256:$src))]>;
1899 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1900 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1901 [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
1904 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
1906 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse_sqrt_ss>,
1907 sse2_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1910 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
1911 sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
1912 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1913 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1914 sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
1915 sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
1916 sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
1917 sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
1920 // Reciprocal approximations. Note that these typically require refinement
1921 // in order to obtain suitable precision.
1922 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt", X86frsqrt,
1923 int_x86_sse_rsqrt_ss>, VEX_4V;
1924 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
1925 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
1926 sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
1927 sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
1929 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp", X86frcp, int_x86_sse_rcp_ss>,
1931 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
1932 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
1933 sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
1934 sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
1938 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
1939 sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
1940 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
1941 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1942 sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
1943 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
1945 // Reciprocal approximations. Note that these typically require refinement
1946 // in order to obtain suitable precision.
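// Informal sketch of a typical refinement step (not part of these
// definitions): one Newton-Raphson iteration roughly doubles the ~12 bits of
// precision the hardware approximation provides.
//
//   x1 = x0 * (2.0f - a * x0);               // refine rcpps(a)
//   x1 = x0 * (1.5f - 0.5f * a * x0 * x0);   // refine rsqrtps(a)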
1947 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
1948 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
1949 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
1950 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
1951 sse1_fp_unop_p<0x53, "rcp", X86frcp>,
1952 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
1954 // There is no f64 version of the reciprocal approximation instructions.
1956 //===----------------------------------------------------------------------===//
1957 // SSE 1 & 2 - Non-temporal stores
1958 //===----------------------------------------------------------------------===//
1960 let isAsmParserOnly = 0 in {
1961 def VMOVNTPSmr_Int : VPSI<0x2B, MRMDestMem, (outs),
1962 (ins i128mem:$dst, VR128:$src),
1963 "movntps\t{$src, $dst|$dst, $src}",
1964 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>, VEX;
1965 def VMOVNTPDmr_Int : VPDI<0x2B, MRMDestMem, (outs),
1966 (ins i128mem:$dst, VR128:$src),
1967 "movntpd\t{$src, $dst|$dst, $src}",
1968 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>, VEX;
1970 let ExeDomain = SSEPackedInt in
1971 def VMOVNTDQmr_Int : VPDI<0xE7, MRMDestMem, (outs),
1972 (ins f128mem:$dst, VR128:$src),
1973 "movntdq\t{$src, $dst|$dst, $src}",
1974 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>, VEX;
1976 let AddedComplexity = 400 in { // Prefer non-temporal versions
1977 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
1978 (ins f128mem:$dst, VR128:$src),
1979 "movntps\t{$src, $dst|$dst, $src}",
1980 [(alignednontemporalstore (v4f32 VR128:$src),
1982 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
1983 (ins f128mem:$dst, VR128:$src),
1984 "movntpd\t{$src, $dst|$dst, $src}",
1985 [(alignednontemporalstore (v2f64 VR128:$src),
1987 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
1988 (ins f128mem:$dst, VR128:$src),
1989 "movntdq\t{$src, $dst|$dst, $src}",
1990 [(alignednontemporalstore (v2f64 VR128:$src),
1992 let ExeDomain = SSEPackedInt in
1993 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
1994 (ins f128mem:$dst, VR128:$src),
1995 "movntdq\t{$src, $dst|$dst, $src}",
1996 [(alignednontemporalstore (v4f32 VR128:$src),
1999 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
2000 (ins f256mem:$dst, VR256:$src),
2001 "movntps\t{$src, $dst|$dst, $src}",
2002 [(alignednontemporalstore (v8f32 VR256:$src),
2004 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
2005 (ins f256mem:$dst, VR256:$src),
2006 "movntpd\t{$src, $dst|$dst, $src}",
2007 [(alignednontemporalstore (v4f64 VR256:$src),
2009 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
2010 (ins f256mem:$dst, VR256:$src),
2011 "movntdq\t{$src, $dst|$dst, $src}",
2012 [(alignednontemporalstore (v4f64 VR256:$src),
2014 let ExeDomain = SSEPackedInt in
2015 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
2016 (ins f256mem:$dst, VR256:$src),
2017 "movntdq\t{$src, $dst|$dst, $src}",
2018 [(alignednontemporalstore (v8f32 VR256:$src),
2023 def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
2024 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
2025 def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
2026 (VMOVNTPDYmr addr:$dst, VR256:$src)>;
2027 def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
2028 (VMOVNTPSYmr addr:$dst, VR256:$src)>;
2030 def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2031 "movntps\t{$src, $dst|$dst, $src}",
2032 [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
2033 def MOVNTPDmr_Int : PDI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2034 "movntpd\t{$src, $dst|$dst, $src}",
2035 [(int_x86_sse2_movnt_pd addr:$dst, VR128:$src)]>;
2037 let ExeDomain = SSEPackedInt in
2038 def MOVNTDQmr_Int : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2039 "movntdq\t{$src, $dst|$dst, $src}",
2040 [(int_x86_sse2_movnt_dq addr:$dst, VR128:$src)]>;
2042 let AddedComplexity = 400 in { // Prefer non-temporal versions
2043 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2044 "movntps\t{$src, $dst|$dst, $src}",
2045 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2046 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2047 "movntpd\t{$src, $dst|$dst, $src}",
2048 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
2050 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2051 "movntdq\t{$src, $dst|$dst, $src}",
2052 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2054 let ExeDomain = SSEPackedInt in
2055 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2056 "movntdq\t{$src, $dst|$dst, $src}",
2057 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2059 // There is no AVX form for instructions below this point
2060 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2061 "movnti\t{$src, $dst|$dst, $src}",
2062 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2063 TB, Requires<[HasSSE2]>;
2065 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2066 "movnti\t{$src, $dst|$dst, $src}",
2067 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2068 TB, Requires<[HasSSE2]>;
2071 def MOVNTImr_Int : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2072 "movnti\t{$src, $dst|$dst, $src}",
2073 [(int_x86_sse2_movnt_i addr:$dst, GR32:$src)]>,
2074 TB, Requires<[HasSSE2]>;
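// Usage note (informal): the non-temporal stores above bypass the cache
// hierarchy and are normally reached from C through the streaming-store
// intrinsics, e.g.:
//
//   #include <xmmintrin.h>
//   void store_stream(float *p, __m128 v) {
//     _mm_stream_ps(p, v);   // lowers to MOVNTPS; p must be 16-byte aligned
//   }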
2076 //===----------------------------------------------------------------------===//
2077 // SSE 1 & 2 - Misc Instructions (No AVX form)
2078 //===----------------------------------------------------------------------===//
2080 // Prefetch intrinsic.
2081 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2082 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3))]>;
2083 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2084 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2))]>;
2085 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2086 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1))]>;
2087 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2088 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0))]>;
2090 // Load, store, and memory fence
2091 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2092 TB, Requires<[HasSSE1]>;
2093 def : Pat<(X86SFence), (SFENCE)>;
2095 // Alias instructions that map zero vector to pxor / xorp* for sse.
2096 // We set canFoldAsLoad because this can be converted to a constant-pool
2097 // load of an all-zeros value if folding it would be beneficial.
2098 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2099 // JIT implementation, which does not expand the instructions below like
2100 // X86MCInstLower does.
2101 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2102 isCodeGenOnly = 1 in {
2103 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2104 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
2105 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2106 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
2107 let ExeDomain = SSEPackedInt in
2108 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2109 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2112 // The same as done above but for AVX. The 128-bit versions are the same,
2113 // but re-encoded. The 256-bit versions do not have a PI form.
2114 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2115 // JIT implementation, which does not expand the instructions below like
2116 // X86MCInstLower does.
2117 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2118 isCodeGenOnly = 1, Predicates = [HasAVX] in {
2119 def AVX_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2120 [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
2121 def AVX_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2122 [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
2123 def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2124 [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
2125 def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2126 [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
2127 let ExeDomain = SSEPackedInt in
2128 def AVX_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2129 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2132 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
2133 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
2134 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
2136 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2137 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
2139 //===----------------------------------------------------------------------===//
2140 // SSE 1 & 2 - Load/Store XCSR register
2141 //===----------------------------------------------------------------------===//
2143 let isAsmParserOnly = 0 in {
2144 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2145 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2146 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2147 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2150 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2151 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2152 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2153 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2155 //===---------------------------------------------------------------------===//
2156 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2157 //===---------------------------------------------------------------------===//
2159 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2161 let isAsmParserOnly = 0 in {
2162 let neverHasSideEffects = 1 in {
2163 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2164 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2165 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2166 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2168 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2169 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2170 def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2171 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2173 let canFoldAsLoad = 1, mayLoad = 1 in {
2174 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2175 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2176 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2177 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2178 let Predicates = [HasAVX] in {
2179 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2180 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2181 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2182 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2186 let mayStore = 1 in {
2187 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2188 (ins i128mem:$dst, VR128:$src),
2189 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2190 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
2191 (ins i256mem:$dst, VR256:$src),
2192 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2193 let Predicates = [HasAVX] in {
2194 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2195 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2196 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
2197 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2202 let neverHasSideEffects = 1 in
2203 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2204 "movdqa\t{$src, $dst|$dst, $src}", []>;
2206 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2207 "movdqu\t{$src, $dst|$dst, $src}",
2208 []>, XS, Requires<[HasSSE2]>;
2210 let canFoldAsLoad = 1, mayLoad = 1 in {
2211 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2212 "movdqa\t{$src, $dst|$dst, $src}",
2213 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2214 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2215 "movdqu\t{$src, $dst|$dst, $src}",
2216 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2217 XS, Requires<[HasSSE2]>;
2220 let mayStore = 1 in {
2221 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2222 "movdqa\t{$src, $dst|$dst, $src}",
2223 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2224 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2225 "movdqu\t{$src, $dst|$dst, $src}",
2226 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2227 XS, Requires<[HasSSE2]>;
2230 // Intrinsic forms of MOVDQU load and store
2231 let isAsmParserOnly = 0 in {
2232 let canFoldAsLoad = 1 in
2233 def VMOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2234 "vmovdqu\t{$src, $dst|$dst, $src}",
2235 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2236 XS, VEX, Requires<[HasAVX]>;
2237 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2238 "vmovdqu\t{$src, $dst|$dst, $src}",
2239 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2240 XS, VEX, Requires<[HasAVX]>;
2243 let canFoldAsLoad = 1 in
2244 def MOVDQUrm_Int : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2245 "movdqu\t{$src, $dst|$dst, $src}",
2246 [(set VR128:$dst, (int_x86_sse2_loadu_dq addr:$src))]>,
2247 XS, Requires<[HasSSE2]>;
2248 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2249 "movdqu\t{$src, $dst|$dst, $src}",
2250 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2251 XS, Requires<[HasSSE2]>;
2253 } // ExeDomain = SSEPackedInt
2255 def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
2256 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
2257 (VMOVDQUYmr addr:$dst, VR256:$src)>;
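// Note (informal): MOVDQA faults on a memory operand that is not 16-byte
// aligned, while MOVDQU accepts any alignment; only the aligned forms are
// therefore suitable for alignedload/alignedstore style patterns.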
2259 //===---------------------------------------------------------------------===//
2260 // SSE2 - Packed Integer Arithmetic Instructions
2261 //===---------------------------------------------------------------------===//
2263 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2265 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
2266 bit IsCommutable = 0, bit Is2Addr = 1> {
2267 let isCommutable = IsCommutable in
2268 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2269 (ins VR128:$src1, VR128:$src2),
2271 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2272 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2273 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2274 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2275 (ins VR128:$src1, i128mem:$src2),
2277 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2278 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2279 [(set VR128:$dst, (IntId VR128:$src1,
2280 (bitconvert (memopv2i64 addr:$src2))))]>;
2283 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
2284 string OpcodeStr, Intrinsic IntId,
2285 Intrinsic IntId2, bit Is2Addr = 1> {
2286 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2287 (ins VR128:$src1, VR128:$src2),
2289 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2290 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2291 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2292 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2293 (ins VR128:$src1, i128mem:$src2),
2295 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2296 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2297 [(set VR128:$dst, (IntId VR128:$src1,
2298 (bitconvert (memopv2i64 addr:$src2))))]>;
2299 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
2300 (ins VR128:$src1, i32i8imm:$src2),
2302 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2303 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2304 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
2307 /// PDI_binop_rm - Simple SSE2 binary operator.
2308 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2309 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
2310 let isCommutable = IsCommutable in
2311 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2312 (ins VR128:$src1, VR128:$src2),
2314 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2315 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2316 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
2317 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2318 (ins VR128:$src1, i128mem:$src2),
2320 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2321 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2322 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
2323 (bitconvert (memopv2i64 addr:$src2)))))]>;
2326 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
2328 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
2329 /// to collapse (bitconvert VT to VT) into its operand.
2331 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
2332 bit IsCommutable = 0, bit Is2Addr = 1> {
2333 let isCommutable = IsCommutable in
2334 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2335 (ins VR128:$src1, VR128:$src2),
2337 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2338 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2339 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
2340 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2341 (ins VR128:$src1, i128mem:$src2),
2343 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2344 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2345 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
2348 } // ExeDomain = SSEPackedInt
2350 // 128-bit Integer Arithmetic
2352 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
2353 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
2354 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
2355 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
2356 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
2357 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
2358 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
2359 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
2360 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
2361 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
2364 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
2366 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
2368 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
2370 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
2372 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
2374 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
2376 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
2378 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
2380 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
2382 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
2384 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
2386 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
2388 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
2390 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
2392 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
2394 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
2396 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
2398 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
2400 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
2404 let Constraints = "$src1 = $dst" in {
2405 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
2406 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
2407 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
2408 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
2409 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
2410 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
2411 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
2412 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
2413 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
2416 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
2417 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
2418 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
2419 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
2420 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
2421 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
2422 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
2423 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
2424 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
2425 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
2426 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
2427 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
2428 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
2429 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
2430 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
2431 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
2432 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
2433 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
2434 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
2436 } // Constraints = "$src1 = $dst"
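// Illustration (informal): the saturating and averaging forms differ from the
// plain wrap-around adds above; e.g. paddusb clamps to 255 and pavgb computes
// a rounded unsigned average:
//
//   dst[i] = (uint8_t)((src1[i] + src2[i] + 1) >> 1);   // pavgb, per byte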
2438 //===---------------------------------------------------------------------===//
2439 // SSE2 - Packed Integer Logical Instructions
2440 //===---------------------------------------------------------------------===//
2442 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
2443 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
2444 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
2446 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
2447 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
2449 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
2450 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
2453 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
2454 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
2456 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
2457 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
2459 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
2460 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
2463 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
2464 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
2466 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
2467 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
2470 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
2471 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
2472 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
2474 let ExeDomain = SSEPackedInt in {
2475 let neverHasSideEffects = 1 in {
2476 // 128-bit logical shifts.
2477 def VPSLLDQri : PDIi8<0x73, MRM7r,
2478 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2479 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2481 def VPSRLDQri : PDIi8<0x73, MRM3r,
2482 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2483 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2485 // PSRADQri doesn't exist in SSE[1-3].
2487 def VPANDNrr : PDI<0xDF, MRMSrcReg,
2488 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2489 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2490 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2491 VR128:$src2)))]>, VEX_4V;
2493 def VPANDNrm : PDI<0xDF, MRMSrcMem,
2494 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2495 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2496 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2497 (memopv2i64 addr:$src2))))]>,
2502 let Constraints = "$src1 = $dst" in {
2503 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
2504 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
2505 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
2506 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
2507 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
2508 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
2510 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
2511 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
2512 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
2513 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
2514 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
2515 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
2517 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
2518 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
2519 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
2520 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
2522 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
2523 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
2524 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
2526 let ExeDomain = SSEPackedInt in {
2527 let neverHasSideEffects = 1 in {
2528 // 128-bit logical shifts.
2529 def PSLLDQri : PDIi8<0x73, MRM7r,
2530 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2531 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
2532 def PSRLDQri : PDIi8<0x73, MRM3r,
2533 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2534 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
2535 // PSRADQri doesn't exist in SSE[1-3].
2537 def PANDNrr : PDI<0xDF, MRMSrcReg,
2538 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2539 "pandn\t{$src2, $dst|$dst, $src2}", []>;
2541 def PANDNrm : PDI<0xDF, MRMSrcMem,
2542 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2543 "pandn\t{$src2, $dst|$dst, $src2}", []>;
2545 } // Constraints = "$src1 = $dst"
2547 let Predicates = [HasAVX] in {
2548 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2549 (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2550 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2551 (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2552 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2553 (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
2554 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2555 (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
2556 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2557 (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2559 // Shift up / down and insert zeros.
2560 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2561 (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2562 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2563 (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2566 let Predicates = [HasSSE2] in {
2567 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2568 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2569 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2570 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2571 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2572 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
2573 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2574 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
2575 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2576 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2578 // Shift up / down and insert zeros.
2579 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2580 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2581 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2582 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
}
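// For reference: the int_x86_sse2_psll_dq / psrl_dq intrinsics above take
// their shift amount in bits, while PSLLDQ/PSRLDQ shift the whole 128-bit
// register by bytes, so BYTE_imm converts the immediate from bits to bytes;
// the *_dq_bs ("byte shift") intrinsics already take a byte count and are
// passed through unchanged. Roughly, at the assembly level:
//   pslldq $4, %xmm0    ; shift %xmm0 left by 4 bytes (32 bits), zero-filling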
2585 //===---------------------------------------------------------------------===//
2586 // SSE2 - Packed Integer Comparison Instructions
2587 //===---------------------------------------------------------------------===//
2589 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
2590 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
     0>, VEX_4V;
2592 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
     0>, VEX_4V;
2594 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
     0>, VEX_4V;
2596 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
     0>, VEX_4V;
2598 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
     0>, VEX_4V;
2600 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
     0>, VEX_4V;
}
2604 let Constraints = "$src1 = $dst" in {
2605 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
2606 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
2607 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
2608 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
2609 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
2610 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
2611 } // Constraints = "$src1 = $dst"
2613 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2614 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
2615 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2616 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2617 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2618 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2619 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2620 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2621 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2622 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2623 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2624 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2626 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2627 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2628 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2629 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2630 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2631 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2632 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2633 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2634 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2635 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2636 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2637 (PCMPGTDrm VR128:$src1, addr:$src2)>;
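// For reference: pcmpeq*/pcmpgt* produce a per-element mask, setting each
// element of the destination to all ones where the comparison holds and to
// zero otherwise; the X86pcmpeq*/X86pcmpgt* patterns above let vector setcc
// results select these instructions directly. Roughly:
//   pcmpeqb %xmm1, %xmm0  ; byte i of %xmm0 := (%xmm0[i] == %xmm1[i]) ? 0xFF : 0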
2639 //===---------------------------------------------------------------------===//
2640 // SSE2 - Packed Integer Pack Instructions
2641 //===---------------------------------------------------------------------===//
2643 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
2644 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
     0, 0>, VEX_4V;
2646 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
     0, 0>, VEX_4V;
2648 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
     0, 0>, VEX_4V;
}
2652 let Constraints = "$src1 = $dst" in {
2653 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2654 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2655 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2656 } // Constraints = "$src1 = $dst"
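// For reference: the pack instructions narrow two source vectors into one,
// e.g. packsswb converts the eight signed words of each operand into sixteen
// bytes using signed saturation, packuswb does the same with unsigned
// saturation, and packssdw narrows signed dwords to words.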
2658 //===---------------------------------------------------------------------===//
2659 // SSE2 - Packed Integer Shuffle Instructions
2660 //===---------------------------------------------------------------------===//
2662 let ExeDomain = SSEPackedInt in {
2663 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
     PatFrag bc_frag> {
2665 def ri : Ii8<0x70, MRMSrcReg,
2666 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2667 !strconcat(OpcodeStr,
2668 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2669 [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
     (undef))))]>;
2671 def mi : Ii8<0x70, MRMSrcMem,
2672 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2673 !strconcat(OpcodeStr,
2674 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2675 [(set VR128:$dst, (vt (pshuf_frag:$src2
2676 (bc_frag (memopv2i64 addr:$src1)),
     (undef))))]>;
}
2679 } // ExeDomain = SSEPackedInt
2681 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
2682 let AddedComplexity = 5 in
2683 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
     VEX;
2686 // SSE2 with ImmT == Imm8 and XS prefix.
2687 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
     VEX;
2690 // SSE2 with ImmT == Imm8 and XD prefix.
2691 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
     VEX;
}
2695 let Predicates = [HasSSE2] in {
2696 let AddedComplexity = 5 in
2697 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
2699 // SSE2 with ImmT == Imm8 and XS prefix.
2700 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
2702 // SSE2 with ImmT == Imm8 and XD prefix.
2703 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
}
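// For reference: the pshuf* immediate selects source elements, two bits per
// destination element. Roughly:
//   pshufd $0x1B, %xmm1, %xmm0   ; 0x1B = 0b00011011 reverses the four dwords
// pshufhw/pshuflw shuffle only the high/low four words and copy the rest.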
2706 //===---------------------------------------------------------------------===//
2707 // SSE2 - Packed Integer Unpack Instructions
2708 //===---------------------------------------------------------------------===//
2710 let ExeDomain = SSEPackedInt in {
2711 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
2712 PatFrag unp_frag, PatFrag bc_frag, bit Is2Addr = 1> {
2713 def rr : PDI<opc, MRMSrcReg,
2714 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2716 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2717 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2718 [(set VR128:$dst, (vt (unp_frag VR128:$src1, VR128:$src2)))]>;
2719 def rm : PDI<opc, MRMSrcMem,
2720 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2722 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2723 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2724 [(set VR128:$dst, (unp_frag VR128:$src1,
2725 (bc_frag (memopv2i64
2729 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
2730 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, unpckl, bc_v16i8,
2732 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, unpckl, bc_v8i16,
2734 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, unpckl, bc_v4i32,
2737 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2738 /// knew to collapse (bitconvert VT to VT) into its operand.
2739 def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2740 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2741 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2743 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>, VEX_4V;
2744 def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2745 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2746 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2748 (v2i64 (unpckl VR128:$src1,
2749 (memopv2i64 addr:$src2))))]>, VEX_4V;
2751 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, unpckh, bc_v16i8,
2753 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, unpckh, bc_v8i16,
2755 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, unpckh, bc_v4i32,
2758 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2759 /// knew to collapse (bitconvert VT to VT) into its operand.
2760 def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2761 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2762 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2764 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>, VEX_4V;
2765 def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2766 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2767 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2769 (v2i64 (unpckh VR128:$src1,
2770 (memopv2i64 addr:$src2))))]>, VEX_4V;
2773 let Constraints = "$src1 = $dst" in {
2774 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, unpckl, bc_v16i8>;
2775 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, unpckl, bc_v8i16>;
2776 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, unpckl, bc_v4i32>;
2778 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2779 /// knew to collapse (bitconvert VT to VT) into its operand.
2780 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2781 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2782 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2784 (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>;
2785 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2786 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2787 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2789 (v2i64 (unpckl VR128:$src1,
2790 (memopv2i64 addr:$src2))))]>;
2792 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, unpckh, bc_v16i8>;
2793 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, unpckh, bc_v8i16>;
2794 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, unpckh, bc_v4i32>;
2796 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2797 /// knew to collapse (bitconvert VT to VT) into its operand.
2798 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2799 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2800 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2802 (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>;
2803 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2804 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2805 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2807 (v2i64 (unpckh VR128:$src1,
2808 (memopv2i64 addr:$src2))))]>;
2811 } // ExeDomain = SSEPackedInt
2813 //===---------------------------------------------------------------------===//
2814 // SSE2 - Packed Integer Extract and Insert
2815 //===---------------------------------------------------------------------===//
2817 let ExeDomain = SSEPackedInt in {
2818 multiclass sse2_pinsrw<bit Is2Addr = 1> {
2819 def rri : Ii8<0xC4, MRMSrcReg,
2820 (outs VR128:$dst), (ins VR128:$src1,
2821 GR32:$src2, i32i8imm:$src3),
2823 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2824 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2826 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
2827 def rmi : Ii8<0xC4, MRMSrcMem,
2828 (outs VR128:$dst), (ins VR128:$src1,
2829 i16mem:$src2, i32i8imm:$src3),
2831 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2832 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2834 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2839 let isAsmParserOnly = 0, Predicates = [HasAVX] in
2840 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
2841 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2842 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2843 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2844 imm:$src2))]>, OpSize, VEX;
2845 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2846 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2847 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2848 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2852 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
2853 defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
2854 def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
2855 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
2856 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2857 []>, OpSize, VEX_4V;
2860 let Constraints = "$src1 = $dst" in
2861 defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
2863 } // ExeDomain = SSEPackedInt
2865 //===---------------------------------------------------------------------===//
2866 // SSE2 - Packed Mask Creation
2867 //===---------------------------------------------------------------------===//
2869 let ExeDomain = SSEPackedInt in {
2871 let isAsmParserOnly = 0 in {
2872 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2873 "pmovmskb\t{$src, $dst|$dst, $src}",
2874 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
2875 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2876 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
2878 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2879 "pmovmskb\t{$src, $dst|$dst, $src}",
2880 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2882 } // ExeDomain = SSEPackedInt
2884 //===---------------------------------------------------------------------===//
2885 // SSE2 - Conditional Store
2886 //===---------------------------------------------------------------------===//
2888 let ExeDomain = SSEPackedInt in {
2890 let isAsmParserOnly = 0 in {
2892 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
2893 (ins VR128:$src, VR128:$mask),
2894 "maskmovdqu\t{$mask, $src|$src, $mask}",
2895 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
2897 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
2898 (ins VR128:$src, VR128:$mask),
2899 "maskmovdqu\t{$mask, $src|$src, $mask}",
2900 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
2904 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2905 "maskmovdqu\t{$mask, $src|$src, $mask}",
2906 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
2908 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2909 "maskmovdqu\t{$mask, $src|$src, $mask}",
2910 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
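// For reference: maskmovdqu stores selected bytes of $src to [EDI]/[RDI],
// writing byte i only when the most significant bit of byte i of $mask is
// set, using a non-temporal hint. The implicit EDI/RDI operand is carried in
// the intrinsic rather than in the instruction's operand list.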
2912 } // ExeDomain = SSEPackedInt
2914 //===---------------------------------------------------------------------===//
2915 // SSE2 - Move Doubleword
2916 //===---------------------------------------------------------------------===//
2918 // Move Int Doubleword to Packed Double Int
2919 let isAsmParserOnly = 0 in {
2920 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2921 "movd\t{$src, $dst|$dst, $src}",
2923 (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
2924 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2925 "movd\t{$src, $dst|$dst, $src}",
2927 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
2930 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2931 "movd\t{$src, $dst|$dst, $src}",
2933 (v4i32 (scalar_to_vector GR32:$src)))]>;
2934 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2935 "movd\t{$src, $dst|$dst, $src}",
2937 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2938 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2939 "mov{d|q}\t{$src, $dst|$dst, $src}",
2941 (v2i64 (scalar_to_vector GR64:$src)))]>;
2942 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
2943 "mov{d|q}\t{$src, $dst|$dst, $src}",
2944 [(set FR64:$dst, (bitconvert GR64:$src))]>;
2947 // Move Int Doubleword to Single Scalar
2948 let isAsmParserOnly = 0 in {
2949 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2950 "movd\t{$src, $dst|$dst, $src}",
2951 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
2953 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2954 "movd\t{$src, $dst|$dst, $src}",
2955 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
2958 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2959 "movd\t{$src, $dst|$dst, $src}",
2960 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2962 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2963 "movd\t{$src, $dst|$dst, $src}",
2964 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2966 // Move Packed Doubleword Int to Packed Double Int
2967 let isAsmParserOnly = 0 in {
2968 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2969 "movd\t{$src, $dst|$dst, $src}",
2970 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2972 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
2973 (ins i32mem:$dst, VR128:$src),
2974 "movd\t{$src, $dst|$dst, $src}",
2975 [(store (i32 (vector_extract (v4i32 VR128:$src),
2976 (iPTR 0))), addr:$dst)]>, VEX;
2978 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2979 "movd\t{$src, $dst|$dst, $src}",
2980 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2982 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
2983 "movd\t{$src, $dst|$dst, $src}",
2984 [(store (i32 (vector_extract (v4i32 VR128:$src),
2985 (iPTR 0))), addr:$dst)]>;
2987 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
2988 "mov{d|q}\t{$src, $dst|$dst, $src}",
2989 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
2991 def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
2992 "movq\t{$src, $dst|$dst, $src}",
2993 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
2995 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
2996 "mov{d|q}\t{$src, $dst|$dst, $src}",
2997 [(set GR64:$dst, (bitconvert FR64:$src))]>;
2998 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
2999 "movq\t{$src, $dst|$dst, $src}",
3000 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
3002 // Move Scalar Single to Double Int
3003 let isAsmParserOnly = 0 in {
3004 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3005 "movd\t{$src, $dst|$dst, $src}",
3006 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
3007 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3008 "movd\t{$src, $dst|$dst, $src}",
3009 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
3011 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3012 "movd\t{$src, $dst|$dst, $src}",
3013 [(set GR32:$dst, (bitconvert FR32:$src))]>;
3014 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3015 "movd\t{$src, $dst|$dst, $src}",
3016 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
3018 // movd / movq to an XMM register zero-extends the upper elements
3019 let AddedComplexity = 15, isAsmParserOnly = 0 in {
3020 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3021 "movd\t{$src, $dst|$dst, $src}",
3022 [(set VR128:$dst, (v4i32 (X86vzmovl
3023 (v4i32 (scalar_to_vector GR32:$src)))))]>,
3025 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3026 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3027 [(set VR128:$dst, (v2i64 (X86vzmovl
3028 (v2i64 (scalar_to_vector GR64:$src)))))]>,
3031 let AddedComplexity = 15 in {
3032 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3033 "movd\t{$src, $dst|$dst, $src}",
3034 [(set VR128:$dst, (v4i32 (X86vzmovl
3035 (v4i32 (scalar_to_vector GR32:$src)))))]>;
3036 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3037 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3038 [(set VR128:$dst, (v2i64 (X86vzmovl
3039 (v2i64 (scalar_to_vector GR64:$src)))))]>;
3042 let AddedComplexity = 20 in {
3043 let isAsmParserOnly = 0 in
3044 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3045 "movd\t{$src, $dst|$dst, $src}",
3047 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3048 (loadi32 addr:$src))))))]>,
3050 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3051 "movd\t{$src, $dst|$dst, $src}",
3053 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3054 (loadi32 addr:$src))))))]>;
3056 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
3057 (MOVZDI2PDIrm addr:$src)>;
3058 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3059 (MOVZDI2PDIrm addr:$src)>;
3060 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3061 (MOVZDI2PDIrm addr:$src)>;
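// For reference: a plain movd/movq load into an XMM register writes element 0
// and zeros the remaining elements, which is what the X86vzmovl
// (zero-extending move) patterns above model, e.g.:
//   movd (%rax), %xmm0   ; %xmm0 = <mem32, 0, 0, 0>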
3064 //===---------------------------------------------------------------------===//
3065 // SSE2 - Move Quadword
3066 //===---------------------------------------------------------------------===//
3068 // Move Quadword Int to Packed Quadword Int
3069 let isAsmParserOnly = 0 in
3070 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3071 "vmovq\t{$src, $dst|$dst, $src}",
3073 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3074 VEX, Requires<[HasAVX]>;
3075 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3076 "movq\t{$src, $dst|$dst, $src}",
3078 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3079 Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
3081 // Move Packed Quadword Int to Quadword Int
3082 let isAsmParserOnly = 0 in
3083 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3084 "movq\t{$src, $dst|$dst, $src}",
3085 [(store (i64 (vector_extract (v2i64 VR128:$src),
3086 (iPTR 0))), addr:$dst)]>, VEX;
3087 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3088 "movq\t{$src, $dst|$dst, $src}",
3089 [(store (i64 (vector_extract (v2i64 VR128:$src),
3090 (iPTR 0))), addr:$dst)]>;
3092 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
3093 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
3095 // Store / copy the lower 64 bits of an XMM register.
3096 let isAsmParserOnly = 0 in
3097 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3098 "movq\t{$src, $dst|$dst, $src}",
3099 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
3100 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3101 "movq\t{$src, $dst|$dst, $src}",
3102 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
3104 let AddedComplexity = 20, isAsmParserOnly = 0 in
3105 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3106 "vmovq\t{$src, $dst|$dst, $src}",
3108 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3109 (loadi64 addr:$src))))))]>,
3110 XS, VEX, Requires<[HasAVX]>;
3112 let AddedComplexity = 20 in {
3113 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3114 "movq\t{$src, $dst|$dst, $src}",
3116 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3117 (loadi64 addr:$src))))))]>,
3118 XS, Requires<[HasSSE2]>;
3120 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3121 (MOVZQI2PQIrm addr:$src)>;
3122 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
3123 (MOVZQI2PQIrm addr:$src)>;
3124 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
3127 // Move from XMM to XMM, clearing the upper 64 bits. Note that there is a
3128 // bug in the IA-32 documentation: movq xmm1, xmm2 does clear the high bits.
3129 let isAsmParserOnly = 0, AddedComplexity = 15 in
3130 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3131 "vmovq\t{$src, $dst|$dst, $src}",
3132 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3133 XS, VEX, Requires<[HasAVX]>;
3134 let AddedComplexity = 15 in
3135 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3136 "movq\t{$src, $dst|$dst, $src}",
3137 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3138 XS, Requires<[HasSSE2]>;
3140 let AddedComplexity = 20, isAsmParserOnly = 0 in
3141 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3142 "vmovq\t{$src, $dst|$dst, $src}",
3143 [(set VR128:$dst, (v2i64 (X86vzmovl
3144 (loadv2i64 addr:$src))))]>,
3145 XS, VEX, Requires<[HasAVX]>;
3146 let AddedComplexity = 20 in {
3147 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3148 "movq\t{$src, $dst|$dst, $src}",
3149 [(set VR128:$dst, (v2i64 (X86vzmovl
3150 (loadv2i64 addr:$src))))]>,
3151 XS, Requires<[HasSSE2]>;
3153 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
3154 (MOVZPQILo2PQIrm addr:$src)>;
3157 // Instructions to match in the assembler
3158 let isAsmParserOnly = 0 in {
3159 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3160 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3161 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3162 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3163 // Recognize "movd" with GR64 destination, but encode as a "movq"
3164 def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3165 "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3168 // Instructions for the disassembler
3169 // xr = XMM register
3172 let isAsmParserOnly = 0, Predicates = [HasAVX] in
3173 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3174 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3175 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3176 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3178 //===---------------------------------------------------------------------===//
3179 // SSE2 - Misc Instructions
3180 //===---------------------------------------------------------------------===//
3183 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3184 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3185 TB, Requires<[HasSSE2]>;
3187 // Load, store, and memory fence
3188 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3189 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3190 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3191 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3192 def : Pat<(X86LFence), (LFENCE)>;
3193 def : Pat<(X86MFence), (MFENCE)>;
3196 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3197 // was introduced with SSE2, it's backward compatible.
3198 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
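// For reference: "pause" assembles to F3 90 (a REP prefix on NOP), so on
// pre-SSE2 processors it simply executes as a NOP; it is typically used to
// relax spin-wait loops.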
3200 // Alias instruction that maps an all-ones vector to pcmpeqd.
3201 // We set canFoldAsLoad because this can be converted to a constant-pool
3202 // load of an all-ones value if folding it would be beneficial.
3203 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3204 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3205 // FIXME: Change encoding to pseudo.
3206 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3207 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
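// For reference: the usual expansion of V_SETALLONES is "pcmpeqd %xmm, %xmm";
// comparing a register with itself is always true, so every element becomes
// all ones without touching memory.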
3209 //===---------------------------------------------------------------------===//
3210 // SSE3 - Conversion Instructions
3211 //===---------------------------------------------------------------------===//
3213 // Convert Packed Double FP to Packed DW Integers
3214 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3215 // The assembler can recognize rr 256-bit instructions by seeing a ymm
3216 // register, but the same isn't true when using memory operands instead.
3217 // Provide other assembly rr and rm forms to address this explicitly.
3218 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3219 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3220 def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3221 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3224 def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3225 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3226 def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3227 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3230 def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3231 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
3232 def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
3233 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
3236 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3237 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3238 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3239 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3241 // Convert Packed DW Integers to Packed Double FP
3242 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3243 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3244 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3245 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3246 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3247 def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
3248 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3249 def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
3250 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3253 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3254 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3255 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3256 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3258 // AVX 256-bit register conversion intrinsics
3259 def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
3260 (VCVTDQ2PDYrr VR128:$src)>;
3261 def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
3262 (VCVTDQ2PDYrm addr:$src)>;
3264 def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
3265 (VCVTPD2DQYrr VR256:$src)>;
3266 def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
3267 (VCVTPD2DQYrm addr:$src)>;
3269 //===---------------------------------------------------------------------===//
3270 // SSE3 - Move Instructions
3271 //===---------------------------------------------------------------------===//
3273 // Replicate Single FP
3274 multiclass sse3_replicate_sfp<bits<8> op, PatFrag rep_frag, string OpcodeStr> {
3275 def rr : S3SI<op, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3276 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3277 [(set VR128:$dst, (v4f32 (rep_frag
3278 VR128:$src, (undef))))]>;
3279 def rm : S3SI<op, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3280 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3281 [(set VR128:$dst, (rep_frag
3282 (memopv4f32 addr:$src), (undef)))]>;
3285 multiclass sse3_replicate_sfp_y<bits<8> op, PatFrag rep_frag,
3287 def rr : S3SI<op, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3288 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3289 def rm : S3SI<op, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3290 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3293 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3294 // FIXME: Merge above classes when we have patterns for the ymm version
3295 defm VMOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "vmovshdup">, VEX;
3296 defm VMOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "vmovsldup">, VEX;
3297 defm VMOVSHDUPY : sse3_replicate_sfp_y<0x16, movshdup, "vmovshdup">, VEX;
3298 defm VMOVSLDUPY : sse3_replicate_sfp_y<0x12, movsldup, "vmovsldup">, VEX;
3300 defm MOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "movshdup">;
3301 defm MOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "movsldup">;
3303 // Replicate Double FP
3304 multiclass sse3_replicate_dfp<string OpcodeStr> {
3305 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3306 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3307 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
3308 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
3309 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3311 (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
3315 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
3316 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3317 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3319 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3320 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3324 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3325 // FIXME: Merge above classes when we have patterns for the ymm version
3326 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
3327 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
3329 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
3331 // Move Unaligned Integer
3332 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3333 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3334 "vlddqu\t{$src, $dst|$dst, $src}",
3335 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
3336 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3337 "vlddqu\t{$src, $dst|$dst, $src}",
3338 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
3340 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3341 "lddqu\t{$src, $dst|$dst, $src}",
3342 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
3344 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
3346 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3348 // Several Move patterns
3349 let AddedComplexity = 5 in {
3350 def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
3351 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3352 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3353 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3354 def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
3355 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3356 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
3357 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3360 // vector_shuffle v1, <undef> <1, 1, 3, 3>
3361 let AddedComplexity = 15 in
3362 def : Pat<(v4i32 (movshdup VR128:$src, (undef))),
3363 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3364 let AddedComplexity = 20 in
3365 def : Pat<(v4i32 (movshdup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3366 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
3368 // vector_shuffle v1, <undef> <0, 0, 2, 2>
3369 let AddedComplexity = 15 in
3370 def : Pat<(v4i32 (movsldup VR128:$src, (undef))),
3371 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3372 let AddedComplexity = 20 in
3373 def : Pat<(v4i32 (movsldup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3374 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
3376 //===---------------------------------------------------------------------===//
3377 // SSE3 - Arithmetic
3378 //===---------------------------------------------------------------------===//
3380 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
3381 X86MemOperand x86memop, bit Is2Addr = 1> {
3382 def rr : I<0xD0, MRMSrcReg,
3383 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3385 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3386 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3387 [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
3388 def rm : I<0xD0, MRMSrcMem,
3389 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3391 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3392 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3393 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
3396 let isAsmParserOnly = 0, Predicates = [HasAVX],
3397 ExeDomain = SSEPackedDouble in {
3398 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
3399 f128mem, 0>, TB, XD, VEX_4V;
3400 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
3401 f128mem, 0>, TB, OpSize, VEX_4V;
3402 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
3403 f256mem, 0>, TB, XD, VEX_4V;
3404 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
3405 f256mem, 0>, TB, OpSize, VEX_4V;
3407 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
3408 ExeDomain = SSEPackedDouble in {
3409 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
3411 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
3412 f128mem>, TB, OpSize;
3415 //===---------------------------------------------------------------------===//
3416 // SSE3 Instructions
3417 //===---------------------------------------------------------------------===//
3420 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3421 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3422 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3424 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3425 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3426 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3428 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3430 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3431 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3432 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3434 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3435 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3436 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3438 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3439 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3440 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3442 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3444 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3445 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3446 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3449 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3450 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
3451 int_x86_sse3_hadd_ps, 0>, VEX_4V;
3452 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
3453 int_x86_sse3_hadd_pd, 0>, VEX_4V;
3454 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
3455 int_x86_sse3_hsub_ps, 0>, VEX_4V;
3456 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
3457 int_x86_sse3_hsub_pd, 0>, VEX_4V;
3458 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
3459 int_x86_avx_hadd_ps_256, 0>, VEX_4V;
3460 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
3461 int_x86_avx_hadd_pd_256, 0>, VEX_4V;
3462 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
3463 int_x86_avx_hsub_ps_256, 0>, VEX_4V;
3464 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
3465 int_x86_avx_hsub_pd_256, 0>, VEX_4V;
3468 let Constraints = "$src1 = $dst" in {
3469 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
3470 int_x86_sse3_hadd_ps>;
3471 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
3472 int_x86_sse3_hadd_pd>;
3473 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
3474 int_x86_sse3_hsub_ps>;
3475 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
3476 int_x86_sse3_hsub_pd>;
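// For reference: the horizontal ops combine adjacent element pairs, e.g.
// "haddps %xmm1, %xmm0" roughly computes
//   %xmm0 = < x0+x1, x2+x3, y0+y1, y2+y3 >   (x = old %xmm0, y = %xmm1)
// while haddpd/hsubpd do the same on the two doubles of each source.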
3479 //===---------------------------------------------------------------------===//
3480 // SSSE3 - Packed Absolute Instructions
3481 //===---------------------------------------------------------------------===//
3484 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
3485 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
3486 PatFrag mem_frag128, Intrinsic IntId128> {
3487 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3489 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3490 [(set VR128:$dst, (IntId128 VR128:$src))]>,
3493 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3495 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3498 (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
3501 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3502 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
3503 int_x86_ssse3_pabs_b_128>, VEX;
3504 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
3505 int_x86_ssse3_pabs_w_128>, VEX;
3506 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
3507 int_x86_ssse3_pabs_d_128>, VEX;
3510 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
3511 int_x86_ssse3_pabs_b_128>;
3512 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
3513 int_x86_ssse3_pabs_w_128>;
3514 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
3515 int_x86_ssse3_pabs_d_128>;
3517 //===---------------------------------------------------------------------===//
3518 // SSSE3 - Packed Binary Operator Instructions
3519 //===---------------------------------------------------------------------===//
3521 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
3522 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
3523 PatFrag mem_frag128, Intrinsic IntId128,
3525 let isCommutable = 1 in
3526 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3527 (ins VR128:$src1, VR128:$src2),
3529 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3530 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3531 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3533 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3534 (ins VR128:$src1, i128mem:$src2),
3536 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3537 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3539 (IntId128 VR128:$src1,
3540 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
3543 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3544 let isCommutable = 0 in {
3545 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
3546 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
3547 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
3548 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
3549 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
3550 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
3551 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
3552 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
3553 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
3554 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
3555 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
3556 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
3557 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
3558 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
3559 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
3560 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
3561 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
3562 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
3563 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
3564 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
3565 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
3566 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
3568 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
3569 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
3572 // None of these have i8 immediate fields.
3573 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
3574 let isCommutable = 0 in {
3575 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
3576 int_x86_ssse3_phadd_w_128>;
3577 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
3578 int_x86_ssse3_phadd_d_128>;
3579 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
3580 int_x86_ssse3_phadd_sw_128>;
3581 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
3582 int_x86_ssse3_phsub_w_128>;
3583 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
3584 int_x86_ssse3_phsub_d_128>;
3585 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
3586 int_x86_ssse3_phsub_sw_128>;
3587 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
3588 int_x86_ssse3_pmadd_ub_sw_128>;
3589 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
3590 int_x86_ssse3_pshuf_b_128>;
3591 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
3592 int_x86_ssse3_psign_b_128>;
3593 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
3594 int_x86_ssse3_psign_w_128>;
3595 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
3596 int_x86_ssse3_psign_d_128>;
3598 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
3599 int_x86_ssse3_pmul_hr_sw_128>;
3602 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
3603 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
3604 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
3605 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
3607 def : Pat<(X86psignb VR128:$src1, VR128:$src2),
3608 (PSIGNBrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
3609 def : Pat<(X86psignw VR128:$src1, VR128:$src2),
3610 (PSIGNWrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
3611 def : Pat<(X86psignd VR128:$src1, VR128:$src2),
3612 (PSIGNDrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
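// For reference: psign* copies each element of the first operand unchanged,
// negated, or zeroed depending on whether the corresponding element of the
// second operand is positive, negative, or zero; pshufb selects bytes of the
// source using the low nibble of each mask byte, or writes zero when the
// mask byte's high bit is set.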
3614 //===---------------------------------------------------------------------===//
3615 // SSSE3 - Packed Align Instruction Patterns
3616 //===---------------------------------------------------------------------===//
3618 multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
3619 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
3620 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
3622 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3624 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3626 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
3627 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
3629 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3631 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3635 let isAsmParserOnly = 0, Predicates = [HasAVX] in
3636 defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
3637 let Constraints = "$src1 = $dst" in
3638 defm PALIGN : ssse3_palign<"palignr">;
3640 let AddedComplexity = 5 in {
3641 def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
3642 (PALIGNR128rr VR128:$src2, VR128:$src1,
3643 (SHUFFLE_get_palign_imm VR128:$src3))>,
3644 Requires<[HasSSSE3]>;
3645 def : Pat<(v4f32 (palign:$src3 VR128:$src1, VR128:$src2)),
3646 (PALIGNR128rr VR128:$src2, VR128:$src1,
3647 (SHUFFLE_get_palign_imm VR128:$src3))>,
3648 Requires<[HasSSSE3]>;
3649 def : Pat<(v8i16 (palign:$src3 VR128:$src1, VR128:$src2)),
3650 (PALIGNR128rr VR128:$src2, VR128:$src1,
3651 (SHUFFLE_get_palign_imm VR128:$src3))>,
3652 Requires<[HasSSSE3]>;
3653 def : Pat<(v16i8 (palign:$src3 VR128:$src1, VR128:$src2)),
3654 (PALIGNR128rr VR128:$src2, VR128:$src1,
3655 (SHUFFLE_get_palign_imm VR128:$src3))>,
3656 Requires<[HasSSSE3]>;
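// For reference: "palignr $imm, %xmm2, %xmm1" concatenates xmm1:xmm2 (with
// xmm2 in the low half), shifts the 256-bit value right by $imm bytes, and
// keeps the low 16 bytes. The patterns above therefore swap the two shuffle
// operands and derive the byte offset with SHUFFLE_get_palign_imm.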
3659 //===---------------------------------------------------------------------===//
3660 // SSSE3 Misc Instructions
3661 //===---------------------------------------------------------------------===//
3663 // Thread synchronization
3664 let usesCustomInserter = 1 in {
3665 def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
3666 [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>;
3667 def MWAIT : PseudoI<(outs), (ins GR32:$src1, GR32:$src2),
3668 [(int_x86_sse3_mwait GR32:$src1, GR32:$src2)]>;
3671 let Uses = [EAX, ECX, EDX] in
3672 def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>, TB,
3673 Requires<[HasSSE3]>;
3674 let Uses = [ECX, EAX] in
3675 def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", []>, TB,
3676 Requires<[HasSSE3]>;
3678 def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
3679 def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;
3681 def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>,
3682 Requires<[In32BitMode]>;
3683 def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>,
3684 Requires<[In64BitMode]>;
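// For reference: monitor takes its operands implicitly (EAX/RAX = linear
// address to arm, ECX = extensions, EDX = hints) and mwait takes ECX =
// extensions and EAX = hints, which is why the defs above list no explicit
// operands and the aliases only fix the register spelling.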
3686 //===---------------------------------------------------------------------===//
3687 // Non-Instruction Patterns
3688 //===---------------------------------------------------------------------===//
3690 // extload f32 -> f64. This matches load+fextend because we have a hack in
3691 // the isel (PreprocessForFPConvert) that can introduce loads after dag
     // combining, allowing targetLowering code to be shared between these nodes.
3693 // Since these loads aren't folded into the fextend, we have to match it
     // explicitly here.
3695 let Predicates = [HasSSE2] in
3696 def : Pat<(fextend (loadf32 addr:$src)),
3697 (CVTSS2SDrm addr:$src)>;
3700 let Predicates = [HasXMMInt] in {
3701 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
3702 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
3703 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
3704 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
3705 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
3706 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
3707 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
3708 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
3709 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
3710 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
3711 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
3712 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
3713 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
3714 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
3715 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
3716 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
3717 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
3718 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
3719 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
3720 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
3721 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
3722 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
3723 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
3724 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
3725 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
3726 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
3727 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
3728 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
3729 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
3730 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
3733 let Predicates = [HasAVX] in {
3734 def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
3737 // Move a scalar into an XMM register, zero-extending the upper elements
3738 // (movd to an XMM register zero-extends).
3739 let AddedComplexity = 15 in {
3740 // Zero a VR128, then do a MOVS{S|D} to the lower bits.
3741 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
3742 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
3743 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
3744 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
3745 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
3746 (MOVSSrr (v4f32 (V_SET0PS)),
3747 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
3748 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
3749 (MOVSSrr (v4i32 (V_SET0PI)),
3750 (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
3753 // Splat v2f64 / v2i64
3754 let AddedComplexity = 10 in {
3755 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
3756 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3757 def : Pat<(unpckh (v2f64 VR128:$src), (undef)),
3758 (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3759 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
3760 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3761 def : Pat<(unpckh (v2i64 VR128:$src), (undef)),
3762 (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3765 // Special unary SHUFPSrri case.
3766 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
3767 (SHUFPSrri VR128:$src1, VR128:$src1,
3768 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3769 let AddedComplexity = 5 in
3770 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3771 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3772 Requires<[HasSSE2]>;
3773 // Special unary SHUFPDrri case.
3774 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
3775 (SHUFPDrri VR128:$src1, VR128:$src1,
3776 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3777 Requires<[HasSSE2]>;
3778 // Special unary SHUFPDrri case.
3779 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
3780 (SHUFPDrri VR128:$src1, VR128:$src1,
3781 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3782 Requires<[HasSSE2]>;
3783 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3784 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3785 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3786 Requires<[HasSSE2]>;
3788 // Special binary v4i32 shuffle cases with SHUFPS.
3789 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
3790 (SHUFPSrri VR128:$src1, VR128:$src2,
3791 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3792 Requires<[HasSSE2]>;
3793 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)))),
3794 (SHUFPSrmi VR128:$src1, addr:$src2,
3795 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3796 Requires<[HasSSE2]>;
3797 // Special binary v2i64 shuffle cases using SHUFPDrri.
3798 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
3799 (SHUFPDrri VR128:$src1, VR128:$src2,
3800 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3801 Requires<[HasSSE2]>;
3803 // vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
3804 let AddedComplexity = 15 in {
3805 def : Pat<(v4i32 (unpckl_undef:$src2 VR128:$src, (undef))),
3806 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3807 Requires<[OptForSpeed, HasSSE2]>;
3808 def : Pat<(v4f32 (unpckl_undef:$src2 VR128:$src, (undef))),
3809 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3810 Requires<[OptForSpeed, HasSSE2]>;
3812 let AddedComplexity = 10 in {
3813 def : Pat<(v4f32 (unpckl_undef VR128:$src, (undef))),
3814 (UNPCKLPSrr VR128:$src, VR128:$src)>;
3815 def : Pat<(v16i8 (unpckl_undef VR128:$src, (undef))),
3816 (PUNPCKLBWrr VR128:$src, VR128:$src)>;
3817 def : Pat<(v8i16 (unpckl_undef VR128:$src, (undef))),
3818 (PUNPCKLWDrr VR128:$src, VR128:$src)>;
3819 def : Pat<(v4i32 (unpckl_undef VR128:$src, (undef))),
3820 (PUNPCKLDQrr VR128:$src, VR128:$src)>;
3823 // vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
3824 let AddedComplexity = 15 in {
3825 def : Pat<(v4i32 (unpckh_undef:$src2 VR128:$src, (undef))),
3826 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3827 Requires<[OptForSpeed, HasSSE2]>;
3828 def : Pat<(v4f32 (unpckh_undef:$src2 VR128:$src, (undef))),
3829 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3830 Requires<[OptForSpeed, HasSSE2]>;
3832 let AddedComplexity = 10 in {
3833 def : Pat<(v4f32 (unpckh_undef VR128:$src, (undef))),
3834 (UNPCKHPSrr VR128:$src, VR128:$src)>;
3835 def : Pat<(v16i8 (unpckh_undef VR128:$src, (undef))),
3836 (PUNPCKHBWrr VR128:$src, VR128:$src)>;
3837 def : Pat<(v8i16 (unpckh_undef VR128:$src, (undef))),
3838 (PUNPCKHWDrr VR128:$src, VR128:$src)>;
3839 def : Pat<(v4i32 (unpckh_undef VR128:$src, (undef))),
3840 (PUNPCKHDQrr VR128:$src, VR128:$src)>;
3843 let AddedComplexity = 20 in {
3844 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
3845 def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
3846 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
3848 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
3849 def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
3850 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
3852 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
3853 def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
3854 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3855 def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
3856 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3859 let AddedComplexity = 20 in {
3860 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
3861 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
3862 (MOVLPSrm VR128:$src1, addr:$src2)>;
3863 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
3864 (MOVLPDrm VR128:$src1, addr:$src2)>;
3865 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
3866 (MOVLPSrm VR128:$src1, addr:$src2)>;
3867 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
3868 (MOVLPDrm VR128:$src1, addr:$src2)>;
3871 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
3872 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3873 (MOVLPSmr addr:$src1, VR128:$src2)>;
3874 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3875 (MOVLPDmr addr:$src1, VR128:$src2)>;
3876 def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
3878 (MOVLPSmr addr:$src1, VR128:$src2)>;
3879 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3880 (MOVLPDmr addr:$src1, VR128:$src2)>;
3882 let AddedComplexity = 15 in {
3883 // Setting the lowest element in the vector.
3884 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
3885 (MOVSSrr (v4i32 VR128:$src1),
3886 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
3887 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
3888 (MOVSDrr (v2i64 VR128:$src1),
3889 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
3891 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
3892 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
3893 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3894 Requires<[HasSSE2]>;
3895 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
3896 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3897 Requires<[HasSSE2]>;
3900 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
3901 // fall back to this for SSE1)
3902 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
3903 (SHUFPSrri VR128:$src2, VR128:$src1,
3904 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3906 // Set lowest element and zero upper elements.
3907 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
3908 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
3910 // vector -> vector casts
3911 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
3912 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
3913 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
3914 (CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
3916 // Use movaps / movups for SSE integer load / store (one byte shorter).
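// (For reference: movdqa/movdqu carry a 0x66/0xF3 prefix byte on top of the 0F 6F
// opcode, while movaps/movups are plain 0F 28 / 0F 10 - that prefix is the byte
// being saved.)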
3917 let Predicates = [HasSSE1] in {
3918 def : Pat<(alignedloadv4i32 addr:$src),
3919 (MOVAPSrm addr:$src)>;
3920 def : Pat<(loadv4i32 addr:$src),
3921 (MOVUPSrm addr:$src)>;
3922 def : Pat<(alignedloadv2i64 addr:$src),
3923 (MOVAPSrm addr:$src)>;
3924 def : Pat<(loadv2i64 addr:$src),
3925 (MOVUPSrm addr:$src)>;
3927 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3928 (MOVAPSmr addr:$dst, VR128:$src)>;
3929 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3930 (MOVAPSmr addr:$dst, VR128:$src)>;
3931 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3932 (MOVAPSmr addr:$dst, VR128:$src)>;
3933 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3934 (MOVAPSmr addr:$dst, VR128:$src)>;
3935 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3936 (MOVUPSmr addr:$dst, VR128:$src)>;
3937 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3938 (MOVUPSmr addr:$dst, VR128:$src)>;
3939 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3940 (MOVUPSmr addr:$dst, VR128:$src)>;
3941 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3942 (MOVUPSmr addr:$dst, VR128:$src)>;
3945 // Use vmovaps/vmovups for AVX 128-bit integer load/store (one byte shorter).
3946 let Predicates = [HasAVX] in {
3947 def : Pat<(alignedloadv4i32 addr:$src),
3948 (VMOVAPSrm addr:$src)>;
3949 def : Pat<(loadv4i32 addr:$src),
3950 (VMOVUPSrm addr:$src)>;
3951 def : Pat<(alignedloadv2i64 addr:$src),
3952 (VMOVAPSrm addr:$src)>;
3953 def : Pat<(loadv2i64 addr:$src),
3954 (VMOVUPSrm addr:$src)>;
3956 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3957 (VMOVAPSmr addr:$dst, VR128:$src)>;
3958 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3959 (VMOVAPSmr addr:$dst, VR128:$src)>;
3960 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3961 (VMOVAPSmr addr:$dst, VR128:$src)>;
3962 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3963 (VMOVAPSmr addr:$dst, VR128:$src)>;
3964 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3965 (VMOVUPSmr addr:$dst, VR128:$src)>;
3966 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3967 (VMOVUPSmr addr:$dst, VR128:$src)>;
3968 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
3969 (VMOVUPSmr addr:$dst, VR128:$src)>;
3970 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
3971 (VMOVUPSmr addr:$dst, VR128:$src)>;
3974 //===----------------------------------------------------------------------===//
3975 // SSE4.1 - Packed Move with Sign/Zero Extend
3976 //===----------------------------------------------------------------------===//
3978 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
3979 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3980 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3981 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
3983 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3984 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3986 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
3990 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
3991 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
3993 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
3995 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
3997 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
3999 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
4001 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
4005 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
4006 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
4007 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
4008 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
4009 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
4010 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
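// As a reminder, pmovsxbw sign-extends the 8 bytes in the low half of the source
// to 8 words (e.g. "pmovsxbw (%rdi), %xmm0" widens 8 bytes loaded from memory);
// the pmovzx forms zero-extend instead.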
4012 // Common patterns involving scalar load.
4013 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
4014 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4015 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
4016 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4018 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
4019 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4020 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
4021 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4023 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
4024 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4025 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
4026 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4028 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
4029 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4030 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
4031 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4033 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
4034 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4035 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
4036 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4038 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
4039 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4040 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
4041 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4044 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4045 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4046 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4047 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4049 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4050 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4052 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
4056 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
4057 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
4059 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
4061 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
4063 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
4067 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
4068 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
4069 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
4070 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
4072 // Common patterns involving scalar load
4073 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
4074 (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
4075 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
4076 (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
4078 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
4079 (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
4080 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
4081 (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
4084 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4085 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4086 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4087 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4089   // Expecting an i16 load any-extended to an i32 value.
4090 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
4091 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4092 [(set VR128:$dst, (IntId (bitconvert
4093 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
4097 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
4098 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
4100 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
4103 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
4104 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
4106 // Common patterns involving scalar load
4107 def : Pat<(int_x86_sse41_pmovsxbq
4108 (bitconvert (v4i32 (X86vzmovl
4109 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4110 (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
4112 def : Pat<(int_x86_sse41_pmovzxbq
4113 (bitconvert (v4i32 (X86vzmovl
4114 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4115 (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
4117 //===----------------------------------------------------------------------===//
4118 // SSE4.1 - Extract Instructions
4119 //===----------------------------------------------------------------------===//
4121 /// SS41I_extract8 - SSE 4.1 extract 8 bits to 32-bit reg or 8-bit mem
4122 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
4123 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4124 (ins VR128:$src1, i32i8imm:$src2),
4125 !strconcat(OpcodeStr,
4126 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4127 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
4129 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4130 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
4131 !strconcat(OpcodeStr,
4132 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4135 // There's an AssertZext in the way of writing the store pattern
4136 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4139 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
4140 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
4141 def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
4142 (ins VR128:$src1, i32i8imm:$src2),
4143 "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
4146 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
4149 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
4150 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
4151 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4152 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
4153 !strconcat(OpcodeStr,
4154 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4157 // There's an AssertZext in the way of writing the store pattern
4158 // (store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))), addr:$dst)
4161 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4162 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
4164 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
4167 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4168 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
4169 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4170 (ins VR128:$src1, i32i8imm:$src2),
4171 !strconcat(OpcodeStr,
4172 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4174 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
4175 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4176 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
4177 !strconcat(OpcodeStr,
4178 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4179 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
4180 addr:$dst)]>, OpSize;
4183 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4184 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
4186 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
4188 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
4189 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
4190 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
4191 (ins VR128:$src1, i32i8imm:$src2),
4192 !strconcat(OpcodeStr,
4193 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4195 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
4196 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4197 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
4198 !strconcat(OpcodeStr,
4199 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4200 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
4201 addr:$dst)]>, OpSize, REX_W;
4204 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4205 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
4207 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
4209 /// SS41I_extractf32 - SSE 4.1 extract 32-bit fp value to int reg or memory
4210 /// destination
4211 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
4212 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4213 (ins VR128:$src1, i32i8imm:$src2),
4214 !strconcat(OpcodeStr,
4215 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4217 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
4219 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4220 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
4221 !strconcat(OpcodeStr,
4222 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4223 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
4224 addr:$dst)]>, OpSize;
4227 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
4228 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
4229 def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
4230 (ins VR128:$src1, i32i8imm:$src2),
4231 "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
4234 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
4236 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
4237 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
4240 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
4241 Requires<[HasSSE41]>;
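// Note: extractps copies the selected single-precision element out as a 32-bit
// value, so an extractps store with immediate 0 has the same effect as a movss
// store of the low element.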
4243 //===----------------------------------------------------------------------===//
4244 // SSE4.1 - Insert Instructions
4245 //===----------------------------------------------------------------------===//
4247 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
4248 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4249 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4251 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4253 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4255 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
4256 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4257 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
4259 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4261 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4263 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
4264 imm:$src3))]>, OpSize;
4267 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4268 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
4269 let Constraints = "$src1 = $dst" in
4270 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
4272 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
4273 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4274 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4276 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4278 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4280 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
4282 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4283 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
4285 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4287 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4289 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
4290 imm:$src3)))]>, OpSize;
4293 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4294 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
4295 let Constraints = "$src1 = $dst" in
4296 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
4298 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
4299 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4300 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4302 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4304 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4306 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
4308 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4309 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
4311 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4313 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4315 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
4316 imm:$src3)))]>, OpSize;
4319 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4320 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
4321 let Constraints = "$src1 = $dst" in
4322 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
4324 // insertps has a few different modes; the first two below are optimized inserts
4325 // that won't zero arbitrary elements in the destination vector. The next one
4326 // matches the intrinsic and may zero arbitrary elements in the target vector.
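// For reference, the insertps immediate is laid out roughly as: bits [7:6] select
// the source element (register form), bits [5:4] select the destination slot, and
// bits [3:0] form a zero mask applied to the destination elements.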
4328 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
4329 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4330 (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4332 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4334 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4336 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
4338 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4339 (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
4341 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4343 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4345 (X86insrtps VR128:$src1,
4346 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
4347 imm:$src3))]>, OpSize;
4350 let Constraints = "$src1 = $dst" in
4351 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
4352 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4353 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
4355 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4356 (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4358 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4359 (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4360 Requires<[HasSSE41]>;
4362 //===----------------------------------------------------------------------===//
4363 // SSE4.1 - Round Instructions
4364 //===----------------------------------------------------------------------===//
4366 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
4367 X86MemOperand x86memop, RegisterClass RC,
4368 PatFrag mem_frag32, PatFrag mem_frag64,
4369 Intrinsic V4F32Int, Intrinsic V2F64Int> {
4370   // Vector intrinsic operation, reg
4372 def PSr : SS4AIi8<opcps, MRMSrcReg,
4373 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4374 !strconcat(OpcodeStr,
4375 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4376 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
4379 // Vector intrinsic operation, mem
4380 def PSm : Ii8<opcps, MRMSrcMem,
4381 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4382 !strconcat(OpcodeStr,
4383 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4385 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
4387 Requires<[HasSSE41]>;
4389 // Vector intrinsic operation, reg
4390 def PDr : SS4AIi8<opcpd, MRMSrcReg,
4391 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4392 !strconcat(OpcodeStr,
4393 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4394 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
4397 // Vector intrinsic operation, mem
4398 def PDm : SS4AIi8<opcpd, MRMSrcMem,
4399 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4400 !strconcat(OpcodeStr,
4401 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4403 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
4407 multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
4408 RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
4409   // Vector intrinsic operation, reg
4411 def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
4412 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4413 !strconcat(OpcodeStr,
4414 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4417 // Vector intrinsic operation, mem
4418 def PSm_AVX : Ii8<opcps, MRMSrcMem,
4419 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4420 !strconcat(OpcodeStr,
4421 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4422 []>, TA, OpSize, Requires<[HasSSE41]>;
4424 // Vector intrinsic operation, reg
4425 def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
4426 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4427 !strconcat(OpcodeStr,
4428 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4431 // Vector intrinsic operation, mem
4432 def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
4433 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4434 !strconcat(OpcodeStr,
4435 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4439 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4442 Intrinsic F64Int, bit Is2Addr = 1> {
4443 // Intrinsic operation, reg.
4444 def SSr : SS4AIi8<opcss, MRMSrcReg,
4445 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4447 !strconcat(OpcodeStr,
4448 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4449 !strconcat(OpcodeStr,
4450 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4451 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4454 // Intrinsic operation, mem.
4455 def SSm : SS4AIi8<opcss, MRMSrcMem,
4456 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4458 !strconcat(OpcodeStr,
4459 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4460 !strconcat(OpcodeStr,
4461 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4463 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
4466 // Intrinsic operation, reg.
4467 def SDr : SS4AIi8<opcsd, MRMSrcReg,
4468 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4470 !strconcat(OpcodeStr,
4471 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4472 !strconcat(OpcodeStr,
4473 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4474 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4477 // Intrinsic operation, mem.
4478 def SDm : SS4AIi8<opcsd, MRMSrcMem,
4479 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4481 !strconcat(OpcodeStr,
4482 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4483 !strconcat(OpcodeStr,
4484 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4486 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
4490 multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
4492 // Intrinsic operation, reg.
4493 def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
4494 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4495 !strconcat(OpcodeStr,
4496 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4499 // Intrinsic operation, mem.
4500 def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
4501 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4502 !strconcat(OpcodeStr,
4503 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4506 // Intrinsic operation, reg.
4507 def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
4508 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4509 !strconcat(OpcodeStr,
4510 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4513 // Intrinsic operation, mem.
4514 def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
4515 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4516 !strconcat(OpcodeStr,
4517 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4521 // FP round - roundss, roundps, roundsd, roundpd
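// For reference, the round immediate picks the mode: bits [1:0] are 00=nearest,
// 01=floor, 10=ceil, 11=truncate; setting bit 2 uses MXCSR.RC instead, and bit 3
// suppresses precision (inexact) exceptions.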
4522 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
4524 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
4525 memopv4f32, memopv2f64,
4526 int_x86_sse41_round_ps,
4527 int_x86_sse41_round_pd>, VEX;
4528 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
4529 memopv8f32, memopv4f64,
4530 int_x86_avx_round_ps_256,
4531 int_x86_avx_round_pd_256>, VEX;
4532 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
4533 int_x86_sse41_round_ss,
4534 int_x86_sse41_round_sd, 0>, VEX_4V;
4536 // Instructions for the assembler
4537 defm VROUND : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
4539 defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
4541 defm VROUND : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
4544 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
4545 memopv4f32, memopv2f64,
4546 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
4547 let Constraints = "$src1 = $dst" in
4548 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
4549 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
4551 //===----------------------------------------------------------------------===//
4552 // SSE4.1 - Packed Bit Test
4553 //===----------------------------------------------------------------------===//
4555 // ptest instruction - we'll lower to this in X86ISelLowering, primarily from
4556 // the Intel intrinsic that corresponds to it.
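// For reference: ptest sets ZF when the bitwise AND of its two operands is all
// zeros, and CF when src2 & ~src1 is all zeros.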
4557 let Defs = [EFLAGS], isAsmParserOnly = 0, Predicates = [HasAVX] in {
4558 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4559 "vptest\t{$src2, $src1|$src1, $src2}",
4560 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4562 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4563 "vptest\t{$src2, $src1|$src1, $src2}",
4564 [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4567 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
4568 "vptest\t{$src2, $src1|$src1, $src2}",
4569 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
4571 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
4572 "vptest\t{$src2, $src1|$src1, $src2}",
4573 [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
4577 let Defs = [EFLAGS] in {
4578 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4579 "ptest \t{$src2, $src1|$src1, $src2}",
4580 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4582 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4583 "ptest \t{$src2, $src1|$src1, $src2}",
4584 [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4588 // The bit test instructions below are AVX only
4589 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
4590 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
4591 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
4592 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4593 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
4594 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
4595 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4596 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
4600 let Defs = [EFLAGS], isAsmParserOnly = 0, Predicates = [HasAVX] in {
4601 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
4602 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
4603 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
4604 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
4607 //===----------------------------------------------------------------------===//
4608 // SSE4.1 - Misc Instructions
4609 //===----------------------------------------------------------------------===//
4611 def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
4612 "popcnt{w}\t{$src, $dst|$dst, $src}",
4613 [(set GR16:$dst, (ctpop GR16:$src))]>, OpSize, XS;
4614 def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
4615 "popcnt{w}\t{$src, $dst|$dst, $src}",
4616 [(set GR16:$dst, (ctpop (loadi16 addr:$src)))]>, OpSize, XS;
4618 def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
4619 "popcnt{l}\t{$src, $dst|$dst, $src}",
4620 [(set GR32:$dst, (ctpop GR32:$src))]>, XS;
4621 def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
4622 "popcnt{l}\t{$src, $dst|$dst, $src}",
4623 [(set GR32:$dst, (ctpop (loadi32 addr:$src)))]>, XS;
4625 def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
4626 "popcnt{q}\t{$src, $dst|$dst, $src}",
4627 [(set GR64:$dst, (ctpop GR64:$src))]>, XS;
4628 def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
4629 "popcnt{q}\t{$src, $dst|$dst, $src}",
4630 [(set GR64:$dst, (ctpop (loadi64 addr:$src)))]>, XS;
4634 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
4635 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
4636 Intrinsic IntId128> {
4637 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4639 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4640 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
4641 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4643 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4646 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
4649 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4650 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
4651 int_x86_sse41_phminposuw>, VEX;
4652 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
4653 int_x86_sse41_phminposuw>;
4655 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
4656 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
4657 Intrinsic IntId128, bit Is2Addr = 1> {
4658 let isCommutable = 1 in
4659 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4660 (ins VR128:$src1, VR128:$src2),
4662 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4663 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4664 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
4665 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4666 (ins VR128:$src1, i128mem:$src2),
4668 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4669 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4671 (IntId128 VR128:$src1,
4672 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4675 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
4676 let isCommutable = 0 in
4677 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
4679 defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
4681 defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
4683 defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
4685 defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
4687 defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
4689 defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
4691 defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
4693 defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
4695 defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
4697 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
4701 let Constraints = "$src1 = $dst" in {
4702 let isCommutable = 0 in
4703 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
4704 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
4705 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
4706 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
4707 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
4708 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
4709 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
4710 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
4711 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
4712 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
4713 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
4716 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
4717 (PCMPEQQrr VR128:$src1, VR128:$src2)>;
4718 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
4719 (PCMPEQQrm VR128:$src1, addr:$src2)>;
4721 /// SS48I_binop_rm - Simple SSE41 binary operator.
4722 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4723 ValueType OpVT, bit Is2Addr = 1> {
4724 let isCommutable = 1 in
4725 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4726 (ins VR128:$src1, VR128:$src2),
4728 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4729 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4730 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
4732 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4733 (ins VR128:$src1, i128mem:$src2),
4735 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4736 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4737 [(set VR128:$dst, (OpNode VR128:$src1,
4738 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
4742 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4743 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
4744 let Constraints = "$src1 = $dst" in
4745 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
4747 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
4748 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
4749 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
4750 X86MemOperand x86memop, bit Is2Addr = 1> {
4751 let isCommutable = 1 in
4752 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
4753 (ins RC:$src1, RC:$src2, i32i8imm:$src3),
4755 !strconcat(OpcodeStr,
4756 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4757 !strconcat(OpcodeStr,
4758 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4759 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
4761 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
4762 (ins RC:$src1, x86memop:$src2, i32i8imm:$src3),
4764 !strconcat(OpcodeStr,
4765 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4766 !strconcat(OpcodeStr,
4767 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4770 (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
4774 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
4775 let isCommutable = 0 in {
4776 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
4777 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4778 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
4779 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4780 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
4781 int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4782 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
4783 int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4784 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
4785 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4786 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
4787 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4789 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
4790 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4791 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
4792 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4793 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
4794 VR256, memopv32i8, i256mem, 0>, VEX_4V;
4797 let Constraints = "$src1 = $dst" in {
4798 let isCommutable = 0 in {
4799 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
4800 VR128, memopv16i8, i128mem>;
4801 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
4802 VR128, memopv16i8, i128mem>;
4803 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
4804 VR128, memopv16i8, i128mem>;
4805 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
4806 VR128, memopv16i8, i128mem>;
4808 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
4809 VR128, memopv16i8, i128mem>;
4810 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
4811 VR128, memopv16i8, i128mem>;
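// Note on dpps/dppd: the immediate is split into two masks - the high bits select
// which input elements take part in the dot product and the low bits select which
// destination elements receive the sum (the rest are zeroed).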
4814 /// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
4815 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
4816 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
4817 RegisterClass RC, X86MemOperand x86memop,
4818 PatFrag mem_frag, Intrinsic IntId> {
4819 def rr : I<opc, MRMSrcReg, (outs RC:$dst),
4820 (ins RC:$src1, RC:$src2, RC:$src3),
4821 !strconcat(OpcodeStr,
4822 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4823 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
4824 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4826 def rm : I<opc, MRMSrcMem, (outs RC:$dst),
4827 (ins RC:$src1, x86memop:$src2, RC:$src3),
4828 !strconcat(OpcodeStr,
4829 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4831 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
4833 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4837 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
4838 memopv16i8, int_x86_sse41_blendvpd>;
4839 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
4840 memopv16i8, int_x86_sse41_blendvps>;
4841 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
4842 memopv16i8, int_x86_sse41_pblendvb>;
4843 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
4844 memopv32i8, int_x86_avx_blendv_pd_256>;
4845 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
4846 memopv32i8, int_x86_avx_blendv_ps_256>;
4848 /// SS41I_ternary_int - SSE 4.1 ternary operator
4849 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
4850 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4851 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4852 (ins VR128:$src1, VR128:$src2),
4853 !strconcat(OpcodeStr,
4854 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4855 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
4858 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4859 (ins VR128:$src1, i128mem:$src2),
4860 !strconcat(OpcodeStr,
4861 "\t{%xmm0, $src2, $dst|$dst, $src2, %xmm0}"),
4864 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
4868 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
4869 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
4870 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
4872 def : Pat<(X86pblendv VR128:$src1, VR128:$src2, XMM0),
4873 (PBLENDVBrr0 VR128:$src1, VR128:$src2)>;
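// For reference: the legacy (non-VEX) blendv forms read their mask implicitly from
// XMM0; each result element comes from $src2 when the sign bit of the matching
// mask element is set, and from $src1 otherwise.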
4875 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4876 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4877 "vmovntdqa\t{$src, $dst|$dst, $src}",
4878 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4880 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4881 "movntdqa\t{$src, $dst|$dst, $src}",
4882 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4885 //===----------------------------------------------------------------------===//
4886 // SSE4.2 - Compare Instructions
4887 //===----------------------------------------------------------------------===//
4889 /// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
4890 multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
4891 Intrinsic IntId128, bit Is2Addr = 1> {
4892 def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
4893 (ins VR128:$src1, VR128:$src2),
4895 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4896 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4897 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
4899 def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
4900 (ins VR128:$src1, i128mem:$src2),
4902 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4903 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4905 (IntId128 VR128:$src1,
4906 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4909 let isAsmParserOnly = 0, Predicates = [HasAVX] in
4910 defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
4912 let Constraints = "$src1 = $dst" in
4913 defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
4915 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
4916 (PCMPGTQrr VR128:$src1, VR128:$src2)>;
4917 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
4918 (PCMPGTQrm VR128:$src1, addr:$src2)>;
4920 //===----------------------------------------------------------------------===//
4921 // SSE4.2 - String/text Processing Instructions
4922 //===----------------------------------------------------------------------===//
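// As a reminder: the 'i' forms use implicit (NUL-terminated) string lengths while
// the 'e' forms take explicit lengths in EAX/EDX; the ...RM forms return a mask in
// XMM0 and the ...RI forms return an index in ECX, and all of them set EFLAGS.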
4924 // Packed Compare Implicit Length Strings, Return Mask
4925 multiclass pseudo_pcmpistrm<string asm> {
4926 def REG : PseudoI<(outs VR128:$dst),
4927 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4928 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
4930 def MEM : PseudoI<(outs VR128:$dst),
4931 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4932 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
4933 VR128:$src1, (load addr:$src2), imm:$src3))]>;
4936 let Defs = [EFLAGS], usesCustomInserter = 1 in {
4937 defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
4938 defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
4941 let Defs = [XMM0, EFLAGS], isAsmParserOnly = 0,
4942 Predicates = [HasAVX] in {
4943 def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4944 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4945 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4946 def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4947 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4948 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
4951 let Defs = [XMM0, EFLAGS] in {
4952 def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
4953 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4954 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4955 def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
4956 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4957 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
4960 // Packed Compare Explicit Length Strings, Return Mask
4961 multiclass pseudo_pcmpestrm<string asm> {
4962 def REG : PseudoI<(outs VR128:$dst),
4963 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4964 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4965 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
4966 def MEM : PseudoI<(outs VR128:$dst),
4967 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4968 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
4969 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
4972 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
4973 defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
4974 defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
4977 let isAsmParserOnly = 0, Predicates = [HasAVX],
4978 Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4979 def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4980 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4981 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4982 def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4983 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4984 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
4987 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
4988 def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
4989 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
4990 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4991 def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
4992 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
4993 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
4996 // Packed Compare Implicit Length Strings, Return Index
4997 let Defs = [ECX, EFLAGS] in {
4998 multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
4999 def rr : SS42AI<0x63, MRMSrcReg, (outs),
5000 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5001 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
5002 [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
5003 (implicit EFLAGS)]>, OpSize;
5004 def rm : SS42AI<0x63, MRMSrcMem, (outs),
5005 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5006 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
5007 [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
5008 (implicit EFLAGS)]>, OpSize;
5012 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
5013 defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
5015 defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
5017 defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
5019 defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
5021 defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
5023 defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
5027 defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
5028 defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
5029 defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
5030 defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
5031 defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
5032 defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
5034 // Packed Compare Explicit Length Strings, Return Index
5035 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
5036 multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
5037 def rr : SS42AI<0x61, MRMSrcReg, (outs),
5038 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5039 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5040 [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
5041 (implicit EFLAGS)]>, OpSize;
5042 def rm : SS42AI<0x61, MRMSrcMem, (outs),
5043 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5044 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5046 (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
5047 (implicit EFLAGS)]>, OpSize;
5051 let isAsmParserOnly = 0, Predicates = [HasAVX] in {
5052 defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
5054 defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
5056 defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
5058 defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
5060 defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
5062 defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
5066 defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
5067 defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
5068 defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
5069 defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
5070 defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
5071 defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
5073 //===----------------------------------------------------------------------===//
5074 // SSE4.2 - CRC Instructions
5075 //===----------------------------------------------------------------------===//
5077 // No CRC instructions have AVX equivalents
5079 // crc intrinsic instructions
5080 // These come only in rr and rm forms; the only difference between them is the
5081 // size of r and m.
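// For reference, these compute CRC-32C (the Castagnoli polynomial 0x11EDC6F41),
// not the zlib/Ethernet CRC-32; e.g. "crc32b %dl, %eax" folds DL into the running
// checksum held in EAX.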
5082 let Constraints = "$src1 = $dst" in {
5083 def CRC32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
5084 (ins GR32:$src1, i8mem:$src2),
5085 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5087 (int_x86_sse42_crc32_8 GR32:$src1,
5088 (load addr:$src2)))]>;
5089 def CRC32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
5090 (ins GR32:$src1, GR8:$src2),
5091 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5093 (int_x86_sse42_crc32_8 GR32:$src1, GR8:$src2))]>;
5094 def CRC32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5095 (ins GR32:$src1, i16mem:$src2),
5096 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5098 (int_x86_sse42_crc32_16 GR32:$src1,
5099 (load addr:$src2)))]>,
5101 def CRC32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5102 (ins GR32:$src1, GR16:$src2),
5103 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5105 (int_x86_sse42_crc32_16 GR32:$src1, GR16:$src2))]>,
5107 def CRC32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5108 (ins GR32:$src1, i32mem:$src2),
5109 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5111 (int_x86_sse42_crc32_32 GR32:$src1,
5112 (load addr:$src2)))]>;
5113 def CRC32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5114 (ins GR32:$src1, GR32:$src2),
5115 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5117 (int_x86_sse42_crc32_32 GR32:$src1, GR32:$src2))]>;
5118 def CRC64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
5119 (ins GR64:$src1, i8mem:$src2),
5120 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5122 (int_x86_sse42_crc64_8 GR64:$src1,
5123 (load addr:$src2)))]>,
5125 def CRC64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
5126 (ins GR64:$src1, GR8:$src2),
5127 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5129 (int_x86_sse42_crc64_8 GR64:$src1, GR8:$src2))]>,
5131 def CRC64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
5132 (ins GR64:$src1, i64mem:$src2),
5133 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5135 (int_x86_sse42_crc64_64 GR64:$src1,
5136 (load addr:$src2)))]>,
5138 def CRC64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
5139 (ins GR64:$src1, GR64:$src2),
5140 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5142 (int_x86_sse42_crc64_64 GR64:$src1, GR64:$src2))]>,
5146 //===----------------------------------------------------------------------===//
5147 // AES-NI Instructions
5148 //===----------------------------------------------------------------------===//
5150 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
5151 Intrinsic IntId128, bit Is2Addr = 1> {
5152 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
5153 (ins VR128:$src1, VR128:$src2),
5155 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5156 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5157 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5159 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
5160 (ins VR128:$src1, i128mem:$src2),
5162 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5163 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5165 (IntId128 VR128:$src1,
5166 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5169 // Perform One Round of an AES Encryption/Decryption Flow
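// As a reminder, aesenc applies ShiftRows, SubBytes, and MixColumns to the state
// and then XORs in the round key from the second operand; aesenclast skips
// MixColumns, and the aesdec forms apply the corresponding inverse transforms.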
5170 let isAsmParserOnly = 0, Predicates = [HasAVX, HasAES] in {
5171 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
5172 int_x86_aesni_aesenc, 0>, VEX_4V;
5173 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
5174 int_x86_aesni_aesenclast, 0>, VEX_4V;
5175 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
5176 int_x86_aesni_aesdec, 0>, VEX_4V;
5177 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
5178 int_x86_aesni_aesdeclast, 0>, VEX_4V;
5181 let Constraints = "$src1 = $dst" in {
5182 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
5183 int_x86_aesni_aesenc>;
5184 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
5185 int_x86_aesni_aesenclast>;
5186 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
5187 int_x86_aesni_aesdec>;
5188 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
5189 int_x86_aesni_aesdeclast>;
5192 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
5193 (AESENCrr VR128:$src1, VR128:$src2)>;
5194 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
5195 (AESENCrm VR128:$src1, addr:$src2)>;
5196 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
5197 (AESENCLASTrr VR128:$src1, VR128:$src2)>;
5198 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
5199 (AESENCLASTrm VR128:$src1, addr:$src2)>;
5200 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
5201 (AESDECrr VR128:$src1, VR128:$src2)>;
5202 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
5203 (AESDECrm VR128:$src1, addr:$src2)>;
5204 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
5205 (AESDECLASTrr VR128:$src1, VR128:$src2)>;
5206 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
5207 (AESDECLASTrm VR128:$src1, addr:$src2)>;
5209 // Perform the AES InvMixColumn Transformation
5210 let isAsmParserOnly = 0, Predicates = [HasAVX, HasAES] in {
5211 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5213 "vaesimc\t{$src1, $dst|$dst, $src1}",
5215 (int_x86_aesni_aesimc VR128:$src1))]>,
5217 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5218 (ins i128mem:$src1),
5219 "vaesimc\t{$src1, $dst|$dst, $src1}",
5221 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5224 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5226 "aesimc\t{$src1, $dst|$dst, $src1}",
5228 (int_x86_aesni_aesimc VR128:$src1))]>,
5230 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5231 (ins i128mem:$src1),
5232 "aesimc\t{$src1, $dst|$dst, $src1}",
5234 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5237 // AES Round Key Generation Assist
5238 let isAsmParserOnly = 0, Predicates = [HasAVX, HasAES] in {
5239 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5240 (ins VR128:$src1, i8imm:$src2),
5241 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5243 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5245 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5246 (ins i128mem:$src1, i8imm:$src2),
5247 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5249 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5253 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5254 (ins VR128:$src1, i8imm:$src2),
5255 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5257 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5259 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5260 (ins i128mem:$src1, i8imm:$src2),
5261 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5263 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5267 //===----------------------------------------------------------------------===//
5268 // CLMUL Instructions
5269 //===----------------------------------------------------------------------===//
5271 // Only the AVX versions of the CLMUL instructions are described here.
5273 // Carry-less Multiplication instructions
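// For reference: pclmulqdq multiplies one 64-bit half of each source, selected by
// bits 0 and 4 of the immediate, into a 128-bit carry-less product; the pseudo
// mnemonics below (e.g. vpclmulhqhqdq) simply hard-code that immediate.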
5274 let isAsmParserOnly = 0 in {
5275 def VPCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
5276 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5277 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5278 []>;
5280 def VPCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
5281 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5282 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5283 []>;
5285 // Assembler Only
5286 multiclass avx_vpclmul<string asm> {
5287 def rr : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
5288 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5289 []>;
5291 def rm : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
5292 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5293 []>;
5294 }
5295 defm VPCLMULHQHQDQ : avx_vpclmul<"vpclmulhqhqdq">;
5296 defm VPCLMULHQLQDQ : avx_vpclmul<"vpclmulhqlqdq">;
5297 defm VPCLMULLQHQDQ : avx_vpclmul<"vpclmullqhqdq">;
5298 defm VPCLMULLQLQDQ : avx_vpclmul<"vpclmullqlqdq">;
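// Note: these assembler-only mnemonics should correspond to VPCLMULQDQ with a
// fixed immediate selecting which quadword of each source is multiplied; for
// example, vpclmullqlqdq should be vpclmulqdq with immediate 0x00 (low quadword
// of both sources) and vpclmulhqhqdq with immediate 0x11 (high quadword of
// both). See the PCLMULQDQ immediate encoding in the Intel manuals.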
5300 } // isAsmParserOnly
5302 //===----------------------------------------------------------------------===//
5303 // AVX Instructions
5304 //===----------------------------------------------------------------------===//
5306 let isAsmParserOnly = 0 in {
5308 // Load from memory and broadcast to all elements of the destination operand
5309 class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
5310 X86MemOperand x86memop, Intrinsic Int> :
5311 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
5312 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5313 [(set RC:$dst, (Int addr:$src))]>, VEX;
5315 def VBROADCASTSS : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
5316 int_x86_avx_vbroadcastss>;
5317 def VBROADCASTSSY : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
5318 int_x86_avx_vbroadcastss_256>;
5319 def VBROADCASTSD : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
5320 int_x86_avx_vbroadcast_sd_256>;
5321 def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
5322 int_x86_avx_vbroadcastf128_pd_256>;
5324 // Insert packed floating-point values
5325 def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
5326 (ins VR256:$src1, VR128:$src2, i8imm:$src3),
5327 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5328 []>, VEX_4V;
5329 def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
5330 (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
5331 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5332 []>, VEX_4V;
5334 // Extract packed floating-point values
5335 def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
5336 (ins VR256:$src1, i8imm:$src2),
5337 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5338 []>, VEX;
5339 def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
5340 (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
5341 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5342 []>, VEX;
5344 // Conditional SIMD Packed Loads and Stores
5345 multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
5346 Intrinsic IntLd, Intrinsic IntLd256,
5347 Intrinsic IntSt, Intrinsic IntSt256,
5348 PatFrag pf128, PatFrag pf256> {
5349 def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
5350 (ins VR128:$src1, f128mem:$src2),
5351 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5352 [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
5353 VEX_4V;
5354 def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
5355 (ins VR256:$src1, f256mem:$src2),
5356 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5357 [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
5358 VEX_4V;
5359 def mr : AVX8I<opc_mr, MRMDestMem, (outs),
5360 (ins f128mem:$dst, VR128:$src1, VR128:$src2),
5361 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5362 [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
5363 def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
5364 (ins f256mem:$dst, VR256:$src1, VR256:$src2),
5365 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5366 [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
5367 }
5369 defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
5370 int_x86_avx_maskload_ps,
5371 int_x86_avx_maskload_ps_256,
5372 int_x86_avx_maskstore_ps,
5373 int_x86_avx_maskstore_ps_256,
5374 memopv4f32, memopv8f32>;
5375 defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
5376 int_x86_avx_maskload_pd,
5377 int_x86_avx_maskload_pd_256,
5378 int_x86_avx_maskstore_pd,
5379 int_x86_avx_maskstore_pd_256,
5380 memopv2f64, memopv4f64>;
5382 // Permute Floating-Point Values
5383 multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
5384 RegisterClass RC, X86MemOperand x86memop_f,
5385 X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
5386 Intrinsic IntVar, Intrinsic IntImm> {
5387 def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
5388 (ins RC:$src1, RC:$src2),
5389 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5390 [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
5391 def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
5392 (ins RC:$src1, x86memop_i:$src2),
5393 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5394 [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;
5396 def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
5397 (ins RC:$src1, i8imm:$src2),
5398 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5399 [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
5400 def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
5401 (ins x86memop_f:$src1, i8imm:$src2),
5402 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5403 [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
5404 }
5406 defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
5407 memopv4f32, memopv4i32,
5408 int_x86_avx_vpermilvar_ps,
5409 int_x86_avx_vpermil_ps>;
5410 defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
5411 memopv8f32, memopv8i32,
5412 int_x86_avx_vpermilvar_ps_256,
5413 int_x86_avx_vpermil_ps_256>;
5414 defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
5415 memopv2f64, memopv2i64,
5416 int_x86_avx_vpermilvar_pd,
5417 int_x86_avx_vpermil_pd>;
5418 defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
5419 memopv4f64, memopv4i64,
5420 int_x86_avx_vpermilvar_pd_256,
5421 int_x86_avx_vpermil_pd_256>;
5423 def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
5424 (ins VR256:$src1, VR256:$src2, i8imm:$src3),
5425 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5426 []>, VEX_4V;
5427 def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
5428 (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
5429 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5430 []>, VEX_4V;
5432 // Zero All YMM registers
5433 def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
5434 [(int_x86_avx_vzeroall)]>, VEX, VEX_L, Requires<[HasAVX]>;
5436 // Zero Upper bits of YMM registers
5437 def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
5438 [(int_x86_avx_vzeroupper)]>, VEX, Requires<[HasAVX]>;
5440 } // isAsmParserOnly
5442 def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
5443 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5444 def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
5445 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5446 def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
5447 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5449 def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
5450 (i32 imm)),
5451 (VINSERTF128rr VR256:$src1, VR128:$src2,
5452 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5453 def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
5454 (i32 imm)),
5455 (VINSERTF128rr VR256:$src1, VR128:$src2,
5456 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5457 def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
5458 (i32 imm)),
5459 (VINSERTF128rr VR256:$src1, VR128:$src2,
5460 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5461 def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
5462 (i32 imm)),
5463 (VINSERTF128rr VR256:$src1, VR128:$src2,
5464 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5466 def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
5467 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5468 def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
5469 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5470 def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
5471 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5473 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5474 (v4f32 (VEXTRACTF128rr
5475 (v8f32 VR256:$src1),
5476 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5477 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5478 (v2f64 (VEXTRACTF128rr
5479 (v4f64 VR256:$src1),
5480 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5481 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5482 (v4i32 (VEXTRACTF128rr
5483 (v8i32 VR256:$src1),
5484 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5485 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5486 (v2i64 (VEXTRACTF128rr
5487 (v4i64 VR256:$src1),
5488 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5490 def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
5491 (VBROADCASTF128 addr:$src)>;
5493 def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
5494 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5495 def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
5496 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5497 def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
5498 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5500 def : Pat<(int_x86_avx_vperm2f128_ps_256
5501 VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
5502 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5503 def : Pat<(int_x86_avx_vperm2f128_pd_256
5504 VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
5505 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5506 def : Pat<(int_x86_avx_vperm2f128_si_256
5507 VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
5508 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5510 //===----------------------------------------------------------------------===//
5511 // SSE Shuffle pattern fragments
5512 //===----------------------------------------------------------------------===//
5514 // This is part of a "work in progress" refactoring. The idea is that all
5515 // vector shuffles are going to be translated into target-specific nodes and
5516 // directly matched by the patterns below (which can be changed along the way).
5517 // The AVX versions of some, but not all, of them are described here, and more
5518 // should come in the near future.
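// As a rough sketch of that scheme (using nodes and instructions already
// defined in this file): a target-specific shuffle node produced by lowering,
// such as X86PShufd, is matched directly and rewritten to the final
// instruction, along the lines of
//
//   def : Pat<(v4i32 (X86PShufd VR128:$src, (i8 imm:$imm))),
//             (PSHUFDri VR128:$src, imm:$imm)>;
//
// rather than going through a generic vector_shuffle with a mask fragment.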
5520 // Shuffle with the PSHUFD instruction, folding loads. The first two patterns
5521 // match SSE2 loads, which are always promoted to v2i64. The last one should
5522 // match the SSE1 case, where the only legal load is v4f32, but PSHUFD is not
5523 // available before SSE2, so how has it ever worked? Anyway, the pattern will
5524 // remain here until we investigate further.
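// For reference, assuming the usual SSE2 legalization: a 128-bit integer load
// is legalized to a v2i64 load, so a shuffled v4i32 load reaches instruction
// selection as (v4i32 (bitconvert (v2i64 (load addr)))), which is what the
// bc_v4i32 (memopv2i64 ...) fragments below are matching.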
5525 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
5526 (i8 imm:$imm))),
5527 (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>;
5528 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
5529 (i8 imm:$imm))),
5530 (PSHUFDmi addr:$src1, imm:$imm)>;
5531 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
5532 (i8 imm:$imm))),
5533 (PSHUFDmi addr:$src1, imm:$imm)>; // FIXME: has this ever worked?
5535 // Shuffle with PSHUFD instruction.
5536 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5537 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
5538 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5539 (PSHUFDri VR128:$src1, imm:$imm)>;
5541 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5542 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
5543 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5544 (PSHUFDri VR128:$src1, imm:$imm)>;
5546 // Shuffle with SHUFPD instruction.
5547 def : Pat<(v2f64 (X86Shufps VR128:$src1,
5548 (memopv2f64 addr:$src2), (i8 imm:$imm))),
5549 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5550 def : Pat<(v2f64 (X86Shufps VR128:$src1,
5551 (memopv2f64 addr:$src2), (i8 imm:$imm))),
5552 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
5554 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5555 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5556 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5557 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
5559 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5560 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5561 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5562 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
5564 // Shuffle with SHUFPS instruction.
5565 def : Pat<(v4f32 (X86Shufps VR128:$src1,
5566 (memopv4f32 addr:$src2), (i8 imm:$imm))),
5567 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5568 def : Pat<(v4f32 (X86Shufps VR128:$src1,
5569 (memopv4f32 addr:$src2), (i8 imm:$imm))),
5570 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
5572 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5573 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5574 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5575 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
5577 def : Pat<(v4i32 (X86Shufps VR128:$src1,
5578 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
5579 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5580 def : Pat<(v4i32 (X86Shufps VR128:$src1,
5581 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
5582 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
5584 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5585 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5586 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5587 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
5589 // Shuffle with MOVHLPS instruction
5590 def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
5591 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
5592 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
5593 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
5595 // Shuffle with MOVDDUP instruction
5596 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5597 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5598 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5599 (MOVDDUPrm addr:$src)>;
5601 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
5602 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5603 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
5604 (MOVDDUPrm addr:$src)>;
5606 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
5607 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5608 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
5609 (MOVDDUPrm addr:$src)>;
5611 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
5612 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5613 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
5614 (MOVDDUPrm addr:$src)>;
5616 def : Pat<(X86Movddup (bc_v2f64
5617 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5618 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5619 def : Pat<(X86Movddup (bc_v2f64
5620 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5621 (MOVDDUPrm addr:$src)>;
5624 // Shuffle with UNPCKLPS
5625 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
5626 (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5627 def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, (memopv8f32 addr:$src2))),
5628 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
5629 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
5630 (UNPCKLPSrm VR128:$src1, addr:$src2)>;
5632 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
5633 (VUNPCKLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5634 def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
5635 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
5636 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
5637 (UNPCKLPSrr VR128:$src1, VR128:$src2)>;
5639 // Shuffle with UNPCKHPS
5640 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
5641 (VUNPCKHPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5642 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
5643 (UNPCKHPSrm VR128:$src1, addr:$src2)>;
5645 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
5646 (VUNPCKHPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5647 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
5648 (UNPCKHPSrr VR128:$src1, VR128:$src2)>;
5650 // Shuffle with UNPCKLPD
5651 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
5652 (VUNPCKLPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5653 def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, (memopv4f64 addr:$src2))),
5654 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
5655 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
5656 (UNPCKLPDrm VR128:$src1, addr:$src2)>;
5658 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
5659 (VUNPCKLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5660 def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
5661 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
5662 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
5663 (UNPCKLPDrr VR128:$src1, VR128:$src2)>;
5665 // Shuffle with UNPCKHPD
5666 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
5667 (VUNPCKHPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5668 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
5669 (UNPCKHPDrm VR128:$src1, addr:$src2)>;
5671 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
5672 (VUNPCKHPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5673 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
5674 (UNPCKHPDrr VR128:$src1, VR128:$src2)>;
5676 // Shuffle with PUNPCKLBW
5677 def : Pat<(v16i8 (X86Punpcklbw VR128:$src1,
5678 (bc_v16i8 (memopv2i64 addr:$src2)))),
5679 (PUNPCKLBWrm VR128:$src1, addr:$src2)>;
5680 def : Pat<(v16i8 (X86Punpcklbw VR128:$src1, VR128:$src2)),
5681 (PUNPCKLBWrr VR128:$src1, VR128:$src2)>;
5683 // Shuffle with PUNPCKLWD
5684 def : Pat<(v8i16 (X86Punpcklwd VR128:$src1,
5685 (bc_v8i16 (memopv2i64 addr:$src2)))),
5686 (PUNPCKLWDrm VR128:$src1, addr:$src2)>;
5687 def : Pat<(v8i16 (X86Punpcklwd VR128:$src1, VR128:$src2)),
5688 (PUNPCKLWDrr VR128:$src1, VR128:$src2)>;
5690 // Shuffle with PUNPCKLDQ
5691 def : Pat<(v4i32 (X86Punpckldq VR128:$src1,
5692 (bc_v4i32 (memopv2i64 addr:$src2)))),
5693 (PUNPCKLDQrm VR128:$src1, addr:$src2)>;
5694 def : Pat<(v4i32 (X86Punpckldq VR128:$src1, VR128:$src2)),
5695 (PUNPCKLDQrr VR128:$src1, VR128:$src2)>;
5697 // Shuffle with PUNPCKLQDQ
5698 def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, (memopv2i64 addr:$src2))),
5699 (PUNPCKLQDQrm VR128:$src1, addr:$src2)>;
5700 def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)),
5701 (PUNPCKLQDQrr VR128:$src1, VR128:$src2)>;
5703 // Shuffle with PUNPCKHBW
5704 def : Pat<(v16i8 (X86Punpckhbw VR128:$src1,
5705 (bc_v16i8 (memopv2i64 addr:$src2)))),
5706 (PUNPCKHBWrm VR128:$src1, addr:$src2)>;
5707 def : Pat<(v16i8 (X86Punpckhbw VR128:$src1, VR128:$src2)),
5708 (PUNPCKHBWrr VR128:$src1, VR128:$src2)>;
5710 // Shuffle with PUNPCKHWD
5711 def : Pat<(v8i16 (X86Punpckhwd VR128:$src1,
5712 (bc_v8i16 (memopv2i64 addr:$src2)))),
5713 (PUNPCKHWDrm VR128:$src1, addr:$src2)>;
5714 def : Pat<(v8i16 (X86Punpckhwd VR128:$src1, VR128:$src2)),
5715 (PUNPCKHWDrr VR128:$src1, VR128:$src2)>;
5717 // Shuffle with PUNPCKHDQ
5718 def : Pat<(v4i32 (X86Punpckhdq VR128:$src1,
5719 (bc_v4i32 (memopv2i64 addr:$src2)))),
5720 (PUNPCKHDQrm VR128:$src1, addr:$src2)>;
5721 def : Pat<(v4i32 (X86Punpckhdq VR128:$src1, VR128:$src2)),
5722 (PUNPCKHDQrr VR128:$src1, VR128:$src2)>;
5724 // Shuffle with PUNPCKHQDQ
5725 def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, (memopv2i64 addr:$src2))),
5726 (PUNPCKHQDQrm VR128:$src1, addr:$src2)>;
5727 def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)),
5728 (PUNPCKHQDQrr VR128:$src1, VR128:$src2)>;
5730 // Shuffle with MOVLHPS
5731 def : Pat<(X86Movlhps VR128:$src1,
5732 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
5733 (MOVHPSrm VR128:$src1, addr:$src2)>;
5734 def : Pat<(X86Movlhps VR128:$src1,
5735 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
5736 (MOVHPSrm VR128:$src1, addr:$src2)>;
5737 def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
5738 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
5739 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
5740 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
5741 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
5742 (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
5744 // FIXME: Instead of X86Movddup, there should be an X86Unpcklpd here; the
5745 // problem is during lowering, where it's not possible to recognize the load
5746 // fold because it has two uses through a bitcast. One use disappears at isel
5747 // time and the fold opportunity reappears.
5748 def : Pat<(v2f64 (X86Movddup VR128:$src)),
5749 (UNPCKLPDrr VR128:$src, VR128:$src)>;
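// Note: unpcklpd with the same register for both operands interleaves the two
// low quadwords of that register, producing <src[0], src[0]>, which should be
// the same result movddup gives, so the pattern above remains correct despite
// the FIXME.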
5751 // Shuffle with MOVLHPD
5752 def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
5753 (scalar_to_vector (loadf64 addr:$src2)))),
5754 (MOVHPDrm VR128:$src1, addr:$src2)>;
5756 // FIXME: Instead of X86Unpcklpd, there should be an X86Movlhpd here; the
5757 // problem is during lowering, where it's not possible to recognize the load
5758 // fold because it has two uses through a bitcast. One use disappears at isel
5759 // time and the fold opportunity reappears.
5760 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
5761 (scalar_to_vector (loadf64 addr:$src2)))),
5762 (MOVHPDrm VR128:$src1, addr:$src2)>;
5764 // Shuffle with MOVSS
5765 def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
5766 (MOVSSrr VR128:$src1, FR32:$src2)>;
5767 def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
5768 (MOVSSrr (v4i32 VR128:$src1),
5769 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
5770 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
5771 (MOVSSrr (v4f32 VR128:$src1),
5772 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
5773 // FIXME: Instead of an X86Movss there should be an X86Movlps here; the
5774 // problem is during lowering, where it's not possible to recognize the load
5775 // fold because it has two uses through a bitcast. One use disappears at isel
5776 // time and the fold opportunity reappears.
5777 def : Pat<(X86Movss VR128:$src1,
5778 (bc_v4i32 (v2i64 (load addr:$src2)))),
5779 (MOVLPSrm VR128:$src1, addr:$src2)>;
5781 // Shuffle with MOVSD
5782 def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
5783 (MOVSDrr VR128:$src1, FR64:$src2)>;
5784 def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
5785 (MOVSDrr (v2i64 VR128:$src1),
5786 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
5787 def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
5788 (MOVSDrr (v2f64 VR128:$src1),
5789 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
5790 def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
5791 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
5792 def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
5793 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
5795 // Shuffle with MOVSHDUP
5796 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
5797 (MOVSHDUPrr VR128:$src)>;
5798 def : Pat<(X86Movshdup (bc_v4i32 (memopv2i64 addr:$src))),
5799 (MOVSHDUPrm addr:$src)>;
5801 def : Pat<(v4f32 (X86Movshdup VR128:$src)),
5802 (MOVSHDUPrr VR128:$src)>;
5803 def : Pat<(X86Movshdup (memopv4f32 addr:$src)),
5804 (MOVSHDUPrm addr:$src)>;
5806 // Shuffle with MOVSLDUP
5807 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
5808 (MOVSLDUPrr VR128:$src)>;
5809 def : Pat<(X86Movsldup (bc_v4i32 (memopv2i64 addr:$src))),
5810 (MOVSLDUPrm addr:$src)>;
5812 def : Pat<(v4f32 (X86Movsldup VR128:$src)),
5813 (MOVSLDUPrr VR128:$src)>;
5814 def : Pat<(X86Movsldup (memopv4f32 addr:$src)),
5815 (MOVSLDUPrm addr:$src)>;
5817 // Shuffle with PSHUFHW
5818 def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
5819 (PSHUFHWri VR128:$src, imm:$imm)>;
5820 def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
5821 (PSHUFHWmi addr:$src, imm:$imm)>;
5823 // Shuffle with PSHUFLW
5824 def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
5825 (PSHUFLWri VR128:$src, imm:$imm)>;
5826 def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
5827 (PSHUFLWmi addr:$src, imm:$imm)>;
5829 // Shuffle with PALIGNR
5830 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5831 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5832 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5833 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5834 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5835 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5836 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5837 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
5839 // Shuffle with MOVLPS
5840 def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
5841 (MOVLPSrm VR128:$src1, addr:$src2)>;
5842 def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
5843 (MOVLPSrm VR128:$src1, addr:$src2)>;
5844 def : Pat<(X86Movlps VR128:$src1,
5845 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
5846 (MOVLPSrm VR128:$src1, addr:$src2)>;
5847 // FIXME: Instead of an X86Movlps there should be an X86Movsd here; the
5848 // problem is during lowering, where it's not possible to recognize the load
5849 // fold because it has two uses through a bitcast. One use disappears at isel
5850 // time and the fold opportunity reappears.
5851 def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
5852 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
5854 def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
5855 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
5857 // Shuffle with MOVLPD
5858 def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
5859 (MOVLPDrm VR128:$src1, addr:$src2)>;
5860 def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
5861 (MOVLPDrm VR128:$src1, addr:$src2)>;
5862 def : Pat<(v2f64 (X86Movlpd VR128:$src1,
5863 (scalar_to_vector (loadf64 addr:$src2)))),
5864 (MOVLPDrm VR128:$src1, addr:$src2)>;
5866 // Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD
5867 def : Pat<(store (f64 (vector_extract
5868 (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))),addr:$dst),
5869 (MOVHPSmr addr:$dst, VR128:$src)>;
5870 def : Pat<(store (f64 (vector_extract
5871 (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))),addr:$dst),
5872 (MOVHPDmr addr:$dst, VR128:$src)>;
5874 def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),addr:$src1),
5875 (MOVLPSmr addr:$src1, VR128:$src2)>;
5876 def : Pat<(store (v4i32 (X86Movlps
5877 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
5878 (MOVLPSmr addr:$src1, VR128:$src2)>;
5880 def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
5881 (MOVLPDmr addr:$src1, VR128:$src2)>;
5882 def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
5883 (MOVLPDmr addr:$src1, VR128:$src2)>;