//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instructions Classes
//===----------------------------------------------------------------------===//

/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           bit Is2Addr = 1> {
  let isCommutable = 1 in {
    def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
}

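// For illustration only (hypothetical instantiation, not a definition in this
// file): a use such as
//   defm ADD : sse12_fp_scalar<0x58, "add", fadd, FR32, f32mem>;
// would expand to ADDrr and ADDrm with the two-operand "$src2, $dst" asm
// string, while passing Is2Addr = 0 selects the three-operand AVX-style
// "$src2, $src1, $dst" form instead.
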
/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat,
                               bit Is2Addr = 1> {
  def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))]>;
  def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
                                          SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, mem_cpat:$src2))]>;
}

/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
}

/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                      string OpcodeStr, X86MemOperand x86memop,
                                      list<dag> pat_rr, list<dag> pat_rm,
                                      bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rr, d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rm, d>;
}

/// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               X86MemOperand x86memop, PatFrag mem_frag,
                               Domain d, bit Is2Addr = 1> {
  def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                     !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))], d>;
  def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                     !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, (mem_frag addr:$src2)))], d>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Instructions
//===----------------------------------------------------------------------===//

class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
      SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
      [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;

// Loading from memory automatically zeroes the upper bits.
class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
                    PatFrag mem_pat, string OpcodeStr> :
      SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (mem_pat addr:$src))]>;

// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
// is used instead. Register-to-register movss/movsd is not modeled as an
// INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
// in terms of a copy, and, as just mentioned, we don't use movss/movsd for
// copies.
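// (A "partial register update" here means the instruction writes only the low
// 32/64 bits of the xmm register and merges in the old upper bits, creating a
// false dependency on the register's previous contents.)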
def VMOVSSrr : sse12_move_rr<FR32, v4f32,
               "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
def VMOVSDrr : sse12_move_rr<FR64, v2f64,
               "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;

  let AddedComplexity = 20 in
    def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
}

let Constraints = "$src1 = $dst" in {
  def MOVSSrr : sse12_move_rr<FR32, v4f32,
                "movss\t{$src2, $dst|$dst, $src2}">, XS;
  def MOVSDrr : sse12_move_rr<FR64, v2f64,
                "movsd\t{$src2, $dst|$dst, $src2}">, XD;
}

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;

  let AddedComplexity = 20 in
    def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
}

let AddedComplexity = 15 in {
// Extract the low 32-bit value from one vector and insert it into another.
def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4f32 VR128:$src1),
                   (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
// Extract the low 64-bit value from one vector and insert it into another.
def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2f64 VR128:$src1),
                   (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
}

// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;

let AddedComplexity = 20 in {
// MOVSSrm zeros the high parts of the register; represent this
// with SUBREG_TO_REG.
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
// MOVSDrm zeros the high parts of the register; represent this
// with SUBREG_TO_REG.
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzload addr:$src)),
          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
}

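// Note: SUBREG_TO_REG (rather than INSERT_SUBREG) is used above because it
// asserts that the bits outside the inserted subregister are already zero,
// matching the documented zeroing behavior of the scalar loads; INSERT_SUBREG
// would leave those upper bits undefined.
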
// Store scalar value to memory.
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>;
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>;

def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>, XS, VEX;
def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>, XD, VEX;

// Extract and store.
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSSmr addr:$dst,
                   (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSDmr addr:$dst,
                   (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;

// Move Aligned/Unaligned floating point values
multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            bit IsReMaterializable = 1> {
let neverHasSideEffects = 1 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], d>;
}

defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                              "movaps", SSEPackedSingle>, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                              "movapd", SSEPackedDouble>, OpSize, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                              "movups", SSEPackedSingle>, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                              "movupd", SSEPackedDouble, 0>, OpSize, VEX;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
                              "movaps", SSEPackedSingle>, VEX;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
                              "movapd", SSEPackedDouble>, OpSize, VEX;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
                              "movups", SSEPackedSingle>, VEX;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                              "movupd", SSEPackedDouble, 0>, OpSize, VEX;
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                              "movaps", SSEPackedSingle>, TB;
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                              "movapd", SSEPackedDouble>, TB, OpSize;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                              "movups", SSEPackedSingle>, TB;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                              "movupd", SSEPackedDouble, 0>, TB, OpSize;

def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;

def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
          (VMOVUPSYmr addr:$dst, VR256:$src)>;

def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
          (VMOVUPDYmr addr:$dst, VR256:$src)>;

def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>;

// Intrinsic forms of MOVUPS/D load and store
def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
                         (ins f128mem:$dst, VR128:$src),
                         "movups\t{$src, $dst|$dst, $src}",
                         [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
                         (ins f128mem:$dst, VR128:$src),
                         "movupd\t{$src, $dst|$dst, $src}",
                         [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;

def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;

// Move Low/High packed floating point values
multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
                                 PatFrag mov_frag, string base_opc,
                                 string asm_opr> {
  def PSrm : PI<opc, MRMSrcMem,
         (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
         !strconcat(base_opc, "s", asm_opr),
     [(set RC:$dst,
       (mov_frag RC:$src1,
              (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
              SSEPackedSingle>, TB;

  def PDrm : PI<opc, MRMSrcMem,
         (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
         !strconcat(base_opc, "d", asm_opr),
     [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
                           (scalar_to_vector (loadf64 addr:$src2)))))],
              SSEPackedDouble>, TB, OpSize;
}

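// Each instantiation below expands to a single-precision and a
// double-precision form; e.g. defm MOVL yields MOVLPSrm and MOVLPDrm from the
// PSrm/PDrm definitions above.
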
let AddedComplexity = 20 in {
  defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
  defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                     "\t{$src2, $dst|$dst, $src2}">;
  defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                     "\t{$src2, $dst|$dst, $src2}">;
}

def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>, VEX;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>, VEX;
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>;

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>,
                   VEX;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>,
                   VEX;
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>;

let AddedComplexity = 20 in {
  def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
                      VEX_4V;
  def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
                      VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
  def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
}

def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
          (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
let AddedComplexity = 20 in {
  def : Pat<(v4f32 (movddup VR128:$src, (undef))),
            (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
  def : Pat<(v2i64 (movddup VR128:$src, (undef))),
            (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Conversion Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                     SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                     string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
                        [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
                        [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
}

multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
                        []>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
                        []>;
}

multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                         SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                         string asm, Domain d> {
  def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
                        [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
  def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
                        [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
}

multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
}

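// Note the extra DstRC:$src1 input on the AVX forms above: the VEX-encoded
// scalar converts take a second XMM source whose upper elements are merged
// into the result, so the destination register also appears as an input.
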
defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                      "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                      "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
                      VEX_W;
defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
                      VEX, VEX_W;

// The assembler can recognize rr 64-bit instructions by seeing an rxx
// register, but the same isn't true when only using memory operands, so
// provide other assembly "l" and "q" forms to address this explicitly
// where appropriate to do so.
defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
                      VEX_4V;
defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
                      VEX_4V, VEX_W;
defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
                      VEX_4V;
defm VCVTSI2SDL : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
                      VEX_4V;
defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
                      VEX_4V, VEX_W;

let Predicates = [HasAVX] in {
  def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
            (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
            (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;

  def : Pat<(f32 (sint_to_fp GR32:$src)),
            (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f32 (sint_to_fp GR64:$src)),
            (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
  def : Pat<(f64 (sint_to_fp GR32:$src)),
            (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f64 (sint_to_fp GR64:$src)),
            (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
}

defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                      "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                      "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
                      "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
                      "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
                      "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
                      "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;

// Conversion Instructions Intrinsics - Match intrinsics which expect MM
// and/or XMM operand(s).

multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                         Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
                          string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
                        !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
                        [(set DstRC:$dst, (Int SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
                        !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
                        [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
}

multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
                    RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
                    PatFrag ld_frag, string asm, bit Is2Addr = 1> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
}

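// Here DstRC:$src1 carries the pass-through value: the SSE instantiations
// below tie it to the destination with Constraints = "$src1 = $dst", while
// the AVX versions pass Is2Addr = 0 and encode it as a separate VEX_4V
// source operand.
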
defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
                      f32mem, load, "cvtss2si">, XS, VEX;
defm Int_VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
                        int_x86_sse_cvtss2si64, f32mem, load, "cvtss2si">,
                        XS, VEX, VEX_W;
defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                      f128mem, load, "cvtsd2si">, XD, VEX;
defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
                        int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
                        XD, VEX, VEX_W;

// FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_.
// Get rid of this hack or rename the intrinsics; there are several
// instructions that only match with the intrinsic form, so why create
// duplicates just to let them be recognized by the assembler?
defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
                      "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
defm VCVTSD2SI64 : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
                      "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
                      f32mem, load, "cvtss2si">, XS;
defm Int_CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
                      f32mem, load, "cvtss2si{q}">, XS, REX_W;
defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                      f128mem, load, "cvtsd2si{l}">, XD;
defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
                      f128mem, load, "cvtsd2si{q}">, XD, REX_W;

defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
          int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
          int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
          VEX_W;
defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
          int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
          int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
          VEX_4V, VEX_W;

let Constraints = "$src1 = $dst" in {
  defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse_cvtsi2ss, i32mem, loadi32,
                        "cvtsi2ss">, XS;
  defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                          int_x86_sse_cvtsi642ss, i64mem, loadi64,
                          "cvtsi2ss{q}">, XS, REX_W;
  defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse2_cvtsi2sd, i32mem, loadi32,
                        "cvtsi2sd">, XD;
  defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                        int_x86_sse2_cvtsi642sd, i64mem, loadi64,
                        "cvtsi2sd">, XD, REX_W;
}

// Aliases for intrinsics
defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                    f32mem, load, "cvttss2si">, XS, VEX;
defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse_cvttss2si64, f32mem, load,
                                    "cvttss2si">, XS, VEX, VEX_W;
defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                    f128mem, load, "cvttsd2si">, XD, VEX;
defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse2_cvttsd2si64, f128mem, load,
                                    "cvttsd2si">, XD, VEX, VEX_W;
defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                    f32mem, load, "cvttss2si">, XS;
defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse_cvttss2si64, f32mem, load,
                                    "cvttss2si{q}">, XS, REX_W;
defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                    f128mem, load, "cvttsd2si">, XD;
defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse2_cvttsd2si64, f128mem, load,
                                    "cvttsd2si{q}">, XD, REX_W;

let Pattern = []<dag> in {
defm VCVTSS2SI   : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
                      "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
                      "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
                      VEX_W;
defm VCVTDQ2PS   : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
                      "cvtdq2ps\t{$src, $dst|$dst, $src}",
                      SSEPackedSingle>, TB, VEX;
defm VCVTDQ2PSY  : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
                      "cvtdq2ps\t{$src, $dst|$dst, $src}",
                      SSEPackedSingle>, TB, VEX;
}
let Pattern = []<dag> in {
defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
                      "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
                      "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
                      "cvtdq2ps\t{$src, $dst|$dst, $src}",
                      SSEPackedSingle>, TB; /* PD SSE3 form is available */
}

// Convert scalar double to scalar single
def VCVTSD2SSrr  : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
                       (ins FR64:$src1, FR64:$src2),
                      "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                      VEX_4V;
def VCVTSD2SSrm  : I<0x5A, MRMSrcMem, (outs FR32:$dst),
                     (ins FR64:$src1, f64mem:$src2),
                      "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
          Requires<[HasAVX]>;

def CVTSD2SSrr  : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround FR64:$src))]>;
def CVTSD2SSrm  : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
                      Requires<[HasSSE2, OptForSize]>;

defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
                      int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
                      XS, VEX_4V;
let Constraints = "$src1 = $dst" in
defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
                      int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;

// Convert scalar single to scalar double
// SSE2 instructions with XS prefix
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
                    (ins FR32:$src1, FR32:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XS, Requires<[HasAVX]>, VEX_4V;
def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
                    (ins FR32:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;

let Predicates = [HasAVX] in {
  def : Pat<(f64 (fextend FR32:$src)),
            (VCVTSS2SDrr FR32:$src, FR32:$src)>;
  def : Pat<(fextend (loadf32 addr:$src)),
            (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(extloadf32 addr:$src),
            (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
}

def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (fextend FR32:$src))]>, XS,
                 Requires<[HasSSE2]>;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
                 Requires<[HasSSE2, OptForSize]>;

def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       VR128:$src2))]>, XS, VEX_4V,
                    Requires<[HasAVX]>;
def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       (load addr:$src2)))]>, XS, VEX_4V,
                    Requires<[HasAVX]>;
let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       VR128:$src2))]>, XS,
                    Requires<[HasSSE2]>;
def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                    "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       (load addr:$src2)))]>, XS,
                    Requires<[HasSSE2]>;
}

def : Pat<(extloadf32 addr:$src),
          (CVTSS2SDrr (MOVSSrm addr:$src))>,
      Requires<[HasSSE2, OptForSpeed]>;

// Convert doubleword to packed single/double fp
// SSE2 instructions without OpSize prefix
def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                     TB, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     TB, VEX, Requires<[HasAVX]>;
def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "cvtdq2ps\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     TB, Requires<[HasSSE2]>;

// FIXME: why is the non-intrinsic version described as SSE3?
// SSE2 instructions with XS prefix
def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                     XS, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, VEX, Requires<[HasAVX]>;
def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                     XS, Requires<[HasSSE2]>;
def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, Requires<[HasSSE2]>;

// Convert packed single/double fp to doubleword
def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>;

def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
                        VEX;
def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
                         (ins f128mem:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                            (memop addr:$src)))]>, VEX;
def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                           (memop addr:$src)))]>;

// SSE2 packed instructions with XD prefix
def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                     XD, VEX, Requires<[HasAVX]>;
def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (memop addr:$src)))]>,
                     XD, VEX, Requires<[HasAVX]>;
def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                     XD, Requires<[HasSSE2]>;
def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (memop addr:$src)))]>,
                     XD, Requires<[HasSSE2]>;

// Convert with truncation packed single/double fp to doubleword
// SSE2 packed instructions with XS prefix
def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                            (int_x86_sse2_cvttps2dq VR128:$src))]>;
def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                            (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;

def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vcvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                              (int_x86_sse2_cvttps2dq VR128:$src))]>,
                      XS, VEX, Requires<[HasAVX]>;
def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "vcvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttps2dq
                                           (memop addr:$src)))]>,
                      XS, VEX, Requires<[HasAVX]>;

def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
          (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
          (CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;

def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
          (VCVTDQ2PSYrr VR256:$src)>, Requires<[HasAVX]>;
def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
          (VCVTTPS2DQYrr VR256:$src)>, Requires<[HasAVX]>;

def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
                            (ins VR128:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
                       VEX;
def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
                          (ins f128mem:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                             (memop addr:$src)))]>, VEX;
def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                         (memop addr:$src)))]>;

// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                         "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                         "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;

// Convert packed single to packed double
let Predicates = [HasAVX] in {
  // SSE2 instructions without OpSize prefix
  def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
  def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                      "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
  def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                       "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
  def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
                       "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
}
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;

def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                     VEX, Requires<[HasAVX]>;
def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "vcvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                     VEX, Requires<[HasAVX]>;
def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                     TB, Requires<[HasSSE2]>;

// Convert packed double to packed single
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                        "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                        "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;

def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
                         (ins f128mem:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                            (memop addr:$src)))]>;
def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                            (memop addr:$src)))]>;

// AVX 256-bit register conversion intrinsics
// FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
// whenever possible to avoid declaring two versions of each one.
def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
          (VCVTDQ2PSYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
          (VCVTDQ2PSYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
          (VCVTPD2PSYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
          (VCVTPD2PSYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
          (VCVTPS2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
          (VCVTPS2DQYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
          (VCVTPS2PDYrr VR128:$src)>;
def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
          (VCVTPS2PDYrm addr:$src)>;

def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
          (VCVTTPD2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
          (VCVTTPD2DQYrm addr:$src)>;

def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
          (VCVTTPS2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
          (VCVTTPS2DQYrm addr:$src)>;

// Match fround for 128/256-bit conversions
def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
          (VCVTPD2PSYrr VR256:$src)>;
def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
          (VCVTPD2PSYrm addr:$src)>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Compare Instructions
//===----------------------------------------------------------------------===//

// sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
                            string asm, string asm_alt> {
  let isAsmParserOnly = 1 in {
    def rr : SIi8<0xC2, MRMSrcReg,
                  (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
                  asm, []>;
    let mayLoad = 1 in
    def rm : SIi8<0xC2, MRMSrcMem,
                  (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
                  asm, []>;
  }

  // Accept explicit immediate argument form instead of comparison code.
  def rr_alt : SIi8<0xC2, MRMSrcReg,
                (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
                asm_alt, []>;
  let mayLoad = 1 in
  def rm_alt : SIi8<0xC2, MRMSrcMem,
                (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
                asm_alt, []>;
}

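// Note: "${cc}" in the asm strings below substitutes the SSECC operand into
// the mnemonic itself (cmpeqss, cmpltss, ...), while the *_alt forms accept
// the raw imm8 comparison code as an explicit operand instead.
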
let neverHasSideEffects = 1 in {
  defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
                 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
                 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
                 XS, VEX_4V;
  defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
                 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
                 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
                 XD, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
def CMPSSrr : SIi8<0xC2, MRMSrcReg,
                  (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, SSECC:$cc),
                  "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), FR32:$src2, imm:$cc))]>, XS;
def CMPSSrm : SIi8<0xC2, MRMSrcMem,
                  (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2, SSECC:$cc),
                  "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), (loadf32 addr:$src2), imm:$cc))]>, XS;
def CMPSDrr : SIi8<0xC2, MRMSrcReg,
                  (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, SSECC:$cc),
                  "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), FR64:$src2, imm:$cc))]>, XD;
def CMPSDrm : SIi8<0xC2, MRMSrcMem,
                  (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2, SSECC:$cc),
                  "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), (loadf64 addr:$src2), imm:$cc))]>, XD;
}

let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
def CMPSSrr_alt : SIi8<0xC2, MRMSrcReg,
                    (outs FR32:$dst), (ins FR32:$src1, FR32:$src, i8imm:$src2),
                    "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
def CMPSSrm_alt : SIi8<0xC2, MRMSrcMem,
                    (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, i8imm:$src2),
                    "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
def CMPSDrr_alt : SIi8<0xC2, MRMSrcReg,
                    (outs FR64:$dst), (ins FR64:$src1, FR64:$src, i8imm:$src2),
                    "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
def CMPSDrm_alt : SIi8<0xC2, MRMSrcMem,
                    (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, i8imm:$src2),
                    "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
}

multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
                                Intrinsic Int, string asm> {
  def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
                      [(set VR128:$dst, (Int VR128:$src1,
                                             VR128:$src, imm:$cc))]>;
  def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
                      (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
                      [(set VR128:$dst, (Int VR128:$src1,
                                             (load addr:$src), imm:$cc))]>;
}

// Aliases to match intrinsics which expect XMM operand(s).
defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
                     "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
                     XS, VEX_4V;
defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
                     "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
                     XD, VEX_4V;
let Constraints = "$src1 = $dst" in {
  defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
                       "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
  defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
                       "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
}

// sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
                            ValueType vt, X86MemOperand x86memop,
                            PatFrag ld_frag, string OpcodeStr, Domain d> {
  def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
                     [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
  def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
                     !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
                     [(set EFLAGS, (OpNode (vt RC:$src1),
                                           (ld_frag addr:$src2)))], d>;
}

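// For example, defm UCOMISS below expands to UCOMISSrr and UCOMISSrm, which
// compare two scalars and write the result to EFLAGS (listed as a def by the
// enclosing "let Defs = [EFLAGS]" block).
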
let Defs = [EFLAGS] in {
  defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
                                  "ucomiss", SSEPackedSingle>, VEX;
  defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
                                  "ucomisd", SSEPackedDouble>, OpSize, VEX;
  let Pattern = []<dag> in {
    defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
                                   "comiss", SSEPackedSingle>, VEX;
    defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
                                   "comisd", SSEPackedDouble>, OpSize, VEX;
  }

  defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
                            load, "ucomiss", SSEPackedSingle>, VEX;
  defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
                            load, "ucomisd", SSEPackedDouble>, OpSize, VEX;

  defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
                            load, "comiss", SSEPackedSingle>, VEX;
  defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
                            load, "comisd", SSEPackedDouble>, OpSize, VEX;
  defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
                                  "ucomiss", SSEPackedSingle>, TB;
  defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
                                  "ucomisd", SSEPackedDouble>, TB, OpSize;

  let Pattern = []<dag> in {
    defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
                                    "comiss", SSEPackedSingle>, TB;
    defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
                                    "comisd", SSEPackedDouble>, TB, OpSize;
  }

  defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
                            load, "ucomiss", SSEPackedSingle>, TB;
  defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
                            load, "ucomisd", SSEPackedDouble>, TB, OpSize;

  defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
                                  "comiss", SSEPackedSingle>, TB;
  defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
                                  "comisd", SSEPackedDouble>, TB, OpSize;
} // Defs = [EFLAGS]

// sse12_cmp_packed - sse 1 & 2 compare packed instructions
multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
                            Intrinsic Int, string asm, string asm_alt,
                            Domain d> {
  let isAsmParserOnly = 1 in {
    def rri : PIi8<0xC2, MRMSrcReg,
               (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
               [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
    def rmi : PIi8<0xC2, MRMSrcMem,
               (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
               [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
  }

  // Accept explicit immediate argument form instead of comparison code.
  def rri_alt : PIi8<0xC2, MRMSrcReg,
             (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
             asm_alt, [], d>;
  def rmi_alt : PIi8<0xC2, MRMSrcMem,
             (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
             asm_alt, [], d>;
}

1240 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1241 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1242 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1243 SSEPackedSingle>, VEX_4V;
1244 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1245 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1246 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1247 SSEPackedDouble>, OpSize, VEX_4V;
1248 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
1249 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1250 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1251 SSEPackedSingle>, VEX_4V;
1252 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
1253 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1254 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1255 SSEPackedDouble>, OpSize, VEX_4V;
1256 let Constraints = "$src1 = $dst" in {
1257 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1258 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1259 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1260 SSEPackedSingle>, TB;
1261 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1262 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1263 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1264 SSEPackedDouble>, TB, OpSize;
1265 }
1267 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1268 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1269 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1270 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1271 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1272 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1273 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1274 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1276 //===----------------------------------------------------------------------===//
1277 // SSE 1 & 2 - Shuffle Instructions
1278 //===----------------------------------------------------------------------===//
1280 /// sse12_shuffle - sse 1 & 2 shuffle instructions
1281 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1282 ValueType vt, string asm, PatFrag mem_frag,
1283 Domain d, bit IsConvertibleToThreeAddress = 0> {
1284 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
1285 (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
1286 [(set RC:$dst, (vt (shufp:$src3
1287 RC:$src1, (mem_frag addr:$src2))))], d>;
1288 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1289 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
1290 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
1291 [(set RC:$dst,
1292 (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
1293 }
1295 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1296 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1297 memopv4f32, SSEPackedSingle>, TB, VEX_4V;
1298 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
1299 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1300 memopv8f32, SSEPackedSingle>, TB, VEX_4V;
1301 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1302 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1303 memopv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
1304 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
1305 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1306 memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
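// Note on the shuffle immediate: for SHUFPS, $src3 selects two elements of
// $src1 for the low half of the result and two elements of $src2 for the
// high half, two bits per lane:
//   dst[0] = src1[imm[1:0]]; dst[1] = src1[imm[3:2]];
//   dst[2] = src2[imm[5:4]]; dst[3] = src2[imm[7:6]]
// SHUFPD uses one bit per lane. With identical sources the reg-reg form is
// equivalent to a PSHUFD with the same immediate, which is what the
// IsConvertibleToThreeAddress bit below exploits.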
1308 let Constraints = "$src1 = $dst" in {
1309 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1310 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1311 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1312 TB;
1313 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1314 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1315 memopv2f64, SSEPackedDouble>, TB, OpSize;
1316 }
1318 //===----------------------------------------------------------------------===//
1319 // SSE 1 & 2 - Unpack Instructions
1320 //===----------------------------------------------------------------------===//
1322 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1323 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1324 PatFrag mem_frag, RegisterClass RC,
1325 X86MemOperand x86memop, string asm,
1326 Domain d> {
1327 def rr : PI<opc, MRMSrcReg,
1328 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1329 asm, [(set RC:$dst,
1330 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1331 def rm : PI<opc, MRMSrcMem,
1332 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1333 asm, [(set RC:$dst,
1334 (vt (OpNode RC:$src1,
1335 (mem_frag addr:$src2))))], d>;
1336 }
1338 let AddedComplexity = 10 in {
1339 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1340 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1341 SSEPackedSingle>, VEX_4V;
1342 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1343 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1344 SSEPackedDouble>, OpSize, VEX_4V;
1345 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1346 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1347 SSEPackedSingle>, VEX_4V;
1348 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1349 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1350 SSEPackedDouble>, OpSize, VEX_4V;
1352 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1353 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1354 SSEPackedSingle>, VEX_4V;
1355 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1356 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1357 SSEPackedDouble>, OpSize, VEX_4V;
1358 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1359 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1360 SSEPackedSingle>, VEX_4V;
1361 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1362 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1363 SSEPackedDouble>, OpSize, VEX_4V;
1365 let Constraints = "$src1 = $dst" in {
1366 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1367 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1368 SSEPackedSingle>, TB;
1369 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1370 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1371 SSEPackedDouble>, TB, OpSize;
1372 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1373 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1374 SSEPackedSingle>, TB;
1375 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1376 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1377 SSEPackedDouble>, TB, OpSize;
1378 } // Constraints = "$src1 = $dst"
1379 } // AddedComplexity
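// Note: the unpack instructions interleave the low (unpckl*) or high
// (unpckh*) elements of the two sources, e.g. for v4f32:
//   unpcklps: dst = { src1[0], src2[0], src1[1], src2[1] }
//   unpckhps: dst = { src1[2], src2[2], src1[3], src2[3] }
// The AddedComplexity above simply makes these patterns win over more
// generic shuffle patterns matching the same dags.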
1381 //===----------------------------------------------------------------------===//
1382 // SSE 1 & 2 - Extract Floating-Point Sign mask
1383 //===----------------------------------------------------------------------===//
1385 /// sse12_extr_sign_mask - sse 1 & 2 extract floating-point sign mask
1386 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1387 Domain d> {
1388 def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1389 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1390 [(set GR32:$dst, (Int RC:$src))], d>;
1391 def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
1392 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
1393 }
1396 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1397 "movmskps", SSEPackedSingle>, VEX;
1398 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1399 "movmskpd", SSEPackedDouble>, OpSize,
1401 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
1402 "movmskps", SSEPackedSingle>, VEX;
1403 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
1404 "movmskpd", SSEPackedDouble>, OpSize,
1406 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1407 SSEPackedSingle>, TB;
1408 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1409 SSEPackedDouble>, TB, OpSize;
1412 def MOVMSKPDrr32_alt : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src),
1413 "movmskpd\t{$src, $dst|$dst, $src}",
1414 [(set GR32:$dst, (X86fgetsign FR64:$src))], SSEPackedDouble>, TB, OpSize;
1415 def MOVMSKPDrr64_alt : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
1416 "movmskpd\t{$src, $dst|$dst, $src}",
1417 [(set GR64:$dst, (X86fgetsign FR64:$src))], SSEPackedDouble>, TB, OpSize;
1418 def MOVMSKPSrr32_alt : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
1419 "movmskps\t{$src, $dst|$dst, $src}",
1420 [(set GR32:$dst, (X86fgetsign FR32:$src))], SSEPackedSingle>, TB;
1421 def MOVMSKPSrr64_alt : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
1422 "movmskps\t{$src, $dst|$dst, $src}",
1423 [(set GR64:$dst, (X86fgetsign FR32:$src))], SSEPackedSingle>, TB;
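// Note: MOVMSKP[SD] packs the sign bit of each source element into the low
// bits of the GPR result (four bits for v4f32, two for v2f64) and zeroes the
// remaining bits; the X86fgetsign forms above use this to read a scalar's
// sign without issuing an FP compare.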
1426 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1427 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1428 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1429 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1431 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1432 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1433 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1434 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1437 //===----------------------------------------------------------------------===//
1438 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1439 //===----------------------------------------------------------------------===//
1441 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1442 // names that start with 'Fs'.
1444 // Alias instructions that map fld0 to pxor for sse.
1445 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1446 canFoldAsLoad = 1 in {
1447 // FIXME: Set encoding to pseudo!
1448 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1449 [(set FR32:$dst, fp32imm0)]>,
1450 Requires<[HasSSE1]>, TB, OpSize;
1451 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1452 [(set FR64:$dst, fpimm0)]>,
1453 Requires<[HasSSE2]>, TB, OpSize;
1454 def VFsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1455 [(set FR32:$dst, fp32imm0)]>,
1456 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
1457 def VFsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1458 [(set FR64:$dst, fpimm0)]>,
1459 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
1460 }
1462 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
1463 // bits are disregarded.
1464 let neverHasSideEffects = 1 in {
1465 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1466 "movaps\t{$src, $dst|$dst, $src}", []>;
1467 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1468 "movapd\t{$src, $dst|$dst, $src}", []>;
1471 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1472 // bits are disregarded.
1473 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1474 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1475 "movaps\t{$src, $dst|$dst, $src}",
1476 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
1477 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1478 "movapd\t{$src, $dst|$dst, $src}",
1479 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1480 }
1482 //===----------------------------------------------------------------------===//
1483 // SSE 1 & 2 - Logical Instructions
1484 //===----------------------------------------------------------------------===//
1486 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
1488 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
1489 SDNode OpNode> {
1490 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
1491 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, VEX_4V;
1493 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
1494 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, OpSize, VEX_4V;
1496 let Constraints = "$src1 = $dst" in {
1497 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
1498 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
1500 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
1501 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
1502 }
1503 }
1505 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1506 let mayLoad = 0 in {
1507 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
1508 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
1509 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
1510 }
1512 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
1513 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
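// Note: SSE provides no scalar bitwise instructions, so scalar operations
// such as fabs (and with a mask that clears the sign bit) and fneg (xor with
// the sign-bit mask) are lowered to X86fand/X86fxor and selected to the
// packed instructions above; only the low element of the result is
// meaningful, the upper lanes are don't-care.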
1515 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
1517 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
1518 SDNode OpNode> {
1519 let Pattern = []<dag> in {
1520 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1521 !strconcat(OpcodeStr, "ps"), f128mem,
1522 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
1523 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1524 (memopv2i64 addr:$src2)))], 0>, VEX_4V;
1526 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1527 !strconcat(OpcodeStr, "pd"), f128mem,
1528 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1529 (bc_v2i64 (v2f64 VR128:$src2))))],
1530 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1531 (memopv2i64 addr:$src2)))], 0>,
1532 OpSize, VEX_4V;
1533 }
1534 let Constraints = "$src1 = $dst" in {
1535 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1536 !strconcat(OpcodeStr, "ps"), f128mem,
1537 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
1538 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1539 (memopv2i64 addr:$src2)))]>, TB;
1541 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1542 !strconcat(OpcodeStr, "pd"), f128mem,
1543 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1544 (bc_v2i64 (v2f64 VR128:$src2))))],
1545 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1546 (memopv2i64 addr:$src2)))]>, TB, OpSize;
1547 }
1548 }
1550 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
1552 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr,
1553 SDNode OpNode> {
1554 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
1555 !strconcat(OpcodeStr, "ps"), f256mem,
1556 [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
1557 [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
1558 (memopv4i64 addr:$src2)))], 0>, VEX_4V;
1560 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
1561 !strconcat(OpcodeStr, "pd"), f256mem,
1562 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
1563 (bc_v4i64 (v4f64 VR256:$src2))))],
1564 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
1565 (memopv4i64 addr:$src2)))], 0>,
1566 OpSize, VEX_4V;
1567 }
1569 // AVX 256-bit packed logical ops forms
1570 defm VAND : sse12_fp_packed_logical_y<0x54, "and", and>;
1571 defm VOR : sse12_fp_packed_logical_y<0x56, "or", or>;
1572 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor", xor>;
1573 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn", X86andnp>;
1575 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
1576 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
1577 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
1578 let isCommutable = 0 in
1579 defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
1581 //===----------------------------------------------------------------------===//
1582 // SSE 1 & 2 - Arithmetic Instructions
1583 //===----------------------------------------------------------------------===//
1585 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
1586 /// vector forms.
1587 ///
1588 /// In addition, we also have a special variant of the scalar form here to
1589 /// represent the associated intrinsic operation. This form is unlike the
1590 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1591 /// and leaves the top elements unmodified (therefore these cannot be commuted).
1593 /// These three forms can each be reg+reg or reg+mem.
1596 /// FIXME: once all 256-bit intrinsics are matched, clean up and refactor those
1597 /// classes below.
1598 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
1599 bit Is2Addr = 1> {
1600 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
1601 OpNode, FR32, f32mem, Is2Addr>, XS;
1602 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
1603 OpNode, FR64, f64mem, Is2Addr>, XD;
1604 }
1606 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
1607 bit Is2Addr = 1> {
1608 let mayLoad = 0 in {
1609 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
1610 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
1611 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
1612 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
1613 }
1614 }
1616 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
1617 SDNode OpNode> {
1618 let mayLoad = 0 in {
1619 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
1620 v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
1621 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
1622 v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
1623 }
1624 }
1626 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
1627 bit Is2Addr = 1> {
1628 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1629 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
1630 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1631 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
1632 }
1634 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
1635 bit Is2Addr = 1> {
1636 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1637 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
1638 SSEPackedSingle, Is2Addr>, TB;
1640 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1641 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
1642 SSEPackedDouble, Is2Addr>, TB, OpSize;
1643 }
1645 multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
1646 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1647 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
1648 SSEPackedSingle, 0>, TB;
1650 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1651 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
1652 SSEPackedDouble, 0>, TB, OpSize;
1653 }
1655 // Binary Arithmetic instructions
1656 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
1657 basic_sse12_fp_binop_s_int<0x58, "add", 0>,
1658 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
1659 basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
1660 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
1661 basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
1662 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
1663 basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
1665 let isCommutable = 0 in {
1666 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
1667 basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
1668 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
1669 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
1670 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
1671 basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
1672 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
1673 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
1674 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
1675 basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
1676 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
1677 basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
1678 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
1679 basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
1680 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
1681 basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
1682 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
1683 basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
1684 basic_sse12_fp_binop_p_y_int<0x5D, "min">,
1685 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
1686 }
1688 let Constraints = "$src1 = $dst" in {
1689 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
1690 basic_sse12_fp_binop_p<0x58, "add", fadd>,
1691 basic_sse12_fp_binop_s_int<0x58, "add">;
1692 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
1693 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
1694 basic_sse12_fp_binop_s_int<0x59, "mul">;
1696 let isCommutable = 0 in {
1697 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
1698 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
1699 basic_sse12_fp_binop_s_int<0x5C, "sub">;
1700 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
1701 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
1702 basic_sse12_fp_binop_s_int<0x5E, "div">;
1703 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
1704 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
1705 basic_sse12_fp_binop_s_int<0x5F, "max">,
1706 basic_sse12_fp_binop_p_int<0x5F, "max">;
1707 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
1708 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
1709 basic_sse12_fp_binop_s_int<0x5D, "min">,
1710 basic_sse12_fp_binop_p_int<0x5D, "min">;
1711 }
1712 }
1715 /// In addition, we also have a special variant of the scalar form here to
1716 /// represent the associated intrinsic operation. This form is unlike the
1717 /// plain scalar form, in that it takes an entire vector (instead of a
1718 /// scalar) and leaves the top elements undefined.
1720 /// And, we have a special variant form for a full-vector intrinsic form.
1722 /// sse1_fp_unop_s - SSE1 unops in scalar form.
1723 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
1724 SDNode OpNode, Intrinsic F32Int> {
1725 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1726 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1727 [(set FR32:$dst, (OpNode FR32:$src))]>;
1728 // For scalar unary operations, fold a load into the operation
1729 // only in OptForSize mode. It eliminates an instruction, but it also
1730 // eliminates a whole-register clobber (the load), so it introduces a
1731 // partial register update condition.
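// Illustrative codegen difference (assembly shown for exposition only):
//   OptForSize:  sqrtss (%rdi), %xmm0        ; load folded, but the old upper
//                                            ; bits of %xmm0 stay live
//   otherwise:   movss  (%rdi), %xmm0        ; writes all of %xmm0, breaking
//                sqrtss %xmm0, %xmm0         ; the dependence on its old value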
1732 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
1733 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1734 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
1735 Requires<[HasSSE1, OptForSize]>;
1736 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1737 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1738 [(set VR128:$dst, (F32Int VR128:$src))]>;
1739 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1740 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1741 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1742 }
1744 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
1745 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1746 SDNode OpNode, Intrinsic F32Int> {
1747 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
1748 !strconcat(OpcodeStr,
1749 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1750 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
1751 !strconcat(OpcodeStr,
1752 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1753 []>, XS, Requires<[HasAVX, OptForSize]>;
1754 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1755 !strconcat(OpcodeStr,
1756 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1757 [(set VR128:$dst, (F32Int VR128:$src))]>;
1758 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1759 !strconcat(OpcodeStr,
1760 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1761 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1762 }
1764 /// sse1_fp_unop_p - SSE1 unops in packed form.
1765 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1766 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1767 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1768 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
1769 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1770 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1771 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
1772 }
1774 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
1775 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1776 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1777 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1778 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
1779 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1780 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1781 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
1782 }
1784 /// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
1785 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1786 Intrinsic V4F32Int> {
1787 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1788 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1789 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
1790 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1791 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1792 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
1793 }
1795 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsics unops in packed forms.
1796 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1797 Intrinsic V4F32Int> {
1798 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1799 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1800 [(set VR256:$dst, (V4F32Int VR256:$src))]>;
1801 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1802 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1803 [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
1804 }
1806 /// sse2_fp_unop_s - SSE2 unops in scalar form.
1807 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
1808 SDNode OpNode, Intrinsic F64Int> {
1809 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1810 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1811 [(set FR64:$dst, (OpNode FR64:$src))]>;
1812 // See the comments in sse1_fp_unop_s for why this is OptForSize.
1813 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1814 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1815 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
1816 Requires<[HasSSE2, OptForSize]>;
1817 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1818 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1819 [(set VR128:$dst, (F64Int VR128:$src))]>;
1820 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1821 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1822 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1823 }
1825 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
1826 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1827 SDNode OpNode, Intrinsic F64Int> {
1828 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1829 !strconcat(OpcodeStr,
1830 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1831 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
1832 (ins FR64:$src1, f64mem:$src2),
1833 !strconcat(OpcodeStr,
1834 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1835 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1836 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1837 [(set VR128:$dst, (F64Int VR128:$src))]>;
1838 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1839 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1840 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1841 }
1843 /// sse2_fp_unop_p - SSE2 unops in vector forms.
1844 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
1845 SDNode OpNode> {
1846 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1847 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1848 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
1849 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1850 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1851 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
1852 }
1854 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
1855 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1856 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1857 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1858 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
1859 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1860 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1861 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
1862 }
1864 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
1865 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1866 Intrinsic V2F64Int> {
1867 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1868 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1869 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
1870 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1871 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1872 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
1873 }
1875 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
1876 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1877 Intrinsic V2F64Int> {
1878 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1879 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1880 [(set VR256:$dst, (V2F64Int VR256:$src))]>;
1881 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1882 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1883 [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
1884 }
1886 let Predicates = [HasAVX] in {
1888 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse_sqrt_ss>,
1889 sse2_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1890 VEX_4V;
1892 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
1893 sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
1894 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1895 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1896 sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
1897 sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
1898 sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
1899 sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
1900 VEX;
1902 // Reciprocal approximations. Note that these typically require refinement
1903 // in order to obtain suitable precision.
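// A single Newton-Raphson step, typically applied by the caller (shown here
// only for reference; these patterns do not perform it):
//   rcpps:   x1 = x0 * (2.0 - a * x0)              // ~12 -> ~23 correct bits
//   rsqrtps: x1 = 0.5 * x0 * (3.0 - a * x0 * x0)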
1904 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt", X86frsqrt,
1905 int_x86_sse_rsqrt_ss>, VEX_4V;
1906 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
1907 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
1908 sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
1909 sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
1911 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp", X86frcp, int_x86_sse_rcp_ss>,
1912 VEX_4V;
1913 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
1914 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
1915 sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
1916 sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
1917 }
1919 def : Pat<(f32 (fsqrt FR32:$src)),
1920 (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
1921 def : Pat<(f64 (fsqrt FR64:$src)),
1922 (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
1923 def : Pat<(f64 (fsqrt (load addr:$src))),
1924 (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
1925 Requires<[HasAVX, OptForSize]>;
1926 def : Pat<(f32 (fsqrt (load addr:$src))),
1927 (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
1928 Requires<[HasAVX, OptForSize]>;
1931 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
1932 sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
1933 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
1934 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1935 sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
1936 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
1938 // Reciprocal approximations. Note that these typically require refinement
1939 // in order to obtain suitable precision.
1940 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
1941 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
1942 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
1943 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
1944 sse1_fp_unop_p<0x53, "rcp", X86frcp>,
1945 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
1947 // There are no f64 versions of the reciprocal approximation instructions.
1949 //===----------------------------------------------------------------------===//
1950 // SSE 1 & 2 - Non-temporal stores
1951 //===----------------------------------------------------------------------===//
1953 let AddedComplexity = 400 in { // Prefer non-temporal versions
1954 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
1955 (ins f128mem:$dst, VR128:$src),
1956 "movntps\t{$src, $dst|$dst, $src}",
1957 [(alignednontemporalstore (v4f32 VR128:$src),
1958 addr:$dst)]>, VEX;
1959 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
1960 (ins f128mem:$dst, VR128:$src),
1961 "movntpd\t{$src, $dst|$dst, $src}",
1962 [(alignednontemporalstore (v2f64 VR128:$src),
1963 addr:$dst)]>, VEX;
1964 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
1965 (ins f128mem:$dst, VR128:$src),
1966 "movntdq\t{$src, $dst|$dst, $src}",
1967 [(alignednontemporalstore (v2f64 VR128:$src),
1968 addr:$dst)]>, VEX;
1970 let ExeDomain = SSEPackedInt in
1971 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
1972 (ins f128mem:$dst, VR128:$src),
1973 "movntdq\t{$src, $dst|$dst, $src}",
1974 [(alignednontemporalstore (v4f32 VR128:$src),
1975 addr:$dst)]>, VEX;
1977 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
1978 (VMOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasAVX]>;
1980 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
1981 (ins f256mem:$dst, VR256:$src),
1982 "movntps\t{$src, $dst|$dst, $src}",
1983 [(alignednontemporalstore (v8f32 VR256:$src),
1984 addr:$dst)]>, VEX;
1985 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
1986 (ins f256mem:$dst, VR256:$src),
1987 "movntpd\t{$src, $dst|$dst, $src}",
1988 [(alignednontemporalstore (v4f64 VR256:$src),
1989 addr:$dst)]>, VEX;
1990 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
1991 (ins f256mem:$dst, VR256:$src),
1992 "movntdq\t{$src, $dst|$dst, $src}",
1993 [(alignednontemporalstore (v4f64 VR256:$src),
1994 addr:$dst)]>, VEX;
1995 let ExeDomain = SSEPackedInt in
1996 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
1997 (ins f256mem:$dst, VR256:$src),
1998 "movntdq\t{$src, $dst|$dst, $src}",
1999 [(alignednontemporalstore (v8f32 VR256:$src),
2000 addr:$dst)]>, VEX;
2001 }
2003 def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
2004 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
2005 def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
2006 (VMOVNTPDYmr addr:$dst, VR256:$src)>;
2007 def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
2008 (VMOVNTPSYmr addr:$dst, VR256:$src)>;
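// Note: non-temporal stores bypass the caches, so they only pay off for data
// that is not re-read soon. All the patterns in these blocks require aligned
// addresses (alignednontemporalstore), and the stores are weakly ordered, so
// an SFENCE (defined further below) is needed before the data is guaranteed
// visible to other agents.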
2010 let AddedComplexity = 400 in { // Prefer non-temporal versions
2011 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2012 "movntps\t{$src, $dst|$dst, $src}",
2013 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2014 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2015 "movntpd\t{$src, $dst|$dst, $src}",
2016 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
2018 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2019 "movntdq\t{$src, $dst|$dst, $src}",
2020 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2022 let ExeDomain = SSEPackedInt in
2023 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2024 "movntdq\t{$src, $dst|$dst, $src}",
2025 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2027 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
2028 (MOVNTDQmr addr:$dst, VR128:$src)>;
2030 // There is no AVX form for instructions below this point
2031 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2032 "movnti{l}\t{$src, $dst|$dst, $src}",
2033 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2034 TB, Requires<[HasSSE2]>;
2035 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2036 "movnti{q}\t{$src, $dst|$dst, $src}",
2037 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2038 TB, Requires<[HasSSE2]>;
2039 }
2041 //===----------------------------------------------------------------------===//
2042 // SSE 1 & 2 - Misc Instructions (No AVX form)
2043 //===----------------------------------------------------------------------===//
2045 // Prefetch intrinsic.
2046 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2047 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))]>;
2048 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2049 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))]>;
2050 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2051 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))]>;
2052 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2053 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))]>;
2055 // Load, store, and memory fence
2056 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2057 TB, Requires<[HasSSE1]>;
2058 def : Pat<(X86SFence), (SFENCE)>;
2060 // Alias instructions that map zero vector to pxor / xorp* for sse.
2061 // We set canFoldAsLoad because this can be converted to a constant-pool
2062 // load of an all-zeros value if folding it would be beneficial.
2063 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2064 // JIT implementation, which does not expand the instructions below like
2065 // X86MCInstLower does.
2066 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2067 isCodeGenOnly = 1 in {
2068 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2069 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
2070 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2071 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
2072 let ExeDomain = SSEPackedInt in
2073 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2074 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2075 }
2077 // The same as done above but for AVX. The 128-bit versions are the
2078 // same, but re-encoded. The 256-bit ones do not have a PI version, and
2079 // do not need one: on Sandy Bridge the register is set to zero at the
2080 // rename stage without using any execution unit, so SET0PSY and SET0PDY
2081 // can be used for vector int instructions without penalty.
2082 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2083 // JIT implementation, which does not expand the instructions below like
2084 // X86MCInstLower does.
2085 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2086 isCodeGenOnly = 1, Predicates = [HasAVX] in {
2087 def AVX_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2088 [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
2089 def AVX_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2090 [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
2091 def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2092 [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
2093 def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2094 [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
2095 let ExeDomain = SSEPackedInt in
2096 def AVX_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2097 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2098 }
2100 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
2101 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
2102 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
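// Note: xorps/pxor of a register with itself is a recognized zero idiom with
// no input dependence, which is why these "load 0" aliases can be marked
// isReMaterializable and isAsCheapAsAMove, while canFoldAsLoad still allows
// turning one into a constant-pool load when that is profitable.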
2104 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2105 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
2107 // AVX has no support for 256-bit integer instructions, but since the 128-bit
2108 // VPXOR instruction writes zero to its upper part, it is safe to use it to build zeros.
2109 def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;
2110 def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
2111 (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;
2113 def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
2114 def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
2115 (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
2117 //===----------------------------------------------------------------------===//
2118 // SSE 1 & 2 - Load/Store MXCSR register
2119 //===----------------------------------------------------------------------===//
2121 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2122 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2123 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2124 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2126 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2127 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2128 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2129 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2131 //===---------------------------------------------------------------------===//
2132 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2133 //===---------------------------------------------------------------------===//
2135 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2137 let neverHasSideEffects = 1 in {
2138 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2139 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2140 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2141 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2143 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2144 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2145 def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2146 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2148 let canFoldAsLoad = 1, mayLoad = 1 in {
2149 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2150 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2151 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2152 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2153 let Predicates = [HasAVX] in {
2154 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2155 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2156 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2157 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2161 let mayStore = 1 in {
2162 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2163 (ins i128mem:$dst, VR128:$src),
2164 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2165 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
2166 (ins i256mem:$dst, VR256:$src),
2167 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2168 let Predicates = [HasAVX] in {
2169 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2170 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2171 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
2172 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2176 let neverHasSideEffects = 1 in
2177 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2178 "movdqa\t{$src, $dst|$dst, $src}", []>;
2180 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2181 "movdqu\t{$src, $dst|$dst, $src}",
2182 []>, XS, Requires<[HasSSE2]>;
2184 let canFoldAsLoad = 1, mayLoad = 1 in {
2185 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2186 "movdqa\t{$src, $dst|$dst, $src}",
2187 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2188 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2189 "movdqu\t{$src, $dst|$dst, $src}",
2190 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2191 XS, Requires<[HasSSE2]>;
2192 }
2194 let mayStore = 1 in {
2195 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2196 "movdqa\t{$src, $dst|$dst, $src}",
2197 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2198 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2199 "movdqu\t{$src, $dst|$dst, $src}",
2200 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2201 XS, Requires<[HasSSE2]>;
2202 }
2204 // Intrinsic forms of MOVDQU load and store
2205 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2206 "vmovdqu\t{$src, $dst|$dst, $src}",
2207 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2208 XS, VEX, Requires<[HasAVX]>;
2210 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2211 "movdqu\t{$src, $dst|$dst, $src}",
2212 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2213 XS, Requires<[HasSSE2]>;
2215 } // ExeDomain = SSEPackedInt
2217 def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
2218 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
2219 (VMOVDQUYmr addr:$dst, VR256:$src)>;
2221 //===---------------------------------------------------------------------===//
2222 // SSE2 - Packed Integer Arithmetic Instructions
2223 //===---------------------------------------------------------------------===//
2225 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2227 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
2228 bit IsCommutable = 0, bit Is2Addr = 1> {
2229 let isCommutable = IsCommutable in
2230 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2231 (ins VR128:$src1, VR128:$src2),
2232 !if(Is2Addr,
2233 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2234 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2235 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2236 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2237 (ins VR128:$src1, i128mem:$src2),
2238 !if(Is2Addr,
2239 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2240 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2241 [(set VR128:$dst, (IntId VR128:$src1,
2242 (bitconvert (memopv2i64 addr:$src2))))]>;
2243 }
2245 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
2246 string OpcodeStr, Intrinsic IntId,
2247 Intrinsic IntId2, bit Is2Addr = 1> {
2248 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2249 (ins VR128:$src1, VR128:$src2),
2250 !if(Is2Addr,
2251 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2252 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2253 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2254 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2255 (ins VR128:$src1, i128mem:$src2),
2256 !if(Is2Addr,
2257 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2258 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2259 [(set VR128:$dst, (IntId VR128:$src1,
2260 (bitconvert (memopv2i64 addr:$src2))))]>;
2261 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
2262 (ins VR128:$src1, i32i8imm:$src2),
2263 !if(Is2Addr,
2264 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2265 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2266 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
2267 }
2269 /// PDI_binop_rm - Simple SSE2 binary operator.
2270 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2271 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
2272 let isCommutable = IsCommutable in
2273 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2274 (ins VR128:$src1, VR128:$src2),
2275 !if(Is2Addr,
2276 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2277 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2278 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
2279 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2280 (ins VR128:$src1, i128mem:$src2),
2281 !if(Is2Addr,
2282 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2283 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2284 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
2285 (bitconvert (memopv2i64 addr:$src2)))))]>;
2286 }
2288 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
2290 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
2291 /// to collapse (bitconvert VT to VT) into its operand.
2293 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
2294 bit IsCommutable = 0, bit Is2Addr = 1> {
2295 let isCommutable = IsCommutable in
2296 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2297 (ins VR128:$src1, VR128:$src2),
2298 !if(Is2Addr,
2299 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2300 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2301 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
2302 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2303 (ins VR128:$src1, i128mem:$src2),
2304 !if(Is2Addr,
2305 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2306 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2307 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
2308 }
2310 } // ExeDomain = SSEPackedInt
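// For illustration only (records like these are generated by the defm lines
// below, they are not spelled out in this file): PDI_binop_rm<0xFC, "paddb",
// add, v16i8, 1> produces roughly
//   def PADDBrr : PDI<0xFC, MRMSrcReg, (outs VR128:$dst),
//                     (ins VR128:$src1, VR128:$src2),
//                     "paddb\t{$src2, $dst|$dst, $src2}",
//                     [(set VR128:$dst, (v16i8 (add VR128:$src1, VR128:$src2)))]>;
// plus the corresponding PADDBrm memory form, whose operand is loaded as
// v2i64 and bitconverted to the element type.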
2312 // 128-bit Integer Arithmetic
2314 let Predicates = [HasAVX] in {
2315 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
2316 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
2317 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
2318 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
2319 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
2320 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
2321 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
2322 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
2323 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
2326 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
2327 VEX_4V;
2328 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
2329 VEX_4V;
2330 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
2331 VEX_4V;
2332 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
2333 VEX_4V;
2334 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
2335 VEX_4V;
2336 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
2337 VEX_4V;
2338 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
2339 VEX_4V;
2340 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
2341 VEX_4V;
2342 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
2343 VEX_4V;
2344 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
2345 VEX_4V;
2346 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
2347 VEX_4V;
2348 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
2349 VEX_4V;
2350 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
2351 VEX_4V;
2352 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
2353 VEX_4V;
2354 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
2355 VEX_4V;
2356 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
2357 VEX_4V;
2358 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
2359 VEX_4V;
2360 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
2361 VEX_4V;
2362 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
2363 VEX_4V;
2364 }
2366 let Constraints = "$src1 = $dst" in {
2367 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
2368 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
2369 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
2370 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
2371 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
2372 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
2373 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
2374 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
2375 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
2378 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
2379 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
2380 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
2381 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
2382 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
2383 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
2384 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
2385 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
2386 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
2387 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
2388 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
2389 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
2390 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
2391 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
2392 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
2393 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
2394 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
2395 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
2396 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
2398 } // Constraints = "$src1 = $dst"
2400 //===---------------------------------------------------------------------===//
2401 // SSE2 - Packed Integer Logical Instructions
2402 //===---------------------------------------------------------------------===//
2404 let Predicates = [HasAVX] in {
2405 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
2406 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
2407 VEX_4V;
2408 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
2409 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
2410 VEX_4V;
2411 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
2412 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
2413 VEX_4V;
2415 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
2416 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
2417 VEX_4V;
2418 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
2419 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
2420 VEX_4V;
2421 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
2422 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
2423 VEX_4V;
2425 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
2426 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
2427 VEX_4V;
2428 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
2429 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
2430 VEX_4V;
2432 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
2433 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
2434 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
2436 let ExeDomain = SSEPackedInt in {
2437 let neverHasSideEffects = 1 in {
2438 // 128-bit logical shifts.
2439 def VPSLLDQri : PDIi8<0x73, MRM7r,
2440 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2441 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2443 def VPSRLDQri : PDIi8<0x73, MRM3r,
2444 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2445 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2447 // PSRADQri doesn't exist in SSE[1-3].
2449 def VPANDNrr : PDI<0xDF, MRMSrcReg,
2450 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2451 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2452 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2453 VR128:$src2)))]>, VEX_4V;
2455 def VPANDNrm : PDI<0xDF, MRMSrcMem,
2456 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2457 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2458 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2459 (memopv2i64 addr:$src2))))]>,
2460 VEX_4V;
2461 }
2462 }
2464 let Constraints = "$src1 = $dst" in {
2465 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
2466 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
2467 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
2468 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
2469 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
2470 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
2472 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
2473 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
2474 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
2475 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
2476 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
2477 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
2479 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
2480 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
2481 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
2482 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
2484 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
2485 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
2486 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
2488 let ExeDomain = SSEPackedInt in {
2489 let neverHasSideEffects = 1 in {
2490 // 128-bit logical shifts.
2491 def PSLLDQri : PDIi8<0x73, MRM7r,
2492 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2493 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
2494 def PSRLDQri : PDIi8<0x73, MRM3r,
2495 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2496 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
2497 // PSRADQri doesn't exist in SSE[1-3].
2498 }
2499 def PANDNrr : PDI<0xDF, MRMSrcReg,
2500 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2501 "pandn\t{$src2, $dst|$dst, $src2}", []>;
2503 def PANDNrm : PDI<0xDF, MRMSrcMem,
2504 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2505 "pandn\t{$src2, $dst|$dst, $src2}", []>;
2506 }
2507 } // Constraints = "$src1 = $dst"
2509 let Predicates = [HasAVX] in {
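// Note: the psll_dq/psrl_dq intrinsics take a shift amount in bits, while
// the instructions take bytes, so BYTE_imm scales the immediate down by 8;
// the *_dq_bs ("byte shift") intrinsics below already take bytes.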
2510 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2511 (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2512 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2513 (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2514 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2515 (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
2516 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2517 (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
2518 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2519 (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2521 // Shift up / down and insert zeros.
2522 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2523 (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2524 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2525 (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2526 } // Predicates = [HasAVX]
2528 let Predicates = [HasSSE2] in {
2529 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2530 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2531 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2532 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2533 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2534 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
2535 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2536 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
2537 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2538 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2540 // Shift up / down and insert zeros.
2541 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2542 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2543 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2544 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2545 } // Predicates = [HasSSE2]
2547 //===---------------------------------------------------------------------===//
2548 // SSE2 - Packed Integer Comparison Instructions
2549 //===---------------------------------------------------------------------===//
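// Each element of the result is set to all-ones when the per-element
// compare is true and to all-zeros otherwise, so the results can be used
// directly as masks, e.g. with pand/pandn.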
2551 let Predicates = [HasAVX] in {
2552 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
2553 0>, VEX_4V;
2554 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
2555 0>, VEX_4V;
2556 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
2557 0>, VEX_4V;
2558 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
2559 0>, VEX_4V;
2560 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
2561 0>, VEX_4V;
2562 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
2563 0>, VEX_4V;
2564 } // Predicates = [HasAVX]
2566 let Constraints = "$src1 = $dst" in {
2567 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
2568 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
2569 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
2570 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
2571 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
2572 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
2573 } // Constraints = "$src1 = $dst"
2575 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2576 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
2577 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2578 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2579 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2580 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2581 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2582 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2583 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2584 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2585 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2586 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2588 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2589 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2590 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2591 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2592 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2593 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2594 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2595 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2596 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2597 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2598 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2599 (PCMPGTDrm VR128:$src1, addr:$src2)>;
2601 //===---------------------------------------------------------------------===//
2602 // SSE2 - Packed Integer Pack Instructions
2603 //===---------------------------------------------------------------------===//
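// Pack narrows each source element to half its width, saturating values
// that don't fit; e.g. packsswb clamps each i16 to [-128, 127] and packs
// the two 8 x i16 sources into one 16 x i8 result.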
2605 let Predicates = [HasAVX] in {
2606 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
2607 0, 0>, VEX_4V;
2608 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
2609 0, 0>, VEX_4V;
2610 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
2611 0, 0>, VEX_4V;
2612 } // Predicates = [HasAVX]
2614 let Constraints = "$src1 = $dst" in {
2615 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2616 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2617 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2618 } // Constraints = "$src1 = $dst"
2620 //===---------------------------------------------------------------------===//
2621 // SSE2 - Packed Integer Shuffle Instructions
2622 //===---------------------------------------------------------------------===//
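// The 8-bit immediate holds four 2-bit source indices, one per destination
// element; e.g. "pshufd $0x1B, %xmm1, %xmm0" (0x1B = 0b00011011) reverses
// the four dwords of xmm1.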
2624 let ExeDomain = SSEPackedInt in {
2625 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
2626 PatFrag bc_frag> {
2627 def ri : Ii8<0x70, MRMSrcReg,
2628 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2629 !strconcat(OpcodeStr,
2630 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2631 [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
2632 (undef))))]>;
2633 def mi : Ii8<0x70, MRMSrcMem,
2634 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2635 !strconcat(OpcodeStr,
2636 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2637 [(set VR128:$dst, (vt (pshuf_frag:$src2
2638 (bc_frag (memopv2i64 addr:$src1)),
2639 (undef))))]>;
2640 }
2641 } // ExeDomain = SSEPackedInt
2643 let Predicates = [HasAVX] in {
2644 let AddedComplexity = 5 in
2645 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
2646 VEX;
2648 // SSE2 with ImmT == Imm8 and XS prefix.
2649 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
2650 VEX;
2652 // SSE2 with ImmT == Imm8 and XD prefix.
2653 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
2654 VEX;
2655 } // Predicates = [HasAVX]
2657 let Predicates = [HasSSE2] in {
2658 let AddedComplexity = 5 in
2659 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
2661 // SSE2 with ImmT == Imm8 and XS prefix.
2662 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
2664 // SSE2 with ImmT == Imm8 and XD prefix.
2665 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
2666 } // Predicates = [HasSSE2]
2668 //===---------------------------------------------------------------------===//
2669 // SSE2 - Packed Integer Unpack Instructions
2670 //===---------------------------------------------------------------------===//
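// Unpack interleaves elements from the low (punpckl*) or high (punpckh*)
// halves of both sources; e.g. punpcklbw produces
// [a0,b0,a1,b1,...,a7,b7] from [a0..a15] and [b0..b15].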
2672 let ExeDomain = SSEPackedInt in {
2673 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
2674 SDNode OpNode, PatFrag bc_frag, bit Is2Addr = 1> {
2675 def rr : PDI<opc, MRMSrcReg,
2676 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2677 !if(Is2Addr,
2678 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2679 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2680 [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))]>;
2681 def rm : PDI<opc, MRMSrcMem,
2682 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2683 !if(Is2Addr,
2684 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2685 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2686 [(set VR128:$dst, (OpNode VR128:$src1,
2687 (bc_frag (memopv2i64
2688 addr:$src2))))]>;
2689 }
2691 let Predicates = [HasAVX] in {
2692 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Punpcklbw,
2693 bc_v16i8, 0>, VEX_4V;
2694 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Punpcklwd,
2695 bc_v8i16, 0>, VEX_4V;
2696 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Punpckldq,
2697 bc_v4i32, 0>, VEX_4V;
2699 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2700 /// knew to collapse (bitconvert VT to VT) into its operand.
2701 def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2702 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2703 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2704 [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
2705 VR128:$src2)))]>, VEX_4V;
2706 def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2707 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2708 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2709 [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
2710 (memopv2i64 addr:$src2))))]>, VEX_4V;
2712 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Punpckhbw,
2713 bc_v16i8, 0>, VEX_4V;
2714 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Punpckhwd,
2715 bc_v8i16, 0>, VEX_4V;
2716 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Punpckhdq,
2717 bc_v4i32, 0>, VEX_4V;
2719 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2720 /// knew to collapse (bitconvert VT to VT) into its operand.
2721 def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2722 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2723 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2724 [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
2725 VR128:$src2)))]>, VEX_4V;
2726 def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2727 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2728 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2729 [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
2730 (memopv2i64 addr:$src2))))]>, VEX_4V;
2731 } // Predicates = [HasAVX]
2733 let Constraints = "$src1 = $dst" in {
2734 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Punpcklbw, bc_v16i8>;
2735 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Punpcklwd, bc_v8i16>;
2736 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Punpckldq, bc_v4i32>;
2738 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2739 /// knew to collapse (bitconvert VT to VT) into its operand.
2740 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2741 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2742 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2744 (v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)))]>;
2745 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2746 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2747 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2749 (v2i64 (X86Punpcklqdq VR128:$src1,
2750 (memopv2i64 addr:$src2))))]>;
2752 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Punpckhbw, bc_v16i8>;
2753 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Punpckhwd, bc_v8i16>;
2754 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Punpckhdq, bc_v4i32>;
2756 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2757 /// knew to collapse (bitconvert VT to VT) into its operand.
2758 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2759 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2760 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2762 (v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)))]>;
2763 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2764 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2765 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2767 (v2i64 (X86Punpckhqdq VR128:$src1,
2768 (memopv2i64 addr:$src2))))]>;
2769 } // Constraints = "$src1 = $dst"
2771 } // ExeDomain = SSEPackedInt
2773 //===---------------------------------------------------------------------===//
2774 // SSE2 - Packed Integer Extract and Insert
2775 //===---------------------------------------------------------------------===//
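// pextrw zero-extends word $src2 of an XMM register into a GR32;
// pinsrw replaces word $src3 of the destination with the low 16 bits of a
// GR32 (or a loaded i16), leaving the other words unchanged.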
2777 let ExeDomain = SSEPackedInt in {
2778 multiclass sse2_pinsrw<bit Is2Addr = 1> {
2779 def rri : Ii8<0xC4, MRMSrcReg,
2780 (outs VR128:$dst), (ins VR128:$src1,
2781 GR32:$src2, i32i8imm:$src3),
2782 !if(Is2Addr,
2783 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2784 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2786 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
2787 def rmi : Ii8<0xC4, MRMSrcMem,
2788 (outs VR128:$dst), (ins VR128:$src1,
2789 i16mem:$src2, i32i8imm:$src3),
2790 !if(Is2Addr,
2791 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2792 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2794 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2795 imm:$src3))]>;
2796 }
2798 // Extract
2799 let Predicates = [HasAVX] in
2800 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
2801 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2802 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2803 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2804 imm:$src2))]>, OpSize, VEX;
2805 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2806 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2807 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2808 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2809 imm:$src2))]>;
2811 // Insert
2812 let Predicates = [HasAVX] in {
2813 defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
2814 def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
2815 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
2816 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2817 []>, OpSize, VEX_4V;
2818 } // Predicates = [HasAVX]
2820 let Constraints = "$src1 = $dst" in
2821 defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
2823 } // ExeDomain = SSEPackedInt
2825 //===---------------------------------------------------------------------===//
2826 // SSE2 - Packed Mask Creation
2827 //===---------------------------------------------------------------------===//
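// pmovmskb collects the most significant bit of each of the 16 source
// bytes into the low 16 bits of the GR32 destination and zeroes the rest.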
2829 let ExeDomain = SSEPackedInt in {
2831 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2832 "pmovmskb\t{$src, $dst|$dst, $src}",
2833 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
2834 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2835 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
2836 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2837 "pmovmskb\t{$src, $dst|$dst, $src}",
2838 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2840 } // ExeDomain = SSEPackedInt
2842 //===---------------------------------------------------------------------===//
2843 // SSE2 - Conditional Store
2844 //===---------------------------------------------------------------------===//
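// maskmovdqu is a byte-masked store: only bytes of $src whose corresponding
// $mask byte has its MSB set are written, to the address held implicitly in
// EDI (or RDI in 64-bit mode).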
2846 let ExeDomain = SSEPackedInt in {
2848 let Uses = [EDI] in
2849 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
2850 (ins VR128:$src, VR128:$mask),
2851 "maskmovdqu\t{$mask, $src|$src, $mask}",
2852 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
2853 let Uses = [RDI] in
2854 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
2855 (ins VR128:$src, VR128:$mask),
2856 "maskmovdqu\t{$mask, $src|$src, $mask}",
2857 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
2859 let Uses = [EDI] in
2860 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2861 "maskmovdqu\t{$mask, $src|$src, $mask}",
2862 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
2863 let Uses = [RDI] in
2864 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2865 "maskmovdqu\t{$mask, $src|$src, $mask}",
2866 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
2868 } // ExeDomain = SSEPackedInt
2870 //===---------------------------------------------------------------------===//
2871 // SSE2 - Move Doubleword
2872 //===---------------------------------------------------------------------===//
2874 //===---------------------------------------------------------------------===//
2875 // Move Int Doubleword to Packed Double Int
2877 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2878 "movd\t{$src, $dst|$dst, $src}",
2880 (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
2881 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2882 "movd\t{$src, $dst|$dst, $src}",
2884 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
2886 def VMOV64toPQIrr : VRPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2887 "mov{d|q}\t{$src, $dst|$dst, $src}",
2889 (v2i64 (scalar_to_vector GR64:$src)))]>, VEX;
2890 def VMOV64toSDrr : VRPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
2891 "mov{d|q}\t{$src, $dst|$dst, $src}",
2892 [(set FR64:$dst, (bitconvert GR64:$src))]>, VEX;
2894 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2895 "movd\t{$src, $dst|$dst, $src}",
2897 (v4i32 (scalar_to_vector GR32:$src)))]>;
2898 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2899 "movd\t{$src, $dst|$dst, $src}",
2901 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2902 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2903 "mov{d|q}\t{$src, $dst|$dst, $src}",
2905 (v2i64 (scalar_to_vector GR64:$src)))]>;
2906 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
2907 "mov{d|q}\t{$src, $dst|$dst, $src}",
2908 [(set FR64:$dst, (bitconvert GR64:$src))]>;
2910 //===---------------------------------------------------------------------===//
2911 // Move Int Doubleword to Single Scalar
2913 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2914 "movd\t{$src, $dst|$dst, $src}",
2915 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
2917 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2918 "movd\t{$src, $dst|$dst, $src}",
2919 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
2920 VEX;
2921 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2922 "movd\t{$src, $dst|$dst, $src}",
2923 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2925 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2926 "movd\t{$src, $dst|$dst, $src}",
2927 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2929 //===---------------------------------------------------------------------===//
2930 // Move Packed Doubleword Int to Packed Double Int
2932 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2933 "movd\t{$src, $dst|$dst, $src}",
2934 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2935 (iPTR 0)))]>, VEX;
2936 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
2937 (ins i32mem:$dst, VR128:$src),
2938 "movd\t{$src, $dst|$dst, $src}",
2939 [(store (i32 (vector_extract (v4i32 VR128:$src),
2940 (iPTR 0))), addr:$dst)]>, VEX;
2941 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2942 "movd\t{$src, $dst|$dst, $src}",
2943 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2944 (iPTR 0)))]>;
2945 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
2946 "movd\t{$src, $dst|$dst, $src}",
2947 [(store (i32 (vector_extract (v4i32 VR128:$src),
2948 (iPTR 0))), addr:$dst)]>;
2950 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
2951 "mov{d|q}\t{$src, $dst|$dst, $src}",
2952 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
2953 (iPTR 0)))]>;
2954 def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
2955 "movq\t{$src, $dst|$dst, $src}",
2956 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
2958 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
2959 "mov{d|q}\t{$src, $dst|$dst, $src}",
2960 [(set GR64:$dst, (bitconvert FR64:$src))]>;
2961 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
2962 "movq\t{$src, $dst|$dst, $src}",
2963 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
2965 //===---------------------------------------------------------------------===//
2966 // Move Scalar Single to Double Int
2968 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
2969 "movd\t{$src, $dst|$dst, $src}",
2970 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
2971 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
2972 "movd\t{$src, $dst|$dst, $src}",
2973 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
2974 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
2975 "movd\t{$src, $dst|$dst, $src}",
2976 [(set GR32:$dst, (bitconvert FR32:$src))]>;
2977 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
2978 "movd\t{$src, $dst|$dst, $src}",
2979 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
2981 //===---------------------------------------------------------------------===//
2982 // Patterns and instructions for the zero-extending movd/movq to an XMM register
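// movd/movq into an XMM register implicitly zero the untouched upper
// elements; X86vzmovl models that zeroing, so the plain move instruction
// already implements the zero-extend.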
2984 let AddedComplexity = 15 in {
2985 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2986 "movd\t{$src, $dst|$dst, $src}",
2987 [(set VR128:$dst, (v4i32 (X86vzmovl
2988 (v4i32 (scalar_to_vector GR32:$src)))))]>,
2989 VEX;
2990 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2991 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
2992 [(set VR128:$dst, (v2i64 (X86vzmovl
2993 (v2i64 (scalar_to_vector GR64:$src)))))]>,
2994 VEX, VEX_W;
2995 }
2996 let AddedComplexity = 15 in {
2997 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2998 "movd\t{$src, $dst|$dst, $src}",
2999 [(set VR128:$dst, (v4i32 (X86vzmovl
3000 (v4i32 (scalar_to_vector GR32:$src)))))]>;
3001 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3002 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3003 [(set VR128:$dst, (v2i64 (X86vzmovl
3004 (v2i64 (scalar_to_vector GR64:$src)))))]>;
3005 }
3007 let AddedComplexity = 20 in {
3008 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3009 "movd\t{$src, $dst|$dst, $src}",
3011 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3012 (loadi32 addr:$src))))))]>,
3014 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3015 "movd\t{$src, $dst|$dst, $src}",
3017 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3018 (loadi32 addr:$src))))))]>;
3020 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
3021 (MOVZDI2PDIrm addr:$src)>;
3022 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3023 (MOVZDI2PDIrm addr:$src)>;
3024 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3025 (MOVZDI2PDIrm addr:$src)>;
3026 }
3028 // AVX 128-bit movd/movq instructions write zeros in the high 128-bit part.
3029 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
3030 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
3031 (v4i32 (scalar_to_vector GR32:$src)), (i32 0)))),
3032 (SUBREG_TO_REG (i32 0), (VMOVZDI2PDIrr GR32:$src), sub_xmm)>;
3033 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
3034 (v2i64 (scalar_to_vector GR64:$src)), (i32 0)))),
3035 (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
3037 // These are the correct encodings of the instructions so that we know how to
3038 // read correct assembly, even though we continue to emit the wrong ones for
3039 // compatibility with Darwin's buggy assembler.
3040 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3041 (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
3042 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3043 (MOV64toSDrr FR64:$dst, GR64:$src), 0>;
3044 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3045 (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
3046 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3047 (MOVSDto64rr GR64:$dst, FR64:$src), 0>;
3048 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3049 (VMOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
3050 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3051 (MOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
3053 //===---------------------------------------------------------------------===//
3054 // SSE2 - Move Quadword
3055 //===---------------------------------------------------------------------===//
3057 //===---------------------------------------------------------------------===//
3058 // Move Quadword Int to Packed Quadword Int
3060 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3061 "vmovq\t{$src, $dst|$dst, $src}",
3063 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3064 VEX, Requires<[HasAVX]>;
3065 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3066 "movq\t{$src, $dst|$dst, $src}",
3068 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3069 Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
3071 //===---------------------------------------------------------------------===//
3072 // Move Packed Quadword Int to Quadword Int
3074 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3075 "movq\t{$src, $dst|$dst, $src}",
3076 [(store (i64 (vector_extract (v2i64 VR128:$src),
3077 (iPTR 0))), addr:$dst)]>, VEX;
3078 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3079 "movq\t{$src, $dst|$dst, $src}",
3080 [(store (i64 (vector_extract (v2i64 VR128:$src),
3081 (iPTR 0))), addr:$dst)]>;
3083 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
3084 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
3086 //===---------------------------------------------------------------------===//
3087 // Store / copy the lower 64 bits of an XMM register.
3089 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3090 "movq\t{$src, $dst|$dst, $src}",
3091 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
3092 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3093 "movq\t{$src, $dst|$dst, $src}",
3094 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
3096 let AddedComplexity = 20 in
3097 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3098 "vmovq\t{$src, $dst|$dst, $src}",
3100 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3101 (loadi64 addr:$src))))))]>,
3102 XS, VEX, Requires<[HasAVX]>;
3104 let AddedComplexity = 20 in {
3105 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3106 "movq\t{$src, $dst|$dst, $src}",
3108 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3109 (loadi64 addr:$src))))))]>,
3110 XS, Requires<[HasSSE2]>;
3112 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3113 (MOVZQI2PQIrm addr:$src)>;
3114 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
3115 (MOVZQI2PQIrm addr:$src)>;
3116 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
3117 }
3119 //===---------------------------------------------------------------------===//
3120 // Move from XMM to XMM, clearing the upper 64 bits. Note: there is a bug
3121 // in the IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
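// e.g. "movq %xmm1, %xmm0" yields xmm0 = [xmm1[0], 0].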
3123 let AddedComplexity = 15 in
3124 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3125 "vmovq\t{$src, $dst|$dst, $src}",
3126 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3127 XS, VEX, Requires<[HasAVX]>;
3128 let AddedComplexity = 15 in
3129 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3130 "movq\t{$src, $dst|$dst, $src}",
3131 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3132 XS, Requires<[HasSSE2]>;
3134 let AddedComplexity = 20 in
3135 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3136 "vmovq\t{$src, $dst|$dst, $src}",
3137 [(set VR128:$dst, (v2i64 (X86vzmovl
3138 (loadv2i64 addr:$src))))]>,
3139 XS, VEX, Requires<[HasAVX]>;
3140 let AddedComplexity = 20 in {
3141 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3142 "movq\t{$src, $dst|$dst, $src}",
3143 [(set VR128:$dst, (v2i64 (X86vzmovl
3144 (loadv2i64 addr:$src))))]>,
3145 XS, Requires<[HasSSE2]>;
3147 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
3148 (MOVZPQILo2PQIrm addr:$src)>;
3149 }
3151 // Instructions to match in the assembler
3152 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3153 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3154 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3155 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3156 // Recognize "movd" with GR64 destination, but encode as a "movq"
3157 def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3158 "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3160 // Instructions for the disassembler
3161 // xr = XMM register
3162 // xm = mem
3164 let Predicates = [HasAVX] in
3165 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3166 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3167 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3168 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3170 //===---------------------------------------------------------------------===//
3171 // SSE2 - Misc Instructions
3172 //===---------------------------------------------------------------------===//
3174 // Flush cache
3175 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3176 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3177 TB, Requires<[HasSSE2]>;
3179 // Load, store, and memory fence
3180 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3181 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3182 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3183 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3184 def : Pat<(X86LFence), (LFENCE)>;
3185 def : Pat<(X86MFence), (MFENCE)>;
3188 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3189 // was introduced with SSE2, it's backward compatible.
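// Its encoding is F3 90 (a REP prefix on NOP), which older processors
// simply execute as a NOP.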
3190 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
3192 // Alias instructions that materialize an all-ones vector (via pcmpeqd).
3193 // We set canFoldAsLoad because this can be converted to a constant-pool
3194 // load of an all-ones value if folding it would be beneficial.
3195 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
3196 // JIT implementation, which does not expand the instructions below like
3197 // X86MCInstLower does.
3198 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3199 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3200 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3201 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
3202 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3203 isCodeGenOnly = 1, ExeDomain = SSEPackedInt, Predicates = [HasAVX] in
3204 def AVX_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3205 [(set VR128:$dst, (v4i32 immAllOnesV))]>, VEX_4V;
3207 //===---------------------------------------------------------------------===//
3208 // SSE3 - Conversion Instructions
3209 //===---------------------------------------------------------------------===//
3211 // Convert Packed Double FP to Packed DW Integers
3212 let Predicates = [HasAVX] in {
3213 // The assembler can recognize rr 256-bit instructions by seeing a ymm
3214 // register, but memory operands leave the operand size ambiguous.
3215 // Provide explicit x/y-suffixed rr and rm forms to disambiguate.
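// e.g. "vcvtpd2dqx (%rax), %xmm0" reads 128 bits from memory while
// "vcvtpd2dqy (%rax), %xmm0" reads 256 bits; without the suffix the memory
// form would be ambiguous.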
3216 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3217 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3218 def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3219 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3222 def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3223 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3224 def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3225 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3228 def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3229 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
3230 def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
3231 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
3234 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3235 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3236 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3237 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3239 // Convert Packed DW Integers to Packed Double FP
3240 let Predicates = [HasAVX] in {
3241 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3242 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3243 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3244 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3245 def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
3246 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3247 def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
3248 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3251 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3252 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3253 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3254 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3256 // AVX 256-bit register conversion intrinsics
3257 def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
3258 (VCVTDQ2PDYrr VR128:$src)>;
3259 def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
3260 (VCVTDQ2PDYrm addr:$src)>;
3262 def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
3263 (VCVTPD2DQYrr VR256:$src)>;
3264 def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
3265 (VCVTPD2DQYrm addr:$src)>;
3267 //===---------------------------------------------------------------------===//
3268 // SSE3 - Move Instructions
3269 //===---------------------------------------------------------------------===//
3271 //===---------------------------------------------------------------------===//
3272 // Replicate Single FP - MOVSHDUP and MOVSLDUP
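// movshdup duplicates the odd-index elements and movsldup the even-index
// ones; e.g. movshdup [a,b,c,d] -> [b,b,d,d] and movsldup -> [a,a,c,c].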
3274 multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
3275 ValueType vt, RegisterClass RC, PatFrag mem_frag,
3276 X86MemOperand x86memop> {
3277 def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
3278 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3279 [(set RC:$dst, (vt (OpNode RC:$src)))]>;
3280 def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
3281 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3282 [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>;
3283 }
3285 let Predicates = [HasAVX] in {
3286 defm VMOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
3287 v4f32, VR128, memopv4f32, f128mem>, VEX;
3288 defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
3289 v4f32, VR128, memopv4f32, f128mem>, VEX;
3290 defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
3291 v8f32, VR256, memopv8f32, f256mem>, VEX;
3292 defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
3293 v8f32, VR256, memopv8f32, f256mem>, VEX;
3294 } // Predicates = [HasAVX]
3295 defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
3296 memopv4f32, f128mem>;
3297 defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
3298 memopv4f32, f128mem>;
3300 let Predicates = [HasSSE3] in {
3301 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
3302 (MOVSHDUPrr VR128:$src)>;
3303 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
3304 (MOVSHDUPrm addr:$src)>;
3305 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
3306 (MOVSLDUPrr VR128:$src)>;
3307 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
3308 (MOVSLDUPrm addr:$src)>;
3309 } // Predicates = [HasSSE3]
3311 let Predicates = [HasAVX] in {
3312 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
3313 (VMOVSHDUPrr VR128:$src)>;
3314 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
3315 (VMOVSHDUPrm addr:$src)>;
3316 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
3317 (VMOVSLDUPrr VR128:$src)>;
3318 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
3319 (VMOVSLDUPrm addr:$src)>;
3320 def : Pat<(v8i32 (X86Movshdup VR256:$src)),
3321 (VMOVSHDUPYrr VR256:$src)>;
3322 def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (memopv4i64 addr:$src)))),
3323 (VMOVSHDUPYrm addr:$src)>;
3324 def : Pat<(v8i32 (X86Movsldup VR256:$src)),
3325 (VMOVSLDUPYrr VR256:$src)>;
3326 def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (memopv4i64 addr:$src)))),
3327 (VMOVSLDUPYrm addr:$src)>;
3328 } // Predicates = [HasAVX]
3330 //===---------------------------------------------------------------------===//
3331 // Replicate Double FP - MOVDDUP
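// movddup broadcasts the low double: [a,b] -> [a,a].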
3333 multiclass sse3_replicate_dfp<string OpcodeStr> {
3334 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3335 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3336 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
3337 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
3338 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3339 [(set VR128:$dst,
3340 (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
3341 (undef))))]>;
3342 }
3344 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
3345 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3346 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3347 []>;
3348 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3349 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3350 []>;
3351 }
3353 let Predicates = [HasAVX] in {
3354 // FIXME: Merge above classes when we have patterns for the ymm version
3355 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
3356 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
3357 }
3358 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
3360 // Move Unaligned Integer
3361 let Predicates = [HasAVX] in {
3362 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3363 "vlddqu\t{$src, $dst|$dst, $src}",
3364 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
3365 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3366 "vlddqu\t{$src, $dst|$dst, $src}",
3367 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
3369 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3370 "lddqu\t{$src, $dst|$dst, $src}",
3371 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
3373 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
3374 (undef)),
3375 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3377 // Several Move patterns
3378 let AddedComplexity = 5 in {
3379 def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
3380 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3381 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3382 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3383 def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
3384 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3385 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
3386 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3387 }
3389 //===---------------------------------------------------------------------===//
3390 // SSE3 - Arithmetic
3391 //===---------------------------------------------------------------------===//
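// addsub subtracts in the even lanes and adds in the odd lanes, e.g.
// addsubps: [a0,a1,a2,a3], [b0,b1,b2,b3] -> [a0-b0, a1+b1, a2-b2, a3+b3].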
3393 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
3394 X86MemOperand x86memop, bit Is2Addr = 1> {
3395 def rr : I<0xD0, MRMSrcReg,
3396 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3397 !if(Is2Addr,
3398 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3399 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3400 [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
3401 def rm : I<0xD0, MRMSrcMem,
3402 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3403 !if(Is2Addr,
3404 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3405 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3406 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
3407 }
3409 let Predicates = [HasAVX],
3410 ExeDomain = SSEPackedDouble in {
3411 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
3412 f128mem, 0>, TB, XD, VEX_4V;
3413 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
3414 f128mem, 0>, TB, OpSize, VEX_4V;
3415 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
3416 f256mem, 0>, TB, XD, VEX_4V;
3417 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
3418 f256mem, 0>, TB, OpSize, VEX_4V;
3419 }
3420 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
3421 ExeDomain = SSEPackedDouble in {
3422 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
3423 f128mem>, TB, XD;
3424 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
3425 f128mem>, TB, OpSize;
3426 }
3428 //===---------------------------------------------------------------------===//
3429 // SSE3 Instructions
3430 //===---------------------------------------------------------------------===//
3432 // Horizontal ops
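// Each horizontal op reduces adjacent pairs within a source, e.g.
// haddps: [a0,a1,a2,a3], [b0,b1,b2,b3] -> [a0+a1, a2+a3, b0+b1, b2+b3].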
3433 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3434 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3435 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3436 !if(Is2Addr,
3437 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3438 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3439 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3441 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3442 !if(Is2Addr,
3443 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3444 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3445 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3446 }
3447 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3448 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3449 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3450 !if(Is2Addr,
3451 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3452 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3453 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3455 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3456 !if(Is2Addr,
3457 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3458 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3459 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3460 }
3462 let Predicates = [HasAVX] in {
3463 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
3464 int_x86_sse3_hadd_ps, 0>, VEX_4V;
3465 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
3466 int_x86_sse3_hadd_pd, 0>, VEX_4V;
3467 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
3468 int_x86_sse3_hsub_ps, 0>, VEX_4V;
3469 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
3470 int_x86_sse3_hsub_pd, 0>, VEX_4V;
3471 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
3472 int_x86_avx_hadd_ps_256, 0>, VEX_4V;
3473 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
3474 int_x86_avx_hadd_pd_256, 0>, VEX_4V;
3475 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
3476 int_x86_avx_hsub_ps_256, 0>, VEX_4V;
3477 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
3478 int_x86_avx_hsub_pd_256, 0>, VEX_4V;
3479 }
3481 let Constraints = "$src1 = $dst" in {
3482 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
3483 int_x86_sse3_hadd_ps>;
3484 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
3485 int_x86_sse3_hadd_pd>;
3486 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
3487 int_x86_sse3_hsub_ps>;
3488 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
3489 int_x86_sse3_hsub_pd>;
3490 }
3492 //===---------------------------------------------------------------------===//
3493 // SSSE3 - Packed Absolute Instructions
3494 //===---------------------------------------------------------------------===//
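// pabs{b,w,d} compute the per-element absolute value; note the result is
// interpreted as unsigned, so e.g. pabsb maps -128 to 0x80 (128).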
3497 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
3498 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
3499 PatFrag mem_frag128, Intrinsic IntId128> {
3500 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3501 (ins VR128:$src),
3502 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3503 [(set VR128:$dst, (IntId128 VR128:$src))]>,
3504 OpSize;
3506 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3507 (ins i128mem:$src),
3508 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3509 [(set VR128:$dst,
3510 (IntId128
3511 (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
3512 }
3514 let Predicates = [HasAVX] in {
3515 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
3516 int_x86_ssse3_pabs_b_128>, VEX;
3517 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
3518 int_x86_ssse3_pabs_w_128>, VEX;
3519 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
3520 int_x86_ssse3_pabs_d_128>, VEX;
3521 }
3523 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
3524 int_x86_ssse3_pabs_b_128>;
3525 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
3526 int_x86_ssse3_pabs_w_128>;
3527 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
3528 int_x86_ssse3_pabs_d_128>;
3530 //===---------------------------------------------------------------------===//
3531 // SSSE3 - Packed Binary Operator Instructions
3532 //===---------------------------------------------------------------------===//
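// Of these, pshufb is the fully general byte shuffle: each byte of $mask
// selects a source byte by its low 4 bits, except that a mask byte with
// its MSB set zeroes the corresponding result byte.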
3534 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
3535 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
3536 PatFrag mem_frag128, Intrinsic IntId128,
3537 bit Is2Addr = 1> {
3538 let isCommutable = 1 in
3539 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3540 (ins VR128:$src1, VR128:$src2),
3541 !if(Is2Addr,
3542 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3543 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3544 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3545 OpSize;
3546 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3547 (ins VR128:$src1, i128mem:$src2),
3548 !if(Is2Addr,
3549 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3550 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3551 [(set VR128:$dst,
3552 (IntId128 VR128:$src1,
3553 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
3554 }
3556 let Predicates = [HasAVX] in {
3557 let isCommutable = 0 in {
3558 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
3559 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
3560 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
3561 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
3562 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
3563 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
3564 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
3565 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
3566 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
3567 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
3568 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
3569 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
3570 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
3571 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
3572 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
3573 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
3574 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
3575 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
3576 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
3577 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
3578 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
3579 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
3580 }
3581 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
3582 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
3583 }
3585 // None of these have i8 immediate fields.
3586 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
3587 let isCommutable = 0 in {
3588 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
3589 int_x86_ssse3_phadd_w_128>;
3590 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
3591 int_x86_ssse3_phadd_d_128>;
3592 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
3593 int_x86_ssse3_phadd_sw_128>;
3594 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
3595 int_x86_ssse3_phsub_w_128>;
3596 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
3597 int_x86_ssse3_phsub_d_128>;
3598 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
3599 int_x86_ssse3_phsub_sw_128>;
3600 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
3601 int_x86_ssse3_pmadd_ub_sw_128>;
3602 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
3603 int_x86_ssse3_pshuf_b_128>;
3604 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
3605 int_x86_ssse3_psign_b_128>;
3606 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
3607 int_x86_ssse3_psign_w_128>;
3608 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
3609 int_x86_ssse3_psign_d_128>;
3610 }
3611 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
3612 int_x86_ssse3_pmul_hr_sw_128>;
3613 }
3615 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
3616 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
3617 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
3618 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
3620 def : Pat<(X86psignb VR128:$src1, VR128:$src2),
3621 (PSIGNBrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
3622 def : Pat<(X86psignw VR128:$src1, VR128:$src2),
3623 (PSIGNWrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
3624 def : Pat<(X86psignd VR128:$src1, VR128:$src2),
3625 (PSIGNDrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
3627 //===---------------------------------------------------------------------===//
3628 // SSSE3 - Packed Align Instruction Patterns
3629 //===---------------------------------------------------------------------===//
3631 multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
3632 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
3633 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
3634 !if(Is2Addr,
3635 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3636 !strconcat(asm,
3637 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3638 []>, OpSize;
3639 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
3640 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
3641 !if(Is2Addr,
3642 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3643 !strconcat(asm,
3644 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3645 []>, OpSize;
3646 }
3648 let Predicates = [HasAVX] in
3649 defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
3650 let Constraints = "$src1 = $dst", Predicates = [HasSSSE3] in
3651 defm PALIGN : ssse3_palign<"palignr">;
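// palignr concatenates {$src1 (high), $src2 (low)} and extracts 16 bytes
// starting at byte offset $imm, which is why the patterns below swap the
// two X86PAlign operands.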
3653 let Predicates = [HasSSSE3] in {
3654 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3655 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3656 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3657 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3658 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3659 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3660 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3661 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3662 }
3664 let Predicates = [HasAVX] in {
3665 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3666 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3667 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3668 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3669 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3670 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3671 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3672 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3673 }
3675 //===---------------------------------------------------------------------===//
3676 // SSSE3 Misc Instructions
3677 //===---------------------------------------------------------------------===//
3679 // Thread synchronization
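// monitor takes the address in EAX/RAX with extensions in ECX and hints in
// EDX; mwait takes hints in EAX and extensions in ECX. The pseudos below
// let the custom inserter copy their operands into those fixed registers.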
3680 let usesCustomInserter = 1 in {
3681 def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
3682 [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>;
3683 def MWAIT : PseudoI<(outs), (ins GR32:$src1, GR32:$src2),
3684 [(int_x86_sse3_mwait GR32:$src1, GR32:$src2)]>;
3685 }
3687 let Uses = [EAX, ECX, EDX] in
3688 def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>, TB,
3689 Requires<[HasSSE3]>;
3690 let Uses = [ECX, EAX] in
3691 def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", []>, TB,
3692 Requires<[HasSSE3]>;
3694 def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
3695 def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;
3697 def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>,
3698 Requires<[In32BitMode]>;
3699 def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>,
3700 Requires<[In64BitMode]>;
3702 //===---------------------------------------------------------------------===//
3703 // Non-Instruction Patterns
3704 //===---------------------------------------------------------------------===//
3706 // extload f32 -> f64. This matches load+fextend because we have a hack in
3707 // the isel (PreprocessForFPConvert) that can introduce loads after dag
3708 // combine.
3709 // Since these loads aren't folded into the fextend, we have to match it
3710 // explicitly here.
3711 let Predicates = [HasSSE2] in
3712 def : Pat<(fextend (loadf32 addr:$src)),
3713 (CVTSS2SDrm addr:$src)>;
3715 // Bitcasts between 128-bit vector types. Return the original type since
3716 // no instruction is needed for the conversion.
3717 let Predicates = [HasXMMInt] in {
3718 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
3719 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
3720 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
3721 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
3722 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
3723 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
3724 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
3725 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
3726 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
3727 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
3728 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
3729 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
3730 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
3731 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
3732 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
3733 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
3734 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
3735 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
3736 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
3737 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
3738 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
3739 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
3740 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
3741 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
3742 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
3743 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
3744 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
3745 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
3746 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
3747 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
3748 }
3750 // Bitcasts between 256-bit vector types. Return the original type since
3751 // no instruction is needed for the conversion.
3752 let Predicates = [HasAVX] in {
3753 def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
3754 def : Pat<(v4f64 (bitconvert (v8i32 VR256:$src))), (v4f64 VR256:$src)>;
3755 def : Pat<(v4f64 (bitconvert (v4i64 VR256:$src))), (v4f64 VR256:$src)>;
3756 def : Pat<(v4f64 (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
3757 def : Pat<(v4f64 (bitconvert (v32i8 VR256:$src))), (v4f64 VR256:$src)>;
3758 def : Pat<(v8f32 (bitconvert (v8i32 VR256:$src))), (v8f32 VR256:$src)>;
3759 def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
3760 def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
3761 def : Pat<(v8f32 (bitconvert (v32i8 VR256:$src))), (v8f32 VR256:$src)>;
3762 def : Pat<(v8f32 (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
3763 def : Pat<(v4i64 (bitconvert (v8f32 VR256:$src))), (v4i64 VR256:$src)>;
3764 def : Pat<(v4i64 (bitconvert (v8i32 VR256:$src))), (v4i64 VR256:$src)>;
3765 def : Pat<(v4i64 (bitconvert (v4f64 VR256:$src))), (v4i64 VR256:$src)>;
3766 def : Pat<(v4i64 (bitconvert (v32i8 VR256:$src))), (v4i64 VR256:$src)>;
3767 def : Pat<(v4i64 (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
3768 def : Pat<(v32i8 (bitconvert (v4f64 VR256:$src))), (v32i8 VR256:$src)>;
3769 def : Pat<(v32i8 (bitconvert (v4i64 VR256:$src))), (v32i8 VR256:$src)>;
3770 def : Pat<(v32i8 (bitconvert (v8f32 VR256:$src))), (v32i8 VR256:$src)>;
3771 def : Pat<(v32i8 (bitconvert (v8i32 VR256:$src))), (v32i8 VR256:$src)>;
3772 def : Pat<(v32i8 (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
3773 def : Pat<(v8i32 (bitconvert (v32i8 VR256:$src))), (v8i32 VR256:$src)>;
3774 def : Pat<(v8i32 (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
3775 def : Pat<(v8i32 (bitconvert (v8f32 VR256:$src))), (v8i32 VR256:$src)>;
3776 def : Pat<(v8i32 (bitconvert (v4i64 VR256:$src))), (v8i32 VR256:$src)>;
3777 def : Pat<(v8i32 (bitconvert (v4f64 VR256:$src))), (v8i32 VR256:$src)>;
3778 def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))), (v16i16 VR256:$src)>;
3779 def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))), (v16i16 VR256:$src)>;
3780 def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))), (v16i16 VR256:$src)>;
3781 def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))), (v16i16 VR256:$src)>;
3782 def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))), (v16i16 VR256:$src)>;
// Move scalar to XMM, zero-extended: movd to an XMM register zero-extends.
let AddedComplexity = 15 in {
// Zero a VR128, then do a MOVS{S|D} into the lower bits.
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
          (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
          (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
          (MOVSSrr (v4f32 (V_SET0PS)),
                   (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
          (MOVSSrr (v4i32 (V_SET0PI)),
                   (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
}
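// For example, (v4f32 (X86vzmovl (scalar_to_vector FR32:$f))) selects to
//   xorps %xmm0, %xmm0
//   movss %xmm1, %xmm0
// leaving the scalar in element 0 with zeros above it (illustrative sketch;
// register assignments are up to the allocator).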
// Splat v2f64 / v2i64
let AddedComplexity = 10 in {
def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
          (UNPCKLPDrr VR128:$src, VR128:$src)>,   Requires<[HasSSE2]>;
def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
          (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}
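// For example, splatting the low double of %xmm0 becomes
//   unpcklpd %xmm0, %xmm0
// (punpcklqdq for v2i64), which duplicates the low 64 bits into both
// elements. (Illustrative.)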
// Special unary SHUFPSrri case.
def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
          (SHUFPSrri VR128:$src1, VR128:$src1,
                     (SHUFFLE_get_shuf_imm VR128:$src3))>;
let AddedComplexity = 5 in
def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
          (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
          Requires<[HasSSE2]>;
// Special unary SHUFPDrri case.
def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
          (SHUFPDrri VR128:$src1, VR128:$src1,
                     (SHUFFLE_get_shuf_imm VR128:$src3))>,
          Requires<[HasSSE2]>;
// Special unary SHUFPDrri case.
def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
          (SHUFPDrri VR128:$src1, VR128:$src1,
                     (SHUFFLE_get_shuf_imm VR128:$src3))>,
          Requires<[HasSSE2]>;
// Unary v4f32 shuffle with PSHUF* in order to fold a load.
def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
          (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
          Requires<[HasSSE2]>;

// Special binary v4i32 shuffle cases with SHUFPS.
def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
          (SHUFPSrri VR128:$src1, VR128:$src2,
                     (SHUFFLE_get_shuf_imm VR128:$src3))>,
          Requires<[HasSSE2]>;
def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)))),
          (SHUFPSrmi VR128:$src1, addr:$src2,
                     (SHUFFLE_get_shuf_imm VR128:$src3))>,
          Requires<[HasSSE2]>;
// Special binary v2i64 shuffle cases using SHUFPDrri.
def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
          (SHUFPDrri VR128:$src1, VR128:$src2,
                     (SHUFFLE_get_shuf_imm VR128:$src3))>,
          Requires<[HasSSE2]>;
let AddedComplexity = 20 in {
// vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;

// vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;

// vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
          (MOVHLPSrr VR128:$src1, VR128:$src1)>;
def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
          (MOVHLPSrr VR128:$src1, VR128:$src1)>;
}
let AddedComplexity = 20 in {
// vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
}
// (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
                 addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;
let AddedComplexity = 15 in {
// Setting the lowest element in the vector.
def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4i32 VR128:$src1),
                   (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2i64 VR128:$src1),
                   (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;

// vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
          Requires<[HasSSE2]>;
def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
          Requires<[HasSSE2]>;
}
// vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
// fall back to this for SSE1)
def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
          (SHUFPSrri VR128:$src2, VR128:$src1,
                     (SHUFFLE_get_shuf_imm VR128:$src3))>;

// Set lowest element and zero upper elements.
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
          (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
// Use movaps / movups for SSE integer load / store (one byte shorter).
// The instructions selected below are then converted to MOVDQA/MOVDQU
// during the SSE execution domain pass.
let Predicates = [HasSSE1] in {
  def : Pat<(alignedloadv4i32 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv4i32 addr:$src),
            (MOVUPSrm addr:$src)>;
  def : Pat<(alignedloadv2i64 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (MOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
}
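// Encoding note: movaps is 0F 28 /r, while movdqa carries a 66 operand-size
// prefix (66 0F 6F /r), so the PS forms save one byte per instruction.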
// Use vmovaps/vmovups for AVX integer load/store.
let Predicates = [HasAVX] in {
  // 128-bit load/store
  def : Pat<(alignedloadv4i32 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv4i32 addr:$src),
            (VMOVUPSrm addr:$src)>;
  def : Pat<(alignedloadv2i64 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (VMOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;

  // 256-bit load/store
  def : Pat<(alignedloadv4i64 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv4i64 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedloadv8i32 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv8i32 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedstore (v4i64 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore (v8i32 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v4i64 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v8i32 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Move with Sign/Zero Extend
//===----------------------------------------------------------------------===//

multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst,
                   (IntId (bitconvert (v2i64 (scalar_to_vector
                                              (loadi64 addr:$src))))))]>,
                 OpSize;
}
let Predicates = [HasAVX] in {
defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
                                     VEX;
defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
                                     VEX;
defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
                                     VEX;
defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
                                     VEX;
defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
                                     VEX;
defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
                                     VEX;
}
defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
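// For illustration, defm PMOVSXBW expands via SS41I_binop_rm_int8 to roughly:
//   PMOVSXBWrr: pmovsxbw %xmm1, %xmm0   ; sign-extend 8 x i8 to 8 x i16
//   PMOVSXBWrm: pmovsxbw (%rax), %xmm0  ; same, reading 64 bits from memory
// (Sketch of the generated defs; register names arbitrary.)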
// Common patterns involving scalar load.
def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
          (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
          (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
          (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
          (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
          (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
          (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
          (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
          (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
          (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
          (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
          (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
          (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst,
                   (IntId (bitconvert (v4i32 (scalar_to_vector
                                              (loadi32 addr:$src))))))]>,
                 OpSize;
}
let Predicates = [HasAVX] in {
defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
                                     VEX;
defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
                                     VEX;
defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
                                     VEX;
defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
                                     VEX;
}
defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;

// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
          (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
          (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
          (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
          (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  // Expecting an i16 load any-extended to an i32 value.
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId (bitconvert
                   (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
                 OpSize;
}
let Predicates = [HasAVX] in {
defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
                                     VEX;
defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
                                     VEX;
}
defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;

// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbq
            (bitconvert (v4i32 (X86vzmovl
                                (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
          (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbq
            (bitconvert (v4i32 (X86vzmovl
                                (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
          (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Extract Instructions
//===----------------------------------------------------------------------===//

/// SS41I_extract8 - SSE 4.1 extract 8 bits to 32-bit reg or 8-bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                   (ins VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
                   OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   []>, OpSize;
  // There's an AssertZext in the way of writing the store pattern
  // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}
let Predicates = [HasAVX] in {
  defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
  def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
                            (ins VR128:$src1, i32i8imm:$src2),
                            "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                            []>, OpSize, VEX;
}

defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
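// Usage sketch: "pextrb $3, %xmm0, %eax" places byte 3 of %xmm0, zero-extended,
// into %eax; the mr form stores the selected byte straight to memory.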
/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   []>, OpSize;
  // There's an AssertZext in the way of writing the store pattern
  // (store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))), addr:$dst)
}

let Predicates = [HasAVX] in
  defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;

defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                   (ins VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set GR32:$dst,
                     (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
                           addr:$dst)]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;

defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
                   (ins VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set GR64:$dst,
                     (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
                           addr:$dst)]>, OpSize, REX_W;
}

let Predicates = [HasAVX] in
  defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;

defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
/// SS41I_extractf32 - SSE 4.1 extract 32-bit FP value to int reg or memory
/// destination
multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                   (ins VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(set GR32:$dst,
                     (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
                   OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                   (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
                   !strconcat(OpcodeStr,
                              "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                   [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
                           addr:$dst)]>, OpSize;
}

let Predicates = [HasAVX] in {
  defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
  def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
                               (ins VR128:$src1, i32i8imm:$src2),
                               "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
                               []>, OpSize, VEX;
}

defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;

// Also match an EXTRACTPS store when the store is done as f32 instead of i32.
def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
                                              imm:$src2))),
                 addr:$dst),
          (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
          Requires<[HasSSE41]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Insert Instructions
//===----------------------------------------------------------------------===//

multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
                   !if(Is2Addr,
                       !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                       !strconcat(asm,
                         "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                   [(set VR128:$dst,
                     (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
                   !if(Is2Addr,
                       !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                       !strconcat(asm,
                         "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                   [(set VR128:$dst,
                     (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
                                imm:$src3))]>, OpSize;
}
let Predicates = [HasAVX] in
  defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PINSRB : SS41I_insert8<0x20, "pinsrb">;

multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
                   !if(Is2Addr,
                       !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                       !strconcat(asm,
                         "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                   [(set VR128:$dst,
                     (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
                   OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
                   !if(Is2Addr,
                       !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                       !strconcat(asm,
                         "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                   [(set VR128:$dst,
                     (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
                                       imm:$src3)))]>, OpSize;
}
let Predicates = [HasAVX] in
  defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PINSRD : SS41I_insert32<0x22, "pinsrd">;

multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
                   !if(Is2Addr,
                       !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                       !strconcat(asm,
                         "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                   [(set VR128:$dst,
                     (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
                   OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
                   !if(Is2Addr,
                       !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                       !strconcat(asm,
                         "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                   [(set VR128:$dst,
                     (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
                                       imm:$src3)))]>, OpSize;
}
let Predicates = [HasAVX] in
  defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
let Constraints = "$src1 = $dst" in
  defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;

// insertps has a few different modes. The first two defined below are
// optimized inserts that won't zero arbitrary elements in the destination
// vector; the intrinsic form that follows can zero arbitrary elements in
// the target vector.
multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, VR128:$src2, u32u8imm:$src3),
                   !if(Is2Addr,
                       !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                       !strconcat(asm,
                         "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                   [(set VR128:$dst,
                     (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
                   OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
                   (ins VR128:$src1, f32mem:$src2, u32u8imm:$src3),
                   !if(Is2Addr,
                       !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                       !strconcat(asm,
                         "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                   [(set VR128:$dst,
                     (X86insrtps VR128:$src1,
                                 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
                                 imm:$src3))]>, OpSize;
}
let Constraints = "$src1 = $dst" in
  defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
let Predicates = [HasAVX] in
  defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;

def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
          (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
          Requires<[HasAVX]>;
def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
          (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
          Requires<[HasSSE41]>;
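// insertps immediate layout, for reference: bits [7:6] pick the source
// element (COUNT_S), bits [5:4] pick the destination element (COUNT_D), and
// bits [3:0] form a zero mask applied afterwards. E.g.
// "insertps $0x10, %xmm1, %xmm0" copies element 0 of %xmm1 into element 1 of
// %xmm0 with no zeroing.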
//===----------------------------------------------------------------------===//
// SSE4.1 - Round Instructions
//===----------------------------------------------------------------------===//

multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
                            X86MemOperand x86memop, RegisterClass RC,
                            PatFrag mem_frag32, PatFrag mem_frag64,
                            Intrinsic V4F32Int, Intrinsic V2F64Int> {
  // Vector intrinsic operation, reg
  def PSr : SS4AIi8<opcps, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                               "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, mem
  def PSm : Ii8<opcps, MRMSrcMem,
                (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                !strconcat(OpcodeStr,
                           "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                [(set RC:$dst,
                  (V4F32Int (mem_frag32 addr:$src1), imm:$src2))]>,
                TA, OpSize,
                Requires<[HasSSE41]>;
  // Vector intrinsic operation, reg
  def PDr : SS4AIi8<opcpd, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                               "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, mem
  def PDm : SS4AIi8<opcpd, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                               "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                      (V2F64Int (mem_frag64 addr:$src1), imm:$src2))]>,
                    OpSize;
}
multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
                                  RegisterClass RC, X86MemOperand x86memop,
                                  string OpcodeStr> {
  // Vector intrinsic operation, reg (assembler only)
  def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
                        (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                        !strconcat(OpcodeStr,
                                   "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                        []>, OpSize;

  // Vector intrinsic operation, mem
  def PSm_AVX : Ii8<opcps, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                               "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, TA, OpSize, Requires<[HasSSE41]>;

  // Vector intrinsic operation, reg
  def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
                        (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                        !strconcat(OpcodeStr,
                                   "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                        []>, OpSize;

  // Vector intrinsic operation, mem
  def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
                        (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                        !strconcat(OpcodeStr,
                                   "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                        []>, OpSize;
}
multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
                             string OpcodeStr,
                             Intrinsic F32Int,
                             Intrinsic F64Int, bit Is2Addr = 1> {
  // Intrinsic operation, reg.
  def SSr : SS4AIi8<opcss, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                       "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                       "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
        OpSize;

  // Intrinsic operation, mem.
  def SSm : SS4AIi8<opcss, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                       "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                       "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst,
              (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
        OpSize;

  // Intrinsic operation, reg.
  def SDr : SS4AIi8<opcsd, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                       "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                       "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
        OpSize;

  // Intrinsic operation, mem.
  def SDm : SS4AIi8<opcsd, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                       "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                       "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst,
              (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
        OpSize;
}
multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
                                   string OpcodeStr> {
  // Intrinsic operation, reg.
  def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                   "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;

  // Intrinsic operation, mem.
  def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                   "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;

  // Intrinsic operation, reg.
  def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                   "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;

  // Intrinsic operation, mem.
  def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                   "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;
}
// FP round - roundss, roundps, roundsd, roundpd
let Predicates = [HasAVX] in {
  // Intrinsic form
  defm VROUND  : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
                                  memopv4f32, memopv2f64,
                                  int_x86_sse41_round_ps,
                                  int_x86_sse41_round_pd>, VEX;
  defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
                                  memopv8f32, memopv4f64,
                                  int_x86_avx_round_ps_256,
                                  int_x86_avx_round_pd_256>, VEX;
  defm VROUND  : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
                                   int_x86_sse41_round_ss,
                                   int_x86_sse41_round_sd, 0>, VEX_4V;

  // Instructions for the assembler
  defm VROUND  : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
                 VEX;
  defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
                 VEX;
  defm VROUND  : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
}
defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
                              memopv4f32, memopv2f64,
                              int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
let Constraints = "$src1 = $dst" in
defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
                               int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
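// Rounding-control immediate, for reference: bits [1:0] select the mode
// (00 nearest-even, 01 toward -inf, 10 toward +inf, 11 truncate), bit 2
// defers to MXCSR.RC instead, and bit 3 suppresses the precision exception.
// E.g. "roundps $9, %xmm0, %xmm1" is a packed floor with inexact masked.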
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Bit Test
//===----------------------------------------------------------------------===//

// We lower ptest to this node in X86ISelLowering, primarily from the Intel
// intrinsic that corresponds to it.
let Defs = [EFLAGS], Predicates = [HasAVX] in {
def VPTESTrr  : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                      "vptest\t{$src2, $src1|$src1, $src2}",
                      [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
                      OpSize, VEX;
def VPTESTrm  : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                      "vptest\t{$src2, $src1|$src1, $src2}",
                      [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
                      OpSize, VEX;

def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
                      "vptest\t{$src2, $src1|$src1, $src2}",
                      [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
                      OpSize, VEX;
def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
                      "vptest\t{$src2, $src1|$src1, $src2}",
                      [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
                      OpSize, VEX;
}
let Defs = [EFLAGS] in {
def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                    "ptest \t{$src2, $src1|$src1, $src2}",
                    [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
                    OpSize;
def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                    "ptest \t{$src2, $src1|$src1, $src2}",
                    [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
                    OpSize;
}
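// ptest sets ZF when (src1 AND src2) is all zeroes and CF when
// ((NOT src1) AND src2) is all zeroes; e.g. "ptest %xmm0, %xmm0" followed by
// jz branches exactly when %xmm0 is entirely zero.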
// The bit test instructions below are AVX-only.
multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
  def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
                 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
                 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
  def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
                 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
                 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
                 OpSize, VEX;
}

let Defs = [EFLAGS], Predicates = [HasAVX] in {
defm VTESTPS  : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
defm VTESTPD  : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Misc Instructions
//===----------------------------------------------------------------------===//

def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
                   "popcnt{w}\t{$src, $dst|$dst, $src}",
                   [(set GR16:$dst, (ctpop GR16:$src))]>, OpSize, XS;
def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
                   "popcnt{w}\t{$src, $dst|$dst, $src}",
                   [(set GR16:$dst, (ctpop (loadi16 addr:$src)))]>, OpSize, XS;

def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
                   "popcnt{l}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (ctpop GR32:$src))]>, XS;
def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
                   "popcnt{l}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (ctpop (loadi32 addr:$src)))]>, XS;

def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (ctpop GR64:$src))]>, XS;
def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (ctpop (loadi64 addr:$src)))]>, XS;
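// For example, "popcnt{l} %eax, %ebx" writes the number of set bits of %eax
// into %ebx (ctpop); e.g. ctpop(0b1011) = 3.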
// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId128> {
  def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
  def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
}

let Predicates = [HasAVX] in
defm VPHMINPOSUW : SS41I_unop_rm_int_v16<0x41, "vphminposuw",
                                         int_x86_sse41_phminposuw>, VEX;
defm PHMINPOSUW  : SS41I_unop_rm_int_v16<0x41, "phminposuw",
                                         int_x86_sse41_phminposuw>;
/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId128, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src1, VR128:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                 (ins VR128:$src1, i128mem:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set VR128:$dst,
                   (IntId128 VR128:$src1,
                    (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
let Predicates = [HasAVX] in {
  let isCommutable = 0 in
  defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
                                      0>, VEX_4V;
  defm VPCMPEQQ  : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
                                      0>, VEX_4V;
  defm VPMINSB   : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
                                      0>, VEX_4V;
  defm VPMINSD   : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
                                      0>, VEX_4V;
  defm VPMINUD   : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
                                      0>, VEX_4V;
  defm VPMINUW   : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
                                      0>, VEX_4V;
  defm VPMAXSB   : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
                                      0>, VEX_4V;
  defm VPMAXSD   : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
                                      0>, VEX_4V;
  defm VPMAXUD   : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
                                      0>, VEX_4V;
  defm VPMAXUW   : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
                                      0>, VEX_4V;
  defm VPMULDQ   : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
                                      0>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
  let isCommutable = 0 in
  defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
  defm PCMPEQQ  : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
  defm PMINSB   : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
  defm PMINSD   : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
  defm PMINUD   : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
  defm PMINUW   : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
  defm PMAXSB   : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
  defm PMAXSD   : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
  defm PMAXUD   : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
  defm PMAXUW   : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
  defm PMULDQ   : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
}
def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
          (PCMPEQQrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
          (PCMPEQQrm VR128:$src1, addr:$src2)>;
/// SS48I_binop_rm - Simple SSE41 binary operator.
multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src1, VR128:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
                 OpSize;
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                 (ins VR128:$src1, i128mem:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set VR128:$dst, (OpNode VR128:$src1,
                                   (bc_v4i32 (memopv2i64 addr:$src2))))]>,
                 OpSize;
}
let Predicates = [HasAVX] in
  defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
                               Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
                               X86MemOperand x86memop, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
                    (ins RC:$src1, RC:$src2, u32u8imm:$src3),
                    !if(Is2Addr,
                        !strconcat(OpcodeStr,
                                   "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                        !strconcat(OpcodeStr,
                                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                    [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
                    OpSize;
  def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
                    (ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
                    !if(Is2Addr,
                        !strconcat(OpcodeStr,
                                   "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                        !strconcat(OpcodeStr,
                                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
                    [(set RC:$dst,
                      (IntId RC:$src1,
                       (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
                    OpSize;
}
let Predicates = [HasAVX] in {
  let isCommutable = 0 in {
  defm VBLENDPS  : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
                                       VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VBLENDPD  : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
                                       VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
                     int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
  defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
                     int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
  defm VPBLENDW  : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
                                       VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VMPSADBW  : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
                                       VR128, memopv16i8, i128mem, 0>, VEX_4V;
  }
  defm VDPPS  : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
                                    VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VDPPD  : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
                                    VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
                                    VR256, memopv32i8, i256mem, 0>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
  let isCommutable = 0 in {
  defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
                                     VR128, memopv16i8, i128mem>;
  defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
                                     VR128, memopv16i8, i128mem>;
  defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
                                     VR128, memopv16i8, i128mem>;
  defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
                                     VR128, memopv16i8, i128mem>;
  }
  defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
                                  VR128, memopv16i8, i128mem>;
  defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
                                  VR128, memopv16i8, i128mem>;
}
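// The blend immediate selects per element: bit i = 0 keeps element i of the
// destination, bit i = 1 takes it from the other source. E.g.
// "blendps $5, %xmm1, %xmm0" replaces elements 0 and 2 of %xmm0 with those
// of %xmm1.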
/// SS41I_quaternary_int_avx - AVX SSE 4.1 operations with four operands
let Predicates = [HasAVX] in {
  multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
                                      RegisterClass RC, X86MemOperand x86memop,
                                      PatFrag mem_frag, Intrinsic IntId> {
    def rr : I<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
               SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;

    def rm : I<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, x86memop:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               [(set RC:$dst,
                 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
                  RC:$src3))],
               SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
  }
}
defm VBLENDVPD  : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_blendvpd>;
defm VBLENDVPS  : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_blendvps>;
defm VPBLENDVB  : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_pblendvb>;
defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
                                           memopv32i8, int_x86_avx_blendv_pd_256>;
defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
                                           memopv32i8, int_x86_avx_blendv_ps_256>;
/// SS41I_ternary_int - SSE 4.1 ternary operator
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
  multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
    def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr,
                               "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
                    OpSize;

    def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2),
                    !strconcat(OpcodeStr,
                               "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst,
                      (IntId VR128:$src1,
                       (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
  }
}

defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
def : Pat<(X86pblendv VR128:$src1, VR128:$src2, XMM0),
          (PBLENDVBrr0 VR128:$src1, VR128:$src2)>;
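// The variable blends take their mask from XMM0 (implicit in the SSE forms):
// an element comes from the second operand when the most significant bit of
// the corresponding mask element is set, and from the destination otherwise.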
let Predicates = [HasAVX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vmovntdqa\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                        OpSize, VEX;
def MOVNTDQArm  : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "movntdqa\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                        OpSize;
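// movntdqa is a streaming (non-temporal) aligned load, chiefly useful for
// reading write-combining memory without disturbing the cache hierarchy.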
//===----------------------------------------------------------------------===//
// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//

/// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId128, bit Is2Addr = 1> {
  def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
                  (ins VR128:$src1, VR128:$src2),
                  !if(Is2Addr,
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                  [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                  OpSize;
  def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
                  (ins VR128:$src1, i128mem:$src2),
                  !if(Is2Addr,
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                  [(set VR128:$dst,
                    (IntId128 VR128:$src1,
                     (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
let Predicates = [HasAVX] in
  defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
                                     0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;

def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
          (PCMPGTQrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
          (PCMPGTQrm VR128:$src1, addr:$src2)>;
//===----------------------------------------------------------------------===//
// SSE4.2 - String/text Processing Instructions
//===----------------------------------------------------------------------===//

// Packed Compare Implicit Length Strings, Return Mask
multiclass pseudo_pcmpistrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2, i8imm:$src3),
                    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
                                       imm:$src3))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
                    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
                                       VR128:$src1, (load addr:$src2), imm:$src3))]>;
}
let Defs = [EFLAGS], usesCustomInserter = 1 in {
  defm PCMPISTRM128  : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
  defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
}
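// The pseudos are needed because the real instructions write their result to
// XMM0 implicitly; the custom inserter expands each pseudo into the real
// instruction plus a copy of XMM0 into the pattern's destination register.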
let Defs = [XMM0, EFLAGS], Predicates = [HasAVX] in {
  def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
  def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
}

let Defs = [XMM0, EFLAGS] in {
  def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
  def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
}
// Packed Compare Explicit Length Strings, Return Mask
multiclass pseudo_pcmpestrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src3, i8imm:$src5),
                    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                                       VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
                    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                                       VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
}

let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
  defm PCMPESTRM128  : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
  defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
}
let Predicates = [HasAVX],
    Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
  def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
  def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
}

let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
  def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
  def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
}
// Packed Compare Implicit Length Strings, Return Index
let Defs = [ECX, EFLAGS] in {
  multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
    def rr : SS42AI<0x63, MRMSrcReg, (outs),
                    (ins VR128:$src1, VR128:$src2, i8imm:$src3),
                    !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
                    [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
                     (implicit EFLAGS)]>, OpSize;
    def rm : SS42AI<0x63, MRMSrcMem, (outs),
                    (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
                    !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
                    [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
                     (implicit EFLAGS)]>, OpSize;
  }
}
let Predicates = [HasAVX] in {
defm VPCMPISTRI  : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
                   VEX;
defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
                   VEX;
defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
                   VEX;
defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
                   VEX;
defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
                   VEX;
defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
                   VEX;
}

defm PCMPISTRI  : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
// Packed Compare Explicit Length Strings, Return Index
let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
  multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
    def rr : SS42AI<0x61, MRMSrcReg, (outs),
                    (ins VR128:$src1, VR128:$src3, i8imm:$src5),
                    !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
                    [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
                     (implicit EFLAGS)]>, OpSize;
    def rm : SS42AI<0x61, MRMSrcMem, (outs),
                    (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
                    !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
                    [(set ECX,
                      (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
                     (implicit EFLAGS)]>, OpSize;
  }
}
let Predicates = [HasAVX] in {
defm VPCMPESTRI  : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
                   VEX;
defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
                   VEX;
defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
                   VEX;
defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
                   VEX;
defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
                   VEX;
defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
                   VEX;
}

defm PCMPESTRI  : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
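// The implicit-length (pcmpistr*) forms scan until a zero element terminates
// each string, while the explicit-length (pcmpestr*) forms take the lengths
// in EAX and EDX; all index variants return the match position in ECX and
// report summary conditions in EFLAGS.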
//===----------------------------------------------------------------------===//
// SSE4.2 - CRC Instructions
//===----------------------------------------------------------------------===//

// No CRC instructions have AVX equivalents.

// crc intrinsic instruction
// This set of instructions is only rm; the only difference is the size
// of r and m.
let Constraints = "$src1 = $dst" in {
  def CRC32r32m8  : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
                           (ins GR32:$src1, i8mem:$src2),
                           "crc32{b} \t{$src2, $src1|$src1, $src2}",
                           [(set GR32:$dst,
                             (int_x86_sse42_crc32_32_8 GR32:$src1,
                              (load addr:$src2)))]>;
  def CRC32r32r8  : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
                           (ins GR32:$src1, GR8:$src2),
                           "crc32{b} \t{$src2, $src1|$src1, $src2}",
                           [(set GR32:$dst,
                             (int_x86_sse42_crc32_32_8 GR32:$src1, GR8:$src2))]>;
  def CRC32r32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                           (ins GR32:$src1, i16mem:$src2),
                           "crc32{w} \t{$src2, $src1|$src1, $src2}",
                           [(set GR32:$dst,
                             (int_x86_sse42_crc32_32_16 GR32:$src1,
                              (load addr:$src2)))]>,
                           OpSize;
  def CRC32r32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                           (ins GR32:$src1, GR16:$src2),
                           "crc32{w} \t{$src2, $src1|$src1, $src2}",
                           [(set GR32:$dst,
                             (int_x86_sse42_crc32_32_16 GR32:$src1, GR16:$src2))]>,
                           OpSize;
  def CRC32r32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                           (ins GR32:$src1, i32mem:$src2),
                           "crc32{l} \t{$src2, $src1|$src1, $src2}",
                           [(set GR32:$dst,
                             (int_x86_sse42_crc32_32_32 GR32:$src1,
                              (load addr:$src2)))]>;
  def CRC32r32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                           (ins GR32:$src1, GR32:$src2),
                           "crc32{l} \t{$src2, $src1|$src1, $src2}",
                           [(set GR32:$dst,
                             (int_x86_sse42_crc32_32_32 GR32:$src1, GR32:$src2))]>;
  def CRC32r64m8  : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
                           (ins GR64:$src1, i8mem:$src2),
                           "crc32{b} \t{$src2, $src1|$src1, $src2}",
                           [(set GR64:$dst,
                             (int_x86_sse42_crc32_64_8 GR64:$src1,
                              (load addr:$src2)))]>,
                           REX_W;
  def CRC32r64r8  : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
                           (ins GR64:$src1, GR8:$src2),
                           "crc32{b} \t{$src2, $src1|$src1, $src2}",
                           [(set GR64:$dst,
                             (int_x86_sse42_crc32_64_8 GR64:$src1, GR8:$src2))]>,
                           REX_W;
  def CRC32r64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
                           (ins GR64:$src1, i64mem:$src2),
                           "crc32{q} \t{$src2, $src1|$src1, $src2}",
                           [(set GR64:$dst,
                             (int_x86_sse42_crc32_64_64 GR64:$src1,
                              (load addr:$src2)))]>,
                           REX_W;
  def CRC32r64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
                           (ins GR64:$src1, GR64:$src2),
                           "crc32{q} \t{$src2, $src1|$src1, $src2}",
                           [(set GR64:$dst,
                             (int_x86_sse42_crc32_64_64 GR64:$src1, GR64:$src2))]>,
                           REX_W;
}
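// Note: crc32 computes CRC-32C (the Castagnoli polynomial, 0x11EDC6F41), not
// the zlib/PNG CRC-32. A typical loop folds one chunk at a time, e.g.
//   crc32b (%rsi), %eax
// updates the running checksum in %eax with a byte loaded from memory.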
//===----------------------------------------------------------------------===//
// AES-NI Instructions
//===----------------------------------------------------------------------===//

multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
                             Intrinsic IntId128, bit Is2Addr = 1> {
  def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src1, VR128:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                 OpSize;
  def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
                 (ins VR128:$src1, i128mem:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set VR128:$dst,
                   (IntId128 VR128:$src1,
                    (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}

// Perform One Round of an AES Encryption/Decryption Flow
let Predicates = [HasAVX, HasAES] in {
  defm VAESENC     : AESI_binop_rm_int<0xDC, "vaesenc",
                       int_x86_aesni_aesenc, 0>, VEX_4V;
  defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
                       int_x86_aesni_aesenclast, 0>, VEX_4V;
  defm VAESDEC     : AESI_binop_rm_int<0xDE, "vaesdec",
                       int_x86_aesni_aesdec, 0>, VEX_4V;
  defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
                       int_x86_aesni_aesdeclast, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  defm AESENC     : AESI_binop_rm_int<0xDC, "aesenc",
                      int_x86_aesni_aesenc>;
  defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
                      int_x86_aesni_aesenclast>;
  defm AESDEC     : AESI_binop_rm_int<0xDE, "aesdec",
                      int_x86_aesni_aesdec>;
  defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
                      int_x86_aesni_aesdeclast>;
}

def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
          (AESENCrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
          (AESENCrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
          (AESENCLASTrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
          (AESENCLASTrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
          (AESDECrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
          (AESDECrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
          (AESDECLASTrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
          (AESDECLASTrm VR128:$src1, addr:$src2)>;
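
// Editorial illustration (hedged sketch): the aesenc/aesenclast defs above
// each perform one AES round and are exposed through <wmmintrin.h>.
//
//   #include <wmmintrin.h>   // requires -maes
//
//   __m128i aes_two_rounds(__m128i state, __m128i rk1, __m128i rk2) {
//     state = _mm_aesenc_si128(state, rk1);      // one middle round (AESENCrr)
//     return _mm_aesenclast_si128(state, rk2);   // final round (AESENCLASTrr)
//   }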

// Perform the AES InvMixColumn Transformation
let Predicates = [HasAVX, HasAES] in {
  def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst,
        (int_x86_aesni_aesimc VR128:$src1))]>,
      OpSize, VEX;
  def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst,
        (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
      OpSize, VEX;
}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
  (ins VR128:$src1),
  "aesimc\t{$src1, $dst|$dst, $src1}",
  [(set VR128:$dst,
    (int_x86_aesni_aesimc VR128:$src1))]>,
  OpSize;
def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
  (ins i128mem:$src1),
  "aesimc\t{$src1, $dst|$dst, $src1}",
  [(set VR128:$dst,
    (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
  OpSize;

// AES Round Key Generation Assist
let Predicates = [HasAVX, HasAES] in {
  def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
      OpSize, VEX;
  def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
                                       imm:$src2))]>,
      OpSize, VEX;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
  (ins VR128:$src1, i8imm:$src2),
  "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
  [(set VR128:$dst,
    (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
  OpSize;
def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
  (ins i128mem:$src1, i8imm:$src2),
  "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
  [(set VR128:$dst,
    (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
                                   imm:$src2))]>,
  OpSize;
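
// Editorial illustration (hedged sketch): key expansion reaches the
// aeskeygenassist defs above through the intrinsic below; the round-constant
// immediate must be a compile-time constant.
//
//   #include <wmmintrin.h>   // requires -maes
//
//   __m128i keygen_step(__m128i key) {
//     return _mm_aeskeygenassist_si128(key, 0x01);  // rcon = 0x01
//   }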

//===----------------------------------------------------------------------===//
// CLMUL Instructions
//===----------------------------------------------------------------------===//

// Carry-less Multiplication instructions
let Constraints = "$src1 = $dst" in {
def PCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           []>;

def PCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           []>;
}

// AVX carry-less Multiplication instructions
def VPCLMULQDQrr : AVXCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           []>;

def VPCLMULQDQrm : AVXCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           []>;

multiclass pclmul_alias<string asm, int immop> {
  def : InstAlias<!strconcat("pclmul", asm,
                             "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrr VR128:$dst, VR128:$src, immop)>;

  def : InstAlias<!strconcat("pclmul", asm,
                             "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrm VR128:$dst, i128mem:$src, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop)>;
}
defm : pclmul_alias<"hqhq", 0x11>;
defm : pclmul_alias<"hqlq", 0x01>;
defm : pclmul_alias<"lqhq", 0x10>;
defm : pclmul_alias<"lqlq", 0x00>;
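
// Editorial illustration (hedged sketch): the pclmulqdq immediate selects
// which 64-bit half of each source is multiplied; 0x00 corresponds to the
// "lqlq" alias above (the low qword of both operands).
//
//   #include <wmmintrin.h>   // requires -mpclmul
//
//   __m128i clmul_lo_lo(__m128i a, __m128i b) {
//     return _mm_clmulepi64_si128(a, b, 0x00);  // low qword of each source
//   }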

//===----------------------------------------------------------------------===//
// AVX Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VBROADCAST - Load from memory and broadcast to all elements of the
// destination operand

class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
                    X86MemOperand x86memop, Intrinsic Int> :
  AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
        !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
        [(set RC:$dst, (Int addr:$src))]>, VEX;

def VBROADCASTSS   : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
                                   int_x86_avx_vbroadcastss>;
def VBROADCASTSSY  : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
                                   int_x86_avx_vbroadcastss_256>;
def VBROADCASTSD   : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
                                   int_x86_avx_vbroadcast_sd_256>;
def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
                                   int_x86_avx_vbroadcastf128_pd_256>;

def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
          (VBROADCASTF128 addr:$src)>;
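
// Editorial illustration (hedged sketch): the broadcast forms above load one
// element from memory and replicate it across the destination register.
//
//   #include <immintrin.h>   // requires -mavx
//
//   __m256 splat8(const float *p) {
//     return _mm256_broadcast_ss(p);  // expected to select VBROADCASTSSY
//   }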

//===----------------------------------------------------------------------===//
// VINSERTF128 - Insert packed floating-point values

def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;

def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;

def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
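
// Editorial illustration (hedged sketch): vinsertf128 replaces one 128-bit
// lane of a 256-bit value; the immediate picks the lane.
//
//   #include <immintrin.h>   // requires -mavx
//
//   __m256 set_high_lane(__m256 acc, __m128 v) {
//     return _mm256_insertf128_ps(acc, v, 1);  // 1 = upper 128-bit lane
//   }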

// Special COPY patterns
def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;

//===----------------------------------------------------------------------===//
// VEXTRACTF128 - Extract packed floating-point values

def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;

def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;

def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v4f32 (VEXTRACTF128rr
                    (v8f32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v2f64 (VEXTRACTF128rr
                    (v4f64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v4i32 (VEXTRACTF128rr
                    (v8i32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v2i64 (VEXTRACTF128rr
                    (v4i64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v8i16 (VEXTRACTF128rr
                    (v16i16 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v16i8 (VEXTRACTF128rr
                    (v32i8 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;

// Special COPY patterns
def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (i32 0))),
          (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (i32 0))),
          (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;

def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (i32 0))),
          (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (i32 0))),
          (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;
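
// Editorial illustration (hedged sketch): vextractf128 is the inverse of
// vinsertf128, pulling one 128-bit lane out of a 256-bit register.
//
//   #include <immintrin.h>   // requires -mavx
//
//   __m128 get_high_lane(__m256 v) {
//     return _mm256_extractf128_ps(v, 1);  // 1 = upper 128-bit lane
//   }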

//===----------------------------------------------------------------------===//
// VMASKMOV - Conditional SIMD Packed Loads and Stores

multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
                          Intrinsic IntLd, Intrinsic IntLd256,
                          Intrinsic IntSt, Intrinsic IntSt256,
                          PatFrag pf128, PatFrag pf256> {
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, f128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
             VEX_4V;
  def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, f256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V;
  def mr  : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
}

defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
                                 int_x86_avx_maskload_ps,
                                 int_x86_avx_maskload_ps_256,
                                 int_x86_avx_maskstore_ps,
                                 int_x86_avx_maskstore_ps_256,
                                 memopv4f32, memopv8f32>;
defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
                                 int_x86_avx_maskload_pd,
                                 int_x86_avx_maskload_pd_256,
                                 int_x86_avx_maskstore_pd,
                                 int_x86_avx_maskstore_pd_256,
                                 memopv2f64, memopv4f64>;
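
// Editorial illustration (hedged sketch): the masked loads/stores above touch
// only lanes whose mask element has its sign bit set, as exposed by the
// maskload/maskstore intrinsics.
//
//   #include <immintrin.h>   // requires -mavx
//
//   __m256 masked_load(const float *p, __m256i mask) {
//     return _mm256_maskload_ps(p, mask);  // expected to select VMASKMOVPSYrm
//   }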

//===----------------------------------------------------------------------===//
// VPERM - Permute Floating-Point Values

multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
                      RegisterClass RC, X86MemOperand x86memop_f,
                      X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
                      Intrinsic IntVar, Intrinsic IntImm> {
  def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
  def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop_i:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;

  def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
  def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
             (ins x86memop_f:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
}

defm VPERMILPS  : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
                             memopv4f32, memopv4i32,
                             int_x86_avx_vpermilvar_ps,
                             int_x86_avx_vpermil_ps>;
defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
                             memopv8f32, memopv8i32,
                             int_x86_avx_vpermilvar_ps_256,
                             int_x86_avx_vpermil_ps_256>;
defm VPERMILPD  : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
                             memopv2f64, memopv2i64,
                             int_x86_avx_vpermilvar_pd,
                             int_x86_avx_vpermil_pd>;
defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
                             memopv4f64, memopv4i64,
                             int_x86_avx_vpermilvar_pd_256,
                             int_x86_avx_vpermil_pd_256>;

def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;

def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;

def : Pat<(int_x86_avx_vperm2f128_ps_256
                  VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256
                  VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256
                  VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;

// Shuffle with VPERMIL instructions
def : Pat<(v8f32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPSYri VR256:$src1, imm:$imm)>;
def : Pat<(v4f64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPDYri VR256:$src1, imm:$imm)>;
def : Pat<(v8i32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPSYri VR256:$src1, imm:$imm)>;
def : Pat<(v4i64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPDYri VR256:$src1, imm:$imm)>;
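
// Editorial illustration (hedged sketch): vpermilps shuffles within each
// 128-bit lane, while vperm2f128 rearranges whole lanes.
//
//   #include <immintrin.h>   // requires -mavx
//
//   __m256 reverse_within_lanes(__m256 v) {
//     return _mm256_permute_ps(v, _MM_SHUFFLE(0, 1, 2, 3));  // VPERMILPSYri
//   }
//
//   __m256 swap_lanes(__m256 v) {
//     return _mm256_permute2f128_ps(v, v, 0x01);  // VPERM2F128rr
//   }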

//===----------------------------------------------------------------------===//
// VZERO - Zero YMM registers

// Zero All YMM registers
def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
                 [(int_x86_avx_vzeroall)]>, VEX, VEX_L, Requires<[HasAVX]>;

// Zero Upper bits of YMM registers
def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
                   [(int_x86_avx_vzeroupper)]>, VEX, Requires<[HasAVX]>;
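
// Editorial illustration (hedged sketch): vzeroupper avoids AVX/SSE
// transition penalties; it is normally emitted by the compiler but is also
// exposed as an intrinsic.
//
//   #include <immintrin.h>   // requires -mavx
//
//   void end_of_avx_region(void) {
//     _mm256_zeroupper();  // selects VZEROUPPER
//   }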

//===----------------------------------------------------------------------===//
// SSE Shuffle pattern fragments
//===----------------------------------------------------------------------===//

// This is part of a "work in progress" refactoring. The idea is that all
// vector shuffles are going to be translated into target specific nodes and
// directly matched by the patterns below (which can be changed along the way).
// The AVX versions of some but not all of them are described here, and more
// should come in the near future.

// Shuffle with PSHUFD instruction folding loads. The first two patterns match
// SSE2 loads, which are always promoted to v2i64. The last one should match
// the SSE1 case, where the only legal load is v4f32; but PSHUFD requires
// SSE2, so how did this ever work? Anyway, the pattern will remain here until
// we investigate further.
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
                                   (i8 imm:$imm))),
          (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
                                   (i8 imm:$imm))),
          (PSHUFDmi addr:$src1, imm:$imm)>;
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
                                   (i8 imm:$imm))),
          (PSHUFDmi addr:$src1, imm:$imm)>; // FIXME: has this ever worked?

// Shuffle with PSHUFD instruction.
def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (PSHUFDri VR128:$src1, imm:$imm)>;

def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (PSHUFDri VR128:$src1, imm:$imm)>;
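
// Editorial illustration (hedged sketch): pshufd reorders the four 32-bit
// elements according to a 2-bit-per-element immediate.
//
//   #include <emmintrin.h>   // requires -msse2
//
//   __m128i broadcast_elem0(__m128i v) {
//     return _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 0, 0, 0));  // PSHUFDri
//   }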

// Shuffle with SHUFPD instruction.
def : Pat<(v2f64 (X86Shufps VR128:$src1,
                    (memopv2f64 addr:$src2), (i8 imm:$imm))),
          (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Shufps VR128:$src1,
                    (memopv2f64 addr:$src2), (i8 imm:$imm))),
          (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;

def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;

// Shuffle with SHUFPS instruction.
def : Pat<(v4f32 (X86Shufps VR128:$src1,
                    (memopv4f32 addr:$src2), (i8 imm:$imm))),
          (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Shufps VR128:$src1,
                    (memopv4f32 addr:$src2), (i8 imm:$imm))),
          (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;

def : Pat<(v4i32 (X86Shufps VR128:$src1,
                    (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
          (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86Shufps VR128:$src1,
                    (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
          (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
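
// Editorial illustration (hedged sketch): shufps picks two elements from each
// source; the low two destination elements come from $src1 and the high two
// from $src2.
//
//   #include <xmmintrin.h>   // requires -msse
//
//   __m128 concat_low_halves(__m128 a, __m128 b) {
//     // result = { a[0], a[1], b[0], b[1] }, expected to select SHUFPSrri
//     return _mm_shuffle_ps(a, b, _MM_SHUFFLE(1, 0, 1, 0));
//   }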

// Shuffle with MOVHLPS instruction
def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with MOVDDUP instruction
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v2f64
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
          (MOVDDUPrm addr:$src)>;

// Shuffle with UNPCKLPS
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
          (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, (memopv8f32 addr:$src2))),
          (VUNPCKLPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
          (UNPCKLPSrm VR128:$src1, addr:$src2)>;

def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
          (VUNPCKLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
          (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
          (UNPCKLPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with UNPCKHPS
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
          (VUNPCKHPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
          (UNPCKHPSrm VR128:$src1, addr:$src2)>;

def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
          (VUNPCKHPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
          (UNPCKHPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with VUNPCKHPSY
def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, (memopv8f32 addr:$src2))),
          (VUNPCKHPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
          (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;

// Shuffle with UNPCKLPD
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
          (VUNPCKLPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, (memopv4f64 addr:$src2))),
          (VUNPCKLPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
          (UNPCKLPDrm VR128:$src1, addr:$src2)>;

def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
          (VUNPCKLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
          (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
          (UNPCKLPDrr VR128:$src1, VR128:$src2)>;

// Shuffle with UNPCKHPD
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
          (VUNPCKHPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
          (UNPCKHPDrm VR128:$src1, addr:$src2)>;

def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
          (VUNPCKHPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
          (UNPCKHPDrr VR128:$src1, VR128:$src2)>;

// Shuffle with VUNPCKHPDY
def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, (memopv4f64 addr:$src2))),
          (VUNPCKHPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
          (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;

// Shuffle with MOVLHPS
def : Pat<(X86Movlhps VR128:$src1,
                    (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
          (MOVHPSrm VR128:$src1, addr:$src2)>;
def : Pat<(X86Movlhps VR128:$src1,
                    (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
          (MOVHPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
          (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;

// FIXME: Instead of X86Movddup, there should be an X86Unpcklpd here, the
// problem is during lowering, where it's not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(v2f64 (X86Movddup VR128:$src)),
          (UNPCKLPDrr VR128:$src, VR128:$src)>;

// Shuffle with MOVLHPD
def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVHPDrm VR128:$src1, addr:$src2)>;

// FIXME: Instead of X86Unpcklpd, there should be an X86Movlhpd here, the
// problem is during lowering, where it's not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVHPDrm VR128:$src1, addr:$src2)>;

// Shuffle with MOVSS
def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
          (MOVSSrr VR128:$src1, FR32:$src2)>;
def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4i32 VR128:$src1),
                   (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4f32 VR128:$src1),
                   (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
// FIXME: Instead of a X86Movss there should be a X86Movlps here, the problem
// is during lowering, where it's not possible to recognize the load fold
// because it has two uses through a bitcast. One use disappears at isel time
// and the fold opportunity reappears.
def : Pat<(X86Movss VR128:$src1,
                    (bc_v4i32 (v2i64 (load addr:$src2)))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;

// Shuffle with MOVSD
def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
          (MOVSDrr VR128:$src1, FR64:$src2)>;
def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2i64 VR128:$src1),
                   (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2f64 VR128:$src1),
                   (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;

// Shuffle with PSHUFHW
def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
          (PSHUFHWri VR128:$src, imm:$imm)>;
def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
          (PSHUFHWmi addr:$src, imm:$imm)>;

// Shuffle with PSHUFLW
def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
          (PSHUFLWri VR128:$src, imm:$imm)>;
def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
          (PSHUFLWmi addr:$src, imm:$imm)>;

// Shuffle with MOVLPS
def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(X86Movlps VR128:$src1,
                    (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
// FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
// is during lowering, where it's not possible to recognize the load fold
// because it has two uses through a bitcast. One use disappears at isel time
// and the fold opportunity reappears.
def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;

def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;

// Shuffle with MOVLPD
def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2f64 (X86Movlpd VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;

// Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD
def : Pat<(store (f64 (vector_extract
          (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))), addr:$dst),
          (MOVHPSmr addr:$dst, VR128:$src)>;
def : Pat<(store (f64 (vector_extract
          (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))), addr:$dst),
          (MOVHPDmr addr:$dst, VR128:$src)>;

def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v4i32 (X86Movlps
          (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;

def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;