1 //====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
17 //===----------------------------------------------------------------------===//
18 // SSE 1 & 2 Instructions Classes
19 //===----------------------------------------------------------------------===//
21 /// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
22 multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
23 RegisterClass RC, X86MemOperand x86memop,
25 let isCommutable = 1 in {
26 def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
28 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
29 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
30 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
32 def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
34 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
35 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
36 [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
39 /// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
40 multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
41 string asm, string SSEVer, string FPSizeStr,
42 Operand memopr, ComplexPattern mem_cpat,
44 def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
46 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
47 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
48 [(set RC:$dst, (!cast<Intrinsic>(
49 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
50 RC:$src1, RC:$src2))]>;
51 def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
53 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
54 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
55 [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
56 SSEVer, "_", OpcodeStr, FPSizeStr))
57 RC:$src1, mem_cpat:$src2))]>;
60 /// sse12_fp_packed - SSE 1 & 2 packed instructions class
61 multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
62 RegisterClass RC, ValueType vt,
63 X86MemOperand x86memop, PatFrag mem_frag,
64 Domain d, bit Is2Addr = 1> {
65 let isCommutable = 1 in
66 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
68 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
69 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
70 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
72 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
74 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
75 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
76 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
79 /// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
80 multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
81 string OpcodeStr, X86MemOperand x86memop,
82 list<dag> pat_rr, list<dag> pat_rm,
84 let isCommutable = 1 in
85 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
87 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
88 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
90 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
92 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
93 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
97 /// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
98 multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
99 string asm, string SSEVer, string FPSizeStr,
100 X86MemOperand x86memop, PatFrag mem_frag,
101 Domain d, bit Is2Addr = 1> {
102 def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
104 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
105 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
106 [(set RC:$dst, (!cast<Intrinsic>(
107 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
108 RC:$src1, RC:$src2))], d>;
109 def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
111 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
112 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
113 [(set RC:$dst, (!cast<Intrinsic>(
114 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
115 RC:$src1, (mem_frag addr:$src2)))], d>;
118 //===----------------------------------------------------------------------===//
119 // SSE 1 & 2 - Move Instructions
120 //===----------------------------------------------------------------------===//
122 class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
123 SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
124 [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;
126 // Loading from memory automatically zeroing upper bits.
127 class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
128 PatFrag mem_pat, string OpcodeStr> :
129 SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
130 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
131 [(set RC:$dst, (mem_pat addr:$src))]>;
133 // Move Instructions. Register-to-register movss/movsd is not used for FR32/64
134 // register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
135 // is used instead. Register-to-register movss/movsd is not modeled as an
136 // INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
137 // in terms of a copy, and just mentioned, we don't use movss/movsd for copies.
138 def VMOVSSrr : sse12_move_rr<FR32, v4f32,
139 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
140 def VMOVSDrr : sse12_move_rr<FR64, v2f64,
141 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;
143 let canFoldAsLoad = 1, isReMaterializable = 1 in {
144 def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;
146 let AddedComplexity = 20 in
147 def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
150 let Constraints = "$src1 = $dst" in {
151 def MOVSSrr : sse12_move_rr<FR32, v4f32,
152 "movss\t{$src2, $dst|$dst, $src2}">, XS;
153 def MOVSDrr : sse12_move_rr<FR64, v2f64,
154 "movsd\t{$src2, $dst|$dst, $src2}">, XD;
157 let canFoldAsLoad = 1, isReMaterializable = 1 in {
158 def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;
160 let AddedComplexity = 20 in
161 def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
164 let AddedComplexity = 15 in {
165 // Extract the low 32-bit value from one vector and insert it into another.
166 def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
167 (MOVSSrr (v4f32 VR128:$src1),
168 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
169 // Extract the low 64-bit value from one vector and insert it into another.
170 def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
171 (MOVSDrr (v2f64 VR128:$src1),
172 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
175 // Implicitly promote a 32-bit scalar to a vector.
176 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
177 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
178 // Implicitly promote a 64-bit scalar to a vector.
179 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
180 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
181 // Implicitly promote a 32-bit scalar to a vector.
182 def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
183 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
184 // Implicitly promote a 64-bit scalar to a vector.
185 def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
186 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
188 let AddedComplexity = 20 in {
189 // MOVSSrm zeros the high parts of the register; represent this
190 // with SUBREG_TO_REG.
191 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
192 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
193 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
194 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
195 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
196 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
197 // MOVSDrm zeros the high parts of the register; represent this
198 // with SUBREG_TO_REG.
199 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
200 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
201 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
202 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
203 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
204 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
205 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
206 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
207 def : Pat<(v2f64 (X86vzload addr:$src)),
208 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
211 // Store scalar value to memory.
212 def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
213 "movss\t{$src, $dst|$dst, $src}",
214 [(store FR32:$src, addr:$dst)]>;
215 def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
216 "movsd\t{$src, $dst|$dst, $src}",
217 [(store FR64:$src, addr:$dst)]>;
219 def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
220 "movss\t{$src, $dst|$dst, $src}",
221 [(store FR32:$src, addr:$dst)]>, XS, VEX;
222 def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
223 "movsd\t{$src, $dst|$dst, $src}",
224 [(store FR64:$src, addr:$dst)]>, XD, VEX;
226 // Extract and store.
227 def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
230 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
231 def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
234 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
236 // Move Aligned/Unaligned floating point values
237 multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
238 X86MemOperand x86memop, PatFrag ld_frag,
239 string asm, Domain d,
240 bit IsReMaterializable = 1> {
241 let neverHasSideEffects = 1 in
242 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
243 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
244 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
245 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
246 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
247 [(set RC:$dst, (ld_frag addr:$src))], d>;
250 defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
251 "movaps", SSEPackedSingle>, VEX;
252 defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
253 "movapd", SSEPackedDouble>, OpSize, VEX;
254 defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
255 "movups", SSEPackedSingle>, VEX;
256 defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
257 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
259 defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
260 "movaps", SSEPackedSingle>, VEX;
261 defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
262 "movapd", SSEPackedDouble>, OpSize, VEX;
263 defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
264 "movups", SSEPackedSingle>, VEX;
265 defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
266 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
267 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
268 "movaps", SSEPackedSingle>, TB;
269 defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
270 "movapd", SSEPackedDouble>, TB, OpSize;
271 defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
272 "movups", SSEPackedSingle>, TB;
273 defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
274 "movupd", SSEPackedDouble, 0>, TB, OpSize;
276 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
277 "movaps\t{$src, $dst|$dst, $src}",
278 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
279 def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
280 "movapd\t{$src, $dst|$dst, $src}",
281 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
282 def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
283 "movups\t{$src, $dst|$dst, $src}",
284 [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
285 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
286 "movupd\t{$src, $dst|$dst, $src}",
287 [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
288 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
289 "movaps\t{$src, $dst|$dst, $src}",
290 [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
291 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
292 "movapd\t{$src, $dst|$dst, $src}",
293 [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
294 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
295 "movups\t{$src, $dst|$dst, $src}",
296 [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
297 def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
298 "movupd\t{$src, $dst|$dst, $src}",
299 [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
301 def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
302 def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
303 (VMOVUPSYmr addr:$dst, VR256:$src)>;
305 def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
306 def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
307 (VMOVUPDYmr addr:$dst, VR256:$src)>;
309 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
310 "movaps\t{$src, $dst|$dst, $src}",
311 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
312 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
313 "movapd\t{$src, $dst|$dst, $src}",
314 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
315 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
316 "movups\t{$src, $dst|$dst, $src}",
317 [(store (v4f32 VR128:$src), addr:$dst)]>;
318 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
319 "movupd\t{$src, $dst|$dst, $src}",
320 [(store (v2f64 VR128:$src), addr:$dst)]>;
322 // Intrinsic forms of MOVUPS/D load and store
323 def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
324 (ins f128mem:$dst, VR128:$src),
325 "movups\t{$src, $dst|$dst, $src}",
326 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
327 def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
328 (ins f128mem:$dst, VR128:$src),
329 "movupd\t{$src, $dst|$dst, $src}",
330 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
332 def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
333 "movups\t{$src, $dst|$dst, $src}",
334 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
335 def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
336 "movupd\t{$src, $dst|$dst, $src}",
337 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
339 // Move Low/High packed floating point values
340 multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
341 PatFrag mov_frag, string base_opc,
343 def PSrm : PI<opc, MRMSrcMem,
344 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
345 !strconcat(base_opc, "s", asm_opr),
348 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
349 SSEPackedSingle>, TB;
351 def PDrm : PI<opc, MRMSrcMem,
352 (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
353 !strconcat(base_opc, "d", asm_opr),
354 [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
355 (scalar_to_vector (loadf64 addr:$src2)))))],
356 SSEPackedDouble>, TB, OpSize;
359 let AddedComplexity = 20 in {
360 defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
361 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
362 defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
363 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
365 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
366 defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
367 "\t{$src2, $dst|$dst, $src2}">;
368 defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
369 "\t{$src2, $dst|$dst, $src2}">;
372 def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
373 "movlps\t{$src, $dst|$dst, $src}",
374 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
375 (iPTR 0))), addr:$dst)]>, VEX;
376 def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
377 "movlpd\t{$src, $dst|$dst, $src}",
378 [(store (f64 (vector_extract (v2f64 VR128:$src),
379 (iPTR 0))), addr:$dst)]>, VEX;
380 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
381 "movlps\t{$src, $dst|$dst, $src}",
382 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
383 (iPTR 0))), addr:$dst)]>;
384 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
385 "movlpd\t{$src, $dst|$dst, $src}",
386 [(store (f64 (vector_extract (v2f64 VR128:$src),
387 (iPTR 0))), addr:$dst)]>;
389 // v2f64 extract element 1 is always custom lowered to unpack high to low
390 // and extract element 0 so the non-store version isn't too horrible.
391 def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
392 "movhps\t{$src, $dst|$dst, $src}",
393 [(store (f64 (vector_extract
394 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
395 (undef)), (iPTR 0))), addr:$dst)]>,
397 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
398 "movhpd\t{$src, $dst|$dst, $src}",
399 [(store (f64 (vector_extract
400 (v2f64 (unpckh VR128:$src, (undef))),
401 (iPTR 0))), addr:$dst)]>,
403 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
404 "movhps\t{$src, $dst|$dst, $src}",
405 [(store (f64 (vector_extract
406 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
407 (undef)), (iPTR 0))), addr:$dst)]>;
408 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
409 "movhpd\t{$src, $dst|$dst, $src}",
410 [(store (f64 (vector_extract
411 (v2f64 (unpckh VR128:$src, (undef))),
412 (iPTR 0))), addr:$dst)]>;
414 let AddedComplexity = 20 in {
415 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
416 (ins VR128:$src1, VR128:$src2),
417 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
419 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
421 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
422 (ins VR128:$src1, VR128:$src2),
423 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
425 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
428 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
429 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
430 (ins VR128:$src1, VR128:$src2),
431 "movlhps\t{$src2, $dst|$dst, $src2}",
433 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
434 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
435 (ins VR128:$src1, VR128:$src2),
436 "movhlps\t{$src2, $dst|$dst, $src2}",
438 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
441 def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
442 (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
443 let AddedComplexity = 20 in {
444 def : Pat<(v4f32 (movddup VR128:$src, (undef))),
445 (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
446 def : Pat<(v2i64 (movddup VR128:$src, (undef))),
447 (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
450 //===----------------------------------------------------------------------===//
451 // SSE 1 & 2 - Conversion Instructions
452 //===----------------------------------------------------------------------===//
454 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
455 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
457 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
458 [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
459 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
460 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
463 multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
464 X86MemOperand x86memop, string asm> {
465 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
467 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
471 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
472 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
473 string asm, Domain d> {
474 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
475 [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
476 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
477 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
480 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
481 X86MemOperand x86memop, string asm> {
482 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
483 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
484 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
485 (ins DstRC:$src1, x86memop:$src),
486 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
489 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
490 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
491 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
492 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
494 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
495 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
496 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
497 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
500 // The assembler can recognize rr 64-bit instructions by seeing a rxx
501 // register, but the same isn't true when only using memory operands,
502 // provide other assembly "l" and "q" forms to address this explicitly
503 // where appropriate to do so.
504 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
506 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
508 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
510 defm VCVTSI2SDL : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
512 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
515 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
516 "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
517 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
518 "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
519 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
520 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
521 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
522 "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
523 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
524 "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
525 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
526 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
527 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
528 "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
529 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
530 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
532 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
533 // and/or XMM operand(s).
535 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
536 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
538 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
539 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
540 [(set DstRC:$dst, (Int SrcRC:$src))]>;
541 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
542 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
543 [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
546 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
547 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
548 PatFrag ld_frag, string asm, bit Is2Addr = 1> {
549 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
551 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
552 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
553 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
554 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
555 (ins DstRC:$src1, x86memop:$src2),
557 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
558 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
559 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
562 defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
563 f32mem, load, "cvtss2si">, XS, VEX;
564 defm Int_VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
565 int_x86_sse_cvtss2si64, f32mem, load, "cvtss2si">,
567 defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
568 f128mem, load, "cvtsd2si">, XD, VEX;
569 defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
570 int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
573 // FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_
574 // Get rid of this hack or rename the intrinsics, there are several
575 // intructions that only match with the intrinsic form, why create duplicates
576 // to let them be recognized by the assembler?
577 defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
578 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
579 defm VCVTSD2SI64 : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
580 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
581 defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
582 f32mem, load, "cvtss2si">, XS;
583 defm Int_CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
584 f32mem, load, "cvtss2si{q}">, XS, REX_W;
585 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
586 f128mem, load, "cvtsd2si{l}">, XD;
587 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
588 f128mem, load, "cvtsd2si{q}">, XD, REX_W;
591 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
592 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
593 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
594 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
596 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
597 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
598 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
599 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
602 let Constraints = "$src1 = $dst" in {
603 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
604 int_x86_sse_cvtsi2ss, i32mem, loadi32,
606 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
607 int_x86_sse_cvtsi642ss, i64mem, loadi64,
608 "cvtsi2ss{q}">, XS, REX_W;
609 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
610 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
612 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
613 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
614 "cvtsi2sd">, XD, REX_W;
619 // Aliases for intrinsics
620 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
621 f32mem, load, "cvttss2si">, XS, VEX;
622 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
623 int_x86_sse_cvttss2si64, f32mem, load,
624 "cvttss2si">, XS, VEX, VEX_W;
625 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
626 f128mem, load, "cvttsd2si">, XD, VEX;
627 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
628 int_x86_sse2_cvttsd2si64, f128mem, load,
629 "cvttsd2si">, XD, VEX, VEX_W;
630 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
631 f32mem, load, "cvttss2si">, XS;
632 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
633 int_x86_sse_cvttss2si64, f32mem, load,
634 "cvttss2si{q}">, XS, REX_W;
635 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
636 f128mem, load, "cvttsd2si">, XD;
637 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
638 int_x86_sse2_cvttsd2si64, f128mem, load,
639 "cvttsd2si{q}">, XD, REX_W;
641 let Pattern = []<dag> in {
642 defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
643 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
644 defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
645 "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
647 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
648 "cvtdq2ps\t{$src, $dst|$dst, $src}",
649 SSEPackedSingle>, TB, VEX;
650 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
651 "cvtdq2ps\t{$src, $dst|$dst, $src}",
652 SSEPackedSingle>, TB, VEX;
654 let Pattern = []<dag> in {
655 defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
656 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
657 defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
658 "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
659 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
660 "cvtdq2ps\t{$src, $dst|$dst, $src}",
661 SSEPackedSingle>, TB; /* PD SSE3 form is avaiable */
666 // Convert scalar double to scalar single
667 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
668 (ins FR64:$src1, FR64:$src2),
669 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
671 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
672 (ins FR64:$src1, f64mem:$src2),
673 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
674 []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
675 def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
678 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
679 "cvtsd2ss\t{$src, $dst|$dst, $src}",
680 [(set FR32:$dst, (fround FR64:$src))]>;
681 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
682 "cvtsd2ss\t{$src, $dst|$dst, $src}",
683 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
684 Requires<[HasSSE2, OptForSize]>;
686 defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
687 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
689 let Constraints = "$src1 = $dst" in
690 defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
691 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;
693 // Convert scalar single to scalar double
694 // SSE2 instructions with XS prefix
695 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
696 (ins FR32:$src1, FR32:$src2),
697 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
698 []>, XS, Requires<[HasAVX]>, VEX_4V;
699 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
700 (ins FR32:$src1, f32mem:$src2),
701 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
702 []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;
703 def : Pat<(f64 (fextend FR32:$src)), (VCVTSS2SDrr FR32:$src, FR32:$src)>,
706 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
707 "cvtss2sd\t{$src, $dst|$dst, $src}",
708 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
710 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
711 "cvtss2sd\t{$src, $dst|$dst, $src}",
712 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
713 Requires<[HasSSE2, OptForSize]>;
715 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
716 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
717 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
718 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
719 VR128:$src2))]>, XS, VEX_4V,
721 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
722 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
723 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
724 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
725 (load addr:$src2)))]>, XS, VEX_4V,
727 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
728 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
729 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
730 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
731 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
734 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
735 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
736 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
737 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
738 (load addr:$src2)))]>, XS,
742 def : Pat<(extloadf32 addr:$src),
743 (CVTSS2SDrr (MOVSSrm addr:$src))>,
744 Requires<[HasSSE2, OptForSpeed]>;
746 // Convert doubleword to packed single/double fp
747 // SSE2 instructions without OpSize prefix
748 def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
749 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
750 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
751 TB, VEX, Requires<[HasAVX]>;
752 def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
753 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
754 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
755 (bitconvert (memopv2i64 addr:$src))))]>,
756 TB, VEX, Requires<[HasAVX]>;
757 def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
758 "cvtdq2ps\t{$src, $dst|$dst, $src}",
759 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
760 TB, Requires<[HasSSE2]>;
761 def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
762 "cvtdq2ps\t{$src, $dst|$dst, $src}",
763 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
764 (bitconvert (memopv2i64 addr:$src))))]>,
765 TB, Requires<[HasSSE2]>;
767 // FIXME: why the non-intrinsic version is described as SSE3?
768 // SSE2 instructions with XS prefix
769 def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
770 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
771 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
772 XS, VEX, Requires<[HasAVX]>;
773 def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
774 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
775 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
776 (bitconvert (memopv2i64 addr:$src))))]>,
777 XS, VEX, Requires<[HasAVX]>;
778 def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
779 "cvtdq2pd\t{$src, $dst|$dst, $src}",
780 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
781 XS, Requires<[HasSSE2]>;
782 def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
783 "cvtdq2pd\t{$src, $dst|$dst, $src}",
784 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
785 (bitconvert (memopv2i64 addr:$src))))]>,
786 XS, Requires<[HasSSE2]>;
789 // Convert packed single/double fp to doubleword
790 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
791 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
792 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
793 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
794 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
795 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
796 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
797 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
798 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
799 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
800 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
801 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
803 def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
804 "cvtps2dq\t{$src, $dst|$dst, $src}",
805 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
807 def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
809 "cvtps2dq\t{$src, $dst|$dst, $src}",
810 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
811 (memop addr:$src)))]>, VEX;
812 def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
813 "cvtps2dq\t{$src, $dst|$dst, $src}",
814 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
815 def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
816 "cvtps2dq\t{$src, $dst|$dst, $src}",
817 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
818 (memop addr:$src)))]>;
820 // SSE2 packed instructions with XD prefix
821 def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
822 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
823 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
824 XD, VEX, Requires<[HasAVX]>;
825 def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
826 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
827 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
828 (memop addr:$src)))]>,
829 XD, VEX, Requires<[HasAVX]>;
830 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
831 "cvtpd2dq\t{$src, $dst|$dst, $src}",
832 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
833 XD, Requires<[HasSSE2]>;
834 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
835 "cvtpd2dq\t{$src, $dst|$dst, $src}",
836 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
837 (memop addr:$src)))]>,
838 XD, Requires<[HasSSE2]>;
841 // Convert with truncation packed single/double fp to doubleword
842 // SSE2 packed instructions with XS prefix
843 def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
844 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
845 def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
846 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
847 def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
848 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
849 def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
850 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
851 def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
852 "cvttps2dq\t{$src, $dst|$dst, $src}",
854 (int_x86_sse2_cvttps2dq VR128:$src))]>;
855 def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
856 "cvttps2dq\t{$src, $dst|$dst, $src}",
858 (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;
861 def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
862 "vcvttps2dq\t{$src, $dst|$dst, $src}",
864 (int_x86_sse2_cvttps2dq VR128:$src))]>,
865 XS, VEX, Requires<[HasAVX]>;
866 def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
867 "vcvttps2dq\t{$src, $dst|$dst, $src}",
868 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
869 (memop addr:$src)))]>,
870 XS, VEX, Requires<[HasAVX]>;
872 def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
874 "cvttpd2dq\t{$src, $dst|$dst, $src}",
875 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
877 def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
879 "cvttpd2dq\t{$src, $dst|$dst, $src}",
880 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
881 (memop addr:$src)))]>, VEX;
882 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
883 "cvttpd2dq\t{$src, $dst|$dst, $src}",
884 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
885 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
886 "cvttpd2dq\t{$src, $dst|$dst, $src}",
887 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
888 (memop addr:$src)))]>;
890 // The assembler can recognize rr 256-bit instructions by seeing a ymm
891 // register, but the same isn't true when using memory operands instead.
892 // Provide other assembly rr and rm forms to address this explicitly.
893 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
894 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
895 def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
896 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
899 def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
900 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
901 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
902 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
905 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
906 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
907 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
908 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
910 // Convert packed single to packed double
911 let Predicates = [HasAVX] in {
912 // SSE2 instructions without OpSize prefix
913 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
914 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
915 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
916 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
917 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
918 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
919 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
920 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
922 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
923 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
924 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
925 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
927 def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
928 "vcvtps2pd\t{$src, $dst|$dst, $src}",
929 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
930 VEX, Requires<[HasAVX]>;
931 def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
932 "vcvtps2pd\t{$src, $dst|$dst, $src}",
933 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
934 (load addr:$src)))]>,
935 VEX, Requires<[HasAVX]>;
936 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
937 "cvtps2pd\t{$src, $dst|$dst, $src}",
938 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
939 TB, Requires<[HasSSE2]>;
940 def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
941 "cvtps2pd\t{$src, $dst|$dst, $src}",
942 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
943 (load addr:$src)))]>,
944 TB, Requires<[HasSSE2]>;
946 // Convert packed double to packed single
947 // The assembler can recognize rr 256-bit instructions by seeing a ymm
948 // register, but the same isn't true when using memory operands instead.
949 // Provide other assembly rr and rm forms to address this explicitly.
950 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
951 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
952 def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
953 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
956 def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
957 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
958 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
959 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
962 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
963 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
964 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
965 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
966 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
967 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
968 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
969 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
972 def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
973 "cvtpd2ps\t{$src, $dst|$dst, $src}",
974 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
975 def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
977 "cvtpd2ps\t{$src, $dst|$dst, $src}",
978 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
979 (memop addr:$src)))]>;
980 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
981 "cvtpd2ps\t{$src, $dst|$dst, $src}",
982 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
983 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
984 "cvtpd2ps\t{$src, $dst|$dst, $src}",
985 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
986 (memop addr:$src)))]>;
988 // AVX 256-bit register conversion intrinsics
989 // FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
990 // whenever possible to avoid declaring two versions of each one.
991 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
992 (VCVTDQ2PSYrr VR256:$src)>;
993 def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
994 (VCVTDQ2PSYrm addr:$src)>;
996 def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
997 (VCVTPD2PSYrr VR256:$src)>;
998 def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
999 (VCVTPD2PSYrm addr:$src)>;
1001 def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
1002 (VCVTPS2DQYrr VR256:$src)>;
1003 def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
1004 (VCVTPS2DQYrm addr:$src)>;
1006 def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
1007 (VCVTPS2PDYrr VR128:$src)>;
1008 def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
1009 (VCVTPS2PDYrm addr:$src)>;
1011 def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
1012 (VCVTTPD2DQYrr VR256:$src)>;
1013 def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
1014 (VCVTTPD2DQYrm addr:$src)>;
1016 def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
1017 (VCVTTPS2DQYrr VR256:$src)>;
1018 def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
1019 (VCVTTPS2DQYrm addr:$src)>;
1021 //===----------------------------------------------------------------------===//
1022 // SSE 1 & 2 - Compare Instructions
1023 //===----------------------------------------------------------------------===//
1025 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
1026 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
1027 string asm, string asm_alt> {
1028 let isAsmParserOnly = 1 in {
1029 def rr : SIi8<0xC2, MRMSrcReg,
1030 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
1033 def rm : SIi8<0xC2, MRMSrcMem,
1034 (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
1038 // Accept explicit immediate argument form instead of comparison code.
1039 def rr_alt : SIi8<0xC2, MRMSrcReg,
1040 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1043 def rm_alt : SIi8<0xC2, MRMSrcMem,
1044 (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
1048 let neverHasSideEffects = 1 in {
1049 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
1050 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1051 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1053 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
1054 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1055 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1059 let Constraints = "$src1 = $dst" in {
1060 def CMPSSrr : SIi8<0xC2, MRMSrcReg,
1061 (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, SSECC:$cc),
1062 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
1063 [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), FR32:$src2, imm:$cc))]>, XS;
1064 def CMPSSrm : SIi8<0xC2, MRMSrcMem,
1065 (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2, SSECC:$cc),
1066 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
1067 [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), (loadf32 addr:$src2), imm:$cc))]>, XS;
1068 def CMPSDrr : SIi8<0xC2, MRMSrcReg,
1069 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, SSECC:$cc),
1070 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
1071 [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), FR64:$src2, imm:$cc))]>, XD;
1072 def CMPSDrm : SIi8<0xC2, MRMSrcMem,
1073 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2, SSECC:$cc),
1074 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
1075 [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), (loadf64 addr:$src2), imm:$cc))]>, XD;
1077 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1078 def CMPSSrr_alt : SIi8<0xC2, MRMSrcReg,
1079 (outs FR32:$dst), (ins FR32:$src1, FR32:$src, i8imm:$src2),
1080 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
1081 def CMPSSrm_alt : SIi8<0xC2, MRMSrcMem,
1082 (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, i8imm:$src2),
1083 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
1084 def CMPSDrr_alt : SIi8<0xC2, MRMSrcReg,
1085 (outs FR64:$dst), (ins FR64:$src1, FR64:$src, i8imm:$src2),
1086 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
1087 def CMPSDrm_alt : SIi8<0xC2, MRMSrcMem,
1088 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, i8imm:$src2),
1089 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
1092 multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
1093 Intrinsic Int, string asm> {
1094 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1095 (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
1096 [(set VR128:$dst, (Int VR128:$src1,
1097 VR128:$src, imm:$cc))]>;
1098 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1099 (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
1100 [(set VR128:$dst, (Int VR128:$src1,
1101 (load addr:$src), imm:$cc))]>;
1104 // Aliases to match intrinsics which expect XMM operand(s).
1105 defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1106 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
1108 defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1109 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
1111 let Constraints = "$src1 = $dst" in {
1112 defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1113 "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
1114 defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1115 "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
1119 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
1120 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1121 ValueType vt, X86MemOperand x86memop,
1122 PatFrag ld_frag, string OpcodeStr, Domain d> {
1123 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1124 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1125 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
1126 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1127 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1128 [(set EFLAGS, (OpNode (vt RC:$src1),
1129 (ld_frag addr:$src2)))], d>;
1132 let Defs = [EFLAGS] in {
1133 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1134 "ucomiss", SSEPackedSingle>, VEX;
1135 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1136 "ucomisd", SSEPackedDouble>, OpSize, VEX;
1137 let Pattern = []<dag> in {
1138 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1139 "comiss", SSEPackedSingle>, VEX;
1140 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1141 "comisd", SSEPackedDouble>, OpSize, VEX;
1144 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1145 load, "ucomiss", SSEPackedSingle>, VEX;
1146 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1147 load, "ucomisd", SSEPackedDouble>, OpSize, VEX;
1149 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
1150 load, "comiss", SSEPackedSingle>, VEX;
1151 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
1152 load, "comisd", SSEPackedDouble>, OpSize, VEX;
1153 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1154 "ucomiss", SSEPackedSingle>, TB;
1155 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1156 "ucomisd", SSEPackedDouble>, TB, OpSize;
1158 let Pattern = []<dag> in {
1159 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1160 "comiss", SSEPackedSingle>, TB;
1161 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1162 "comisd", SSEPackedDouble>, TB, OpSize;
1165 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1166 load, "ucomiss", SSEPackedSingle>, TB;
1167 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1168 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
1170 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
1171 "comiss", SSEPackedSingle>, TB;
1172 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
1173 "comisd", SSEPackedDouble>, TB, OpSize;
1174 } // Defs = [EFLAGS]
1176 // sse12_cmp_packed - sse 1 & 2 compared packed instructions
1177 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1178 Intrinsic Int, string asm, string asm_alt,
1180 let isAsmParserOnly = 1 in {
1181 def rri : PIi8<0xC2, MRMSrcReg,
1182 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1183 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1184 def rmi : PIi8<0xC2, MRMSrcMem,
1185 (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
1186 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1189 // Accept explicit immediate argument form instead of comparison code.
1190 def rri_alt : PIi8<0xC2, MRMSrcReg,
1191 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1193 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1194 (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
1198 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1199 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1200 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1201 SSEPackedSingle>, VEX_4V;
1202 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1203 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1204 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1205 SSEPackedDouble>, OpSize, VEX_4V;
1206 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
1207 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1208 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1209 SSEPackedSingle>, VEX_4V;
1210 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
1211 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1212 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1213 SSEPackedDouble>, OpSize, VEX_4V;
1214 let Constraints = "$src1 = $dst" in {
1215 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1216 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1217 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1218 SSEPackedSingle>, TB;
1219 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1220 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1221 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1222 SSEPackedDouble>, TB, OpSize;
1225 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1226 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1227 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1228 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1229 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1230 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1231 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1232 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1234 //===----------------------------------------------------------------------===//
1235 // SSE 1 & 2 - Shuffle Instructions
1236 //===----------------------------------------------------------------------===//
1238 /// sse12_shuffle - sse 1 & 2 shuffle instructions
1239 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1240 ValueType vt, string asm, PatFrag mem_frag,
1241 Domain d, bit IsConvertibleToThreeAddress = 0> {
1242 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
1243 (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
1244 [(set RC:$dst, (vt (shufp:$src3
1245 RC:$src1, (mem_frag addr:$src2))))], d>;
1246 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1247 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
1248 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
1250 (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
defm VSHUFPS  : sse12_shuffle<VR128, f128mem, v4f32,
           "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           memopv4f32, SSEPackedSingle>, TB, VEX_4V;
defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
           "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           memopv8f32, SSEPackedSingle>, TB, VEX_4V;
defm VSHUFPD  : sse12_shuffle<VR128, f128mem, v2f64,
           "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           memopv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
           "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
let Constraints = "$src1 = $dst" in {
  defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
                    "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>, TB;
  defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
                    "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    memopv2f64, SSEPackedDouble>, TB, OpSize;
}
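
// Note: for SHUFPS the immediate selects two elements from $src1 and two from
// $src2: dst[0] = src1[imm[1:0]], dst[1] = src1[imm[3:2]],
// dst[2] = src2[imm[5:4]], dst[3] = src2[imm[7:6]]. When both sources are the
// same register the shuffle can be rewritten as a PSHUFD, which is why SHUFPS
// is instantiated with IsConvertibleToThreeAddress above.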

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Unpack Instructions
//===----------------------------------------------------------------------===//
/// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
                                   PatFrag mem_frag, RegisterClass RC,
                                   X86MemOperand x86memop, string asm,
                                   Domain d> {
  def rr : PI<opc, MRMSrcReg,
              (outs RC:$dst), (ins RC:$src1, RC:$src2),
              asm, [(set RC:$dst,
                         (vt (OpNode RC:$src1, RC:$src2)))], d>;
  def rm : PI<opc, MRMSrcMem,
              (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              asm, [(set RC:$dst,
                         (vt (OpNode RC:$src1,
                                     (mem_frag addr:$src2))))], d>;
}
let AddedComplexity = 10 in {
  defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
        VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
        SSEPackedSingle>, VEX_4V;
  defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
        VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
        SSEPackedDouble>, OpSize, VEX_4V;
  defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
        VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
        SSEPackedSingle>, VEX_4V;
  defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
        VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
        SSEPackedDouble>, OpSize, VEX_4V;

  defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
        VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
        SSEPackedSingle>, VEX_4V;
  defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
        VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
        SSEPackedDouble>, OpSize, VEX_4V;
  defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
        VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
        SSEPackedSingle>, VEX_4V;
  defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
        VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
        SSEPackedDouble>, OpSize, VEX_4V;
  let Constraints = "$src1 = $dst" in {
    defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
          VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
          SSEPackedSingle>, TB;
    defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
          VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
          SSEPackedDouble>, TB, OpSize;
    defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
          VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
          SSEPackedSingle>, TB;
    defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
          VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
          SSEPackedDouble>, TB, OpSize;
  } // Constraints = "$src1 = $dst"
} // AddedComplexity
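
// Note: unpcklps interleaves the low halves of the two sources,
// dst = { src1[0], src2[0], src1[1], src2[1] }, and unpckhps the high halves,
// dst = { src1[2], src2[2], src1[3], src2[3] }; the pd forms do the same at
// 64-bit granularity.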

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Extract Floating-Point Sign mask
//===----------------------------------------------------------------------===//
/// sse12_extr_sign_mask - sse 1 & 2 packed FP sign-mask extraction
multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
                                Domain d> {
  def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
                !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
                [(set GR32:$dst, (Int RC:$src))], d>;
  def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
                !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
}
defm VMOVMSKPS  : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
                                       "movmskps", SSEPackedSingle>, VEX;
defm VMOVMSKPD  : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
                                       "movmskpd", SSEPackedDouble>, OpSize,
                                       VEX;
defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
                                       "movmskps", SSEPackedSingle>, VEX;
defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
                                       "movmskpd", SSEPackedDouble>, OpSize,
                                       VEX;
defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
                                     SSEPackedSingle>, TB;
defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
                                     SSEPackedDouble>, TB, OpSize;

def MOVMSKPDrr32_alt : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src),
       "movmskpd\t{$src, $dst|$dst, $src}",
       [(set GR32:$dst, (X86fgetsign FR64:$src))], SSEPackedDouble>, TB, OpSize;
def MOVMSKPDrr64_alt : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
       "movmskpd\t{$src, $dst|$dst, $src}",
       [(set GR64:$dst, (X86fgetsign FR64:$src))], SSEPackedDouble>, TB, OpSize;
def MOVMSKPSrr32_alt : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
       "movmskps\t{$src, $dst|$dst, $src}",
       [(set GR32:$dst, (X86fgetsign FR32:$src))], SSEPackedSingle>, TB;
def MOVMSKPSrr64_alt : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
       "movmskps\t{$src, $dst|$dst, $src}",
       [(set GR64:$dst, (X86fgetsign FR32:$src))], SSEPackedSingle>, TB;
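
// Note: movmskps/movmskpd gather the sign bit of each packed element into the
// low bits of the GPR result (4 bits for v4f32, 2 for v2f64) and zero the
// rest, which is what makes the X86fgetsign patterns above legal.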

def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
       "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
       "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
       VEX;
def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
       "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
       "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
       VEX;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
//===----------------------------------------------------------------------===//

// Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
// names that start with 'Fs'.

// Alias instructions that map fld0 to pxor for sse.
let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
    canFoldAsLoad = 1 in {
  // FIXME: Set encoding to pseudo!
  def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
                   [(set FR32:$dst, fp32imm0)]>,
                   Requires<[HasSSE1]>, TB, OpSize;
  def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
                   [(set FR64:$dst, fpimm0)]>,
                   Requires<[HasSSE2]>, TB, OpSize;
  def VFsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
                    [(set FR32:$dst, fp32imm0)]>,
                    Requires<[HasAVX]>, TB, OpSize, VEX_4V;
  def VFsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
                    [(set FR64:$dst, fpimm0)]>,
                    Requires<[HasAVX]>, TB, OpSize, VEX_4V;
}

// Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
// bits are disregarded.
let neverHasSideEffects = 1 in {
  def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                       "movaps\t{$src, $dst|$dst, $src}", []>;
  def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                       "movapd\t{$src, $dst|$dst, $src}", []>;
}

// Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
// bits are disregarded.
let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                       "movaps\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
  def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                       "movapd\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Logical Instructions
//===----------------------------------------------------------------------===//
/// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
///
multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
                                       SDNode OpNode> {
  defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
              FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, VEX_4V;

  defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
        FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, OpSize, VEX_4V;

  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
                f32, f128mem, memopfsf32, SSEPackedSingle>, TB;

    defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
                f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
  }
}

// Alias bitwise logical operations using SSE logical ops on packed FP values.
let mayLoad = 0 in {
  defm FsAND  : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
  defm FsOR   : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
  defm FsXOR  : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
}

let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
  defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;

/// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
///
multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
                                   SDNode OpNode> {
  let Pattern = []<dag> in {
    defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
                !strconcat(OpcodeStr, "ps"), f128mem,
                [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
                [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
                                          (memopv2i64 addr:$src2)))], 0>, VEX_4V;

    defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
                !strconcat(OpcodeStr, "pd"), f128mem,
                [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                          (bc_v2i64 (v2f64 VR128:$src2))))],
                [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                          (memopv2i64 addr:$src2)))], 0>,
                                          OpSize, VEX_4V;
  }
  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
                !strconcat(OpcodeStr, "ps"), f128mem,
                [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
                [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
                                          (memopv2i64 addr:$src2)))]>, TB;

    defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
                !strconcat(OpcodeStr, "pd"), f128mem,
                [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                          (bc_v2i64 (v2f64 VR128:$src2))))],
                [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                          (memopv2i64 addr:$src2)))]>, TB, OpSize;
  }
}

/// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
///
multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr,
                                     SDNode OpNode> {
  defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
            !strconcat(OpcodeStr, "ps"), f256mem,
            [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
            [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
                                      (memopv4i64 addr:$src2)))], 0>, VEX_4V;

  defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
            !strconcat(OpcodeStr, "pd"), f256mem,
            [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
                                      (bc_v4i64 (v4f64 VR256:$src2))))],
            [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
                                      (memopv4i64 addr:$src2)))], 0>,
                                      OpSize, VEX_4V;
}

// AVX 256-bit packed logical ops forms
defm VAND  : sse12_fp_packed_logical_y<0x54, "and", and>;
defm VOR   : sse12_fp_packed_logical_y<0x56, "or", or>;
defm VXOR  : sse12_fp_packed_logical_y<0x57, "xor", xor>;
defm VANDN : sse12_fp_packed_logical_y<0x55, "andn", X86andnp>;

defm AND : sse12_fp_packed_logical<0x54, "and", and>;
defm OR  : sse12_fp_packed_logical<0x56, "or", or>;
defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
let isCommutable = 0 in
  defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
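
// Note: the "andn" forms compute (NOT src1) AND src2, matching the X86andnp
// node; since the operation is not symmetric in its operands, they are not
// marked commutable.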

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Arithmetic Instructions
//===----------------------------------------------------------------------===//

/// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
/// vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements unmodified (therefore these cannot be commuted).
///
/// These three forms can each be reg+reg or reg+mem.
///
/// FIXME: once all 256-bit intrinsics are matched, clean up and refactor these
/// classes.
multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                  bit Is2Addr = 1> {
  defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
                            OpNode, FR32, f32mem, Is2Addr>, XS;
  defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
                            OpNode, FR64, f64mem, Is2Addr>, XD;
}

multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                  bit Is2Addr = 1> {
  let mayLoad = 0 in {
    defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
                v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
    defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
                v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
  }
}

multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
                                    SDNode OpNode> {
  let mayLoad = 0 in {
    defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
                v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
    defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
                v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
  }
}

multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
                                      bit Is2Addr = 1> {
  defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
     !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
  defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
     !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
}

multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
                                      bit Is2Addr = 1> {
  defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
     !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
     SSEPackedSingle, Is2Addr>, TB;

  defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
     !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
     SSEPackedDouble, Is2Addr>, TB, OpSize;
}

multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
  defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
     !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
     SSEPackedSingle, 0>, TB;

  defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
     !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
     SSEPackedDouble, 0>, TB, OpSize;
}

// Binary Arithmetic instructions
defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
            basic_sse12_fp_binop_s_int<0x58, "add", 0>,
            basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
            basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
            basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
            basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
            basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;

let isCommutable = 0 in {
  defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
              basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
              basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
              basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
  defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
              basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
              basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
              basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
  defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
              basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
              basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
              basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
              basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
              basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
  defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
              basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
              basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
              basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
              basic_sse12_fp_binop_p_y_int<0x5D, "min">,
              basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
             basic_sse12_fp_binop_p<0x58, "add", fadd>,
             basic_sse12_fp_binop_s_int<0x58, "add">;
  defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
             basic_sse12_fp_binop_p<0x59, "mul", fmul>,
             basic_sse12_fp_binop_s_int<0x59, "mul">;

  let isCommutable = 0 in {
    defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
               basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
               basic_sse12_fp_binop_s_int<0x5C, "sub">;
    defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
               basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
               basic_sse12_fp_binop_s_int<0x5E, "div">;
    defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
               basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
               basic_sse12_fp_binop_s_int<0x5F, "max">,
               basic_sse12_fp_binop_p_int<0x5F, "max">;
    defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
               basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
               basic_sse12_fp_binop_s_int<0x5D, "min">,
               basic_sse12_fp_binop_p_int<0x5D, "min">;
  }
}

/// Unop Arithmetic
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.

/// sse1_fp_unop_s - SSE1 unops in scalar form.
multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
                          SDNode OpNode, Intrinsic F32Int> {
  def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode FR32:$src))]>;
  // For scalar unary operations, fold a load into the operation
  // only in OptForSize mode. It eliminates an instruction, but it also
  // eliminates a whole-register clobber (the load), so it introduces a
  // partial register update condition.
  def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
              !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
              [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
              Requires<[HasSSE1, OptForSize]>;
  def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int VR128:$src))]>;
  def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
}

/// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
                              SDNode OpNode, Intrinsic F32Int> {
  def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                !strconcat(OpcodeStr,
                           "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
  def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
              !strconcat(OpcodeStr,
                         "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
              []>, XS, Requires<[HasAVX, OptForSize]>;
  def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr,
                               "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
                    [(set VR128:$dst, (F32Int VR128:$src))]>;
  def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
                    !strconcat(OpcodeStr,
                               "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
                    [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
}

/// sse1_fp_unop_p - SSE1 unops in packed form.
multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
  def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
  def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
}

/// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
  def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
  def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
}

/// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
                              Intrinsic V4F32Int> {
  def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int VR128:$src))]>;
  def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
}

/// sse1_fp_unop_p_y_int - AVX 256-bit intrinsics unops in packed forms.
multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
                                Intrinsic V4F32Int> {
  def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                     !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst, (V4F32Int VR256:$src))]>;
  def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                     !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
}

/// sse2_fp_unop_s - SSE2 unops in scalar form.
multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
                          SDNode OpNode, Intrinsic F64Int> {
  def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                [(set FR64:$dst, (OpNode FR64:$src))]>;
  // See the comments in sse1_fp_unop_s for why this is OptForSize.
  def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
              !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
              [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
              Requires<[HasSSE2, OptForSize]>;
  def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F64Int VR128:$src))]>;
  def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
                    !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
}

/// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
                              SDNode OpNode, Intrinsic F64Int> {
  def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                !strconcat(OpcodeStr,
                           "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
  def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
                (ins FR64:$src1, f64mem:$src2),
                !strconcat(OpcodeStr,
                           "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
  def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
                [(set VR128:$dst, (F64Int VR128:$src))]>;
  def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
                !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
                [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
}

/// sse2_fp_unop_p - SSE2 unops in vector forms.
multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
                          SDNode OpNode> {
  def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
  def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
}

/// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
  def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
  def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
}

/// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
                              Intrinsic V2F64Int> {
  def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V2F64Int VR128:$src))]>;
  def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
}

/// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
                                Intrinsic V2F64Int> {
  def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                     !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst, (V2F64Int VR256:$src))]>;
  def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                     !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
}

let Predicates = [HasAVX] in {
  defm VSQRT  : sse1_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse_sqrt_ss>,
                sse2_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse2_sqrt_sd>,
                VEX_4V;

  defm VSQRT  : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
                sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
                sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
                sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
                sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
                sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
                sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
                sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
                VEX;

  // Reciprocal approximations. Note that these typically require refinement
  // in order to obtain suitable precision.
  defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt", X86frsqrt,
                                   int_x86_sse_rsqrt_ss>, VEX_4V;
  defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
                sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
                sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
                sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;

  defm VRCP   : sse1_fp_unop_s_avx<0x53, "vrcp", X86frcp, int_x86_sse_rcp_ss>,
                VEX_4V;
  defm VRCP   : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
                sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
                sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
                sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
}

defm SQRT  : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
             sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
             sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
             sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
             sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
             sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;

// Reciprocal approximations. Note that these typically require refinement
// in order to obtain suitable precision.
defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
             sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
             sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
defm RCP   : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
             sse1_fp_unop_p<0x53, "rcp", X86frcp>,
             sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;

// There is no f64 version of the reciprocal approximation instructions.
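
// A client of RCPPS/RSQRTPS that needs more than the roughly 12 bits of
// precision the hardware estimate provides would typically apply one
// Newton-Raphson step, e.g. for the reciprocal: x1 = x0 * (2 - a * x0),
// which approximately doubles the number of correct bits.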

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Non-temporal stores
//===----------------------------------------------------------------------===//

let AddedComplexity = 400 in { // Prefer non-temporal versions
  def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
                        (ins f128mem:$dst, VR128:$src),
                        "movntps\t{$src, $dst|$dst, $src}",
                        [(alignednontemporalstore (v4f32 VR128:$src),
                                                  addr:$dst)]>, VEX;
  def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
                        (ins f128mem:$dst, VR128:$src),
                        "movntpd\t{$src, $dst|$dst, $src}",
                        [(alignednontemporalstore (v2f64 VR128:$src),
                                                  addr:$dst)]>, VEX;
  def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
                           (ins f128mem:$dst, VR128:$src),
                           "movntdq\t{$src, $dst|$dst, $src}",
                           [(alignednontemporalstore (v2f64 VR128:$src),
                                                     addr:$dst)]>, VEX;

  let ExeDomain = SSEPackedInt in
    def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
                          (ins f128mem:$dst, VR128:$src),
                          "movntdq\t{$src, $dst|$dst, $src}",
                          [(alignednontemporalstore (v4f32 VR128:$src),
                                                    addr:$dst)]>, VEX;

  def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
            (VMOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasAVX]>;

  def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
                         (ins f256mem:$dst, VR256:$src),
                         "movntps\t{$src, $dst|$dst, $src}",
                         [(alignednontemporalstore (v8f32 VR256:$src),
                                                   addr:$dst)]>, VEX;
  def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
                         (ins f256mem:$dst, VR256:$src),
                         "movntpd\t{$src, $dst|$dst, $src}",
                         [(alignednontemporalstore (v4f64 VR256:$src),
                                                   addr:$dst)]>, VEX;
  def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
                            (ins f256mem:$dst, VR256:$src),
                            "movntdq\t{$src, $dst|$dst, $src}",
                            [(alignednontemporalstore (v4f64 VR256:$src),
                                                      addr:$dst)]>, VEX;
  let ExeDomain = SSEPackedInt in
    def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
                           (ins f256mem:$dst, VR256:$src),
                           "movntdq\t{$src, $dst|$dst, $src}",
                           [(alignednontemporalstore (v8f32 VR256:$src),
                                                     addr:$dst)]>, VEX;
}

def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
          (VMOVNTDQYmr addr:$dst, VR256:$src)>;
def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
          (VMOVNTPDYmr addr:$dst, VR256:$src)>;
def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
          (VMOVNTPSYmr addr:$dst, VR256:$src)>;

let AddedComplexity = 400 in { // Prefer non-temporal versions
  def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                      "movntps\t{$src, $dst|$dst, $src}",
                      [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
  def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                      "movntpd\t{$src, $dst|$dst, $src}",
                      [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;

  def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                         "movntdq\t{$src, $dst|$dst, $src}",
                         [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;

  let ExeDomain = SSEPackedInt in
    def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                        "movntdq\t{$src, $dst|$dst, $src}",
                        [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;

  def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
            (MOVNTDQmr addr:$dst, VR128:$src)>;

  // There is no AVX form for instructions below this point.
  def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
                   "movnti{l}\t{$src, $dst|$dst, $src}",
                   [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
                   TB, Requires<[HasSSE2]>;
  def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                       "movnti{q}\t{$src, $dst|$dst, $src}",
                       [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
                       TB, Requires<[HasSSE2]>;
}
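
// Note: these non-temporal stores bypass the cache hierarchy through
// write-combining buffers. The packed forms require 16-byte aligned addresses
// (hence alignednontemporalstore), and an SFENCE is needed to order them with
// respect to other stores.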

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Misc Instructions (No AVX form)
//===----------------------------------------------------------------------===//

// Prefetch intrinsic.
def PREFETCHT0  : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
    "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))]>;
def PREFETCHT1  : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
    "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))]>;
def PREFETCHT2  : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
    "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))]>;
def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
    "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))]>;
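
// Note: the third operand of the prefetch node is the locality hint, which is
// how the four variants map onto it: prefetcht0 = 3 (all cache levels),
// prefetcht1 = 2, prefetcht2 = 1, prefetchnta = 0 (non-temporal).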

// Load, store, and memory fence
def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
             TB, Requires<[HasSSE1]>;
def : Pat<(X86SFence), (SFENCE)>;

// Alias instructions that map zero vector to pxor / xorp* for sse.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
// FIXME: Change encoding to pseudo! This is blocked right now by the x86
// JIT implementation, which does not expand the instructions below like
// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1 in {
  def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
                     [(set VR128:$dst, (v4f32 immAllZerosV))]>;
  def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
                     [(set VR128:$dst, (v2f64 immAllZerosV))]>;
  let ExeDomain = SSEPackedInt in
    def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
                       [(set VR128:$dst, (v4i32 immAllZerosV))]>;
}

// The same as done above but for AVX. The 128-bit versions are the
// same, but re-encoded. The 256-bit ones don't support a PI version, and
// don't need one: on Sandy Bridge the register is set to zero at the rename
// stage without using any execution unit, so SET0PSY and SET0PDY can be
// used for vector int instructions without penalty.
// FIXME: Change encoding to pseudo! This is blocked right now by the x86
// JIT implementation, which does not expand the instructions below like
// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1, Predicates = [HasAVX] in {
  def AVX_SET0PS  : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
                        [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
  def AVX_SET0PD  : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
                        [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
  def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
                        [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
  def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
                        [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
  let ExeDomain = SSEPackedInt in
    def AVX_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
                         [(set VR128:$dst, (v4i32 immAllZerosV))]>;
}

def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;

def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
          (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;

// FIXME: According to the Intel manual, DEST[127:64] <- SRC1[127:64], while
// in the non-AVX version bits 127:64 aren't touched. Find a better way to
// represent this instead of always zeroing SRC1. One possible solution is
// to represent the instruction with something similar to the "$src1 = $dst"
// constraint but without the tied operands.
def : Pat<(extloadf32 addr:$src),
          (VCVTSS2SDrm (f32 (EXTRACT_SUBREG (AVX_SET0PS), sub_ss)), addr:$src)>,
      Requires<[HasAVX, OptForSpeed]>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Load/Store MXCSR register
//===----------------------------------------------------------------------===//

def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
                    "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
                    "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;

def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
                  "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
                  "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;

//===---------------------------------------------------------------------===//
// SSE2 - Move Aligned/Unaligned Packed Integer Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in { // SSE integer instructions

let neverHasSideEffects = 1 in {
def VMOVDQArr  : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;

def VMOVDQUrr  : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                      "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
}

let canFoldAsLoad = 1, mayLoad = 1 in {
def VMOVDQArm  : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
let Predicates = [HasAVX] in {
  def VMOVDQUrm  : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
  def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
}
}

let mayStore = 1 in {
def VMOVDQAmr  : VPDI<0x7F, MRMDestMem, (outs),
                      (ins i128mem:$dst, VR128:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
                      (ins i256mem:$dst, VR256:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
let Predicates = [HasAVX] in {
  def VMOVDQUmr  : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
  def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
}
}

let neverHasSideEffects = 1 in
def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}", []>;

def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 "movdqu\t{$src, $dst|$dst, $src}",
                 []>, XS, Requires<[HasSSE2]>;

let canFoldAsLoad = 1, mayLoad = 1 in {
def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                 "movdqu\t{$src, $dst|$dst, $src}",
                 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
                 XS, Requires<[HasSSE2]>;
}

let mayStore = 1 in {
def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                 "movdqu\t{$src, $dst|$dst, $src}",
                 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
                 XS, Requires<[HasSSE2]>;
}

// Intrinsic forms of MOVDQU load and store
def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                      "vmovdqu\t{$src, $dst|$dst, $src}",
                      [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
                      XS, VEX, Requires<[HasAVX]>;

def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                     "movdqu\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
                     XS, Requires<[HasSSE2]>;

} // ExeDomain = SSEPackedInt

def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
          (VMOVDQUYmr addr:$dst, VR256:$src)>;

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Arithmetic Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in { // SSE integer instructions

multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
                            bit IsCommutable = 0, bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId VR128:$src1,
                                (bitconvert (memopv2i64 addr:$src2))))]>;
}

multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
                             string OpcodeStr, Intrinsic IntId,
                             Intrinsic IntId2, bit Is2Addr = 1> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId VR128:$src1,
                                (bitconvert (memopv2i64 addr:$src2))))]>;
  def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
       (ins VR128:$src1, i32i8imm:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
}

/// PDI_binop_rm - Simple SSE2 binary operator.
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                        ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
                                       (bitconvert (memopv2i64 addr:$src2)))))]>;
}

/// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
///
/// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
/// to collapse (bitconvert VT to VT) into its operand.
///
multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
                              bit IsCommutable = 0, bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
}

} // ExeDomain = SSEPackedInt
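
// Note: in the multiclasses above, Is2Addr selects between the tied
// two-operand SSE syntax ("op $src2, $dst") and the three-operand AVX syntax
// ("op $src2, $src1, $dst"); the SSE instantiations rely on the surrounding
// Constraints = "$src1 = $dst" to tie the first source to the destination.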

// 128-bit Integer Arithmetic

let Predicates = [HasAVX] in {
defm VPADDB  : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
defm VPADDW  : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
defm VPADDD  : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
defm VPADDQ  : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
defm VPSUBB  : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
defm VPSUBW  : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
defm VPSUBD  : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
defm VPSUBQ  : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;

defm VPSUBSB  : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
                                 VEX_4V;
defm VPSUBSW  : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
                                 VEX_4V;
defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
                                 VEX_4V;
defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
                                 VEX_4V;
defm VPADDSB  : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
                                 VEX_4V;
defm VPADDSW  : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
                                 VEX_4V;
defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
                                 VEX_4V;
defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
                                 VEX_4V;
defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
                                 VEX_4V;
defm VPMULHW  : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
                                 VEX_4V;
defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
                                 VEX_4V;
defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
                                 VEX_4V;
defm VPAVGB   : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
                                 VEX_4V;
defm VPAVGW   : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
                                 VEX_4V;
defm VPMINUB  : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
                                 VEX_4V;
defm VPMINSW  : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
                                 VEX_4V;
defm VPMAXUB  : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
                                 VEX_4V;
defm VPMAXSW  : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
                                 VEX_4V;
defm VPSADBW  : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
                                 VEX_4V;
}

let Constraints = "$src1 = $dst" in {
defm PADDB  : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
defm PADDW  : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
defm PADDD  : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
defm PADDQ  : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
defm PSUBB  : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
defm PSUBW  : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
defm PSUBD  : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
defm PSUBQ  : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;

defm PSUBSB  : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
defm PSUBSW  : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
defm PADDSB  : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
defm PADDSW  : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
defm PMULHW  : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
defm PAVGB   : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
defm PAVGW   : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
defm PMINUB  : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
defm PMINSW  : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
defm PMAXUB  : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
defm PMAXSW  : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
defm PSADBW  : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;

} // Constraints = "$src1 = $dst"
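
// Note: the "s"/"us" variants saturate instead of wrapping: paddsb clamps each
// signed byte result to [-128, 127], while paddusb clamps the unsigned result
// to [0, 255]; the plain paddb/paddw/paddd/paddq forms wrap modulo 2^n.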

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Logical Instructions
//===---------------------------------------------------------------------===//

let Predicates = [HasAVX] in {
defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
                                int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
                                VEX_4V;
defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
                                int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
                                VEX_4V;
defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
                                int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
                                VEX_4V;

defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
                                int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
                                VEX_4V;
defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
                                int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
                                VEX_4V;
defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
                                int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
                                VEX_4V;

defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
                                int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
                                VEX_4V;
defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
                                int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
                                VEX_4V;

defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
defm VPOR  : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;

let ExeDomain = SSEPackedInt in {
  let neverHasSideEffects = 1 in {
    // 128-bit logical shifts.
    def VPSLLDQri : PDIi8<0x73, MRM7r,
                      (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                      "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                      VEX_4V;
    def VPSRLDQri : PDIi8<0x73, MRM3r,
                      (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                      "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                      VEX_4V;
    // PSRADQri doesn't exist in SSE[1-3].
  }
  def VPANDNrr : PDI<0xDF, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
                                                  VR128:$src2)))]>, VEX_4V;

  def VPANDNrm : PDI<0xDF, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                    "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
                                                  (memopv2i64 addr:$src2))))]>,
                                                  VEX_4V;
}
}

let Constraints = "$src1 = $dst" in {
defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
                               int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
                               int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
                               int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;

defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
                               int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
                               int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
                               int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;

defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
                               int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
                               int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;

defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
defm POR  : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;

let ExeDomain = SSEPackedInt in {
  let neverHasSideEffects = 1 in {
    // 128-bit logical shifts.
    def PSLLDQri : PDIi8<0x73, MRM7r,
                     (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                     "pslldq\t{$src2, $dst|$dst, $src2}", []>;
    def PSRLDQri : PDIi8<0x73, MRM3r,
                     (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                     "psrldq\t{$src2, $dst|$dst, $src2}", []>;
    // PSRADQri doesn't exist in SSE[1-3].
  }
  def PANDNrr : PDI<0xDF, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "pandn\t{$src2, $dst|$dst, $src2}", []>;

  def PANDNrm : PDI<0xDF, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                    "pandn\t{$src2, $dst|$dst, $src2}", []>;
}
} // Constraints = "$src1 = $dst"

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
            (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
            (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
            (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
  def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
            (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
  def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
            (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;

  // Shift up / down and insert zeros.
  def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
            (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
  def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
            (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
}

let Predicates = [HasSSE2] in {
  def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
            (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
            (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
            (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
  def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
            (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
  def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
            (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;

  // Shift up / down and insert zeros.
  def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
            (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
  def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
            (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
}
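
// Note: PSLLDQ/PSRLDQ shift the whole 128-bit register by a byte count, so
// BYTE_imm rescales the bit count used by the psll_dq/psrl_dq intrinsics and
// the X86vshl/X86vshr nodes (imm >> 3); the "_bs" intrinsic variants already
// take a byte count and pass the immediate through unchanged.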

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Comparison Instructions
//===---------------------------------------------------------------------===//

let Predicates = [HasAVX] in {
  defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
                                   0>, VEX_4V;
  defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
                                   0>, VEX_4V;
  defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
                                   0>, VEX_4V;
  defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
                                   0>, VEX_4V;
  defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
                                   0>, VEX_4V;
  defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
                                   0>, VEX_4V;
}
2512 let Constraints = "$src1 = $dst" in {
2513 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
2514 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
2515 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
2516 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
2517 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
2518 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
2519 } // Constraints = "$src1 = $dst"
2521 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2522 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
2523 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2524 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2525 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2526 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2527 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2528 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2529 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2530 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2531 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2532 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2534 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2535 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2536 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2537 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2538 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2539 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2540 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2541 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2542 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2543 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2544 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2545 (PCMPGTDrm VR128:$src1, addr:$src2)>;
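// The pcmpeq/pcmpgt family produces a mask rather than a flag result: each
// destination element is all-ones where the comparison holds and zero where
// it fails, e.g. pcmpeqb on <1,2,...> and <1,0,...> yields <0xFF,0x00,...>.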
2547 //===---------------------------------------------------------------------===//
2548 // SSE2 - Packed Integer Pack Instructions
2549 //===---------------------------------------------------------------------===//
let Predicates = [HasAVX] in {
defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
                                  0, 0>, VEX_4V;
defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
                                  0, 0>, VEX_4V;
defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
                                  0, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
2561 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2562 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2563 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2564 } // Constraints = "$src1 = $dst"
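// packsswb/packssdw narrow each source element to half its width with signed
// saturation (packuswb with unsigned saturation), so the two 128-bit inputs
// of 8 x i16 each pack into a single 16 x i8 result.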
2566 //===---------------------------------------------------------------------===//
2567 // SSE2 - Packed Integer Shuffle Instructions
2568 //===---------------------------------------------------------------------===//
let ExeDomain = SSEPackedInt in {
multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
                         PatFrag bc_frag> {
def ri : Ii8<0x70, MRMSrcReg,
             (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
             !strconcat(OpcodeStr,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
                                                     (undef))))]>;
def mi : Ii8<0x70, MRMSrcMem,
             (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
             !strconcat(OpcodeStr,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (vt (pshuf_frag:$src2
                                    (bc_frag (memopv2i64 addr:$src1)),
                                    (undef))))]>;
}
} // ExeDomain = SSEPackedInt
let Predicates = [HasAVX] in {
let AddedComplexity = 5 in
defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
                             VEX;

// SSE2 with ImmT == Imm8 and XS prefix.
defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
                              VEX;

// SSE2 with ImmT == Imm8 and XD prefix.
defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
                              VEX;
}

let Predicates = [HasSSE2] in {
let AddedComplexity = 5 in
defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;

// SSE2 with ImmT == Imm8 and XS prefix.
defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;

// SSE2 with ImmT == Imm8 and XD prefix.
defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
}
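// The shuffle immediate encodes one source position per result element, two
// bits each; e.g. "pshufd $0x1B, %xmm0, %xmm1" (0x1B = 0b00011011) reverses
// the four dwords of %xmm0.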
2614 //===---------------------------------------------------------------------===//
2615 // SSE2 - Packed Integer Unpack Instructions
2616 //===---------------------------------------------------------------------===//
let ExeDomain = SSEPackedInt in {
multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
                       PatFrag unp_frag, PatFrag bc_frag, bit Is2Addr = 1> {
  def rr : PDI<opc, MRMSrcReg,
      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
      !if(Is2Addr,
          !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
          !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set VR128:$dst, (vt (unp_frag VR128:$src1, VR128:$src2)))]>;
  def rm : PDI<opc, MRMSrcMem,
      (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
      !if(Is2Addr,
          !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
          !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set VR128:$dst, (unp_frag VR128:$src1,
                                  (bc_frag (memopv2i64
                                               addr:$src2))))]>;
}
let Predicates = [HasAVX] in {
  defm VPUNPCKLBW  : sse2_unpack<0x60, "vpunpcklbw", v16i8, unpckl, bc_v16i8,
                                 0>, VEX_4V;
  defm VPUNPCKLWD  : sse2_unpack<0x61, "vpunpcklwd", v8i16, unpckl, bc_v8i16,
                                 0>, VEX_4V;
  defm VPUNPCKLDQ  : sse2_unpack<0x62, "vpunpckldq", v4i32, unpckl, bc_v4i32,
                                 0>, VEX_4V;

  /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
  /// knew to collapse (bitconvert VT to VT) into its operand.
  def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>, VEX_4V;
  def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v2i64 (unpckl VR128:$src1,
                                         (memopv2i64 addr:$src2))))]>, VEX_4V;
  defm VPUNPCKHBW  : sse2_unpack<0x68, "vpunpckhbw", v16i8, unpckh, bc_v16i8,
                                 0>, VEX_4V;
  defm VPUNPCKHWD  : sse2_unpack<0x69, "vpunpckhwd", v8i16, unpckh, bc_v8i16,
                                 0>, VEX_4V;
  defm VPUNPCKHDQ  : sse2_unpack<0x6A, "vpunpckhdq", v4i32, unpckh, bc_v4i32,
                                 0>, VEX_4V;

  /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
  /// knew to collapse (bitconvert VT to VT) into its operand.
  def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>, VEX_4V;
  def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                          (v2i64 (unpckh VR128:$src1,
                                         (memopv2i64 addr:$src2))))]>, VEX_4V;
}
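// punpckl* interleaves the low halves of its two sources element by element
// (punpckh* the high halves), e.g. punpcklbw on <a0..a15> and <b0..b15>
// yields <a0,b0,a1,b1,...,a7,b7>.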
let Constraints = "$src1 = $dst" in {
  defm PUNPCKLBW  : sse2_unpack<0x60, "punpcklbw", v16i8, unpckl, bc_v16i8>;
  defm PUNPCKLWD  : sse2_unpack<0x61, "punpcklwd", v8i16, unpckl, bc_v8i16>;
  defm PUNPCKLDQ  : sse2_unpack<0x62, "punpckldq", v4i32, unpckl, bc_v4i32>;

  /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
  /// knew to collapse (bitconvert VT to VT) into its operand.
  def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (unpckl VR128:$src1, VR128:$src2)))]>;
  def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                         "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (unpckl VR128:$src1,
                                          (memopv2i64 addr:$src2))))]>;

  defm PUNPCKHBW  : sse2_unpack<0x68, "punpckhbw", v16i8, unpckh, bc_v16i8>;
  defm PUNPCKHWD  : sse2_unpack<0x69, "punpckhwd", v8i16, unpckh, bc_v8i16>;
  defm PUNPCKHDQ  : sse2_unpack<0x6A, "punpckhdq", v4i32, unpckh, bc_v4i32>;

  /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
  /// knew to collapse (bitconvert VT to VT) into its operand.
  def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (unpckh VR128:$src1, VR128:$src2)))]>;
  def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                         "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (unpckh VR128:$src1,
                                          (memopv2i64 addr:$src2))))]>;
}
} // ExeDomain = SSEPackedInt
2721 //===---------------------------------------------------------------------===//
2722 // SSE2 - Packed Integer Extract and Insert
2723 //===---------------------------------------------------------------------===//
let ExeDomain = SSEPackedInt in {
multiclass sse2_pinsrw<bit Is2Addr = 1> {
  def rri : Ii8<0xC4, MRMSrcReg,
       (outs VR128:$dst), (ins VR128:$src1,
        GR32:$src2, i32i8imm:$src3),
       !if(Is2Addr,
           "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
       [(set VR128:$dst,
         (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
  def rmi : Ii8<0xC4, MRMSrcMem,
       (outs VR128:$dst), (ins VR128:$src1,
        i16mem:$src2, i32i8imm:$src3),
       !if(Is2Addr,
           "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
       [(set VR128:$dst,
         (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
                    imm:$src3))]>;
}
2747 let Predicates = [HasAVX] in
2748 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
2749 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2750 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2751 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2752 imm:$src2))]>, OpSize, VEX;
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
                    (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
                                                imm:$src2))]>;
2760 let Predicates = [HasAVX] in {
2761 defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
2762 def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
2763 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
2764 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
                     []>, OpSize, VEX_4V;
}

let Constraints = "$src1 = $dst" in
2769 defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
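// pinsrw replaces a single 16-bit element of the destination with the low
// word of a GR32 (or a loaded i16), leaving the other seven elements intact;
// pextrw does the inverse, zero-extending the selected word into a GR32.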
2771 } // ExeDomain = SSEPackedInt
2773 //===---------------------------------------------------------------------===//
2774 // SSE2 - Packed Mask Creation
2775 //===---------------------------------------------------------------------===//
2777 let ExeDomain = SSEPackedInt in {
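// pmovmskb gathers the sign bit of each of the 16 source bytes into the low
// 16 bits of the GR32 result (e.g. an all-negative vector gives 0xFFFF); it
// is typically used to branch on the outcome of a packed compare.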
2779 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2780 "pmovmskb\t{$src, $dst|$dst, $src}",
2781 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
2782 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2783 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
2784 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2785 "pmovmskb\t{$src, $dst|$dst, $src}",
2786 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2788 } // ExeDomain = SSEPackedInt
2790 //===---------------------------------------------------------------------===//
2791 // SSE2 - Conditional Store
2792 //===---------------------------------------------------------------------===//
2794 let ExeDomain = SSEPackedInt in {
let Uses = [EDI] in
def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
           (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
let Uses = [RDI] in
def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
           (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;

let Uses = [EDI] in
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
let Uses = [RDI] in
def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
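// maskmovdqu stores only the bytes of $src whose corresponding byte in $mask
// has its high bit set, to the address held implicitly in (E|R)DI; the
// remaining bytes in memory are left untouched.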
2816 } // ExeDomain = SSEPackedInt
2818 //===---------------------------------------------------------------------===//
2819 // SSE2 - Move Doubleword
2820 //===---------------------------------------------------------------------===//
// Move Int Doubleword to Packed Double Int
def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
                      VEX;
def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))]>;
def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
2844 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
2845 "mov{d|q}\t{$src, $dst|$dst, $src}",
2846 [(set FR64:$dst, (bitconvert GR64:$src))]>;
2849 // Move Int Doubleword to Single Scalar
2850 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2851 "movd\t{$src, $dst|$dst, $src}",
2852 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
2854 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2855 "movd\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
                      VEX;
2858 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2859 "movd\t{$src, $dst|$dst, $src}",
2860 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2862 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2863 "movd\t{$src, $dst|$dst, $src}",
2864 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2866 // Move Packed Doubleword Int to Packed Double Int
2867 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2868 "movd\t{$src, $dst|$dst, $src}",
                        [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                                         (iPTR 0)))]>, VEX;
2871 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
2872 (ins i32mem:$dst, VR128:$src),
2873 "movd\t{$src, $dst|$dst, $src}",
2874 [(store (i32 (vector_extract (v4i32 VR128:$src),
2875 (iPTR 0))), addr:$dst)]>, VEX;
2876 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2877 "movd\t{$src, $dst|$dst, $src}",
                       [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                                        (iPTR 0)))]>;
2880 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
2881 "movd\t{$src, $dst|$dst, $src}",
2882 [(store (i32 (vector_extract (v4i32 VR128:$src),
2883 (iPTR 0))), addr:$dst)]>;
2885 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
2886 "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                                         (iPTR 0)))]>;
2889 def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
2890 "movq\t{$src, $dst|$dst, $src}",
2891 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
2893 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
2894 "mov{d|q}\t{$src, $dst|$dst, $src}",
2895 [(set GR64:$dst, (bitconvert FR64:$src))]>;
2896 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
2897 "movq\t{$src, $dst|$dst, $src}",
2898 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
2900 // Move Scalar Single to Double Int
2901 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
2902 "movd\t{$src, $dst|$dst, $src}",
2903 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
2904 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
2905 "movd\t{$src, $dst|$dst, $src}",
2906 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
2907 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
2908 "movd\t{$src, $dst|$dst, $src}",
2909 [(set GR32:$dst, (bitconvert FR32:$src))]>;
2910 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
2911 "movd\t{$src, $dst|$dst, $src}",
2912 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
// A movd / movq move to an XMM register zero-extends the value.
2915 let AddedComplexity = 15 in {
2916 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2917 "movd\t{$src, $dst|$dst, $src}",
2918 [(set VR128:$dst, (v4i32 (X86vzmovl
                                      (v4i32 (scalar_to_vector GR32:$src)))))]>,
                                      VEX;
def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
                       [(set VR128:$dst, (v2i64 (X86vzmovl
                                      (v2i64 (scalar_to_vector GR64:$src)))))]>,
                                      VEX, VEX_W;
}
2927 let AddedComplexity = 15 in {
2928 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2929 "movd\t{$src, $dst|$dst, $src}",
2930 [(set VR128:$dst, (v4i32 (X86vzmovl
2931 (v4i32 (scalar_to_vector GR32:$src)))))]>;
2932 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2933 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
2934 [(set VR128:$dst, (v2i64 (X86vzmovl
                                      (v2i64 (scalar_to_vector GR64:$src)))))]>;
}
2938 let AddedComplexity = 20 in {
2939 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2940 "movd\t{$src, $dst|$dst, $src}",
2942 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
2943 (loadi32 addr:$src))))))]>,
2945 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2946 "movd\t{$src, $dst|$dst, $src}",
2948 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
2949 (loadi32 addr:$src))))))]>;
2951 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
2952 (MOVZDI2PDIrm addr:$src)>;
2953 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
2954 (MOVZDI2PDIrm addr:$src)>;
2955 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
          (MOVZDI2PDIrm addr:$src)>;
}
2959 // These are the correct encodings of the instructions so that we know how to
2960 // read correct assembly, even though we continue to emit the wrong ones for
2961 // compatibility with Darwin's buggy assembler.
2962 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
2963 (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
2964 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
2965 (MOV64toSDrr FR64:$dst, GR64:$src), 0>;
2966 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
2967 (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
2968 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
2969 (MOVSDto64rr GR64:$dst, FR64:$src), 0>;
2970 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
2971 (VMOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
2972 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
2973 (MOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
2975 //===---------------------------------------------------------------------===//
2976 // SSE2 - Move Quadword
2977 //===---------------------------------------------------------------------===//
2979 // Move Quadword Int to Packed Quadword Int
def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "vmovq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                     VEX, Requires<[HasAVX]>;
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                    "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst,
                      (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                    Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
2991 // Move Packed Quadword Int to Quadword Int
2992 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
2993 "movq\t{$src, $dst|$dst, $src}",
2994 [(store (i64 (vector_extract (v2i64 VR128:$src),
2995 (iPTR 0))), addr:$dst)]>, VEX;
2996 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
2997 "movq\t{$src, $dst|$dst, $src}",
2998 [(store (i64 (vector_extract (v2i64 VR128:$src),
2999 (iPTR 0))), addr:$dst)]>;
3001 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
3002 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
// Store / copy the lower 64 bits of an XMM register.
3005 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3006 "movq\t{$src, $dst|$dst, $src}",
3007 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
3008 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3009 "movq\t{$src, $dst|$dst, $src}",
3010 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
3012 let AddedComplexity = 20 in
def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                      "vmovq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
                                                  (loadi64 addr:$src))))))]>,
                      XS, VEX, Requires<[HasAVX]>;
3020 let AddedComplexity = 20 in {
def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
                                                 (loadi64 addr:$src))))))]>,
                     XS, Requires<[HasSSE2]>;
3028 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3029 (MOVZQI2PQIrm addr:$src)>;
3030 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
3031 (MOVZQI2PQIrm addr:$src)>;
def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
}
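// A movq load into an XMM register implicitly zeros bits [127:64], which is
// why the X86vzmovl / X86vzload patterns above can be selected to a plain
// movq without an extra zeroing instruction.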
// Moving from XMM to XMM while clearing the upper 64 bits. Note: there is a
// bug in the IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
3037 let AddedComplexity = 15 in
3038 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3039 "vmovq\t{$src, $dst|$dst, $src}",
3040 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3041 XS, VEX, Requires<[HasAVX]>;
3042 let AddedComplexity = 15 in
3043 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3044 "movq\t{$src, $dst|$dst, $src}",
3045 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3046 XS, Requires<[HasSSE2]>;
3048 let AddedComplexity = 20 in
3049 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3050 "vmovq\t{$src, $dst|$dst, $src}",
3051 [(set VR128:$dst, (v2i64 (X86vzmovl
3052 (loadv2i64 addr:$src))))]>,
3053 XS, VEX, Requires<[HasAVX]>;
3054 let AddedComplexity = 20 in {
3055 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3056 "movq\t{$src, $dst|$dst, $src}",
3057 [(set VR128:$dst, (v2i64 (X86vzmovl
3058 (loadv2i64 addr:$src))))]>,
3059 XS, Requires<[HasSSE2]>;
3061 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
          (MOVZPQILo2PQIrm addr:$src)>;
}
3065 // Instructions to match in the assembler
3066 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3067 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3068 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3069 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3070 // Recognize "movd" with GR64 destination, but encode as a "movq"
3071 def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3072 "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3074 // Instructions for the disassembler
// xr = XMM register
// xm = mem64
3078 let Predicates = [HasAVX] in
3079 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3080 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3081 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3082 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3084 //===---------------------------------------------------------------------===//
3085 // SSE2 - Misc Instructions
3086 //===---------------------------------------------------------------------===//
3089 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3090 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3091 TB, Requires<[HasSSE2]>;
3093 // Load, store, and memory fence
3094 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3095 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3096 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3097 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3098 def : Pat<(X86LFence), (LFENCE)>;
3099 def : Pat<(X86MFence), (MFENCE)>;
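// lfence serializes loads and mfence serializes both loads and stores
// against earlier memory operations; sfence (defined with SSE1) covers the
// store-only case.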
3102 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3103 // was introduced with SSE2, it's backward compatible.
3104 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
// Alias instruction that maps an all-ones vector to pcmpeqd for SSE.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-ones value if folding it would be beneficial.
3109 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3110 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3111 // FIXME: Change encoding to pseudo.
3112 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3113 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
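// pcmpeqd with identical operands compares equal in every lane, so encoding
// 0x76 materializes the all-ones vector in a register without touching
// memory.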
3115 //===---------------------------------------------------------------------===//
3116 // SSE3 - Conversion Instructions
3117 //===---------------------------------------------------------------------===//
3119 // Convert Packed Double FP to Packed DW Integers
3120 let Predicates = [HasAVX] in {
3121 // The assembler can recognize rr 256-bit instructions by seeing a ymm
3122 // register, but the same isn't true when using memory operands instead.
3123 // Provide other assembly rr and rm forms to address this explicitly.
3124 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3125 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3126 def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3127 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
// XMM only
def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3131 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3132 def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3133 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
// YMM only
def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3137 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
3138 def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
3139 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
3142 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3143 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3144 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3145 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3147 // Convert Packed DW Integers to Packed Double FP
3148 let Predicates = [HasAVX] in {
3149 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3150 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3151 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3152 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3153 def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
3154 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTDQ2PDYrr  : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                     "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
}
3159 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3160 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3161 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3162 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3164 // AVX 256-bit register conversion intrinsics
3165 def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
3166 (VCVTDQ2PDYrr VR128:$src)>;
3167 def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
3168 (VCVTDQ2PDYrm addr:$src)>;
3170 def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
3171 (VCVTPD2DQYrr VR256:$src)>;
3172 def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
3173 (VCVTPD2DQYrm addr:$src)>;
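// cvtdq2pd widens, so even the 256-bit form reads only a 128-bit source
// (4 x i32 -> 4 x f64), while cvtpd2dq narrows and writes only a 128-bit
// result; that asymmetry is what the X/Y assembly forms above disambiguate.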
3175 //===---------------------------------------------------------------------===//
3176 // SSE3 - Move Instructions
3177 //===---------------------------------------------------------------------===//
3179 // Replicate Single FP
3180 multiclass sse3_replicate_sfp<bits<8> op, PatFrag rep_frag, string OpcodeStr> {
3181 def rr : S3SI<op, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3182 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3183 [(set VR128:$dst, (v4f32 (rep_frag
3184 VR128:$src, (undef))))]>;
3185 def rm : S3SI<op, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3186 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (rep_frag
                                       (memopv4f32 addr:$src), (undef)))]>;
}
multiclass sse3_replicate_sfp_y<bits<8> op, PatFrag rep_frag,
                                string OpcodeStr> {
def rr : S3SI<op, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3194 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
3195 def rm : S3SI<op, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
      !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
}
3199 let Predicates = [HasAVX] in {
3200 // FIXME: Merge above classes when we have patterns for the ymm version
3201 defm VMOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "vmovshdup">, VEX;
3202 defm VMOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "vmovsldup">, VEX;
3203 defm VMOVSHDUPY : sse3_replicate_sfp_y<0x16, movshdup, "vmovshdup">, VEX;
defm VMOVSLDUPY : sse3_replicate_sfp_y<0x12, movsldup, "vmovsldup">, VEX;
}

defm MOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "movshdup">;
3207 defm MOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "movsldup">;
3209 // Replicate Double FP
3210 multiclass sse3_replicate_dfp<string OpcodeStr> {
3211 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3212 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3213 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
def rm  : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
                                      (undef))))]>;
}
multiclass sse3_replicate_dfp_y<string OpcodeStr> {
def rr  : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    []>;
def rm  : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    []>;
}
3230 let Predicates = [HasAVX] in {
3231 // FIXME: Merge above classes when we have patterns for the ymm version
3232 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
}
defm MOVDDUP : sse3_replicate_dfp<"movddup">;
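// movddup broadcasts the low double of the source into both result lanes
// (<a, b> becomes <a, a>), which is why the memory form above loads only
// 64 bits (f64mem).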
3237 // Move Unaligned Integer
3238 let Predicates = [HasAVX] in {
3239 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3240 "vlddqu\t{$src, $dst|$dst, $src}",
3241 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
3242 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3243 "vlddqu\t{$src, $dst|$dst, $src}",
               [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
}
3246 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3247 "lddqu\t{$src, $dst|$dst, $src}",
3248 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
                   (undef)),
          (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3254 // Several Move patterns
3255 let AddedComplexity = 5 in {
3256 def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
3257 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3258 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3259 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3260 def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
3261 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3262 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
          (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
}
3266 // vector_shuffle v1, <undef> <1, 1, 3, 3>
3267 let AddedComplexity = 15 in
3268 def : Pat<(v4i32 (movshdup VR128:$src, (undef))),
3269 (MOVSHDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3270 let AddedComplexity = 20 in
3271 def : Pat<(v4i32 (movshdup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3272 (MOVSHDUPrm addr:$src)>, Requires<[HasSSE3]>;
3274 // vector_shuffle v1, <undef> <0, 0, 2, 2>
3275 let AddedComplexity = 15 in
3276 def : Pat<(v4i32 (movsldup VR128:$src, (undef))),
3277 (MOVSLDUPrr VR128:$src)>, Requires<[HasSSE3]>;
3278 let AddedComplexity = 20 in
3279 def : Pat<(v4i32 (movsldup (bc_v4i32 (memopv2i64 addr:$src)), (undef))),
3280 (MOVSLDUPrm addr:$src)>, Requires<[HasSSE3]>;
3282 //===---------------------------------------------------------------------===//
3283 // SSE3 - Arithmetic
3284 //===---------------------------------------------------------------------===//
multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, bit Is2Addr = 1> {
  def rr : I<0xD0, MRMSrcReg,
       (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
  def rm : I<0xD0, MRMSrcMem,
       (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
}
3302 let Predicates = [HasAVX],
3303 ExeDomain = SSEPackedDouble in {
3304 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
3305 f128mem, 0>, TB, XD, VEX_4V;
3306 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
3307 f128mem, 0>, TB, OpSize, VEX_4V;
3308 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
3309 f256mem, 0>, TB, XD, VEX_4V;
  defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
                                f256mem, 0>, TB, OpSize, VEX_4V;
}
3313 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
3314 ExeDomain = SSEPackedDouble in {
  defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
                              f128mem>, TB, XD;
  defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
                              f128mem>, TB, OpSize;
}
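// addsubps subtracts in the even lanes and adds in the odd lanes: for
// <a0,a1,a2,a3> and <b0,b1,b2,b3> it yields <a0-b0, a1+b1, a2-b2, a3+b3>,
// the inner step of a complex multiply.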
3321 //===---------------------------------------------------------------------===//
3322 // SSE3 Instructions
//===---------------------------------------------------------------------===//

// Horizontal ops
multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
                   X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
  def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;

  def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
}
multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
                  X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
  def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;

  def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
}
3355 let Predicates = [HasAVX] in {
3356 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
3357 int_x86_sse3_hadd_ps, 0>, VEX_4V;
3358 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
3359 int_x86_sse3_hadd_pd, 0>, VEX_4V;
3360 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
3361 int_x86_sse3_hsub_ps, 0>, VEX_4V;
3362 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
3363 int_x86_sse3_hsub_pd, 0>, VEX_4V;
3364 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
3365 int_x86_avx_hadd_ps_256, 0>, VEX_4V;
3366 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
3367 int_x86_avx_hadd_pd_256, 0>, VEX_4V;
3368 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
3369 int_x86_avx_hsub_ps_256, 0>, VEX_4V;
  defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
                          int_x86_avx_hsub_pd_256, 0>, VEX_4V;
}
3374 let Constraints = "$src1 = $dst" in {
3375 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
3376 int_x86_sse3_hadd_ps>;
3377 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
3378 int_x86_sse3_hadd_pd>;
3379 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
3380 int_x86_sse3_hsub_ps>;
  defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
                       int_x86_sse3_hsub_pd>;
}
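// The horizontal ops reduce adjacent pairs: haddps on <a0,a1,a2,a3> and
// <b0,b1,b2,b3> produces <a0+a1, a2+a3, b0+b1, b2+b3>, and hsub subtracts
// the second element of each pair instead.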
3385 //===---------------------------------------------------------------------===//
3386 // SSSE3 - Packed Absolute Instructions
3387 //===---------------------------------------------------------------------===//
/// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
                            PatFrag mem_frag128, Intrinsic IntId128> {
  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>,
                    OpSize;

  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
}
3407 let Predicates = [HasAVX] in {
3408 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
3409 int_x86_ssse3_pabs_b_128>, VEX;
3410 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
3411 int_x86_ssse3_pabs_w_128>, VEX;
  defm VPABSD  : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
                                  int_x86_ssse3_pabs_d_128>, VEX;
}
3416 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
3417 int_x86_ssse3_pabs_b_128>;
3418 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
3419 int_x86_ssse3_pabs_w_128>;
3420 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
3421 int_x86_ssse3_pabs_d_128>;
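// pabs* takes the per-element absolute value; the most negative input
// (e.g. -128 for pabsb) has no positive counterpart and wraps to itself.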
3423 //===---------------------------------------------------------------------===//
3424 // SSSE3 - Packed Binary Operator Instructions
3425 //===---------------------------------------------------------------------===//
/// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
                             PatFrag mem_frag128, Intrinsic IntId128,
                             bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       OpSize;
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
3449 let Predicates = [HasAVX] in {
3450 let isCommutable = 0 in {
3451 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
3452 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
3453 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
3454 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
3455 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
3456 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
3457 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
3458 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
3459 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
3460 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
3461 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
3462 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
3463 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
3464 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
3465 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
3466 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
3467 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
3468 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
3469 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
3470 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
  defm VPSIGND    : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
                                      int_x86_ssse3_psign_d_128, 0>, VEX_4V;
}
3474 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
                                      int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
}
3478 // None of these have i8 immediate fields.
3479 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
3480 let isCommutable = 0 in {
3481 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
3482 int_x86_ssse3_phadd_w_128>;
3483 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
3484 int_x86_ssse3_phadd_d_128>;
3485 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
3486 int_x86_ssse3_phadd_sw_128>;
3487 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
3488 int_x86_ssse3_phsub_w_128>;
3489 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
3490 int_x86_ssse3_phsub_d_128>;
3491 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
3492 int_x86_ssse3_phsub_sw_128>;
3493 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
3494 int_x86_ssse3_pmadd_ub_sw_128>;
3495 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
3496 int_x86_ssse3_pshuf_b_128>;
3497 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
3498 int_x86_ssse3_psign_b_128>;
3499 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
3500 int_x86_ssse3_psign_w_128>;
  defm PSIGND    : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
                                     int_x86_ssse3_psign_d_128>;
}
3504 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
                                    int_x86_ssse3_pmul_hr_sw_128>;
}
3508 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
3509 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
3510 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
3511 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
3513 def : Pat<(X86psignb VR128:$src1, VR128:$src2),
3514 (PSIGNBrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
3515 def : Pat<(X86psignw VR128:$src1, VR128:$src2),
3516 (PSIGNWrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
3517 def : Pat<(X86psignd VR128:$src1, VR128:$src2),
3518 (PSIGNDrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
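// psign negates, keeps, or zeroes each element of $src1 according to
// whether the matching element of $src2 is negative, positive, or zero.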
3520 //===---------------------------------------------------------------------===//
3521 // SSSE3 - Packed Align Instruction Patterns
3522 //===---------------------------------------------------------------------===//
multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
  def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      []>, OpSize;
  def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      []>, OpSize;
}
3541 let Predicates = [HasAVX] in
3542 defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
3543 let Constraints = "$src1 = $dst" in
3544 defm PALIGN : ssse3_palign<"palignr">;
3546 let AddedComplexity = 5 in {
3547 def : Pat<(v4i32 (palign:$src3 VR128:$src1, VR128:$src2)),
3548 (PALIGNR128rr VR128:$src2, VR128:$src1,
3549 (SHUFFLE_get_palign_imm VR128:$src3))>,
3550 Requires<[HasSSSE3]>;
3551 def : Pat<(v4f32 (palign:$src3 VR128:$src1, VR128:$src2)),
3552 (PALIGNR128rr VR128:$src2, VR128:$src1,
3553 (SHUFFLE_get_palign_imm VR128:$src3))>,
3554 Requires<[HasSSSE3]>;
3555 def : Pat<(v8i16 (palign:$src3 VR128:$src1, VR128:$src2)),
3556 (PALIGNR128rr VR128:$src2, VR128:$src1,
3557 (SHUFFLE_get_palign_imm VR128:$src3))>,
3558 Requires<[HasSSSE3]>;
3559 def : Pat<(v16i8 (palign:$src3 VR128:$src1, VR128:$src2)),
3560 (PALIGNR128rr VR128:$src2, VR128:$src1,
3561 (SHUFFLE_get_palign_imm VR128:$src3))>,
        Requires<[HasSSSE3]>;
}
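// palignr concatenates $src2:$src1 into one 256-bit value and extracts a
// byte-aligned 128-bit window, which is why the patterns above swap the two
// operands and derive the byte offset from the shuffle mask.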
3565 //===---------------------------------------------------------------------===//
3566 // SSSE3 Misc Instructions
3567 //===---------------------------------------------------------------------===//
3569 // Thread synchronization
3570 let usesCustomInserter = 1 in {
3571 def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
3572 [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>;
  def MWAIT : PseudoI<(outs), (ins GR32:$src1, GR32:$src2),
                      [(int_x86_sse3_mwait GR32:$src1, GR32:$src2)]>;
}
3577 let Uses = [EAX, ECX, EDX] in
3578 def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>, TB,
3579 Requires<[HasSSE3]>;
3580 let Uses = [ECX, EAX] in
3581 def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", []>, TB,
3582 Requires<[HasSSE3]>;
3584 def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
3585 def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;
3587 def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>,
3588 Requires<[In32BitMode]>;
3589 def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>,
3590 Requires<[In64BitMode]>;
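// monitor takes the linear address to watch in (E|R)AX with hints in ECX
// and EDX; mwait waits for a store to that range, with hints in ECX and
// EAX, hence the implicit-use lists above.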
3592 //===---------------------------------------------------------------------===//
3593 // Non-Instruction Patterns
3594 //===---------------------------------------------------------------------===//
// extload f32 -> f64.  This matches load+fextend because we have a hack in
// the isel (PreprocessForFPConvert) that can introduce loads after dag
// combine.
// Since these loads aren't folded into the fextend, we have to match it
// explicitly here.
let Predicates = [HasSSE2] in
3602 def : Pat<(fextend (loadf32 addr:$src)),
3603 (CVTSS2SDrm addr:$src)>;
3605 // FIXME: According to the intel manual, DEST[127:64] <- SRC1[127:64], while
3606 // in the non-AVX version bits 127:64 aren't touched. Find a better way to
3607 // represent this instead of always zeroing SRC1. One possible solution is
3608 // to represent the instruction w/ something similar as the "$src1 = $dst"
3609 // constraint but without the tied operands.
3610 let Predicates = [HasAVX] in
def : Pat<(fextend (loadf32 addr:$src)),
          (VCVTSS2SDrm (f32 (EXTRACT_SUBREG (AVX_SET0PS), sub_ss)),
                       addr:$src)>;
3616 let Predicates = [HasXMMInt] in {
3617 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
3618 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
3619 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
3620 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
3621 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
3622 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
3623 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
3624 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
3625 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
3626 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
3627 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
3628 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
3629 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
3630 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
3631 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
3632 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
3633 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
3634 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
3635 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
3636 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
3637 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
3638 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
3639 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
3640 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
3641 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
3642 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
3643 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
3644 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
3645 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}
3649 let Predicates = [HasAVX] in {
3650 def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
3651 def : Pat<(v4f64 (bitconvert (v4i64 VR256:$src))), (v4f64 VR256:$src)>;
3652 def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
3653 def : Pat<(v4i64 (bitconvert (v8f32 VR256:$src))), (v4i64 VR256:$src)>;
def : Pat<(v4i64 (bitconvert (v4f64 VR256:$src))), (v4i64 VR256:$src)>;
}
3657 // Move scalar to XMM zero-extended
3658 // movd to XMM register zero-extends
3659 let AddedComplexity = 15 in {
3660 // Zeroing a VR128 then do a MOVS{S|D} to the lower bits.
3661 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
3662 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
3663 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
3664 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
3665 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
3666 (MOVSSrr (v4f32 (V_SET0PS)),
3667 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
3668 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
3669 (MOVSSrr (v4i32 (V_SET0PI)),
            (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
}
3673 // Splat v2f64 / v2i64
3674 let AddedComplexity = 10 in {
3675 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
3676 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3677 def : Pat<(unpckh (v2f64 VR128:$src), (undef)),
3678 (UNPCKHPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3679 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
3680 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3681 def : Pat<(unpckh (v2i64 VR128:$src), (undef)),
          (PUNPCKHQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}
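// Splatting either half of a v2f64/v2i64 is just an unpack of the register
// with itself: unpcklpd %xmm0, %xmm0 copies lane 0 into both lanes,
// unpckhpd lane 1.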
3685 // Special unary SHUFPSrri case.
3686 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
3687 (SHUFPSrri VR128:$src1, VR128:$src1,
3688 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3689 let AddedComplexity = 5 in
3690 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3691 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3692 Requires<[HasSSE2]>;
3693 // Special unary SHUFPDrri case.
3694 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
3695 (SHUFPDrri VR128:$src1, VR128:$src1,
3696 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3697 Requires<[HasSSE2]>;
3698 // Special unary SHUFPDrri case.
3699 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
3700 (SHUFPDrri VR128:$src1, VR128:$src1,
3701 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3702 Requires<[HasSSE2]>;
3703 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3704 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3705 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3706 Requires<[HasSSE2]>;
3708 // Special binary v4i32 shuffle cases with SHUFPS.
3709 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
3710 (SHUFPSrri VR128:$src1, VR128:$src2,
3711 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3712 Requires<[HasSSE2]>;
3713 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)))),
3714 (SHUFPSrmi VR128:$src1, addr:$src2,
3715 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3716 Requires<[HasSSE2]>;
3717 // Special binary v2i64 shuffle cases using SHUFPDrri.
3718 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
3719 (SHUFPDrri VR128:$src1, VR128:$src2,
3720 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3721 Requires<[HasSSE2]>;
3723 // vector_shuffle v1, <undef>, <0, 0, 1, 1, ...>
3724 let AddedComplexity = 15 in {
3725 def : Pat<(v4i32 (unpckl_undef:$src2 VR128:$src, (undef))),
3726 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3727 Requires<[OptForSpeed, HasSSE2]>;
3728 def : Pat<(v4f32 (unpckl_undef:$src2 VR128:$src, (undef))),
3729 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
          Requires<[OptForSpeed, HasSSE2]>;
}
3732 let AddedComplexity = 10 in {
3733 def : Pat<(v4f32 (unpckl_undef VR128:$src, (undef))),
3734 (UNPCKLPSrr VR128:$src, VR128:$src)>;
3735 def : Pat<(v16i8 (unpckl_undef VR128:$src, (undef))),
3736 (PUNPCKLBWrr VR128:$src, VR128:$src)>;
3737 def : Pat<(v8i16 (unpckl_undef VR128:$src, (undef))),
3738 (PUNPCKLWDrr VR128:$src, VR128:$src)>;
3739 def : Pat<(v4i32 (unpckl_undef VR128:$src, (undef))),
          (PUNPCKLDQrr VR128:$src, VR128:$src)>;
}
3743 // vector_shuffle v1, <undef>, <2, 2, 3, 3, ...>
3744 let AddedComplexity = 15 in {
3745 def : Pat<(v4i32 (unpckh_undef:$src2 VR128:$src, (undef))),
3746 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3747 Requires<[OptForSpeed, HasSSE2]>;
3748 def : Pat<(v4f32 (unpckh_undef:$src2 VR128:$src, (undef))),
3749 (PSHUFDri VR128:$src, (SHUFFLE_get_shuf_imm VR128:$src2))>,
          Requires<[OptForSpeed, HasSSE2]>;
}
3752 let AddedComplexity = 10 in {
3753 def : Pat<(v4f32 (unpckh_undef VR128:$src, (undef))),
3754 (UNPCKHPSrr VR128:$src, VR128:$src)>;
3755 def : Pat<(v16i8 (unpckh_undef VR128:$src, (undef))),
3756 (PUNPCKHBWrr VR128:$src, VR128:$src)>;
3757 def : Pat<(v8i16 (unpckh_undef VR128:$src, (undef))),
3758 (PUNPCKHWDrr VR128:$src, VR128:$src)>;
3759 def : Pat<(v4i32 (unpckh_undef VR128:$src, (undef))),
          (PUNPCKHDQrr VR128:$src, VR128:$src)>;
}
3763 let AddedComplexity = 20 in {
3764 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
3765 def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
3766 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
3768 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
3769 def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
3770 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
3772 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
3773 def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
3774 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3775 def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
          (MOVHLPSrr VR128:$src1, VR128:$src1)>;
}
3779 let AddedComplexity = 20 in {
3780 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
3781 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
3782 (MOVLPSrm VR128:$src1, addr:$src2)>;
3783 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
3784 (MOVLPDrm VR128:$src1, addr:$src2)>;
3785 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
3786 (MOVLPSrm VR128:$src1, addr:$src2)>;
3787 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
}
3791 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
3792 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3793 (MOVLPSmr addr:$src1, VR128:$src2)>;
3794 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3795 (MOVLPDmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)),
                               VR128:$src2)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;
3799 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3800 (MOVLPDmr addr:$src1, VR128:$src2)>;
let AddedComplexity = 15 in {
// Setting the lowest element in the vector.
def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4i32 VR128:$src1),
                   (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2i64 VR128:$src1),
                   (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;

// vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
          Requires<[HasSSE2]>;
def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
          Requires<[HasSSE2]>;
}

// vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
// fall back to this for SSE1)
def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
          (SHUFPSrri VR128:$src2, VR128:$src1,
                     (SHUFFLE_get_shuf_imm VR128:$src3))>;
// Set lowest element and zero upper elements.
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
          (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;

// vector -> vector casts
def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
          (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
          (CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
// Use movaps / movups for SSE integer load / store (one byte shorter).
// The instructions selected below are then converted to MOVDQA/MOVDQU
// during the SSE domain pass.
let Predicates = [HasSSE1] in {
  def : Pat<(alignedloadv4i32 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv4i32 addr:$src),
            (MOVUPSrm addr:$src)>;
  def : Pat<(alignedloadv2i64 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (MOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
}
// Use vmovaps/vmovups for AVX integer load/store.
let Predicates = [HasAVX] in {
  // 128-bit load/store
  def : Pat<(alignedloadv4i32 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv4i32 addr:$src),
            (VMOVUPSrm addr:$src)>;
  def : Pat<(alignedloadv2i64 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (VMOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;

  // 256-bit load/store
  def : Pat<(alignedloadv4i64 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv4i64 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedloadv8i32 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv8i32 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedstore (v4i64 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore (v8i32 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v4i64 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v8i32 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Move with Sign/Zero Extend
//===----------------------------------------------------------------------===//
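
// These multiclasses pair a register form with a memory form whose load reads
// only the low 64 bits; the load is matched as an i64 load wrapped in
// scalar_to_vector and bitconverted to v2i64.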
multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst,
                   (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
                 OpSize;
}
let Predicates = [HasAVX] in {
defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
                                     VEX;
defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
                                     VEX;
defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
                                     VEX;
defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
                                     VEX;
defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
                                     VEX;
defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
                                     VEX;
}
defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
// Common patterns involving scalar load.
def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
          (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
          (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
          (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
          (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
          (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
          (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
          (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
          (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
          (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
          (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
          (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
          (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
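
// The vzmovl/vzload wrappers above match 64-bit loads whose upper half is
// zeroed; since the rm instructions read only 64 bits of memory anyway, the
// load can be folded the same way.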
multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst,
                   (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
                 OpSize;
}
let Predicates = [HasAVX] in {
defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
                                     VEX;
defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
                                     VEX;
defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
                                     VEX;
defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
                                     VEX;
}

defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
          (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
          (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
          (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
          (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  // Expecting an i16 load any-extended to an i32 value.
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId (bitconvert
                   (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
                 OpSize;
}
let Predicates = [HasAVX] in {
defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
                                     VEX;
defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
                                     VEX;
}

defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbq
            (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
          (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbq
            (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
          (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Extract Instructions
//===----------------------------------------------------------------------===//
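
// Note: the mr forms of the 8- and 16-bit extracts below carry empty
// patterns; as the FIXME comments explain, an AssertZext between the extract
// and the truncating store blocks the natural store pattern.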
/// SS41I_extract8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
                 OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 []>, OpSize;
// FIXME:
// There's an AssertZext in the way of writing the store pattern
// (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}

let Predicates = [HasAVX] in {
  defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
  def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
         (ins VR128:$src1, i32i8imm:$src2),
         "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
}

defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 []>, OpSize;
// FIXME:
// There's an AssertZext in the way of writing the store pattern
// (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}

let Predicates = [HasAVX] in
  defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;

defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst,
                  (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
                          addr:$dst)]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;

defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR64:$dst,
                  (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
                          addr:$dst)]>, OpSize, REX_W;
}

let Predicates = [HasAVX] in
  defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;

defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
/// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
/// destination
multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst,
                    (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
                 OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
                          addr:$dst)]>, OpSize;
}

let Predicates = [HasAVX] in {
  defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
  def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
                  (ins VR128:$src1, i32i8imm:$src2),
                  "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
                  []>, OpSize, VEX;
}

defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;

// Also match an EXTRACTPS store when the store is done as f32 instead of i32.
def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
                                              imm:$src2))),
                 addr:$dst),
          (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
          Requires<[HasSSE41]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Insert Instructions
//===----------------------------------------------------------------------===//
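
// Each insert below has a register form and a form that folds a scalar load.
// Clearing Is2Addr (the AVX instantiations) switches the asm string to the
// three-operand VEX syntax.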
multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
                   imm:$src3))]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
      OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
                          imm:$src3)))]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
      OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
                          imm:$src3)))]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
let Constraints = "$src1 = $dst" in
  defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
// insertps has a few different modes: the first two forms below are optimized
// inserts that won't zero arbitrary elements in the destination vector; the
// intrinsic form that follows can zero arbitrary elements in the target
// vector.
multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
      OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, f32mem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86insrtps VR128:$src1,
                   (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
                    imm:$src3))]>, OpSize;
}

let Constraints = "$src1 = $dst" in
  defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
let Predicates = [HasAVX] in
  defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;

def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
          (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
          Requires<[HasAVX]>;
def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
          (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
          Requires<[HasSSE41]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Round Instructions
//===----------------------------------------------------------------------===//
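
// The packed round forms take a source and the rounding-control immediate;
// the scalar forms below additionally take a first source whose upper
// elements pass through to the result.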
multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
                            X86MemOperand x86memop, RegisterClass RC,
                            PatFrag mem_frag32, PatFrag mem_frag64,
                            Intrinsic V4F32Int, Intrinsic V2F64Int> {
  // Vector intrinsic operation, reg
  def PSr : SS4AIi8<opcps, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, mem
  def PSm : Ii8<opcps, MRMSrcMem,
                    (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                          (V4F32Int (mem_frag32 addr:$src1), imm:$src2))]>,
                    TA, OpSize,
                    Requires<[HasSSE41]>;

  // Vector intrinsic operation, reg
  def PDr : SS4AIi8<opcpd, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, mem
  def PDm : SS4AIi8<opcpd, MRMSrcMem,
                    (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                          (V2F64Int (mem_frag64 addr:$src1), imm:$src2))]>,
                    OpSize;
}
multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
                   RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
  // Vector intrinsic operation, reg
  def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, OpSize;

  // Vector intrinsic operation, mem
  def PSm_AVX : Ii8<opcps, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, TA, OpSize, Requires<[HasSSE41]>;

  // Vector intrinsic operation, reg
  def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, OpSize;

  // Vector intrinsic operation, mem
  def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, OpSize;
}
multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
                             string OpcodeStr,
                             Intrinsic F32Int,
                             Intrinsic F64Int, bit Is2Addr = 1> {
  // Intrinsic operation, reg.
  def SSr : SS4AIi8<opcss, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
        OpSize;

  // Intrinsic operation, mem.
  def SSm : SS4AIi8<opcss, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst,
             (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
        OpSize;

  // Intrinsic operation, reg.
  def SDr : SS4AIi8<opcsd, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
        OpSize;

  // Intrinsic operation, mem.
  def SDm : SS4AIi8<opcsd, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst,
              (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
        OpSize;
}
multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
                                   string OpcodeStr> {
  // Intrinsic operation, reg.
  def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;

  // Intrinsic operation, mem.
  def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;

  // Intrinsic operation, reg.
  def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;

  // Intrinsic operation, mem.
  def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;
}
// FP round - roundss, roundps, roundsd, roundpd
let Predicates = [HasAVX] in {
  // Intrinsic form
  defm VROUND  : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
                                  memopv4f32, memopv2f64,
                                  int_x86_sse41_round_ps,
                                  int_x86_sse41_round_pd>, VEX;
  defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
                                  memopv8f32, memopv4f64,
                                  int_x86_avx_round_ps_256,
                                  int_x86_avx_round_pd_256>, VEX;
  defm VROUND  : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
                                   int_x86_sse41_round_ss,
                                   int_x86_sse41_round_sd, 0>, VEX_4V;

  // Instructions for the assembler
  defm VROUND  : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
                                        VEX;
  defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
                                        VEX;
  defm VROUND  : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
}

defm ROUND  : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
                               memopv4f32, memopv2f64,
                               int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
let Constraints = "$src1 = $dst" in
defm ROUND  : sse41_fp_binop_rm<0x0A, 0x0B, "round",
                                int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Bit Test
//===----------------------------------------------------------------------===//

// ptest instruction: we lower to this in X86ISelLowering, primarily from the
// Intel intrinsic that corresponds to it.
let Defs = [EFLAGS], Predicates = [HasAVX] in {
def VPTESTrr  : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
                OpSize, VEX;
def VPTESTrm  : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
                OpSize, VEX;

def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
                OpSize, VEX;
def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
                OpSize, VEX;
}

let Defs = [EFLAGS] in {
def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
              "ptest \t{$src2, $src1|$src1, $src2}",
              [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
              OpSize;
def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
              "ptest \t{$src2, $src1|$src1, $src2}",
              [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
              OpSize;
}
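
// PTEST only sets EFLAGS (ZF from the AND of its operands, CF from the ANDN)
// and writes no vector result, which is why the defs above have no register
// outs.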
// The bit test instructions below are AVX only
multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
  def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
            [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
  def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
            [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
            OpSize, VEX;
}

let Defs = [EFLAGS], Predicates = [HasAVX] in {
defm VTESTPS  : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
defm VTESTPD  : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Misc Instructions
//===----------------------------------------------------------------------===//

def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
                   "popcnt{w}\t{$src, $dst|$dst, $src}",
                   [(set GR16:$dst, (ctpop GR16:$src))]>, OpSize, XS;
def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
                   "popcnt{w}\t{$src, $dst|$dst, $src}",
                   [(set GR16:$dst, (ctpop (loadi16 addr:$src)))]>, OpSize, XS;

def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
                   "popcnt{l}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (ctpop GR32:$src))]>, XS;
def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
                   "popcnt{l}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (ctpop (loadi32 addr:$src)))]>, XS;

def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (ctpop GR64:$src))]>, XS;
def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (ctpop (loadi64 addr:$src)))]>, XS;
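
// All POPCNT forms use the F3 (XS) prefix; the three widths differ only in
// operand size (0x66 prefix for the 16-bit form, REX.W for the 64-bit form).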
// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId128> {
  def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
  def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
}

let Predicates = [HasAVX] in
defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
                                          int_x86_sse41_phminposuw>, VEX;
defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
                                         int_x86_sse41_phminposuw>;
/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId128, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
let Predicates = [HasAVX] in {
  let isCommutable = 0 in
  defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
                                                         0>, VEX_4V;
  defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
                                                       0>, VEX_4V;
  defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
                                                     0>, VEX_4V;
  defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
                                                     0>, VEX_4V;
  defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
                                                     0>, VEX_4V;
  defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
                                                     0>, VEX_4V;
  defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
                                                     0>, VEX_4V;
  defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
                                                     0>, VEX_4V;
  defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
                                                     0>, VEX_4V;
  defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
                                                     0>, VEX_4V;
  defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
                                                     0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  let isCommutable = 0 in
  defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
  defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
  defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
  defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
  defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
  defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
  defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
  defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
  defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
  defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
  defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
}
def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
          (PCMPEQQrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
          (PCMPEQQrm VR128:$src1, addr:$src2)>;
/// SS48I_binop_rm - Simple SSE41 binary operator.
multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src1, VR128:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr,
                                "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
                 OpSize;
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                 (ins VR128:$src1, i128mem:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr,
                                "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set VR128:$dst, (OpNode VR128:$src1,
                                    (bc_v4i32 (memopv2i64 addr:$src2))))]>,
                 OpSize;
}

let Predicates = [HasAVX] in
  defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
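
// Unlike PMULDQ above, PMULLD is a full 32x32->32 vector multiply, so it can
// be selected directly from the generic mul node on v4i32.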
/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
                 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
                 X86MemOperand x86memop, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
        OpSize;
  def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst,
          (IntId RC:$src1,
           (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
        OpSize;
}
let Predicates = [HasAVX] in {
  let isCommutable = 0 in {
  defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
                                      VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
                                      VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
            int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
  defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
            int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
  defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
                                      VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
                                      VR128, memopv16i8, i128mem, 0>, VEX_4V;
  }
  defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
                                   VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
                                   VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
                                    VR256, memopv32i8, i256mem, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  let isCommutable = 0 in {
  defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
                                     VR128, memopv16i8, i128mem>;
  defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
                                     VR128, memopv16i8, i128mem>;
  defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
                                     VR128, memopv16i8, i128mem>;
  defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
                                     VR128, memopv16i8, i128mem>;
  }
  defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
                                  VR128, memopv16i8, i128mem>;
  defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
                                  VR128, memopv16i8, i128mem>;
}
/// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
let Predicates = [HasAVX] in {
  multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
                                      RegisterClass RC, X86MemOperand x86memop,
                                      PatFrag mem_frag, Intrinsic IntId> {
    def rr : I<opc, MRMSrcReg, (outs RC:$dst),
                    (ins RC:$src1, RC:$src2, RC:$src3),
                    !strconcat(OpcodeStr,
                     "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                    [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
                    SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;

    def rm : I<opc, MRMSrcMem, (outs RC:$dst),
                    (ins RC:$src1, x86memop:$src2, RC:$src3),
                    !strconcat(OpcodeStr,
                     "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                    [(set RC:$dst,
                      (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
                             RC:$src3))],
                    SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
  }
}

defm VBLENDVPD  : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_blendvpd>;
defm VBLENDVPS  : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_blendvps>;
defm VPBLENDVB  : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_pblendvb>;
defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
                                          memopv32i8, int_x86_avx_blendv_pd_256>;
defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
                                          memopv32i8, int_x86_avx_blendv_ps_256>;
/// SS41I_ternary_int - SSE 4.1 ternary operator
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
  multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
    def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr,
                     "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
                    OpSize;

    def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2),
                    !strconcat(OpcodeStr,
                     "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst,
                      (IntId VR128:$src1,
                       (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
  }
}

defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;

def : Pat<(X86pblendv VR128:$src1, VR128:$src2, XMM0),
          (PBLENDVBrr0 VR128:$src1, VR128:$src2)>;
let Predicates = [HasAVX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vmovntdqa\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                        OpSize, VEX;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "movntdqa\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                       OpSize;
//===----------------------------------------------------------------------===//
// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//

/// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId128, bit Is2Addr = 1> {
  def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       OpSize;
  def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
                                     0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;

def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
          (PCMPGTQrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
          (PCMPGTQrm VR128:$src1, addr:$src2)>;
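
// As with PCMPEQQ in the SSE4.1 section, these extra patterns let a bare
// X86pcmpgtq node select the instruction without going through the intrinsic.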
//===----------------------------------------------------------------------===//
// SSE4.2 - String/text Processing Instructions
//===----------------------------------------------------------------------===//

// Packed Compare Implicit Length Strings, Return Mask
multiclass pseudo_pcmpistrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2, i8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
                                                  imm:$src3))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
                       VR128:$src1, (load addr:$src2), imm:$src3))]>;
}

let Defs = [EFLAGS], usesCustomInserter = 1 in {
  defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
  defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
}

let Defs = [XMM0, EFLAGS], Predicates = [HasAVX] in {
  def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
  def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
}

let Defs = [XMM0, EFLAGS] in {
  def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
  def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
}
// Packed Compare Explicit Length Strings, Return Mask
multiclass pseudo_pcmpestrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src3, i8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                       VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                       VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
}

let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
  defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
  defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
}

let Predicates = [HasAVX],
    Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
  def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
  def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
}

let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
  def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
  def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
}
// Packed Compare Implicit Length Strings, Return Index
let Defs = [ECX, EFLAGS] in {
  multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
    def rr : SS42AI<0x63, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
      [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
       (implicit EFLAGS)]>, OpSize;
    def rm : SS42AI<0x63, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
      [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
       (implicit EFLAGS)]>, OpSize;
  }
}

let Predicates = [HasAVX] in {
defm VPCMPISTRI  : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
                                    VEX;
}

defm PCMPISTRI  : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
// Packed Compare Explicit Length Strings, Return Index
let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
  multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
    def rr : SS42AI<0x61, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
      [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
       (implicit EFLAGS)]>, OpSize;
    def rm : SS42AI<0x61, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
      [(set ECX,
            (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
       (implicit EFLAGS)]>, OpSize;
  }
}

let Predicates = [HasAVX] in {
defm VPCMPESTRI  : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
                                    VEX;
}

defm PCMPESTRI  : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
//===----------------------------------------------------------------------===//
// SSE4.2 - CRC Instructions
//===----------------------------------------------------------------------===//

// No CRC instructions have AVX equivalents

// crc intrinsic instruction
// This set of instructions is rm-form only; the only difference is the size
// of r and m.
let Constraints = "$src1 = $dst" in {
  def CRC32r32m8  : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i8mem:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_8 GR32:$src1,
                         (load addr:$src2)))]>;
  def CRC32r32r8  : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR8:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_8 GR32:$src1, GR8:$src2))]>;
  def CRC32r32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i16mem:$src2),
                      "crc32{w} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_16 GR32:$src1,
                         (load addr:$src2)))]>,
                      OpSize;
  def CRC32r32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR16:$src2),
                      "crc32{w} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_16 GR32:$src1, GR16:$src2))]>,
                      OpSize;
  def CRC32r32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i32mem:$src2),
                      "crc32{l} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_32 GR32:$src1,
                         (load addr:$src2)))]>;
  def CRC32r32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR32:$src2),
                      "crc32{l} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_32 GR32:$src1, GR32:$src2))]>;
  def CRC32r64m8  : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
                      (ins GR64:$src1, i8mem:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                      [(set GR64:$dst,
                        (int_x86_sse42_crc32_64_8 GR64:$src1,
                         (load addr:$src2)))]>,
                      REX_W;
  def CRC32r64r8  : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
                      (ins GR64:$src1, GR8:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                      [(set GR64:$dst,
                        (int_x86_sse42_crc32_64_8 GR64:$src1, GR8:$src2))]>,
                      REX_W;
  def CRC32r64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
                      (ins GR64:$src1, i64mem:$src2),
                      "crc32{q} \t{$src2, $src1|$src1, $src2}",
                      [(set GR64:$dst,
                        (int_x86_sse42_crc32_64_64 GR64:$src1,
                         (load addr:$src2)))]>,
                      REX_W;
  def CRC32r64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2),
                      "crc32{q} \t{$src2, $src1|$src1, $src2}",
                      [(set GR64:$dst,
                        (int_x86_sse42_crc32_64_64 GR64:$src1, GR64:$src2))]>,
                      REX_W;
}
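
// Every CRC32 form accumulates into $src1 (tied to $dst); the {b/w/l/q}
// suffix tracks the width of the register or memory source operand.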
//===----------------------------------------------------------------------===//
// AES-NI Instructions
//===----------------------------------------------------------------------===//

multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
                             Intrinsic IntId128, bit Is2Addr = 1> {
  def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       OpSize;
  def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
// Perform One Round of an AES Encryption/Decryption Flow
let Predicates = [HasAVX, HasAES] in {
  defm VAESENC     : AESI_binop_rm_int<0xDC, "vaesenc",
                       int_x86_aesni_aesenc, 0>, VEX_4V;
  defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
                       int_x86_aesni_aesenclast, 0>, VEX_4V;
  defm VAESDEC     : AESI_binop_rm_int<0xDE, "vaesdec",
                       int_x86_aesni_aesdec, 0>, VEX_4V;
  defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
                       int_x86_aesni_aesdeclast, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  defm AESENC     : AESI_binop_rm_int<0xDC, "aesenc",
                      int_x86_aesni_aesenc>;
  defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
                      int_x86_aesni_aesenclast>;
  defm AESDEC     : AESI_binop_rm_int<0xDE, "aesdec",
                      int_x86_aesni_aesdec>;
  defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
                      int_x86_aesni_aesdeclast>;
}

def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
          (AESENCrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
          (AESENCrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
          (AESENCLASTrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
          (AESENCLASTrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
          (AESDECrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
          (AESDECrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
          (AESDECLASTrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
          (AESDECLASTrm VR128:$src1, addr:$src2)>;
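
// These patterns pick up v2i64-typed intrinsic calls directly, including the
// case where the second operand is a plain (memop addr) rather than the
// bitconverted form used inside the multiclass.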
// Perform the AES InvMixColumn Transformation
let Predicates = [HasAVX, HasAES] in {
  def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst,
        (int_x86_aesni_aesimc VR128:$src1))]>,
      OpSize, VEX;
  def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst,
        (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
      OpSize, VEX;
}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
    (ins VR128:$src1),
    "aesimc\t{$src1, $dst|$dst, $src1}",
    [(set VR128:$dst,
      (int_x86_aesni_aesimc VR128:$src1))]>,
    OpSize;
def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
    (ins i128mem:$src1),
    "aesimc\t{$src1, $dst|$dst, $src1}",
    [(set VR128:$dst,
      (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
    OpSize;
// AES Round Key Generation Assist
let Predicates = [HasAVX, HasAES] in {
  def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
      OpSize, VEX;
  def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
                                       imm:$src2))]>,
      OpSize, VEX;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
    (ins VR128:$src1, i8imm:$src2),
    "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
    [(set VR128:$dst,
      (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
    OpSize;
def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
    (ins i128mem:$src1, i8imm:$src2),
    "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
    [(set VR128:$dst,
      (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
                                     imm:$src2))]>,
    OpSize;
//===----------------------------------------------------------------------===//
// CLMUL Instructions
//===----------------------------------------------------------------------===//

// Carry-less Multiplication instructions
let Constraints = "$src1 = $dst" in {
def PCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           []>;

def PCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           []>;
}

// AVX carry-less Multiplication instructions
def VPCLMULQDQrr : AVXCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           []>;

def VPCLMULQDQrm : AVXCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           []>;

multiclass pclmul_alias<string asm, int immop> {
  def : InstAlias<!strconcat("pclmul", asm,
                             "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrr VR128:$dst, VR128:$src, immop)>;

  def : InstAlias<!strconcat("pclmul", asm,
                             "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrm VR128:$dst, i128mem:$src, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop)>;
}
defm : pclmul_alias<"hqhq", 0x11>;
defm : pclmul_alias<"hqlq", 0x01>;
defm : pclmul_alias<"lqhq", 0x10>;
defm : pclmul_alias<"lqlq", 0x00>;
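
// For example, the "lqlq" instantiation above lets the assembler accept
//   pclmullqlqdq %xmm1, %xmm0
// as an alias for "pclmulqdq $0x0, %xmm1, %xmm0", a carry-less multiply of
// the low quadwords of the two operands.
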
//===----------------------------------------------------------------------===//
// AVX Instructions
//===----------------------------------------------------------------------===//

// Load from memory and broadcast to all elements of the destination operand
class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
                    X86MemOperand x86memop, Intrinsic Int> :
  AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
        !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
        [(set RC:$dst, (Int addr:$src))]>, VEX;

def VBROADCASTSS   : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
                                   int_x86_avx_vbroadcastss>;
def VBROADCASTSSY  : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
                                   int_x86_avx_vbroadcastss_256>;
def VBROADCASTSD   : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
                                   int_x86_avx_vbroadcast_sd_256>;
def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
                                   int_x86_avx_vbroadcastf128_pd_256>;
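
// Usage sketch: "vbroadcastss (%rax), %ymm0" loads a single f32 and
// replicates it into all eight lanes of %ymm0; the VR128 variant fills four
// lanes, and vbroadcastf128 replicates a whole 128-bit block into both
// halves of a YMM register.
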
// Insert packed floating-point values
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;

// Extract packed floating-point values
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;

// Conditional SIMD Packed Loads and Stores
multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
                          Intrinsic IntLd, Intrinsic IntLd256,
                          Intrinsic IntSt, Intrinsic IntSt256,
                          PatFrag pf128, PatFrag pf256> {
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, f128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
             VEX_4V;
  def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, f256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V;
  def mr  : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
}

defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
                                 int_x86_avx_maskload_ps,
                                 int_x86_avx_maskload_ps_256,
                                 int_x86_avx_maskstore_ps,
                                 int_x86_avx_maskstore_ps_256,
                                 memopv4f32, memopv8f32>;
defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
                                 int_x86_avx_maskload_pd,
                                 int_x86_avx_maskload_pd_256,
                                 int_x86_avx_maskstore_pd,
                                 int_x86_avx_maskstore_pd_256,
                                 memopv2f64, memopv4f64>;
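
// Semantics sketch: each packed element is loaded or stored only when the
// sign bit of the corresponding mask element is set; masked-off load lanes
// are zeroed and masked-off store lanes leave memory untouched. For
// example, "vmaskmovps (%rax), %xmm1, %xmm0" is a conditional 4 x f32 load
// under the mask in %xmm1.
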
// Permute Floating-Point Values
multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
                      RegisterClass RC, X86MemOperand x86memop_f,
                      X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
                      Intrinsic IntVar, Intrinsic IntImm> {
  def rr  : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop_i:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;

  def ri  : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
  def mi  : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
             (ins x86memop_f:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
}

defm VPERMILPS  : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
                             memopv4f32, memopv4i32,
                             int_x86_avx_vpermilvar_ps,
                             int_x86_avx_vpermil_ps>;
defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
                             memopv8f32, memopv8i32,
                             int_x86_avx_vpermilvar_ps_256,
                             int_x86_avx_vpermil_ps_256>;
defm VPERMILPD  : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
                             memopv2f64, memopv2i64,
                             int_x86_avx_vpermilvar_pd,
                             int_x86_avx_vpermil_pd>;
defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
                             memopv4f64, memopv4i64,
                             int_x86_avx_vpermilvar_pd_256,
                             int_x86_avx_vpermil_pd_256>;
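
// For illustration: "vpermilps $0x1b, %xmm1, %xmm0" reverses the four f32
// lanes of %xmm1 (0x1b == 0b00011011 selects source elements 3,2,1,0),
// while the rr/rm variants take per-lane selectors from a vector operand.
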
def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;

// Zero All YMM registers
def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
                 [(int_x86_avx_vzeroall)]>, VEX, VEX_L, Requires<[HasAVX]>;

// Zero Upper bits of YMM registers
def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
                   [(int_x86_avx_vzeroupper)]>, VEX, Requires<[HasAVX]>;

def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;

def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
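
// For illustration: "vinsertf128 $1, %xmm1, %ymm2, %ymm0" copies %ymm2 and
// replaces the upper 128-bit half of the result with %xmm1; immediate 0
// would replace the lower half instead.
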
def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;

def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v4f32 (VEXTRACTF128rr
                    (v8f32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v2f64 (VEXTRACTF128rr
                    (v4f64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v4i32 (VEXTRACTF128rr
                    (v8i32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v2i64 (VEXTRACTF128rr
                    (v4i64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;

def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
          (VBROADCASTF128 addr:$src)>;

def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;

def : Pat<(int_x86_avx_vperm2f128_ps_256
                  VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256
                  VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256
                  VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
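
// For illustration: bits [1:0] and [5:4] of the vperm2f128 immediate select
// the low and high 128-bit halves of the result from the four source
// halves, so "vperm2f128 $0x1, %ymm1, %ymm1, %ymm0" swaps the two halves
// of %ymm1.
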
//===----------------------------------------------------------------------===//
// SSE Shuffle pattern fragments
//===----------------------------------------------------------------------===//

// This is part of a "work in progress" refactoring. The idea is that all
// vector shuffles are going to be translated into target-specific nodes and
// directly matched by the patterns below (which can be changed along the way).
// The AVX versions of some but not all of them are described here, and more
// should come in the near future.
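//
// For instance, a shufflevector that duplicates the low f64 lane of a
// v2f64 is lowered to an X86Movddup node, which the MOVDDUP patterns
// further down then match directly.
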
// Shuffle with PSHUFD instruction folding loads. The first two patterns match
// SSE2 loads, which are always promoted to v2i64. The last one should match
// the SSE1 case, where the only legal load is v4f32, but there is no PSHUFD
// in SSE1, so how did this ever work? Anyway, the pattern will remain here
// until we investigate further.
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
                                   (i8 imm:$imm))),
          (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
                                   (i8 imm:$imm))),
          (PSHUFDmi addr:$src1, imm:$imm)>;
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
                                   (i8 imm:$imm))),
          (PSHUFDmi addr:$src1, imm:$imm)>; // FIXME: has this ever worked?
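
// For illustration: "pshufd $0x1b, (%rax), %xmm0" loads 128 bits and
// reverses the four dwords; each 2-bit immediate field selects one source
// element, much like the vpermilps immediate form above.
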
// Shuffle with PSHUFD instruction.
def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (PSHUFDri VR128:$src1, imm:$imm)>;

def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (PSHUFDri VR128:$src1, imm:$imm)>;

// Shuffle with SHUFPD instruction.
def : Pat<(v2f64 (X86Shufps VR128:$src1,
                     (memopv2f64 addr:$src2), (i8 imm:$imm))),
          (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Shufps VR128:$src1,
                     (memopv2f64 addr:$src2), (i8 imm:$imm))),
          (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;

def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;

// Shuffle with SHUFPS instruction.
def : Pat<(v4f32 (X86Shufps VR128:$src1,
                     (memopv4f32 addr:$src2), (i8 imm:$imm))),
          (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Shufps VR128:$src1,
                     (memopv4f32 addr:$src2), (i8 imm:$imm))),
          (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;

def : Pat<(v4i32 (X86Shufps VR128:$src1,
                     (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
          (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86Shufps VR128:$src1,
                     (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
          (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;

// Shuffle with MOVHLPS instruction
def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with MOVDDUP instruction
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v2f64
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
          (MOVDDUPrm addr:$src)>;
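
// Semantics note: movddup duplicates the low 64-bit element, producing
// <x[0], x[0]> from an input <x[0], x[1]>; the load patterns above fold the
// various ways a 64-bit value can reach the node.
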
// Shuffle with UNPCKLPS
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
          (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, (memopv8f32 addr:$src2))),
          (VUNPCKLPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
          (UNPCKLPSrm VR128:$src1, addr:$src2)>;

def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
          (VUNPCKLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
          (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
          (UNPCKLPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with UNPCKHPS
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
          (VUNPCKHPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
          (UNPCKHPSrm VR128:$src1, addr:$src2)>;

def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
          (VUNPCKHPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
          (UNPCKHPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with UNPCKLPD
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
          (VUNPCKLPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, (memopv4f64 addr:$src2))),
          (VUNPCKLPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
          (UNPCKLPDrm VR128:$src1, addr:$src2)>;

def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
          (VUNPCKLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
          (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
          (UNPCKLPDrr VR128:$src1, VR128:$src2)>;

// Shuffle with UNPCKHPD
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
          (VUNPCKHPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
          (UNPCKHPDrm VR128:$src1, addr:$src2)>;

def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
          (VUNPCKHPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
          (UNPCKHPDrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKLBW
def : Pat<(v16i8 (X86Punpcklbw VR128:$src1,
                      (bc_v16i8 (memopv2i64 addr:$src2)))),
          (PUNPCKLBWrm VR128:$src1, addr:$src2)>;
def : Pat<(v16i8 (X86Punpcklbw VR128:$src1, VR128:$src2)),
          (PUNPCKLBWrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKLWD
def : Pat<(v8i16 (X86Punpcklwd VR128:$src1,
                      (bc_v8i16 (memopv2i64 addr:$src2)))),
          (PUNPCKLWDrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (X86Punpcklwd VR128:$src1, VR128:$src2)),
          (PUNPCKLWDrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKLDQ
def : Pat<(v4i32 (X86Punpckldq VR128:$src1,
                      (bc_v4i32 (memopv2i64 addr:$src2)))),
          (PUNPCKLDQrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86Punpckldq VR128:$src1, VR128:$src2)),
          (PUNPCKLDQrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKLQDQ
def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, (memopv2i64 addr:$src2))),
          (PUNPCKLQDQrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)),
          (PUNPCKLQDQrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKHBW
def : Pat<(v16i8 (X86Punpckhbw VR128:$src1,
                      (bc_v16i8 (memopv2i64 addr:$src2)))),
          (PUNPCKHBWrm VR128:$src1, addr:$src2)>;
def : Pat<(v16i8 (X86Punpckhbw VR128:$src1, VR128:$src2)),
          (PUNPCKHBWrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKHWD
def : Pat<(v8i16 (X86Punpckhwd VR128:$src1,
                      (bc_v8i16 (memopv2i64 addr:$src2)))),
          (PUNPCKHWDrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (X86Punpckhwd VR128:$src1, VR128:$src2)),
          (PUNPCKHWDrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKHDQ
def : Pat<(v4i32 (X86Punpckhdq VR128:$src1,
                      (bc_v4i32 (memopv2i64 addr:$src2)))),
          (PUNPCKHDQrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86Punpckhdq VR128:$src1, VR128:$src2)),
          (PUNPCKHDQrr VR128:$src1, VR128:$src2)>;

// Shuffle with PUNPCKHQDQ
def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, (memopv2i64 addr:$src2))),
          (PUNPCKHQDQrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)),
          (PUNPCKHQDQrr VR128:$src1, VR128:$src2)>;

// Shuffle with MOVLHPS
def : Pat<(X86Movlhps VR128:$src1,
                    (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
          (MOVHPSrm VR128:$src1, addr:$src2)>;
def : Pat<(X86Movlhps VR128:$src1,
                    (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
          (MOVHPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
          (MOVLHPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
          (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;

// FIXME: Instead of X86Movddup, there should be an X86Unpcklpd here; the
// problem is during lowering, where it's not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(v2f64 (X86Movddup VR128:$src)),
          (UNPCKLPDrr VR128:$src, VR128:$src)>;

// Shuffle with MOVLHPD
def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVHPDrm VR128:$src1, addr:$src2)>;

// FIXME: Instead of X86Unpcklpd, there should be an X86Movlhpd here; the
// problem is during lowering, where it's not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVHPDrm VR128:$src1, addr:$src2)>;

// Shuffle with MOVSS
def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
          (MOVSSrr VR128:$src1, FR32:$src2)>;
def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4i32 VR128:$src1),
                   (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4f32 VR128:$src1),
                   (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
// FIXME: Instead of an X86Movss there should be an X86Movlps here; the
// problem is during lowering, where it's not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(X86Movss VR128:$src1,
                    (bc_v4i32 (v2i64 (load addr:$src2)))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;

// Shuffle with MOVSD
def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
          (MOVSDrr VR128:$src1, FR64:$src2)>;
def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2i64 VR128:$src1),
                   (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2f64 VR128:$src1),
                   (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;

// Shuffle with MOVSHDUP
def : Pat<(v4i32 (X86Movshdup VR128:$src)),
          (MOVSHDUPrr VR128:$src)>;
def : Pat<(X86Movshdup (bc_v4i32 (memopv2i64 addr:$src))),
          (MOVSHDUPrm addr:$src)>;

def : Pat<(v4f32 (X86Movshdup VR128:$src)),
          (MOVSHDUPrr VR128:$src)>;
def : Pat<(X86Movshdup (memopv4f32 addr:$src)),
          (MOVSHDUPrm addr:$src)>;

// Shuffle with MOVSLDUP
def : Pat<(v4i32 (X86Movsldup VR128:$src)),
          (MOVSLDUPrr VR128:$src)>;
def : Pat<(X86Movsldup (bc_v4i32 (memopv2i64 addr:$src))),
          (MOVSLDUPrm addr:$src)>;

def : Pat<(v4f32 (X86Movsldup VR128:$src)),
          (MOVSLDUPrr VR128:$src)>;
def : Pat<(X86Movsldup (memopv4f32 addr:$src)),
          (MOVSLDUPrm addr:$src)>;

// Shuffle with PSHUFHW
def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
          (PSHUFHWri VR128:$src, imm:$imm)>;
def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
          (PSHUFHWmi addr:$src, imm:$imm)>;

// Shuffle with PSHUFLW
def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
          (PSHUFLWri VR128:$src, imm:$imm)>;
def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
          (PSHUFLWmi addr:$src, imm:$imm)>;

// Shuffle with PALIGN
def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
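
// Note the operand swap above: palignr concatenates its first instruction
// operand into the high half of a 256-bit intermediate and shifts the pair
// right by imm bytes, so the DAG node operands appear reversed when the
// instruction is selected.
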
// Shuffle with MOVLPS
def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(X86Movlps VR128:$src1,
                    (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
// FIXME: Instead of an X86Movlps there should be an X86Movsd here; the
// problem is during lowering, where it's not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;

def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;

// Shuffle with MOVLPD
def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2f64 (X86Movlpd VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;

// Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD
def : Pat<(store (f64 (vector_extract
          (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))), addr:$dst),
          (MOVHPSmr addr:$dst, VR128:$src)>;
def : Pat<(store (f64 (vector_extract
          (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))), addr:$dst),
          (MOVHPDmr addr:$dst, VR128:$src)>;

def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v4i32 (X86Movlps
                 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;

def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;