//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions
// and the instruction properties needed for code generation, machine code
// emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instruction Classes
//===----------------------------------------------------------------------===//

/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           bit Is2Addr = 1> {
  let isCommutable = 1 in {
    def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
}
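
// Note: when Is2Addr is set, the asm string uses the two-operand SSE form
// (the destination is tied to $src1 at instantiation time); otherwise it
// uses the three-operand AVX form with an explicit $src1.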

/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat,
                               bit Is2Addr = 1> {
  def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))]>;
  def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
                                 SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, mem_cpat:$src2))]>;
}
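
// The intrinsic name is spliced together from the multiclass arguments;
// e.g. SSEVer = "2", OpcodeStr = "add" and FPSizeStr = "_sd" would select
// int_x86_sse2_add_sd (the argument values here are illustrative).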

/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
}

/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed logical instructions class,
/// with the selection patterns supplied by the caller.
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                      string OpcodeStr, X86MemOperand x86memop,
                                      list<dag> pat_rr, list<dag> pat_rm,
                                      bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rr, d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
     !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
     pat_rm, d>;
}

/// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               X86MemOperand x86memop, PatFrag mem_frag,
                               Domain d, bit Is2Addr = 1> {
  def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                     !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))], d>;
  def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst),
                  (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                     !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, (mem_frag addr:$src2)))], d>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Instructions
//===----------------------------------------------------------------------===//

class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
      SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
      [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;

// Loading from memory automatically zeroes the upper bits.
class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
                    PatFrag mem_pat, string OpcodeStr> :
      SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (mem_pat addr:$src))]>;

// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
// is used instead. Register-to-register movss/movsd is not modeled as an
// INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
// in terms of a copy, and, as just mentioned, we don't use movss/movsd for
// copies.
def VMOVSSrr : sse12_move_rr<FR32, v4f32,
                "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
def VMOVSDrr : sse12_move_rr<FR64, v2f64,
                "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;

  let AddedComplexity = 20 in
    def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
}

let Constraints = "$src1 = $dst" in {
  def MOVSSrr : sse12_move_rr<FR32, v4f32,
                "movss\t{$src2, $dst|$dst, $src2}">, XS;
  def MOVSDrr : sse12_move_rr<FR64, v2f64,
                "movsd\t{$src2, $dst|$dst, $src2}">, XD;
}

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;

  let AddedComplexity = 20 in
    def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
}

let AddedComplexity = 15 in {
// Extract the low 32-bit value from one vector and insert it into another.
def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4f32 VR128:$src1),
                   (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
// Extract the low 64-bit value from one vector and insert it into another.
def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2f64 VR128:$src1),
                   (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
}

// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
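
// scalar_to_vector leaves the upper vector elements undefined, so these
// promotions map to an INSERT_SUBREG into an IMPLICIT_DEF register rather
// than a zeroing move; no instruction is emitted for the undefined lanes.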

let AddedComplexity = 20 in {
let Predicates = [HasSSE1] in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
}
let Predicates = [HasSSE2] in {
  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
}
}
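
// SUBREG_TO_REG records that the zeroing of the full register was already
// performed by the load instruction itself, so the X86vzmovl needs no
// additional zeroing code.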

let AddedComplexity = 20, Predicates = [HasAVX] in {
// MOVSSrm zeros the high parts of the register; represent this
// with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
          (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
          (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
          (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
// MOVSDrm zeros the high parts of the register; represent this
// with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
          (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
          (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
          (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
          (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
def : Pat<(v2f64 (X86vzload addr:$src)),
          (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
// Represent the same patterns above but in the form they appear for
// 256-bit types
def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (i32 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (i32 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_sd)>;
}

// Store scalar value to memory.
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>;
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>;

def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>, XS, VEX;
def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>, XD, VEX;

// Extract and store.
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSSmr addr:$dst,
                   (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSDmr addr:$dst,
                   (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;

// Move Aligned/Unaligned floating point values
multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            bit IsReMaterializable = 1> {
let neverHasSideEffects = 1 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], d>;
}
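
// Marking the load forms canFoldAsLoad/isReMaterializable lets the register
// allocator rerun the load instead of spilling and reloading the value.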

defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                              "movaps", SSEPackedSingle>, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                              "movapd", SSEPackedDouble>, OpSize, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                              "movups", SSEPackedSingle>, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                              "movupd", SSEPackedDouble, 0>, OpSize, VEX;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
                              "movaps", SSEPackedSingle>, VEX;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
                              "movapd", SSEPackedDouble>, OpSize, VEX;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
                              "movups", SSEPackedSingle>, VEX;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                              "movupd", SSEPackedDouble, 0>, OpSize, VEX;
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                              "movaps", SSEPackedSingle>, TB;
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                              "movapd", SSEPackedDouble>, TB, OpSize;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                              "movups", SSEPackedSingle>, TB;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                              "movupd", SSEPackedDouble, 0>, TB, OpSize;

def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;

def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
          (VMOVUPSYmr addr:$dst, VR256:$src)>;

def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
          (VMOVUPDYmr addr:$dst, VR256:$src)>;

def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>;

// Intrinsic forms of the MOVUPS/D stores
def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
                   (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
                   (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;

def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;

// Move Low/High packed floating point values
multiclass sse12_mov_hilo_packed<bits<8> opc, RegisterClass RC,
                                 PatFrag mov_frag, string base_opc,
                                 string asm_opr> {
  def PSrm : PI<opc, MRMSrcMem,
         (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
         !strconcat(base_opc, "s", asm_opr),
             [(set RC:$dst,
               (mov_frag RC:$src1,
                  (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
             SSEPackedSingle>, TB;

  def PDrm : PI<opc, MRMSrcMem,
         (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
         !strconcat(base_opc, "d", asm_opr),
             [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
                              (scalar_to_vector (loadf64 addr:$src2)))))],
             SSEPackedDouble>, TB, OpSize;
}
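
// The "s"/"d" appended to base_opc selects the packed-single or packed-double
// mnemonic (e.g. movlps vs. movlpd); both variants replace one 64-bit half
// of the destination with the value loaded from memory.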

let AddedComplexity = 20 in {
  defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
  defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                                   "\t{$src2, $dst|$dst, $src2}">;
  defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                                   "\t{$src2, $dst|$dst, $src2}">;
}

def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>, VEX;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>, VEX;
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>;

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>,
                   VEX;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>,
                   VEX;
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>;

let AddedComplexity = 20 in {
  def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
                      VEX_4V;
  def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
                      VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
  def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
}

let Predicates = [HasAVX] in {
  def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;

  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (movddup VR128:$src, (undef))),
              (VMOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
    def : Pat<(v2i64 (movddup VR128:$src, (undef))),
              (VMOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;

    // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
    def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
              (VMOVLHPSrr VR128:$src1, VR128:$src2)>;

    def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
              (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
              (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
              (VMOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
  }

  let AddedComplexity = 20 in {
    // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
    def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
              (VMOVHLPSrr VR128:$src1, VR128:$src2)>;

    // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
    def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
              (VMOVHLPSrr VR128:$src1, VR128:$src1)>;
    def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
              (VMOVHLPSrr VR128:$src1, VR128:$src1)>;
  }
}

let Predicates = [HasSSE1] in {
  def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;

  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (movddup VR128:$src, (undef))),
              (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
    def : Pat<(v2i64 (movddup VR128:$src, (undef))),
              (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;

    // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
    def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
              (MOVLHPSrr VR128:$src1, VR128:$src2)>;

    def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
              (MOVLHPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
              (MOVLHPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
              (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
  }

  let AddedComplexity = 20 in {
    // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
    def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
              (MOVHLPSrr VR128:$src1, VR128:$src2)>;

    // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
    def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
              (MOVHLPSrr VR128:$src1, VR128:$src1)>;
    def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
              (MOVHLPSrr VR128:$src1, VR128:$src1)>;
  }
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Conversion Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                     SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                     string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
}

multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm, []>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm, []>;
}

multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                       SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                       string asm, Domain d> {
  def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
  def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
}

multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
}
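
// sse12_cvt_s_np and sse12_vcvt_avx carry empty pattern lists: they exist
// for the assembler/disassembler. The AVX three-operand scalar converts also
// take a pass-through $src1 whose upper elements flow into the result, which
// the plain SDNode patterns cannot express directly.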

defm VCVTTSS2SI   : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                                "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                                "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
                                VEX_W;
defm VCVTTSD2SI   : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                                "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                                "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
                                VEX, VEX_W;

// The assembler can recognize rr 64-bit instructions by seeing an rxx
// register, but the same isn't true when only using memory operands;
// provide other assembly "l" and "q" forms to address this explicitly
// where appropriate to do so.
defm VCVTSI2SS   : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
                                  VEX_4V;
defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
                                  VEX_4V, VEX_W;
defm VCVTSI2SD   : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
                                  VEX_4V;
defm VCVTSI2SDL  : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
                                  VEX_4V;
defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
                                  VEX_4V, VEX_W;

let Predicates = [HasAVX] in {
  def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
            (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
            (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;

  def : Pat<(f32 (sint_to_fp GR32:$src)),
            (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f32 (sint_to_fp GR64:$src)),
            (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
  def : Pat<(f64 (sint_to_fp GR32:$src)),
            (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f64 (sint_to_fp GR64:$src)),
            (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
}

defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                      "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                      "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                      "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
defm CVTSI2SS  : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
                      "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
                      "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTSI2SD  : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
                      "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
                      "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;

// Conversion Instructions Intrinsics - Match intrinsics which expect MM
// and/or XMM operand(s).

multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                         Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
                         string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
}

multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
                    RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
                    PatFrag ld_frag, string asm, bit Is2Addr = 1> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
}

defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                      f128mem, load, "cvtsd2si">, XD, VEX;
defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
                      int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
                      XD, VEX, VEX_W;

// FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_
// prefixes. Get rid of this hack or rename the intrinsics; there are several
// instructions that only match with the intrinsic form, so why create
// duplicates just to let them be recognized by the assembler?
defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
                      "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
defm VCVTSD2SI64 : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
                      "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                f128mem, load, "cvtsd2si{l}">, XD;
defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
                  f128mem, load, "cvtsd2si{q}">, XD, REX_W;

defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
          int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
          int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
          VEX_W;
defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
          int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
          int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
          VEX_4V, VEX_W;

let Constraints = "$src1 = $dst" in {
  defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse_cvtsi2ss, i32mem, loadi32,
                        "cvtsi2ss">, XS;
  defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                        int_x86_sse_cvtsi642ss, i64mem, loadi64,
                        "cvtsi2ss{q}">, XS, REX_W;
  defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse2_cvtsi2sd, i32mem, loadi32,
                        "cvtsi2sd">, XD;
  defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                        int_x86_sse2_cvtsi642sd, i64mem, loadi64,
                        "cvtsi2sd">, XD, REX_W;
}

// Aliases for intrinsics
defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                    f32mem, load, "cvttss2si">, XS, VEX;
defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse_cvttss2si64, f32mem, load,
                                    "cvttss2si">, XS, VEX, VEX_W;
defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                    f128mem, load, "cvttsd2si">, XD, VEX;
defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse2_cvttsd2si64, f128mem, load,
                                    "cvttsd2si">, XD, VEX, VEX_W;
defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                    f32mem, load, "cvttss2si">, XS;
defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse_cvttss2si64, f32mem, load,
                                    "cvttss2si{q}">, XS, REX_W;
defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                    f128mem, load, "cvttsd2si">, XD;
defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                    int_x86_sse2_cvttsd2si64, f128mem, load,
                                    "cvttsd2si{q}">, XD, REX_W;

let Pattern = []<dag> in {
defm VCVTSS2SI   : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
                          "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
                          "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
                          VEX_W;
defm VCVTDQ2PS   : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
                          "cvtdq2ps\t{$src, $dst|$dst, $src}",
                          SSEPackedSingle>, TB, VEX;
defm VCVTDQ2PSY  : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
                          "cvtdq2ps\t{$src, $dst|$dst, $src}",
                          SSEPackedSingle>, TB, VEX;
}

let Pattern = []<dag> in {
defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
                          "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
                          "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
                          "cvtdq2ps\t{$src, $dst|$dst, $src}",
                          SSEPackedSingle>, TB; /* PD SSE3 form is available */
}
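
// An empty Pattern list makes these defm's assembler-only as well; code
// generation for these conversions is handled by the intrinsic and target
// patterns elsewhere in this file.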

let Predicates = [HasSSE1] in {
  def : Pat<(int_x86_sse_cvtss2si VR128:$src),
            (CVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
            (CVTSS2SIrm addr:$src)>;
  def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
            (CVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
            (CVTSS2SI64rm addr:$src)>;
}

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse_cvtss2si VR128:$src),
            (VCVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
            (VCVTSS2SIrm addr:$src)>;
  def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
            (VCVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
            (VCVTSS2SI64rm addr:$src)>;
}

// Convert scalar double to scalar single
def VCVTSD2SSrr  : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
                       (ins FR64:$src1, FR64:$src2),
                       "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                       VEX_4V;
def VCVTSD2SSrm  : I<0x5A, MRMSrcMem, (outs FR32:$dst),
                       (ins FR64:$src1, f64mem:$src2),
                       "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
          Requires<[HasAVX]>;

def CVTSD2SSrr  : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround FR64:$src))]>;
def CVTSD2SSrm  : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
                      "cvtsd2ss\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
                      Requires<[HasSSE2, OptForSize]>;

defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
                      int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
                      XS, VEX_4V;
let Constraints = "$src1 = $dst" in
defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
                      int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;

// Convert scalar single to scalar double
// SSE2 instructions with XS prefix
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
                    (ins FR32:$src1, FR32:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XS, Requires<[HasAVX]>, VEX_4V;
def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
                    (ins FR32:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;

let Predicates = [HasAVX] in {
  def : Pat<(f64 (fextend FR32:$src)),
            (VCVTSS2SDrr FR32:$src, FR32:$src)>;
  def : Pat<(fextend (loadf32 addr:$src)),
            (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(extloadf32 addr:$src),
            (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
}

def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (fextend FR32:$src))]>, XS,
                   Requires<[HasSSE2]>;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
                   Requires<[HasSSE2, OptForSize]>;

def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       VR128:$src2))]>, XS, VEX_4V,
                    Requires<[HasAVX]>;
def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       (load addr:$src2)))]>, XS, VEX_4V,
                    Requires<[HasAVX]>;
let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       VR128:$src2))]>, XS,
                    Requires<[HasSSE2]>;
def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                    "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                    [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                       (load addr:$src2)))]>, XS,
                    Requires<[HasSSE2]>;
}

def : Pat<(extloadf32 addr:$src),
          (CVTSS2SDrr (MOVSSrm addr:$src))>,
      Requires<[HasSSE2, OptForSpeed]>;
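
// When optimizing for speed, an extending f32 load is selected as a movss
// load followed by the register form of cvtss2sd instead of folding the
// load into the conversion.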

// Convert doubleword to packed single/double fp
// SSE2 instructions without OpSize prefix
def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                     TB, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     TB, VEX, Requires<[HasAVX]>;
def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "cvtdq2ps\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     TB, Requires<[HasSSE2]>;

// FIXME: why is the non-intrinsic version described as SSE3?
// SSE2 instructions with XS prefix
def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                     XS, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, VEX, Requires<[HasAVX]>;
def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                     XS, Requires<[HasSSE2]>;
def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, Requires<[HasSSE2]>;

// Convert packed single/double fp to doubleword
def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtps2dq\t{$src, $dst|$dst, $src}", []>;

def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
                        VEX;
def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
                         (ins f128mem:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                            (memop addr:$src)))]>, VEX;
def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                           (memop addr:$src)))]>;

// SSE2 packed instructions with XD prefix
def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                     XD, VEX, Requires<[HasAVX]>;
def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (memop addr:$src)))]>,
                     XD, VEX, Requires<[HasAVX]>;
def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                     XD, Requires<[HasSSE2]>;
def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (memop addr:$src)))]>,
                     XD, Requires<[HasSSE2]>;

// Convert with truncation packed single/double fp to doubleword
// SSE2 packed instructions with XS prefix
def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                            (int_x86_sse2_cvttps2dq VR128:$src))]>;
def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                            (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;

def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vcvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                              (int_x86_sse2_cvttps2dq VR128:$src))]>,
                      XS, VEX, Requires<[HasAVX]>;
def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "vcvttps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvttps2dq
                                           (memop addr:$src)))]>,
                      XS, VEX, Requires<[HasAVX]>;

def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
          (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
          (CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;

def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
          (Int_VCVTDQ2PSrr VR128:$src)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
          (VCVTTPS2DQrr VR128:$src)>, Requires<[HasAVX]>;
def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
          (VCVTDQ2PSYrr VR256:$src)>, Requires<[HasAVX]>;
def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
          (VCVTTPS2DQYrr VR256:$src)>, Requires<[HasAVX]>;
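
// fp_to_sint maps to the truncating (cvtt*) forms because C-style
// float-to-integer conversion rounds toward zero regardless of MXCSR.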

def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
                            (ins VR128:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
                       VEX;
def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
                          (ins f128mem:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                             (memop addr:$src)))]>, VEX;
def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                         (memop addr:$src)))]>;

// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                         "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                         "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;

// Convert packed single to packed double
let Predicates = [HasAVX] in {
                  // SSE2 instructions without OpSize prefix
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
}
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;

def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                     VEX, Requires<[HasAVX]>;
def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "vcvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                     VEX, Requires<[HasAVX]>;
def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                     TB, Requires<[HasSSE2]>;

// Convert packed double to packed single
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTPD2PSrr  : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                       "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                       "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                       "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;

def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
                         (ins f128mem:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                            (memop addr:$src)))]>;
def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                            (memop addr:$src)))]>;

// AVX 256-bit register conversion intrinsics
// FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
// whenever possible to avoid declaring two versions of each one.
def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
          (VCVTDQ2PSYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
          (VCVTDQ2PSYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
          (VCVTPD2PSYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
          (VCVTPD2PSYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
          (VCVTPS2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
          (VCVTPS2DQYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
          (VCVTPS2PDYrr VR128:$src)>;
def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
          (VCVTPS2PDYrm addr:$src)>;

def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
          (VCVTTPD2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
          (VCVTTPD2DQYrm addr:$src)>;

def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
          (VCVTTPS2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
          (VCVTTPS2DQYrm addr:$src)>;

// Match fround and fextend for 128/256-bit conversions
def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
          (VCVTPD2PSYrr VR256:$src)>;
def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
          (VCVTPD2PSYrm addr:$src)>;

def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
          (VCVTPS2PDYrr VR128:$src)>;
def : Pat<(v4f64 (fextend (loadv4f32 addr:$src))),
          (VCVTPS2PDYrm addr:$src)>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Compare Instructions
//===----------------------------------------------------------------------===//

// sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
                            string asm, string asm_alt> {
  let isAsmParserOnly = 1 in {
    def rr : SIi8<0xC2, MRMSrcReg,
                  (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
                  asm, []>;
    def rm : SIi8<0xC2, MRMSrcMem,
                  (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
                  asm, []>;
  }

  // Accept explicit immediate argument form instead of comparison code.
  def rr_alt : SIi8<0xC2, MRMSrcReg,
                (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
                asm_alt, []>;
  def rm_alt : SIi8<0xC2, MRMSrcMem,
                (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
                asm_alt, []>;
}

let neverHasSideEffects = 1 in {
  defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
                  "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
                  "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
                  XS, VEX_4V;
  defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
                  "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
                  "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
                  XD, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
def CMPSSrr : SIi8<0xC2, MRMSrcReg,
                  (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, SSECC:$cc),
                  "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst,
                        (X86cmpss (f32 FR32:$src1), FR32:$src2, imm:$cc))]>, XS;
def CMPSSrm : SIi8<0xC2, MRMSrcMem,
                  (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2, SSECC:$cc),
                  "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst, (X86cmpss (f32 FR32:$src1),
                                    (loadf32 addr:$src2), imm:$cc))]>, XS;
def CMPSDrr : SIi8<0xC2, MRMSrcReg,
                  (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, SSECC:$cc),
                  "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst,
                        (X86cmpsd (f64 FR64:$src1), FR64:$src2, imm:$cc))]>, XD;
def CMPSDrm : SIi8<0xC2, MRMSrcMem,
                  (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2, SSECC:$cc),
                  "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1),
                                    (loadf64 addr:$src2), imm:$cc))]>, XD;
}

let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
def CMPSSrr_alt : SIi8<0xC2, MRMSrcReg,
                  (outs FR32:$dst), (ins FR32:$src1, FR32:$src, i8imm:$src2),
                  "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
def CMPSSrm_alt : SIi8<0xC2, MRMSrcMem,
                  (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, i8imm:$src2),
                  "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
def CMPSDrr_alt : SIi8<0xC2, MRMSrcReg,
                  (outs FR64:$dst), (ins FR64:$src1, FR64:$src, i8imm:$src2),
                  "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
def CMPSDrm_alt : SIi8<0xC2, MRMSrcMem,
                  (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, i8imm:$src2),
                  "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
}
1269 multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
1270 Intrinsic Int, string asm> {
1271 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1272 (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
1273 [(set VR128:$dst, (Int VR128:$src1,
1274 VR128:$src, imm:$cc))]>;
1275 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1276 (ins VR128:$src1, x86memop:$src, SSECC:$cc), asm,
1277 [(set VR128:$dst, (Int VR128:$src1,
1278 (load addr:$src), imm:$cc))]>;
1279 }
1281 // Aliases to match intrinsics which expect XMM operand(s).
1282 defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1283 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
1285 defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1286 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
1288 let Constraints = "$src1 = $dst" in {
1289 defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1290 "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
1291 defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1292 "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
1296 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
1297 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1298 ValueType vt, X86MemOperand x86memop,
1299 PatFrag ld_frag, string OpcodeStr, Domain d> {
1300 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1301 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1302 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
1303 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1304 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1305 [(set EFLAGS, (OpNode (vt RC:$src1),
1306 (ld_frag addr:$src2)))], d>;
1307 }
1309 let Defs = [EFLAGS] in {
1310 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1311 "ucomiss", SSEPackedSingle>, VEX;
1312 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1313 "ucomisd", SSEPackedDouble>, OpSize, VEX;
1314 let Pattern = []<dag> in {
1315 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1316 "comiss", SSEPackedSingle>, VEX;
1317 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1318 "comisd", SSEPackedDouble>, OpSize, VEX;
1321 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1322 load, "ucomiss", SSEPackedSingle>, VEX;
1323 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1324 load, "ucomisd", SSEPackedDouble>, OpSize, VEX;
1326 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
1327 load, "comiss", SSEPackedSingle>, VEX;
1328 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
1329 load, "comisd", SSEPackedDouble>, OpSize, VEX;
1330 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1331 "ucomiss", SSEPackedSingle>, TB;
1332 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1333 "ucomisd", SSEPackedDouble>, TB, OpSize;
1335 let Pattern = []<dag> in {
1336 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1337 "comiss", SSEPackedSingle>, TB;
1338 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1339 "comisd", SSEPackedDouble>, TB, OpSize;
1342 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1343 load, "ucomiss", SSEPackedSingle>, TB;
1344 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1345 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
1347 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
1348 "comiss", SSEPackedSingle>, TB;
1349 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
1350 "comisd", SSEPackedDouble>, TB, OpSize;
1351 } // Defs = [EFLAGS]
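// For reference (architectural behavior of the instructions defined above):
// [u]comiss/[u]comisd compare the low element and set EFLAGS as follows:
//
//   unordered (NaN operand): ZF=1 PF=1 CF=1
//   greater than:            ZF=0 PF=0 CF=0
//   less than:               ZF=0 PF=0 CF=1
//   equal:                   ZF=1 PF=0 CF=0
//
// COMIS* raises the invalid exception on any NaN, UCOMIS* only on signaling
// NaNs, which is why ordinary FP comparisons lower to the UCOMIS* forms.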
1353 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
1354 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1355 Intrinsic Int, string asm, string asm_alt,
1356 Domain d> {
1357 let isAsmParserOnly = 1 in {
1358 def rri : PIi8<0xC2, MRMSrcReg,
1359 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1360 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1361 def rmi : PIi8<0xC2, MRMSrcMem,
1362 (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc), asm,
1363 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1366 // Accept explicit immediate argument form instead of comparison code.
1367 def rri_alt : PIi8<0xC2, MRMSrcReg,
1368 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1369 asm_alt, [], d>;
1370 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1371 (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
1372 asm_alt, [], d>;
1373 }
1374 }
1375 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1376 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1377 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1378 SSEPackedSingle>, VEX_4V;
1379 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1380 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1381 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1382 SSEPackedDouble>, OpSize, VEX_4V;
1383 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
1384 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1385 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1386 SSEPackedSingle>, VEX_4V;
1387 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
1388 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1389 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1390 SSEPackedDouble>, OpSize, VEX_4V;
1391 let Constraints = "$src1 = $dst" in {
1392 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1393 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1394 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1395 SSEPackedSingle>, TB;
1396 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1397 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1398 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1399 SSEPackedDouble>, TB, OpSize;
1400 }
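// Usage sketch (standard packed-compare semantics): each lane is compared
// independently and produces an all-ones or all-zeros lane mask, e.g.
//
//   cmpltps %xmm1, %xmm0    ; lane i of xmm0 = (xmm0[i] < xmm1[i]) ? ~0 : 0
//
// The resulting mask is typically combined with andps/andnps/orps to build a
// branchless vector select.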
1402 let Predicates = [HasSSE1] in {
1403 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1404 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1405 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1406 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1407 }
1409 let Predicates = [HasSSE2] in {
1410 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1411 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1412 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1413 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1414 }
1416 let Predicates = [HasAVX] in {
1417 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1418 (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1419 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1420 (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1421 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1422 (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1423 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1424 (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1426 def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
1427 (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
1428 def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), (memop addr:$src2), imm:$cc)),
1429 (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
1430 def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
1431 (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
1432 def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), (memop addr:$src2), imm:$cc)),
1433 (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
1434 }
1436 //===----------------------------------------------------------------------===//
1437 // SSE 1 & 2 - Shuffle Instructions
1438 //===----------------------------------------------------------------------===//
1440 /// sse12_shuffle - sse 1 & 2 shuffle instructions
1441 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1442 ValueType vt, string asm, PatFrag mem_frag,
1443 Domain d, bit IsConvertibleToThreeAddress = 0> {
1444 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
1445 (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
1446 [(set RC:$dst, (vt (shufp:$src3
1447 RC:$src1, (mem_frag addr:$src2))))], d>;
1448 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1449 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
1450 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
1451 [(set RC:$dst,
1452 (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
1453 }
1455 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1456 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1457 memopv4f32, SSEPackedSingle>, TB, VEX_4V;
1458 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
1459 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1460 memopv8f32, SSEPackedSingle>, TB, VEX_4V;
1461 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1462 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1463 memopv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
1464 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
1465 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1466 memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
1468 let Constraints = "$src1 = $dst" in {
1469 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1470 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1471 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1472 TB;
1473 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1474 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1475 memopv2f64, SSEPackedDouble>, TB, OpSize;
1476 }
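// For reference (standard SHUFPS immediate semantics): the imm8 picks two
// lanes from $src1 and two from $src2,
//
//   dst[0] = src1[imm[1:0]]   dst[1] = src1[imm[3:2]]
//   dst[2] = src2[imm[5:4]]   dst[3] = src2[imm[7:6]]
//
// e.g. "shufps $0x4e, %xmm1, %xmm0" (0x4e = 0b01001110) builds
// {x0[2],x0[3],x1[0],x1[1]}, i.e. the <2,3,4,5> vector_shuffle. SHUFPD uses
// only imm[0] and imm[1], one selector bit per f64 lane.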
1478 let Predicates = [HasSSE1] in {
1479 def : Pat<(v4f32 (X86Shufps VR128:$src1,
1480 (memopv4f32 addr:$src2), (i8 imm:$imm))),
1481 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
1482 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1483 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
1484 def : Pat<(v4i32 (X86Shufps VR128:$src1,
1485 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
1486 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
1487 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1488 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
1489 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
1490 // fall back to this for SSE1)
1491 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
1492 (SHUFPSrri VR128:$src2, VR128:$src1,
1493 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1494 // Special unary SHUFPSrri case.
1495 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
1496 (SHUFPSrri VR128:$src1, VR128:$src1,
1497 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1498 }
1500 let Predicates = [HasSSE2] in {
1501 // Special binary v4i32 shuffle cases with SHUFPS.
1502 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
1503 (SHUFPSrri VR128:$src1, VR128:$src2,
1504 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1505 def : Pat<(v4i32 (shufp:$src3 VR128:$src1,
1506 (bc_v4i32 (memopv2i64 addr:$src2)))),
1507 (SHUFPSrmi VR128:$src1, addr:$src2,
1508 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1509 // Special unary SHUFPDrri cases.
1510 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
1511 (SHUFPDrri VR128:$src1, VR128:$src1,
1512 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1513 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
1514 (SHUFPDrri VR128:$src1, VR128:$src1,
1515 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1516 // Special binary v2i64 shuffle cases using SHUFPDrri.
1517 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
1518 (SHUFPDrri VR128:$src1, VR128:$src2,
1519 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1520 // Generic SHUFPD patterns
1521 def : Pat<(v2f64 (X86Shufpd VR128:$src1,
1522 (memopv2f64 addr:$src2), (i8 imm:$imm))),
1523 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
1524 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1525 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
1526 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1527 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
1528 }
1530 let Predicates = [HasAVX] in {
1531 def : Pat<(v4f32 (X86Shufps VR128:$src1,
1532 (memopv4f32 addr:$src2), (i8 imm:$imm))),
1533 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
1534 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1535 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
1536 def : Pat<(v4i32 (X86Shufps VR128:$src1,
1537 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
1538 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
1539 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1540 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
1541 // vector_shuffle v1, v2 <4, 5, 2, 3> using VSHUFPSrri (we prefer movsd,
1542 // but fall back to this)
1543 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
1544 (VSHUFPSrri VR128:$src2, VR128:$src1,
1545 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1546 // Special unary SHUFPSrri case.
1547 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
1548 (VSHUFPSrri VR128:$src1, VR128:$src1,
1549 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1550 // Special binary v4i32 shuffle cases with SHUFPS.
1551 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
1552 (VSHUFPSrri VR128:$src1, VR128:$src2,
1553 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1554 def : Pat<(v4i32 (shufp:$src3 VR128:$src1,
1555 (bc_v4i32 (memopv2i64 addr:$src2)))),
1556 (VSHUFPSrmi VR128:$src1, addr:$src2,
1557 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1558 // Special unary SHUFPDrri cases.
1559 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
1560 (VSHUFPDrri VR128:$src1, VR128:$src1,
1561 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1562 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
1563 (VSHUFPDrri VR128:$src1, VR128:$src1,
1564 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1565 // Special binary v2i64 shuffle cases using SHUFPDrri.
1566 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
1567 (VSHUFPDrri VR128:$src1, VR128:$src2,
1568 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1569 // Generic VSHUFPD patterns
1570 def : Pat<(v2f64 (X86Shufpd VR128:$src1,
1571 (memopv2f64 addr:$src2), (i8 imm:$imm))),
1572 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
1573 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1574 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
1575 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1576 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
1577 }
1579 //===----------------------------------------------------------------------===//
1580 // SSE 1 & 2 - Unpack Instructions
1581 //===----------------------------------------------------------------------===//
1583 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1584 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1585 PatFrag mem_frag, RegisterClass RC,
1586 X86MemOperand x86memop, string asm,
1587 Domain d> {
1588 def rr : PI<opc, MRMSrcReg,
1589 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1590 asm, [(set RC:$dst,
1591 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1592 def rm : PI<opc, MRMSrcMem,
1593 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1594 asm, [(set RC:$dst,
1595 (vt (OpNode RC:$src1,
1596 (mem_frag addr:$src2))))], d>;
1597 }
1599 let AddedComplexity = 10 in {
1600 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1601 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1602 SSEPackedSingle>, VEX_4V;
1603 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1604 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1605 SSEPackedDouble>, OpSize, VEX_4V;
1606 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1607 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1608 SSEPackedSingle>, VEX_4V;
1609 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1610 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1611 SSEPackedDouble>, OpSize, VEX_4V;
1613 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1614 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1615 SSEPackedSingle>, VEX_4V;
1616 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1617 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1618 SSEPackedDouble>, OpSize, VEX_4V;
1619 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1620 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1621 SSEPackedSingle>, VEX_4V;
1622 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1623 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1624 SSEPackedDouble>, OpSize, VEX_4V;
1626 let Constraints = "$src1 = $dst" in {
1627 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1628 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1629 SSEPackedSingle>, TB;
1630 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1631 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1632 SSEPackedDouble>, TB, OpSize;
1633 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1634 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1635 SSEPackedSingle>, TB;
1636 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1637 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1638 SSEPackedDouble>, TB, OpSize;
1639 } // Constraints = "$src1 = $dst"
1640 } // AddedComplexity
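// For reference (standard UNPCK semantics): the "l" forms interleave the low
// halves of the two sources and the "h" forms the high halves:
//
//   unpcklps: dst = { a[0], b[0], a[1], b[1] }
//   unpckhps: dst = { a[2], b[2], a[3], b[3] }
//   unpcklpd: dst = { a[0], b[0] }
//
// With both inputs equal this duplicates lanes cheaply, and the
// AddedComplexity above makes these defs win over more generic shuffle
// patterns.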
1642 //===----------------------------------------------------------------------===//
1643 // SSE 1 & 2 - Extract Floating-Point Sign mask
1644 //===----------------------------------------------------------------------===//
1646 /// sse12_extr_sign_mask - sse 1 & 2 packed FP sign-mask extraction
1647 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1648 Domain d> {
1649 def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1650 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1651 [(set GR32:$dst, (Int RC:$src))], d>;
1652 def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
1653 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
1654 }
1656 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1657 SSEPackedSingle>, TB;
1658 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1659 SSEPackedDouble>, TB, OpSize;
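// Usage sketch (standard MOVMSK semantics): movmskps packs the sign bit of
// each f32 lane into the low four bits of a GPR,
//
//   movmskps %xmm0, %eax    ; eax = s3<<3 | s2<<2 | s1<<1 | s0
//
// and movmskpd does the same for the two f64 lanes. The X86fgetsign patterns
// below reuse this to read a scalar's sign without going through memory.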
1661 def : Pat<(i32 (X86fgetsign FR32:$src)),
1662 (MOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1663 sub_ss))>, Requires<[HasSSE1]>;
1664 def : Pat<(i64 (X86fgetsign FR32:$src)),
1665 (MOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1666 sub_ss))>, Requires<[HasSSE1]>;
1667 def : Pat<(i32 (X86fgetsign FR64:$src)),
1668 (MOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1669 sub_sd))>, Requires<[HasSSE2]>;
1670 def : Pat<(i64 (X86fgetsign FR64:$src)),
1671 (MOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1672 sub_sd))>, Requires<[HasSSE2]>;
1674 let Predicates = [HasAVX] in {
1675 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1676 "movmskps", SSEPackedSingle>, TB, VEX;
1677 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1678 "movmskpd", SSEPackedDouble>, TB, OpSize,
1680 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
1681 "movmskps", SSEPackedSingle>, TB, VEX;
1682 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
1683 "movmskpd", SSEPackedDouble>, TB, OpSize,
1686 def : Pat<(i32 (X86fgetsign FR32:$src)),
1687 (VMOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1688 sub_ss))>;
1689 def : Pat<(i64 (X86fgetsign FR32:$src)),
1690 (VMOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1691 sub_ss))>;
1692 def : Pat<(i32 (X86fgetsign FR64:$src)),
1693 (VMOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1694 sub_sd))>;
1695 def : Pat<(i64 (X86fgetsign FR64:$src)),
1696 (VMOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1697 sub_sd))>;
1700 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1701 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1702 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1703 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1705 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1706 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1707 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1708 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1712 //===----------------------------------------------------------------------===//
1713 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1714 //===----------------------------------------------------------------------===//
1716 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1717 // names that start with 'Fs'.
1719 // Alias instructions that map fld0 to pxor for sse.
1720 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1721 canFoldAsLoad = 1 in {
1722 // FIXME: Set encoding to pseudo!
1723 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1724 [(set FR32:$dst, fp32imm0)]>,
1725 Requires<[HasSSE1]>, TB, OpSize;
1726 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1727 [(set FR64:$dst, fpimm0)]>,
1728 Requires<[HasSSE2]>, TB, OpSize;
1729 def VFsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1730 [(set FR32:$dst, fp32imm0)]>,
1731 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
1732 def VFsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1733 [(set FR64:$dst, fpimm0)]>,
1734 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
1735 }
1737 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
1738 // bits are disregarded.
1739 let neverHasSideEffects = 1 in {
1740 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1741 "movaps\t{$src, $dst|$dst, $src}", []>;
1742 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1743 "movapd\t{$src, $dst|$dst, $src}", []>;
1746 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1747 // bits are disregarded.
1748 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1749 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1750 "movaps\t{$src, $dst|$dst, $src}",
1751 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
1752 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1753 "movapd\t{$src, $dst|$dst, $src}",
1754 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1755 }
1757 //===----------------------------------------------------------------------===//
1758 // SSE 1 & 2 - Logical Instructions
1759 //===----------------------------------------------------------------------===//
1761 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
1763 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
1764 SDNode OpNode> {
1765 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
1766 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, TB, VEX_4V;
1768 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
1769 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, TB, OpSize, VEX_4V;
1771 let Constraints = "$src1 = $dst" in {
1772 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
1773 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
1775 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
1776 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
1777 }
1778 }
1780 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1781 let mayLoad = 0 in {
1782 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
1783 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
1784 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
1785 }
1787 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
1788 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
1790 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
1792 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
1793 SDNode OpNode> {
1794 // In AVX there is no need to add a pattern for the 128-bit logical rr ps
1795 // form, because its operands are all promoted to v2i64 and the patterns are
1796 // covered by the int version. The rr pattern is only needed for SSE, where
1797 // v2i64 is available on SSE2 but not on SSE1.
1798 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1799 !strconcat(OpcodeStr, "ps"), f128mem, [],
1800 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1801 (memopv2i64 addr:$src2)))], 0>, TB, VEX_4V;
1803 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1804 !strconcat(OpcodeStr, "pd"), f128mem,
1805 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1806 (bc_v2i64 (v2f64 VR128:$src2))))],
1807 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1808 (memopv2i64 addr:$src2)))], 0>,
1809 OpSize, VEX_4V;
1810 let Constraints = "$src1 = $dst" in {
1811 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1812 !strconcat(OpcodeStr, "ps"), f128mem,
1813 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
1814 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1815 (memopv2i64 addr:$src2)))]>, TB;
1817 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1818 !strconcat(OpcodeStr, "pd"), f128mem,
1819 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1820 (bc_v2i64 (v2f64 VR128:$src2))))],
1821 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1822 (memopv2i64 addr:$src2)))]>, TB, OpSize;
1823 }
1824 }
1826 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
1828 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr,
1829 SDNode OpNode> {
1830 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
1831 !strconcat(OpcodeStr, "ps"), f256mem,
1832 [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
1833 [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
1834 (memopv4i64 addr:$src2)))], 0>, TB, VEX_4V;
1836 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
1837 !strconcat(OpcodeStr, "pd"), f256mem,
1838 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
1839 (bc_v4i64 (v4f64 VR256:$src2))))],
1840 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
1841 (memopv4i64 addr:$src2)))], 0>,
1842 OpSize, VEX_4V;
1843 }
1845 // AVX 256-bit packed logical ops forms
1846 defm VAND : sse12_fp_packed_logical_y<0x54, "and", and>;
1847 defm VOR : sse12_fp_packed_logical_y<0x56, "or", or>;
1848 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor", xor>;
1849 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn", X86andnp>;
1851 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
1852 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
1853 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
1854 let isCommutable = 0 in
1855 defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
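// For reference (standard semantics): ANDNPS/ANDNPD compute (~src1) & src2;
// X86andnp inverts its first operand, which is why ANDN is not commutable.
// Together with AND/OR this yields the usual branchless select:
//
//   result = (mask & x) | (~mask & y)    ; andps + andnps + orps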
1857 //===----------------------------------------------------------------------===//
1858 // SSE 1 & 2 - Arithmetic Instructions
1859 //===----------------------------------------------------------------------===//
1861 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
1862 /// vector forms.
1863 ///
1864 /// In addition, we also have a special variant of the scalar form here to
1865 /// represent the associated intrinsic operation. This form is unlike the
1866 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1867 /// and leaves the top elements unmodified (therefore these cannot be commuted).
1869 /// These three forms can each be reg+reg or reg+mem.
1872 /// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
1873 /// tables.
1874 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
1875 bit Is2Addr = 1> {
1876 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
1877 OpNode, FR32, f32mem, Is2Addr>, XS;
1878 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
1879 OpNode, FR64, f64mem, Is2Addr>, XD;
1880 }
1882 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
1883 bit Is2Addr = 1> {
1884 let mayLoad = 0 in {
1885 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
1886 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
1887 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
1888 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
1889 }
1890 }
1892 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
1893 SDNode OpNode> {
1894 let mayLoad = 0 in {
1895 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
1896 v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
1897 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
1898 v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
1899 }
1900 }
1902 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
1903 bit Is2Addr = 1> {
1904 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1905 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
1906 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1907 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
1908 }
1910 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
1911 bit Is2Addr = 1> {
1912 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1913 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
1914 SSEPackedSingle, Is2Addr>, TB;
1916 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1917 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
1918 SSEPackedDouble, Is2Addr>, TB, OpSize;
1919 }
1921 multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
1922 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1923 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
1924 SSEPackedSingle, 0>, TB;
1926 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1927 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
1928 SSEPackedDouble, 0>, TB, OpSize;
1929 }
1931 // Binary Arithmetic instructions
1932 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
1933 basic_sse12_fp_binop_s_int<0x58, "add", 0>,
1934 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
1935 basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
1936 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
1937 basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
1938 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
1939 basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
1941 let isCommutable = 0 in {
1942 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
1943 basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
1944 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
1945 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
1946 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
1947 basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
1948 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
1949 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
1950 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
1951 basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
1952 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
1953 basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
1954 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
1955 basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
1956 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
1957 basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
1958 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
1959 basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
1960 basic_sse12_fp_binop_p_y_int<0x5D, "min">,
1961 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
1962 }
1964 let Constraints = "$src1 = $dst" in {
1965 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
1966 basic_sse12_fp_binop_p<0x58, "add", fadd>,
1967 basic_sse12_fp_binop_s_int<0x58, "add">;
1968 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
1969 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
1970 basic_sse12_fp_binop_s_int<0x59, "mul">;
1972 let isCommutable = 0 in {
1973 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
1974 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
1975 basic_sse12_fp_binop_s_int<0x5C, "sub">;
1976 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
1977 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
1978 basic_sse12_fp_binop_s_int<0x5E, "div">;
1979 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
1980 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
1981 basic_sse12_fp_binop_s_int<0x5F, "max">,
1982 basic_sse12_fp_binop_p_int<0x5F, "max">;
1983 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
1984 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
1985 basic_sse12_fp_binop_s_int<0x5D, "min">,
1986 basic_sse12_fp_binop_p_int<0x5D, "min">;
1987 }
1988 }
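// Usage sketch: each defm above instantiates the scalar, packed and intrinsic
// variants for one opcode, e.g. ADD yields both
//
//   addss %xmm1, %xmm0    ; low f32 lane only, upper lanes pass through
//   addps %xmm1, %xmm0    ; all four f32 lanes
//
// The *_int forms exist because intrinsics such as int_x86_sse_add_ss take a
// whole v4f32 and preserve its upper lanes, so they cannot be expressed with
// the plain fadd-on-FR32 patterns (and cannot be commuted).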
1991 /// In addition, we also have a special variant of the scalar form here to
1992 /// represent the associated intrinsic operation. This form is unlike the
1993 /// plain scalar form, in that it takes an entire vector (instead of a
1994 /// scalar) and leaves the top elements undefined.
1996 /// And, we have a special variant form for a full-vector intrinsic form.
1998 /// sse1_fp_unop_s - SSE1 unops in scalar form.
1999 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
2000 SDNode OpNode, Intrinsic F32Int> {
2001 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
2002 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2003 [(set FR32:$dst, (OpNode FR32:$src))]>;
2004 // For scalar unary operations, fold a load into the operation
2005 // only in OptForSize mode. It eliminates an instruction, but it also
2006 // eliminates a whole-register clobber (the load), so it introduces a
2007 // partial register update condition.
2008 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
2009 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2010 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
2011 Requires<[HasSSE1, OptForSize]>;
2012 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2013 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2014 [(set VR128:$dst, (F32Int VR128:$src))]>;
2015 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
2016 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2017 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
2018 }
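// Example of the partial-update concern mentioned above: "sqrtss (%rax),
// %xmm0" writes only the low lane and so depends on the previous value of
// %xmm0, whereas "movss (%rax), %xmm0" zeroes the upper lanes (a full
// register write) before "sqrtss %xmm0, %xmm0". The folded form saves an
// instruction, hence it is selected only under OptForSize.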
2020 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
2021 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
2022 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
2023 !strconcat(OpcodeStr,
2024 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2025 def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1,f32mem:$src2),
2026 !strconcat(OpcodeStr,
2027 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2028 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
2029 (ins ssmem:$src1, VR128:$src2),
2030 !strconcat(OpcodeStr,
2031 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2034 /// sse1_fp_unop_p - SSE1 unops in packed form.
2035 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
2036 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2037 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2038 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
2039 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2040 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2041 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
2042 }
2044 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
2045 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
2046 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2047 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2048 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
2049 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2050 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2051 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
2052 }
2054 /// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
2055 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
2056 Intrinsic V4F32Int> {
2057 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2058 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2059 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
2060 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2061 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2062 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
2063 }
2065 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsics unops in packed forms.
2066 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
2067 Intrinsic V4F32Int> {
2068 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2069 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2070 [(set VR256:$dst, (V4F32Int VR256:$src))]>;
2071 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2072 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2073 [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
2074 }
2076 /// sse2_fp_unop_s - SSE2 unops in scalar form.
2077 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
2078 SDNode OpNode, Intrinsic F64Int> {
2079 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
2080 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2081 [(set FR64:$dst, (OpNode FR64:$src))]>;
2082 // See the comments in sse1_fp_unop_s for why this is OptForSize.
2083 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
2084 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2085 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
2086 Requires<[HasSSE2, OptForSize]>;
2087 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2088 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2089 [(set VR128:$dst, (F64Int VR128:$src))]>;
2090 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
2091 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2092 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
2093 }
2095 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
2096 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
2097 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
2098 !strconcat(OpcodeStr,
2099 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2100 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1,f64mem:$src2),
2101 !strconcat(OpcodeStr,
2102 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2103 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
2104 (ins VR128:$src1, sdmem:$src2),
2105 !strconcat(OpcodeStr,
2106 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2109 /// sse2_fp_unop_p - SSE2 unops in vector forms.
2110 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
2111 SDNode OpNode> {
2112 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2113 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2114 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
2115 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2116 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2117 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
2118 }
2120 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
2121 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
2122 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2123 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2124 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
2125 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2126 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2127 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
2128 }
2130 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
2131 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
2132 Intrinsic V2F64Int> {
2133 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2134 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2135 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
2136 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2137 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2138 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
2139 }
2141 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
2142 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
2143 Intrinsic V2F64Int> {
2144 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2145 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2146 [(set VR256:$dst, (V2F64Int VR256:$src))]>;
2147 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2148 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2149 [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
2150 }
2152 let Predicates = [HasAVX] in {
2154 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt">,
2155 sse2_fp_unop_s_avx<0x51, "vsqrt">, VEX_4V;
2157 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
2158 sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
2159 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
2160 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
2161 sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
2162 sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
2163 sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
2164 sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
2165 VEX;
2167 // Reciprocal approximations. Note that these typically require refinement
2168 // in order to obtain suitable precision.
2169 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt">, VEX_4V;
2170 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
2171 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
2172 sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
2173 sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
2175 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp">, VEX_4V;
2176 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
2177 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
2178 sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
2179 sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
2180 }
2182 def : Pat<(f32 (fsqrt FR32:$src)),
2183 (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2184 def : Pat<(f32 (fsqrt (load addr:$src))),
2185 (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2186 Requires<[HasAVX, OptForSize]>;
2187 def : Pat<(f64 (fsqrt FR64:$src)),
2188 (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
2189 def : Pat<(f64 (fsqrt (load addr:$src))),
2190 (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
2191 Requires<[HasAVX, OptForSize]>;
2193 def : Pat<(f32 (X86frsqrt FR32:$src)),
2194 (VRSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2195 def : Pat<(f32 (X86frsqrt (load addr:$src))),
2196 (VRSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2197 Requires<[HasAVX, OptForSize]>;
2199 def : Pat<(f32 (X86frcp FR32:$src)),
2200 (VRCPSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2201 def : Pat<(f32 (X86frcp (load addr:$src))),
2202 (VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2203 Requires<[HasAVX, OptForSize]>;
2205 let Predicates = [HasAVX] in {
2206 def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
2207 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2208 (VSQRTSSr (f32 (IMPLICIT_DEF)),
2209 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2210 sub_ss)>;
2211 def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
2212 (VSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2214 def : Pat<(int_x86_sse2_sqrt_sd VR128:$src),
2215 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)),
2216 (VSQRTSDr (f64 (IMPLICIT_DEF)),
2217 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd)),
2218 sub_sd)>;
2219 def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
2220 (VSQRTSDm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
2222 def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
2223 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2224 (VRSQRTSSr (f32 (IMPLICIT_DEF)),
2225 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2226 sub_ss)>;
2227 def : Pat<(int_x86_sse_rsqrt_ss sse_load_f32:$src),
2228 (VRSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2230 def : Pat<(int_x86_sse_rcp_ss VR128:$src),
2231 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2232 (VRCPSSr (f32 (IMPLICIT_DEF)),
2233 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2234 sub_ss)>;
2235 def : Pat<(int_x86_sse_rcp_ss sse_load_f32:$src),
2236 (VRCPSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2237 }
2240 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
2241 sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
2242 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
2243 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
2244 sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
2245 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
2247 // Reciprocal approximations. Note that these typically require refinement
2248 // in order to obtain suitable precision.
2249 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
2250 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
2251 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
2252 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
2253 sse1_fp_unop_p<0x53, "rcp", X86frcp>,
2254 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
2256 // There is no f64 version of the reciprocal approximation instructions.
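// Refinement sketch (the standard Newton-Raphson step, not part of these
// definitions): rcpps/rsqrtps guarantee only about 12 bits of precision, so
// one refinement step is the usual way to approach single precision:
//
//   x1 = x0 * (2 - d * x0)               ; reciprocal, x0 = rcpps(d)
//   x1 = 0.5 * x0 * (3 - d * x0 * x0)    ; rsqrt, x0 = rsqrtps(d)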
2258 //===----------------------------------------------------------------------===//
2259 // SSE 1 & 2 - Non-temporal stores
2260 //===----------------------------------------------------------------------===//
2262 let AddedComplexity = 400 in { // Prefer non-temporal versions
2263 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
2264 (ins f128mem:$dst, VR128:$src),
2265 "movntps\t{$src, $dst|$dst, $src}",
2266 [(alignednontemporalstore (v4f32 VR128:$src),
2267 addr:$dst)]>, VEX;
2268 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
2269 (ins f128mem:$dst, VR128:$src),
2270 "movntpd\t{$src, $dst|$dst, $src}",
2271 [(alignednontemporalstore (v2f64 VR128:$src),
2272 addr:$dst)]>, VEX;
2273 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
2274 (ins f128mem:$dst, VR128:$src),
2275 "movntdq\t{$src, $dst|$dst, $src}",
2276 [(alignednontemporalstore (v2f64 VR128:$src),
2277 addr:$dst)]>, VEX;
2279 let ExeDomain = SSEPackedInt in
2280 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
2281 (ins f128mem:$dst, VR128:$src),
2282 "movntdq\t{$src, $dst|$dst, $src}",
2283 [(alignednontemporalstore (v4f32 VR128:$src),
2284 addr:$dst)]>, VEX;
2286 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
2287 (VMOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasAVX]>;
2289 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
2290 (ins f256mem:$dst, VR256:$src),
2291 "movntps\t{$src, $dst|$dst, $src}",
2292 [(alignednontemporalstore (v8f32 VR256:$src),
2293 addr:$dst)]>, VEX;
2294 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
2295 (ins f256mem:$dst, VR256:$src),
2296 "movntpd\t{$src, $dst|$dst, $src}",
2297 [(alignednontemporalstore (v4f64 VR256:$src),
2298 addr:$dst)]>, VEX;
2299 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
2300 (ins f256mem:$dst, VR256:$src),
2301 "movntdq\t{$src, $dst|$dst, $src}",
2302 [(alignednontemporalstore (v4f64 VR256:$src),
2303 addr:$dst)]>, VEX;
2304 let ExeDomain = SSEPackedInt in
2305 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
2306 (ins f256mem:$dst, VR256:$src),
2307 "movntdq\t{$src, $dst|$dst, $src}",
2308 [(alignednontemporalstore (v8f32 VR256:$src),
2309 addr:$dst)]>, VEX;
2310 }
2312 def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
2313 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
2314 def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
2315 (VMOVNTPDYmr addr:$dst, VR256:$src)>;
2316 def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
2317 (VMOVNTPSYmr addr:$dst, VR256:$src)>;
2319 let AddedComplexity = 400 in { // Prefer non-temporal versions
2320 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2321 "movntps\t{$src, $dst|$dst, $src}",
2322 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2323 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2324 "movntpd\t{$src, $dst|$dst, $src}",
2325 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
2327 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2328 "movntdq\t{$src, $dst|$dst, $src}",
2329 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2331 let ExeDomain = SSEPackedInt in
2332 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2333 "movntdq\t{$src, $dst|$dst, $src}",
2334 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2336 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
2337 (MOVNTDQmr addr:$dst, VR128:$src)>;
2339 // There is no AVX form for instructions below this point
2340 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2341 "movnti{l}\t{$src, $dst|$dst, $src}",
2342 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2343 TB, Requires<[HasSSE2]>;
2344 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2345 "movnti{q}\t{$src, $dst|$dst, $src}",
2346 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2347 TB, Requires<[HasSSE2]>;
2348 }
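// Usage note (standard semantics of the movnt* stores defined above): they
// are weakly ordered and bypass the cache hierarchy, so a streaming loop must
// publish its results with a fence before another thread may read them:
//
//   movntps %xmm0, (%rdi)   ; write-combining store, no read-for-ownership
//   ...
//   sfence                  ; make prior non-temporal stores globally visible
//
// The aligned forms #GP on a misaligned address, just like movaps.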
2350 //===----------------------------------------------------------------------===//
2351 // SSE 1 & 2 - Misc Instructions (No AVX form)
2352 //===----------------------------------------------------------------------===//
2354 // Prefetch intrinsic.
2355 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2356 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))]>;
2357 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2358 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))]>;
2359 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2360 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))]>;
2361 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2362 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))]>;
2364 // Load, store, and memory fence
2365 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2366 TB, Requires<[HasSSE1]>;
2367 def : Pat<(X86SFence), (SFENCE)>;
2369 // Alias instructions that map zero vector to pxor / xorp* for sse.
2370 // We set canFoldAsLoad because this can be converted to a constant-pool
2371 // load of an all-zeros value if folding it would be beneficial.
2372 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2373 // JIT implementation, it does not expand the instructions below like
2374 // X86MCInstLower does.
2375 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2376 isCodeGenOnly = 1 in {
2377 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2378 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
2379 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2380 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
2381 let ExeDomain = SSEPackedInt in
2382 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2383 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2384 }
2386 // The same as done above, but for AVX. The 128-bit versions are the
2387 // same, just re-encoded. There is no 256-bit PI version, and none is
2388 // needed: on Sandy Bridge the register is set to zero at the rename stage
2389 // without using any execution unit, so SET0PSY and SET0PDY can be used
2390 // for vector int instructions without penalty.
2391 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2392 // JIT implementation, it does not expand the instructions below like
2393 // X86MCInstLower does.
2394 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2395 isCodeGenOnly = 1, Predicates = [HasAVX] in {
2396 def AVX_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2397 [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
2398 def AVX_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2399 [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
2400 def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2401 [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
2402 def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2403 [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
2404 let ExeDomain = SSEPackedInt in
2405 def AVX_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2406 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2407 }
2409 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
2410 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
2411 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
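// Note (standard micro-architectural behavior): xorps/pxor of a register
// with itself is recognized as a zeroing idiom and breaks dependencies on the
// old register value, which is consistent with marking V_SET0* both
// isReMaterializable and isAsCheapAsAMove above.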
2413 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2414 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
2416 // AVX has no support for 256-bit integer instructions, but since the 128-bit
2417 // VPXOR instruction writes zero to its upper part, it's safe to build zeros.
2418 def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;
2419 def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
2420 (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;
2422 def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
2423 def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
2424 (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
2426 //===----------------------------------------------------------------------===//
2427 // SSE 1 & 2 - Load/Store XCSR register
2428 //===----------------------------------------------------------------------===//
2430 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2431 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2432 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2433 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2435 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2436 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2437 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2438 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2440 //===---------------------------------------------------------------------===//
2441 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2442 //===---------------------------------------------------------------------===//
2444 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2446 let neverHasSideEffects = 1 in {
2447 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2448 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2449 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2450 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2452 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2453 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2454 def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2455 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2457 let canFoldAsLoad = 1, mayLoad = 1 in {
2458 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2459 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2460 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2461 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2462 let Predicates = [HasAVX] in {
2463 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2464 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2465 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2466 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2470 let mayStore = 1 in {
2471 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2472 (ins i128mem:$dst, VR128:$src),
2473 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2474 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
2475 (ins i256mem:$dst, VR256:$src),
2476 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2477 let Predicates = [HasAVX] in {
2478 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2479 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2480 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
2481 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2485 let neverHasSideEffects = 1 in
2486 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2487 "movdqa\t{$src, $dst|$dst, $src}", []>;
2489 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2490 "movdqu\t{$src, $dst|$dst, $src}",
2491 []>, XS, Requires<[HasSSE2]>;
2493 let canFoldAsLoad = 1, mayLoad = 1 in {
2494 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2495 "movdqa\t{$src, $dst|$dst, $src}",
2496 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2497 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2498 "movdqu\t{$src, $dst|$dst, $src}",
2499 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2500 XS, Requires<[HasSSE2]>;
2501 }
2503 let mayStore = 1 in {
2504 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2505 "movdqa\t{$src, $dst|$dst, $src}",
2506 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2507 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2508 "movdqu\t{$src, $dst|$dst, $src}",
2509 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2510 XS, Requires<[HasSSE2]>;
2511 }
2513 // Intrinsic forms of the MOVDQU store
2514 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2515 "vmovdqu\t{$src, $dst|$dst, $src}",
2516 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2517 XS, VEX, Requires<[HasAVX]>;
2519 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2520 "movdqu\t{$src, $dst|$dst, $src}",
2521 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2522 XS, Requires<[HasSSE2]>;
} // ExeDomain = SSEPackedInt

def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
          (VMOVDQUYmr addr:$dst, VR256:$src)>;
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Arithmetic Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in { // SSE integer instructions

multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
                            bit IsCommutable = 0, bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId VR128:$src1,
                          (bitconvert (memopv2i64 addr:$src2))))]>;
}
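// Editor's note (illustrative, not from the original source): tblgen prepends
// the defm name to each def inside a multiclass, so a use such as
//   defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb", int_x86_sse2_padds_b, 1>;
// produces the records PADDSBrr and PADDSBrm, and Is2Addr selects between the
// two-operand SSE asm string and the three-operand AVX one.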
multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
                             string OpcodeStr, Intrinsic IntId,
                             Intrinsic IntId2, bit Is2Addr = 1> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId VR128:$src1,
                          (bitconvert (memopv2i64 addr:$src2))))]>;
  def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
       (ins VR128:$src1, i32i8imm:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
}
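// Editor's note (illustrative, not from the original source): one use of this
// multiclass yields three records, e.g. defm PSLLW gives PSLLWrr (shift count
// in an XMM register), PSLLWrm (shift count loaded from memory) and PSLLWri
// (immediate form, encoded via the second opcode and ImmForm: 0x71 /6 for
// "psllw $imm, %xmm0").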
/// PDI_binop_rm - Simple SSE2 binary operator.
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                        ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
                                (bitconvert (memopv2i64 addr:$src2)))))]>;
}
/// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
///
/// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
/// to collapse (bitconvert VT to VT) into its operand.
///
multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
                              bit IsCommutable = 0, bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
}
} // ExeDomain = SSEPackedInt

// 128-bit Integer Arithmetic

let Predicates = [HasAVX] in {
defm VPADDB  : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
defm VPADDW  : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
defm VPADDD  : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
defm VPADDQ  : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
defm VPSUBB  : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
defm VPSUBW  : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
defm VPSUBD  : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
defm VPSUBQ  : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
// Intrinsic forms
defm VPSUBSB  : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
                                 VEX_4V;
defm VPSUBSW  : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
                                 VEX_4V;
defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
                                 VEX_4V;
defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
                                 VEX_4V;
defm VPADDSB  : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
                                 VEX_4V;
defm VPADDSW  : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
                                 VEX_4V;
defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
                                 VEX_4V;
defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
                                 VEX_4V;
defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
                                 VEX_4V;
defm VPMULHW  : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
                                 VEX_4V;
defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
                                 VEX_4V;
defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
                                 VEX_4V;
defm VPAVGB   : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
                                 VEX_4V;
defm VPAVGW   : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
                                 VEX_4V;
defm VPMINUB  : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
                                 VEX_4V;
defm VPMINSW  : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
                                 VEX_4V;
defm VPMAXUB  : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
                                 VEX_4V;
defm VPMAXSW  : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
                                 VEX_4V;
defm VPSADBW  : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
                                 VEX_4V;
}
let Constraints = "$src1 = $dst" in {
defm PADDB  : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
defm PADDW  : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
defm PADDD  : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
defm PADDQ  : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
defm PSUBB  : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
defm PSUBW  : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
defm PSUBD  : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
defm PSUBQ  : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;

// Intrinsic forms
defm PSUBSB  : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
defm PSUBSW  : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
defm PADDSB  : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
defm PADDSW  : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
defm PMULHW  : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
defm PAVGB   : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
defm PAVGW   : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
defm PMINUB  : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
defm PMINSW  : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
defm PMAXUB  : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
defm PMAXSW  : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
defm PSADBW  : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;

} // Constraints = "$src1 = $dst"
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Logical Instructions
//===---------------------------------------------------------------------===//

let Predicates = [HasAVX] in {
defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
                                int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
                                VEX_4V;
defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
                                int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
                                VEX_4V;
defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
                                int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
                                VEX_4V;

defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
                                int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
                                VEX_4V;
defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
                                int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
                                VEX_4V;
defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
                                int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
                                VEX_4V;

defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
                                int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
                                VEX_4V;
defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
                                int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
                                VEX_4V;

defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
defm VPOR  : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
let ExeDomain = SSEPackedInt in {
let neverHasSideEffects = 1 in {
// 128-bit logical shifts.
def VPSLLDQri : PDIi8<0x73, MRM7r,
                      (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                      "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                      VEX_4V;
def VPSRLDQri : PDIi8<0x73, MRM3r,
                      (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                      "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                      VEX_4V;
// PSRADQri doesn't exist in SSE[1-3].
}
def VPANDNrr : PDI<0xDF, MRMSrcReg,
                   (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                   "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                   [(set VR128:$dst,
                         (v2i64 (X86andnp VR128:$src1, VR128:$src2)))]>, VEX_4V;

def VPANDNrm : PDI<0xDF, MRMSrcMem,
                   (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                   "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                   [(set VR128:$dst, (X86andnp VR128:$src1,
                                      (memopv2i64 addr:$src2)))]>, VEX_4V;
}
}
let Constraints = "$src1 = $dst" in {
defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
                               int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
                               int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
                               int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;

defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
                               int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
                               int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
                               int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;

defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
                               int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
                               int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;

defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
defm POR  : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
let ExeDomain = SSEPackedInt in {
let neverHasSideEffects = 1 in {
// 128-bit logical shifts.
def PSLLDQri : PDIi8<0x73, MRM7r,
                     (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                     "pslldq\t{$src2, $dst|$dst, $src2}", []>;
def PSRLDQri : PDIi8<0x73, MRM3r,
                     (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                     "psrldq\t{$src2, $dst|$dst, $src2}", []>;
// PSRADQri doesn't exist in SSE[1-3].
}

def PANDNrr : PDI<0xDF, MRMSrcReg,
                  (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                  "pandn\t{$src2, $dst|$dst, $src2}", []>;

def PANDNrm : PDI<0xDF, MRMSrcMem,
                  (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                  "pandn\t{$src2, $dst|$dst, $src2}", []>;
}
} // Constraints = "$src1 = $dst"
let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
            (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
            (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
            (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
  def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
            (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
  def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
            (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;

  // Shift up / down and insert zeros.
  def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
            (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
  def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
            (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
}
let Predicates = [HasSSE2] in {
  def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
            (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
            (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
            (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
  def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
            (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
  def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
            (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;

  // Shift up / down and insert zeros.
  def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
            (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
  def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
            (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
}
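// Editor's note (illustrative, not from the original source): PSLLDQ/PSRLDQ
// shift the whole register by *bytes*. The intrinsics come in a bit-count
// flavor (psll_dq, rewritten with BYTE_imm, i.e. the immediate divided by 8)
// and a byte-count flavor (psll_dq_bs, passed through unchanged), e.g.
//   (int_x86_sse2_psll_dq VR128:$src, 64) becomes (PSLLDQri VR128:$src, 8).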
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Comparison Instructions
//===---------------------------------------------------------------------===//

let Predicates = [HasAVX] in {
  defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
                                   0>, VEX_4V;
  defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
                                   0>, VEX_4V;
  defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
                                   0>, VEX_4V;
  defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
                                   0>, VEX_4V;
  defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
                                   0>, VEX_4V;
  defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
                                   0>, VEX_4V;
  def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
            (VPCMPEQBrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
            (VPCMPEQBrm VR128:$src1, addr:$src2)>;
  def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
            (VPCMPEQWrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
            (VPCMPEQWrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
            (VPCMPEQDrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
            (VPCMPEQDrm VR128:$src1, addr:$src2)>;

  def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
            (VPCMPGTBrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
            (VPCMPGTBrm VR128:$src1, addr:$src2)>;
  def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
            (VPCMPGTWrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
            (VPCMPGTWrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
            (VPCMPGTDrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
            (VPCMPGTDrm VR128:$src1, addr:$src2)>;
}
let Constraints = "$src1 = $dst" in {
  defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
  defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
  defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
  defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
  defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
  defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
} // Constraints = "$src1 = $dst"

def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
          (PCMPEQBrr VR128:$src1, VR128:$src2)>;
def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
          (PCMPEQBrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
          (PCMPEQWrr VR128:$src1, VR128:$src2)>;
def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
          (PCMPEQWrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
          (PCMPEQDrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
          (PCMPEQDrm VR128:$src1, addr:$src2)>;

def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
          (PCMPGTBrr VR128:$src1, VR128:$src2)>;
def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
          (PCMPGTBrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
          (PCMPGTWrr VR128:$src1, VR128:$src2)>;
def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
          (PCMPGTWrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
          (PCMPGTDrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
          (PCMPGTDrm VR128:$src1, addr:$src2)>;
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Pack Instructions
//===---------------------------------------------------------------------===//

let Predicates = [HasAVX] in {
defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
                                  0, 0>, VEX_4V;
defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
                                  0, 0>, VEX_4V;
defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
                                  0, 0>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
} // Constraints = "$src1 = $dst"

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Shuffle Instructions
//===---------------------------------------------------------------------===//
let ExeDomain = SSEPackedInt in {
multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
                         PatFrag bc_frag> {
def ri : Ii8<0x70, MRMSrcReg,
             (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
             !strconcat(OpcodeStr,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
                                                     (undef))))]>;
def mi : Ii8<0x70, MRMSrcMem,
             (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
             !strconcat(OpcodeStr,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (vt (pshuf_frag:$src2
                                     (bc_frag (memopv2i64 addr:$src1)),
                                     (undef))))]>;
}
} // ExeDomain = SSEPackedInt
let Predicates = [HasAVX] in {
  let AddedComplexity = 5 in
  defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
                               VEX;

  // SSE2 with ImmT == Imm8 and XS prefix.
  defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
                                VEX;

  // SSE2 with ImmT == Imm8 and XD prefix.
  defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
                                VEX;
}
let Predicates = [HasSSE2] in {
  let AddedComplexity = 5 in
  defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;

  // SSE2 with ImmT == Imm8 and XS prefix.
  defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;

  // SSE2 with ImmT == Imm8 and XD prefix.
  defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
}
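// Editor's note (illustrative, not from the original source): the 8-bit
// immediate is a 2-bit source-element selector per destination lane, so
// "pshufd $0x1B, %xmm1, %xmm0" (0x1B = 0b00011011) reverses the four dwords:
//   dst = { src[3], src[2], src[1], src[0] }.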
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Unpack Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {
multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
                       SDNode OpNode, PatFrag bc_frag, bit Is2Addr = 1> {
  def rr : PDI<opc, MRMSrcReg,
      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
      !if(Is2Addr,
          !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
          !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))]>;
  def rm : PDI<opc, MRMSrcMem,
      (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
      !if(Is2Addr,
          !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
          !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set VR128:$dst, (OpNode VR128:$src1,
                         (bc_frag (memopv2i64 addr:$src2))))]>;
}
let Predicates = [HasAVX] in {
  defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Punpcklbw,
                                bc_v16i8, 0>, VEX_4V;
  defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Punpcklwd,
                                bc_v8i16, 0>, VEX_4V;
  defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Punpckldq,
                                bc_v4i32, 0>, VEX_4V;

  /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
  /// knew to collapse (bitconvert VT to VT) into its operand.
  def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
                                                  VR128:$src2)))]>, VEX_4V;
  def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
                                          (memopv2i64 addr:$src2))))]>, VEX_4V;

  defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Punpckhbw,
                                bc_v16i8, 0>, VEX_4V;
  defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Punpckhwd,
                                bc_v8i16, 0>, VEX_4V;
  defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Punpckhdq,
                                bc_v4i32, 0>, VEX_4V;

  /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
  /// knew to collapse (bitconvert VT to VT) into its operand.
  def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
                                                  VR128:$src2)))]>, VEX_4V;
  def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
                                          (memopv2i64 addr:$src2))))]>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
  defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Punpcklbw, bc_v16i8>;
  defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Punpcklwd, bc_v8i16>;
  defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Punpckldq, bc_v4i32>;

  /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
  /// knew to collapse (bitconvert VT to VT) into its operand.
  def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)))]>;
  def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                         "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (X86Punpcklqdq VR128:$src1,
                                   (memopv2i64 addr:$src2))))]>;

  defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Punpckhbw, bc_v16i8>;
  defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Punpckhwd, bc_v8i16>;
  defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Punpckhdq, bc_v4i32>;

  /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
  /// knew to collapse (bitconvert VT to VT) into its operand.
  def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
                         (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                         "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)))]>;
  def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
                         (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                         "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                         [(set VR128:$dst,
                           (v2i64 (X86Punpckhqdq VR128:$src1,
                                   (memopv2i64 addr:$src2))))]>;
}
} // ExeDomain = SSEPackedInt
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Extract and Insert
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {
multiclass sse2_pinsrw<bit Is2Addr = 1> {
  def rri : Ii8<0xC4, MRMSrcReg,
       (outs VR128:$dst), (ins VR128:$src1,
        GR32:$src2, i32i8imm:$src3),
       !if(Is2Addr,
           "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
       [(set VR128:$dst,
         (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
  def rmi : Ii8<0xC4, MRMSrcMem,
       (outs VR128:$dst), (ins VR128:$src1,
        i16mem:$src2, i32i8imm:$src3),
       !if(Is2Addr,
           "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
       [(set VR128:$dst,
         (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
                    imm:$src3))]>;
}

// Extract
let Predicates = [HasAVX] in
def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
                    (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
                                                imm:$src2))]>, OpSize, VEX;
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
                     (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
                     "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                     [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
                                                 imm:$src2))]>;

// Insert
let Predicates = [HasAVX] in {
  defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
  def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
       "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
       []>, OpSize, VEX_4V;
}

let Constraints = "$src1 = $dst" in
  defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
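// Editor's note (illustrative, not from the original source): PINSRW replaces
// one 16-bit lane, selected by the immediate, with the low word of a GPR (or
// a 16-bit load); PEXTRW does the reverse, e.g.
//   pinsrw $3, %eax, %xmm0    ; word lane 3 of xmm0 = ax
//   pextrw $3, %xmm0, %eax    ; eax = zero-extended word lane 3 of xmm0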
} // ExeDomain = SSEPackedInt

//===---------------------------------------------------------------------===//
// SSE2 - Packed Mask Creation
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {
3165 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
3166 "pmovmskb\t{$src, $dst|$dst, $src}",
3167 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
3168 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
3169 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
3170 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
3171 "pmovmskb\t{$src, $dst|$dst, $src}",
3172 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
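// Editor's note (illustrative, not from the original source): PMOVMSKB
// gathers the sign bit of each of the 16 bytes into the low 16 bits of a
// GPR, e.g. after "pcmpeqb %xmm1, %xmm0", "pmovmskb %xmm0, %eax" leaves eax
// nonzero iff some byte pair compared equal.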
} // ExeDomain = SSEPackedInt

//===---------------------------------------------------------------------===//
// SSE2 - Conditional Store
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {

let Uses = [EDI] in
def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
           (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
let Uses = [RDI] in
def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
           (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;

let Uses = [EDI] in
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
let Uses = [RDI] in
def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
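// Editor's note (illustrative, not from the original source): MASKMOVDQU
// stores only the bytes of $src whose corresponding mask byte has its most
// significant bit set, to the address implicitly held in EDI/RDI; that
// implicit operand is why the defs above are wrapped in
// "let Uses = [EDI]" / "let Uses = [RDI]".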
} // ExeDomain = SSEPackedInt

//===---------------------------------------------------------------------===//
// SSE2 - Move Doubleword
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//
// Move Int Doubleword to Packed Double Int
def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
                        VEX;
def VMOV64toPQIrr : VRPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                          "mov{d|q}\t{$src, $dst|$dst, $src}",
                          [(set VR128:$dst,
                            (v2i64 (scalar_to_vector GR64:$src)))]>, VEX;
def VMOV64toSDrr : VRPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                         "mov{d|q}\t{$src, $dst|$dst, $src}",
                         [(set FR64:$dst, (bitconvert GR64:$src))]>, VEX;
def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))]>;
def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>;
//===---------------------------------------------------------------------===//
// Move Int Doubleword to Single Scalar

def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;

def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
                       VEX;
def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (bitconvert GR32:$src))]>;

def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int to Packed Double Int

def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                         (iPTR 0)))]>, VEX;
def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
                        (ins i32mem:$dst, VR128:$src),
                        "movd\t{$src, $dst|$dst, $src}",
                        [(store (i32 (vector_extract (v4i32 VR128:$src),
                                      (iPTR 0))), addr:$dst)]>, VEX;
def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                       (iPTR 0)))]>;
def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (vector_extract (v4i32 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>;
def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                         (iPTR 0)))]>;
def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;

def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
//===---------------------------------------------------------------------===//
// Move Scalar Single to Double Int

def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (bitconvert FR32:$src))]>;
def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
//===---------------------------------------------------------------------===//
// Patterns and instructions to describe movd/movq to XMM register zero-extends

let AddedComplexity = 15 in {
def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v4i32 (X86vzmovl
                                      (v4i32 (scalar_to_vector GR32:$src)))))]>,
                       VEX;
def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
                       [(set VR128:$dst, (v2i64 (X86vzmovl
                                      (v2i64 (scalar_to_vector GR64:$src)))))]>,
                       VEX, VEX_W;
}
let AddedComplexity = 15 in {
def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v4i32 (X86vzmovl
                                      (v4i32 (scalar_to_vector GR32:$src)))))]>;
def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
                        [(set VR128:$dst, (v2i64 (X86vzmovl
                                      (v2i64 (scalar_to_vector GR64:$src)))))]>;
}
let AddedComplexity = 20 in {
def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
                                                   (loadi32 addr:$src))))))]>,
                       VEX;
def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
                                                   (loadi32 addr:$src))))))]>;

def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
            (MOVZDI2PDIrm addr:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
            (MOVZDI2PDIrm addr:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
            (MOVZDI2PDIrm addr:$src)>;
}
// AVX 128-bit movd/movq instructions write zeros in the high 128-bit part.
// Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
                             (v4i32 (scalar_to_vector GR32:$src)), (i32 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVZDI2PDIrr GR32:$src), sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
                             (v2i64 (scalar_to_vector GR64:$src)), (i32 0)))),
          (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
// These are the correct encodings of the instructions so that we know how to
// read correct assembly, even though we continue to emit the wrong ones for
// compatibility with Darwin's buggy assembler.
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOV64toSDrr FR64:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOVSDto64rr GR64:$dst, FR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (VMOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
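// Editor's note (illustrative, not from the original source): the trailing 0
// marks these aliases as parse-only, so the asm parser accepts the "movq"
// spelling for the GR64<->XMM forms above while the printer keeps emitting
// the "movd" spelling from the instruction defs themselves.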
//===---------------------------------------------------------------------===//
// SSE2 - Move Quadword
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//
// Move Quadword Int to Packed Quadword Int

def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "vmovq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                     VEX, Requires<[HasAVX]>;
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                    "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst,
                      (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                    Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
//===---------------------------------------------------------------------===//
// Move Packed Quadword Int to Quadword Int

def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(store (i64 (vector_extract (v2i64 VR128:$src),
                                      (iPTR 0))), addr:$dst)]>, VEX;
def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}",
                      [(store (i64 (vector_extract (v2i64 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>;

def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
          (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
//===---------------------------------------------------------------------===//
// Store / copy lower 64 bits of an XMM register.

def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
let AddedComplexity = 20 in
def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                      "vmovq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
                                                  (loadi64 addr:$src))))))]>,
                      XS, VEX, Requires<[HasAVX]>;

let AddedComplexity = 20 in {
def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
                                                 (loadi64 addr:$src))))))]>,
                     XS, Requires<[HasSSE2]>;

def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
            (MOVZQI2PQIrm addr:$src)>;
def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
            (MOVZQI2PQIrm addr:$src)>;
def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
}
//===---------------------------------------------------------------------===//
// Moving from XMM to XMM and clear upper 64 bits. Note: there is a bug in the
// IA-32 documentation here; movq xmm1, xmm2 does clear the high bits.

let AddedComplexity = 15 in
def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vmovq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
                        XS, VEX, Requires<[HasAVX]>;
let AddedComplexity = 15 in
def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
                        XS, Requires<[HasSSE2]>;
let AddedComplexity = 20 in
def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                         "vmovq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (v2i64 (X86vzmovl
                                                   (loadv2i64 addr:$src))))]>,
                         XS, VEX, Requires<[HasAVX]>;
let AddedComplexity = 20 in {
def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (v2i64 (X86vzmovl
                                                  (loadv2i64 addr:$src))))]>,
                        XS, Requires<[HasSSE2]>;

def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
            (MOVZPQILo2PQIrm addr:$src)>;
}
// Instructions to match in the assembler
def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                      "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
// Recognize "movd" with GR64 destination, but encode as a "movq"
def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                          "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
// Instructions for the disassembler
// xr = XMM register
// xm = mem64

let Predicates = [HasAVX] in
def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 "movq\t{$src, $dst|$dst, $src}", []>, XS;
//===---------------------------------------------------------------------===//
// SSE2 - Misc Instructions
//===---------------------------------------------------------------------===//

// Flush cache
def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
                "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
              TB, Requires<[HasSSE2]>;
// Load, store, and memory fence
def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
               "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
               "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
def : Pat<(X86LFence), (LFENCE)>;
def : Pat<(X86MFence), (MFENCE)>;
// Pause. This "instruction" is encoded as "rep; nop", so even though it
// was introduced with SSE2, it's backward compatible.
def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
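// Editor's note (illustrative, not from the original source): "pause"
// assembles to F3 90, i.e. a REP prefix (F3) in front of NOP (90), so
// processors without SSE2 simply execute it as a NOP.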
// Alias instructions that materialize an all-ones vector via pcmpeqd of a
// register with itself. We set canFoldAsLoad because this can be converted
// to a constant-pool load of an all-ones value if folding it would be
// beneficial.
// FIXME: Change encoding to pseudo! This is blocked right now by the x86
// JIT implementation, it does not expand the instructions below like
// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
  def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
                         [(set VR128:$dst, (v4i32 immAllOnesV))]>;
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1, ExeDomain = SSEPackedInt, Predicates = [HasAVX] in
  def AVX_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
                           [(set VR128:$dst, (v4i32 immAllOnesV))]>, VEX_4V;
//===---------------------------------------------------------------------===//
// SSE3 - Conversion Instructions
//===---------------------------------------------------------------------===//

// Convert Packed Double FP to Packed DW Integers
let Predicates = [HasAVX] in {
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTPD2DQrr   : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                         "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
// XMM only
def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                        "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                        "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
}
def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;

def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
          (VCVTPD2DQYrr VR256:$src)>;
def : Pat<(v4i32 (fp_to_sint (memopv4f64 addr:$src))),
          (VCVTPD2DQYrm addr:$src)>;
// Convert Packed DW Integers to Packed Double FP
let Predicates = [HasAVX] in {
def VCVTDQ2PDrm  : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTDQ2PDrr  : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
                        "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                        "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
}

def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
// AVX 256-bit register conversion intrinsics
def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
          (VCVTDQ2PDYrr VR128:$src)>;
def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
          (VCVTDQ2PDYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
          (VCVTPD2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
          (VCVTPD2DQYrm addr:$src)>;

def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
          (VCVTDQ2PDYrr VR128:$src)>;
def : Pat<(v4f64 (sint_to_fp (memopv4i32 addr:$src))),
          (VCVTDQ2PDYrm addr:$src)>;
//===---------------------------------------------------------------------===//
// SSE3 - Move Instructions
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//
// Replicate Single FP - MOVSHDUP and MOVSLDUP
multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
                              ValueType vt, RegisterClass RC, PatFrag mem_frag,
                              X86MemOperand x86memop> {
def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (vt (OpNode RC:$src)))]>;
def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>;
}
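// Editor's note (illustrative, not from the original source): MOVSHDUP
// duplicates the odd single-precision lanes and MOVSLDUP the even ones,
// e.g. for the 128-bit form movshdup gives
//   dst = { src[1], src[1], src[3], src[3] }.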
let Predicates = [HasAVX] in {
  defm VMOVSHDUP  : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
                                       v4f32, VR128, memopv4f32, f128mem>, VEX;
  defm VMOVSLDUP  : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
                                       v4f32, VR128, memopv4f32, f128mem>, VEX;
  defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
                                       v8f32, VR256, memopv8f32, f256mem>, VEX;
  defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
                                       v8f32, VR256, memopv8f32, f256mem>, VEX;
}
defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
                                   memopv4f32, f128mem>;
defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
                                   memopv4f32, f128mem>;
let Predicates = [HasSSE3] in {
  def : Pat<(v4i32 (X86Movshdup VR128:$src)),
            (MOVSHDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
            (MOVSHDUPrm addr:$src)>;
  def : Pat<(v4i32 (X86Movsldup VR128:$src)),
            (MOVSLDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
            (MOVSLDUPrm addr:$src)>;
}

let Predicates = [HasAVX] in {
  def : Pat<(v4i32 (X86Movshdup VR128:$src)),
            (VMOVSHDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
            (VMOVSHDUPrm addr:$src)>;
  def : Pat<(v4i32 (X86Movsldup VR128:$src)),
            (VMOVSLDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
            (VMOVSLDUPrm addr:$src)>;
  def : Pat<(v8i32 (X86Movshdup VR256:$src)),
            (VMOVSHDUPYrr VR256:$src)>;
  def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (memopv4i64 addr:$src)))),
            (VMOVSHDUPYrm addr:$src)>;
  def : Pat<(v8i32 (X86Movsldup VR256:$src)),
            (VMOVSLDUPYrr VR256:$src)>;
  def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (memopv4i64 addr:$src)))),
            (VMOVSLDUPYrm addr:$src)>;
}
//===---------------------------------------------------------------------===//
// Replicate Double FP - MOVDDUP

multiclass sse3_replicate_dfp<string OpcodeStr> {
def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set VR128:$dst,
                (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
                                (undef))))]>;
}
multiclass sse3_replicate_dfp_y<string OpcodeStr> {
def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              []>;
def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              []>;
}
let Predicates = [HasAVX] in {
  // FIXME: Merge above classes when we have patterns for the ymm version
  defm VMOVDDUP  : sse3_replicate_dfp<"vmovddup">, VEX;
  defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
}
defm MOVDDUP : sse3_replicate_dfp<"movddup">;
// Move Unaligned Integer
let Predicates = [HasAVX] in {
  def VLDDQUrm  : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "vlddqu\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
  def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                   "vlddqu\t{$src, $dst|$dst, $src}",
                   [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
}
def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "lddqu\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
                   (undef)),
          (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;

// Several Move patterns
let AddedComplexity = 5 in {
def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
          (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
          (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
          (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
          (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
}
//===---------------------------------------------------------------------===//
// SSE3 - Arithmetic
//===---------------------------------------------------------------------===//

multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, bit Is2Addr = 1> {
  def rr : I<0xD0, MRMSrcReg,
       (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
  def rm : I<0xD0, MRMSrcMem,
       (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
}
let Predicates = [HasAVX],
    ExeDomain = SSEPackedDouble in {
  defm VADDSUBPS  : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
                                f128mem, 0>, TB, XD, VEX_4V;
  defm VADDSUBPD  : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
                                f128mem, 0>, TB, OpSize, VEX_4V;
  defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
                                f256mem, 0>, TB, XD, VEX_4V;
  defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
                                f256mem, 0>, TB, OpSize, VEX_4V;
}
let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
    ExeDomain = SSEPackedDouble in {
  defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
                              f128mem>, TB, XD;
  defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
                              f128mem>, TB, OpSize;
}
//===---------------------------------------------------------------------===//
// SSE3 Instructions
//===---------------------------------------------------------------------===//

// Horizontal ops
multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
                   X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
  def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;

  def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
}
multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
                  X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
  def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;

  def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
}
3806 let Predicates = [HasAVX] in {
3807 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
3808 int_x86_sse3_hadd_ps, 0>, VEX_4V;
3809 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
3810 int_x86_sse3_hadd_pd, 0>, VEX_4V;
3811 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
3812 int_x86_sse3_hsub_ps, 0>, VEX_4V;
3813 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
3814 int_x86_sse3_hsub_pd, 0>, VEX_4V;
3815 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
3816 int_x86_avx_hadd_ps_256, 0>, VEX_4V;
3817 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
3818 int_x86_avx_hadd_pd_256, 0>, VEX_4V;
3819 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
3820 int_x86_avx_hsub_ps_256, 0>, VEX_4V;
3821 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
3822 int_x86_avx_hsub_pd_256, 0>, VEX_4V;
3823 }
3825 let Constraints = "$src1 = $dst" in {
3826 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
3827 int_x86_sse3_hadd_ps>;
3828 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
3829 int_x86_sse3_hadd_pd>;
3830 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
3831 int_x86_sse3_hsub_ps>;
3832 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
3833 int_x86_sse3_hsub_pd>;
3834 }
3836 //===---------------------------------------------------------------------===//
3837 // SSSE3 - Packed Absolute Instructions
3838 //===---------------------------------------------------------------------===//
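// pabs{b,w,d} takes the per-element absolute value:
//   dst[i] = src[i] < 0 ? -src[i] : src[i]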
3841 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
3842 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
3843 PatFrag mem_frag128, Intrinsic IntId128> {
3844 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3845 (ins VR128:$src),
3846 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3847 [(set VR128:$dst, (IntId128 VR128:$src))]>,
3848 OpSize;
3850 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3851 (ins i128mem:$src),
3852 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3853 [(set VR128:$dst,
3854 (IntId128
3855 (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
3856 }
3858 let Predicates = [HasAVX] in {
3859 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
3860 int_x86_ssse3_pabs_b_128>, VEX;
3861 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
3862 int_x86_ssse3_pabs_w_128>, VEX;
3863 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
3864 int_x86_ssse3_pabs_d_128>, VEX;
3865 }
3867 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
3868 int_x86_ssse3_pabs_b_128>;
3869 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
3870 int_x86_ssse3_pabs_w_128>;
3871 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
3872 int_x86_ssse3_pabs_d_128>;
3874 //===---------------------------------------------------------------------===//
3875 // SSSE3 - Packed Binary Operator Instructions
3876 //===---------------------------------------------------------------------===//
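// For reference, pshufb picks source bytes by the low nibble of each mask
// byte, zeroing a lane when the mask byte's sign bit is set:
//   dst[i] = (mask[i] & 0x80) ? 0 : src[mask[i] & 0xF]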
3878 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
3879 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
3880 PatFrag mem_frag128, Intrinsic IntId128,
3881 bit Is2Addr = 1> {
3882 let isCommutable = 1 in
3883 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3884 (ins VR128:$src1, VR128:$src2),
3885 !if(Is2Addr,
3886 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3887 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3888 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3889 OpSize;
3890 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3891 (ins VR128:$src1, i128mem:$src2),
3892 !if(Is2Addr,
3893 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3894 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3895 [(set VR128:$dst,
3896 (IntId128 VR128:$src1,
3897 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
3898 }
3900 let Predicates = [HasAVX] in {
3901 let isCommutable = 0 in {
3902 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
3903 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
3904 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
3905 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
3906 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
3907 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
3908 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
3909 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
3910 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
3911 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
3912 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
3913 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
3914 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
3915 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
3916 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
3917 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
3918 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
3919 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
3920 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
3921 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
3922 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
3923 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
3924 }
3925 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
3926 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
3927 }
3929 // None of these have i8 immediate fields.
3930 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
3931 let isCommutable = 0 in {
3932 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
3933 int_x86_ssse3_phadd_w_128>;
3934 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
3935 int_x86_ssse3_phadd_d_128>;
3936 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
3937 int_x86_ssse3_phadd_sw_128>;
3938 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
3939 int_x86_ssse3_phsub_w_128>;
3940 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
3941 int_x86_ssse3_phsub_d_128>;
3942 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
3943 int_x86_ssse3_phsub_sw_128>;
3944 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
3945 int_x86_ssse3_pmadd_ub_sw_128>;
3946 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
3947 int_x86_ssse3_pshuf_b_128>;
3948 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
3949 int_x86_ssse3_psign_b_128>;
3950 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
3951 int_x86_ssse3_psign_w_128>;
3952 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
3953 int_x86_ssse3_psign_d_128>;
3954 }
3955 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
3956 int_x86_ssse3_pmul_hr_sw_128>;
3957 }
3959 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
3960 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
3961 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
3962 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
3964 def : Pat<(X86psignb VR128:$src1, VR128:$src2),
3965 (PSIGNBrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
3966 def : Pat<(X86psignw VR128:$src1, VR128:$src2),
3967 (PSIGNWrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
3968 def : Pat<(X86psignd VR128:$src1, VR128:$src2),
3969 (PSIGNDrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
3971 //===---------------------------------------------------------------------===//
3972 // SSSE3 - Packed Align Instruction Patterns
3973 //===---------------------------------------------------------------------===//
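// palignr concatenates dst:src (dst in the high half), shifts the 32-byte
// value right by imm bytes, and keeps the low 16 bytes. Note that the
// patterns below hand the operands to PALIGNR swapped, since the X86PAlign
// node takes them in the opposite order from the instruction.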
3975 multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
3976 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
3977 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
3978 !if(Is2Addr,
3979 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3980 !strconcat(asm,
3981 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3982 []>, OpSize;
3983 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
3984 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
3985 !if(Is2Addr,
3986 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3987 !strconcat(asm,
3988 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3989 []>, OpSize;
3990 }
3992 let Predicates = [HasAVX] in
3993 defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
3994 let Constraints = "$src1 = $dst", Predicates = [HasSSSE3] in
3995 defm PALIGN : ssse3_palign<"palignr">;
3997 let Predicates = [HasSSSE3] in {
3998 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3999 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4000 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4001 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4002 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4003 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4004 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4005 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4006 }
4008 let Predicates = [HasAVX] in {
4009 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4010 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4011 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4012 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4013 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4014 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4015 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4016 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4017 }
4019 //===---------------------------------------------------------------------===//
4020 // SSSE3 Misc Instructions
4021 //===---------------------------------------------------------------------===//
4023 // Thread synchronization
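// monitor takes the address in EAX/RAX, extensions in ECX and hints in EDX;
// mwait takes extensions in ECX and hints in EAX (see the aliases below).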
4024 let usesCustomInserter = 1 in {
4025 def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
4026 [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>;
4027 def MWAIT : PseudoI<(outs), (ins GR32:$src1, GR32:$src2),
4028 [(int_x86_sse3_mwait GR32:$src1, GR32:$src2)]>;
4029 }
4031 let Uses = [EAX, ECX, EDX] in
4032 def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>, TB,
4033 Requires<[HasSSE3]>;
4034 let Uses = [ECX, EAX] in
4035 def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", []>, TB,
4036 Requires<[HasSSE3]>;
4038 def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
4039 def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;
4041 def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>,
4042 Requires<[In32BitMode]>;
4043 def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>,
4044 Requires<[In64BitMode]>;
4046 //===---------------------------------------------------------------------===//
4047 // Non-Instruction Patterns
4048 //===---------------------------------------------------------------------===//
4050 // extload f32 -> f64. This matches load+fextend because we have a hack in
4051 // the isel (PreprocessForFPConvert) that can introduce loads after dag
4052 // combine.
4053 // Since these loads aren't folded into the fextend, we have to match it
4054 // explicitly here.
4055 let Predicates = [HasSSE2] in
4056 def : Pat<(fextend (loadf32 addr:$src)),
4057 (CVTSS2SDrm addr:$src)>;
4059 // Bitcasts between 128-bit vector types. Return the original type since
4060 // no instruction is needed for the conversion
4061 let Predicates = [HasXMMInt] in {
4062 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
4063 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
4064 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
4065 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
4066 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
4067 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
4068 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
4069 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
4070 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
4071 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
4072 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
4073 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
4074 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
4075 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
4076 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
4077 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
4078 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
4079 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
4080 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
4081 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
4082 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
4083 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
4084 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
4085 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
4086 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
4087 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
4088 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
4089 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
4090 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
4091 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
4092 }
4094 // Bitcasts between 256-bit vector types. Return the original type since
4095 // no instruction is needed for the conversion
4096 let Predicates = [HasAVX] in {
4097 def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
4098 def : Pat<(v4f64 (bitconvert (v8i32 VR256:$src))), (v4f64 VR256:$src)>;
4099 def : Pat<(v4f64 (bitconvert (v4i64 VR256:$src))), (v4f64 VR256:$src)>;
4100 def : Pat<(v4f64 (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
4101 def : Pat<(v4f64 (bitconvert (v32i8 VR256:$src))), (v4f64 VR256:$src)>;
4102 def : Pat<(v8f32 (bitconvert (v8i32 VR256:$src))), (v8f32 VR256:$src)>;
4103 def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
4104 def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
4105 def : Pat<(v8f32 (bitconvert (v32i8 VR256:$src))), (v8f32 VR256:$src)>;
4106 def : Pat<(v8f32 (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
4107 def : Pat<(v4i64 (bitconvert (v8f32 VR256:$src))), (v4i64 VR256:$src)>;
4108 def : Pat<(v4i64 (bitconvert (v8i32 VR256:$src))), (v4i64 VR256:$src)>;
4109 def : Pat<(v4i64 (bitconvert (v4f64 VR256:$src))), (v4i64 VR256:$src)>;
4110 def : Pat<(v4i64 (bitconvert (v32i8 VR256:$src))), (v4i64 VR256:$src)>;
4111 def : Pat<(v4i64 (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
4112 def : Pat<(v32i8 (bitconvert (v4f64 VR256:$src))), (v32i8 VR256:$src)>;
4113 def : Pat<(v32i8 (bitconvert (v4i64 VR256:$src))), (v32i8 VR256:$src)>;
4114 def : Pat<(v32i8 (bitconvert (v8f32 VR256:$src))), (v32i8 VR256:$src)>;
4115 def : Pat<(v32i8 (bitconvert (v8i32 VR256:$src))), (v32i8 VR256:$src)>;
4116 def : Pat<(v32i8 (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
4117 def : Pat<(v8i32 (bitconvert (v32i8 VR256:$src))), (v8i32 VR256:$src)>;
4118 def : Pat<(v8i32 (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
4119 def : Pat<(v8i32 (bitconvert (v8f32 VR256:$src))), (v8i32 VR256:$src)>;
4120 def : Pat<(v8i32 (bitconvert (v4i64 VR256:$src))), (v8i32 VR256:$src)>;
4121 def : Pat<(v8i32 (bitconvert (v4f64 VR256:$src))), (v8i32 VR256:$src)>;
4122 def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))), (v16i16 VR256:$src)>;
4123 def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))), (v16i16 VR256:$src)>;
4124 def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))), (v16i16 VR256:$src)>;
4125 def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))), (v16i16 VR256:$src)>;
4126 def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))), (v16i16 VR256:$src)>;
4127 }
4129 // Move scalar to XMM zero-extended
4130 // movd to XMM register zero-extends
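// e.g. the first pattern below selects, roughly:
//   xorps %xmm1, %xmm1          ; zero a temporary register
//   movss %xmm0, %xmm1          ; merge $src into the low lane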
4131 let AddedComplexity = 15 in {
4132 // Zero a VR128, then do a MOVS{S|D} to the lower bits.
4133 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
4134 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
4135 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
4136 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
4137 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
4138 (MOVSSrr (v4f32 (V_SET0PS)),
4139 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
4140 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
4141 (MOVSSrr (v4i32 (V_SET0PI)),
4142 (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
4143 }
4145 // Splat v2f64 / v2i64
4146 let AddedComplexity = 10 in {
4147 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
4148 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
4149 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
4150 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
4151 }
4153 let AddedComplexity = 5 in
4154 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
4155 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
4156 Requires<[HasSSE2]>;
4157 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
4158 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
4159 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
4160 Requires<[HasSSE2]>;
4162 let AddedComplexity = 20 in {
4163 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
4164 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
4165 (MOVLPSrm VR128:$src1, addr:$src2)>;
4166 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
4167 (MOVLPDrm VR128:$src1, addr:$src2)>;
4168 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
4169 (MOVLPSrm VR128:$src1, addr:$src2)>;
4170 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
4171 (MOVLPDrm VR128:$src1, addr:$src2)>;
4172 }
4174 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
4175 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
4176 (MOVLPSmr addr:$src1, VR128:$src2)>;
4177 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
4178 (MOVLPDmr addr:$src1, VR128:$src2)>;
4179 def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
4180 addr:$src1),
4181 (MOVLPSmr addr:$src1, VR128:$src2)>;
4182 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
4183 (MOVLPDmr addr:$src1, VR128:$src2)>;
4185 let AddedComplexity = 15 in {
4186 // Setting the lowest element in the vector.
4187 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
4188 (MOVSSrr (v4i32 VR128:$src1),
4189 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
4190 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
4191 (MOVSDrr (v2i64 VR128:$src1),
4192 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
4194 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
4195 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
4196 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
4197 Requires<[HasSSE2]>;
4198 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
4199 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
4200 Requires<[HasSSE2]>;
4201 }
4203 // Set lowest element and zero upper elements.
4204 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
4205 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
4207 // Use movaps / movups for SSE integer load / store (one byte shorter).
4208 // The instructions selected below are then converted to MOVDQA/MOVDQU
4209 // during the SSE domain pass.
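// (movaps is encoded as 0F 28 /r, while movdqa needs a 66 prefix,
// 66 0F 6F /r -- hence the one byte saved.)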
4210 let Predicates = [HasSSE1] in {
4211 def : Pat<(alignedloadv4i32 addr:$src),
4212 (MOVAPSrm addr:$src)>;
4213 def : Pat<(loadv4i32 addr:$src),
4214 (MOVUPSrm addr:$src)>;
4215 def : Pat<(alignedloadv2i64 addr:$src),
4216 (MOVAPSrm addr:$src)>;
4217 def : Pat<(loadv2i64 addr:$src),
4218 (MOVUPSrm addr:$src)>;
4220 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
4221 (MOVAPSmr addr:$dst, VR128:$src)>;
4222 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
4223 (MOVAPSmr addr:$dst, VR128:$src)>;
4224 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
4225 (MOVAPSmr addr:$dst, VR128:$src)>;
4226 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
4227 (MOVAPSmr addr:$dst, VR128:$src)>;
4228 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
4229 (MOVUPSmr addr:$dst, VR128:$src)>;
4230 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
4231 (MOVUPSmr addr:$dst, VR128:$src)>;
4232 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
4233 (MOVUPSmr addr:$dst, VR128:$src)>;
4234 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
4235 (MOVUPSmr addr:$dst, VR128:$src)>;
4236 }
4238 // Use vmovaps/vmovups for AVX integer load/store.
4239 let Predicates = [HasAVX] in {
4240 // 128-bit load/store
4241 def : Pat<(alignedloadv4i32 addr:$src),
4242 (VMOVAPSrm addr:$src)>;
4243 def : Pat<(loadv4i32 addr:$src),
4244 (VMOVUPSrm addr:$src)>;
4245 def : Pat<(alignedloadv2i64 addr:$src),
4246 (VMOVAPSrm addr:$src)>;
4247 def : Pat<(loadv2i64 addr:$src),
4248 (VMOVUPSrm addr:$src)>;
4250 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
4251 (VMOVAPSmr addr:$dst, VR128:$src)>;
4252 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
4253 (VMOVAPSmr addr:$dst, VR128:$src)>;
4254 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
4255 (VMOVAPSmr addr:$dst, VR128:$src)>;
4256 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
4257 (VMOVAPSmr addr:$dst, VR128:$src)>;
4258 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
4259 (VMOVUPSmr addr:$dst, VR128:$src)>;
4260 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
4261 (VMOVUPSmr addr:$dst, VR128:$src)>;
4262 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
4263 (VMOVUPSmr addr:$dst, VR128:$src)>;
4264 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
4265 (VMOVUPSmr addr:$dst, VR128:$src)>;
4267 // 256-bit load/store
4268 def : Pat<(alignedloadv4i64 addr:$src),
4269 (VMOVAPSYrm addr:$src)>;
4270 def : Pat<(loadv4i64 addr:$src),
4271 (VMOVUPSYrm addr:$src)>;
4272 def : Pat<(alignedloadv8i32 addr:$src),
4273 (VMOVAPSYrm addr:$src)>;
4274 def : Pat<(loadv8i32 addr:$src),
4275 (VMOVUPSYrm addr:$src)>;
4276 def : Pat<(alignedstore (v4i64 VR256:$src), addr:$dst),
4277 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4278 def : Pat<(alignedstore (v8i32 VR256:$src), addr:$dst),
4279 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4280 def : Pat<(alignedstore (v16i16 VR256:$src), addr:$dst),
4281 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4282 def : Pat<(alignedstore (v32i8 VR256:$src), addr:$dst),
4283 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4284 def : Pat<(store (v4i64 VR256:$src), addr:$dst),
4285 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4286 def : Pat<(store (v8i32 VR256:$src), addr:$dst),
4287 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4288 def : Pat<(store (v16i16 VR256:$src), addr:$dst),
4289 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4290 def : Pat<(store (v32i8 VR256:$src), addr:$dst),
4291 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4292 }
4294 //===----------------------------------------------------------------------===//
4295 // SSE4.1 - Packed Move with Sign/Zero Extend
4296 //===----------------------------------------------------------------------===//
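// pmovsxbw sign-extends the low 8 bytes of its source to 8 words:
//   dst.word[i] = sign_extend(src.byte[i]),  i = 0..7
// The pmovzx forms zero-extend instead.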
4298 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4299 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4300 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4301 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4303 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4304 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4305 [(set VR128:$dst,
4306 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
4307 OpSize;
4308 }
4310 let Predicates = [HasAVX] in {
4311 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
4312 VEX;
4313 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
4314 VEX;
4315 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
4316 VEX;
4317 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
4318 VEX;
4319 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
4320 VEX;
4321 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
4322 VEX;
4323 }
4325 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
4326 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
4327 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
4328 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
4329 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
4330 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
4332 // Common patterns involving scalar load.
4333 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
4334 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4335 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
4336 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4338 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
4339 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4340 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
4341 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4343 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
4344 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4345 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
4346 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4348 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
4349 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4350 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
4351 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4353 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
4354 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4355 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
4356 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4358 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
4359 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4360 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
4361 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4364 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4365 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4366 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4367 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4369 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4370 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4371 [(set VR128:$dst,
4372 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
4373 OpSize;
4374 }
4376 let Predicates = [HasAVX] in {
4377 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
4378 VEX;
4379 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
4380 VEX;
4381 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
4382 VEX;
4383 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
4384 VEX;
4385 }
4387 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
4388 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
4389 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
4390 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
4392 // Common patterns involving scalar load
4393 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
4394 (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
4395 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
4396 (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
4398 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
4399 (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
4400 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
4401 (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
4404 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4405 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4406 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4407 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4409 // Expecting an i16 load any-extended to an i32 value.
4410 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
4411 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4412 [(set VR128:$dst, (IntId (bitconvert
4413 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
4414 OpSize;
4415 }
4417 let Predicates = [HasAVX] in {
4418 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
4419 VEX;
4420 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
4421 VEX;
4422 }
4423 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
4424 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
4426 // Common patterns involving scalar load
4427 def : Pat<(int_x86_sse41_pmovsxbq
4428 (bitconvert (v4i32 (X86vzmovl
4429 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4430 (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
4432 def : Pat<(int_x86_sse41_pmovzxbq
4433 (bitconvert (v4i32 (X86vzmovl
4434 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4435 (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
4437 //===----------------------------------------------------------------------===//
4438 // SSE4.1 - Extract Instructions
4439 //===----------------------------------------------------------------------===//
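// e.g. "pextrb $3, %xmm0, %eax" zero-extends byte 3 of %xmm0 into %eax; the
// memory forms store only the extracted element itself.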
4441 /// SS41I_binop_ext8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
4442 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
4443 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4444 (ins VR128:$src1, i32i8imm:$src2),
4445 !strconcat(OpcodeStr,
4446 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4447 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
4448 OpSize;
4449 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4450 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
4451 !strconcat(OpcodeStr,
4452 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4453 []>, OpSize;
4454 // FIXME:
4455 // There's an AssertZext in the way of writing the store pattern
4456 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4457 }
4459 let Predicates = [HasAVX] in {
4460 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
4461 def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
4462 (ins VR128:$src1, i32i8imm:$src2),
4463 "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
4466 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
4469 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
4470 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
4471 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4472 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
4473 !strconcat(OpcodeStr,
4474 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4475 []>, OpSize;
4476 // FIXME:
4477 // There's an AssertZext in the way of writing the store pattern
4478 // (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4479 }
4481 let Predicates = [HasAVX] in
4482 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
4484 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
4487 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4488 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
4489 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4490 (ins VR128:$src1, i32i8imm:$src2),
4491 !strconcat(OpcodeStr,
4492 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4493 [(set GR32:$dst,
4494 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
4495 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4496 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
4497 !strconcat(OpcodeStr,
4498 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4499 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
4500 addr:$dst)]>, OpSize;
4501 }
4503 let Predicates = [HasAVX] in
4504 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
4506 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
4508 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
4509 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
4510 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
4511 (ins VR128:$src1, i32i8imm:$src2),
4512 !strconcat(OpcodeStr,
4513 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4514 [(set GR64:$dst,
4515 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
4516 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4517 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
4518 !strconcat(OpcodeStr,
4519 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4520 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
4521 addr:$dst)]>, OpSize, REX_W;
4522 }
4524 let Predicates = [HasAVX] in
4525 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
4527 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
4529 /// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
4530 /// destination
4531 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
4532 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4533 (ins VR128:$src1, i32i8imm:$src2),
4534 !strconcat(OpcodeStr,
4535 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4536 [(set GR32:$dst,
4537 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
4538 OpSize;
4539 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4540 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
4541 !strconcat(OpcodeStr,
4542 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4543 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
4544 addr:$dst)]>, OpSize;
4545 }
4547 let Predicates = [HasAVX] in {
4548 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
4549 def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
4550 (ins VR128:$src1, i32i8imm:$src2),
4551 "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
4554 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
4556 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
4557 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
4558 imm:$src2))),
4559 addr:$dst),
4560 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
4561 Requires<[HasSSE41]>;
4563 //===----------------------------------------------------------------------===//
4564 // SSE4.1 - Insert Instructions
4565 //===----------------------------------------------------------------------===//
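// e.g. "pinsrb $5, %eax, %xmm0" replaces byte 5 of %xmm0 with the low byte
// of %eax, leaving the other fifteen bytes unchanged.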
4567 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
4568 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4569 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4570 !if(Is2Addr,
4571 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4572 !strconcat(asm,
4573 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4574 [(set VR128:$dst,
4575 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
4576 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4577 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
4578 !if(Is2Addr,
4579 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4580 !strconcat(asm,
4581 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4582 [(set VR128:$dst,
4583 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
4584 imm:$src3))]>, OpSize;
4585 }
4587 let Predicates = [HasAVX] in
4588 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
4589 let Constraints = "$src1 = $dst" in
4590 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
4592 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
4593 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4594 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4595 !if(Is2Addr,
4596 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4597 !strconcat(asm,
4598 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4599 [(set VR128:$dst,
4600 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
4601 OpSize;
4602 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4603 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
4604 !if(Is2Addr,
4605 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4606 !strconcat(asm,
4607 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4608 [(set VR128:$dst,
4609 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
4610 imm:$src3)))]>, OpSize;
4611 }
4613 let Predicates = [HasAVX] in
4614 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
4615 let Constraints = "$src1 = $dst" in
4616 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
4618 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
4619 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4620 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4621 !if(Is2Addr,
4622 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4623 !strconcat(asm,
4624 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4625 [(set VR128:$dst,
4626 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
4627 OpSize;
4628 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4629 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
4630 !if(Is2Addr,
4631 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4632 !strconcat(asm,
4633 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4634 [(set VR128:$dst,
4635 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
4636 imm:$src3)))]>, OpSize;
4637 }
4639 let Predicates = [HasAVX] in
4640 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
4641 let Constraints = "$src1 = $dst" in
4642 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
4644 // insertps has a few different modes; the first two below are optimized
4645 // inserts that won't zero arbitrary elements in the destination vector. The
4646 // next one matches the intrinsic and could zero arbitrary elements in the
4647 // target vector.
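// In the insertps immediate, bits [7:6] select the source element, bits
// [5:4] select the destination slot, and bits [3:0] form a mask of
// destination elements to zero.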
4648 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
4649 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4650 (ins VR128:$src1, VR128:$src2, u32u8imm:$src3),
4651 !if(Is2Addr,
4652 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4653 !strconcat(asm,
4654 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4655 [(set VR128:$dst,
4656 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
4657 OpSize;
4658 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4659 (ins VR128:$src1, f32mem:$src2, u32u8imm:$src3),
4660 !if(Is2Addr,
4661 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4662 !strconcat(asm,
4663 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4664 [(set VR128:$dst,
4665 (X86insrtps VR128:$src1,
4666 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
4667 imm:$src3))]>, OpSize;
4668 }
4670 let Constraints = "$src1 = $dst" in
4671 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
4672 let Predicates = [HasAVX] in
4673 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
4675 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4676 (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4677 Requires<[HasAVX]>;
4678 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4679 (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4680 Requires<[HasSSE41]>;
4682 //===----------------------------------------------------------------------===//
4683 // SSE4.1 - Round Instructions
4684 //===----------------------------------------------------------------------===//
4686 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
4687 X86MemOperand x86memop, RegisterClass RC,
4688 PatFrag mem_frag32, PatFrag mem_frag64,
4689 Intrinsic V4F32Int, Intrinsic V2F64Int> {
4691 // Vector intrinsic operation, reg
4692 def PSr : SS4AIi8<opcps, MRMSrcReg,
4693 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4694 !strconcat(OpcodeStr,
4695 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4696 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
4697 OpSize;
4699 // Vector intrinsic operation, mem
4700 def PSm : Ii8<opcps, MRMSrcMem,
4701 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4702 !strconcat(OpcodeStr,
4703 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4704 [(set RC:$dst,
4705 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
4706 TA, OpSize,
4707 Requires<[HasSSE41]>;
4709 // Vector intrinsic operation, reg
4710 def PDr : SS4AIi8<opcpd, MRMSrcReg,
4711 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4712 !strconcat(OpcodeStr,
4713 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4714 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
4715 OpSize;
4717 // Vector intrinsic operation, mem
4718 def PDm : SS4AIi8<opcpd, MRMSrcMem,
4719 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4720 !strconcat(OpcodeStr,
4721 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4722 [(set RC:$dst,
4723 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
4724 OpSize;
4725 }
4727 multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
4728 RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
4730 // Vector intrinsic operation, reg
4731 def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
4732 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4733 !strconcat(OpcodeStr,
4734 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4735 []>, OpSize;
4737 // Vector intrinsic operation, mem
4738 def PSm_AVX : Ii8<opcps, MRMSrcMem,
4739 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4740 !strconcat(OpcodeStr,
4741 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4742 []>, TA, OpSize, Requires<[HasSSE41]>;
4744 // Vector intrinsic operation, reg
4745 def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
4746 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4747 !strconcat(OpcodeStr,
4748 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4749 []>, OpSize;
4751 // Vector intrinsic operation, mem
4752 def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
4753 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4754 !strconcat(OpcodeStr,
4755 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4756 []>, OpSize;
4757 }
4759 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4760 string OpcodeStr,
4761 Intrinsic F32Int,
4762 Intrinsic F64Int, bit Is2Addr = 1> {
4763 // Intrinsic operation, reg.
4764 def SSr : SS4AIi8<opcss, MRMSrcReg,
4765 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4766 !if(Is2Addr,
4767 !strconcat(OpcodeStr,
4768 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4769 !strconcat(OpcodeStr,
4770 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4771 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4772 OpSize;
4774 // Intrinsic operation, mem.
4775 def SSm : SS4AIi8<opcss, MRMSrcMem,
4776 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4777 !if(Is2Addr,
4778 !strconcat(OpcodeStr,
4779 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4780 !strconcat(OpcodeStr,
4781 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4782 [(set VR128:$dst,
4783 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
4784 OpSize;
4786 // Intrinsic operation, reg.
4787 def SDr : SS4AIi8<opcsd, MRMSrcReg,
4788 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4789 !if(Is2Addr,
4790 !strconcat(OpcodeStr,
4791 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4792 !strconcat(OpcodeStr,
4793 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4794 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4795 OpSize;
4797 // Intrinsic operation, mem.
4798 def SDm : SS4AIi8<opcsd, MRMSrcMem,
4799 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4800 !if(Is2Addr,
4801 !strconcat(OpcodeStr,
4802 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4803 !strconcat(OpcodeStr,
4804 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4805 [(set VR128:$dst,
4806 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
4807 OpSize;
4808 }
4810 multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
4811 string OpcodeStr> {
4812 // Intrinsic operation, reg.
4813 def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
4814 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4815 !strconcat(OpcodeStr,
4816 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4817 []>, OpSize;
4819 // Intrinsic operation, mem.
4820 def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
4821 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4822 !strconcat(OpcodeStr,
4823 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4824 []>, OpSize;
4826 // Intrinsic operation, reg.
4827 def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
4828 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4829 !strconcat(OpcodeStr,
4830 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4831 []>, OpSize;
4833 // Intrinsic operation, mem.
4834 def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
4835 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4836 !strconcat(OpcodeStr,
4837 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4838 []>, OpSize;
4839 }
4841 // FP round - roundss, roundps, roundsd, roundpd
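// The round immediate: imm[1:0] picks the mode (00 nearest, 01 down, 10 up,
// 11 truncate), imm[2] set means use MXCSR.RC instead, and imm[3] set
// suppresses the precision (inexact) exception.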
4842 let Predicates = [HasAVX] in {
4843 // Intrinsic form
4844 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
4845 memopv4f32, memopv2f64,
4846 int_x86_sse41_round_ps,
4847 int_x86_sse41_round_pd>, VEX;
4848 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
4849 memopv8f32, memopv4f64,
4850 int_x86_avx_round_ps_256,
4851 int_x86_avx_round_pd_256>, VEX;
4852 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
4853 int_x86_sse41_round_ss,
4854 int_x86_sse41_round_sd, 0>, VEX_4V;
4856 // Instructions for the assembler
4857 defm VROUND : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
4858 VEX;
4859 defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
4860 VEX;
4861 defm VROUND : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
4862 }
4864 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
4865 memopv4f32, memopv2f64,
4866 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
4867 let Constraints = "$src1 = $dst" in
4868 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
4869 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
4871 //===----------------------------------------------------------------------===//
4872 // SSE4.1 - Packed Bit Test
4873 //===----------------------------------------------------------------------===//
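// ptest sets ZF if (src1 AND src2) == 0 and CF if (NOT src1 AND src2) == 0;
// it writes only EFLAGS, never a vector register.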
4875 // The ptest instruction; we lower to it in X86ISelLowering, primarily from
4876 // the Intel intrinsic that corresponds to it.
4877 let Defs = [EFLAGS], Predicates = [HasAVX] in {
4878 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4879 "vptest\t{$src2, $src1|$src1, $src2}",
4880 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4881 OpSize, VEX;
4882 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4883 "vptest\t{$src2, $src1|$src1, $src2}",
4884 [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4885 OpSize, VEX;
4887 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
4888 "vptest\t{$src2, $src1|$src1, $src2}",
4889 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
4890 OpSize, VEX;
4891 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
4892 "vptest\t{$src2, $src1|$src1, $src2}",
4893 [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
4894 OpSize, VEX;
4895 }
4897 let Defs = [EFLAGS] in {
4898 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4899 "ptest \t{$src2, $src1|$src1, $src2}",
4900 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4901 OpSize;
4902 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4903 "ptest \t{$src2, $src1|$src1, $src2}",
4904 [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4905 OpSize;
4906 }
4908 // The bit test instructions below are AVX only
4909 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
4910 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
4911 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
4912 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4913 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
4914 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
4915 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4916 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
4917 OpSize, VEX;
4918 }
4920 let Defs = [EFLAGS], Predicates = [HasAVX] in {
4921 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
4922 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
4923 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
4924 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
4925 }
4927 //===----------------------------------------------------------------------===//
4928 // SSE4.1 - Misc Instructions
4929 //===----------------------------------------------------------------------===//
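// popcnt counts the set bits in its source, e.g. popcnt of 0x00FF gives 8.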
4931 def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
4932 "popcnt{w}\t{$src, $dst|$dst, $src}",
4933 [(set GR16:$dst, (ctpop GR16:$src))]>, OpSize, XS;
4934 def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
4935 "popcnt{w}\t{$src, $dst|$dst, $src}",
4936 [(set GR16:$dst, (ctpop (loadi16 addr:$src)))]>, OpSize, XS;
4938 def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
4939 "popcnt{l}\t{$src, $dst|$dst, $src}",
4940 [(set GR32:$dst, (ctpop GR32:$src))]>, XS;
4941 def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
4942 "popcnt{l}\t{$src, $dst|$dst, $src}",
4943 [(set GR32:$dst, (ctpop (loadi32 addr:$src)))]>, XS;
4945 def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
4946 "popcnt{q}\t{$src, $dst|$dst, $src}",
4947 [(set GR64:$dst, (ctpop GR64:$src))]>, XS;
4948 def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
4949 "popcnt{q}\t{$src, $dst|$dst, $src}",
4950 [(set GR64:$dst, (ctpop (loadi64 addr:$src)))]>, XS;
4954 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
4955 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
4956 Intrinsic IntId128> {
4957 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4958 (ins VR128:$src),
4959 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4960 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
4961 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4962 (ins i128mem:$src),
4963 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4964 [(set VR128:$dst,
4965 (IntId128
4966 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
4967 }
4969 let Predicates = [HasAVX] in
4970 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
4971 int_x86_sse41_phminposuw>, VEX;
4972 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
4973 int_x86_sse41_phminposuw>;
4975 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
4976 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
4977 Intrinsic IntId128, bit Is2Addr = 1> {
4978 let isCommutable = 1 in
4979 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4980 (ins VR128:$src1, VR128:$src2),
4981 !if(Is2Addr,
4982 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4983 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4984 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
4985 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4986 (ins VR128:$src1, i128mem:$src2),
4987 !if(Is2Addr,
4988 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4989 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4990 [(set VR128:$dst,
4991 (IntId128 VR128:$src1,
4992 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4993 }
4995 let Predicates = [HasAVX] in {
4996 let isCommutable = 0 in
4997 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
4998 0>, VEX_4V;
4999 defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
5000 0>, VEX_4V;
5001 defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
5002 0>, VEX_4V;
5003 defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
5004 0>, VEX_4V;
5005 defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
5006 0>, VEX_4V;
5007 defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
5008 0>, VEX_4V;
5009 defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
5010 0>, VEX_4V;
5011 defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
5012 0>, VEX_4V;
5013 defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
5014 0>, VEX_4V;
5015 defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
5016 0>, VEX_4V;
5017 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
5018 0>, VEX_4V;
5020 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
5021 (VPCMPEQQrr VR128:$src1, VR128:$src2)>;
5022 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
5023 (VPCMPEQQrm VR128:$src1, addr:$src2)>;
5024 }
5026 let Constraints = "$src1 = $dst" in {
5027 let isCommutable = 0 in
5028 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
5029 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
5030 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
5031 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
5032 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
5033 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
5034 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
5035 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
5036 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
5037 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
5038 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
5039 }
5041 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
5042 (PCMPEQQrr VR128:$src1, VR128:$src2)>;
5043 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
5044 (PCMPEQQrm VR128:$src1, addr:$src2)>;
5046 /// SS48I_binop_rm - Simple SSE41 binary operator.
5047 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
5048 ValueType OpVT, bit Is2Addr = 1> {
5049 let isCommutable = 1 in
5050 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
5051 (ins VR128:$src1, VR128:$src2),
5052 !if(Is2Addr,
5053 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5054 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5055 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
5056 OpSize;
5057 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
5058 (ins VR128:$src1, i128mem:$src2),
5059 !if(Is2Addr,
5060 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5061 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5062 [(set VR128:$dst, (OpNode VR128:$src1,
5063 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
5064 OpSize;
5065 }
5067 let Predicates = [HasAVX] in
5068 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
5069 let Constraints = "$src1 = $dst" in
5070 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
5072 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
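// For the blend forms the immediate is a per-lane select; blendps computes
//   dst[i] = imm[i] ? src2[i] : src1[i],  i = 0..3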
5073 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
5074 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
5075 X86MemOperand x86memop, bit Is2Addr = 1> {
5076 let isCommutable = 1 in
5077 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
5078 (ins RC:$src1, RC:$src2, u32u8imm:$src3),
5079 !if(Is2Addr,
5080 !strconcat(OpcodeStr,
5081 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5082 !strconcat(OpcodeStr,
5083 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5084 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
5085 OpSize;
5086 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
5087 (ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
5088 !if(Is2Addr,
5089 !strconcat(OpcodeStr,
5090 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5091 !strconcat(OpcodeStr,
5092 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5093 [(set RC:$dst,
5094 (IntId RC:$src1,
5095 (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
5096 OpSize;
5097 }
5099 let Predicates = [HasAVX] in {
5100 let isCommutable = 0 in {
5101 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
5102 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5103 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
5104 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5105 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
5106 int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
5107 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
5108 int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
5109 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
5110 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5111 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
5112 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5113 }
5114 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
5115 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5116 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
5117 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5118 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
5119 VR256, memopv32i8, i256mem, 0>, VEX_4V;
5120 }
5122 let Constraints = "$src1 = $dst" in {
5123 let isCommutable = 0 in {
5124 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
5125 VR128, memopv16i8, i128mem>;
5126 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
5127 VR128, memopv16i8, i128mem>;
5128 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
5129 VR128, memopv16i8, i128mem>;
5130 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
5131 VR128, memopv16i8, i128mem>;
5132 }
5133 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
5134 VR128, memopv16i8, i128mem>;
5135 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
5136 VR128, memopv16i8, i128mem>;
5137 }
5139 /// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operators
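// The fourth (mask) register operand is encoded in bits [7:4] of an extra
// immediate byte, which is what VEX_I8IMM denotes.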
5140 let Predicates = [HasAVX] in {
5141 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
5142 RegisterClass RC, X86MemOperand x86memop,
5143 PatFrag mem_frag, Intrinsic IntId> {
5144 def rr : I<opc, MRMSrcReg, (outs RC:$dst),
5145 (ins RC:$src1, RC:$src2, RC:$src3),
5146 !strconcat(OpcodeStr,
5147 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5148 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
5149 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
5151 def rm : I<opc, MRMSrcMem, (outs RC:$dst),
5152 (ins RC:$src1, x86memop:$src2, RC:$src3),
5153 !strconcat(OpcodeStr,
5154 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5156 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
5158 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
defm VBLENDVPD  : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_blendvpd>;
defm VBLENDVPS  : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_blendvps>;
defm VPBLENDVB  : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_pblendvb>;
defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
                                        memopv32i8, int_x86_avx_blendv_pd_256>;
defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
                                        memopv32i8, int_x86_avx_blendv_ps_256>;
}
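
// Note (an editorial sketch): in the VEX encoding these blends take the mask
// as a fourth explicit register operand; the VEX_I8IMM modifier encodes that
// register in the upper four bits of the trailing immediate byte, e.g.
//   vblendvps %xmm3, %xmm2, %xmm1, %xmm0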
/// SS41I_ternary_int - SSE 4.1 ternary operator
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
  multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
    def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr,
                     "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
                    OpSize;

    def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2),
                    !strconcat(OpcodeStr,
                     "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst,
                      (IntId VR128:$src1,
                       (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
  }
}
defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;

def : Pat<(X86pblendv VR128:$src1, VR128:$src2, XMM0),
          (PBLENDVBrr0 VR128:$src1, VR128:$src2)>;
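
// Note (an editorial sketch): the non-VEX blendv forms read their mask from
// XMM0 implicitly (hence Uses = [XMM0] and the "rr0"/"rm0" suffixes), so the
// mask never appears in the assembly string, e.g.
//   pblendvb %xmm1, %xmm0    # bytes selected from %xmm1 under the XMM0 mask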
let Predicates = [HasAVX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vmovntdqa\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                        OpSize, VEX;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "movntdqa\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                       OpSize;
//===----------------------------------------------------------------------===//
// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//

/// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId128, bit Is2Addr = 1> {
  def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       OpSize;
  def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
let Predicates = [HasAVX] in {
  defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
                                     0>, VEX_4V;

  def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
            (VPCMPGTQrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
            (VPCMPGTQrm VR128:$src1, addr:$src2)>;
}

let Constraints = "$src1 = $dst" in
  defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;

def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
          (PCMPGTQrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
          (PCMPGTQrm VR128:$src1, addr:$src2)>;
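
// Note (an editorial sketch): the multiclass pattern matches the
// int_x86_sse42_pcmpgtq intrinsic directly, while the Pat<> entries above
// additionally map the X86pcmpgtq DAG node produced by generic v2i64
// vector-compare lowering, so both paths select the same instructions.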
//===----------------------------------------------------------------------===//
// SSE4.2 - String/text Processing Instructions
//===----------------------------------------------------------------------===//

// Packed Compare Implicit Length Strings, Return Mask
multiclass pseudo_pcmpistrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2, i8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
                                                  imm:$src3))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
                       VR128:$src1, (load addr:$src2), imm:$src3))]>;
}

let Defs = [EFLAGS], usesCustomInserter = 1 in {
  defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
  defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
}
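
// Note (an editorial sketch): these are pseudos because the real instructions
// write XMM0 implicitly; with usesCustomInserter = 1 the target expands each
// pseudo after selection into the real instruction plus a copy of XMM0 into
// the virtual register that carries $dst.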
let Defs = [XMM0, EFLAGS], Predicates = [HasAVX] in {
  def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
  def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
}

let Defs = [XMM0, EFLAGS] in {
  def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
  def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
}
// Packed Compare Explicit Length Strings, Return Mask
multiclass pseudo_pcmpestrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src3, i8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                       VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                       VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
}

let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
  defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
  defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
}
let Predicates = [HasAVX],
    Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
  def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
  def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
}

let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
  def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
  def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
}
// Packed Compare Implicit Length Strings, Return Index
let Defs = [ECX, EFLAGS] in {
  multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
    def rr : SS42AI<0x63, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
      [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
       (implicit EFLAGS)]>, OpSize;
    def rm : SS42AI<0x63, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
      [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
       (implicit EFLAGS)]>, OpSize;
  }
}

let Predicates = [HasAVX] in {
defm VPCMPISTRI  : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
                                    VEX;
}
defm PCMPISTRI  : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
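
// Note (an editorial sketch): all of these variants assemble to the same
// pcmpistri encoding; the A/C/O/S/Z defm names exist so that each of the
// flag-reading intrinsics (pcmpistria128, pcmpistric128, ...) has an
// instruction to select, with the flag output modeled by (implicit EFLAGS).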
// Packed Compare Explicit Length Strings, Return Index
let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
  multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
    def rr : SS42AI<0x61, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
      [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
       (implicit EFLAGS)]>, OpSize;
    def rm : SS42AI<0x61, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
      [(set ECX,
        (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
       (implicit EFLAGS)]>, OpSize;
  }
}

let Predicates = [HasAVX] in {
defm VPCMPESTRI  : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
                                    VEX;
}
defm PCMPESTRI  : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
//===----------------------------------------------------------------------===//
// SSE4.2 - CRC Instructions
//===----------------------------------------------------------------------===//

// No CRC instructions have AVX equivalents

// crc intrinsic instruction
// This set of instructions comes only in r/m forms; the variants differ only
// in the sizes of r and m.
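// Naming note (editorial): CRC32r32m8 means a 32-bit destination register and
// an 8-bit memory source, CRC32r64r64 a 64-bit register CRC of a 64-bit
// register source, and so on, e.g.
//   crc32b (%rdi), %eax    # CRC32r32m8
//   crc32q %rdx, %rax      # CRC32r64r64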
let Constraints = "$src1 = $dst" in {
  def CRC32r32m8  : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i8mem:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_8 GR32:$src1,
                         (load addr:$src2)))]>;
  def CRC32r32r8  : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR8:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_8 GR32:$src1, GR8:$src2))]>;
  def CRC32r32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i16mem:$src2),
                      "crc32{w} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_16 GR32:$src1,
                         (load addr:$src2)))]>,
                         OpSize;
  def CRC32r32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR16:$src2),
                      "crc32{w} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_16 GR32:$src1, GR16:$src2))]>,
                         OpSize;
  def CRC32r32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i32mem:$src2),
                      "crc32{l} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_32 GR32:$src1,
                         (load addr:$src2)))]>;
  def CRC32r32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR32:$src2),
                      "crc32{l} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_32 GR32:$src1, GR32:$src2))]>;
  def CRC32r64m8  : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
                      (ins GR64:$src1, i8mem:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_8 GR64:$src1,
                         (load addr:$src2)))]>,
                         REX_W;
  def CRC32r64r8  : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
                      (ins GR64:$src1, GR8:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_8 GR64:$src1, GR8:$src2))]>,
                         REX_W;
  def CRC32r64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
                      (ins GR64:$src1, i64mem:$src2),
                      "crc32{q} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_64 GR64:$src1,
                         (load addr:$src2)))]>,
                         REX_W;
  def CRC32r64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2),
                      "crc32{q} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_64 GR64:$src1, GR64:$src2))]>,
                         REX_W;
}
//===----------------------------------------------------------------------===//
// AES-NI Instructions
//===----------------------------------------------------------------------===//

multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
                             Intrinsic IntId128, bit Is2Addr = 1> {
  def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       OpSize;
  def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
// Perform One Round of an AES Encryption/Decryption Flow
let Predicates = [HasAVX, HasAES] in {
  defm VAESENC     : AESI_binop_rm_int<0xDC, "vaesenc",
                       int_x86_aesni_aesenc, 0>, VEX_4V;
  defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
                       int_x86_aesni_aesenclast, 0>, VEX_4V;
  defm VAESDEC     : AESI_binop_rm_int<0xDE, "vaesdec",
                       int_x86_aesni_aesdec, 0>, VEX_4V;
  defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
                       int_x86_aesni_aesdeclast, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  defm AESENC     : AESI_binop_rm_int<0xDC, "aesenc",
                      int_x86_aesni_aesenc>;
  defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
                      int_x86_aesni_aesenclast>;
  defm AESDEC     : AESI_binop_rm_int<0xDE, "aesdec",
                      int_x86_aesni_aesdec>;
  defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
                      int_x86_aesni_aesdeclast>;
}
def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
          (AESENCrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
          (AESENCrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
          (AESENCLASTrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
          (AESENCLASTrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
          (AESDECrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
          (AESDECrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
          (AESDECLASTrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
          (AESDECLASTrm VR128:$src1, addr:$src2)>;
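
// Note (an editorial sketch): the memory patterns above let a loaded round
// key fold into the rm form, e.g. (int_x86_aesni_aesenc $state, (memop addr))
// selects AESENCrm instead of a separate load followed by AESENCrr.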
// Perform the AES InvMixColumn Transformation
let Predicates = [HasAVX, HasAES] in {
  def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst,
        (int_x86_aesni_aesimc VR128:$src1))]>,
      OpSize, VEX;
  def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst,
        (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
      OpSize, VEX;
}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
    (ins VR128:$src1),
    "aesimc\t{$src1, $dst|$dst, $src1}",
    [(set VR128:$dst,
      (int_x86_aesni_aesimc VR128:$src1))]>,
    OpSize;
def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
    (ins i128mem:$src1),
    "aesimc\t{$src1, $dst|$dst, $src1}",
    [(set VR128:$dst,
      (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
    OpSize;
// AES Round Key Generation Assist
let Predicates = [HasAVX, HasAES] in {
  def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
      OpSize, VEX;
  def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
                                       imm:$src2))]>,
      OpSize, VEX;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
    (ins VR128:$src1, i8imm:$src2),
    "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
    [(set VR128:$dst,
      (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
    OpSize;
def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
    (ins i128mem:$src1, i8imm:$src2),
    "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
    [(set VR128:$dst,
      (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
                                     imm:$src2))]>,
    OpSize;
//===----------------------------------------------------------------------===//
// CLMUL Instructions
//===----------------------------------------------------------------------===//

// Carry-less Multiplication instructions
let Constraints = "$src1 = $dst" in {
def PCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           []>;

def PCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           []>;
}

// AVX carry-less Multiplication instructions
def VPCLMULQDQrr : AVXCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           []>;

def VPCLMULQDQrm : AVXCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           []>;
multiclass pclmul_alias<string asm, int immop> {
  def : InstAlias<!strconcat("pclmul", asm,
                             "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrr VR128:$dst, VR128:$src, immop)>;

  def : InstAlias<!strconcat("pclmul", asm,
                             "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrm VR128:$dst, i128mem:$src, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop)>;
}
defm : pclmul_alias<"hqhq", 0x11>;
defm : pclmul_alias<"hqlq", 0x01>;
defm : pclmul_alias<"lqhq", 0x10>;
defm : pclmul_alias<"lqlq", 0x00>;
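
// Note (an editorial sketch): each defm above defines assembler aliases such
// as "pclmullqhqdq", which the parser rewrites to pclmulqdq with the fixed
// immediate, e.g.
//   pclmullqhqdq %xmm1, %xmm0   ==   pclmulqdq $0x10, %xmm1, %xmm0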
//===----------------------------------------------------------------------===//
// AVX Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VBROADCAST - Load from memory and broadcast to all elements of the
//   destination operand
//
class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
                    X86MemOperand x86memop, Intrinsic Int> :
  AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
        !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
        [(set RC:$dst, (Int addr:$src))]>, VEX;

def VBROADCASTSS   : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
                                   int_x86_avx_vbroadcastss>;
def VBROADCASTSSY  : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
                                   int_x86_avx_vbroadcastss_256>;
def VBROADCASTSD   : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
                                   int_x86_avx_vbroadcast_sd_256>;
def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
                                   int_x86_avx_vbroadcastf128_pd_256>;
def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
          (VBROADCASTF128 addr:$src)>;

def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
          (VBROADCASTSSY addr:$src)>;
def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
          (VBROADCASTSD addr:$src)>;
def : Pat<(v8f32 (X86VBroadcast (loadf32 addr:$src))),
          (VBROADCASTSSY addr:$src)>;
def : Pat<(v4f64 (X86VBroadcast (loadf64 addr:$src))),
          (VBROADCASTSD addr:$src)>;

def : Pat<(v4f32 (X86VBroadcast (loadf32 addr:$src))),
          (VBROADCASTSS addr:$src)>;
def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
          (VBROADCASTSS addr:$src)>;
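
// Note (an editorial sketch): AVX1 has no integer-element vbroadcast, so the
// v8i32/v4i64 patterns above reuse the floating-point broadcasts; the bit
// pattern of the loaded element is identical either way.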
//===----------------------------------------------------------------------===//
// VINSERTF128 - Insert packed floating-point values
//
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;

def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
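
// Note (an editorial sketch): INSERT_get_vinsertf128_imm is an SDNodeXForm
// that divides the element insertion index captured in $ins by the number of
// elements per 128-bit half, yielding the 0/1 lane immediate that
// vinsertf128 expects (e.g. inserting v4f32 at element 4 becomes lane 1).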
// Special COPY patterns
def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
//===----------------------------------------------------------------------===//
// VEXTRACTF128 - Extract packed floating-point values
//
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;

def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v4f32 (VEXTRACTF128rr
                    (v8f32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v2f64 (VEXTRACTF128rr
                    (v4f64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v4i32 (VEXTRACTF128rr
                    (v8i32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v2i64 (VEXTRACTF128rr
                    (v4i64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v8i16 (VEXTRACTF128rr
                    (v16i16 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v16i8 (VEXTRACTF128rr
                    (v32i8 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
// Special COPY patterns
def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (i32 0))),
          (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (i32 0))),
          (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;

def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (i32 0))),
          (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (i32 0))),
          (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;

def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (i32 0))),
          (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (i32 0))),
          (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;
//===----------------------------------------------------------------------===//
// VMASKMOV - Conditional SIMD Packed Loads and Stores
//
multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
                          Intrinsic IntLd, Intrinsic IntLd256,
                          Intrinsic IntSt, Intrinsic IntSt256,
                          PatFrag pf128, PatFrag pf256> {
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, f128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
             VEX_4V;
  def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, f256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V;
  def mr  : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
}
defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
                                 int_x86_avx_maskload_ps,
                                 int_x86_avx_maskload_ps_256,
                                 int_x86_avx_maskstore_ps,
                                 int_x86_avx_maskstore_ps_256,
                                 memopv4f32, memopv8f32>;
defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
                                 int_x86_avx_maskload_pd,
                                 int_x86_avx_maskload_pd_256,
                                 int_x86_avx_maskstore_pd,
                                 int_x86_avx_maskstore_pd_256,
                                 memopv2f64, memopv4f64>;
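
// Note (an editorial sketch): in the maskload patterns the mask register is
// $src1 yet appears as the second argument of the intrinsic, so the pattern
// operand order (addr, mask) is intentionally swapped relative to the
// (mask, addr) order of the instruction operands.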
//===----------------------------------------------------------------------===//
// VPERMIL - Permute Single and Double Floating-Point Values
//
multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
                      RegisterClass RC, X86MemOperand x86memop_f,
                      X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
                      Intrinsic IntVar, Intrinsic IntImm> {
  def rr  : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop_i:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;

  def ri  : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
  def mi  : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
             (ins x86memop_f:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
}
defm VPERMILPS  : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
                             memopv4f32, memopv4i32,
                             int_x86_avx_vpermilvar_ps,
                             int_x86_avx_vpermil_ps>;
defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
                             memopv8f32, memopv8i32,
                             int_x86_avx_vpermilvar_ps_256,
                             int_x86_avx_vpermil_ps_256>;
defm VPERMILPD  : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
                             memopv2f64, memopv2i64,
                             int_x86_avx_vpermilvar_pd,
                             int_x86_avx_vpermil_pd>;
defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
                             memopv4f64, memopv4i64,
                             int_x86_avx_vpermilvar_pd_256,
                             int_x86_avx_vpermil_pd_256>;

def : Pat<(v8f32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPSYri VR256:$src1, imm:$imm)>;
def : Pat<(v4f64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPDYri VR256:$src1, imm:$imm)>;
def : Pat<(v8i32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPSYri VR256:$src1, imm:$imm)>;
def : Pat<(v4i64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPDYri VR256:$src1, imm:$imm)>;
//===----------------------------------------------------------------------===//
// VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
//
def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;

def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_ps_256
                  VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256
                  VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256
                  VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;

def : Pat<(v8f32 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v8i32 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4f64 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
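
// Note (an editorial sketch): in the vperm2f128 immediate, bits [1:0] select
// which 128-bit half feeds the low lane of the result and bits [5:4] the high
// lane (0/1 = $src1 low/high, 2/3 = $src2 low/high); e.g. imm 0x31 forms the
// result from the high halves of both sources.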
//===----------------------------------------------------------------------===//
// VZERO - Zero YMM registers
//
let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
            YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
  // Zero All YMM registers
  def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
                   [(int_x86_avx_vzeroall)]>, VEX, VEX_L, Requires<[HasAVX]>;

  // Zero Upper bits of YMM registers
  def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
                     [(int_x86_avx_vzeroupper)]>, VEX, Requires<[HasAVX]>;
}
//===----------------------------------------------------------------------===//
// SSE Shuffle pattern fragments
//===----------------------------------------------------------------------===//

// This is part of a "work in progress" refactoring. The idea is that all
// vector shuffles are going to be translated into target-specific nodes and
// matched directly by the patterns below (which can be changed along the way).
// The AVX versions of some, but not all, of these patterns are described
// here; more should follow in the near future.

// Shuffle with PSHUFD instruction folding loads. The first two patterns match
// SSE2 loads, which are always promoted to v2i64. The last one should match
// the SSE1 case, where the only legal load is v4f32, but PSHUFD does not
// exist in SSE1, so how did it ever work? The pattern will remain here until
// we investigate further.
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
                                   (i8 imm:$imm))),
          (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
                                   (i8 imm:$imm))),
          (PSHUFDmi addr:$src1, imm:$imm)>;
def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
                                   (i8 imm:$imm))),
          (PSHUFDmi addr:$src1, imm:$imm)>; // FIXME: has this ever worked?
// Shuffle with PSHUFD instruction.
def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (PSHUFDri VR128:$src1, imm:$imm)>;

def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
          (PSHUFDri VR128:$src1, imm:$imm)>;

// Shuffle with MOVHLPS instruction
def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
          (MOVHLPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with MOVDDUP instruction
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v2f64
                           (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64
                           (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
          (MOVDDUPrm addr:$src)>;
// Shuffle with UNPCKLPS
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
          (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
          (UNPCKLPSrm VR128:$src1, addr:$src2)>;

def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
          (VUNPCKLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
          (UNPCKLPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with VUNPCKLPSY
def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, (memopv8f32 addr:$src2))),
          (VUNPCKLPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
          (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
def : Pat<(v8i32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
          (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
def : Pat<(v8i32 (X86Unpcklpsy VR256:$src1, (memopv8i32 addr:$src2))),
          (VUNPCKLPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
// Shuffle with UNPCKHPS
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
          (VUNPCKHPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
          (UNPCKHPSrm VR128:$src1, addr:$src2)>;

def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
          (VUNPCKHPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
          (UNPCKHPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with VUNPCKHPSY
def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, (memopv8f32 addr:$src2))),
          (VUNPCKHPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
          (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;

def : Pat<(v8i32 (X86Unpckhpsy VR256:$src1, (memopv8i32 addr:$src2))),
          (VUNPCKHPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v8i32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
          (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;

// Shuffle with UNPCKLPD
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
          (VUNPCKLPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
          (UNPCKLPDrm VR128:$src1, addr:$src2)>;

def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
          (VUNPCKLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
          (UNPCKLPDrr VR128:$src1, VR128:$src2)>;

// Shuffle with VUNPCKLPDY
def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, (memopv4f64 addr:$src2))),
          (VUNPCKLPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
          (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;

def : Pat<(v4i64 (X86Unpcklpdy VR256:$src1, (memopv4i64 addr:$src2))),
          (VUNPCKLPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4i64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
          (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;

// Shuffle with UNPCKHPD
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
          (VUNPCKHPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
          (UNPCKHPDrm VR128:$src1, addr:$src2)>;

def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
          (VUNPCKHPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
          (UNPCKHPDrr VR128:$src1, VR128:$src2)>;

// Shuffle with VUNPCKHPDY
def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, (memopv4f64 addr:$src2))),
          (VUNPCKHPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
          (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4i64 (X86Unpckhpdy VR256:$src1, (memopv4i64 addr:$src2))),
          (VUNPCKHPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4i64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
          (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
// FIXME: Instead of X86Movddup, there should be an X86Unpcklpd here; the
// problem is during lowering, where it's not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(v2f64 (X86Movddup VR128:$src)),
          (UNPCKLPDrr VR128:$src, VR128:$src)>;
// Shuffle with MOVLHPD
def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVHPDrm VR128:$src1, addr:$src2)>;

// FIXME: Instead of X86Unpcklpd, there should be an X86Movlhpd here; the
// problem is during lowering, where it's not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVHPDrm VR128:$src1, addr:$src2)>;
// Shuffle with MOVSS
def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
          (MOVSSrr VR128:$src1, FR32:$src2)>;
def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4i32 VR128:$src1),
                   (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4f32 VR128:$src1),
                   (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;

// Shuffle with MOVSD
def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
          (MOVSDrr VR128:$src1, FR64:$src2)>;
def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2i64 VR128:$src1),
                   (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2f64 VR128:$src1),
                   (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;

// Shuffle with PSHUFHW
def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
          (PSHUFHWri VR128:$src, imm:$imm)>;
def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
          (PSHUFHWmi addr:$src, imm:$imm)>;

// Shuffle with PSHUFLW
def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
          (PSHUFLWri VR128:$src, imm:$imm)>;
def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
          (PSHUFLWmi addr:$src, imm:$imm)>;
// Shuffle with MOVLPS
def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(X86Movlps VR128:$src1,
                    (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
// FIXME: Instead of X86Movlps, there should be an X86Movsd here; the problem
// is during lowering, where it's not possible to recognize the load fold
// because it has two uses through a bitcast. One use disappears at isel time
// and the fold opportunity reappears.
def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;

def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
// Shuffle with MOVLPD
def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2f64 (X86Movlpd VR128:$src1,
                    (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;

// Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD
def : Pat<(store (f64 (vector_extract
          (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))), addr:$dst),
          (MOVHPSmr addr:$dst, VR128:$src)>;
def : Pat<(store (f64 (vector_extract
          (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))), addr:$dst),
          (MOVHPDmr addr:$dst, VR128:$src)>;

def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v4i32 (X86Movlps
                  (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;

def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;