1 //====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions,
11 // and properties of the instructions which are needed for code generation,
12 // machine code emission, and analysis.
14 //===----------------------------------------------------------------------===//
17 //===----------------------------------------------------------------------===//
18 // SSE 1 & 2 Instructions Classes
19 //===----------------------------------------------------------------------===//
21 /// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
22 multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
23 RegisterClass RC, X86MemOperand x86memop,
25 let isCommutable = 1 in {
26 def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
28 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
29 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
30 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
32 def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
34 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
35 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
36 [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
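// Illustrative sketch (not part of the original file): assuming the elided
// trailing parameter is the usual 'bit Is2Addr = 1', a hypothetical use of
// this multiclass could be
//   defm ADDSS_ex : sse12_fp_scalar<0x58, "addss", fadd, FR32, f32mem>, XS;
// which would expand into ADDSS_exrr and ADDSS_exrm.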
39 /// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
40 multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
41 string asm, string SSEVer, string FPSizeStr,
42 Operand memopr, ComplexPattern mem_cpat,
44 def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
46 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
47 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
48 [(set RC:$dst, (!cast<Intrinsic>(
49 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
50 RC:$src1, RC:$src2))]>;
51 def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
53 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
54 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
55 [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
56 SSEVer, "_", OpcodeStr, FPSizeStr))
57 RC:$src1, mem_cpat:$src2))]>;
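// Note: the intrinsic above is selected purely by string concatenation. For
// example (hypothetical parameter values), SSEVer "2", OpcodeStr "add" and
// FPSizeStr "_sd" resolve to int_x86_sse2_add_sd.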
60 /// sse12_fp_packed - SSE 1 & 2 packed instructions class
61 multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
62 RegisterClass RC, ValueType vt,
63 X86MemOperand x86memop, PatFrag mem_frag,
64 Domain d, bit Is2Addr = 1> {
65 let isCommutable = 1 in
66 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
68 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
69 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
70 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
72 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
74 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
75 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
76 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
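// Illustrative sketch (not part of the original file), assuming memopv4f32
// as the load fragment:
//   defm ADDPS_ex : sse12_fp_packed<0x58, "addps", fadd, VR128, v4f32,
//                                   f128mem, memopv4f32, SSEPackedSingle>, TB;
// would expand into ADDPS_exrr and ADDPS_exrm in the packed-single domain.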
79 /// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
80 multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
81 string OpcodeStr, X86MemOperand x86memop,
82 list<dag> pat_rr, list<dag> pat_rm,
84 let isCommutable = 1 in
85 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
87 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
88 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
90 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
92 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
93 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
97 /// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
98 multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
99 string asm, string SSEVer, string FPSizeStr,
100 X86MemOperand x86memop, PatFrag mem_frag,
101 Domain d, bit Is2Addr = 1> {
102 def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
104 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
105 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
106 [(set RC:$dst, (!cast<Intrinsic>(
107 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
108 RC:$src1, RC:$src2))], d>;
109 def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
111 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
112 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
113 [(set RC:$dst, (!cast<Intrinsic>(
114 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
115 RC:$src1, (mem_frag addr:$src2)))], d>;
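// As with the scalar variant, the intrinsic is picked by name concatenation;
// for instance (hypothetical values) SSEVer "sse", OpcodeStr "max" and
// FPSizeStr "_ps" would select int_x86_sse_max_ps.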
118 //===----------------------------------------------------------------------===//
119 // SSE 1 & 2 - Move Instructions
120 //===----------------------------------------------------------------------===//
122 class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
123 SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
124 [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;
126 // Loads from memory, automatically zeroing the upper bits.
127 class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
128 PatFrag mem_pat, string OpcodeStr> :
129 SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
130 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
131 [(set RC:$dst, (mem_pat addr:$src))]>;
133 // Move Instructions. Register-to-register movss/movsd is not used for FR32/64
134 // register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
135 // is used instead. Register-to-register movss/movsd is not modeled as an
136 // INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
137 // in terms of a copy, and as just mentioned, we don't use movss/movsd for copies.
138 def VMOVSSrr : sse12_move_rr<FR32, v4f32,
139 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
140 def VMOVSDrr : sse12_move_rr<FR64, v2f64,
141 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;
143 let canFoldAsLoad = 1, isReMaterializable = 1 in {
144 def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;
146 let AddedComplexity = 20 in
147 def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
150 let Constraints = "$src1 = $dst" in {
151 def MOVSSrr : sse12_move_rr<FR32, v4f32,
152 "movss\t{$src2, $dst|$dst, $src2}">, XS;
153 def MOVSDrr : sse12_move_rr<FR64, v2f64,
154 "movsd\t{$src2, $dst|$dst, $src2}">, XD;
157 let canFoldAsLoad = 1, isReMaterializable = 1 in {
158 def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;
160 let AddedComplexity = 20 in
161 def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
164 let AddedComplexity = 15 in {
165 // Extract the low 32-bit value from one vector and insert it into another.
166 def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
167 (MOVSSrr (v4f32 VR128:$src1),
168 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
169 // Extract the low 64-bit value from one vector and insert it into another.
170 def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
171 (MOVSDrr (v2f64 VR128:$src1),
172 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
175 // Implicitly promote a 32-bit scalar to a vector.
176 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
177 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
178 // Implicitly promote a 64-bit scalar to a vector.
179 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
180 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
181 // Implicitly promote a 32-bit scalar to a vector.
182 def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
183 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
184 // Implicitly promote a 64-bit scalar to a vector.
185 def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
186 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
188 let AddedComplexity = 20 in {
189 let Predicates = [HasSSE1] in {
190 // MOVSSrm zeros the high parts of the register; represent this
191 // with SUBREG_TO_REG.
192 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
193 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
194 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
195 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
196 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
197 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
199 let Predicates = [HasSSE2] in {
200 // MOVSDrm zeros the high parts of the register; represent this
201 // with SUBREG_TO_REG.
202 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
203 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
204 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
205 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
206 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
207 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
208 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
209 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
210 def : Pat<(v2f64 (X86vzload addr:$src)),
211 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
215 let AddedComplexity = 20, Predicates = [HasAVX] in {
216 // MOVSSrm zeros the high parts of the register; represent this
217 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
218 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
219 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
220 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
221 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
222 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
223 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
224 // MOVSDrm zeros the high parts of the register; represent this
225 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
226 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
227 (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
228 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
229 (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
230 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
231 (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
232 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
233 (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
234 def : Pat<(v2f64 (X86vzload addr:$src)),
235 (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
236 // Represent the same patterns above, but in the form they appear for 256-bit types.
238 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
239 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (i32 0)))),
240 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
241 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
242 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (i32 0)))),
243 (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_sd)>;
246 // Store scalar value to memory.
247 def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
248 "movss\t{$src, $dst|$dst, $src}",
249 [(store FR32:$src, addr:$dst)]>;
250 def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
251 "movsd\t{$src, $dst|$dst, $src}",
252 [(store FR64:$src, addr:$dst)]>;
254 def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
255 "movss\t{$src, $dst|$dst, $src}",
256 [(store FR32:$src, addr:$dst)]>, XS, VEX;
257 def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
258 "movsd\t{$src, $dst|$dst, $src}",
259 [(store FR64:$src, addr:$dst)]>, XD, VEX;
261 // Extract and store.
262 def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
265 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
266 def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
269 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
271 // Move Aligned/Unaligned floating point values
272 multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
273 X86MemOperand x86memop, PatFrag ld_frag,
274 string asm, Domain d,
275 bit IsReMaterializable = 1> {
276 let neverHasSideEffects = 1 in
277 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
278 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
279 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
280 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
281 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
282 [(set RC:$dst, (ld_frag addr:$src))], d>;
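// Each defm instantiation below expands into an 'rr' register form and an
// 'rm' load form, e.g. MOVAPS becomes MOVAPSrr and MOVAPSrm (a note on the
// TableGen name concatenation, not an extra definition).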
285 defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
286 "movaps", SSEPackedSingle>, VEX;
287 defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
288 "movapd", SSEPackedDouble>, OpSize, VEX;
289 defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
290 "movups", SSEPackedSingle>, VEX;
291 defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
292 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
294 defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
295 "movaps", SSEPackedSingle>, VEX;
296 defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
297 "movapd", SSEPackedDouble>, OpSize, VEX;
298 defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
299 "movups", SSEPackedSingle>, VEX;
300 defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
301 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
302 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
303 "movaps", SSEPackedSingle>, TB;
304 defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
305 "movapd", SSEPackedDouble>, TB, OpSize;
306 defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
307 "movups", SSEPackedSingle>, TB;
308 defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
309 "movupd", SSEPackedDouble, 0>, TB, OpSize;
311 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
312 "movaps\t{$src, $dst|$dst, $src}",
313 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
314 def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
315 "movapd\t{$src, $dst|$dst, $src}",
316 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
317 def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
318 "movups\t{$src, $dst|$dst, $src}",
319 [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
320 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
321 "movupd\t{$src, $dst|$dst, $src}",
322 [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
323 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
324 "movaps\t{$src, $dst|$dst, $src}",
325 [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
326 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
327 "movapd\t{$src, $dst|$dst, $src}",
328 [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
329 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
330 "movups\t{$src, $dst|$dst, $src}",
331 [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
332 def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
333 "movupd\t{$src, $dst|$dst, $src}",
334 [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
336 def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
337 def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
338 (VMOVUPSYmr addr:$dst, VR256:$src)>;
340 def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
341 def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
342 (VMOVUPDYmr addr:$dst, VR256:$src)>;
344 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
345 "movaps\t{$src, $dst|$dst, $src}",
346 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
347 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
348 "movapd\t{$src, $dst|$dst, $src}",
349 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
350 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
351 "movups\t{$src, $dst|$dst, $src}",
352 [(store (v4f32 VR128:$src), addr:$dst)]>;
353 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
354 "movupd\t{$src, $dst|$dst, $src}",
355 [(store (v2f64 VR128:$src), addr:$dst)]>;
357 // Intrinsic forms of MOVUPS/D load and store
358 def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
359 (ins f128mem:$dst, VR128:$src),
360 "movups\t{$src, $dst|$dst, $src}",
361 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
362 def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
363 (ins f128mem:$dst, VR128:$src),
364 "movupd\t{$src, $dst|$dst, $src}",
365 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
367 def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
368 "movups\t{$src, $dst|$dst, $src}",
369 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
370 def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
371 "movupd\t{$src, $dst|$dst, $src}",
372 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
374 // Move Low/High packed floating point values
375 multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
376 PatFrag mov_frag, string base_opc,
378 def PSrm : PI<opc, MRMSrcMem,
379 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
380 !strconcat(base_opc, "s", asm_opr),
383 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
384 SSEPackedSingle>, TB;
386 def PDrm : PI<opc, MRMSrcMem,
387 (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
388 !strconcat(base_opc, "d", asm_opr),
389 [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
390 (scalar_to_vector (loadf64 addr:$src2)))))],
391 SSEPackedDouble>, TB, OpSize;
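// Each defm below therefore yields a packed-single and a packed-double load
// form, e.g. MOVH expands to MOVHPSrm and MOVHPDrm; MOVHPSrm is the name the
// X86vzload pattern further down refers to.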
394 let AddedComplexity = 20 in {
395 defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
396 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
397 defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
398 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
400 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
401 defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
402 "\t{$src2, $dst|$dst, $src2}">;
403 defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
404 "\t{$src2, $dst|$dst, $src2}">;
407 def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
408 "movlps\t{$src, $dst|$dst, $src}",
409 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
410 (iPTR 0))), addr:$dst)]>, VEX;
411 def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
412 "movlpd\t{$src, $dst|$dst, $src}",
413 [(store (f64 (vector_extract (v2f64 VR128:$src),
414 (iPTR 0))), addr:$dst)]>, VEX;
415 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
416 "movlps\t{$src, $dst|$dst, $src}",
417 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
418 (iPTR 0))), addr:$dst)]>;
419 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
420 "movlpd\t{$src, $dst|$dst, $src}",
421 [(store (f64 (vector_extract (v2f64 VR128:$src),
422 (iPTR 0))), addr:$dst)]>;
424 // v2f64 extract element 1 is always custom lowered to unpack high to low
425 // and extract element 0, so the non-store version isn't too horrible.
426 def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
427 "movhps\t{$src, $dst|$dst, $src}",
428 [(store (f64 (vector_extract
429 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
430 (undef)), (iPTR 0))), addr:$dst)]>,
432 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
433 "movhpd\t{$src, $dst|$dst, $src}",
434 [(store (f64 (vector_extract
435 (v2f64 (unpckh VR128:$src, (undef))),
436 (iPTR 0))), addr:$dst)]>,
438 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
439 "movhps\t{$src, $dst|$dst, $src}",
440 [(store (f64 (vector_extract
441 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
442 (undef)), (iPTR 0))), addr:$dst)]>;
443 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
444 "movhpd\t{$src, $dst|$dst, $src}",
445 [(store (f64 (vector_extract
446 (v2f64 (unpckh VR128:$src, (undef))),
447 (iPTR 0))), addr:$dst)]>;
449 let AddedComplexity = 20 in {
450 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
451 (ins VR128:$src1, VR128:$src2),
452 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
454 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
456 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
457 (ins VR128:$src1, VR128:$src2),
458 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
460 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
463 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
464 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
465 (ins VR128:$src1, VR128:$src2),
466 "movlhps\t{$src2, $dst|$dst, $src2}",
468 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
469 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
470 (ins VR128:$src1, VR128:$src2),
471 "movhlps\t{$src2, $dst|$dst, $src2}",
473 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
476 def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
477 (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
478 let AddedComplexity = 20 in {
479 def : Pat<(v4f32 (movddup VR128:$src, (undef))),
480 (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
481 def : Pat<(v2i64 (movddup VR128:$src, (undef))),
482 (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
485 //===----------------------------------------------------------------------===//
486 // SSE 1 & 2 - Conversion Instructions
487 //===----------------------------------------------------------------------===//
489 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
490 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
492 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
493 [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
494 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
495 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
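// For a concrete use, see e.g. the CVTTSS2SI defm further below, which
// instantiates sse12_cvt_s with fp_to_sint and expands into CVTTSS2SIrr and
// CVTTSS2SIrm.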
498 multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
499 X86MemOperand x86memop, string asm> {
500 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
502 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
506 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
507 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
508 string asm, Domain d> {
509 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
510 [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
511 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
512 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
515 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
516 X86MemOperand x86memop, string asm> {
517 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
518 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
519 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
520 (ins DstRC:$src1, x86memop:$src),
521 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
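// sse12_vcvt_avx carries no patterns of its own; the VCVTSI2SS*/VCVTSI2SD*
// defms below supply the AVX assembly forms, and the HasAVX Pat<> entries
// that follow map sint_to_fp onto the resulting *rr/*rm names.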
524 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
525 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
526 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
527 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
529 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
530 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
531 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
532 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
535 // The assembler can recognize rr 64-bit instructions by seeing an rxx
536 // register, but the same isn't true when only memory operands are used;
537 // provide other assembly "l" and "q" forms to address this explicitly
538 // where appropriate.
539 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
541 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
543 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
545 defm VCVTSI2SDL : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
547 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
550 let Predicates = [HasAVX] in {
551 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
552 (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
553 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
554 (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
555 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
556 (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
557 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
558 (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;
560 def : Pat<(f32 (sint_to_fp GR32:$src)),
561 (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
562 def : Pat<(f32 (sint_to_fp GR64:$src)),
563 (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
564 def : Pat<(f64 (sint_to_fp GR32:$src)),
565 (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
566 def : Pat<(f64 (sint_to_fp GR64:$src)),
567 (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
570 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
571 "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
572 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
573 "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
574 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
575 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
576 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
577 "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
578 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
579 "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
580 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
581 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
582 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
583 "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
584 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
585 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
587 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
588 // and/or XMM operand(s).
590 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
591 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
593 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
594 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
595 [(set DstRC:$dst, (Int SrcRC:$src))]>;
596 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
597 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
598 [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
601 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
602 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
603 PatFrag ld_frag, string asm, bit Is2Addr = 1> {
604 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
606 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
607 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
608 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
609 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
610 (ins DstRC:$src1, x86memop:$src2),
612 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
613 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
614 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
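// Illustrative expansion (not an extra definition): the Int_VCVTSI2SS defm
// below produces Int_VCVTSI2SSrr and Int_VCVTSI2SSrm wired to the
// int_x86_sse_cvtsi2ss intrinsic.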
617 defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
618 f128mem, load, "cvtsd2si">, XD, VEX;
619 defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
620 int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
623 // FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_.
624 // Get rid of this hack or rename the intrinsics; there are several
625 // instructions that only match with the intrinsic form. Why create duplicates
626 // to let them be recognized by the assembler?
627 defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
628 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
629 defm VCVTSD2SI64 : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
630 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
631 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
632 f128mem, load, "cvtsd2si{l}">, XD;
633 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
634 f128mem, load, "cvtsd2si{q}">, XD, REX_W;
637 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
638 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
639 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
640 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
642 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
643 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
644 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
645 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
648 let Constraints = "$src1 = $dst" in {
649 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
650 int_x86_sse_cvtsi2ss, i32mem, loadi32,
652 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
653 int_x86_sse_cvtsi642ss, i64mem, loadi64,
654 "cvtsi2ss{q}">, XS, REX_W;
655 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
656 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
658 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
659 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
660 "cvtsi2sd">, XD, REX_W;
665 // Aliases for intrinsics
666 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
667 f32mem, load, "cvttss2si">, XS, VEX;
668 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
669 int_x86_sse_cvttss2si64, f32mem, load,
670 "cvttss2si">, XS, VEX, VEX_W;
671 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
672 f128mem, load, "cvttsd2si">, XD, VEX;
673 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
674 int_x86_sse2_cvttsd2si64, f128mem, load,
675 "cvttsd2si">, XD, VEX, VEX_W;
676 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
677 f32mem, load, "cvttss2si">, XS;
678 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
679 int_x86_sse_cvttss2si64, f32mem, load,
680 "cvttss2si{q}">, XS, REX_W;
681 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
682 f128mem, load, "cvttsd2si">, XD;
683 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
684 int_x86_sse2_cvttsd2si64, f128mem, load,
685 "cvttsd2si{q}">, XD, REX_W;
687 let Pattern = []<dag> in {
688 defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
689 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
690 defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
691 "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
693 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
694 "cvtdq2ps\t{$src, $dst|$dst, $src}",
695 SSEPackedSingle>, TB, VEX;
696 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
697 "cvtdq2ps\t{$src, $dst|$dst, $src}",
698 SSEPackedSingle>, TB, VEX;
701 let Pattern = []<dag> in {
702 defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
703 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
704 defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
705 "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
706 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
707 "cvtdq2ps\t{$src, $dst|$dst, $src}",
708 SSEPackedSingle>, TB; /* PD SSE3 form is available */
711 let Predicates = [HasSSE1] in {
712 def : Pat<(int_x86_sse_cvtss2si VR128:$src),
713 (CVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
714 def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
715 (CVTSS2SIrm addr:$src)>;
716 def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
717 (CVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
718 def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
719 (CVTSS2SI64rm addr:$src)>;
722 let Predicates = [HasAVX] in {
723 def : Pat<(int_x86_sse_cvtss2si VR128:$src),
724 (VCVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
725 def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
726 (VCVTSS2SIrm addr:$src)>;
727 def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
728 (VCVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
729 def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
730 (VCVTSS2SI64rm addr:$src)>;
735 // Convert scalar double to scalar single
736 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
737 (ins FR64:$src1, FR64:$src2),
738 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
740 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
741 (ins FR64:$src1, f64mem:$src2),
742 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
743 []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
744 def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
747 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
748 "cvtsd2ss\t{$src, $dst|$dst, $src}",
749 [(set FR32:$dst, (fround FR64:$src))]>;
750 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
751 "cvtsd2ss\t{$src, $dst|$dst, $src}",
752 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
753 Requires<[HasSSE2, OptForSize]>;
755 defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
756 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
758 let Constraints = "$src1 = $dst" in
759 defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
760 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;
762 // Convert scalar single to scalar double
763 // SSE2 instructions with XS prefix
764 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
765 (ins FR32:$src1, FR32:$src2),
766 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
767 []>, XS, Requires<[HasAVX]>, VEX_4V;
768 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
769 (ins FR32:$src1, f32mem:$src2),
770 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
771 []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;
773 let Predicates = [HasAVX] in {
774 def : Pat<(f64 (fextend FR32:$src)),
775 (VCVTSS2SDrr FR32:$src, FR32:$src)>;
776 def : Pat<(fextend (loadf32 addr:$src)),
777 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
778 def : Pat<(extloadf32 addr:$src),
779 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
782 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
783 "cvtss2sd\t{$src, $dst|$dst, $src}",
784 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
786 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
787 "cvtss2sd\t{$src, $dst|$dst, $src}",
788 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
789 Requires<[HasSSE2, OptForSize]>;
791 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
792 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
793 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
794 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
795 VR128:$src2))]>, XS, VEX_4V,
797 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
798 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
799 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
800 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
801 (load addr:$src2)))]>, XS, VEX_4V,
803 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
804 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
805 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
806 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
807 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
810 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
811 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
812 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
813 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
814 (load addr:$src2)))]>, XS,
818 def : Pat<(extloadf32 addr:$src),
819 (CVTSS2SDrr (MOVSSrm addr:$src))>,
820 Requires<[HasSSE2, OptForSpeed]>;
822 // Convert doubleword to packed single/double fp
823 // SSE2 instructions without OpSize prefix
824 def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
825 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
826 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
827 TB, VEX, Requires<[HasAVX]>;
828 def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
829 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
830 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
831 (bitconvert (memopv2i64 addr:$src))))]>,
832 TB, VEX, Requires<[HasAVX]>;
833 def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
834 "cvtdq2ps\t{$src, $dst|$dst, $src}",
835 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
836 TB, Requires<[HasSSE2]>;
837 def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
838 "cvtdq2ps\t{$src, $dst|$dst, $src}",
839 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
840 (bitconvert (memopv2i64 addr:$src))))]>,
841 TB, Requires<[HasSSE2]>;
843 // FIXME: why is the non-intrinsic version described as SSE3?
844 // SSE2 instructions with XS prefix
845 def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
846 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
847 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
848 XS, VEX, Requires<[HasAVX]>;
849 def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
850 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
851 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
852 (bitconvert (memopv2i64 addr:$src))))]>,
853 XS, VEX, Requires<[HasAVX]>;
854 def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
855 "cvtdq2pd\t{$src, $dst|$dst, $src}",
856 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
857 XS, Requires<[HasSSE2]>;
858 def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
859 "cvtdq2pd\t{$src, $dst|$dst, $src}",
860 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
861 (bitconvert (memopv2i64 addr:$src))))]>,
862 XS, Requires<[HasSSE2]>;
865 // Convert packed single/double fp to doubleword
866 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
867 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
868 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
869 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
870 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
871 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
872 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
873 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
874 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
875 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
876 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
877 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
879 def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
880 "cvtps2dq\t{$src, $dst|$dst, $src}",
881 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
883 def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
885 "cvtps2dq\t{$src, $dst|$dst, $src}",
886 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
887 (memop addr:$src)))]>, VEX;
888 def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
889 "cvtps2dq\t{$src, $dst|$dst, $src}",
890 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
891 def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
892 "cvtps2dq\t{$src, $dst|$dst, $src}",
893 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
894 (memop addr:$src)))]>;
896 // SSE2 packed instructions with XD prefix
897 def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
898 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
899 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
900 XD, VEX, Requires<[HasAVX]>;
901 def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
902 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
903 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
904 (memop addr:$src)))]>,
905 XD, VEX, Requires<[HasAVX]>;
906 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
907 "cvtpd2dq\t{$src, $dst|$dst, $src}",
908 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
909 XD, Requires<[HasSSE2]>;
910 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
911 "cvtpd2dq\t{$src, $dst|$dst, $src}",
912 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
913 (memop addr:$src)))]>,
914 XD, Requires<[HasSSE2]>;
917 // Convert packed single/double fp to doubleword with truncation
918 // SSE2 packed instructions with XS prefix
919 def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
920 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
921 def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
922 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
923 def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
924 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
925 def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
926 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
927 def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
928 "cvttps2dq\t{$src, $dst|$dst, $src}",
930 (int_x86_sse2_cvttps2dq VR128:$src))]>;
931 def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
932 "cvttps2dq\t{$src, $dst|$dst, $src}",
934 (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;
936 def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
937 "vcvttps2dq\t{$src, $dst|$dst, $src}",
939 (int_x86_sse2_cvttps2dq VR128:$src))]>,
940 XS, VEX, Requires<[HasAVX]>;
941 def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
942 "vcvttps2dq\t{$src, $dst|$dst, $src}",
943 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
944 (memop addr:$src)))]>,
945 XS, VEX, Requires<[HasAVX]>;
947 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
948 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
949 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
950 (CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
952 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
953 (Int_VCVTDQ2PSrr VR128:$src)>, Requires<[HasAVX]>;
954 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
955 (VCVTTPS2DQrr VR128:$src)>, Requires<[HasAVX]>;
956 def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
957 (VCVTDQ2PSYrr VR256:$src)>, Requires<[HasAVX]>;
958 def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
959 (VCVTTPS2DQYrr VR256:$src)>, Requires<[HasAVX]>;
961 def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
963 "cvttpd2dq\t{$src, $dst|$dst, $src}",
964 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
966 def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
968 "cvttpd2dq\t{$src, $dst|$dst, $src}",
969 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
970 (memop addr:$src)))]>, VEX;
971 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
972 "cvttpd2dq\t{$src, $dst|$dst, $src}",
973 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
974 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
975 "cvttpd2dq\t{$src, $dst|$dst, $src}",
976 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
977 (memop addr:$src)))]>;
979 // The assembler can recognize rr 256-bit instructions by seeing a ymm
980 // register, but the same isn't true when using memory operands instead.
981 // Provide other assembly rr and rm forms to address this explicitly.
982 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
983 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
984 def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
985 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
988 def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
989 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
990 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
991 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
994 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
995 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
996 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
997 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
999 // Convert packed single to packed double
1000 let Predicates = [HasAVX] in {
1001 // SSE2 instructions without OpSize prefix
1002 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1003 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
1004 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1005 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
1006 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
1007 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
1008 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
1009 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
1011 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1012 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
1013 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1014 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
1016 def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1017 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1018 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1019 VEX, Requires<[HasAVX]>;
1020 def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1021 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1022 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1023 (load addr:$src)))]>,
1024 VEX, Requires<[HasAVX]>;
1025 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1026 "cvtps2pd\t{$src, $dst|$dst, $src}",
1027 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1028 TB, Requires<[HasSSE2]>;
1029 def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1030 "cvtps2pd\t{$src, $dst|$dst, $src}",
1031 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1032 (load addr:$src)))]>,
1033 TB, Requires<[HasSSE2]>;
1035 // Convert packed double to packed single
1036 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1037 // register, but the same isn't true when using memory operands instead.
1038 // Provide other assembly rr and rm forms to address this explicitly.
1039 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1040 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1041 def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1042 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1045 def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1046 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1047 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1048 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1051 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1052 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
1053 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1054 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
1055 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1056 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1057 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1058 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1061 def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1062 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1063 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1064 def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
1066 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1067 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1068 (memop addr:$src)))]>;
1069 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1070 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1071 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1072 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1073 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1074 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1075 (memop addr:$src)))]>;
1077 // AVX 256-bit register conversion intrinsics
1078 // FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
1079 // whenever possible to avoid declaring two versions of each one.
1080 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
1081 (VCVTDQ2PSYrr VR256:$src)>;
1082 def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
1083 (VCVTDQ2PSYrm addr:$src)>;
1085 def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
1086 (VCVTPD2PSYrr VR256:$src)>;
1087 def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
1088 (VCVTPD2PSYrm addr:$src)>;
1090 def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
1091 (VCVTPS2DQYrr VR256:$src)>;
1092 def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
1093 (VCVTPS2DQYrm addr:$src)>;
1095 def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
1096 (VCVTPS2PDYrr VR128:$src)>;
1097 def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
1098 (VCVTPS2PDYrm addr:$src)>;
1100 def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
1101 (VCVTTPD2DQYrr VR256:$src)>;
1102 def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
1103 (VCVTTPD2DQYrm addr:$src)>;
1105 def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
1106 (VCVTTPS2DQYrr VR256:$src)>;
1107 def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
1108 (VCVTTPS2DQYrm addr:$src)>;
1110 // Match fround and fextend for 128/256-bit conversions
1111 def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
1112 (VCVTPD2PSYrr VR256:$src)>;
1113 def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
1114 (VCVTPD2PSYrm addr:$src)>;
1116 def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
1117 (VCVTPS2PDYrr VR128:$src)>;
1118 def : Pat<(v4f64 (fextend (loadv4f32 addr:$src))),
1119 (VCVTPS2PDYrm addr:$src)>;
1121 //===----------------------------------------------------------------------===//
1122 // SSE 1 & 2 - Compare Instructions
1123 //===----------------------------------------------------------------------===//
1125 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
1126 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
1127 string asm, string asm_alt> {
1128 let isAsmParserOnly = 1 in {
1129 def rr : SIi8<0xC2, MRMSrcReg,
1130 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
1133 def rm : SIi8<0xC2, MRMSrcMem,
1134 (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
1138 // Accept explicit immediate argument form instead of comparison code.
1139 def rr_alt : SIi8<0xC2, MRMSrcReg,
1140 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1143 def rm_alt : SIi8<0xC2, MRMSrcMem,
1144 (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
1148 let neverHasSideEffects = 1 in {
1149 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
1150 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1151 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1153 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
1154 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1155 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1159 let Constraints = "$src1 = $dst" in {
1160 def CMPSSrr : SIi8<0xC2, MRMSrcReg,
1161 (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, SSECC:$cc),
1162 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
1163 [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), FR32:$src2, imm:$cc))]>, XS;
1164 def CMPSSrm : SIi8<0xC2, MRMSrcMem,
1165 (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2, SSECC:$cc),
1166 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
1167 [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), (loadf32 addr:$src2), imm:$cc))]>, XS;
1168 def CMPSDrr : SIi8<0xC2, MRMSrcReg,
1169 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, SSECC:$cc),
1170 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
1171 [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), FR64:$src2, imm:$cc))]>, XD;
1172 def CMPSDrm : SIi8<0xC2, MRMSrcMem,
1173 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2, SSECC:$cc),
1174 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
1175 [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), (loadf64 addr:$src2), imm:$cc))]>, XD;
1177 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1178 def CMPSSrr_alt : SIi8<0xC2, MRMSrcReg,
1179 (outs FR32:$dst), (ins FR32:$src1, FR32:$src, i8imm:$src2),
1180 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
1181 def CMPSSrm_alt : SIi8<0xC2, MRMSrcMem,
1182 (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, i8imm:$src2),
1183 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
1184 def CMPSDrr_alt : SIi8<0xC2, MRMSrcReg,
1185 (outs FR64:$dst), (ins FR64:$src1, FR64:$src, i8imm:$src2),
1186 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
1187 def CMPSDrm_alt : SIi8<0xC2, MRMSrcMem,
1188 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, i8imm:$src2),
1189 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
1192 multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
1193 Intrinsic Int, string asm> {
1194 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1195 (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
1196 [(set VR128:$dst, (Int VR128:$src1,
1197 VR128:$src, imm:$cc))]>;
1198 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1199 (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
1200 [(set VR128:$dst, (Int VR128:$src1,
1201 (load addr:$src), imm:$cc))]>;
1204 // Aliases to match intrinsics which expect XMM operand(s).
1205 defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1206 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
1208 defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1209 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
1211 let Constraints = "$src1 = $dst" in {
1212 defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1213 "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
1214 defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1215 "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
1219 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
1220 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1221 ValueType vt, X86MemOperand x86memop,
1222 PatFrag ld_frag, string OpcodeStr, Domain d> {
1223 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1224 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1225 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
1226 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1227 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1228 [(set EFLAGS, (OpNode (vt RC:$src1),
1229 (ld_frag addr:$src2)))], d>;
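// Example of the resulting names: 'defm UCOMISS' below yields UCOMISSrr and
// UCOMISSrm, both comparing via the X86cmp node and writing EFLAGS.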
1232 let Defs = [EFLAGS] in {
1233 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1234 "ucomiss", SSEPackedSingle>, VEX;
1235 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1236 "ucomisd", SSEPackedDouble>, OpSize, VEX;
1237 let Pattern = []<dag> in {
1238 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1239 "comiss", SSEPackedSingle>, VEX;
1240 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1241 "comisd", SSEPackedDouble>, OpSize, VEX;
1244 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1245 load, "ucomiss", SSEPackedSingle>, VEX;
1246 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1247 load, "ucomisd", SSEPackedDouble>, OpSize, VEX;
1249 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
1250 load, "comiss", SSEPackedSingle>, VEX;
1251 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
1252 load, "comisd", SSEPackedDouble>, OpSize, VEX;
1253 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1254 "ucomiss", SSEPackedSingle>, TB;
1255 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1256 "ucomisd", SSEPackedDouble>, TB, OpSize;
1258 let Pattern = []<dag> in {
1259 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1260 "comiss", SSEPackedSingle>, TB;
1261 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1262 "comisd", SSEPackedDouble>, TB, OpSize;
1265 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1266 load, "ucomiss", SSEPackedSingle>, TB;
1267 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1268 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
1270 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
1271 "comiss", SSEPackedSingle>, TB;
1272 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
1273 "comisd", SSEPackedDouble>, TB, OpSize;
1274 } // Defs = [EFLAGS]
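// Illustrative note (not part of the TableGen definitions): a minimal C sketch,
// assuming <xmmintrin.h> and SSE codegen, of how the (U)COMISS compare-and-set-
// EFLAGS behavior surfaces at the source level. The helper name is made up.
//   #include <xmmintrin.h>
//   int low_is_less(__m128 a, __m128 b) {
//     // ucomiss compares the low elements and sets ZF/PF/CF; the intrinsic
//     // reads that result back as 0 or 1. ucomiss and comiss differ only in
//     // that comiss also signals invalid on quiet NaNs.
//     return _mm_ucomilt_ss(a, b);
//   }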
1276 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
1277 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1278 Intrinsic Int, string asm, string asm_alt,
1280 let isAsmParserOnly = 1 in {
1281 def rri : PIi8<0xC2, MRMSrcReg,
1282 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1283 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1284 def rmi : PIi8<0xC2, MRMSrcMem,
1285 (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
1286 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1289 // Accept explicit immediate argument form instead of comparison code.
1290 def rri_alt : PIi8<0xC2, MRMSrcReg,
1291 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1293 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1294 (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
1298 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1299 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1300 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1301 SSEPackedSingle>, VEX_4V;
1302 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1303 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1304 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1305 SSEPackedDouble>, OpSize, VEX_4V;
1306 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
1307 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1308 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1309 SSEPackedSingle>, VEX_4V;
1310 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
1311 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1312 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1313 SSEPackedDouble>, OpSize, VEX_4V;
1314 let Constraints = "$src1 = $dst" in {
1315 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1316 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1317 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1318 SSEPackedSingle>, TB;
1319 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1320 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1321 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1322 SSEPackedDouble>, TB, OpSize;
1325 let Predicates = [HasSSE1] in {
1326 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1327 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1328 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1329 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1332 let Predicates = [HasSSE2] in {
1333 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1334 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1335 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1336 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1339 let Predicates = [HasAVX] in {
1340 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1341 (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1342 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1343 (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1344 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1345 (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1346 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1347 (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1349 def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
1350 (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
1351 def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), (memop addr:$src2), imm:$cc)),
1352 (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
1353 def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
1354 (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
1355 def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), (memop addr:$src2), imm:$cc)),
1356 (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
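// Illustrative note (not part of the TableGen definitions): a minimal C sketch,
// assuming <xmmintrin.h>, of what the packed-compare predicate encodes. CMPPS
// with imm8 = 1 is the "less-than" form (cmpltps); each result lane is an
// all-ones or all-zeros mask. The helper name is made up.
//   #include <xmmintrin.h>
//   __m128 less_mask(__m128 a, __m128 b) {
//     return _mm_cmplt_ps(a, b);   // cmpps $1 / cmpltps: lane mask of (a < b)
//   }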
1359 //===----------------------------------------------------------------------===//
1360 // SSE 1 & 2 - Shuffle Instructions
1361 //===----------------------------------------------------------------------===//
1363 /// sse12_shuffle - sse 1 & 2 shuffle instructions
1364 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1365 ValueType vt, string asm, PatFrag mem_frag,
1366 Domain d, bit IsConvertibleToThreeAddress = 0> {
1367 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
1368 (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
1369 [(set RC:$dst, (vt (shufp:$src3
1370 RC:$src1, (mem_frag addr:$src2))))], d>;
1371 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1372 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
1373 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
1375 (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
1378 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1379 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1380 memopv4f32, SSEPackedSingle>, TB, VEX_4V;
1381 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
1382 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1383 memopv8f32, SSEPackedSingle>, TB, VEX_4V;
1384 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1385 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1386 memopv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
1387 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
1388 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1389 memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
1391 let Constraints = "$src1 = $dst" in {
1392 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1393 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1394 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1396 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1397 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1398 memopv2f64, SSEPackedDouble>, TB, OpSize;
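// Illustrative note (not part of the TableGen definitions): a minimal C sketch,
// assuming <xmmintrin.h>, of how the SHUFPS immediate selects elements: the two
// low result elements come from the first source and the two high ones from the
// second, two immediate bits per element. The helper name is made up.
//   #include <xmmintrin.h>
//   __m128 mix(__m128 a, __m128 b) {
//     // _MM_SHUFFLE(3,2,1,0) == 0xE4 selects {a[0], a[1], b[2], b[3]}.
//     return _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0));
//   }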
1401 //===----------------------------------------------------------------------===//
1402 // SSE 1 & 2 - Unpack Instructions
1403 //===----------------------------------------------------------------------===//
1405 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1406 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1407 PatFrag mem_frag, RegisterClass RC,
1408 X86MemOperand x86memop, string asm,
1410 def rr : PI<opc, MRMSrcReg,
1411 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1413 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1414 def rm : PI<opc, MRMSrcMem,
1415 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1417 (vt (OpNode RC:$src1,
1418 (mem_frag addr:$src2))))], d>;
1421 let AddedComplexity = 10 in {
1422 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1423 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1424 SSEPackedSingle>, VEX_4V;
1425 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1426 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1427 SSEPackedDouble>, OpSize, VEX_4V;
1428 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1429 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1430 SSEPackedSingle>, VEX_4V;
1431 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1432 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1433 SSEPackedDouble>, OpSize, VEX_4V;
1435 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1436 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1437 SSEPackedSingle>, VEX_4V;
1438 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1439 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1440 SSEPackedDouble>, OpSize, VEX_4V;
1441 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1442 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1443 SSEPackedSingle>, VEX_4V;
1444 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1445 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1446 SSEPackedDouble>, OpSize, VEX_4V;
1448 let Constraints = "$src1 = $dst" in {
1449 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1450 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1451 SSEPackedSingle>, TB;
1452 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1453 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1454 SSEPackedDouble>, TB, OpSize;
1455 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1456 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1457 SSEPackedSingle>, TB;
1458 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1459 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1460 SSEPackedDouble>, TB, OpSize;
1461 } // Constraints = "$src1 = $dst"
1462 } // AddedComplexity
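// Illustrative note (not part of the TableGen definitions): a minimal C sketch,
// assuming <xmmintrin.h>, of the unpack/interleave semantics. The helper name
// is made up.
//   #include <xmmintrin.h>
//   __m128 interleave_low(__m128 a, __m128 b) {
//     // unpcklps interleaves the low halves: result = {a[0], b[0], a[1], b[1]}.
//     return _mm_unpacklo_ps(a, b);
//   }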
1464 //===----------------------------------------------------------------------===//
1465 // SSE 1 & 2 - Extract Floating-Point Sign mask
1466 //===----------------------------------------------------------------------===//
1468 /// sse12_extr_sign_mask - sse 1 & 2 packed FP sign mask extraction
1469 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1471 def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1472 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1473 [(set GR32:$dst, (Int RC:$src))], d>;
1474 def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
1475 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
1478 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1479 SSEPackedSingle>, TB;
1480 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1481 SSEPackedDouble>, TB, OpSize;
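// Illustrative note (not part of the TableGen definitions): a minimal C sketch,
// assuming <xmmintrin.h>, of what MOVMSKPS extracts. The helper name is made up.
//   #include <xmmintrin.h>
//   int sign_bits(__m128 v) {
//     // movmskps packs the sign bit of each float into the low four bits of a
//     // GPR (bit i = sign of element i); e.g. {-1, 2, -3, 4} gives 0b0101.
//     return _mm_movemask_ps(v);
//   }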
1483 def : Pat<(i32 (X86fgetsign FR32:$src)),
1484 (MOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1485 sub_ss))>, Requires<[HasSSE1]>;
1486 def : Pat<(i64 (X86fgetsign FR32:$src)),
1487 (MOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1488 sub_ss))>, Requires<[HasSSE1]>;
1489 def : Pat<(i32 (X86fgetsign FR64:$src)),
1490 (MOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1491 sub_sd))>, Requires<[HasSSE2]>;
1492 def : Pat<(i64 (X86fgetsign FR64:$src)),
1493 (MOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1494 sub_sd))>, Requires<[HasSSE2]>;
1496 let Predicates = [HasAVX] in {
1497 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1498 "movmskps", SSEPackedSingle>, TB, VEX;
1499 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1500 "movmskpd", SSEPackedDouble>, TB, OpSize,
1502 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
1503 "movmskps", SSEPackedSingle>, TB, VEX;
1504 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
1505 "movmskpd", SSEPackedDouble>, TB, OpSize,
1508 def : Pat<(i32 (X86fgetsign FR32:$src)),
1509 (VMOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1511 def : Pat<(i64 (X86fgetsign FR32:$src)),
1512 (VMOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1514 def : Pat<(i32 (X86fgetsign FR64:$src)),
1515 (VMOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1517 def : Pat<(i64 (X86fgetsign FR64:$src)),
1518 (VMOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1522 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1523 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1524 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1525 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1527 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1528 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1529 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1530 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1534 //===----------------------------------------------------------------------===//
1535 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1536 //===----------------------------------------------------------------------===//
1538 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1539 // names that start with 'Fs'.
1541 // Alias instructions that map fld0 to pxor for sse.
1542 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1543 canFoldAsLoad = 1 in {
1544 // FIXME: Set encoding to pseudo!
1545 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1546 [(set FR32:$dst, fp32imm0)]>,
1547 Requires<[HasSSE1]>, TB, OpSize;
1548 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1549 [(set FR64:$dst, fpimm0)]>,
1550 Requires<[HasSSE2]>, TB, OpSize;
1551 def VFsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1552 [(set FR32:$dst, fp32imm0)]>,
1553 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
1554 def VFsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1555 [(set FR64:$dst, fpimm0)]>,
1556 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
1559 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
1560 // bits are disregarded.
1561 let neverHasSideEffects = 1 in {
1562 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1563 "movaps\t{$src, $dst|$dst, $src}", []>;
1564 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1565 "movapd\t{$src, $dst|$dst, $src}", []>;
1568 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1569 // bits are disregarded.
1570 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1571 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1572 "movaps\t{$src, $dst|$dst, $src}",
1573 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
1574 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1575 "movapd\t{$src, $dst|$dst, $src}",
1576 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1579 //===----------------------------------------------------------------------===//
1580 // SSE 1 & 2 - Logical Instructions
1581 //===----------------------------------------------------------------------===//
1583 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
1585 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
1587 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
1588 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, TB, VEX_4V;
1590 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
1591 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, TB, OpSize, VEX_4V;
1593 let Constraints = "$src1 = $dst" in {
1594 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
1595 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
1597 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
1598 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
1602 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1603 let mayLoad = 0 in {
1604 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
1605 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
1606 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
1609 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
1610 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
1612 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
1614 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
1616 // In AVX there is no need to add a pattern for the 128-bit logical rr ps
1617 // form, because it is promoted to v2i64 and the pattern is already covered
1618 // by the integer version. The explicit pattern is needed only for SSE,
1619 // because v2i64 is available on SSE2 but not on SSE1.
1620 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1621 !strconcat(OpcodeStr, "ps"), f128mem, [],
1622 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1623 (memopv2i64 addr:$src2)))], 0>, TB, VEX_4V;
1625 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1626 !strconcat(OpcodeStr, "pd"), f128mem,
1627 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1628 (bc_v2i64 (v2f64 VR128:$src2))))],
1629 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1630 (memopv2i64 addr:$src2)))], 0>,
1632 let Constraints = "$src1 = $dst" in {
1633 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1634 !strconcat(OpcodeStr, "ps"), f128mem,
1635 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
1636 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1637 (memopv2i64 addr:$src2)))]>, TB;
1639 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1640 !strconcat(OpcodeStr, "pd"), f128mem,
1641 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1642 (bc_v2i64 (v2f64 VR128:$src2))))],
1643 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1644 (memopv2i64 addr:$src2)))]>, TB, OpSize;
1648 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
1650 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr,
1652 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
1653 !strconcat(OpcodeStr, "ps"), f256mem,
1654 [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
1655 [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
1656 (memopv4i64 addr:$src2)))], 0>, TB, VEX_4V;
1658 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
1659 !strconcat(OpcodeStr, "pd"), f256mem,
1660 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
1661 (bc_v4i64 (v4f64 VR256:$src2))))],
1662 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
1663 (memopv4i64 addr:$src2)))], 0>,
1667 // AVX 256-bit packed logical ops forms
1668 defm VAND : sse12_fp_packed_logical_y<0x54, "and", and>;
1669 defm VOR : sse12_fp_packed_logical_y<0x56, "or", or>;
1670 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor", xor>;
1671 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn", X86andnp>;
1673 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
1674 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
1675 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
1676 let isCommutable = 0 in
1677 defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
1679 //===----------------------------------------------------------------------===//
1680 // SSE 1 & 2 - Arithmetic Instructions
1681 //===----------------------------------------------------------------------===//
1683 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and vector forms.
1686 /// In addition, we also have a special variant of the scalar form here to
1687 /// represent the associated intrinsic operation. This form is unlike the
1688 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1689 /// and leaves the top elements unmodified (therefore these cannot be commuted).
1691 /// These three forms can each be reg+reg or reg+mem.
1694 /// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
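// Illustrative note (not part of the TableGen definitions): a minimal C sketch,
// assuming <xmmintrin.h>, of the whole-vector scalar form described above. The
// helper name is made up.
//   #include <xmmintrin.h>
//   __m128 add_low(__m128 a, __m128 b) {
//     // addss: result[0] = a[0] + b[0]; result[1..3] = a[1..3]. Swapping the
//     // operands changes which upper elements survive, which is why this form
//     // cannot be marked commutable.
//     return _mm_add_ss(a, b);
//   }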
1696 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
1698 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
1699 OpNode, FR32, f32mem, Is2Addr>, XS;
1700 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
1701 OpNode, FR64, f64mem, Is2Addr>, XD;
1704 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
1706 let mayLoad = 0 in {
1707 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
1708 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
1709 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
1710 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
1714 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
1716 let mayLoad = 0 in {
1717 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
1718 v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
1719 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
1720 v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
1724 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
1726 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1727 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
1728 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1729 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
1732 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
1734 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1735 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
1736 SSEPackedSingle, Is2Addr>, TB;
1738 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1739 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
1740 SSEPackedDouble, Is2Addr>, TB, OpSize;
1743 multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
1744 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1745 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
1746 SSEPackedSingle, 0>, TB;
1748 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1749 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
1750 SSEPackedDouble, 0>, TB, OpSize;
1753 // Binary Arithmetic instructions
1754 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
1755 basic_sse12_fp_binop_s_int<0x58, "add", 0>,
1756 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
1757 basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
1758 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
1759 basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
1760 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
1761 basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
1763 let isCommutable = 0 in {
1764 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
1765 basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
1766 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
1767 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
1768 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
1769 basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
1770 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
1771 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
1772 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
1773 basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
1774 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
1775 basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
1776 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
1777 basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
1778 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
1779 basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
1780 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
1781 basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
1782 basic_sse12_fp_binop_p_y_int<0x5D, "min">,
1783 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
1786 let Constraints = "$src1 = $dst" in {
1787 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
1788 basic_sse12_fp_binop_p<0x58, "add", fadd>,
1789 basic_sse12_fp_binop_s_int<0x58, "add">;
1790 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
1791 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
1792 basic_sse12_fp_binop_s_int<0x59, "mul">;
1794 let isCommutable = 0 in {
1795 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
1796 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
1797 basic_sse12_fp_binop_s_int<0x5C, "sub">;
1798 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
1799 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
1800 basic_sse12_fp_binop_s_int<0x5E, "div">;
1801 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
1802 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
1803 basic_sse12_fp_binop_s_int<0x5F, "max">,
1804 basic_sse12_fp_binop_p_int<0x5F, "max">;
1805 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
1806 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
1807 basic_sse12_fp_binop_s_int<0x5D, "min">,
1808 basic_sse12_fp_binop_p_int<0x5D, "min">;
1813 /// In addition, we also have a special variant of the scalar form here to
1814 /// represent the associated intrinsic operation. This form is unlike the
1815 /// plain scalar form, in that it takes an entire vector (instead of a
1816 /// scalar) and leaves the top elements undefined.
1818 /// And, we have a special variant form for a full-vector intrinsic form.
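// Illustrative note (not part of the TableGen definitions): a minimal C sketch,
// assuming <xmmintrin.h>, of the whole-vector scalar unop form described above.
// The helper name is made up.
//   #include <xmmintrin.h>
//   __m128 sqrt_low(__m128 a) {
//     // sqrtss in its intrinsic form: only element 0 carries the computed
//     // value, i.e. callers should rely only on result[0] == sqrt(a[0]).
//     return _mm_sqrt_ss(a);
//   }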
1820 /// sse1_fp_unop_s - SSE1 unops in scalar form.
1821 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
1822 SDNode OpNode, Intrinsic F32Int> {
1823 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1824 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1825 [(set FR32:$dst, (OpNode FR32:$src))]>;
1826 // For scalar unary operations, fold a load into the operation
1827 // only in OptForSize mode. It eliminates an instruction, but it also
1828 // eliminates a whole-register clobber (the load), so it introduces a
1829 // partial register update condition.
1830 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
1831 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1832 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
1833 Requires<[HasSSE1, OptForSize]>;
1834 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1835 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1836 [(set VR128:$dst, (F32Int VR128:$src))]>;
1837 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1838 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1839 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1842 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
1843 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
1844 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
1845 !strconcat(OpcodeStr,
1846 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1847 def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1,f32mem:$src2),
1848 !strconcat(OpcodeStr,
1849 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1850 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
1851 (ins ssmem:$src1, VR128:$src2),
1852 !strconcat(OpcodeStr,
1853 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1856 /// sse1_fp_unop_p - SSE1 unops in packed form.
1857 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1858 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1859 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1860 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
1861 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1862 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1863 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
1866 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
1867 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1868 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1869 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1870 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
1871 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1872 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1873 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
1876 /// sse1_fp_unop_p_int - SSE1 intrinsic unops in packed forms.
1877 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1878 Intrinsic V4F32Int> {
1879 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1880 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1881 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
1882 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1883 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1884 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
1887 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsic unops in packed forms.
1888 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1889 Intrinsic V4F32Int> {
1890 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1891 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1892 [(set VR256:$dst, (V4F32Int VR256:$src))]>;
1893 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1894 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1895 [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
1898 /// sse2_fp_unop_s - SSE2 unops in scalar form.
1899 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
1900 SDNode OpNode, Intrinsic F64Int> {
1901 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1902 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1903 [(set FR64:$dst, (OpNode FR64:$src))]>;
1904 // See the comments in sse1_fp_unop_s for why this is OptForSize.
1905 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1906 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1907 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
1908 Requires<[HasSSE2, OptForSize]>;
1909 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1910 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1911 [(set VR128:$dst, (F64Int VR128:$src))]>;
1912 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1913 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1914 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1917 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
1918 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
1919 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1920 !strconcat(OpcodeStr,
1921 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1922 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1,f64mem:$src2),
1923 !strconcat(OpcodeStr,
1924 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1925 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
1926 (ins VR128:$src1, sdmem:$src2),
1927 !strconcat(OpcodeStr,
1928 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1931 /// sse2_fp_unop_p - SSE2 unops in vector forms.
1932 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
1934 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1935 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1936 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
1937 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1938 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1939 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
1942 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
1943 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1944 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1945 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1946 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
1947 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1948 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1949 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
1952 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
1953 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1954 Intrinsic V2F64Int> {
1955 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1956 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1957 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
1958 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1959 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1960 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
1963 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
1964 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1965 Intrinsic V2F64Int> {
1966 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1967 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1968 [(set VR256:$dst, (V2F64Int VR256:$src))]>;
1969 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1970 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1971 [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
1974 let Predicates = [HasAVX] in {
1976 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt">,
1977 sse2_fp_unop_s_avx<0x51, "vsqrt">, VEX_4V;
1979 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
1980 sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
1981 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1982 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1983 sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
1984 sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
1985 sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
1986 sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
1989 // Reciprocal approximations. Note that these typically require refinement
1990 // in order to obtain suitable precision.
1991 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt">, VEX_4V;
1992 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
1993 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
1994 sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
1995 sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
1997 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp">, VEX_4V;
1998 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
1999 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
2000 sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
2001 sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
2004 def : Pat<(f32 (fsqrt FR32:$src)),
2005 (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2006 def : Pat<(f32 (fsqrt (load addr:$src))),
2007 (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2008 Requires<[HasAVX, OptForSize]>;
2009 def : Pat<(f64 (fsqrt FR64:$src)),
2010 (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
2011 def : Pat<(f64 (fsqrt (load addr:$src))),
2012 (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
2013 Requires<[HasAVX, OptForSize]>;
2015 def : Pat<(f32 (X86frsqrt FR32:$src)),
2016 (VRSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2017 def : Pat<(f32 (X86frsqrt (load addr:$src))),
2018 (VRSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2019 Requires<[HasAVX, OptForSize]>;
2021 def : Pat<(f32 (X86frcp FR32:$src)),
2022 (VRCPSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2023 def : Pat<(f32 (X86frcp (load addr:$src))),
2024 (VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2025 Requires<[HasAVX, OptForSize]>;
2027 let Predicates = [HasAVX] in {
2028 def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
2029 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2030 (VSQRTSSr (f32 (IMPLICIT_DEF)),
2031 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2033 def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
2034 (VSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2036 def : Pat<(int_x86_sse2_sqrt_sd VR128:$src),
2037 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)),
2038 (VSQRTSDr (f64 (IMPLICIT_DEF)),
2039 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd)),
2041 def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
2042 (VSQRTSDm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
2044 def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
2045 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2046 (VRSQRTSSr (f32 (IMPLICIT_DEF)),
2047 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2049 def : Pat<(int_x86_sse_rsqrt_ss sse_load_f32:$src),
2050 (VRSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2052 def : Pat<(int_x86_sse_rcp_ss VR128:$src),
2053 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2054 (VRCPSSr (f32 (IMPLICIT_DEF)),
2055 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2057 def : Pat<(int_x86_sse_rcp_ss sse_load_f32:$src),
2058 (VRCPSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2062 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
2063 sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
2064 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
2065 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
2066 sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
2067 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
2069 // Reciprocal approximations. Note that these typically require refinement
2070 // in order to obtain suitable precision.
2071 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
2072 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
2073 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
2074 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
2075 sse1_fp_unop_p<0x53, "rcp", X86frcp>,
2076 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
2078 // There is no f64 version of the reciprocal approximation instructions.
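// Illustrative note (not part of the TableGen definitions): a minimal C sketch,
// assuming <xmmintrin.h>, of the refinement the comment above refers to. One
// Newton-Raphson step applied to the ~12-bit RCPPS estimate roughly doubles the
// number of correct bits. The helper name is made up.
//   #include <xmmintrin.h>
//   __m128 recip_refined(__m128 a) {
//     __m128 x = _mm_rcp_ps(a);                 // ~12-bit estimate of 1/a
//     __m128 two = _mm_set1_ps(2.0f);
//     return _mm_mul_ps(x, _mm_sub_ps(two, _mm_mul_ps(a, x)));  // x*(2 - a*x)
//   }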
2080 //===----------------------------------------------------------------------===//
2081 // SSE 1 & 2 - Non-temporal stores
2082 //===----------------------------------------------------------------------===//
2084 let AddedComplexity = 400 in { // Prefer non-temporal versions
2085 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
2086 (ins f128mem:$dst, VR128:$src),
2087 "movntps\t{$src, $dst|$dst, $src}",
2088 [(alignednontemporalstore (v4f32 VR128:$src),
2090 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
2091 (ins f128mem:$dst, VR128:$src),
2092 "movntpd\t{$src, $dst|$dst, $src}",
2093 [(alignednontemporalstore (v2f64 VR128:$src),
2095 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
2096 (ins f128mem:$dst, VR128:$src),
2097 "movntdq\t{$src, $dst|$dst, $src}",
2098 [(alignednontemporalstore (v2f64 VR128:$src),
2101 let ExeDomain = SSEPackedInt in
2102 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
2103 (ins f128mem:$dst, VR128:$src),
2104 "movntdq\t{$src, $dst|$dst, $src}",
2105 [(alignednontemporalstore (v4f32 VR128:$src),
2108 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
2109 (VMOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasAVX]>;
2111 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
2112 (ins f256mem:$dst, VR256:$src),
2113 "movntps\t{$src, $dst|$dst, $src}",
2114 [(alignednontemporalstore (v8f32 VR256:$src),
2116 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
2117 (ins f256mem:$dst, VR256:$src),
2118 "movntpd\t{$src, $dst|$dst, $src}",
2119 [(alignednontemporalstore (v4f64 VR256:$src),
2121 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
2122 (ins f256mem:$dst, VR256:$src),
2123 "movntdq\t{$src, $dst|$dst, $src}",
2124 [(alignednontemporalstore (v4f64 VR256:$src),
2126 let ExeDomain = SSEPackedInt in
2127 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
2128 (ins f256mem:$dst, VR256:$src),
2129 "movntdq\t{$src, $dst|$dst, $src}",
2130 [(alignednontemporalstore (v8f32 VR256:$src),
2134 def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
2135 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
2136 def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
2137 (VMOVNTPDYmr addr:$dst, VR256:$src)>;
2138 def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
2139 (VMOVNTPSYmr addr:$dst, VR256:$src)>;
2141 let AddedComplexity = 400 in { // Prefer non-temporal versions
2142 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2143 "movntps\t{$src, $dst|$dst, $src}",
2144 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2145 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2146 "movntpd\t{$src, $dst|$dst, $src}",
2147 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
2149 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2150 "movntdq\t{$src, $dst|$dst, $src}",
2151 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2153 let ExeDomain = SSEPackedInt in
2154 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2155 "movntdq\t{$src, $dst|$dst, $src}",
2156 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2158 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
2159 (MOVNTDQmr addr:$dst, VR128:$src)>;
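// Illustrative note (not part of the TableGen definitions): a minimal C sketch,
// assuming <xmmintrin.h> and a 16-byte aligned destination, of how the
// non-temporal stores above are typically used. The helper name is made up.
//   #include <xmmintrin.h>
//   void fill_stream(float *dst, __m128 v, int n) {
//     for (int i = 0; i + 4 <= n; i += 4)
//       _mm_stream_ps(dst + i, v);   // movntps: stores bypass the caches
//     _mm_sfence();                  // order the streaming stores
//   }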
2161 // There is no AVX form for instructions below this point
2162 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2163 "movnti{l}\t{$src, $dst|$dst, $src}",
2164 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2165 TB, Requires<[HasSSE2]>;
2166 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2167 "movnti{q}\t{$src, $dst|$dst, $src}",
2168 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2169 TB, Requires<[HasSSE2]>;
2172 //===----------------------------------------------------------------------===//
2173 // SSE 1 & 2 - Misc Instructions (No AVX form)
2174 //===----------------------------------------------------------------------===//
2176 // Prefetch intrinsic.
2177 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2178 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))]>;
2179 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2180 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))]>;
2181 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2182 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))]>;
2183 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2184 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))]>;
2186 // Load, store, and memory fence
2187 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2188 TB, Requires<[HasSSE1]>;
2189 def : Pat<(X86SFence), (SFENCE)>;
2191 // Alias instructions that map zero vector to pxor / xorp* for sse.
2192 // We set canFoldAsLoad because this can be converted to a constant-pool
2193 // load of an all-zeros value if folding it would be beneficial.
2194 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2195 // JIT implementation, which does not expand the instructions below the way
2196 // X86MCInstLower does.
2197 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2198 isCodeGenOnly = 1 in {
2199 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2200 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
2201 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2202 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
2203 let ExeDomain = SSEPackedInt in
2204 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2205 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2208 // The same as above, but for AVX. The 128-bit versions are identical, just
2209 // re-encoded. There is no 256-bit PI version, and none is needed: on Sandy
2210 // Bridge the register is zeroed at the rename stage without using any
2211 // execution unit, so SET0PSY and SET0PDY can be used for vector integer
2212 // instructions without penalty.
2213 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2214 // JIT implementation, which does not expand the instructions below the way
2215 // X86MCInstLower does.
2216 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2217 isCodeGenOnly = 1, Predicates = [HasAVX] in {
2218 def AVX_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2219 [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
2220 def AVX_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2221 [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
2222 def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2223 [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
2224 def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2225 [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
2226 let ExeDomain = SSEPackedInt in
2227 def AVX_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2228 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2231 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
2232 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
2233 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
2235 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2236 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
2238 // AVX has no support for 256-bit integer instructions, but since the 128-bit
2239 // VPXOR instruction zeroes the upper half of its destination, it is safe to use it to build zeros.
2240 def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;
2241 def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
2242 (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;
2244 def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
2245 def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
2246 (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
2248 //===----------------------------------------------------------------------===//
2249 // SSE 1 & 2 - Load/Store XCSR register
2250 //===----------------------------------------------------------------------===//
2252 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2253 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2254 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2255 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2257 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2258 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2259 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2260 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2262 //===---------------------------------------------------------------------===//
2263 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2264 //===---------------------------------------------------------------------===//
2266 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2268 let neverHasSideEffects = 1 in {
2269 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2270 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2271 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2272 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2274 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2275 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2276 def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2277 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2279 let canFoldAsLoad = 1, mayLoad = 1 in {
2280 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2281 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2282 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2283 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2284 let Predicates = [HasAVX] in {
2285 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2286 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2287 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2288 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2292 let mayStore = 1 in {
2293 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2294 (ins i128mem:$dst, VR128:$src),
2295 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2296 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
2297 (ins i256mem:$dst, VR256:$src),
2298 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2299 let Predicates = [HasAVX] in {
2300 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2301 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2302 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
2303 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2307 let neverHasSideEffects = 1 in
2308 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2309 "movdqa\t{$src, $dst|$dst, $src}", []>;
2311 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2312 "movdqu\t{$src, $dst|$dst, $src}",
2313 []>, XS, Requires<[HasSSE2]>;
2315 let canFoldAsLoad = 1, mayLoad = 1 in {
2316 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2317 "movdqa\t{$src, $dst|$dst, $src}",
2318 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2319 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2320 "movdqu\t{$src, $dst|$dst, $src}",
2321 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2322 XS, Requires<[HasSSE2]>;
2325 let mayStore = 1 in {
2326 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2327 "movdqa\t{$src, $dst|$dst, $src}",
2328 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2329 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2330 "movdqu\t{$src, $dst|$dst, $src}",
2331 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2332 XS, Requires<[HasSSE2]>;
2335 // Intrinsic forms of MOVDQU load and store
2336 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2337 "vmovdqu\t{$src, $dst|$dst, $src}",
2338 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2339 XS, VEX, Requires<[HasAVX]>;
2341 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2342 "movdqu\t{$src, $dst|$dst, $src}",
2343 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2344 XS, Requires<[HasSSE2]>;
2346 } // ExeDomain = SSEPackedInt
2348 def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
2349 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
2350 (VMOVDQUYmr addr:$dst, VR256:$src)>;
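// Illustrative note (not part of the TableGen definitions): a minimal C sketch,
// assuming <emmintrin.h>, of the aligned vs. unaligned integer loads defined in
// this section. The helper name is made up.
//   #include <emmintrin.h>
//   __m128i load_both(const __m128i *aligned, const void *unaligned) {
//     __m128i x = _mm_load_si128(aligned);                      // movdqa: 16-byte aligned
//     __m128i y = _mm_loadu_si128((const __m128i *)unaligned);  // movdqu: any alignment
//     return _mm_add_epi32(x, y);
//   }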
2352 //===---------------------------------------------------------------------===//
2353 // SSE2 - Packed Integer Arithmetic Instructions
2354 //===---------------------------------------------------------------------===//
2356 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2358 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
2359 bit IsCommutable = 0, bit Is2Addr = 1> {
2360 let isCommutable = IsCommutable in
2361 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2362 (ins VR128:$src1, VR128:$src2),
2364 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2365 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2366 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2367 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2368 (ins VR128:$src1, i128mem:$src2),
2370 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2371 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2372 [(set VR128:$dst, (IntId VR128:$src1,
2373 (bitconvert (memopv2i64 addr:$src2))))]>;
2376 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
2377 string OpcodeStr, Intrinsic IntId,
2378 Intrinsic IntId2, bit Is2Addr = 1> {
2379 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2380 (ins VR128:$src1, VR128:$src2),
2382 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2383 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2384 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2385 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2386 (ins VR128:$src1, i128mem:$src2),
2388 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2389 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2390 [(set VR128:$dst, (IntId VR128:$src1,
2391 (bitconvert (memopv2i64 addr:$src2))))]>;
2392 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
2393 (ins VR128:$src1, i32i8imm:$src2),
2395 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2396 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2397 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
2400 /// PDI_binop_rm - Simple SSE2 binary operator.
2401 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2402 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
2403 let isCommutable = IsCommutable in
2404 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2405 (ins VR128:$src1, VR128:$src2),
2407 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2408 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2409 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
2410 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2411 (ins VR128:$src1, i128mem:$src2),
2413 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2414 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2415 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
2416 (bitconvert (memopv2i64 addr:$src2)))))]>;
2419 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
2421 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
2422 /// to collapse (bitconvert VT to VT) into its operand.
2424 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
2425 bit IsCommutable = 0, bit Is2Addr = 1> {
2426 let isCommutable = IsCommutable in
2427 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2428 (ins VR128:$src1, VR128:$src2),
2430 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2431 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2432 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
2433 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2434 (ins VR128:$src1, i128mem:$src2),
2436 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2437 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2438 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
2441 } // ExeDomain = SSEPackedInt
2443 // 128-bit Integer Arithmetic
2445 let Predicates = [HasAVX] in {
2446 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
2447 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
2448 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
2449 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
2450 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
2451 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
2452 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
2453 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
2454 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
2457 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
2459 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
2461 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
2463 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
2465 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
2467 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
2469 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
2471 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
2473 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
2475 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
2477 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
2479 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
2481 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
2483 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
2485 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
2487 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
2489 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
2491 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
2493 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
2497 let Constraints = "$src1 = $dst" in {
2498 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
2499 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
2500 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
2501 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
2502 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
2503 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
2504 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
2505 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
2506 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
2509 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
2510 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
2511 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
2512 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
2513 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
2514 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
2515 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
2516 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
2517 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
2518 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
2519 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
2520 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
2521 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
2522 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
2523 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
2524 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
2525 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
2526 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
2527 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
2529 } // Constraints = "$src1 = $dst"
2531 //===---------------------------------------------------------------------===//
2532 // SSE2 - Packed Integer Logical Instructions
2533 //===---------------------------------------------------------------------===//
2535 let Predicates = [HasAVX] in {
2536 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
2537 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
2539 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
2540 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
2542 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
2543 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
2546 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
2547 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
2549 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
2550 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
2552 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
2553 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
2556 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
2557 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
2559 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
2560 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
2563 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
2564 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
2565 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
2567 let ExeDomain = SSEPackedInt in {
2568 let neverHasSideEffects = 1 in {
2569 // 128-bit logical shifts.
2570 def VPSLLDQri : PDIi8<0x73, MRM7r,
2571 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2572 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2574 def VPSRLDQri : PDIi8<0x73, MRM3r,
2575 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2576 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2578 // PSRADQri doesn't exist in SSE[1-3].
2580 def VPANDNrr : PDI<0xDF, MRMSrcReg,
2581 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2582 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2584 (v2i64 (X86andnp VR128:$src1, VR128:$src2)))]>,VEX_4V;
2586 def VPANDNrm : PDI<0xDF, MRMSrcMem,
2587 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2588 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2589 [(set VR128:$dst, (X86andnp VR128:$src1,
2590 (memopv2i64 addr:$src2)))]>, VEX_4V;
2594 let Constraints = "$src1 = $dst" in {
2595 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
2596 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
2597 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
2598 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
2599 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
2600 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
2602 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
2603 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
2604 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
2605 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
2606 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
2607 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
2609 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
2610 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
2611 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
2612 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
2614 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
2615 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
2616 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
2618 let ExeDomain = SSEPackedInt in {
2619 let neverHasSideEffects = 1 in {
2620 // 128-bit logical shifts.
2621 def PSLLDQri : PDIi8<0x73, MRM7r,
2622 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2623 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
2624 def PSRLDQri : PDIi8<0x73, MRM3r,
2625 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2626 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
2627 // PSRADQri doesn't exist in SSE[1-3].
2629 def PANDNrr : PDI<0xDF, MRMSrcReg,
2630 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2631 "pandn\t{$src2, $dst|$dst, $src2}", []>;
2633 def PANDNrm : PDI<0xDF, MRMSrcMem,
2634 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2635 "pandn\t{$src2, $dst|$dst, $src2}", []>;
2637 } // Constraints = "$src1 = $dst"
2639 let Predicates = [HasAVX] in {
2640 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2641 (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2642 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2643 (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2644 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2645 (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
2646 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2647 (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
2648 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2649 (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2651 // Shift up / down and insert zeros.
2652 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2653 (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2654 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2655 (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2658 let Predicates = [HasSSE2] in {
2659 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2660 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2661 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2662 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2663 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2664 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
2665 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2666 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
2667 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2668 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2670 // Shift up / down and insert zeros.
2671 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2672 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2673 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2674 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
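// Illustrative only (assumes the standard <immintrin.h> intrinsics, which are
// not defined in this file): the whole-register byte shifts selected by the
// PSLLDQ/PSRLDQ patterns above; the count is in bytes and must be a constant.
//
//   #include <immintrin.h>
//
//   __m128i shift_bytes_left (__m128i v) { return _mm_slli_si128(v, 4); } // pslldq $4, zeros shifted in
//   __m128i shift_bytes_right(__m128i v) { return _mm_srli_si128(v, 4); } // psrldq $4, zeros shifted in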
2677 //===---------------------------------------------------------------------===//
2678 // SSE2 - Packed Integer Comparison Instructions
2679 //===---------------------------------------------------------------------===//
2681 let Predicates = [HasAVX] in {
2682 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
2684 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
2686 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
2688 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
2690 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
2692 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
2695 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2696 (VPCMPEQBrr VR128:$src1, VR128:$src2)>;
2697 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2698 (VPCMPEQBrm VR128:$src1, addr:$src2)>;
2699 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2700 (VPCMPEQWrr VR128:$src1, VR128:$src2)>;
2701 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2702 (VPCMPEQWrm VR128:$src1, addr:$src2)>;
2703 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2704 (VPCMPEQDrr VR128:$src1, VR128:$src2)>;
2705 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2706 (VPCMPEQDrm VR128:$src1, addr:$src2)>;
2708 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2709 (VPCMPGTBrr VR128:$src1, VR128:$src2)>;
2710 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2711 (VPCMPGTBrm VR128:$src1, addr:$src2)>;
2712 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2713 (VPCMPGTWrr VR128:$src1, VR128:$src2)>;
2714 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2715 (VPCMPGTWrm VR128:$src1, addr:$src2)>;
2716 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2717 (VPCMPGTDrr VR128:$src1, VR128:$src2)>;
2718 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2719 (VPCMPGTDrm VR128:$src1, addr:$src2)>;
2722 let Constraints = "$src1 = $dst" in {
2723 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
2724 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
2725 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
2726 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
2727 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
2728 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
2729 } // Constraints = "$src1 = $dst"
2731 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2732 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
2733 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2734 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2735 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2736 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2737 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2738 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2739 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2740 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2741 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2742 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2744 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2745 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2746 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2747 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2748 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2749 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2750 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2751 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2752 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2753 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2754 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2755 (PCMPGTDrm VR128:$src1, addr:$src2)>;
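// Illustrative only (standard <immintrin.h> intrinsics assumed): the compare
// forms matched above return a per-element mask of all ones (true) or all
// zeros (false), e.g.
//
//   #include <immintrin.h>
//
//   __m128i byte_eq_mask(__m128i a, __m128i b) { return _mm_cmpeq_epi8(a, b);  } // pcmpeqb
//   __m128i word_gt_mask(__m128i a, __m128i b) { return _mm_cmpgt_epi16(a, b); } // pcmpgtw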
2757 //===---------------------------------------------------------------------===//
2758 // SSE2 - Packed Integer Pack Instructions
2759 //===---------------------------------------------------------------------===//
2761 let Predicates = [HasAVX] in {
2762 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
2764 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
2766 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
2770 let Constraints = "$src1 = $dst" in {
2771 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2772 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2773 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2774 } // Constraints = "$src1 = $dst"
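// Illustrative only (standard <immintrin.h> intrinsics assumed): the pack
// instructions narrow two vectors of words into one vector of bytes, with
// signed or unsigned saturation.
//
//   #include <immintrin.h>
//
//   __m128i narrow_signed  (__m128i lo, __m128i hi) { return _mm_packs_epi16(lo, hi);  } // packsswb
//   __m128i narrow_unsigned(__m128i lo, __m128i hi) { return _mm_packus_epi16(lo, hi); } // packuswb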
2776 //===---------------------------------------------------------------------===//
2777 // SSE2 - Packed Integer Shuffle Instructions
2778 //===---------------------------------------------------------------------===//
2780 let ExeDomain = SSEPackedInt in {
2781 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
2783 def ri : Ii8<0x70, MRMSrcReg,
2784 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2785 !strconcat(OpcodeStr,
2786 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2787 [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
2789 def mi : Ii8<0x70, MRMSrcMem,
2790 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2791 !strconcat(OpcodeStr,
2792 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2793 [(set VR128:$dst, (vt (pshuf_frag:$src2
2794 (bc_frag (memopv2i64 addr:$src1)),
2797 } // ExeDomain = SSEPackedInt
2799 let Predicates = [HasAVX] in {
2800 let AddedComplexity = 5 in
2801 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
2804 // SSE2 with ImmT == Imm8 and XS prefix.
2805 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
2808 // SSE2 with ImmT == Imm8 and XD prefix.
2809 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
2813 let Predicates = [HasSSE2] in {
2814 let AddedComplexity = 5 in
2815 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
2817 // SSE2 with ImmT == Imm8 and XS prefix.
2818 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
2820 // SSE2 with ImmT == Imm8 and XD prefix.
2821 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
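// Illustrative only (standard <immintrin.h> intrinsics assumed): PSHUFD reorders
// the four dwords according to a 2-bit-per-lane immediate, e.g. broadcasting
// element 0.
//
//   #include <immintrin.h>
//
//   __m128i broadcast_lane0(__m128i v) {
//     return _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 0, 0, 0));   // pshufd $0
//   }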
2824 //===---------------------------------------------------------------------===//
2825 // SSE2 - Packed Integer Unpack Instructions
2826 //===---------------------------------------------------------------------===//
2828 let ExeDomain = SSEPackedInt in {
2829 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
2830 SDNode OpNode, PatFrag bc_frag, bit Is2Addr = 1> {
2831 def rr : PDI<opc, MRMSrcReg,
2832 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2834 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2835 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2836 [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))]>;
2837 def rm : PDI<opc, MRMSrcMem,
2838 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2840 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2841 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2842 [(set VR128:$dst, (OpNode VR128:$src1,
2843 (bc_frag (memopv2i64
2847 let Predicates = [HasAVX] in {
2848 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Punpcklbw,
2849 bc_v16i8, 0>, VEX_4V;
2850 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Punpcklwd,
2851 bc_v8i16, 0>, VEX_4V;
2852 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Punpckldq,
2853 bc_v4i32, 0>, VEX_4V;
2855 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2856 /// knew to collapse (bitconvert VT to VT) into its operand.
2857 def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2858 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2859 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2860 [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
2861 VR128:$src2)))]>, VEX_4V;
2862 def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2863 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2864 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2865 [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
2866 (memopv2i64 addr:$src2))))]>, VEX_4V;
2868 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Punpckhbw,
2869 bc_v16i8, 0>, VEX_4V;
2870 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Punpckhwd,
2871 bc_v8i16, 0>, VEX_4V;
2872 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Punpckhdq,
2873 bc_v4i32, 0>, VEX_4V;
2875 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2876 /// knew to collapse (bitconvert VT to VT) into its operand.
2877 def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2878 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2879 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2880 [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
2881 VR128:$src2)))]>, VEX_4V;
2882 def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2883 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2884 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2885 [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
2886 (memopv2i64 addr:$src2))))]>, VEX_4V;
2889 let Constraints = "$src1 = $dst" in {
2890 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Punpcklbw, bc_v16i8>;
2891 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Punpcklwd, bc_v8i16>;
2892 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Punpckldq, bc_v4i32>;
2894 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2895 /// knew to collapse (bitconvert VT to VT) into its operand.
2896 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2897 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2898 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2900 (v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)))]>;
2901 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2902 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2903 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2905 (v2i64 (X86Punpcklqdq VR128:$src1,
2906 (memopv2i64 addr:$src2))))]>;
2908 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Punpckhbw, bc_v16i8>;
2909 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Punpckhwd, bc_v8i16>;
2910 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Punpckhdq, bc_v4i32>;
2912 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2913 /// knew to collapse (bitconvert VT to VT) into its operand.
2914 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2915 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2916 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2918 (v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)))]>;
2919 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2920 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2921 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2923 (v2i64 (X86Punpckhqdq VR128:$src1,
2924 (memopv2i64 addr:$src2))))]>;
2927 } // ExeDomain = SSEPackedInt
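// Illustrative only (standard <immintrin.h> intrinsics assumed): the unpack
// instructions interleave the low (or high) halves of their two sources.
//
//   #include <immintrin.h>
//
//   __m128i interleave_low_bytes(__m128i a, __m128i b) {
//     return _mm_unpacklo_epi8(a, b);   // punpcklbw: a0,b0,a1,b1,...,a7,b7
//   }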
2929 //===---------------------------------------------------------------------===//
2930 // SSE2 - Packed Integer Extract and Insert
2931 //===---------------------------------------------------------------------===//
2933 let ExeDomain = SSEPackedInt in {
2934 multiclass sse2_pinsrw<bit Is2Addr = 1> {
2935 def rri : Ii8<0xC4, MRMSrcReg,
2936 (outs VR128:$dst), (ins VR128:$src1,
2937 GR32:$src2, i32i8imm:$src3),
2939 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2940 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2942 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
2943 def rmi : Ii8<0xC4, MRMSrcMem,
2944 (outs VR128:$dst), (ins VR128:$src1,
2945 i16mem:$src2, i32i8imm:$src3),
2947 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2948 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2950 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2955 let Predicates = [HasAVX] in
2956 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
2957 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2958 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2959 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2960 imm:$src2))]>, OpSize, VEX;
2961 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2962 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2963 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2964 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2968 let Predicates = [HasAVX] in {
2969 defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
2970 def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
2971 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
2972 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2973 []>, OpSize, VEX_4V;
2976 let Constraints = "$src1 = $dst" in
2977 defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
2979 } // ExeDomain = SSEPackedInt
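// Illustrative only (standard <immintrin.h> intrinsics assumed): PINSRW/PEXTRW
// move a single 16-bit lane between a general-purpose and an XMM register.
//
//   #include <immintrin.h>
//
//   __m128i set_lane3(__m128i v, int x) { return _mm_insert_epi16(v, x, 3); } // pinsrw $3
//   int     get_lane3(__m128i v)        { return _mm_extract_epi16(v, 3);   } // pextrw $3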
2981 //===---------------------------------------------------------------------===//
2982 // SSE2 - Packed Mask Creation
2983 //===---------------------------------------------------------------------===//
2985 let ExeDomain = SSEPackedInt in {
2987 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2988 "pmovmskb\t{$src, $dst|$dst, $src}",
2989 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
2990 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2991 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
2992 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2993 "pmovmskb\t{$src, $dst|$dst, $src}",
2994 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2996 } // ExeDomain = SSEPackedInt
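// Illustrative only (standard <immintrin.h> intrinsic assumed): PMOVMSKB packs
// the sign bit of each byte into the low 16 bits of a general-purpose register.
//
//   #include <immintrin.h>
//
//   int byte_sign_mask(__m128i v) { return _mm_movemask_epi8(v); } // pmovmskb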
2998 //===---------------------------------------------------------------------===//
2999 // SSE2 - Conditional Store
3000 //===---------------------------------------------------------------------===//
3002 let ExeDomain = SSEPackedInt in {
3005 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
3006 (ins VR128:$src, VR128:$mask),
3007 "maskmovdqu\t{$mask, $src|$src, $mask}",
3008 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
3010 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
3011 (ins VR128:$src, VR128:$mask),
3012 "maskmovdqu\t{$mask, $src|$src, $mask}",
3013 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
3016 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
3017 "maskmovdqu\t{$mask, $src|$src, $mask}",
3018 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
3020 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
3021 "maskmovdqu\t{$mask, $src|$src, $mask}",
3022 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
3024 } // ExeDomain = SSEPackedInt
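// Illustrative only (standard <immintrin.h> intrinsic assumed): MASKMOVDQU
// stores only the bytes of $src whose mask byte has its high bit set, to the
// address implicitly held in (E/R)DI.
//
//   #include <immintrin.h>
//
//   void masked_store(__m128i data, __m128i mask, char *p) {
//     _mm_maskmoveu_si128(data, mask, p);   // maskmovdqu
//   }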
3026 //===---------------------------------------------------------------------===//
3027 // SSE2 - Move Doubleword
3028 //===---------------------------------------------------------------------===//
3030 //===---------------------------------------------------------------------===//
3031 // Move Int Doubleword to Packed Double Int
3033 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3034 "movd\t{$src, $dst|$dst, $src}",
3036 (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
3037 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3038 "movd\t{$src, $dst|$dst, $src}",
3040 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
3042 def VMOV64toPQIrr : VRPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3043 "mov{d|q}\t{$src, $dst|$dst, $src}",
3045 (v2i64 (scalar_to_vector GR64:$src)))]>, VEX;
3046 def VMOV64toSDrr : VRPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
3047 "mov{d|q}\t{$src, $dst|$dst, $src}",
3048 [(set FR64:$dst, (bitconvert GR64:$src))]>, VEX;
3050 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3051 "movd\t{$src, $dst|$dst, $src}",
3053 (v4i32 (scalar_to_vector GR32:$src)))]>;
3054 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3055 "movd\t{$src, $dst|$dst, $src}",
3057 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
3058 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3059 "mov{d|q}\t{$src, $dst|$dst, $src}",
3061 (v2i64 (scalar_to_vector GR64:$src)))]>;
3062 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
3063 "mov{d|q}\t{$src, $dst|$dst, $src}",
3064 [(set FR64:$dst, (bitconvert GR64:$src))]>;
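// Illustrative only (standard <immintrin.h> intrinsics assumed): MOVD moves a
// 32-bit GPR to or from the low dword of an XMM register, zeroing the upper
// lanes on the way in.
//
//   #include <immintrin.h>
//
//   __m128i gpr_to_xmm(int x)     { return _mm_cvtsi32_si128(x); } // movd r32 -> xmm
//   int     xmm_to_gpr(__m128i v) { return _mm_cvtsi128_si32(v); } // movd xmm -> r32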
3066 //===---------------------------------------------------------------------===//
3067 // Move Int Doubleword to Single Scalar
3069 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
3070 "movd\t{$src, $dst|$dst, $src}",
3071 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
3073 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
3074 "movd\t{$src, $dst|$dst, $src}",
3075 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
3077 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
3078 "movd\t{$src, $dst|$dst, $src}",
3079 [(set FR32:$dst, (bitconvert GR32:$src))]>;
3081 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
3082 "movd\t{$src, $dst|$dst, $src}",
3083 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
3085 //===---------------------------------------------------------------------===//
3086 // Move Packed Doubleword Int to Packed Double Int
3088 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
3089 "movd\t{$src, $dst|$dst, $src}",
3090 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
3092 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
3093 (ins i32mem:$dst, VR128:$src),
3094 "movd\t{$src, $dst|$dst, $src}",
3095 [(store (i32 (vector_extract (v4i32 VR128:$src),
3096 (iPTR 0))), addr:$dst)]>, VEX;
3097 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
3098 "movd\t{$src, $dst|$dst, $src}",
3099 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
3101 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
3102 "movd\t{$src, $dst|$dst, $src}",
3103 [(store (i32 (vector_extract (v4i32 VR128:$src),
3104 (iPTR 0))), addr:$dst)]>;
3106 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3107 "mov{d|q}\t{$src, $dst|$dst, $src}",
3108 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
3110 def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
3111 "movq\t{$src, $dst|$dst, $src}",
3112 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
3114 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
3115 "mov{d|q}\t{$src, $dst|$dst, $src}",
3116 [(set GR64:$dst, (bitconvert FR64:$src))]>;
3117 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
3118 "movq\t{$src, $dst|$dst, $src}",
3119 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
3121 //===---------------------------------------------------------------------===//
3122 // Move Scalar Single to Double Int
3124 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3125 "movd\t{$src, $dst|$dst, $src}",
3126 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
3127 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3128 "movd\t{$src, $dst|$dst, $src}",
3129 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
3130 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3131 "movd\t{$src, $dst|$dst, $src}",
3132 [(set GR32:$dst, (bitconvert FR32:$src))]>;
3133 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3134 "movd\t{$src, $dst|$dst, $src}",
3135 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
3137 //===---------------------------------------------------------------------===//
3138 // Patterns and instructions to describe movd/movq to XMM register zero-extends
3140 let AddedComplexity = 15 in {
3141 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3142 "movd\t{$src, $dst|$dst, $src}",
3143 [(set VR128:$dst, (v4i32 (X86vzmovl
3144 (v4i32 (scalar_to_vector GR32:$src)))))]>,
3146 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3147 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3148 [(set VR128:$dst, (v2i64 (X86vzmovl
3149 (v2i64 (scalar_to_vector GR64:$src)))))]>,
3152 let AddedComplexity = 15 in {
3153 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3154 "movd\t{$src, $dst|$dst, $src}",
3155 [(set VR128:$dst, (v4i32 (X86vzmovl
3156 (v4i32 (scalar_to_vector GR32:$src)))))]>;
3157 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3158 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3159 [(set VR128:$dst, (v2i64 (X86vzmovl
3160 (v2i64 (scalar_to_vector GR64:$src)))))]>;
3163 let AddedComplexity = 20 in {
3164 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3165 "movd\t{$src, $dst|$dst, $src}",
3167 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3168 (loadi32 addr:$src))))))]>,
3170 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3171 "movd\t{$src, $dst|$dst, $src}",
3173 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3174 (loadi32 addr:$src))))))]>;
3176 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
3177 (MOVZDI2PDIrm addr:$src)>;
3178 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3179 (MOVZDI2PDIrm addr:$src)>;
3180 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3181 (MOVZDI2PDIrm addr:$src)>;
3184 // AVX 128-bit movd/movq instructions write zeros in the high 128 bits.
3185 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
3186 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
3187 (v4i32 (scalar_to_vector GR32:$src)), (i32 0)))),
3188 (SUBREG_TO_REG (i32 0), (VMOVZDI2PDIrr GR32:$src), sub_xmm)>;
3189 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
3190 (v2i64 (scalar_to_vector GR64:$src)), (i32 0)))),
3191 (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
3193 // These are the correct encodings of the instructions so that we know how to
3194 // read correct assembly, even though we continue to emit the wrong ones for
3195 // compatibility with Darwin's buggy assembler.
3196 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3197 (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
3198 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3199 (MOV64toSDrr FR64:$dst, GR64:$src), 0>;
3200 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3201 (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
3202 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3203 (MOVSDto64rr GR64:$dst, FR64:$src), 0>;
3204 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3205 (VMOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
3206 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3207 (MOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
3209 //===---------------------------------------------------------------------===//
3210 // SSE2 - Move Quadword
3211 //===---------------------------------------------------------------------===//
3213 //===---------------------------------------------------------------------===//
3214 // Move Quadword Int to Packed Quadword Int
3216 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3217 "vmovq\t{$src, $dst|$dst, $src}",
3219 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3220 VEX, Requires<[HasAVX]>;
3221 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3222 "movq\t{$src, $dst|$dst, $src}",
3224 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3225 Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
3227 //===---------------------------------------------------------------------===//
3228 // Move Packed Quadword Int to Quadword Int
3230 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3231 "movq\t{$src, $dst|$dst, $src}",
3232 [(store (i64 (vector_extract (v2i64 VR128:$src),
3233 (iPTR 0))), addr:$dst)]>, VEX;
3234 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3235 "movq\t{$src, $dst|$dst, $src}",
3236 [(store (i64 (vector_extract (v2i64 VR128:$src),
3237 (iPTR 0))), addr:$dst)]>;
3239 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
3240 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
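// Illustrative only (standard <immintrin.h> intrinsics assumed): the MOVQ
// load/store forms move the low 64 bits of an XMM register.
//
//   #include <immintrin.h>
//
//   __m128i load_low64 (const __m128i *p)      { return _mm_loadl_epi64(p); } // movq mem -> xmm, upper zeroed
//   void    store_low64(__m128i *p, __m128i v) { _mm_storel_epi64(p, v);    } // movq xmm -> mem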
3242 //===---------------------------------------------------------------------===//
3243 // Store / copy the lower 64 bits of an XMM register.
3245 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3246 "movq\t{$src, $dst|$dst, $src}",
3247 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
3248 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3249 "movq\t{$src, $dst|$dst, $src}",
3250 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
3252 let AddedComplexity = 20 in
3253 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3254 "vmovq\t{$src, $dst|$dst, $src}",
3256 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3257 (loadi64 addr:$src))))))]>,
3258 XS, VEX, Requires<[HasAVX]>;
3260 let AddedComplexity = 20 in {
3261 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3262 "movq\t{$src, $dst|$dst, $src}",
3264 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3265 (loadi64 addr:$src))))))]>,
3266 XS, Requires<[HasSSE2]>;
3268 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3269 (MOVZQI2PQIrm addr:$src)>;
3270 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
3271 (MOVZQI2PQIrm addr:$src)>;
3272 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
3275 //===---------------------------------------------------------------------===//
3276 // Move from XMM to XMM and clear the upper 64 bits. Note that there is a bug
3277 // in the IA-32 documentation: movq xmm1, xmm2 does clear the high bits.
3279 let AddedComplexity = 15 in
3280 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3281 "vmovq\t{$src, $dst|$dst, $src}",
3282 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3283 XS, VEX, Requires<[HasAVX]>;
3284 let AddedComplexity = 15 in
3285 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3286 "movq\t{$src, $dst|$dst, $src}",
3287 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3288 XS, Requires<[HasSSE2]>;
3290 let AddedComplexity = 20 in
3291 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3292 "vmovq\t{$src, $dst|$dst, $src}",
3293 [(set VR128:$dst, (v2i64 (X86vzmovl
3294 (loadv2i64 addr:$src))))]>,
3295 XS, VEX, Requires<[HasAVX]>;
3296 let AddedComplexity = 20 in {
3297 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3298 "movq\t{$src, $dst|$dst, $src}",
3299 [(set VR128:$dst, (v2i64 (X86vzmovl
3300 (loadv2i64 addr:$src))))]>,
3301 XS, Requires<[HasSSE2]>;
3303 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
3304 (MOVZPQILo2PQIrm addr:$src)>;
3307 // Instructions to match in the assembler
3308 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3309 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3310 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3311 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3312 // Recognize "movd" with GR64 destination, but encode as a "movq"
3313 def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3314 "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3316 // Instructions for the disassembler
3317 // xr = XMM register
3320 let Predicates = [HasAVX] in
3321 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3322 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3323 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3324 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3326 //===---------------------------------------------------------------------===//
3327 // SSE2 - Misc Instructions
3328 //===---------------------------------------------------------------------===//
3331 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3332 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3333 TB, Requires<[HasSSE2]>;
3335 // Load, store, and memory fence
3336 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3337 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3338 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3339 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3340 def : Pat<(X86LFence), (LFENCE)>;
3341 def : Pat<(X86MFence), (MFENCE)>;
3344 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3345 // was introduced with SSE2, it's backward compatible.
3346 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
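// Illustrative only (standard <immintrin.h> intrinsics assumed): the usual C++
// entry points for the instructions above.
//
//   #include <immintrin.h>
//
//   void flush_line(const void *p) { _mm_clflush(p); } // clflush
//   void load_fence()              { _mm_lfence();    } // lfence
//   void full_fence()              { _mm_mfence();    } // mfence
//   void spin_hint()               { _mm_pause();     } // pause (rep; nop)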
3348 // Alias instructions that materialize an all-ones vector using pcmpeqd.
3349 // We set canFoldAsLoad because this can be converted to a constant-pool
3350 // load of an all-ones value if folding it would be beneficial.
3351 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
3352 // JIT implementation, which does not expand the instructions below like
3353 // X86MCInstLower does.
3354 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3355 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3356 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3357 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
3358 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3359 isCodeGenOnly = 1, ExeDomain = SSEPackedInt, Predicates = [HasAVX] in
3360 def AVX_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3361 [(set VR128:$dst, (v4i32 immAllOnesV))]>, VEX_4V;
3363 //===---------------------------------------------------------------------===//
3364 // SSE3 - Conversion Instructions
3365 //===---------------------------------------------------------------------===//
3367 // Convert Packed Double FP to Packed DW Integers
3368 let Predicates = [HasAVX] in {
3369 // The assembler can recognize rr 256-bit instructions by seeing a ymm
3370 // register, but the same isn't true when using memory operands instead.
3371 // Provide other assembly rr and rm forms to address this explicitly.
3372 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3373 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3374 def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3375 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3378 def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3379 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3380 def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3381 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3384 def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3385 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
3386 def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
3387 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
3390 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3391 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3392 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3393 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3395 def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
3396 (VCVTPD2DQYrr VR256:$src)>;
3397 def : Pat<(v4i32 (fp_to_sint (memopv4f64 addr:$src))),
3398 (VCVTPD2DQYrm addr:$src)>;
3400 // Convert Packed DW Integers to Packed Double FP
3401 let Predicates = [HasAVX] in {
3402 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3403 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3404 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3405 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3406 def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
3407 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3408 def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
3409 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3412 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3413 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3414 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3415 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3417 // AVX 256-bit register conversion intrinsics
3418 def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
3419 (VCVTDQ2PDYrr VR128:$src)>;
3420 def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
3421 (VCVTDQ2PDYrm addr:$src)>;
3423 def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
3424 (VCVTPD2DQYrr VR256:$src)>;
3425 def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
3426 (VCVTPD2DQYrm addr:$src)>;
3428 def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
3429 (VCVTDQ2PDYrr VR128:$src)>;
3430 def : Pat<(v4f64 (sint_to_fp (memopv4i32 addr:$src))),
3431 (VCVTDQ2PDYrm addr:$src)>;
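// Illustrative only (standard <immintrin.h> intrinsics assumed): the 128-bit
// conversions translate between two packed doubles and packed dword integers.
//
//   #include <immintrin.h>
//
//   __m128i pd_to_dq(__m128d v) { return _mm_cvtpd_epi32(v); } // cvtpd2dq, high 64 bits of the result are zero
//   __m128d dq_to_pd(__m128i v) { return _mm_cvtepi32_pd(v); } // cvtdq2pd, converts the low two dwords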
3433 //===---------------------------------------------------------------------===//
3434 // SSE3 - Move Instructions
3435 //===---------------------------------------------------------------------===//
3437 //===---------------------------------------------------------------------===//
3438 // Replicate Single FP - MOVSHDUP and MOVSLDUP
3440 multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
3441 ValueType vt, RegisterClass RC, PatFrag mem_frag,
3442 X86MemOperand x86memop> {
3443 def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
3444 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3445 [(set RC:$dst, (vt (OpNode RC:$src)))]>;
3446 def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
3447 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3448 [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>;
3451 let Predicates = [HasAVX] in {
3452 defm VMOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
3453 v4f32, VR128, memopv4f32, f128mem>, VEX;
3454 defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
3455 v4f32, VR128, memopv4f32, f128mem>, VEX;
3456 defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
3457 v8f32, VR256, memopv8f32, f256mem>, VEX;
3458 defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
3459 v8f32, VR256, memopv8f32, f256mem>, VEX;
3461 defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
3462 memopv4f32, f128mem>;
3463 defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
3464 memopv4f32, f128mem>;
3466 let Predicates = [HasSSE3] in {
3467 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
3468 (MOVSHDUPrr VR128:$src)>;
3469 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
3470 (MOVSHDUPrm addr:$src)>;
3471 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
3472 (MOVSLDUPrr VR128:$src)>;
3473 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
3474 (MOVSLDUPrm addr:$src)>;
3477 let Predicates = [HasAVX] in {
3478 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
3479 (VMOVSHDUPrr VR128:$src)>;
3480 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
3481 (VMOVSHDUPrm addr:$src)>;
3482 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
3483 (VMOVSLDUPrr VR128:$src)>;
3484 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
3485 (VMOVSLDUPrm addr:$src)>;
3486 def : Pat<(v8i32 (X86Movshdup VR256:$src)),
3487 (VMOVSHDUPYrr VR256:$src)>;
3488 def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (memopv4i64 addr:$src)))),
3489 (VMOVSHDUPYrm addr:$src)>;
3490 def : Pat<(v8i32 (X86Movsldup VR256:$src)),
3491 (VMOVSLDUPYrr VR256:$src)>;
3492 def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (memopv4i64 addr:$src)))),
3493 (VMOVSLDUPYrm addr:$src)>;
3496 //===---------------------------------------------------------------------===//
3497 // Replicate Double FP - MOVDDUP
3499 multiclass sse3_replicate_dfp<string OpcodeStr> {
3500 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3501 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3502 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
3503 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
3504 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3506 (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
3510 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
3511 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3512 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3514 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3515 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3519 let Predicates = [HasAVX] in {
3520 // FIXME: Merge above classes when we have patterns for the ymm version
3521 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
3522 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
3524 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
3526 // Move Unaligned Integer
3527 let Predicates = [HasAVX] in {
3528 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3529 "vlddqu\t{$src, $dst|$dst, $src}",
3530 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
3531 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3532 "vlddqu\t{$src, $dst|$dst, $src}",
3533 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
3535 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3536 "lddqu\t{$src, $dst|$dst, $src}",
3537 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
3539 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
3541 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3543 // Additional MOVDDUP patterns that fold loads.
3544 let AddedComplexity = 5 in {
3545 def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
3546 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3547 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3548 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3549 def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
3550 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3551 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
3552 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
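// Illustrative only (standard <immintrin.h> intrinsics assumed): the replicate
// instructions duplicate alternating single-precision lanes or the low double.
//
//   #include <immintrin.h>
//
//   __m128  dup_odd_lanes (__m128 v)  { return _mm_movehdup_ps(v); } // movshdup: v1,v1,v3,v3
//   __m128  dup_even_lanes(__m128 v)  { return _mm_moveldup_ps(v); } // movsldup: v0,v0,v2,v2
//   __m128d dup_low_double(__m128d v) { return _mm_movedup_pd(v);  } // movddup:  v0,v0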
3555 //===---------------------------------------------------------------------===//
3556 // SSE3 - Arithmetic
3557 //===---------------------------------------------------------------------===//
3559 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
3560 X86MemOperand x86memop, bit Is2Addr = 1> {
3561 def rr : I<0xD0, MRMSrcReg,
3562 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3564 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3565 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3566 [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
3567 def rm : I<0xD0, MRMSrcMem,
3568 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3570 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3571 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3572 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
3575 let Predicates = [HasAVX],
3576 ExeDomain = SSEPackedDouble in {
3577 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
3578 f128mem, 0>, TB, XD, VEX_4V;
3579 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
3580 f128mem, 0>, TB, OpSize, VEX_4V;
3581 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
3582 f256mem, 0>, TB, XD, VEX_4V;
3583 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
3584 f256mem, 0>, TB, OpSize, VEX_4V;
3586 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
3587 ExeDomain = SSEPackedDouble in {
3588 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
3590 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
3591 f128mem>, TB, OpSize;
3594 //===---------------------------------------------------------------------===//
3595 // SSE3 Instructions
3596 //===---------------------------------------------------------------------===//
3599 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3600 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3601 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3603 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3604 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3605 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3607 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3609 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3610 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3611 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3613 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3614 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3615 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3617 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3618 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3619 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3621 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3623 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3624 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3625 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3628 let Predicates = [HasAVX] in {
3629 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
3630 int_x86_sse3_hadd_ps, 0>, VEX_4V;
3631 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
3632 int_x86_sse3_hadd_pd, 0>, VEX_4V;
3633 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
3634 int_x86_sse3_hsub_ps, 0>, VEX_4V;
3635 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
3636 int_x86_sse3_hsub_pd, 0>, VEX_4V;
3637 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
3638 int_x86_avx_hadd_ps_256, 0>, VEX_4V;
3639 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
3640 int_x86_avx_hadd_pd_256, 0>, VEX_4V;
3641 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
3642 int_x86_avx_hsub_ps_256, 0>, VEX_4V;
3643 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
3644 int_x86_avx_hsub_pd_256, 0>, VEX_4V;
3647 let Constraints = "$src1 = $dst" in {
3648 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
3649 int_x86_sse3_hadd_ps>;
3650 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
3651 int_x86_sse3_hadd_pd>;
3652 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
3653 int_x86_sse3_hsub_ps>;
3654 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
3655 int_x86_sse3_hsub_pd>;
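// Illustrative only (standard <immintrin.h> intrinsics assumed): ADDSUBPS
// alternates subtract/add across lanes and HADDPS sums adjacent pairs.
//
//   #include <immintrin.h>
//
//   __m128 alt_addsub(__m128 a, __m128 b) { return _mm_addsub_ps(a, b); } // a0-b0, a1+b1, a2-b2, a3+b3
//   __m128 pair_sums (__m128 a, __m128 b) { return _mm_hadd_ps(a, b);   } // a0+a1, a2+a3, b0+b1, b2+b3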
3658 //===---------------------------------------------------------------------===//
3659 // SSSE3 - Packed Absolute Instructions
3660 //===---------------------------------------------------------------------===//
3663 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
3664 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
3665 PatFrag mem_frag128, Intrinsic IntId128> {
3666 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3668 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3669 [(set VR128:$dst, (IntId128 VR128:$src))]>,
3672 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3674 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3677 (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
3680 let Predicates = [HasAVX] in {
3681 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
3682 int_x86_ssse3_pabs_b_128>, VEX;
3683 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
3684 int_x86_ssse3_pabs_w_128>, VEX;
3685 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
3686 int_x86_ssse3_pabs_d_128>, VEX;
3689 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
3690 int_x86_ssse3_pabs_b_128>;
3691 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
3692 int_x86_ssse3_pabs_w_128>;
3693 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
3694 int_x86_ssse3_pabs_d_128>;
3696 //===---------------------------------------------------------------------===//
3697 // SSSE3 - Packed Binary Operator Instructions
3698 //===---------------------------------------------------------------------===//
3700 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
3701 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
3702 PatFrag mem_frag128, Intrinsic IntId128,
3704 let isCommutable = 1 in
3705 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3706 (ins VR128:$src1, VR128:$src2),
3708 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3709 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3710 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3712 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3713 (ins VR128:$src1, i128mem:$src2),
3715 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3716 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3718 (IntId128 VR128:$src1,
3719 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
3722 let Predicates = [HasAVX] in {
3723 let isCommutable = 0 in {
3724 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
3725 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
3726 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
3727 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
3728 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
3729 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
3730 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
3731 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
3732 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
3733 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
3734 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
3735 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
3736 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
3737 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
3738 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
3739 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
3740 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
3741 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
3742 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
3743 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
3744 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
3745 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
3747 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
3748 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
3751 // None of these have i8 immediate fields.
3752 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
3753 let isCommutable = 0 in {
3754 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
3755 int_x86_ssse3_phadd_w_128>;
3756 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
3757 int_x86_ssse3_phadd_d_128>;
3758 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
3759 int_x86_ssse3_phadd_sw_128>;
3760 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
3761 int_x86_ssse3_phsub_w_128>;
3762 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
3763 int_x86_ssse3_phsub_d_128>;
3764 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
3765 int_x86_ssse3_phsub_sw_128>;
3766 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
3767 int_x86_ssse3_pmadd_ub_sw_128>;
3768 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
3769 int_x86_ssse3_pshuf_b_128>;
3770 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
3771 int_x86_ssse3_psign_b_128>;
3772 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
3773 int_x86_ssse3_psign_w_128>;
3774 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
3775 int_x86_ssse3_psign_d_128>;
3777 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
3778 int_x86_ssse3_pmul_hr_sw_128>;
3781 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
3782 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
3783 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
3784 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
3786 def : Pat<(X86psignb VR128:$src1, VR128:$src2),
3787 (PSIGNBrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
3788 def : Pat<(X86psignw VR128:$src1, VR128:$src2),
3789 (PSIGNWrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
3790 def : Pat<(X86psignd VR128:$src1, VR128:$src2),
3791 (PSIGNDrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
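// Illustrative only (standard <immintrin.h> intrinsic assumed): PSHUFB selects
// each result byte from the source by the low nibble of the corresponding mask
// byte, or writes zero when the mask byte's high bit is set.
//
//   #include <immintrin.h>
//
//   __m128i byte_permute(__m128i v, __m128i mask) { return _mm_shuffle_epi8(v, mask); } // pshufb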
3793 //===---------------------------------------------------------------------===//
3794 // SSSE3 - Packed Align Instruction Patterns
3795 //===---------------------------------------------------------------------===//
3797 multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
3798 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
3799 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
3801 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3803 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3805 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
3806 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
3808 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3810 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3814 let Predicates = [HasAVX] in
3815 defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
3816 let Constraints = "$src1 = $dst", Predicates = [HasSSSE3] in
3817 defm PALIGN : ssse3_palign<"palignr">;
3819 let Predicates = [HasSSSE3] in {
3820 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3821 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3822 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3823 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3824 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3825 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3826 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3827 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3830 let Predicates = [HasAVX] in {
3831 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3832 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3833 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3834 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3835 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3836 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3837 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3838 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3841 //===---------------------------------------------------------------------===//
3842 // SSE3 - Misc Instructions
3843 //===---------------------------------------------------------------------===//
3845 // Thread synchronization
3846 let usesCustomInserter = 1 in {
3847 def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
3848 [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>;
3849 def MWAIT : PseudoI<(outs), (ins GR32:$src1, GR32:$src2),
3850 [(int_x86_sse3_mwait GR32:$src1, GR32:$src2)]>;
3853 let Uses = [EAX, ECX, EDX] in
3854 def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>, TB,
3855 Requires<[HasSSE3]>;
3856 let Uses = [ECX, EAX] in
3857 def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", []>, TB,
3858 Requires<[HasSSE3]>;
3860 def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
3861 def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;
3863 def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>,
3864 Requires<[In32BitMode]>;
3865 def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>,
3866 Requires<[In64BitMode]>;
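// For reference: MONITOR/MWAIT take their operands implicitly in EAX/ECX/EDX
// (EAX/ECX for mwait), so the aliases above only accept the explicit-operand
// spelling some assemblers use; e.g. "monitor %rax, %rcx, %rdx" assembles to
// the same 0F 01 C8 encoding as plain "monitor".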
3868 //===---------------------------------------------------------------------===//
3869 // Non-Instruction Patterns
3870 //===---------------------------------------------------------------------===//
3872 // extload f32 -> f64. This matches load+fextend because we have a hack in
3873 // the isel (PreprocessForFPConvert) that can introduce loads after dag combine.
3875 // Since these loads aren't folded into the fextend, we have to match it explicitly here.
3877 let Predicates = [HasSSE2] in
3878 def : Pat<(fextend (loadf32 addr:$src)),
3879 (CVTSS2SDrm addr:$src)>;
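// As a rough example of what the pattern above enables: for
//   double f(const float *p) { return *p; }
// the f32 load stays separate from the fextend in the DAG, and this Pat still
// folds the pair into a single "cvtss2sd (%rdi), %xmm0".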
3881 // Bitcasts between 128-bit vector types. Return the original type since
3882 // no instruction is needed for the conversion
3883 let Predicates = [HasXMMInt] in {
3884 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
3885 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
3886 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
3887 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
3888 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
3889 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
3890 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
3891 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
3892 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
3893 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
3894 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
3895 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
3896 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
3897 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
3898 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
3899 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
3900 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
3901 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
3902 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
3903 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
3904 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
3905 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
3906 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
3907 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
3908 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
3909 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
3910 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
3911 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
3912 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
3913 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
3916 // Bitcasts between 256-bit vector types. Return the original type since
3917 // no instruction is needed for the conversion
3918 let Predicates = [HasAVX] in {
3919 def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
3920 def : Pat<(v4f64 (bitconvert (v8i32 VR256:$src))), (v4f64 VR256:$src)>;
3921 def : Pat<(v4f64 (bitconvert (v4i64 VR256:$src))), (v4f64 VR256:$src)>;
3922 def : Pat<(v4f64 (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
3923 def : Pat<(v4f64 (bitconvert (v32i8 VR256:$src))), (v4f64 VR256:$src)>;
3924 def : Pat<(v8f32 (bitconvert (v8i32 VR256:$src))), (v8f32 VR256:$src)>;
3925 def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
3926 def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
3927 def : Pat<(v8f32 (bitconvert (v32i8 VR256:$src))), (v8f32 VR256:$src)>;
3928 def : Pat<(v8f32 (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
3929 def : Pat<(v4i64 (bitconvert (v8f32 VR256:$src))), (v4i64 VR256:$src)>;
3930 def : Pat<(v4i64 (bitconvert (v8i32 VR256:$src))), (v4i64 VR256:$src)>;
3931 def : Pat<(v4i64 (bitconvert (v4f64 VR256:$src))), (v4i64 VR256:$src)>;
3932 def : Pat<(v4i64 (bitconvert (v32i8 VR256:$src))), (v4i64 VR256:$src)>;
3933 def : Pat<(v4i64 (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
3934 def : Pat<(v32i8 (bitconvert (v4f64 VR256:$src))), (v32i8 VR256:$src)>;
3935 def : Pat<(v32i8 (bitconvert (v4i64 VR256:$src))), (v32i8 VR256:$src)>;
3936 def : Pat<(v32i8 (bitconvert (v8f32 VR256:$src))), (v32i8 VR256:$src)>;
3937 def : Pat<(v32i8 (bitconvert (v8i32 VR256:$src))), (v32i8 VR256:$src)>;
3938 def : Pat<(v32i8 (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
3939 def : Pat<(v8i32 (bitconvert (v32i8 VR256:$src))), (v8i32 VR256:$src)>;
3940 def : Pat<(v8i32 (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
3941 def : Pat<(v8i32 (bitconvert (v8f32 VR256:$src))), (v8i32 VR256:$src)>;
3942 def : Pat<(v8i32 (bitconvert (v4i64 VR256:$src))), (v8i32 VR256:$src)>;
3943 def : Pat<(v8i32 (bitconvert (v4f64 VR256:$src))), (v8i32 VR256:$src)>;
3944 def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))), (v16i16 VR256:$src)>;
3945 def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))), (v16i16 VR256:$src)>;
3946 def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))), (v16i16 VR256:$src)>;
3947 def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))), (v16i16 VR256:$src)>;
3948 def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))), (v16i16 VR256:$src)>;
3951 // Move a scalar into an XMM register, zero-extending the upper elements
3952 // (movd to an XMM register zero-extends).
3953 let AddedComplexity = 15 in {
3954 // Zero a VR128, then do a MOVS{S|D} to the lower bits.
3955 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
3956 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
3957 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
3958 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
3959 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
3960 (MOVSSrr (v4f32 (V_SET0PS)),
3961 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
3962 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
3963 (MOVSSrr (v4i32 (V_SET0PI)),
3964 (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
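// Roughly, the first pattern above selects to
//   xorps %xmmT, %xmmT        ; V_SET0PS materializes the zero vector
//   movss %src, %xmmT         ; merge the scalar into the low lane
// which leaves the upper lanes zeroed, matching X86vzmovl semantics.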
3967 // Splat v2f64 / v2i64
3968 let AddedComplexity = 10 in {
3969 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
3970 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3971 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
3972 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
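// Roughly: unpacking a register with itself duplicates its low element, so a
// splat of lane 0 becomes "unpcklpd %xmm0, %xmm0" (or punpcklqdq for the
// v2i64 case) without needing a separate shuffle immediate.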
3975 // Special unary SHUFPSrri case.
3976 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
3977 (SHUFPSrri VR128:$src1, VR128:$src1,
3978 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3979 let AddedComplexity = 5 in
3980 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3981 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3982 Requires<[HasSSE2]>;
3983 // Special unary SHUFPDrri case.
3984 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
3985 (SHUFPDrri VR128:$src1, VR128:$src1,
3986 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3987 Requires<[HasSSE2]>;
3988 // Special unary SHUFPDrri case.
3989 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
3990 (SHUFPDrri VR128:$src1, VR128:$src1,
3991 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3992 Requires<[HasSSE2]>;
3993 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3994 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3995 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3996 Requires<[HasSSE2]>;
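// N.B. the unary cases above feed the same register into both SHUFPS/SHUFPD
// sources (or use PSHUFD), and SHUFFLE_get_shuf_imm (an SDNodeXForm defined
// alongside the shuffle fragments) converts the shuffle mask into the 8-bit
// immediate the instruction expects.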
3998 // Special binary v4i32 shuffle cases with SHUFPS.
3999 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
4000 (SHUFPSrri VR128:$src1, VR128:$src2,
4001 (SHUFFLE_get_shuf_imm VR128:$src3))>,
4002 Requires<[HasSSE2]>;
4003 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)))),
4004 (SHUFPSrmi VR128:$src1, addr:$src2,
4005 (SHUFFLE_get_shuf_imm VR128:$src3))>,
4006 Requires<[HasSSE2]>;
4007 // Special binary v2i64 shuffle cases using SHUFPDrri.
4008 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
4009 (SHUFPDrri VR128:$src1, VR128:$src2,
4010 (SHUFFLE_get_shuf_imm VR128:$src3))>,
4011 Requires<[HasSSE2]>;
4013 let AddedComplexity = 20 in {
4014 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
4015 def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
4016 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
4018 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
4019 def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
4020 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
4022 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
4023 def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
4024 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
4025 def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
4026 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
4029 let AddedComplexity = 20 in {
4030 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
4031 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
4032 (MOVLPSrm VR128:$src1, addr:$src2)>;
4033 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
4034 (MOVLPDrm VR128:$src1, addr:$src2)>;
4035 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
4036 (MOVLPSrm VR128:$src1, addr:$src2)>;
4037 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
4038 (MOVLPDrm VR128:$src1, addr:$src2)>;
4041 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
4042 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
4043 (MOVLPSmr addr:$src1, VR128:$src2)>;
4044 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
4045 (MOVLPDmr addr:$src1, VR128:$src2)>;
4046 def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
4048 (MOVLPSmr addr:$src1, VR128:$src2)>;
4049 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
4050 (MOVLPDmr addr:$src1, VR128:$src2)>;
4052 let AddedComplexity = 15 in {
4053 // Setting the lowest element in the vector.
4054 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
4055 (MOVSSrr (v4i32 VR128:$src1),
4056 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
4057 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
4058 (MOVSDrr (v2i64 VR128:$src1),
4059 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
4061 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
4062 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
4063 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
4064 Requires<[HasSSE2]>;
4065 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
4066 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
4067 Requires<[HasSSE2]>;
4070 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
4071 // fall back to this for SSE1)
4072 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
4073 (SHUFPSrri VR128:$src2, VR128:$src1,
4074 (SHUFFLE_get_shuf_imm VR128:$src3))>;
4076 // Set lowest element and zero upper elements.
4077 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
4078 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
4080 // Use movaps / movups for SSE integer load / store (one byte shorter).
4081 // The instructions selected below are then converted to MOVDQA/MOVDQU
4082 // during the SSE domain pass.
4083 let Predicates = [HasSSE1] in {
4084 def : Pat<(alignedloadv4i32 addr:$src),
4085 (MOVAPSrm addr:$src)>;
4086 def : Pat<(loadv4i32 addr:$src),
4087 (MOVUPSrm addr:$src)>;
4088 def : Pat<(alignedloadv2i64 addr:$src),
4089 (MOVAPSrm addr:$src)>;
4090 def : Pat<(loadv2i64 addr:$src),
4091 (MOVUPSrm addr:$src)>;
4093 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
4094 (MOVAPSmr addr:$dst, VR128:$src)>;
4095 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
4096 (MOVAPSmr addr:$dst, VR128:$src)>;
4097 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
4098 (MOVAPSmr addr:$dst, VR128:$src)>;
4099 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
4100 (MOVAPSmr addr:$dst, VR128:$src)>;
4101 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
4102 (MOVUPSmr addr:$dst, VR128:$src)>;
4103 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
4104 (MOVUPSmr addr:$dst, VR128:$src)>;
4105 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
4106 (MOVUPSmr addr:$dst, VR128:$src)>;
4107 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
4108 (MOVUPSmr addr:$dst, VR128:$src)>;
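// For reference: movaps/movups omit the 0x66 operand-size prefix that
// movdqa/movdqu require (0F 28 vs. 66 0F 6F for the aligned forms), which is
// where the one-byte saving comes from; the domain-fix pass can still rewrite
// these back to the integer-domain forms when that avoids domain-crossing stalls.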
4111 // Use vmovaps/vmovups for AVX integer load/store.
4112 let Predicates = [HasAVX] in {
4113 // 128-bit load/store
4114 def : Pat<(alignedloadv4i32 addr:$src),
4115 (VMOVAPSrm addr:$src)>;
4116 def : Pat<(loadv4i32 addr:$src),
4117 (VMOVUPSrm addr:$src)>;
4118 def : Pat<(alignedloadv2i64 addr:$src),
4119 (VMOVAPSrm addr:$src)>;
4120 def : Pat<(loadv2i64 addr:$src),
4121 (VMOVUPSrm addr:$src)>;
4123 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
4124 (VMOVAPSmr addr:$dst, VR128:$src)>;
4125 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
4126 (VMOVAPSmr addr:$dst, VR128:$src)>;
4127 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
4128 (VMOVAPSmr addr:$dst, VR128:$src)>;
4129 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
4130 (VMOVAPSmr addr:$dst, VR128:$src)>;
4131 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
4132 (VMOVUPSmr addr:$dst, VR128:$src)>;
4133 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
4134 (VMOVUPSmr addr:$dst, VR128:$src)>;
4135 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
4136 (VMOVUPSmr addr:$dst, VR128:$src)>;
4137 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
4138 (VMOVUPSmr addr:$dst, VR128:$src)>;
4140 // 256-bit load/store
4141 def : Pat<(alignedloadv4i64 addr:$src),
4142 (VMOVAPSYrm addr:$src)>;
4143 def : Pat<(loadv4i64 addr:$src),
4144 (VMOVUPSYrm addr:$src)>;
4145 def : Pat<(alignedloadv8i32 addr:$src),
4146 (VMOVAPSYrm addr:$src)>;
4147 def : Pat<(loadv8i32 addr:$src),
4148 (VMOVUPSYrm addr:$src)>;
4149 def : Pat<(alignedstore (v4i64 VR256:$src), addr:$dst),
4150 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4151 def : Pat<(alignedstore (v8i32 VR256:$src), addr:$dst),
4152 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4153 def : Pat<(alignedstore (v16i16 VR256:$src), addr:$dst),
4154 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4155 def : Pat<(alignedstore (v32i8 VR256:$src), addr:$dst),
4156 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4157 def : Pat<(store (v4i64 VR256:$src), addr:$dst),
4158 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4159 def : Pat<(store (v8i32 VR256:$src), addr:$dst),
4160 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4161 def : Pat<(store (v16i16 VR256:$src), addr:$dst),
4162 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4163 def : Pat<(store (v32i8 VR256:$src), addr:$dst),
4164 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4167 //===----------------------------------------------------------------------===//
4168 // SSE4.1 - Packed Move with Sign/Zero Extend
4169 //===----------------------------------------------------------------------===//
4171 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4172 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4173 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4174 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4176 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4177 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4179 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
4183 let Predicates = [HasAVX] in {
4184 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
4186 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
4188 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
4190 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
4192 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
4194 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
4198 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
4199 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
4200 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
4201 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
4202 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
4203 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
4205 // Common patterns involving scalar load.
4206 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
4207 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4208 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
4209 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4211 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
4212 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4213 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
4214 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4216 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
4217 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4218 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
4219 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4221 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
4222 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4223 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
4224 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4226 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
4227 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4228 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
4229 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4231 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
4232 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4233 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
4234 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4237 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4238 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4239 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4240 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4242 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4243 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4245 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
4249 let Predicates = [HasAVX] in {
4250 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
4252 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
4254 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
4256 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
4260 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
4261 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
4262 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
4263 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
4265 // Common patterns involving scalar load
4266 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
4267 (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
4268 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
4269 (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
4271 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
4272 (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
4273 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
4274 (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
4277 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4278 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4279 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4280 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4282 // Expecting an i16 load any-extended to an i32 value.
4283 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
4284 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4285 [(set VR128:$dst, (IntId (bitconvert
4286 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
4290 let Predicates = [HasAVX] in {
4291 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
4293 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
4296 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
4297 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
4299 // Common patterns involving scalar load
4300 def : Pat<(int_x86_sse41_pmovsxbq
4301 (bitconvert (v4i32 (X86vzmovl
4302 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4303 (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
4305 def : Pat<(int_x86_sse41_pmovzxbq
4306 (bitconvert (v4i32 (X86vzmovl
4307 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4308 (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
4310 //===----------------------------------------------------------------------===//
4311 // SSE4.1 - Extract Instructions
4312 //===----------------------------------------------------------------------===//
4314 /// SS41I_extract8 - SSE 4.1 extract 8 bits to a 32-bit reg or 8-bit mem
4315 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
4316 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4317 (ins VR128:$src1, i32i8imm:$src2),
4318 !strconcat(OpcodeStr,
4319 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4320 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
4322 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4323 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
4324 !strconcat(OpcodeStr,
4325 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4328 // There's an AssertZext in the way of writing the store pattern
4329 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4332 let Predicates = [HasAVX] in {
4333 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
4334 def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
4335 (ins VR128:$src1, i32i8imm:$src2),
4336 "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
4339 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
4342 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
4343 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
4344 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4345 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
4346 !strconcat(OpcodeStr,
4347 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4350 // There's an AssertZext in the way of writing the store pattern
4351 // (store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))), addr:$dst)
4354 let Predicates = [HasAVX] in
4355 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
4357 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
4360 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4361 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
4362 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4363 (ins VR128:$src1, i32i8imm:$src2),
4364 !strconcat(OpcodeStr,
4365 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4367 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
4368 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4369 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
4370 !strconcat(OpcodeStr,
4371 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4372 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
4373 addr:$dst)]>, OpSize;
4376 let Predicates = [HasAVX] in
4377 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
4379 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
4381 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
4382 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
4383 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
4384 (ins VR128:$src1, i32i8imm:$src2),
4385 !strconcat(OpcodeStr,
4386 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4388 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
4389 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4390 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
4391 !strconcat(OpcodeStr,
4392 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4393 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
4394 addr:$dst)]>, OpSize, REX_W;
4397 let Predicates = [HasAVX] in
4398 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
4400 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
4402 /// SS41I_extractf32 - SSE 4.1 extract a 32-bit fp value to an int reg or memory destination
4404 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
4405 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4406 (ins VR128:$src1, i32i8imm:$src2),
4407 !strconcat(OpcodeStr,
4408 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4410 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
4412 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4413 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
4414 !strconcat(OpcodeStr,
4415 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4416 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
4417 addr:$dst)]>, OpSize;
4420 let Predicates = [HasAVX] in {
4421 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
4422 def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
4423 (ins VR128:$src1, i32i8imm:$src2),
4424 "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
4427 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
4429 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
4430 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
4433 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
4434 Requires<[HasSSE41]>;
4436 //===----------------------------------------------------------------------===//
4437 // SSE4.1 - Insert Instructions
4438 //===----------------------------------------------------------------------===//
4440 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
4441 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4442 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4444 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4446 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4448 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
4449 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4450 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
4452 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4454 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4456 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
4457 imm:$src3))]>, OpSize;
4460 let Predicates = [HasAVX] in
4461 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
4462 let Constraints = "$src1 = $dst" in
4463 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
4465 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
4466 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4467 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4469 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4471 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4473 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
4475 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4476 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
4478 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4480 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4482 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
4483 imm:$src3)))]>, OpSize;
4486 let Predicates = [HasAVX] in
4487 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
4488 let Constraints = "$src1 = $dst" in
4489 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
4491 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
4492 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4493 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4495 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4497 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4499 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
4501 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4502 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
4504 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4506 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4508 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
4509 imm:$src3)))]>, OpSize;
4512 let Predicates = [HasAVX] in
4513 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
4514 let Constraints = "$src1 = $dst" in
4515 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
4517 // insertps has a few different modes; the first two below are optimized
4518 // inserts that won't zero arbitrary elements in the destination vector.
4519 // The next one matches the intrinsic and could zero arbitrary elements in
4520 // the target vector.
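// For reference, the insertps immediate is roughly laid out as:
//   imm[7:6] = source element (only meaningful for the reg-reg form)
//   imm[5:4] = destination lane to write
//   imm[3:0] = zero mask applied to the destination lanes
// e.g. "insertps $0x10, %xmm1, %xmm0" copies element 0 of %xmm1 into lane 1
// of %xmm0 and zeroes nothing.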
4521 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
4522 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4523 (ins VR128:$src1, VR128:$src2, u32u8imm:$src3),
4525 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4527 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4529 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
4531 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4532 (ins VR128:$src1, f32mem:$src2, u32u8imm:$src3),
4534 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4536 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4538 (X86insrtps VR128:$src1,
4539 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
4540 imm:$src3))]>, OpSize;
4543 let Constraints = "$src1 = $dst" in
4544 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
4545 let Predicates = [HasAVX] in
4546 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
4548 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4549 (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4551 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4552 (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4553 Requires<[HasSSE41]>;
4555 //===----------------------------------------------------------------------===//
4556 // SSE4.1 - Round Instructions
4557 //===----------------------------------------------------------------------===//
4559 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
4560 X86MemOperand x86memop, RegisterClass RC,
4561 PatFrag mem_frag32, PatFrag mem_frag64,
4562 Intrinsic V4F32Int, Intrinsic V2F64Int> {
4564 // Vector intrinsic operation, reg
4565 def PSr : SS4AIi8<opcps, MRMSrcReg,
4566 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4567 !strconcat(OpcodeStr,
4568 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4569 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
4572 // Vector intrinsic operation, mem
4573 def PSm : Ii8<opcps, MRMSrcMem,
4574 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4575 !strconcat(OpcodeStr,
4576 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4578 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
4580 Requires<[HasSSE41]>;
4582 // Vector intrinsic operation, reg
4583 def PDr : SS4AIi8<opcpd, MRMSrcReg,
4584 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4585 !strconcat(OpcodeStr,
4586 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4587 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
4590 // Vector intrinsic operation, mem
4591 def PDm : SS4AIi8<opcpd, MRMSrcMem,
4592 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4593 !strconcat(OpcodeStr,
4594 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4596 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
4600 multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
4601 RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
4603 // Vector intrinsic operation, reg
4604 def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
4605 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4606 !strconcat(OpcodeStr,
4607 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4610 // Vector intrinsic operation, mem
4611 def PSm_AVX : Ii8<opcps, MRMSrcMem,
4612 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4613 !strconcat(OpcodeStr,
4614 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4615 []>, TA, OpSize, Requires<[HasSSE41]>;
4617 // Vector intrinsic operation, reg
4618 def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
4619 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4620 !strconcat(OpcodeStr,
4621 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4624 // Vector intrinsic operation, mem
4625 def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
4626 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4627 !strconcat(OpcodeStr,
4628 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4632 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4635 Intrinsic F64Int, bit Is2Addr = 1> {
4636 // Intrinsic operation, reg.
4637 def SSr : SS4AIi8<opcss, MRMSrcReg,
4638 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4640 !strconcat(OpcodeStr,
4641 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4642 !strconcat(OpcodeStr,
4643 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4644 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4647 // Intrinsic operation, mem.
4648 def SSm : SS4AIi8<opcss, MRMSrcMem,
4649 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4651 !strconcat(OpcodeStr,
4652 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4653 !strconcat(OpcodeStr,
4654 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4656 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
4659 // Intrinsic operation, reg.
4660 def SDr : SS4AIi8<opcsd, MRMSrcReg,
4661 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4663 !strconcat(OpcodeStr,
4664 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4665 !strconcat(OpcodeStr,
4666 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4667 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4670 // Intrinsic operation, mem.
4671 def SDm : SS4AIi8<opcsd, MRMSrcMem,
4672 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4674 !strconcat(OpcodeStr,
4675 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4676 !strconcat(OpcodeStr,
4677 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4679 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
4683 multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
4685 // Intrinsic operation, reg.
4686 def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
4687 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4688 !strconcat(OpcodeStr,
4689 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4692 // Intrinsic operation, mem.
4693 def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
4694 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4695 !strconcat(OpcodeStr,
4696 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4699 // Intrinsic operation, reg.
4700 def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
4701 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4702 !strconcat(OpcodeStr,
4703 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4706 // Intrinsic operation, mem.
4707 def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
4708 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4709 !strconcat(OpcodeStr,
4710 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4714 // FP round - roundss, roundps, roundsd, roundpd
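// For reference, the round* immediate roughly encodes:
//   imm[1:0] = rounding mode (00 nearest-even, 01 down, 10 up, 11 truncate)
//   imm[2]   = 1 to use MXCSR.RC instead of imm[1:0]
//   imm[3]   = 1 to suppress precision (inexact) exceptions
// so e.g. "roundps $0x3, %xmm0, %xmm0" truncates each element toward zero.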
4715 let Predicates = [HasAVX] in {
4717 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
4718 memopv4f32, memopv2f64,
4719 int_x86_sse41_round_ps,
4720 int_x86_sse41_round_pd>, VEX;
4721 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
4722 memopv8f32, memopv4f64,
4723 int_x86_avx_round_ps_256,
4724 int_x86_avx_round_pd_256>, VEX;
4725 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
4726 int_x86_sse41_round_ss,
4727 int_x86_sse41_round_sd, 0>, VEX_4V;
4729 // Instructions for the assembler
4730 defm VROUND : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
4732 defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
4734 defm VROUND : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
4737 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
4738 memopv4f32, memopv2f64,
4739 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
4740 let Constraints = "$src1 = $dst" in
4741 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
4742 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
4744 //===----------------------------------------------------------------------===//
4745 // SSE4.1 - Packed Bit Test
4746 //===----------------------------------------------------------------------===//
4748 // The ptest instruction: we lower to this in X86ISelLowering, primarily from
4749 // the Intel intrinsic that corresponds to it.
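// Roughly: ptest ANDs its two operands and sets ZF if the result is zero, and
// sets CF if (NOT dest) AND src is zero; e.g. _mm_testz_si128(a, b) becomes
// "ptest %xmm1, %xmm0" followed by a sete of ZF.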
4750 let Defs = [EFLAGS], Predicates = [HasAVX] in {
4751 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4752 "vptest\t{$src2, $src1|$src1, $src2}",
4753 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4755 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4756 "vptest\t{$src2, $src1|$src1, $src2}",
4757 [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4760 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
4761 "vptest\t{$src2, $src1|$src1, $src2}",
4762 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
4764 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
4765 "vptest\t{$src2, $src1|$src1, $src2}",
4766 [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
4770 let Defs = [EFLAGS] in {
4771 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4772 "ptest \t{$src2, $src1|$src1, $src2}",
4773 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4775 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4776 "ptest \t{$src2, $src1|$src1, $src2}",
4777 [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4781 // The bit test instructions below are AVX only
4782 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
4783 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
4784 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
4785 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4786 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
4787 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
4788 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4789 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
4793 let Defs = [EFLAGS], Predicates = [HasAVX] in {
4794 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
4795 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
4796 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
4797 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
4800 //===----------------------------------------------------------------------===//
4801 // SSE4.1 - Misc Instructions
4802 //===----------------------------------------------------------------------===//
4804 def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
4805 "popcnt{w}\t{$src, $dst|$dst, $src}",
4806 [(set GR16:$dst, (ctpop GR16:$src))]>, OpSize, XS;
4807 def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
4808 "popcnt{w}\t{$src, $dst|$dst, $src}",
4809 [(set GR16:$dst, (ctpop (loadi16 addr:$src)))]>, OpSize, XS;
4811 def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
4812 "popcnt{l}\t{$src, $dst|$dst, $src}",
4813 [(set GR32:$dst, (ctpop GR32:$src))]>, XS;
4814 def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
4815 "popcnt{l}\t{$src, $dst|$dst, $src}",
4816 [(set GR32:$dst, (ctpop (loadi32 addr:$src)))]>, XS;
4818 def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
4819 "popcnt{q}\t{$src, $dst|$dst, $src}",
4820 [(set GR64:$dst, (ctpop GR64:$src))]>, XS;
4821 def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
4822 "popcnt{q}\t{$src, $dst|$dst, $src}",
4823 [(set GR64:$dst, (ctpop (loadi64 addr:$src)))]>, XS;
4827 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
4828 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
4829 Intrinsic IntId128> {
4830 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4832 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4833 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
4834 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4836 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4839 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
4842 let Predicates = [HasAVX] in
4843 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
4844 int_x86_sse41_phminposuw>, VEX;
4845 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
4846 int_x86_sse41_phminposuw>;
4848 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
4849 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
4850 Intrinsic IntId128, bit Is2Addr = 1> {
4851 let isCommutable = 1 in
4852 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4853 (ins VR128:$src1, VR128:$src2),
4855 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4856 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4857 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
4858 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4859 (ins VR128:$src1, i128mem:$src2),
4861 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4862 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4864 (IntId128 VR128:$src1,
4865 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4868 let Predicates = [HasAVX] in {
4869 let isCommutable = 0 in
4870 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
4872 defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
4874 defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
4876 defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
4878 defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
4880 defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
4882 defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
4884 defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
4886 defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
4888 defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
4890 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
4894 let Constraints = "$src1 = $dst" in {
4895 let isCommutable = 0 in
4896 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
4897 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
4898 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
4899 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
4900 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
4901 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
4902 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
4903 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
4904 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
4905 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
4906 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
4909 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
4910 (PCMPEQQrr VR128:$src1, VR128:$src2)>;
4911 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
4912 (PCMPEQQrm VR128:$src1, addr:$src2)>;
4914 /// SS48I_binop_rm - Simple SSE41 binary operator.
4915 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4916 ValueType OpVT, bit Is2Addr = 1> {
4917 let isCommutable = 1 in
4918 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4919 (ins VR128:$src1, VR128:$src2),
4921 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4922 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4923 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
4925 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4926 (ins VR128:$src1, i128mem:$src2),
4928 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4929 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4930 [(set VR128:$dst, (OpNode VR128:$src1,
4931 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
4935 let Predicates = [HasAVX] in
4936 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
4937 let Constraints = "$src1 = $dst" in
4938 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
4940 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
4941 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
4942 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
4943 X86MemOperand x86memop, bit Is2Addr = 1> {
4944 let isCommutable = 1 in
4945 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
4946 (ins RC:$src1, RC:$src2, u32u8imm:$src3),
4948 !strconcat(OpcodeStr,
4949 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4950 !strconcat(OpcodeStr,
4951 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4952 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
4954 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
4955 (ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
4957 !strconcat(OpcodeStr,
4958 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4959 !strconcat(OpcodeStr,
4960 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4963 (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
4967 let Predicates = [HasAVX] in {
4968 let isCommutable = 0 in {
4969 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
4970 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4971 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
4972 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4973 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
4974 int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4975 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
4976 int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4977 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
4978 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4979 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
4980 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4982 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
4983 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4984 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
4985 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4986 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
4987 VR256, memopv32i8, i256mem, 0>, VEX_4V;
4990 let Constraints = "$src1 = $dst" in {
4991 let isCommutable = 0 in {
4992 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
4993 VR128, memopv16i8, i128mem>;
4994 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
4995 VR128, memopv16i8, i128mem>;
4996 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
4997 VR128, memopv16i8, i128mem>;
4998 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
4999 VR128, memopv16i8, i128mem>;
5001 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
5002 VR128, memopv16i8, i128mem>;
5003 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
5004 VR128, memopv16i8, i128mem>;
5007 /// SS41I_quaternary_int_avx - AVX SSE 4.1 instructions with 4 operands
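// Roughly: the fourth (mask) register cannot go in a ModRM field, so the
// VEX_I8IMM form encodes it in the top four bits of a trailing immediate byte,
// e.g. "vblendvps %xmm3, %xmm2, %xmm1, %xmm0" carries %xmm3 in imm8[7:4].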
5008 let Predicates = [HasAVX] in {
5009 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
5010 RegisterClass RC, X86MemOperand x86memop,
5011 PatFrag mem_frag, Intrinsic IntId> {
5012 def rr : I<opc, MRMSrcReg, (outs RC:$dst),
5013 (ins RC:$src1, RC:$src2, RC:$src3),
5014 !strconcat(OpcodeStr,
5015 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5016 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
5017 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
5019 def rm : I<opc, MRMSrcMem, (outs RC:$dst),
5020 (ins RC:$src1, x86memop:$src2, RC:$src3),
5021 !strconcat(OpcodeStr,
5022 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5024 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
5026 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
5030 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
5031 memopv16i8, int_x86_sse41_blendvpd>;
5032 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
5033 memopv16i8, int_x86_sse41_blendvps>;
5034 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
5035 memopv16i8, int_x86_sse41_pblendvb>;
5036 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
5037 memopv32i8, int_x86_avx_blendv_pd_256>;
5038 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
5039 memopv32i8, int_x86_avx_blendv_ps_256>;
5041 /// SS41I_ternary_int - SSE 4.1 ternary operator
5042 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
5043 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
5044 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
5045 (ins VR128:$src1, VR128:$src2),
5046 !strconcat(OpcodeStr,
5047 "\t{$src2, $dst|$dst, $src2}"),
5048 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
5051 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
5052 (ins VR128:$src1, i128mem:$src2),
5053 !strconcat(OpcodeStr,
5054 "\t{$src2, $dst|$dst, $src2}"),
5057 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
5061 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
5062 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
5063 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
5065 def : Pat<(X86pblendv VR128:$src1, VR128:$src2, XMM0),
5066 (PBLENDVBrr0 VR128:$src1, VR128:$src2)>;
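// Unlike the AVX forms above, the SSE4.1 blendv instructions read their mask
// implicitly from XMM0 (hence Uses = [XMM0] and the rr0/rm0 names), so
// register allocation has to place the select mask there before these are
// usable.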
5068 let Predicates = [HasAVX] in
5069 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5070 "vmovntdqa\t{$src, $dst|$dst, $src}",
5071 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
5073 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5074 "movntdqa\t{$src, $dst|$dst, $src}",
5075 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
5078 //===----------------------------------------------------------------------===//
5079 // SSE4.2 - Compare Instructions
5080 //===----------------------------------------------------------------------===//
5082 /// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
5083 multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
5084 Intrinsic IntId128, bit Is2Addr = 1> {
5085 def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
5086 (ins VR128:$src1, VR128:$src2),
5088 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5089 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5090 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5092 def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
5093 (ins VR128:$src1, i128mem:$src2),
5095 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5096 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5098 (IntId128 VR128:$src1,
5099 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5102 let Predicates = [HasAVX] in
5103 defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
5105 let Constraints = "$src1 = $dst" in
5106 defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
5108 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
5109 (PCMPGTQrr VR128:$src1, VR128:$src2)>;
5110 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
5111 (PCMPGTQrm VR128:$src1, addr:$src2)>;
5113 //===----------------------------------------------------------------------===//
5114 // SSE4.2 - String/text Processing Instructions
5115 //===----------------------------------------------------------------------===//
5117 // Packed Compare Implicit Length Strings, Return Mask
5118 multiclass pseudo_pcmpistrm<string asm> {
5119 def REG : PseudoI<(outs VR128:$dst),
5120 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5121 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
5123 def MEM : PseudoI<(outs VR128:$dst),
5124 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5125 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
5126 VR128:$src1, (load addr:$src2), imm:$src3))]>;
5129 let Defs = [EFLAGS], usesCustomInserter = 1 in {
5130 defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
5131 defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
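// Roughly: the pseudos exist because the real pcmpistrm writes XMM0
// implicitly; the custom inserter expands each pseudo into the corresponding
// PCMPISTRM128rr/rm (or VEX form) plus a copy of XMM0 into the pseudo's
// virtual result register.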
5134 let Defs = [XMM0, EFLAGS], Predicates = [HasAVX] in {
5135 def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
5136 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5137 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
5138 def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
5139 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5140 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
5143 let Defs = [XMM0, EFLAGS] in {
5144 def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
5145 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5146 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
5147 def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
5148 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5149 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
5152 // Packed Compare Explicit Length Strings, Return Mask
5153 multiclass pseudo_pcmpestrm<string asm> {
5154 def REG : PseudoI<(outs VR128:$dst),
5155 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5156 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
5157 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
5158 def MEM : PseudoI<(outs VR128:$dst),
5159 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5160 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
5161 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
5162 }
5164 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
5165 defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
5166 defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
5167 }
5169 let Predicates = [HasAVX],
5170 Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
5171 def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
5172 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5173 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
5174 def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
5175 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5176 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
5179 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
5180 def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
5181 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5182 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
5183 def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
5184 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5185 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
5188 // Packed Compare Implicit Length Strings, Return Index
5189 let Defs = [ECX, EFLAGS] in {
5190 multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
5191 def rr : SS42AI<0x63, MRMSrcReg, (outs),
5192 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5193 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
5194 [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
5195 (implicit EFLAGS)]>, OpSize;
5196 def rm : SS42AI<0x63, MRMSrcMem, (outs),
5197 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5198 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
5199 [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
5200 (implicit EFLAGS)]>, OpSize;
5201 }
5202 }
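// Note on the defm suffixes below: the plain PCMPISTRI forms bind the
// pcmpistri128 intrinsic, which yields the match index in ECX, while the
// A/C/O/S/Z variants bind the pcmpistri{a,c,o,s,z}128 intrinsics that report
// an individual flag result of the comparison (carry, overflow, sign, zero,
// and 'a' for "above", i.e. CF and ZF both clear).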
5204 let Predicates = [HasAVX] in {
5205 defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
5206 VEX;
5207 defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
5208 VEX;
5209 defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
5210 VEX;
5211 defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
5212 VEX;
5213 defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
5214 VEX;
5215 defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
5216 VEX;
5217 }
5219 defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
5220 defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
5221 defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
5222 defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
5223 defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
5224 defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
5226 // Packed Compare Explicit Length Strings, Return Index
5227 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
5228 multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
5229 def rr : SS42AI<0x61, MRMSrcReg, (outs),
5230 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5231 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5232 [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
5233 (implicit EFLAGS)]>, OpSize;
5234 def rm : SS42AI<0x61, MRMSrcMem, (outs),
5235 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5236 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5237 [(set ECX,
5238 (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
5239 (implicit EFLAGS)]>, OpSize;
5240 }
5241 }
5243 let Predicates = [HasAVX] in {
5244 defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
5245 VEX;
5246 defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
5247 VEX;
5248 defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
5249 VEX;
5250 defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
5251 VEX;
5252 defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
5253 VEX;
5254 defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
5255 VEX;
5256 }
5258 defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
5259 defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
5260 defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
5261 defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
5262 defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
5263 defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
5265 //===----------------------------------------------------------------------===//
5266 // SSE4.2 - CRC Instructions
5267 //===----------------------------------------------------------------------===//
5269 // No CRC instructions have AVX equivalents
5271 // crc intrinsic instruction
5272 // This set of instructions is only rm; the only difference is the size
5273 // of r and m.
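// For example (AT&T syntax), "crc32b (%rax), %ecx" folds one byte loaded from
// memory into the running CRC-32C value in %ecx; the {b,w,l,q} mnemonic
// suffixes select the width of the source operand.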
5274 let Constraints = "$src1 = $dst" in {
5275 def CRC32r32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
5276 (ins GR32:$src1, i8mem:$src2),
5277 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5278 [(set GR32:$dst,
5279 (int_x86_sse42_crc32_32_8 GR32:$src1,
5280 (load addr:$src2)))]>;
5281 def CRC32r32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
5282 (ins GR32:$src1, GR8:$src2),
5283 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5284 [(set GR32:$dst,
5285 (int_x86_sse42_crc32_32_8 GR32:$src1, GR8:$src2))]>;
5286 def CRC32r32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5287 (ins GR32:$src1, i16mem:$src2),
5288 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5289 [(set GR32:$dst,
5290 (int_x86_sse42_crc32_32_16 GR32:$src1,
5291 (load addr:$src2)))]>,
5292 OpSize;
5293 def CRC32r32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5294 (ins GR32:$src1, GR16:$src2),
5295 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5296 [(set GR32:$dst,
5297 (int_x86_sse42_crc32_32_16 GR32:$src1, GR16:$src2))]>,
5298 OpSize;
5299 def CRC32r32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5300 (ins GR32:$src1, i32mem:$src2),
5301 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5302 [(set GR32:$dst,
5303 (int_x86_sse42_crc32_32_32 GR32:$src1,
5304 (load addr:$src2)))]>;
5305 def CRC32r32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5306 (ins GR32:$src1, GR32:$src2),
5307 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5308 [(set GR32:$dst,
5309 (int_x86_sse42_crc32_32_32 GR32:$src1, GR32:$src2))]>;
5310 def CRC32r64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
5311 (ins GR64:$src1, i8mem:$src2),
5312 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5313 [(set GR64:$dst,
5314 (int_x86_sse42_crc32_64_8 GR64:$src1,
5315 (load addr:$src2)))]>,
5316 REX_W;
5317 def CRC32r64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
5318 (ins GR64:$src1, GR8:$src2),
5319 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5320 [(set GR64:$dst,
5321 (int_x86_sse42_crc32_64_8 GR64:$src1, GR8:$src2))]>,
5322 REX_W;
5323 def CRC32r64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
5324 (ins GR64:$src1, i64mem:$src2),
5325 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5326 [(set GR64:$dst,
5327 (int_x86_sse42_crc32_64_64 GR64:$src1,
5328 (load addr:$src2)))]>,
5329 REX_W;
5330 def CRC32r64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
5331 (ins GR64:$src1, GR64:$src2),
5332 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5333 [(set GR64:$dst,
5334 (int_x86_sse42_crc32_64_64 GR64:$src1, GR64:$src2))]>,
5335 REX_W;
5336 }
5338 //===----------------------------------------------------------------------===//
5339 // AES-NI Instructions
5340 //===----------------------------------------------------------------------===//
5342 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
5343 Intrinsic IntId128, bit Is2Addr = 1> {
5344 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
5345 (ins VR128:$src1, VR128:$src2),
5346 !if(Is2Addr,
5347 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5348 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5349 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5350 OpSize;
5351 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
5352 (ins VR128:$src1, i128mem:$src2),
5353 !if(Is2Addr,
5354 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5355 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5356 [(set VR128:$dst,
5357 (IntId128 VR128:$src1,
5358 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5359 }
5361 // Perform One Round of an AES Encryption/Decryption Flow
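// For example, "aesenc %xmm1, %xmm0" performs one round of AES encryption on
// the state in %xmm0 using the round key in %xmm1; the *last forms perform
// the final round, which omits the MixColumns step.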
5362 let Predicates = [HasAVX, HasAES] in {
5363 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
5364 int_x86_aesni_aesenc, 0>, VEX_4V;
5365 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
5366 int_x86_aesni_aesenclast, 0>, VEX_4V;
5367 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
5368 int_x86_aesni_aesdec, 0>, VEX_4V;
5369 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
5370 int_x86_aesni_aesdeclast, 0>, VEX_4V;
5371 }
5373 let Constraints = "$src1 = $dst" in {
5374 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
5375 int_x86_aesni_aesenc>;
5376 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
5377 int_x86_aesni_aesenclast>;
5378 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
5379 int_x86_aesni_aesdec>;
5380 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
5381 int_x86_aesni_aesdeclast>;
5382 }
5384 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
5385 (AESENCrr VR128:$src1, VR128:$src2)>;
5386 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
5387 (AESENCrm VR128:$src1, addr:$src2)>;
5388 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
5389 (AESENCLASTrr VR128:$src1, VR128:$src2)>;
5390 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
5391 (AESENCLASTrm VR128:$src1, addr:$src2)>;
5392 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
5393 (AESDECrr VR128:$src1, VR128:$src2)>;
5394 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
5395 (AESDECrm VR128:$src1, addr:$src2)>;
5396 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
5397 (AESDECLASTrr VR128:$src1, VR128:$src2)>;
5398 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
5399 (AESDECLASTrm VR128:$src1, addr:$src2)>;
5401 // Perform the AES InvMixColumn Transformation
5402 let Predicates = [HasAVX, HasAES] in {
5403 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5404 (ins VR128:$src1),
5405 "vaesimc\t{$src1, $dst|$dst, $src1}",
5406 [(set VR128:$dst,
5407 (int_x86_aesni_aesimc VR128:$src1))]>,
5408 OpSize, VEX;
5409 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5410 (ins i128mem:$src1),
5411 "vaesimc\t{$src1, $dst|$dst, $src1}",
5412 [(set VR128:$dst,
5413 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5414 OpSize, VEX;
5415 }
5416 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5417 (ins VR128:$src1),
5418 "aesimc\t{$src1, $dst|$dst, $src1}",
5419 [(set VR128:$dst,
5420 (int_x86_aesni_aesimc VR128:$src1))]>,
5421 OpSize;
5422 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5423 (ins i128mem:$src1),
5424 "aesimc\t{$src1, $dst|$dst, $src1}",
5425 [(set VR128:$dst,
5426 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5427 OpSize;
5429 // AES Round Key Generation Assist
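// For example, "aeskeygenassist $0x01, %xmm1, %xmm0" assists one key-expansion
// step for the key material in %xmm1; the immediate supplies the round
// constant (RCON) for that step.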
5430 let Predicates = [HasAVX, HasAES] in {
5431 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5432 (ins VR128:$src1, i8imm:$src2),
5433 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5434 [(set VR128:$dst,
5435 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5436 OpSize, VEX;
5437 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5438 (ins i128mem:$src1, i8imm:$src2),
5439 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5440 [(set VR128:$dst,
5441 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5442 imm:$src2))]>,
5443 OpSize, VEX;
5444 }
5445 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5446 (ins VR128:$src1, i8imm:$src2),
5447 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5448 [(set VR128:$dst,
5449 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5450 OpSize;
5451 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5452 (ins i128mem:$src1, i8imm:$src2),
5453 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5454 [(set VR128:$dst,
5455 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5456 imm:$src2))]>,
5457 OpSize;
5459 //===----------------------------------------------------------------------===//
5460 // CLMUL Instructions
5461 //===----------------------------------------------------------------------===//
5463 // Carry-less Multiplication instructions
5464 let Constraints = "$src1 = $dst" in {
5465 def PCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
5466 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5467 "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
5470 def PCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
5471 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5472 "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
5476 // AVX carry-less Multiplication instructions
5477 def VPCLMULQDQrr : AVXCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
5478 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5479 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5482 def VPCLMULQDQrm : AVXCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
5483 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5484 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5488 multiclass pclmul_alias<string asm, int immop> {
5489 def : InstAlias<!strconcat("pclmul", asm,
5490 "dq {$src, $dst|$dst, $src}"),
5491 (PCLMULQDQrr VR128:$dst, VR128:$src, immop)>;
5493 def : InstAlias<!strconcat("pclmul", asm,
5494 "dq {$src, $dst|$dst, $src}"),
5495 (PCLMULQDQrm VR128:$dst, i128mem:$src, immop)>;
5497 def : InstAlias<!strconcat("vpclmul", asm,
5498 "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
5499 (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop)>;
5501 def : InstAlias<!strconcat("vpclmul", asm,
5502 "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
5503 (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop)>;
5504 }
5505 defm : pclmul_alias<"hqhq", 0x11>;
5506 defm : pclmul_alias<"hqlq", 0x01>;
5507 defm : pclmul_alias<"lqhq", 0x10>;
5508 defm : pclmul_alias<"lqlq", 0x00>;
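// With the aliases above, for example, "pclmullqlqdq %xmm1, %xmm0" is accepted
// as shorthand for "pclmulqdq $0x00, %xmm1, %xmm0", and "vpclmulhqhqdq"
// likewise maps to vpclmulqdq with immediate 0x11.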
5510 //===----------------------------------------------------------------------===//
5511 // AVX Instructions
5512 //===----------------------------------------------------------------------===//
5514 //===----------------------------------------------------------------------===//
5515 // VBROADCAST - Load from memory and broadcast to all elements of the
5516 // destination operand
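// For example, "vbroadcastss (%rax), %ymm0" loads a single float and
// replicates it into all eight lanes of %ymm0; the 128-bit form replicates
// into four lanes, and vbroadcastf128 replicates an entire 128-bit block.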
5518 class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
5519 X86MemOperand x86memop, Intrinsic Int> :
5520 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
5521 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5522 [(set RC:$dst, (Int addr:$src))]>, VEX;
5524 def VBROADCASTSS : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
5525 int_x86_avx_vbroadcastss>;
5526 def VBROADCASTSSY : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
5527 int_x86_avx_vbroadcastss_256>;
5528 def VBROADCASTSD : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
5529 int_x86_avx_vbroadcast_sd_256>;
5530 def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
5531 int_x86_avx_vbroadcastf128_pd_256>;
5533 def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
5534 (VBROADCASTF128 addr:$src)>;
5536 def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
5537 (VBROADCASTSSY addr:$src)>;
5538 def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
5539 (VBROADCASTSD addr:$src)>;
5540 def : Pat<(v8f32 (X86VBroadcast (loadf32 addr:$src))),
5541 (VBROADCASTSSY addr:$src)>;
5542 def : Pat<(v4f64 (X86VBroadcast (loadf64 addr:$src))),
5543 (VBROADCASTSD addr:$src)>;
5545 def : Pat<(v4f32 (X86VBroadcast (loadf32 addr:$src))),
5546 (VBROADCASTSS addr:$src)>;
5547 def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
5548 (VBROADCASTSS addr:$src)>;
5550 //===----------------------------------------------------------------------===//
5551 // VINSERTF128 - Insert packed floating-point values
5553 def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
5554 (ins VR256:$src1, VR128:$src2, i8imm:$src3),
5555 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5557 def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
5558 (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
5559 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5562 def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
5563 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5564 def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
5565 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5566 def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
5567 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5569 def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
5570 (i32 imm)),
5571 (VINSERTF128rr VR256:$src1, VR128:$src2,
5572 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5573 def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
5574 (i32 imm)),
5575 (VINSERTF128rr VR256:$src1, VR128:$src2,
5576 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5577 def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
5578 (i32 imm)),
5579 (VINSERTF128rr VR256:$src1, VR128:$src2,
5580 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5581 def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
5582 (i32 imm)),
5583 (VINSERTF128rr VR256:$src1, VR128:$src2,
5584 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5585 def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
5586 (i32 imm)),
5587 (VINSERTF128rr VR256:$src1, VR128:$src2,
5588 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5589 def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
5590 (i32 imm)),
5591 (VINSERTF128rr VR256:$src1, VR128:$src2,
5592 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5594 // Special COPY patterns
5595 def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (i32 0)),
5596 (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5597 def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (i32 0)),
5598 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5599 def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (i32 0)),
5600 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5601 def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (i32 0)),
5602 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5603 def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (i32 0)),
5604 (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5605 def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (i32 0)),
5606 (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
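// The COPY patterns above rely on the fact that inserting a 128-bit value
// into lane 0 of an undefined 256-bit vector needs no vinsertf128 at all:
// it is simply a subregister write of the xmm register into the wider ymm
// register.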
5608 //===----------------------------------------------------------------------===//
5609 // VEXTRACTF128 - Extract packed floating-point values
5611 def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
5612 (ins VR256:$src1, i8imm:$src2),
5613 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5615 def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
5616 (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
5617 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5620 def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
5621 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5622 def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
5623 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5624 def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
5625 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5627 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5628 (v4f32 (VEXTRACTF128rr
5629 (v8f32 VR256:$src1),
5630 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5631 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5632 (v2f64 (VEXTRACTF128rr
5633 (v4f64 VR256:$src1),
5634 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5635 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5636 (v4i32 (VEXTRACTF128rr
5637 (v8i32 VR256:$src1),
5638 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5639 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5640 (v2i64 (VEXTRACTF128rr
5641 (v4i64 VR256:$src1),
5642 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5643 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5644 (v8i16 (VEXTRACTF128rr
5645 (v16i16 VR256:$src1),
5646 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5647 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5648 (v16i8 (VEXTRACTF128rr
5649 (v32i8 VR256:$src1),
5650 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5652 // Special COPY patterns
5653 def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (i32 0))),
5654 (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
5655 def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (i32 0))),
5656 (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;
5658 def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (i32 0))),
5659 (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
5660 def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (i32 0))),
5661 (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;
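// Likewise, extracting the low 128 bits of a 256-bit vector is just a
// subregister read, so no vextractf128 is emitted for lane 0.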
5664 //===----------------------------------------------------------------------===//
5665 // VMASKMOV - Conditional SIMD Packed Loads and Stores
5667 multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
5668 Intrinsic IntLd, Intrinsic IntLd256,
5669 Intrinsic IntSt, Intrinsic IntSt256,
5670 PatFrag pf128, PatFrag pf256> {
5671 def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
5672 (ins VR128:$src1, f128mem:$src2),
5673 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5674 [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
5675 VEX_4V;
5676 def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
5677 (ins VR256:$src1, f256mem:$src2),
5678 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5679 [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
5680 VEX_4V;
5681 def mr : AVX8I<opc_mr, MRMDestMem, (outs),
5682 (ins f128mem:$dst, VR128:$src1, VR128:$src2),
5683 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5684 [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
5685 def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
5686 (ins f256mem:$dst, VR256:$src1, VR256:$src2),
5687 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5688 [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
5689 }
5691 defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
5692 int_x86_avx_maskload_ps,
5693 int_x86_avx_maskload_ps_256,
5694 int_x86_avx_maskstore_ps,
5695 int_x86_avx_maskstore_ps_256,
5696 memopv4f32, memopv8f32>;
5697 defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
5698 int_x86_avx_maskload_pd,
5699 int_x86_avx_maskload_pd_256,
5700 int_x86_avx_maskstore_pd,
5701 int_x86_avx_maskstore_pd_256,
5702 memopv2f64, memopv4f64>;
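// For example, "vmaskmovps (%rax), %ymm1, %ymm0" loads only the elements
// whose mask (the sign bit of the corresponding element of %ymm1) is set and
// zeroes the remaining lanes; the store forms write back only the selected
// elements and leave the rest of memory untouched.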
5704 //===----------------------------------------------------------------------===//
5705 // VPERMIL - Permute Single and Double Floating-Point Values
5707 multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
5708 RegisterClass RC, X86MemOperand x86memop_f,
5709 X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
5710 Intrinsic IntVar, Intrinsic IntImm> {
5711 def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
5712 (ins RC:$src1, RC:$src2),
5713 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5714 [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
5715 def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
5716 (ins RC:$src1, x86memop_i:$src2),
5717 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5718 [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;
5720 def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
5721 (ins RC:$src1, i8imm:$src2),
5722 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5723 [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
5724 def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
5725 (ins x86memop_f:$src1, i8imm:$src2),
5726 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5727 [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
5728 }
5730 defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
5731 memopv4f32, memopv4i32,
5732 int_x86_avx_vpermilvar_ps,
5733 int_x86_avx_vpermil_ps>;
5734 defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
5735 memopv8f32, memopv8i32,
5736 int_x86_avx_vpermilvar_ps_256,
5737 int_x86_avx_vpermil_ps_256>;
5738 defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
5739 memopv2f64, memopv2i64,
5740 int_x86_avx_vpermilvar_pd,
5741 int_x86_avx_vpermil_pd>;
5742 defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
5743 memopv4f64, memopv4i64,
5744 int_x86_avx_vpermilvar_pd_256,
5745 int_x86_avx_vpermil_pd_256>;
5747 def : Pat<(v8f32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
5748 (VPERMILPSYri VR256:$src1, imm:$imm)>;
5749 def : Pat<(v4f64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
5750 (VPERMILPDYri VR256:$src1, imm:$imm)>;
5751 def : Pat<(v8i32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
5752 (VPERMILPSYri VR256:$src1, imm:$imm)>;
5753 def : Pat<(v4i64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
5754 (VPERMILPDYri VR256:$src1, imm:$imm)>;
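// For example, "vpermilps $0x1b, %xmm1, %xmm0" reverses the four
// single-precision elements of %xmm1 (0x1b selects elements 3,2,1,0), while
// the rr/rm forms take per-element selectors from a second vector operand.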
5756 //===----------------------------------------------------------------------===//
5757 // VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
5759 def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
5760 (ins VR256:$src1, VR256:$src2, i8imm:$src3),
5761 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5763 def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
5764 (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
5765 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5768 def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
5769 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5770 def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
5771 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5772 def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
5773 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5775 def : Pat<(int_x86_avx_vperm2f128_ps_256
5776 VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
5777 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5778 def : Pat<(int_x86_avx_vperm2f128_pd_256
5779 VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
5780 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5781 def : Pat<(int_x86_avx_vperm2f128_si_256
5782 VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
5783 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5785 def : Pat<(v8f32 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5786 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
5787 def : Pat<(v8i32 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5788 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
5789 def : Pat<(v4i64 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5790 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
5791 def : Pat<(v4f64 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5792 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
5793 def : Pat<(v32i8 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5794 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
5795 def : Pat<(v16i16 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
5796 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
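// For example, "vperm2f128 $0x01, %ymm0, %ymm0, %ymm0" swaps the two 128-bit
// halves of %ymm0: bits [1:0] and [5:4] of the immediate choose which source
// half supplies the low and high 128 bits of the result.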
5798 //===----------------------------------------------------------------------===//
5799 // VZERO - Zero YMM registers
5801 // Zero All YMM registers
5802 def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
5803 [(int_x86_avx_vzeroall)]>, VEX, VEX_L, Requires<[HasAVX]>;
5805 // Zero Upper bits of YMM registers
5806 def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
5807 [(int_x86_avx_vzeroupper)]>, VEX, Requires<[HasAVX]>;
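// Note: vzeroupper is normally emitted before calls into legacy SSE code and
// before returns to avoid the AVX-to-SSE transition penalty; vzeroall clears
// the full YMM registers instead of just their upper halves.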
5809 //===----------------------------------------------------------------------===//
5810 // SSE Shuffle pattern fragments
5811 //===----------------------------------------------------------------------===//
5813 // This is part of a "work in progress" refactoring. The idea is that all
5814 // vector shuffles are going to be translated into target-specific nodes and
5815 // directly matched by the patterns below (which can be changed along the way).
5816 // The AVX versions of some, but not all, of them are described here, and more
5817 // should come in the near future.
5819 // Shuffle with PSHUFD instruction folding loads. The first two patterns match
5820 // SSE2 loads, which are always promoted to v2i64. The last one should match
5821 // the SSE1 case, where the only legal load is v4f32, but there is no PSHUFD
5822 // in SSE1, so how did this ever work? Anyway, the pattern will remain here until
5823 // we investigate further.
5824 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
5825 (i8 imm:$imm))),
5826 (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>;
5827 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
5828 (i8 imm:$imm))),
5829 (PSHUFDmi addr:$src1, imm:$imm)>;
5830 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
5831 (i8 imm:$imm))),
5832 (PSHUFDmi addr:$src1, imm:$imm)>; // FIXME: has this ever worked?
5834 // Shuffle with PSHUFD instruction.
5835 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5836 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
5837 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5838 (PSHUFDri VR128:$src1, imm:$imm)>;
5840 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5841 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
5842 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5843 (PSHUFDri VR128:$src1, imm:$imm)>;
5845 // Shuffle with SHUFPD instruction.
5846 def : Pat<(v2f64 (X86Shufps VR128:$src1,
5847 (memopv2f64 addr:$src2), (i8 imm:$imm))),
5848 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5849 def : Pat<(v2f64 (X86Shufps VR128:$src1,
5850 (memopv2f64 addr:$src2), (i8 imm:$imm))),
5851 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
5853 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5854 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5855 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5856 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
5858 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5859 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5860 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5861 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
5863 // Shuffle with SHUFPS instruction.
5864 def : Pat<(v4f32 (X86Shufps VR128:$src1,
5865 (memopv4f32 addr:$src2), (i8 imm:$imm))),
5866 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5867 def : Pat<(v4f32 (X86Shufps VR128:$src1,
5868 (memopv4f32 addr:$src2), (i8 imm:$imm))),
5869 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
5871 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5872 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5873 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5874 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
5876 def : Pat<(v4i32 (X86Shufps VR128:$src1,
5877 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
5878 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5879 def : Pat<(v4i32 (X86Shufps VR128:$src1,
5880 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
5881 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
5883 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5884 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5885 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5886 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
5888 // Shuffle with MOVHLPS instruction
5889 def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
5890 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
5891 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
5892 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
5894 // Shuffle with MOVDDUP instruction
5895 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5896 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5897 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5898 (MOVDDUPrm addr:$src)>;
5900 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
5901 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5902 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
5903 (MOVDDUPrm addr:$src)>;
5905 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
5906 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5907 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
5908 (MOVDDUPrm addr:$src)>;
5910 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
5911 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5912 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
5913 (MOVDDUPrm addr:$src)>;
5915 def : Pat<(X86Movddup (bc_v2f64
5916 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5917 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5918 def : Pat<(X86Movddup (bc_v2f64
5919 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5920 (MOVDDUPrm addr:$src)>;
5923 // Shuffle with UNPCKLPS
5924 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
5925 (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5926 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
5927 (UNPCKLPSrm VR128:$src1, addr:$src2)>;
5929 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
5930 (VUNPCKLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5931 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
5932 (UNPCKLPSrr VR128:$src1, VR128:$src2)>;
5934 // Shuffle with VUNPCKLPSY
5935 def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, (memopv8f32 addr:$src2))),
5936 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
5937 def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
5938 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
5939 def : Pat<(v8i32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
5940 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
5941 def : Pat<(v8i32 (X86Unpcklpsy VR256:$src1, (memopv8i32 addr:$src2))),
5942 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
5944 // Shuffle with UNPCKHPS
5945 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
5946 (VUNPCKHPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5947 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
5948 (UNPCKHPSrm VR128:$src1, addr:$src2)>;
5950 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
5951 (VUNPCKHPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5952 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
5953 (UNPCKHPSrr VR128:$src1, VR128:$src2)>;
5955 // Shuffle with VUNPCKHPSY
5956 def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, (memopv8f32 addr:$src2))),
5957 (VUNPCKHPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
5958 def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
5959 (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
5961 def : Pat<(v8i32 (X86Unpckhpsy VR256:$src1, (memopv8i32 addr:$src2))),
5962 (VUNPCKHPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
5963 def : Pat<(v8i32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
5964 (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
5966 // Shuffle with UNPCKLPD
5967 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
5968 (VUNPCKLPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5969 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
5970 (UNPCKLPDrm VR128:$src1, addr:$src2)>;
5972 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
5973 (VUNPCKLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5974 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
5975 (UNPCKLPDrr VR128:$src1, VR128:$src2)>;
5977 // Shuffle with VUNPCKLPDY
5978 def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, (memopv4f64 addr:$src2))),
5979 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
5980 def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
5981 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
5983 def : Pat<(v4i64 (X86Unpcklpdy VR256:$src1, (memopv4i64 addr:$src2))),
5984 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
5985 def : Pat<(v4i64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
5986 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
5988 // Shuffle with UNPCKHPD
5989 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
5990 (VUNPCKHPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5991 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
5992 (UNPCKHPDrm VR128:$src1, addr:$src2)>;
5994 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
5995 (VUNPCKHPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5996 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
5997 (UNPCKHPDrr VR128:$src1, VR128:$src2)>;
5999 // Shuffle with VUNPCKHPDY
6000 def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, (memopv4f64 addr:$src2))),
6001 (VUNPCKHPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
6002 def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
6003 (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
6004 def : Pat<(v4i64 (X86Unpckhpdy VR256:$src1, (memopv4i64 addr:$src2))),
6005 (VUNPCKHPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
6006 def : Pat<(v4i64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
6007 (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
6009 // Shuffle with MOVLHPS
6010 def : Pat<(X86Movlhps VR128:$src1,
6011 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
6012 (MOVHPSrm VR128:$src1, addr:$src2)>;
6013 def : Pat<(X86Movlhps VR128:$src1,
6014 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
6015 (MOVHPSrm VR128:$src1, addr:$src2)>;
6016 def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
6017 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
6018 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
6019 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
6020 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
6021 (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
6023 // FIXME: Instead of X86Movddup, there should be an X86Unpcklpd here; the problem
6024 // is during lowering, where it's not possible to recognize the load fold because
6025 // it has two uses through a bitcast. One use disappears at isel time and the
6026 // fold opportunity reappears.
6027 def : Pat<(v2f64 (X86Movddup VR128:$src)),
6028 (UNPCKLPDrr VR128:$src, VR128:$src)>;
6030 // Shuffle with MOVLHPD
6031 def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
6032 (scalar_to_vector (loadf64 addr:$src2)))),
6033 (MOVHPDrm VR128:$src1, addr:$src2)>;
6035 // FIXME: Instead of X86Unpcklpd, there should be an X86Movlhpd here; the problem
6036 // is during lowering, where it's not possible to recognize the load fold because
6037 // it has two uses through a bitcast. One use disappears at isel time and the
6038 // fold opportunity reappears.
6039 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
6040 (scalar_to_vector (loadf64 addr:$src2)))),
6041 (MOVHPDrm VR128:$src1, addr:$src2)>;
6043 // Shuffle with MOVSS
6044 def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
6045 (MOVSSrr VR128:$src1, FR32:$src2)>;
6046 def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
6047 (MOVSSrr (v4i32 VR128:$src1),
6048 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
6049 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
6050 (MOVSSrr (v4f32 VR128:$src1),
6051 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
6053 // Shuffle with MOVSD
6054 def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
6055 (MOVSDrr VR128:$src1, FR64:$src2)>;
6056 def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
6057 (MOVSDrr (v2i64 VR128:$src1),
6058 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
6059 def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
6060 (MOVSDrr (v2f64 VR128:$src1),
6061 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
6062 def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
6063 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
6064 def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
6065 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
6067 // Shuffle with PSHUFHW
6068 def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
6069 (PSHUFHWri VR128:$src, imm:$imm)>;
6070 def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
6071 (PSHUFHWmi addr:$src, imm:$imm)>;
6073 // Shuffle with PSHUFLW
6074 def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
6075 (PSHUFLWri VR128:$src, imm:$imm)>;
6076 def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
6077 (PSHUFLWmi addr:$src, imm:$imm)>;
6079 // Shuffle with MOVLPS
6080 def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
6081 (MOVLPSrm VR128:$src1, addr:$src2)>;
6082 def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
6083 (MOVLPSrm VR128:$src1, addr:$src2)>;
6084 def : Pat<(X86Movlps VR128:$src1,
6085 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
6086 (MOVLPSrm VR128:$src1, addr:$src2)>;
6087 // FIXME: Instead of an X86Movlps there should be an X86Movsd here; the problem
6088 // is during lowering, where it's not possible to recognize the load fold because
6089 // it has two uses through a bitcast. One use disappears at isel time and the
6090 // fold opportunity reappears.
6091 def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
6092 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
6094 def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
6095 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
6097 // Shuffle with MOVLPD
6098 def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
6099 (MOVLPDrm VR128:$src1, addr:$src2)>;
6100 def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
6101 (MOVLPDrm VR128:$src1, addr:$src2)>;
6102 def : Pat<(v2f64 (X86Movlpd VR128:$src1,
6103 (scalar_to_vector (loadf64 addr:$src2)))),
6104 (MOVLPDrm VR128:$src1, addr:$src2)>;
6106 // Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD
6107 def : Pat<(store (f64 (vector_extract
6108 (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))),addr:$dst),
6109 (MOVHPSmr addr:$dst, VR128:$src)>;
6110 def : Pat<(store (f64 (vector_extract
6111 (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))),addr:$dst),
6112 (MOVHPDmr addr:$dst, VR128:$src)>;
6114 def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),addr:$src1),
6115 (MOVLPSmr addr:$src1, VR128:$src2)>;
6116 def : Pat<(store (v4i32 (X86Movlps
6117 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
6118 (MOVLPSmr addr:$src1, VR128:$src2)>;
6120 def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
6121 (MOVLPDmr addr:$src1, VR128:$src2)>;
6122 def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
6123 (MOVLPDmr addr:$src1, VR128:$src2)>;