//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instruction Classes
//===----------------------------------------------------------------------===//

/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           bit Is2Addr = 1> {
  let isCommutable = 1 in {
    def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
}
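
// Note: in this and the following multiclasses, !if(Is2Addr, ...) picks the
// two-operand SSE asm string (where $dst is tied to $src1) when Is2Addr is
// set, and the three-operand AVX/VEX asm string otherwise.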

/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat,
                               bit Is2Addr = 1> {
  def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))]>;
  def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
                 SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, mem_cpat:$src2))]>;
}
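
// The intrinsic is resolved by name at TableGen time: !cast<Intrinsic> looks
// up the string built by !strconcat above. For example, SSEVer "2",
// OpcodeStr "add", FPSizeStr "_sd" would resolve to int_x86_sse2_add_sd.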

/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, ValueType vt,
                           X86MemOperand x86memop, PatFrag mem_frag,
                           Domain d, bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
}

/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                      string OpcodeStr, X86MemOperand x86memop,
                                      list<dag> pat_rr, list<dag> pat_rm,
                                      bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rr, d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       pat_rm, d>;
}

/// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               X86MemOperand x86memop, PatFrag mem_frag,
                               Domain d, bit Is2Addr = 1> {
  def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))], d>;
  def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, (mem_frag addr:$src2)))], d>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Instructions
//===----------------------------------------------------------------------===//

class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
      SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
         [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;

// Loading from memory automatically zeroes the upper bits of the register.
class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
                    PatFrag mem_pat, string OpcodeStr> :
      SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (mem_pat addr:$src))]>;

// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
// is used instead. Register-to-register movss/movsd is not modeled as an
// INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
// in terms of a copy, and, as just mentioned, we don't use movss/movsd for copies.
def VMOVSSrr : sse12_move_rr<FR32, v4f32,
                "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
def VMOVSDrr : sse12_move_rr<FR64, v2f64,
                "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;

  let AddedComplexity = 20 in
    def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
}

let Constraints = "$src1 = $dst" in {
  def MOVSSrr : sse12_move_rr<FR32, v4f32,
                 "movss\t{$src2, $dst|$dst, $src2}">, XS;
  def MOVSDrr : sse12_move_rr<FR64, v2f64,
                 "movsd\t{$src2, $dst|$dst, $src2}">, XD;
}

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;

  let AddedComplexity = 20 in
    def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
}
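
// AddedComplexity raises a pattern's priority during instruction selection,
// so the more specific forms below are tried before plainer patterns that
// would otherwise match the same DAG.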

let AddedComplexity = 15 in {
// Extract the low 32-bit value from one vector and insert it into another.
def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4f32 VR128:$src1),
                   (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
// Extract the low 64-bit value from one vector and insert it into another.
def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2f64 VR128:$src1),
                   (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
}

// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
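
// These INSERT_SUBREG patterns let a scalar FP value be used where a vector
// is expected without emitting any instruction: the scalar lands in the low
// subregister and the remaining lanes are left undefined (IMPLICIT_DEF).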

let AddedComplexity = 20 in {
let Predicates = [HasSSE1] in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
}
let Predicates = [HasSSE2] in {
  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
}
}
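
// Note on SUBREG_TO_REG: the (i32 0)/(i64 0) operand asserts that the
// instruction producing the subregister already zeroed the upper bits, so
// instruction selection can treat the full-width register as zero-extended
// without emitting any extra zeroing code.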

let AddedComplexity = 20, Predicates = [HasAVX] in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  // Represent the same patterns above but in the form they appear for
  // 256-bit types.
  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                    (v4f32 (scalar_to_vector (loadf32 addr:$src))), (i32 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                    (v2f64 (scalar_to_vector (loadf64 addr:$src))), (i32 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_sd)>;
}

// Store scalar value to memory.
def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>;
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>;

def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>, XS, VEX;
def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>, XD, VEX;

// Extract and store.
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSSmr addr:$dst,
                   (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSDmr addr:$dst,
                   (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;

// Move Aligned/Unaligned floating point values
multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            bit IsReMaterializable = 1> {
let neverHasSideEffects = 1 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], d>;
}
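
// neverHasSideEffects lets unused register-to-register moves be deleted,
// while canFoldAsLoad/isReMaterializable allow the load form to be folded
// into its users or cheaply recomputed instead of spilled and reloaded.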

defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                                "movaps", SSEPackedSingle>, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                                "movapd", SSEPackedDouble>, OpSize, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                                "movups", SSEPackedSingle>, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                                "movupd", SSEPackedDouble, 0>, OpSize, VEX;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
                                 "movaps", SSEPackedSingle>, VEX;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
                                 "movapd", SSEPackedDouble>, OpSize, VEX;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
                                 "movups", SSEPackedSingle>, VEX;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                                 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                               "movaps", SSEPackedSingle>, TB;
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                               "movapd", SSEPackedDouble>, TB, OpSize;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                               "movups", SSEPackedSingle>, TB;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                               "movupd", SSEPackedDouble, 0>, TB, OpSize;

def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movaps\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movapd\t{$src, $dst|$dst, $src}",
                     [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movups\t{$src, $dst|$dst, $src}",
                     [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                     "movupd\t{$src, $dst|$dst, $src}",
                     [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movaps\t{$src, $dst|$dst, $src}",
                      [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movapd\t{$src, $dst|$dst, $src}",
                      [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movups\t{$src, $dst|$dst, $src}",
                      [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                      "movupd\t{$src, $dst|$dst, $src}",
                      [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;

def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
          (VMOVUPSYmr addr:$dst, VR256:$src)>;

def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
          (VMOVUPDYmr addr:$dst, VR256:$src)>;
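
// The 256-bit unaligned load/store intrinsics need no dedicated instructions;
// the patterns above simply reuse the unaligned VMOVUPSY/VMOVUPDY forms.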

def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>;

// Intrinsic forms of MOVUPS/D load and store
def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
                         (ins f128mem:$dst, VR128:$src),
                         "movups\t{$src, $dst|$dst, $src}",
                         [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
                         (ins f128mem:$dst, VR128:$src),
                         "movupd\t{$src, $dst|$dst, $src}",
                         [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;

def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movups\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movupd\t{$src, $dst|$dst, $src}",
                       [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;

// Move Low/High packed floating point values
multiclass sse12_mov_hilo_packed<bits<8> opc, RegisterClass RC,
                                 PatFrag mov_frag, string base_opc,
                                 string asm_opr> {
  def PSrm : PI<opc, MRMSrcMem,
                (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
                !strconcat(base_opc, "s", asm_opr),
                [(set RC:$dst,
                      (mov_frag RC:$src1,
                       (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
                SSEPackedSingle>, TB;

  def PDrm : PI<opc, MRMSrcMem,
                (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
                !strconcat(base_opc, "d", asm_opr),
                [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
                                       (scalar_to_vector (loadf64 addr:$src2)))))],
                SSEPackedDouble>, TB, OpSize;
}

let AddedComplexity = 20 in {
  defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
  defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                "\t{$src2, $dst|$dst, $src2}">;
  defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                "\t{$src2, $dst|$dst, $src2}">;
}

def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movlps\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                   (iPTR 0))), addr:$dst)]>, VEX;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movlpd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract (v2f64 VR128:$src),
                                   (iPTR 0))), addr:$dst)]>, VEX;
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>;

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0, so the non-store version isn't too horrible.
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movhps\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract
                                   (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                           (undef)), (iPTR 0))), addr:$dst)]>,
                     VEX;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                     "movhpd\t{$src, $dst|$dst, $src}",
                     [(store (f64 (vector_extract
                                   (v2f64 (unpckh VR128:$src, (undef))),
                                   (iPTR 0))), addr:$dst)]>,
                     VEX;
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>;

let AddedComplexity = 20 in {
  def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
                        (ins VR128:$src1, VR128:$src2),
                        "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                              (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
                        VEX_4V;
  def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
                        (ins VR128:$src1, VR128:$src2),
                        "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst,
                              (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
                        VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                            (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
  def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
                      (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                            (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
}

let Predicates = [HasAVX] in {
  // MOVHPS patterns
  def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;

  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (movddup VR128:$src, (undef))),
              (VMOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
    def : Pat<(v2i64 (movddup VR128:$src, (undef))),
              (VMOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;

    // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
    def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
              (VMOVLHPSrr VR128:$src1, VR128:$src2)>;

    def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
              (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
              (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
              (VMOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
  }

  let AddedComplexity = 20 in {
    // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
    def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
              (VMOVHLPSrr VR128:$src1, VR128:$src2)>;

    // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
    def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
              (VMOVHLPSrr VR128:$src1, VR128:$src1)>;
    def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
              (VMOVHLPSrr VR128:$src1, VR128:$src1)>;

    def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
              (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
              (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
  }
}

let Predicates = [HasSSE1] in {
  // MOVHPS patterns
  def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;

  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (movddup VR128:$src, (undef))),
              (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
    def : Pat<(v2i64 (movddup VR128:$src, (undef))),
              (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;

    // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
    def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
              (MOVLHPSrr VR128:$src1, VR128:$src2)>;

    def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
              (MOVLHPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
              (MOVLHPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
              (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
  }

  let AddedComplexity = 20 in {
    // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
    def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
              (MOVHLPSrr VR128:$src1, VR128:$src2)>;

    // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
    def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
              (MOVHLPSrr VR128:$src1, VR128:$src1)>;
    def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
              (MOVHLPSrr VR128:$src1, VR128:$src1)>;

    def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
              (MOVHLPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
              (MOVHLPSrr VR128:$src1, VR128:$src2)>;
  }
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Conversion Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                       SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                       string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
}

multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm, []>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm, []>;
}
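
// Note: the "_np" (no-pattern) variants above carry empty pattern lists; they
// exist only so the assembler can match these operand forms, while codegen
// selects the pattern-bearing or intrinsic variants instead.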

multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                       SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                       string asm, Domain d> {
  def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
  def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
}

multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
}

defm VCVTTSS2SI   : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                                "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                                "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
                                VEX_W;
defm VCVTTSD2SI   : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                                "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                                "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
                                VEX, VEX_W;

// The assembler can recognize rr 64-bit instructions by seeing an rXX
// register, but the same isn't true when only using memory operands;
// provide other assembly "l" and "q" forms to address this explicitly
// where appropriate to do so.
defm VCVTSI2SS   : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
                                  VEX_4V;
defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
                                  VEX_4V, VEX_W;
defm VCVTSI2SD   : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
                                  VEX_4V;
defm VCVTSI2SDL  : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
                                  VEX_4V;
defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
                                  VEX_4V, VEX_W;

let Predicates = [HasAVX] in {
  def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
            (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
            (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;

  def : Pat<(f32 (sint_to_fp GR32:$src)),
            (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f32 (sint_to_fp GR64:$src)),
            (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
  def : Pat<(f64 (sint_to_fp GR32:$src)),
            (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f64 (sint_to_fp GR64:$src)),
            (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
}

defm CVTTSS2SI   : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                               "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                               "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTTSD2SI   : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                               "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                               "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
defm CVTSI2SS    : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
                               "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
defm CVTSI2SS64  : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
                               "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTSI2SD    : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
                               "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
defm CVTSI2SD64  : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
                               "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;

// Conversion Instructions Intrinsics - Match intrinsics which expect MM
// and/or XMM operand(s).

multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
                          string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
}

multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
                    RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
                    PatFrag ld_frag, string asm, bit Is2Addr = 1> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
}

defm Int_VCVTSD2SI   : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                                      f128mem, load, "cvtsd2si">, XD, VEX;
defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
                                      int_x86_sse2_cvtsd2si64, f128mem, load,
                                      "cvtsd2si">, XD, VEX, VEX_W;

// FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_
// prefixes. Get rid of this hack or rename the intrinsics; there are several
// instructions that only match with the intrinsic form, so why create
// duplicates just to let them be recognized by the assembler?
defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
                       "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
defm VCVTSD2SI64   : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
                       "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
defm CVTSD2SI   : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                                 f128mem, load, "cvtsd2si{l}">, XD;
defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
                                 f128mem, load, "cvtsd2si{q}">, XD, REX_W;

defm Int_VCVTSI2SS   : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
          int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
          int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
          VEX_W;
defm Int_VCVTSI2SD   : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
          int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
          int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
          VEX_4V, VEX_W;

let Constraints = "$src1 = $dst" in {
  defm Int_CVTSI2SS   : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                          int_x86_sse_cvtsi2ss, i32mem, loadi32,
                          "cvtsi2ss">, XS;
  defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                          int_x86_sse_cvtsi642ss, i64mem, loadi64,
                          "cvtsi2ss{q}">, XS, REX_W;
  defm Int_CVTSI2SD   : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                          int_x86_sse2_cvtsi2sd, i32mem, loadi32,
                          "cvtsi2sd">, XD;
  defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                          int_x86_sse2_cvtsi642sd, i64mem, loadi64,
                          "cvtsi2sd">, XD, REX_W;
}

// Aliases for intrinsics
defm Int_VCVTTSS2SI   : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                       f32mem, load, "cvttss2si">, XS, VEX;
defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                       int_x86_sse_cvttss2si64, f32mem, load,
                                       "cvttss2si">, XS, VEX, VEX_W;
defm Int_VCVTTSD2SI   : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                       f128mem, load, "cvttsd2si">, XD, VEX;
defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                       int_x86_sse2_cvttsd2si64, f128mem, load,
                                       "cvttsd2si">, XD, VEX, VEX_W;
defm Int_CVTTSS2SI   : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                      f32mem, load, "cvttss2si">, XS;
defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                      int_x86_sse_cvttss2si64, f32mem, load,
                                      "cvttss2si{q}">, XS, REX_W;
defm Int_CVTTSD2SI   : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                      f128mem, load, "cvttsd2si">, XD;
defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                      int_x86_sse2_cvttsd2si64, f128mem, load,
                                      "cvttsd2si{q}">, XD, REX_W;

let Pattern = []<dag> in {
defm VCVTSS2SI   : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
                               "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
                               "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
                               VEX_W;
defm VCVTDQ2PS   : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
                               "cvtdq2ps\t{$src, $dst|$dst, $src}",
                               SSEPackedSingle>, TB, VEX;
defm VCVTDQ2PSY  : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
                               "cvtdq2ps\t{$src, $dst|$dst, $src}",
                               SSEPackedSingle>, TB, VEX;
}

let Pattern = []<dag> in {
defm CVTSS2SI   : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
                              "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
                              "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTDQ2PS   : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
                              "cvtdq2ps\t{$src, $dst|$dst, $src}",
                              SSEPackedSingle>, TB; /* PD SSE3 form is available */
}

let Predicates = [HasSSE1] in {
  def : Pat<(int_x86_sse_cvtss2si VR128:$src),
            (CVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
            (CVTSS2SIrm addr:$src)>;
  def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
            (CVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
            (CVTSS2SI64rm addr:$src)>;
}

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse_cvtss2si VR128:$src),
            (VCVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
            (VCVTSS2SIrm addr:$src)>;
  def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
            (VCVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
            (VCVTSS2SI64rm addr:$src)>;
}

// Convert scalar double to scalar single
def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
                       (ins FR64:$src1, FR64:$src2),
                       "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                       VEX_4V;
def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
                    (ins FR64:$src1, f64mem:$src2),
                    "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
          Requires<[HasAVX]>;

def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
                     "cvtsd2ss\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (fround FR64:$src))]>;
def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
                   "cvtsd2ss\t{$src, $dst|$dst, $src}",
                   [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
                   Requires<[HasSSE2, OptForSize]>;

defm Int_VCVTSD2SS : sse12_cvt_sint_3addr<0x5A, VR128, VR128,
                       int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
                       XS, VEX_4V;
let Constraints = "$src1 = $dst" in
defm Int_CVTSD2SS : sse12_cvt_sint_3addr<0x5A, VR128, VR128,
                      int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;

// Convert scalar single to scalar double
// SSE2 instructions with XS prefix
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
                    (ins FR32:$src1, FR32:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XS, Requires<[HasAVX]>, VEX_4V;
def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
                    (ins FR32:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;

let Predicates = [HasAVX] in {
  def : Pat<(f64 (fextend FR32:$src)),
            (VCVTSS2SDrr FR32:$src, FR32:$src)>;
  def : Pat<(fextend (loadf32 addr:$src)),
            (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(extloadf32 addr:$src),
            (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
}

def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (fextend FR32:$src))]>, XS,
                   Requires<[HasSSE2]>;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
                   Requires<[HasSSE2, OptForSize]>;

def Int_VCVTSS2SDrr : I<0x5A, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                           VR128:$src2))]>, XS, VEX_4V,
                        Requires<[HasAVX]>;
def Int_VCVTSS2SDrm : I<0x5A, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                        "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                           (load addr:$src2)))]>, XS, VEX_4V,
                        Requires<[HasAVX]>;
let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
def Int_CVTSS2SDrr : I<0x5A, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                       "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                          VR128:$src2))]>, XS,
                       Requires<[HasSSE2]>;
def Int_CVTSS2SDrm : I<0x5A, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                       "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                       [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                          (load addr:$src2)))]>, XS,
                       Requires<[HasSSE2]>;
}

def : Pat<(extloadf32 addr:$src),
          (CVTSS2SDrr (MOVSSrm addr:$src))>,
          Requires<[HasSSE2, OptForSpeed]>;
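
// When optimizing for speed, extending f32 loads are instead matched as a
// plain scalar load (MOVSSrm) followed by a register-register convert; the
// folded-load CVTSS2SDrm form above is reserved for OptForSize.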

// Convert doubleword to packed single/double fp
// SSE2 instructions without OpSize prefix
def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                        TB, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                           (bitconvert (memopv2i64 addr:$src))))]>,
                        TB, VEX, Requires<[HasAVX]>;
def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                       TB, Requires<[HasSSE2]>;
def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                          (bitconvert (memopv2i64 addr:$src))))]>,
                       TB, Requires<[HasSSE2]>;

// FIXME: why is the non-intrinsic version described as SSE3?
// SSE2 instructions with XS prefix
def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                        XS, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                        "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                           (bitconvert (memopv2i64 addr:$src))))]>,
                        XS, VEX, Requires<[HasAVX]>;
def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                       XS, Requires<[HasSSE2]>;
def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                          (bitconvert (memopv2i64 addr:$src))))]>,
                       XS, Requires<[HasSSE2]>;

// Convert packed single/double fp to doubleword
def VCVTPS2DQrr  : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQrm  : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def CVTPS2DQrr   : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
def CVTPS2DQrm   : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}", []>;

def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                           "cvtps2dq\t{$src, $dst|$dst, $src}",
                           [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
                           VEX;
def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
                           (ins f128mem:$src),
                           "cvtps2dq\t{$src, $dst|$dst, $src}",
                           [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                              (memop addr:$src)))]>, VEX;
def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                            (memop addr:$src)))]>;

// SSE2 packed instructions with XD prefix
def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                        XD, VEX, Requires<[HasAVX]>;
def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                           (memop addr:$src)))]>,
                        XD, VEX, Requires<[HasAVX]>;
def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                       XD, Requires<[HasSSE2]>;
def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (memop addr:$src)))]>,
                       XD, Requires<[HasSSE2]>;

// Convert with truncation packed single/double fp to doubleword
// SSE2 packed instructions with XS prefix
def VCVTTPS2DQrr  : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQrm  : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                            (int_x86_sse2_cvttps2dq VR128:$src))]>;
def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                            (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;

def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "vcvttps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                               (int_x86_sse2_cvttps2dq VR128:$src))]>,
                         XS, VEX, Requires<[HasAVX]>;
def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "vcvttps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvttps2dq
                                            (memop addr:$src)))]>,
                         XS, VEX, Requires<[HasAVX]>;

def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
          (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
          (CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;

def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
          (Int_VCVTDQ2PSrr VR128:$src)>, Requires<[HasAVX]>;
def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
          (VCVTTPS2DQrr VR128:$src)>, Requires<[HasAVX]>;
def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
          (VCVTDQ2PSYrr VR256:$src)>, Requires<[HasAVX]>;
def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
          (VCVTTPS2DQYrr VR256:$src)>, Requires<[HasAVX]>;

def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
                            (ins VR128:$src),
                            "cvttpd2dq\t{$src, $dst|$dst, $src}",
                            [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
                            VEX;
def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
                            (ins f128mem:$src),
                            "cvttpd2dq\t{$src, $dst|$dst, $src}",
                            [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                               (memop addr:$src)))]>, VEX;
def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                         (memop addr:$src)))]>;

// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTTPD2DQrr   : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                         "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                         "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;

// Convert packed single to packed double
let Predicates = [HasAVX] in {
  // SSE2 instructions without OpSize prefix
  def VCVTPS2PDrr  : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
  def VCVTPS2PDrm  : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
  def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                       "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
  def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
                       "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
}
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;

def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vcvtps2pd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                        VEX, Requires<[HasAVX]>;
def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                        "vcvtps2pd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                           (load addr:$src)))]>,
                        VEX, Requires<[HasAVX]>;
def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                       TB, Requires<[HasSSE2]>;
def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                       TB, Requires<[HasSSE2]>;

// Convert packed double to packed single
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTPD2PSrr   : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                        "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                        "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;

def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                           "cvtpd2ps\t{$src, $dst|$dst, $src}",
                           [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
                           (ins f128mem:$src),
                           "cvtpd2ps\t{$src, $dst|$dst, $src}",
                           [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                              (memop addr:$src)))]>;
def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                            (memop addr:$src)))]>;

// AVX 256-bit register conversion intrinsics
// FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
// whenever possible to avoid declaring two versions of each one.
def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
          (VCVTDQ2PSYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
          (VCVTDQ2PSYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
          (VCVTPD2PSYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
          (VCVTPD2PSYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
          (VCVTPS2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
          (VCVTPS2DQYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
          (VCVTPS2PDYrr VR128:$src)>;
def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
          (VCVTPS2PDYrm addr:$src)>;

def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
          (VCVTTPD2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
          (VCVTTPD2DQYrm addr:$src)>;

def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
          (VCVTTPS2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
          (VCVTTPS2DQYrm addr:$src)>;

// Match fround and fextend for 128/256-bit conversions
def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
          (VCVTPD2PSYrr VR256:$src)>;
def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
          (VCVTPD2PSYrm addr:$src)>;

def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
          (VCVTPS2PDYrr VR128:$src)>;
def : Pat<(v4f64 (fextend (loadv4f32 addr:$src))),
          (VCVTPS2PDYrm addr:$src)>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Compare Instructions
//===----------------------------------------------------------------------===//

// sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
                            string asm, string asm_alt> {
  let isAsmParserOnly = 1 in {
    def rr : SIi8<0xC2, MRMSrcReg,
                  (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
                  asm, []>;
    def rm : SIi8<0xC2, MRMSrcMem,
                  (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
                  asm, []>;
  }

  // Accept explicit immediate argument form instead of comparison code.
  def rr_alt : SIi8<0xC2, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
                    asm_alt, []>;
  def rm_alt : SIi8<0xC2, MRMSrcMem,
                    (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
                    asm_alt, []>;
}
1235 let neverHasSideEffects = 1 in {
1236 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
1237 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1238 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1240 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
1241 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1242 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1246 let Constraints = "$src1 = $dst" in {
1247 def CMPSSrr : SIi8<0xC2, MRMSrcReg,
1248 (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, SSECC:$cc),
1249 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
1250 [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), FR32:$src2, imm:$cc))]>, XS;
1251 def CMPSSrm : SIi8<0xC2, MRMSrcMem,
1252 (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2, SSECC:$cc),
1253 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
1254 [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), (loadf32 addr:$src2), imm:$cc))]>, XS;
1255 def CMPSDrr : SIi8<0xC2, MRMSrcReg,
1256 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, SSECC:$cc),
1257 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
1258 [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), FR64:$src2, imm:$cc))]>, XD;
1259 def CMPSDrm : SIi8<0xC2, MRMSrcMem,
1260 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2, SSECC:$cc),
1261 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
1262 [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), (loadf64 addr:$src2), imm:$cc))]>, XD;
1264 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1265 def CMPSSrr_alt : SIi8<0xC2, MRMSrcReg,
1266 (outs FR32:$dst), (ins FR32:$src1, FR32:$src, i8imm:$src2),
1267 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
1268 def CMPSSrm_alt : SIi8<0xC2, MRMSrcMem,
1269 (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, i8imm:$src2),
1270 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
1271 def CMPSDrr_alt : SIi8<0xC2, MRMSrcReg,
1272 (outs FR64:$dst), (ins FR64:$src1, FR64:$src, i8imm:$src2),
1273 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
1274 def CMPSDrm_alt : SIi8<0xC2, MRMSrcMem,
1275 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, i8imm:$src2),
1276 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
1279 multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
1280 Intrinsic Int, string asm> {
1281 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1282 (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
1283 [(set VR128:$dst, (Int VR128:$src1,
1284 VR128:$src, imm:$cc))]>;
1285 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1286 (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
1287 [(set VR128:$dst, (Int VR128:$src1,
1288 (load addr:$src), imm:$cc))]>;
1289 }
1291 // Aliases to match intrinsics which expect XMM operand(s).
1292 defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1293 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
1295 defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1296 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
1298 let Constraints = "$src1 = $dst" in {
1299 defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1300 "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
1301 defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1302 "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
1306 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
1307 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1308 ValueType vt, X86MemOperand x86memop,
1309 PatFrag ld_frag, string OpcodeStr, Domain d> {
1310 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1311 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1312 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
1313 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1314 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1315 [(set EFLAGS, (OpNode (vt RC:$src1),
1316 (ld_frag addr:$src2)))], d>;
1317 }
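// These comparisons write EFLAGS directly: unordered sets ZF=PF=CF=1, equal
// sets only ZF, less-than sets only CF, and greater-than clears all three.
// The unordered forms (UCOMISS/UCOMISD) signal #I only for SNaN operands,
// while the ordered forms (COMISS/COMISD) signal it for QNaNs as well.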
1319 let Defs = [EFLAGS] in {
1320 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1321 "ucomiss", SSEPackedSingle>, VEX;
1322 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1323 "ucomisd", SSEPackedDouble>, OpSize, VEX;
1324 let Pattern = []<dag> in {
1325 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1326 "comiss", SSEPackedSingle>, VEX;
1327 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1328 "comisd", SSEPackedDouble>, OpSize, VEX;
1331 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1332 load, "ucomiss", SSEPackedSingle>, VEX;
1333 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1334 load, "ucomisd", SSEPackedDouble>, OpSize, VEX;
1336 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
1337 load, "comiss", SSEPackedSingle>, VEX;
1338 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
1339 load, "comisd", SSEPackedDouble>, OpSize, VEX;
1340 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1341 "ucomiss", SSEPackedSingle>, TB;
1342 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1343 "ucomisd", SSEPackedDouble>, TB, OpSize;
1345 let Pattern = []<dag> in {
1346 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1347 "comiss", SSEPackedSingle>, TB;
1348 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1349 "comisd", SSEPackedDouble>, TB, OpSize;
1352 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1353 load, "ucomiss", SSEPackedSingle>, TB;
1354 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1355 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
1357 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
1358 "comiss", SSEPackedSingle>, TB;
1359 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
1360 "comisd", SSEPackedDouble>, TB, OpSize;
1361 } // Defs = [EFLAGS]
1363 // sse12_cmp_packed - sse 1 & 2 compare packed instructions
1364 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1365 Intrinsic Int, string asm, string asm_alt,
1366 Domain d> {
1367 let isAsmParserOnly = 1 in {
1368 def rri : PIi8<0xC2, MRMSrcReg,
1369 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1370 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1371 def rmi : PIi8<0xC2, MRMSrcMem,
1372 (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
1373 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1374 }
1376 // Accept explicit immediate argument form instead of comparison code.
1377 def rri_alt : PIi8<0xC2, MRMSrcReg,
1378 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1379 asm_alt, [], d>;
1380 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1381 (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
1382 asm_alt, [], d>;
1383 }
1385 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1386 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1387 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1388 SSEPackedSingle>, VEX_4V;
1389 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1390 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1391 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1392 SSEPackedDouble>, OpSize, VEX_4V;
1393 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
1394 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1395 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1396 SSEPackedSingle>, VEX_4V;
1397 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
1398 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1399 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1400 SSEPackedDouble>, OpSize, VEX_4V;
1401 let Constraints = "$src1 = $dst" in {
1402 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1403 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1404 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1405 SSEPackedSingle>, TB;
1406 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1407 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1408 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1409 SSEPackedDouble>, TB, OpSize;
1410 }
1412 let Predicates = [HasSSE1] in {
1413 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1414 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1415 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1416 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1417 }
1419 let Predicates = [HasSSE2] in {
1420 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1421 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1422 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1423 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1424 }
1426 let Predicates = [HasAVX] in {
1427 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1428 (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1429 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1430 (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1431 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1432 (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1433 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1434 (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1436 def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
1437 (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
1438 def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), (memop addr:$src2), imm:$cc)),
1439 (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
1440 def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
1441 (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
1442 def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), (memop addr:$src2), imm:$cc)),
1443 (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
1444 }
1446 //===----------------------------------------------------------------------===//
1447 // SSE 1 & 2 - Shuffle Instructions
1448 //===----------------------------------------------------------------------===//
1450 /// sse12_shuffle - sse 1 & 2 shuffle instructions
1451 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1452 ValueType vt, string asm, PatFrag mem_frag,
1453 Domain d, bit IsConvertibleToThreeAddress = 0> {
1454 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
1455 (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
1456 [(set RC:$dst, (vt (shufp:$src3
1457 RC:$src1, (mem_frag addr:$src2))))], d>;
1458 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1459 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
1460 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
1461 [(set RC:$dst,
1462 (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
1463 }
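// For reference, the shuffle immediate is a packed field of source-element
// selectors: SHUFPS fills the low two dwords of $dst from $src1 using
// imm[1:0] and imm[3:2], and the high two from $src2 using imm[5:4] and
// imm[7:6]; SHUFPD uses only imm[0] and imm[1], one qword per source. E.g.
//   shufps $0x4e, %xmm1, %xmm0   ; xmm0 = { a2, a3, b0, b1 } (low to high)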
1465 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1466 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1467 memopv4f32, SSEPackedSingle>, TB, VEX_4V;
1468 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
1469 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1470 memopv8f32, SSEPackedSingle>, TB, VEX_4V;
1471 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1472 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1473 memopv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
1474 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
1475 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1476 memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
1478 let Constraints = "$src1 = $dst" in {
1479 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1480 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1481 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1482 TB;
1483 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1484 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1485 memopv2f64, SSEPackedDouble>, TB, OpSize;
1486 }
1488 let Predicates = [HasSSE1] in {
1489 def : Pat<(v4f32 (X86Shufps VR128:$src1,
1490 (memopv4f32 addr:$src2), (i8 imm:$imm))),
1491 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
1492 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1493 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
1494 def : Pat<(v4i32 (X86Shufps VR128:$src1,
1495 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
1496 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
1497 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1498 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
1499 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
1500 // fall back to this for SSE1)
1501 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
1502 (SHUFPSrri VR128:$src2, VR128:$src1,
1503 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1504 // Special unary SHUFPSrri case.
1505 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
1506 (SHUFPSrri VR128:$src1, VR128:$src1,
1507 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1508 }
1510 let Predicates = [HasSSE2] in {
1511 // Special binary v4i32 shuffle cases with SHUFPS.
1512 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
1513 (SHUFPSrri VR128:$src1, VR128:$src2,
1514 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1515 def : Pat<(v4i32 (shufp:$src3 VR128:$src1,
1516 (bc_v4i32 (memopv2i64 addr:$src2)))),
1517 (SHUFPSrmi VR128:$src1, addr:$src2,
1518 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1519 // Special unary SHUFPDrri cases.
1520 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
1521 (SHUFPDrri VR128:$src1, VR128:$src1,
1522 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1523 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
1524 (SHUFPDrri VR128:$src1, VR128:$src1,
1525 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1526 // Special binary v2i64 shuffle cases using SHUFPDrri.
1527 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
1528 (SHUFPDrri VR128:$src1, VR128:$src2,
1529 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1530 // Generic SHUFPD patterns
1531 def : Pat<(v2f64 (X86Shufps VR128:$src1,
1532 (memopv2f64 addr:$src2), (i8 imm:$imm))),
1533 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
1534 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1535 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
1536 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1537 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
1538 }
1540 let Predicates = [HasAVX] in {
1541 def : Pat<(v4f32 (X86Shufps VR128:$src1,
1542 (memopv4f32 addr:$src2), (i8 imm:$imm))),
1543 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
1544 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1545 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
1546 def : Pat<(v4i32 (X86Shufps VR128:$src1,
1547 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
1548 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
1549 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1550 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
1551 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
1552 // fall back to this for SSE1)
1553 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
1554 (VSHUFPSrri VR128:$src2, VR128:$src1,
1555 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1556 // Special unary SHUFPSrri case.
1557 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
1558 (VSHUFPSrri VR128:$src1, VR128:$src1,
1559 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1560 // Special binary v4i32 shuffle cases with SHUFPS.
1561 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
1562 (VSHUFPSrri VR128:$src1, VR128:$src2,
1563 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1564 def : Pat<(v4i32 (shufp:$src3 VR128:$src1,
1565 (bc_v4i32 (memopv2i64 addr:$src2)))),
1566 (VSHUFPSrmi VR128:$src1, addr:$src2,
1567 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1568 // Special unary SHUFPDrri cases.
1569 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
1570 (VSHUFPDrri VR128:$src1, VR128:$src1,
1571 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1572 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
1573 (VSHUFPDrri VR128:$src1, VR128:$src1,
1574 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1575 // Special binary v2i64 shuffle cases using SHUFPDrri.
1576 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
1577 (VSHUFPDrri VR128:$src1, VR128:$src2,
1578 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1579 // Generic VSHUFPD patterns
1580 def : Pat<(v2f64 (X86Shufps VR128:$src1,
1581 (memopv2f64 addr:$src2), (i8 imm:$imm))),
1582 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
1583 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1584 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
1585 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1586 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
1587 }
1589 //===----------------------------------------------------------------------===//
1590 // SSE 1 & 2 - Unpack Instructions
1591 //===----------------------------------------------------------------------===//
1593 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1594 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1595 PatFrag mem_frag, RegisterClass RC,
1596 X86MemOperand x86memop, string asm,
1597 Domain d> {
1598 def rr : PI<opc, MRMSrcReg,
1599 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1600 asm, [(set RC:$dst,
1601 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1602 def rm : PI<opc, MRMSrcMem,
1603 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1604 asm, [(set RC:$dst,
1605 (vt (OpNode RC:$src1,
1606 (mem_frag addr:$src2))))], d>;
1607 }
1609 let AddedComplexity = 10 in {
1610 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1611 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1612 SSEPackedSingle>, VEX_4V;
1613 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1614 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1615 SSEPackedDouble>, OpSize, VEX_4V;
1616 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1617 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1618 SSEPackedSingle>, VEX_4V;
1619 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1620 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1621 SSEPackedDouble>, OpSize, VEX_4V;
1623 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1624 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1625 SSEPackedSingle>, VEX_4V;
1626 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1627 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1628 SSEPackedDouble>, OpSize, VEX_4V;
1629 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1630 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1631 SSEPackedSingle>, VEX_4V;
1632 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1633 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1634 SSEPackedDouble>, OpSize, VEX_4V;
1636 let Constraints = "$src1 = $dst" in {
1637 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1638 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1639 SSEPackedSingle>, TB;
1640 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1641 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1642 SSEPackedDouble>, TB, OpSize;
1643 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1644 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1645 SSEPackedSingle>, TB;
1646 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1647 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1648 SSEPackedDouble>, TB, OpSize;
1649 } // Constraints = "$src1 = $dst"
1650 } // AddedComplexity
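// For reference, the unpack instructions interleave corresponding halves of
// the two sources (elements listed from low to high), e.g. for v4f32:
//   unpcklps: dst = { a0, b0, a1, b1 }   unpckhps: dst = { a2, b2, a3, b3 }
//   unpcklpd: dst = { a0, b0 }           unpckhpd: dst = { a1, b1 }
// where a is $src1 and b is $src2 (or memory).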
1652 //===----------------------------------------------------------------------===//
1653 // SSE 1 & 2 - Extract Floating-Point Sign mask
1654 //===----------------------------------------------------------------------===//
1656 /// sse12_extr_sign_mask - sse 1 & 2 packed FP sign-mask extraction
1657 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1658 Domain d> {
1659 def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1660 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1661 [(set GR32:$dst, (Int RC:$src))], d>;
1662 def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
1663 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
1664 }
1666 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1667 SSEPackedSingle>, TB;
1668 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1669 SSEPackedDouble>, TB, OpSize;
1671 def : Pat<(i32 (X86fgetsign FR32:$src)),
1672 (MOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1673 sub_ss))>, Requires<[HasSSE1]>;
1674 def : Pat<(i64 (X86fgetsign FR32:$src)),
1675 (MOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1676 sub_ss))>, Requires<[HasSSE1]>;
1677 def : Pat<(i32 (X86fgetsign FR64:$src)),
1678 (MOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1679 sub_sd))>, Requires<[HasSSE2]>;
1680 def : Pat<(i64 (X86fgetsign FR64:$src)),
1681 (MOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1682 sub_sd))>, Requires<[HasSSE2]>;
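// MOVMSKPS gathers the sign bit of each single-precision lane into the low
// four bits of the GPR (MOVMSKPD: the low two bits) and zeroes the rest:
//   movmskps %xmm0, %eax   ; eax[3:0] = sign bits of the four floats
// The X86fgetsign patterns exploit this by first inserting the scalar into
// the low lane of a vector register.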
1684 let Predicates = [HasAVX] in {
1685 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1686 "movmskps", SSEPackedSingle>, TB, VEX;
1687 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1688 "movmskpd", SSEPackedDouble>, TB, OpSize,
1690 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
1691 "movmskps", SSEPackedSingle>, TB, VEX;
1692 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
1693 "movmskpd", SSEPackedDouble>, TB, OpSize,
1696 def : Pat<(i32 (X86fgetsign FR32:$src)),
1697 (VMOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1698 sub_ss))>;
1699 def : Pat<(i64 (X86fgetsign FR32:$src)),
1700 (VMOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1701 sub_ss))>;
1702 def : Pat<(i32 (X86fgetsign FR64:$src)),
1703 (VMOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1704 sub_sd))>;
1705 def : Pat<(i64 (X86fgetsign FR64:$src)),
1706 (VMOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1707 sub_sd))>;
1709 // Assembler Only
1710 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1711 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1712 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1713 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1715 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1716 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1717 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1718 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1722 //===----------------------------------------------------------------------===//
1723 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1724 //===----------------------------------------------------------------------===//
1726 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1727 // names that start with 'Fs'.
1729 // Alias instructions that map fld0 to pxor for sse.
1730 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1731 canFoldAsLoad = 1 in {
1732 // FIXME: Set encoding to pseudo!
1733 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1734 [(set FR32:$dst, fp32imm0)]>,
1735 Requires<[HasSSE1]>, TB, OpSize;
1736 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1737 [(set FR64:$dst, fpimm0)]>,
1738 Requires<[HasSSE2]>, TB, OpSize;
1739 def VFsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1740 [(set FR32:$dst, fp32imm0)]>,
1741 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
1742 def VFsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1743 [(set FR64:$dst, fpimm0)]>,
1744 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
1745 }
1747 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
1748 // bits are disregarded.
1749 let neverHasSideEffects = 1 in {
1750 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1751 "movaps\t{$src, $dst|$dst, $src}", []>;
1752 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1753 "movapd\t{$src, $dst|$dst, $src}", []>;
1756 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1757 // bits are disregarded.
1758 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1759 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1760 "movaps\t{$src, $dst|$dst, $src}",
1761 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
1762 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1763 "movapd\t{$src, $dst|$dst, $src}",
1764 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1765 }
1767 //===----------------------------------------------------------------------===//
1768 // SSE 1 & 2 - Logical Instructions
1769 //===----------------------------------------------------------------------===//
1771 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
1773 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
1774 SDNode OpNode> {
1775 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
1776 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, TB, VEX_4V;
1778 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
1779 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, TB, OpSize, VEX_4V;
1781 let Constraints = "$src1 = $dst" in {
1782 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
1783 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
1785 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
1786 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
1787 }
1788 }
1790 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1791 let mayLoad = 0 in {
1792 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
1793 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
1794 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
1795 }
1797 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
1798 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
1800 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
1802 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
1803 SDNode OpNode> {
1804 // In AVX there is no need to add a pattern for the 128-bit logical rr ps
1805 // form, because its operands are all promoted to v2i64 and the patterns are
1806 // covered by the int version. The rr pattern is needed only for SSE, because
1807 // v2i64 is supported on SSE2 but not on SSE1.
1808 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1809 !strconcat(OpcodeStr, "ps"), f128mem, [],
1810 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1811 (memopv2i64 addr:$src2)))], 0>, TB, VEX_4V;
1813 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1814 !strconcat(OpcodeStr, "pd"), f128mem,
1815 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1816 (bc_v2i64 (v2f64 VR128:$src2))))],
1817 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1818 (memopv2i64 addr:$src2)))], 0>,
1819 TB, OpSize, VEX_4V;
1820 let Constraints = "$src1 = $dst" in {
1821 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1822 !strconcat(OpcodeStr, "ps"), f128mem,
1823 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
1824 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1825 (memopv2i64 addr:$src2)))]>, TB;
1827 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1828 !strconcat(OpcodeStr, "pd"), f128mem,
1829 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1830 (bc_v2i64 (v2f64 VR128:$src2))))],
1831 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1832 (memopv2i64 addr:$src2)))]>, TB, OpSize;
1833 }
1834 }
1836 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
1838 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr,
1839 SDNode OpNode> {
1840 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
1841 !strconcat(OpcodeStr, "ps"), f256mem,
1842 [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
1843 [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
1844 (memopv4i64 addr:$src2)))], 0>, TB, VEX_4V;
1846 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
1847 !strconcat(OpcodeStr, "pd"), f256mem,
1848 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
1849 (bc_v4i64 (v4f64 VR256:$src2))))],
1850 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
1851 (memopv4i64 addr:$src2)))], 0>,
1852 TB, OpSize, VEX_4V;
1853 }
1855 // AVX 256-bit packed logical ops forms
1856 defm VAND : sse12_fp_packed_logical_y<0x54, "and", and>;
1857 defm VOR : sse12_fp_packed_logical_y<0x56, "or", or>;
1858 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor", xor>;
1859 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn", X86andnp>;
1861 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
1862 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
1863 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
1864 let isCommutable = 0 in
1865 defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
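// Note the operand order of the andn forms: andnps/andnpd compute
// (NOT src1) AND src2, so X86andnp is not commutative (hence the
// isCommutable = 0 above). A typical mask-blend sequence, with the mask
// duplicated in %xmm2/%xmm3 and the two values in %xmm0/%xmm1:
//   andps  %xmm0, %xmm2   ; xmm2 = mask & a
//   andnps %xmm1, %xmm3   ; xmm3 = ~mask & b
//   orps   %xmm3, %xmm2   ; xmm2 = a where mask is set, else b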
1867 //===----------------------------------------------------------------------===//
1868 // SSE 1 & 2 - Arithmetic Instructions
1869 //===----------------------------------------------------------------------===//
1871 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
1872 /// vector forms.
1874 /// In addition, we also have a special variant of the scalar form here to
1875 /// represent the associated intrinsic operation. This form is unlike the
1876 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1877 /// and leaves the top elements unmodified (therefore these cannot be commuted).
1879 /// These three forms can each be reg+reg or reg+mem.
1882 /// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
1883 /// classes below.
1884 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
1885 bit Is2Addr = 1> {
1886 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
1887 OpNode, FR32, f32mem, Is2Addr>, XS;
1888 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
1889 OpNode, FR64, f64mem, Is2Addr>, XD;
1890 }
1892 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
1893 bit Is2Addr = 1> {
1894 let mayLoad = 0 in {
1895 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
1896 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
1897 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
1898 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
1899 }
1900 }
1902 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
1903 SDNode OpNode> {
1904 let mayLoad = 0 in {
1905 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
1906 v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
1907 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
1908 v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
1909 }
1910 }
1912 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
1913 bit Is2Addr = 1> {
1914 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1915 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
1916 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1917 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
1920 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
1921 bit Is2Addr = 1> {
1922 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1923 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
1924 SSEPackedSingle, Is2Addr>, TB;
1926 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1927 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
1928 SSEPackedDouble, Is2Addr>, TB, OpSize;
1929 }
1931 multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
1932 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1933 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
1934 SSEPackedSingle, 0>, TB;
1936 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1937 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
1938 SSEPackedDouble, 0>, TB, OpSize;
1939 }
1941 // Binary Arithmetic instructions
1942 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
1943 basic_sse12_fp_binop_s_int<0x58, "add", 0>,
1944 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
1945 basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
1946 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
1947 basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
1948 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
1949 basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
1951 let isCommutable = 0 in {
1952 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
1953 basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
1954 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
1955 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
1956 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
1957 basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
1958 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
1959 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
1960 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
1961 basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
1962 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
1963 basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
1964 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
1965 basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
1966 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
1967 basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
1968 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
1969 basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
1970 basic_sse12_fp_binop_p_y_int<0x5D, "min">,
1971 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
1972 }
1974 let Constraints = "$src1 = $dst" in {
1975 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
1976 basic_sse12_fp_binop_p<0x58, "add", fadd>,
1977 basic_sse12_fp_binop_s_int<0x58, "add">;
1978 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
1979 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
1980 basic_sse12_fp_binop_s_int<0x59, "mul">;
1982 let isCommutable = 0 in {
1983 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
1984 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
1985 basic_sse12_fp_binop_s_int<0x5C, "sub">;
1986 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
1987 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
1988 basic_sse12_fp_binop_s_int<0x5E, "div">;
1989 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
1990 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
1991 basic_sse12_fp_binop_s_int<0x5F, "max">,
1992 basic_sse12_fp_binop_p_int<0x5F, "max">;
1993 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
1994 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
1995 basic_sse12_fp_binop_s_int<0x5D, "min">,
1996 basic_sse12_fp_binop_p_int<0x5D, "min">;
2001 /// In addition, we also have a special variant of the scalar form here to
2002 /// represent the associated intrinsic operation. This form is unlike the
2003 /// plain scalar form, in that it takes an entire vector (instead of a
2004 /// scalar) and leaves the top elements undefined.
2006 /// And, we have a special variant for the full-vector intrinsic form.
2008 /// sse1_fp_unop_s - SSE1 unops in scalar form.
2009 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
2010 SDNode OpNode, Intrinsic F32Int> {
2011 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
2012 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2013 [(set FR32:$dst, (OpNode FR32:$src))]>;
2014 // For scalar unary operations, fold a load into the operation
2015 // only in OptForSize mode. It eliminates an instruction, but it also
2016 // eliminates a whole-register clobber (the load), so it introduces a
2017 // partial register update condition.
2018 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
2019 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2020 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
2021 Requires<[HasSSE1, OptForSize]>;
2022 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2023 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2024 [(set VR128:$dst, (F32Int VR128:$src))]>;
2025 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
2026 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2027 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
2028 }
2030 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
2031 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
2032 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
2033 !strconcat(OpcodeStr,
2034 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2035 def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1,f32mem:$src2),
2036 !strconcat(OpcodeStr,
2037 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2038 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
2039 (ins ssmem:$src1, VR128:$src2),
2040 !strconcat(OpcodeStr,
2041 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2044 /// sse1_fp_unop_p - SSE1 unops in packed form.
2045 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
2046 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2047 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2048 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
2049 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2050 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2051 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
2052 }
2054 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
2055 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
2056 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2057 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2058 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
2059 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2060 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2061 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
2062 }
2064 /// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
2065 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
2066 Intrinsic V4F32Int> {
2067 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2068 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2069 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
2070 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2071 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2072 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
2073 }
2075 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsics unops in packed forms.
2076 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
2077 Intrinsic V4F32Int> {
2078 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2079 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2080 [(set VR256:$dst, (V4F32Int VR256:$src))]>;
2081 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2082 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2083 [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
2084 }
2086 /// sse2_fp_unop_s - SSE2 unops in scalar form.
2087 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
2088 SDNode OpNode, Intrinsic F64Int> {
2089 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
2090 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2091 [(set FR64:$dst, (OpNode FR64:$src))]>;
2092 // See the comments in sse1_fp_unop_s for why this is OptForSize.
2093 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
2094 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2095 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
2096 Requires<[HasSSE2, OptForSize]>;
2097 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2098 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2099 [(set VR128:$dst, (F64Int VR128:$src))]>;
2100 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
2101 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2102 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
2103 }
2105 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
2106 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
2107 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
2108 !strconcat(OpcodeStr,
2109 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2110 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1,f64mem:$src2),
2111 !strconcat(OpcodeStr,
2112 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2113 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
2114 (ins VR128:$src1, sdmem:$src2),
2115 !strconcat(OpcodeStr,
2116 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2119 /// sse2_fp_unop_p - SSE2 unops in vector forms.
2120 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
2121 SDNode OpNode> {
2122 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2123 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2124 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
2125 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2126 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2127 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
2128 }
2130 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
2131 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
2132 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2133 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2134 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
2135 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2136 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2137 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
2138 }
2140 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
2141 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
2142 Intrinsic V2F64Int> {
2143 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2144 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2145 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
2146 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2147 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2148 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
2149 }
2151 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
2152 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
2153 Intrinsic V2F64Int> {
2154 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2155 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2156 [(set VR256:$dst, (V2F64Int VR256:$src))]>;
2157 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2158 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2159 [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
2160 }
2162 let Predicates = [HasAVX] in {
2164 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt">,
2165 sse2_fp_unop_s_avx<0x51, "vsqrt">, VEX_4V;
2167 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
2168 sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
2169 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
2170 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
2171 sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
2172 sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
2173 sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
2174 sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
2175 VEX;
2177 // Reciprocal approximations. Note that these typically require refinement
2178 // in order to obtain suitable precision.
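// rcpps/rsqrtps provide roughly 12 bits of precision (relative error on the
// order of 2^-12); one Newton-Raphson step approximately doubles the number
// of correct bits when closer to full single precision is needed:
//   rcp:   x1 = x0 * (2 - a * x0)
//   rsqrt: x1 = (x0 / 2) * (3 - a * x0 * x0)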
2179 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt">, VEX_4V;
2180 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
2181 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
2182 sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
2183 sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
2185 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp">, VEX_4V;
2186 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
2187 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
2188 sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
2189 sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
2190 }
2192 def : Pat<(f32 (fsqrt FR32:$src)),
2193 (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2194 def : Pat<(f32 (fsqrt (load addr:$src))),
2195 (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2196 Requires<[HasAVX, OptForSize]>;
2197 def : Pat<(f64 (fsqrt FR64:$src)),
2198 (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
2199 def : Pat<(f64 (fsqrt (load addr:$src))),
2200 (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
2201 Requires<[HasAVX, OptForSize]>;
2203 def : Pat<(f32 (X86frsqrt FR32:$src)),
2204 (VRSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2205 def : Pat<(f32 (X86frsqrt (load addr:$src))),
2206 (VRSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2207 Requires<[HasAVX, OptForSize]>;
2209 def : Pat<(f32 (X86frcp FR32:$src)),
2210 (VRCPSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2211 def : Pat<(f32 (X86frcp (load addr:$src))),
2212 (VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2213 Requires<[HasAVX, OptForSize]>;
2215 let Predicates = [HasAVX] in {
2216 def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
2217 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2218 (VSQRTSSr (f32 (IMPLICIT_DEF)),
2219 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2220 sub_ss)>;
2221 def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
2222 (VSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2224 def : Pat<(int_x86_sse2_sqrt_sd VR128:$src),
2225 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)),
2226 (VSQRTSDr (f64 (IMPLICIT_DEF)),
2227 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd)),
2228 sub_sd)>;
2229 def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
2230 (VSQRTSDm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
2232 def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
2233 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2234 (VRSQRTSSr (f32 (IMPLICIT_DEF)),
2235 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2236 sub_ss)>;
2237 def : Pat<(int_x86_sse_rsqrt_ss sse_load_f32:$src),
2238 (VRSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2240 def : Pat<(int_x86_sse_rcp_ss VR128:$src),
2241 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2242 (VRCPSSr (f32 (IMPLICIT_DEF)),
2243 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2244 sub_ss)>;
2245 def : Pat<(int_x86_sse_rcp_ss sse_load_f32:$src),
2246 (VRCPSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2247 }
2250 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
2251 sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
2252 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
2253 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
2254 sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
2255 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
2257 // Reciprocal approximations. Note that these typically require refinement
2258 // in order to obtain suitable precision.
2259 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
2260 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
2261 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
2262 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
2263 sse1_fp_unop_p<0x53, "rcp", X86frcp>,
2264 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
2266 // There is no f64 version of the reciprocal approximation instructions.
2268 //===----------------------------------------------------------------------===//
2269 // SSE 1 & 2 - Non-temporal stores
2270 //===----------------------------------------------------------------------===//
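// Non-temporal stores write to memory without allocating the line in the
// cache, which avoids cache pollution when streaming out large buffers.
// They are weakly ordered, so publishing the data to another thread needs a
// fence (SFENCE is defined further below):
//   movntps %xmm0, (%rdi)
//   sfence                  ; make the streaming stores globally visible
// Note the patterns only match *aligned* non-temporal stores
// (alignednontemporalstore); unaligned cases fall back to ordinary stores.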
2272 let AddedComplexity = 400 in { // Prefer non-temporal versions
2273 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
2274 (ins f128mem:$dst, VR128:$src),
2275 "movntps\t{$src, $dst|$dst, $src}",
2276 [(alignednontemporalstore (v4f32 VR128:$src),
2277 addr:$dst)]>, VEX;
2278 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
2279 (ins f128mem:$dst, VR128:$src),
2280 "movntpd\t{$src, $dst|$dst, $src}",
2281 [(alignednontemporalstore (v2f64 VR128:$src),
2282 addr:$dst)]>, VEX;
2283 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
2284 (ins f128mem:$dst, VR128:$src),
2285 "movntdq\t{$src, $dst|$dst, $src}",
2286 [(alignednontemporalstore (v2f64 VR128:$src),
2287 addr:$dst)]>, VEX;
2289 let ExeDomain = SSEPackedInt in
2290 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
2291 (ins f128mem:$dst, VR128:$src),
2292 "movntdq\t{$src, $dst|$dst, $src}",
2293 [(alignednontemporalstore (v4f32 VR128:$src),
2294 addr:$dst)]>, VEX;
2296 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
2297 (VMOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasAVX]>;
2299 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
2300 (ins f256mem:$dst, VR256:$src),
2301 "movntps\t{$src, $dst|$dst, $src}",
2302 [(alignednontemporalstore (v8f32 VR256:$src),
2303 addr:$dst)]>, VEX;
2304 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
2305 (ins f256mem:$dst, VR256:$src),
2306 "movntpd\t{$src, $dst|$dst, $src}",
2307 [(alignednontemporalstore (v4f64 VR256:$src),
2308 addr:$dst)]>, VEX;
2309 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
2310 (ins f256mem:$dst, VR256:$src),
2311 "movntdq\t{$src, $dst|$dst, $src}",
2312 [(alignednontemporalstore (v4f64 VR256:$src),
2313 addr:$dst)]>, VEX;
2314 let ExeDomain = SSEPackedInt in
2315 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
2316 (ins f256mem:$dst, VR256:$src),
2317 "movntdq\t{$src, $dst|$dst, $src}",
2318 [(alignednontemporalstore (v8f32 VR256:$src),
2319 addr:$dst)]>, VEX;
2320 }
2322 def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
2323 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
2324 def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
2325 (VMOVNTPDYmr addr:$dst, VR256:$src)>;
2326 def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
2327 (VMOVNTPSYmr addr:$dst, VR256:$src)>;
2329 let AddedComplexity = 400 in { // Prefer non-temporal versions
2330 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2331 "movntps\t{$src, $dst|$dst, $src}",
2332 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2333 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2334 "movntpd\t{$src, $dst|$dst, $src}",
2335 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
2337 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2338 "movntdq\t{$src, $dst|$dst, $src}",
2339 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2341 let ExeDomain = SSEPackedInt in
2342 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2343 "movntdq\t{$src, $dst|$dst, $src}",
2344 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2346 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
2347 (MOVNTDQmr addr:$dst, VR128:$src)>;
2349 // There is no AVX form for instructions below this point
2350 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2351 "movnti{l}\t{$src, $dst|$dst, $src}",
2352 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2353 TB, Requires<[HasSSE2]>;
2354 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2355 "movnti{q}\t{$src, $dst|$dst, $src}",
2356 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2357 TB, Requires<[HasSSE2]>;
2358 }
2360 //===----------------------------------------------------------------------===//
2361 // SSE 1 & 2 - Misc Instructions (No AVX form)
2362 //===----------------------------------------------------------------------===//
2364 // Prefetch intrinsic.
2365 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2366 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))]>;
2367 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2368 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))]>;
2369 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2370 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))]>;
2371 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2372 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))]>;
2374 // Load, store, and memory fence
2375 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2376 TB, Requires<[HasSSE1]>;
2377 def : Pat<(X86SFence), (SFENCE)>;
2379 // Alias instructions that map zero vector to pxor / xorp* for sse.
2380 // We set canFoldAsLoad because this can be converted to a constant-pool
2381 // load of an all-zeros value if folding it would be beneficial.
2382 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2383 // JIT implementation, which does not expand the instructions below like
2384 // X86MCInstLower does.
2385 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2386 isCodeGenOnly = 1 in {
2387 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2388 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
2389 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2390 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
2391 let ExeDomain = SSEPackedInt in
2392 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2393 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2394 }
2396 // The same as done above but for AVX. The 128-bit versions are the
2397 // same, but re-encoded. The 256-bit variants have no PI version and do not
2398 // need one: on Sandy Bridge the register is set to zero at the rename stage
2399 // without using any execution unit, so SET0PSY and SET0PDY can be used for
2400 // vector int instructions without penalty.
2401 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2402 // JIT implementation, which does not expand the instructions below like
2403 // X86MCInstLower does.
2404 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2405 isCodeGenOnly = 1, Predicates = [HasAVX] in {
2406 def AVX_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2407 [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
2408 def AVX_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2409 [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
2410 def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2411 [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
2412 def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2413 [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
2414 let ExeDomain = SSEPackedInt in
2415 def AVX_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2416 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2417 }
2419 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
2420 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
2421 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
2423 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2424 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
2426 // AVX has no support for 256-bit integer instructions, but since the 128-bit
2427 // VPXOR instruction writes zero to its upper part, it is safe to use it to build zeros.
2428 def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;
2429 def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
2430 (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;
2432 def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
2433 def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
2434 (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
2436 //===----------------------------------------------------------------------===//
2437 // SSE 1 & 2 - Load/Store MXCSR register
2438 //===----------------------------------------------------------------------===//
2440 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2441 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2442 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2443 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2445 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2446 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2447 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2448 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2450 //===---------------------------------------------------------------------===//
2451 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2452 //===---------------------------------------------------------------------===//
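// movdqa requires a 16-byte-aligned memory operand and faults otherwise,
// while movdqu accepts any alignment (at some cost on older cores), e.g.:
//   movdqa (%rdi), %xmm0   ; #GP unless %rdi is 16-byte aligned
//   movdqu (%rdi), %xmm0   ; safe for any %rdi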
2454 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2456 let neverHasSideEffects = 1 in {
2457 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2458 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2459 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2460 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2462 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2463 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2464 def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2465 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2467 let canFoldAsLoad = 1, mayLoad = 1 in {
2468 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2469 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2470 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2471 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2472 let Predicates = [HasAVX] in {
2473 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2474 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2475 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2476 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2480 let mayStore = 1 in {
2481 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2482 (ins i128mem:$dst, VR128:$src),
2483 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2484 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
2485 (ins i256mem:$dst, VR256:$src),
2486 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2487 let Predicates = [HasAVX] in {
2488 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2489 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2490 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
2491 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2495 let neverHasSideEffects = 1 in
2496 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2497 "movdqa\t{$src, $dst|$dst, $src}", []>;
2499 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2500 "movdqu\t{$src, $dst|$dst, $src}",
2501 []>, XS, Requires<[HasSSE2]>;
2503 let canFoldAsLoad = 1, mayLoad = 1 in {
2504 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2505 "movdqa\t{$src, $dst|$dst, $src}",
2506 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2507 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2508 "movdqu\t{$src, $dst|$dst, $src}",
2509 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2510 XS, Requires<[HasSSE2]>;
2513 let mayStore = 1 in {
2514 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2515 "movdqa\t{$src, $dst|$dst, $src}",
2516 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2517 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2518 "movdqu\t{$src, $dst|$dst, $src}",
2519 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2520 XS, Requires<[HasSSE2]>;
2523 // Intrinsic forms of MOVDQU load and store
2524 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2525 "vmovdqu\t{$src, $dst|$dst, $src}",
2526 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2527 XS, VEX, Requires<[HasAVX]>;
2529 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2530 "movdqu\t{$src, $dst|$dst, $src}",
2531 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2532 XS, Requires<[HasSSE2]>;
2534 } // ExeDomain = SSEPackedInt
2536 def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
2537 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
2538 (VMOVDQUYmr addr:$dst, VR256:$src)>;
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Arithmetic Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in { // SSE integer instructions

multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
                            bit IsCommutable = 0, bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId VR128:$src1,
                                (bitconvert (memopv2i64 addr:$src2))))]>;
}
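// For example, "defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb",
// int_x86_sse2_pavg_b, 1>" below expands to PAVGBrr and PAVGBrm. With the
// default Is2Addr = 1 the two-operand asm form "pavgb {$src2, $dst|...}" is
// selected; the AVX instantiations pass Is2Addr = 0 to get the three-operand
// form instead.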
multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
                             string OpcodeStr, Intrinsic IntId,
                             Intrinsic IntId2, bit Is2Addr = 1> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId VR128:$src1,
                                (bitconvert (memopv2i64 addr:$src2))))]>;
  def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
       (ins VR128:$src1, i32i8imm:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
}
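// This shift multiclass yields three forms: e.g. for PSLLW below, PSLLWrr
// shifts by a count held in the low 64 bits of an XMM register (IntId),
// PSLLWrm by a count loaded from memory, and PSLLWri by an 8-bit immediate
// (IntId2), encoded via the separate opc2/ImmForm opcode.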
/// PDI_binop_rm - Simple SSE2 binary operator.
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                        ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
                                (bitconvert (memopv2i64 addr:$src2)))))]>;
}
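// Unlike PDI_binop_rm_int, this class matches a generic SDNode (add, sub,
// mul, ...) at type OpVT rather than a target intrinsic, so e.g. PADDB below
// pattern-matches (v16i8 (add ...)) directly.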
/// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
///
/// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
/// to collapse (bitconvert VT to VT) into its operand.
///
multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
                              bit IsCommutable = 0, bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
}
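// Since the memory operand is already typed v2i64, the rm pattern here needs
// no bitconvert; that is the only difference from PDI_binop_rm at OpVT = v2i64.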
} // ExeDomain = SSEPackedInt

// 128-bit Integer Arithmetic

let Predicates = [HasAVX] in {
defm VPADDB  : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
defm VPADDW  : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
defm VPADDD  : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
defm VPADDQ  : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
defm VPSUBB  : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
defm VPSUBW  : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
defm VPSUBD  : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
defm VPSUBQ  : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;

// Intrinsic forms
defm VPSUBSB  : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
                VEX_4V;
defm VPSUBSW  : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
                VEX_4V;
defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
                VEX_4V;
defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
                VEX_4V;
defm VPADDSB  : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
                VEX_4V;
defm VPADDSW  : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
                VEX_4V;
defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
                VEX_4V;
defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
                VEX_4V;
defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
                VEX_4V;
defm VPMULHW  : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
                VEX_4V;
defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
                VEX_4V;
defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
                VEX_4V;
defm VPAVGB   : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
                VEX_4V;
defm VPAVGW   : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
                VEX_4V;
defm VPMINUB  : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
                VEX_4V;
defm VPMINSW  : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
                VEX_4V;
defm VPMAXUB  : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
                VEX_4V;
defm VPMAXSW  : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
                VEX_4V;
defm VPSADBW  : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
                VEX_4V;
}
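// All of the AVX variants above pass Is2Addr = 0 and carry VEX_4V, so they
// assemble as three-operand instructions, e.g. "vpaddb %xmm2, %xmm1, %xmm0"
// rather than the SSE2 two-operand "paddb %xmm1, %xmm0".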
let Constraints = "$src1 = $dst" in {
defm PADDB  : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
defm PADDW  : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
defm PADDD  : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
defm PADDQ  : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
defm PSUBB  : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
defm PSUBW  : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
defm PSUBD  : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
defm PSUBQ  : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;

// Intrinsic forms
defm PSUBSB  : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
defm PSUBSW  : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
defm PADDSB  : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
defm PADDSW  : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
defm PMULHW  : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
defm PAVGB   : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
defm PAVGW   : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
defm PMINUB  : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
defm PMINSW  : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
defm PMAXUB  : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
defm PMAXSW  : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
defm PSADBW  : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;

} // Constraints = "$src1 = $dst"
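// The "$src1 = $dst" constraint ties the first input to the destination,
// giving the classic two-address SSE2 forms; e.g. "paddb %xmm1, %xmm0"
// computes xmm0 = xmm0 + xmm1 in place.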
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Logical Instructions
//===---------------------------------------------------------------------===//

let Predicates = [HasAVX] in {
defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
                                int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
                                VEX_4V;
defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
                                int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
                                VEX_4V;
defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
                                int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
                                VEX_4V;

defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
                                int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
                                VEX_4V;
defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
                                int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
                                VEX_4V;
defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
                                int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
                                VEX_4V;

defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
                                int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
                                VEX_4V;
defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
                                int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
                                VEX_4V;

defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
defm VPOR  : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;

let ExeDomain = SSEPackedInt in {
let neverHasSideEffects = 1 in {
// 128-bit logical shifts.
def VPSLLDQri : PDIi8<0x73, MRM7r,
                    (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                    VEX_4V;
def VPSRLDQri : PDIi8<0x73, MRM3r,
                    (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                    VEX_4V;
// PSRADQri doesn't exist in SSE[1-3].
}
def VPANDNrr : PDI<0xDF, MRMSrcReg,
                  (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                  "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                  [(set VR128:$dst,
                        (v2i64 (X86andnp VR128:$src1, VR128:$src2)))]>,VEX_4V;

def VPANDNrm : PDI<0xDF, MRMSrcMem,
                  (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                  "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                  [(set VR128:$dst, (X86andnp VR128:$src1,
                                     (memopv2i64 addr:$src2)))]>, VEX_4V;
} // ExeDomain = SSEPackedInt
} // Predicates = [HasAVX]
let Constraints = "$src1 = $dst" in {
defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
                               int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
                               int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
                               int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;

defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
                               int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
                               int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
                               int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;

defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
                               int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
                               int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;

defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
defm POR  : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;

let ExeDomain = SSEPackedInt in {
let neverHasSideEffects = 1 in {
// 128-bit logical shifts.
def PSLLDQri : PDIi8<0x73, MRM7r,
                   (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                   "pslldq\t{$src2, $dst|$dst, $src2}", []>;
def PSRLDQri : PDIi8<0x73, MRM3r,
                   (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                   "psrldq\t{$src2, $dst|$dst, $src2}", []>;
// PSRADQri doesn't exist in SSE[1-3].
}
def PANDNrr : PDI<0xDF, MRMSrcReg,
                  (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                  "pandn\t{$src2, $dst|$dst, $src2}", []>;

def PANDNrm : PDI<0xDF, MRMSrcMem,
                  (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                  "pandn\t{$src2, $dst|$dst, $src2}", []>;
} // ExeDomain = SSEPackedInt
} // Constraints = "$src1 = $dst"
let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
            (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
            (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
            (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
  def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
            (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
  def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
            (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;

  // Shift up / down and insert zeros.
  def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
            (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
  def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
            (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
}
let Predicates = [HasSSE2] in {
  def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
            (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
            (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
            (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
  def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
            (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
  def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
            (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;

  // Shift up / down and insert zeros.
  def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
            (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
  def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
            (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
}
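// Note the two intrinsic flavors above: int_x86_sse2_psll_dq takes a shift
// amount in bits, so BYTE_imm converts it to the byte count that PSLLDQ
// actually shifts by, while the *_dq_bs "byte shift" intrinsics pass their
// immediate straight through.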
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Comparison Instructions
//===---------------------------------------------------------------------===//

let Predicates = [HasAVX] in {
defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
                                 0>, VEX_4V;
defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
                                 0>, VEX_4V;
defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
                                 0>, VEX_4V;
defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
                                 0>, VEX_4V;
defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
                                 0>, VEX_4V;
defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
                                 0>, VEX_4V;

def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
          (VPCMPEQBrr VR128:$src1, VR128:$src2)>;
def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
          (VPCMPEQBrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
          (VPCMPEQWrr VR128:$src1, VR128:$src2)>;
def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
          (VPCMPEQWrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
          (VPCMPEQDrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
          (VPCMPEQDrm VR128:$src1, addr:$src2)>;

def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
          (VPCMPGTBrr VR128:$src1, VR128:$src2)>;
def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
          (VPCMPGTBrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
          (VPCMPGTWrr VR128:$src1, VR128:$src2)>;
def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
          (VPCMPGTWrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
          (VPCMPGTDrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
          (VPCMPGTDrm VR128:$src1, addr:$src2)>;
} // Predicates = [HasAVX]
let Constraints = "$src1 = $dst" in {
defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
} // Constraints = "$src1 = $dst"

def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
          (PCMPEQBrr VR128:$src1, VR128:$src2)>;
def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
          (PCMPEQBrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
          (PCMPEQWrr VR128:$src1, VR128:$src2)>;
def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
          (PCMPEQWrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
          (PCMPEQDrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
          (PCMPEQDrm VR128:$src1, addr:$src2)>;

def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
          (PCMPGTBrr VR128:$src1, VR128:$src2)>;
def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
          (PCMPGTBrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
          (PCMPGTWrr VR128:$src1, VR128:$src2)>;
def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
          (PCMPGTWrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
          (PCMPGTDrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
          (PCMPGTDrm VR128:$src1, addr:$src2)>;
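// Each compare produces an all-ones or all-zeros element mask: e.g. pcmpeqb
// sets a result byte to 0xFF where the corresponding source bytes are equal
// and to 0x00 where they differ. The pcmpgt* compares are signed.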
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Pack Instructions
//===---------------------------------------------------------------------===//

let Predicates = [HasAVX] in {
defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
                                  0, 0>, VEX_4V;
defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
                                  0, 0>, VEX_4V;
defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
                                  0, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
} // Constraints = "$src1 = $dst"
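// The pack instructions narrow with saturation: packsswb, for instance,
// converts the eight signed words of each operand into sixteen
// signed-saturated bytes, $src1 supplying the low half of the result and
// $src2 the high half.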
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Shuffle Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {
multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
                         PatFrag bc_frag> {
def ri : Ii8<0x70, MRMSrcReg,
             (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
             !strconcat(OpcodeStr,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
                                                     (undef))))]>;
def mi : Ii8<0x70, MRMSrcMem,
             (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
             !strconcat(OpcodeStr,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (vt (pshuf_frag:$src2
                                     (bc_frag (memopv2i64 addr:$src1)),
                                     (undef))))]>;
}
} // ExeDomain = SSEPackedInt
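// The 8-bit immediate holds four 2-bit element selectors; for pshufd,
// selector i picks the source dword for result dword i, so 0x1B (0b00011011)
// reverses the four dwords and 0x00 broadcasts element 0.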
let Predicates = [HasAVX] in {
  let AddedComplexity = 5 in
  defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
                               VEX;

  // SSE2 with ImmT == Imm8 and XS prefix.
  defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
                                VEX;

  // SSE2 with ImmT == Imm8 and XD prefix.
  defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
                                VEX;

  let AddedComplexity = 5 in
  def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
            (VPSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
  // Unary v4f32 shuffle with VPSHUF* in order to fold a load.
  def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
            (VPSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;

  def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
                              (i8 imm:$imm))),
            (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>;
  def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
                              (i8 imm:$imm))),
            (VPSHUFDmi addr:$src1, imm:$imm)>;
  def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
            (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
  def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
            (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
  def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
            (VPSHUFHWri VR128:$src, imm:$imm)>;
  def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)),
                               (i8 imm:$imm))),
            (VPSHUFHWmi addr:$src, imm:$imm)>;
  def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
            (VPSHUFLWri VR128:$src, imm:$imm)>;
  def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)),
                               (i8 imm:$imm))),
            (VPSHUFLWmi addr:$src, imm:$imm)>;
}
let Predicates = [HasSSE2] in {
  let AddedComplexity = 5 in
  defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;

  // SSE2 with ImmT == Imm8 and XS prefix.
  defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;

  // SSE2 with ImmT == Imm8 and XD prefix.
  defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;

  let AddedComplexity = 5 in
  def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
            (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
  // Unary v4f32 shuffle with PSHUF* in order to fold a load.
  def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
            (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;

  def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
                              (i8 imm:$imm))),
            (PSHUFDmi addr:$src1, imm:$imm)>;
  def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
                              (i8 imm:$imm))),
            (PSHUFDmi addr:$src1, imm:$imm)>;
  def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
            (PSHUFDri VR128:$src1, imm:$imm)>;
  def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
            (PSHUFDri VR128:$src1, imm:$imm)>;
  def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
            (PSHUFHWri VR128:$src, imm:$imm)>;
  def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)),
                               (i8 imm:$imm))),
            (PSHUFHWmi addr:$src, imm:$imm)>;
  def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
            (PSHUFLWri VR128:$src, imm:$imm)>;
  def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)),
                               (i8 imm:$imm))),
            (PSHUFLWmi addr:$src, imm:$imm)>;
}
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Unpack Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {
multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
                       SDNode OpNode, PatFrag bc_frag, bit Is2Addr = 1> {
  def rr : PDI<opc, MRMSrcReg,
      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
      !if(Is2Addr,
          !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
          !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))]>;
  def rm : PDI<opc, MRMSrcMem,
      (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
      !if(Is2Addr,
          !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
          !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set VR128:$dst, (OpNode VR128:$src1,
                                (bc_frag (memopv2i64
                                             addr:$src2))))]>;
}
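// Unpack interleaves elements from the two sources: punpcklbw, for example,
// produces src1[0],src2[0],src1[1],src2[1],...,src1[7],src2[7] from the low
// eight bytes of each operand, and the punpckh* forms do the same with the
// high halves.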
let Predicates = [HasAVX] in {
defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Punpcklbw,
                              bc_v16i8, 0>, VEX_4V;
defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Punpcklwd,
                              bc_v8i16, 0>, VEX_4V;
defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Punpckldq,
                              bc_v4i32, 0>, VEX_4V;

/// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
/// knew to collapse (bitconvert VT to VT) into its operand.
def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                       "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
                                                 VR128:$src2)))]>, VEX_4V;
def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                       "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
                                          (memopv2i64 addr:$src2))))]>, VEX_4V;

defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Punpckhbw,
                              bc_v16i8, 0>, VEX_4V;
defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Punpckhwd,
                              bc_v8i16, 0>, VEX_4V;
defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Punpckhdq,
                              bc_v4i32, 0>, VEX_4V;

/// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
/// knew to collapse (bitconvert VT to VT) into its operand.
def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                       "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
                                                 VR128:$src2)))]>, VEX_4V;
def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                       "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
                                          (memopv2i64 addr:$src2))))]>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Punpcklbw, bc_v16i8>;
defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Punpcklwd, bc_v8i16>;
defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Punpckldq, bc_v4i32>;

/// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
/// knew to collapse (bitconvert VT to VT) into its operand.
def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)))]>;
def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                      "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v2i64 (X86Punpcklqdq VR128:$src1,
                                              (memopv2i64 addr:$src2))))]>;

defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Punpckhbw, bc_v16i8>;
defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Punpckhwd, bc_v8i16>;
defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Punpckhdq, bc_v4i32>;

/// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
/// knew to collapse (bitconvert VT to VT) into its operand.
def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)))]>;
def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                      "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v2i64 (X86Punpckhqdq VR128:$src1,
                                              (memopv2i64 addr:$src2))))]>;
}

} // ExeDomain = SSEPackedInt
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Extract and Insert
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {
multiclass sse2_pinsrw<bit Is2Addr = 1> {
  def rri : Ii8<0xC4, MRMSrcReg,
       (outs VR128:$dst), (ins VR128:$src1,
        GR32:$src2, i32i8imm:$src3),
       !if(Is2Addr,
           "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
       [(set VR128:$dst,
         (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
  def rmi : Ii8<0xC4, MRMSrcMem,
       (outs VR128:$dst), (ins VR128:$src1,
        i16mem:$src2, i32i8imm:$src3),
       !if(Is2Addr,
           "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
       [(set VR128:$dst,
         (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
                    imm:$src3))]>;
}

// Extract
let Predicates = [HasAVX] in
def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
                    (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
                                                imm:$src2))]>, OpSize, VEX;
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
                    (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
                                                imm:$src2))]>;

// Insert
let Predicates = [HasAVX] in {
defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
     (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
     "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
     []>, OpSize, VEX_4V;
}

let Constraints = "$src1 = $dst" in
  defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;

} // ExeDomain = SSEPackedInt
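// pextrw zero-extends the selected 16-bit element into a 32-bit GPR, and
// pinsrw replaces the word selected by the immediate: e.g.
// "pinsrw $3, %eax, %xmm0" writes the low 16 bits of EAX into word 3 of xmm0.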
//===---------------------------------------------------------------------===//
// SSE2 - Packed Mask Creation
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {

def VPMOVMSKBrr  : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
           "pmovmskb\t{$src, $dst|$dst, $src}",
           [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
           "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
           "pmovmskb\t{$src, $dst|$dst, $src}",
           [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;

} // ExeDomain = SSEPackedInt
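// pmovmskb gathers the sign bit of each of the 16 source bytes into the low
// 16 bits of the destination GPR (the upper bits are zeroed), which is handy
// after a pcmp* compare for branching on vector conditions.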
//===---------------------------------------------------------------------===//
// SSE2 - Conditional Store
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {

let Uses = [EDI] in
def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
           (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
let Uses = [RDI] in
def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
           (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;

let Uses = [EDI] in
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
let Uses = [RDI] in
def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;

} // ExeDomain = SSEPackedInt
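// maskmovdqu is a byte-granular conditional store: bytes of $src whose
// corresponding mask byte has its high bit set are written to [EDI]/[RDI]
// (hence the implicit register uses above); other destination bytes are left
// untouched.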
//===---------------------------------------------------------------------===//
// SSE2 - Move Doubleword
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//
// Move Int Doubleword to Packed Double Int

def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
                      VEX;
def VMOV64toPQIrr : VRPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>, VEX;
def VMOV64toSDrr : VRPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>, VEX;

def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))]>;
def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>;
//===---------------------------------------------------------------------===//
// Move Int Doubleword to Single Scalar

def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;

def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
                      VEX;
def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (bitconvert GR32:$src))]>;

def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int to Packed Double Int

def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                                       (iPTR 0)))]>, VEX;
def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
                      (ins i32mem:$dst, VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (vector_extract (v4i32 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>, VEX;
def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                                       (iPTR 0)))]>;
def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (vector_extract (v4i32 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>;

def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                                         (iPTR 0)))]>;
def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;

def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
//===---------------------------------------------------------------------===//
// Move Scalar Single to Double Int

def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(set GR32:$dst, (bitconvert FR32:$src))]>;
def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
                     "movd\t{$src, $dst|$dst, $src}",
                     [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
//===---------------------------------------------------------------------===//
// Patterns and instructions to describe movd/movq to XMM register zero-extends

let AddedComplexity = 15 in {
def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v4i32 (X86vzmovl
                                      (v4i32 (scalar_to_vector GR32:$src)))))]>,
                       VEX;
def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
                       [(set VR128:$dst, (v2i64 (X86vzmovl
                                      (v2i64 (scalar_to_vector GR64:$src)))))]>,
                       VEX, VEX_W;
}
let AddedComplexity = 15 in {
def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v4i32 (X86vzmovl
                                      (v4i32 (scalar_to_vector GR32:$src)))))]>;
def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
                       [(set VR128:$dst, (v2i64 (X86vzmovl
                                      (v2i64 (scalar_to_vector GR64:$src)))))]>;
}

let AddedComplexity = 20 in {
def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
                                                   (loadi32 addr:$src))))))]>,
                       VEX;
def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
                                                   (loadi32 addr:$src))))))]>;

def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
          (MOVZDI2PDIrm addr:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
          (MOVZDI2PDIrm addr:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
          (MOVZDI2PDIrm addr:$src)>;
}

// AVX 128-bit movd/movq instructions write zeros in the high 128 bits of the
// YMM register, so regular 128-bit instructions suffice to match a 256-bit
// scalar_to_vector plus zero-extend.
def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
                             (v4i32 (scalar_to_vector GR32:$src)), (i32 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVZDI2PDIrr GR32:$src), sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
                             (v2i64 (scalar_to_vector GR64:$src)), (i32 0)))),
          (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
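// SUBREG_TO_REG here re-labels the 128-bit result as the sub_xmm subregister
// of a 256-bit register while asserting that the remaining bits are zero, so
// no extra zeroing instruction needs to be emitted.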
// These are the correct encodings of the instructions so that we know how to
// read correct assembly, even though we continue to emit the wrong ones for
// compatibility with Darwin's buggy assembler.
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOV64toSDrr FR64:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOVSDto64rr GR64:$dst, FR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (VMOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
//===---------------------------------------------------------------------===//
// SSE2 - Move Quadword
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//
// Move Quadword Int to Packed Quadword Int

def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "vmovq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                     VEX, Requires<[HasAVX]>;
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                    "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst,
                      (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                    Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix

//===---------------------------------------------------------------------===//
// Move Packed Quadword Int to Quadword Int

def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}",
                      [(store (i64 (vector_extract (v2i64 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>, VEX;
def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}",
                      [(store (i64 (vector_extract (v2i64 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>;

def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
          (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;

//===---------------------------------------------------------------------===//
// Store / copy the lower 64 bits of an XMM register.

def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;

let AddedComplexity = 20 in
def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                      "vmovq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
                                                  (loadi64 addr:$src))))))]>,
                      XS, VEX, Requires<[HasAVX]>;

let AddedComplexity = 20 in {
def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
                                                 (loadi64 addr:$src))))))]>,
                     XS, Requires<[HasSSE2]>;

def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
          (MOVZQI2PQIrm addr:$src)>;
def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
          (MOVZQI2PQIrm addr:$src)>;
def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
}
//===---------------------------------------------------------------------===//
// Moving from XMM to XMM and clearing the upper 64 bits. Note: the IA-32
// documentation gets this wrong; movq xmm1, xmm2 does clear the high bits.

let AddedComplexity = 15 in
def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vmovq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
                        XS, VEX, Requires<[HasAVX]>;
let AddedComplexity = 15 in
def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
                        XS, Requires<[HasSSE2]>;

let AddedComplexity = 20 in
def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vmovq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (v2i64 (X86vzmovl
                                                  (loadv2i64 addr:$src))))]>,
                        XS, VEX, Requires<[HasAVX]>;
let AddedComplexity = 20 in {
def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (v2i64 (X86vzmovl
                                                  (loadv2i64 addr:$src))))]>,
                        XS, Requires<[HasSSE2]>;

def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
          (MOVZPQILo2PQIrm addr:$src)>;
}
// Instructions to match in the assembler
def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                      "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
// Recognize "movd" with GR64 destination, but encode as a "movq"
def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                          "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;

// Instructions for the disassembler
// xr = XMM register

let Predicates = [HasAVX] in
def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 "movq\t{$src, $dst|$dst, $src}", []>, XS;
//===---------------------------------------------------------------------===//
// SSE2 - Misc Instructions
//===---------------------------------------------------------------------===//

// Flush cache
def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
                "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
              TB, Requires<[HasSSE2]>;

// Load, store, and memory fence
def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
               "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
               "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
def : Pat<(X86LFence), (LFENCE)>;
def : Pat<(X86MFence), (MFENCE)>;

// Pause. This "instruction" is encoded as "rep; nop", so even though it
// was introduced with SSE2, it's backward compatible.
def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
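// PAUSE is literally F3 90 (a REP prefix on NOP); pre-SSE2 processors simply
// execute it as a NOP, which is why no predicate is required here.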
// Alias instructions that map an all-ones vector to pcmpeqd.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-ones value if folding it would be beneficial.
// FIXME: Change encoding to pseudo! This is blocked right now by the x86
// JIT implementation, it does not expand the instructions below like
// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
  def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
                         [(set VR128:$dst, (v4i32 immAllOnesV))]>;
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1, ExeDomain = SSEPackedInt, Predicates = [HasAVX] in
  def AVX_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
                           [(set VR128:$dst, (v4i32 immAllOnesV))]>, VEX_4V;
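// The 0x76 opcode is PCMPEQD; comparing a register against itself yields
// all-ones in every lane, so SETALLONES materializes ~0 without loading a
// constant.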
//===---------------------------------------------------------------------===//
// SSE3 - Conversion Instructions
//===---------------------------------------------------------------------===//

// Convert Packed Double FP to Packed DW Integers
let Predicates = [HasAVX] in {
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same is not true for memory operands, where the operand
// size is ambiguous. Provide additional rr and rm assembly forms (with x/y
// suffixes) to address this explicitly.
def VCVTPD2DQrr   : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                      "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                      "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
}

def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;

def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
          (VCVTPD2DQYrr VR256:$src)>;
def : Pat<(v4i32 (fp_to_sint (memopv4f64 addr:$src))),
          (VCVTPD2DQYrm addr:$src)>;
// Convert Packed DW Integers to Packed Double FP
let Predicates = [HasAVX] in {
def VCVTDQ2PDrm  : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTDQ2PDrr  : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
                     "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                     "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
}

def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;

// AVX 256-bit register conversion intrinsics
def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
          (VCVTDQ2PDYrr VR128:$src)>;
def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
          (VCVTDQ2PDYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
          (VCVTPD2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
          (VCVTPD2DQYrm addr:$src)>;

def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
          (VCVTDQ2PDYrr VR128:$src)>;
def : Pat<(v4f64 (sint_to_fp (memopv4i32 addr:$src))),
          (VCVTDQ2PDYrm addr:$src)>;
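// Note the asymmetric register classes: vcvtdq2pd widens, so four dwords in
// an XMM register (or a 128-bit load) produce four doubles in a YMM register,
// while vcvtpd2dq narrows a YMM source back into an XMM result.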
//===---------------------------------------------------------------------===//
// SSE3 - Move Instructions
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//
// Replicate Single FP - MOVSHDUP and MOVSLDUP

multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
                              ValueType vt, RegisterClass RC, PatFrag mem_frag,
                              X86MemOperand x86memop> {
def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (vt (OpNode RC:$src)))]>;
def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>;
}

let Predicates = [HasAVX] in {
defm VMOVSHDUP  : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
                                     v4f32, VR128, memopv4f32, f128mem>, VEX;
defm VMOVSLDUP  : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
                                     v4f32, VR128, memopv4f32, f128mem>, VEX;
defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
                                     v8f32, VR256, memopv8f32, f256mem>, VEX;
defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
                                     v8f32, VR256, memopv8f32, f256mem>, VEX;
}
defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
                                   memopv4f32, f128mem>;
defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
                                   memopv4f32, f128mem>;

let Predicates = [HasSSE3] in {
  def : Pat<(v4i32 (X86Movshdup VR128:$src)),
            (MOVSHDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
            (MOVSHDUPrm addr:$src)>;
  def : Pat<(v4i32 (X86Movsldup VR128:$src)),
            (MOVSLDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
            (MOVSLDUPrm addr:$src)>;
}

let Predicates = [HasAVX] in {
  def : Pat<(v4i32 (X86Movshdup VR128:$src)),
            (VMOVSHDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
            (VMOVSHDUPrm addr:$src)>;
  def : Pat<(v4i32 (X86Movsldup VR128:$src)),
            (VMOVSLDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
            (VMOVSLDUPrm addr:$src)>;
  def : Pat<(v8i32 (X86Movshdup VR256:$src)),
            (VMOVSHDUPYrr VR256:$src)>;
  def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (memopv4i64 addr:$src)))),
            (VMOVSHDUPYrm addr:$src)>;
  def : Pat<(v8i32 (X86Movsldup VR256:$src)),
            (VMOVSLDUPYrr VR256:$src)>;
  def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (memopv4i64 addr:$src)))),
            (VMOVSLDUPYrm addr:$src)>;
}
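// movshdup replicates the odd-indexed floats (result = {src[1], src[1],
// src[3], src[3]}) and movsldup the even-indexed ones; the patterns above
// reuse the same instructions for v4i32/v8i32 via bitcasted loads.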
//===---------------------------------------------------------------------===//
// Replicate Double FP - MOVDDUP

multiclass sse3_replicate_dfp<string OpcodeStr> {
def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              [(set VR128:$dst,
                (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
                                (undef))))]>;
}

multiclass sse3_replicate_dfp_y<string OpcodeStr> {
def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              []>;
def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              []>;
}

let Predicates = [HasAVX] in {
  // FIXME: Merge above classes when we have patterns for the ymm version
  defm VMOVDDUP  : sse3_replicate_dfp<"vmovddup">, VEX;
  defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
}
defm MOVDDUP : sse3_replicate_dfp<"movddup">;
// Move Unaligned Integer
let Predicates = [HasAVX] in {
def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "vlddqu\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                   "vlddqu\t{$src, $dst|$dst, $src}",
                   [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
}
def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "lddqu\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;

def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
                   (undef)),
          (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;

// Several MOVDDUP load-folding patterns
let AddedComplexity = 5 in {
def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
          (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
          (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
          (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
          (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
}
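// movddup broadcasts the low double: result = {src[0], src[0]}. The memory
// form only loads 64 bits, which is why the f64mem operand and the
// scalar_to_vector pattern above suffice.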
3799 //===---------------------------------------------------------------------===//
3800 // SSE3 - Arithmetic
3801 //===---------------------------------------------------------------------===//
multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, bit Is2Addr = 1> {
  def rr : I<0xD0, MRMSrcReg,
       (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
  def rm : I<0xD0, MRMSrcMem,
       (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
}
let Predicates = [HasAVX],
  ExeDomain = SSEPackedDouble in {
  defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
                               f128mem, 0>, TB, XD, VEX_4V;
  defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
                               f128mem, 0>, TB, OpSize, VEX_4V;
  defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
                                f256mem, 0>, TB, XD, VEX_4V;
  defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
                                f256mem, 0>, TB, OpSize, VEX_4V;
}
let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
  ExeDomain = SSEPackedDouble in {
  defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
                              f128mem>, TB, XD;
  defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
                              f128mem>, TB, OpSize;
}
//===---------------------------------------------------------------------===//
// SSE3 Instructions
//===---------------------------------------------------------------------===//
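
// Horizontal ops: each destination element is the sum (or difference) of a
// pair of adjacent elements taken from one of the two source operands.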
multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
                   X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
  def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;

  def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
}
multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
                  X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
  def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;

  def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
}
let Predicates = [HasAVX] in {
  defm VHADDPS  : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
                          int_x86_sse3_hadd_ps, 0>, VEX_4V;
  defm VHADDPD  : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
                          int_x86_sse3_hadd_pd, 0>, VEX_4V;
  defm VHSUBPS  : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
                          int_x86_sse3_hsub_ps, 0>, VEX_4V;
  defm VHSUBPD  : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
                          int_x86_sse3_hsub_pd, 0>, VEX_4V;
  defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
                          int_x86_avx_hadd_ps_256, 0>, VEX_4V;
  defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
                          int_x86_avx_hadd_pd_256, 0>, VEX_4V;
  defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
                          int_x86_avx_hsub_ps_256, 0>, VEX_4V;
  defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
                          int_x86_avx_hsub_pd_256, 0>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
  defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
                        int_x86_sse3_hadd_ps>;
  defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
                       int_x86_sse3_hadd_pd>;
  defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
                        int_x86_sse3_hsub_ps>;
  defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
                       int_x86_sse3_hsub_pd>;
}
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Absolute Instructions
//===---------------------------------------------------------------------===//
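
// pabs{b,w,d} compute the per-element absolute value of the source.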
/// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
                            PatFrag mem_frag128, Intrinsic IntId128> {
  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>,
                    OpSize;

  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
}
let Predicates = [HasAVX] in {
  defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
                                 int_x86_ssse3_pabs_b_128>, VEX;
  defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
                                 int_x86_ssse3_pabs_w_128>, VEX;
  defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
                                 int_x86_ssse3_pabs_d_128>, VEX;
}

defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
                              int_x86_ssse3_pabs_b_128>;
defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
                              int_x86_ssse3_pabs_w_128>;
defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
                              int_x86_ssse3_pabs_d_128>;
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Binary Operator Instructions
//===---------------------------------------------------------------------===//
/// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
                             PatFrag mem_frag128, Intrinsic IntId128,
                             bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       OpSize;
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
let Predicates = [HasAVX] in {
let isCommutable = 0 in {
  defm VPHADDW    : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
                                      int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
  defm VPHADDD    : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
                                      int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
  defm VPHADDSW   : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
                                      int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
  defm VPHSUBW    : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
                                      int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
  defm VPHSUBD    : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
                                      int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
  defm VPHSUBSW   : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
                                      int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
  defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
                                      int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
  defm VPSHUFB    : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
                                      int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
  defm VPSIGNB    : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
                                      int_x86_ssse3_psign_b_128, 0>, VEX_4V;
  defm VPSIGNW    : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
                                      int_x86_ssse3_psign_w_128, 0>, VEX_4V;
  defm VPSIGND    : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
                                      int_x86_ssse3_psign_d_128, 0>, VEX_4V;
}
defm VPMULHRSW    : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
                                      int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
}
// None of these have i8 immediate fields.
let ImmT = NoImm, Constraints = "$src1 = $dst" in {
let isCommutable = 0 in {
  defm PHADDW    : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
                                     int_x86_ssse3_phadd_w_128>;
  defm PHADDD    : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
                                     int_x86_ssse3_phadd_d_128>;
  defm PHADDSW   : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
                                     int_x86_ssse3_phadd_sw_128>;
  defm PHSUBW    : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
                                     int_x86_ssse3_phsub_w_128>;
  defm PHSUBD    : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
                                     int_x86_ssse3_phsub_d_128>;
  defm PHSUBSW   : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
                                     int_x86_ssse3_phsub_sw_128>;
  defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
                                     int_x86_ssse3_pmadd_ub_sw_128>;
  defm PSHUFB    : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
                                     int_x86_ssse3_pshuf_b_128>;
  defm PSIGNB    : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
                                     int_x86_ssse3_psign_b_128>;
  defm PSIGNW    : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
                                     int_x86_ssse3_psign_w_128>;
  defm PSIGND    : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
                                     int_x86_ssse3_psign_d_128>;
}
defm PMULHRSW    : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
                                     int_x86_ssse3_pmul_hr_sw_128>;
}
def : Pat<(X86pshufb VR128:$src, VR128:$mask),
          (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
          (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;

def : Pat<(X86psignb VR128:$src1, VR128:$src2),
          (PSIGNBrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
def : Pat<(X86psignw VR128:$src1, VR128:$src2),
          (PSIGNWrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
def : Pat<(X86psignd VR128:$src1, VR128:$src2),
          (PSIGNDrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
//===---------------------------------------------------------------------===//
// SSSE3 - Packed Align Instruction Patterns
//===---------------------------------------------------------------------===//
multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
  def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      !if(Is2Addr,
          !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(asm,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      []>, OpSize;
  def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      !if(Is2Addr,
          !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(asm,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      []>, OpSize;
}
let Predicates = [HasAVX] in
  defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
let Constraints = "$src1 = $dst", Predicates = [HasSSSE3] in
  defm PALIGN : ssse3_palign<"palignr">;
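
// palignr shifts the 32-byte concatenation of its two sources right by $imm
// bytes. The X86PAlign node carries its operands in the opposite order from
// the instruction, which is why the patterns below swap $src1 and $src2.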
let Predicates = [HasSSSE3] in {
def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
}
let Predicates = [HasAVX] in {
def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
}
//===---------------------------------------------------------------------===//
// SSSE3 Misc Instructions
//===---------------------------------------------------------------------===//
// Thread synchronization
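// The MONITOR/MWAIT pseudos take ordinary virtual-register operands, so a
// custom inserter can move them into the fixed registers (EAX/ECX/EDX) that
// the hardware instructions read implicitly.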
let usesCustomInserter = 1 in {
def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
                [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>;
def MWAIT : PseudoI<(outs), (ins GR32:$src1, GR32:$src2),
                [(int_x86_sse3_mwait GR32:$src1, GR32:$src2)]>;
}

let Uses = [EAX, ECX, EDX] in
def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>, TB,
                Requires<[HasSSE3]>;
let Uses = [ECX, EAX] in
def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", []>, TB,
                Requires<[HasSSE3]>;

def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;

def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>,
      Requires<[In32BitMode]>;
def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>,
      Requires<[In64BitMode]>;
//===---------------------------------------------------------------------===//
// Non-Instruction Patterns
//===---------------------------------------------------------------------===//

// extload f32 -> f64.  This matches load+fextend because we have a hack in
// the isel (PreprocessForFPConvert) that can introduce loads after dag
// combine.
// Since these loads aren't folded into the fextend, we have to match it
// explicitly here.
let Predicates = [HasSSE2] in
 def : Pat<(fextend (loadf32 addr:$src)),
           (CVTSS2SDrm addr:$src)>;
// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasXMMInt] in {
  def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}
// Bitcasts between 256-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasAVX] in {
  def : Pat<(v4f64  (bitconvert (v8f32 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v8i32 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v4i64 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v32i8 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v8i32 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v4i64 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v4f64 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v32i8 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v8f32 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v8i32 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v4f64 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v32i8 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v4f64 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v4i64 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v8f32 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v8i32 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v32i8 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v8f32 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v4i64 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v4f64 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))),  (v16i16 VR256:$src)>;
}
// Move scalar to XMM zero-extended
// movd to XMM register zero-extends
let AddedComplexity = 15 in {
// Zero a VR128, then do a MOVS{S|D} to the lower bits.
def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
          (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
          (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
          (MOVSSrr (v4f32 (V_SET0PS)),
                   (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
          (MOVSSrr (v4i32 (V_SET0PI)),
                   (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
}
// Splat v2f64 / v2i64
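// A splat of the low element is just an unpack of the register with itself.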
let AddedComplexity = 10 in {
def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
          (UNPCKLPDrr VR128:$src, VR128:$src)>,   Requires<[HasSSE2]>;
def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
          (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}
let AddedComplexity = 20 in {
// vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
}
// (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
                 addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;
let AddedComplexity = 15 in {
// Setting the lowest element in the vector.
def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4i32 VR128:$src1),
                   (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2i64 VR128:$src1),
                   (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;

// vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
          Requires<[HasSSE2]>;
def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
          Requires<[HasSSE2]>;
}
// Set lowest element and zero upper elements.
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
          (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
// Use movaps / movups for SSE integer load / store (one byte shorter).
// The instructions selected below are then converted to MOVDQA/MOVDQU
// during the SSE domain pass.
let Predicates = [HasSSE1] in {
  def : Pat<(alignedloadv4i32 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv4i32 addr:$src),
            (MOVUPSrm addr:$src)>;
  def : Pat<(alignedloadv2i64 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (MOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
}
// Use vmovaps/vmovups for AVX integer load/store.
let Predicates = [HasAVX] in {
  // 128-bit load/store
  def : Pat<(alignedloadv4i32 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv4i32 addr:$src),
            (VMOVUPSrm addr:$src)>;
  def : Pat<(alignedloadv2i64 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (VMOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;

  // 256-bit load/store
  def : Pat<(alignedloadv4i64 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv4i64 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedloadv8i32 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv8i32 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedstore (v4i64 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore (v8i32 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore (v16i16 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore (v32i8 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v4i64 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v8i32 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v16i16 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v32i8 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Move with Sign/Zero Extend
//===----------------------------------------------------------------------===//
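
// pmovsx/pmovzx sign- or zero-extend the low elements of the source. The
// memory forms load only as many bytes as the widened result consumes
// (8, 4, or 2), which is why the loads below go through scalar_to_vector.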
multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
       [(set VR128:$dst,
         (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
       OpSize;
}
let Predicates = [HasAVX] in {
defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
                                     VEX;
defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
                                     VEX;
defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
                                     VEX;
defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
                                     VEX;
defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
                                     VEX;
defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
                                     VEX;
}

defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
// Common patterns involving scalar load.
def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
          (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
          (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
          (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
          (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
          (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
          (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
          (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
          (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
          (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
          (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
          (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
          (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
       [(set VR128:$dst,
         (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
       OpSize;
}
let Predicates = [HasAVX] in {
defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
                                     VEX;
defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
                                     VEX;
defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
                                     VEX;
defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
                                     VEX;
}

defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
          (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
          (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
          (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
          (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  // Expecting an i16 load any-extended to an i32 value.
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId (bitconvert
                     (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
                 OpSize;
}
let Predicates = [HasAVX] in {
defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
                                     VEX;
defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
                                     VEX;
}

defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbq
            (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
          (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbq
            (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
          (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Extract Instructions
//===----------------------------------------------------------------------===//
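
// pextr{b,w,d,q} copy the element selected by the immediate to a GPR or
// directly to memory.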
/// SS41I_extract8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
                 OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 []>, OpSize;
// FIXME:
// There's an AssertZext in the way of writing the store pattern
// (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}
let Predicates = [HasAVX] in {
  defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
  def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
         (ins VR128:$src1, i32i8imm:$src2),
         "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
}

defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 []>, OpSize;
// FIXME:
// There's an AssertZext in the way of writing the store pattern
// (store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))), addr:$dst)
}

let Predicates = [HasAVX] in
  defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;

defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst,
                  (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
                          addr:$dst)]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;

defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR64:$dst,
                  (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
                          addr:$dst)]>, OpSize, REX_W;
}

let Predicates = [HasAVX] in
  defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;

defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
/// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
/// destination
multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst,
                    (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
                 OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
                          addr:$dst)]>, OpSize;
}
let Predicates = [HasAVX] in {
  defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
  def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
                  (ins VR128:$src1, i32i8imm:$src2),
                  "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
                  []>, OpSize, VEX;
}

defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;

// Also match an EXTRACTPS store when the store is done as f32 instead of i32.
def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
                                              imm:$src2))),
                 addr:$dst),
          (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
          Requires<[HasSSE41]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Insert Instructions
//===----------------------------------------------------------------------===//
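
// pinsr{b,d,q} overwrite the element selected by the immediate with a GPR or
// memory operand; all other elements pass through unchanged.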
multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
      !if(Is2Addr,
          !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(asm,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
          !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(asm,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
                   imm:$src3))]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
      !if(Is2Addr,
          !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(asm,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
      OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
          !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(asm,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
                          imm:$src3)))]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
      !if(Is2Addr,
          !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(asm,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
      OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
          !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(asm,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
                          imm:$src3)))]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
let Constraints = "$src1 = $dst" in
  defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
// insertps has a few different modes; the first two below are optimized
// inserts that won't zero arbitrary elements in the destination vector. The
// next one matches the intrinsic and could zero arbitrary elements in the
// target vector.
multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, VR128:$src2, u32u8imm:$src3),
      !if(Is2Addr,
          !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(asm,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
      OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, f32mem:$src2, u32u8imm:$src3),
      !if(Is2Addr,
          !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
          !strconcat(asm,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86insrtps VR128:$src1,
                   (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
                    imm:$src3))]>, OpSize;
}
let Constraints = "$src1 = $dst" in
  defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
let Predicates = [HasAVX] in
  defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;

def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
          (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
          Requires<[HasAVX]>;
def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
          (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
          Requires<[HasSSE41]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Round Instructions
//===----------------------------------------------------------------------===//
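
// The round immediate: bits 1:0 select the rounding mode when bit 2 is clear,
// bit 2 selects the current mode from MXCSR.RC, and bit 3 suppresses the
// precision (inexact) exception.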
multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
                            X86MemOperand x86memop, RegisterClass RC,
                            PatFrag mem_frag32, PatFrag mem_frag64,
                            Intrinsic V4F32Int, Intrinsic V2F64Int> {
  // Vector intrinsic operation, reg
  def PSr : SS4AIi8<opcps, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, mem
  def PSm : Ii8<opcps, MRMSrcMem,
                    (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                          (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
                    TA, OpSize,
                    Requires<[HasSSE41]>;

  // Vector intrinsic operation, reg
  def PDr : SS4AIi8<opcpd, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, mem
  def PDm : SS4AIi8<opcpd, MRMSrcMem,
                    (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                          (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
                    OpSize;
}
multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
                 RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
  // Vector intrinsic operation, reg
  def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, OpSize;

  // Vector intrinsic operation, mem
  def PSm_AVX : Ii8<opcps, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, TA, OpSize, Requires<[HasSSE41]>;

  // Vector intrinsic operation, reg
  def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, OpSize;

  // Vector intrinsic operation, mem
  def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, OpSize;
}
multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
                             string OpcodeStr,
                             Intrinsic F32Int,
                             Intrinsic F64Int, bit Is2Addr = 1> {
  // Intrinsic operation, reg.
  def SSr : SS4AIi8<opcss, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
        OpSize;

  // Intrinsic operation, mem.
  def SSm : SS4AIi8<opcss, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst,
             (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
        OpSize;

  // Intrinsic operation, reg.
  def SDr : SS4AIi8<opcsd, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
        OpSize;

  // Intrinsic operation, mem.
  def SDm : SS4AIi8<opcsd, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst,
              (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
        OpSize;
}
multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
                                   string OpcodeStr> {
  // Intrinsic operation, reg.
  def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;

  // Intrinsic operation, mem.
  def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;

  // Intrinsic operation, reg.
  def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;

  // Intrinsic operation, mem.
  def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;
}
// FP round - roundss, roundps, roundsd, roundpd
let Predicates = [HasAVX] in {
  defm VROUND  : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
                                  memopv4f32, memopv2f64,
                                  int_x86_sse41_round_ps,
                                  int_x86_sse41_round_pd>, VEX;
  defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
                                  memopv8f32, memopv4f64,
                                  int_x86_avx_round_ps_256,
                                  int_x86_avx_round_pd_256>, VEX;
  defm VROUND  : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
                                   int_x86_sse41_round_ss,
                                   int_x86_sse41_round_sd, 0>, VEX_4V;

  // Instructions for the assembler
  defm VROUND  : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
                                        VEX;
  defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
                                        VEX;
  defm VROUND  : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
}

defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
                              memopv4f32, memopv2f64,
                              int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
let Constraints = "$src1 = $dst" in
defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
                               int_x86_sse41_round_ss,
                               int_x86_sse41_round_sd>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Bit Test
//===----------------------------------------------------------------------===//
// The ptest intrinsic is lowered to this node in X86ISelLowering, primarily
// from the Intel intrinsic that corresponds to it.
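// ptest sets ZF if ($src2 & $src1) is all zeros and CF if ($src2 & ~$src1)
// is all zeros; neither source operand is modified.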
let Defs = [EFLAGS], Predicates = [HasAVX] in {
def VPTESTrr  : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
                OpSize, VEX;
def VPTESTrm  : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
                OpSize, VEX;

def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
                OpSize, VEX;
def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
                OpSize, VEX;
}
let Defs = [EFLAGS] in {
def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
              "ptest \t{$src2, $src1|$src1, $src2}",
              [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
              OpSize;
def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
              "ptest \t{$src2, $src1|$src1, $src2}",
              [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
              OpSize;
}
// The bit test instructions below are AVX only
multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
  def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
            [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
  def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
            [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
            OpSize, VEX;
}

let Defs = [EFLAGS], Predicates = [HasAVX] in {
defm VTESTPS  : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
defm VTESTPD  : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
}
//===----------------------------------------------------------------------===//
// SSE4.1 - Misc Instructions
//===----------------------------------------------------------------------===//
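
// popcnt counts the number of set bits in its source operand.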
def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
                   "popcnt{w}\t{$src, $dst|$dst, $src}",
                   [(set GR16:$dst, (ctpop GR16:$src))]>, OpSize, XS;
def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
                   "popcnt{w}\t{$src, $dst|$dst, $src}",
                   [(set GR16:$dst, (ctpop (loadi16 addr:$src)))]>, OpSize, XS;

def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
                   "popcnt{l}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (ctpop GR32:$src))]>, XS;
def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
                   "popcnt{l}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (ctpop (loadi32 addr:$src)))]>, XS;

def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (ctpop GR64:$src))]>, XS;
def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (ctpop (loadi64 addr:$src)))]>, XS;
// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId128> {
  def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
  def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
}

let Predicates = [HasAVX] in
defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
                                          int_x86_sse41_phminposuw>, VEX;
defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
                                         int_x86_sse41_phminposuw>;
/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId128, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
let Predicates = [HasAVX] in {
  let isCommutable = 0 in
  defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
                                      0>, VEX_4V;
  defm VPCMPEQQ  : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
                                      0>, VEX_4V;
  defm VPMINSB   : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
                                      0>, VEX_4V;
  defm VPMINSD   : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
                                      0>, VEX_4V;
  defm VPMINUD   : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
                                      0>, VEX_4V;
  defm VPMINUW   : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
                                      0>, VEX_4V;
  defm VPMAXSB   : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
                                      0>, VEX_4V;
  defm VPMAXSD   : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
                                      0>, VEX_4V;
  defm VPMAXUD   : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
                                      0>, VEX_4V;
  defm VPMAXUW   : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
                                      0>, VEX_4V;
  defm VPMULDQ   : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
                                      0>, VEX_4V;

  def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
            (VPCMPEQQrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
            (VPCMPEQQrm VR128:$src1, addr:$src2)>;
}
let Constraints = "$src1 = $dst" in {
  let isCommutable = 0 in
  defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
  defm PCMPEQQ  : SS41I_binop_rm_int<0x29, "pcmpeqq",  int_x86_sse41_pcmpeqq>;
  defm PMINSB   : SS41I_binop_rm_int<0x38, "pminsb",   int_x86_sse41_pminsb>;
  defm PMINSD   : SS41I_binop_rm_int<0x39, "pminsd",   int_x86_sse41_pminsd>;
  defm PMINUD   : SS41I_binop_rm_int<0x3B, "pminud",   int_x86_sse41_pminud>;
  defm PMINUW   : SS41I_binop_rm_int<0x3A, "pminuw",   int_x86_sse41_pminuw>;
  defm PMAXSB   : SS41I_binop_rm_int<0x3C, "pmaxsb",   int_x86_sse41_pmaxsb>;
  defm PMAXSD   : SS41I_binop_rm_int<0x3D, "pmaxsd",   int_x86_sse41_pmaxsd>;
  defm PMAXUD   : SS41I_binop_rm_int<0x3F, "pmaxud",   int_x86_sse41_pmaxud>;
  defm PMAXUW   : SS41I_binop_rm_int<0x3E, "pmaxuw",   int_x86_sse41_pmaxuw>;
  defm PMULDQ   : SS41I_binop_rm_int<0x28, "pmuldq",   int_x86_sse41_pmuldq>;
}

def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
          (PCMPEQQrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
          (PCMPEQQrm VR128:$src1, addr:$src2)>;
/// SS48I_binop_rm - Simple SSE41 binary operator.
multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
       OpSize;
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpNode VR128:$src1,
                                 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
       OpSize;
}

let Predicates = [HasAVX] in
  defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
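
// pmulld is a full 32x32->32 multiply; pmuldq above instead produces 64-bit
// products of the even-numbered 32-bit elements.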
/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
                 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
                 X86MemOperand x86memop, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u32u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
        OpSize;
  def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst,
          (IntId RC:$src1,
           (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
        OpSize;
}
5156 let Predicates = [HasAVX] in {
5157 let isCommutable = 0 in {
5158 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
5159 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5160 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
5161 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5162 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
5163 int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
5164 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
5165 int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
5166 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
5167 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5168 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
5169 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5171 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
5172 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5173 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
5174 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5175 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
5176 VR256, memopv32i8, i256mem, 0>, VEX_4V;
5179 let Constraints = "$src1 = $dst" in {
5180 let isCommutable = 0 in {
5181 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
5182 VR128, memopv16i8, i128mem>;
5183 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
5184 VR128, memopv16i8, i128mem>;
5185 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
5186 VR128, memopv16i8, i128mem>;
5187 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
5188 VR128, memopv16i8, i128mem>;
5190 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
5191 VR128, memopv16i8, i128mem>;
5192 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
5193 VR128, memopv16i8, i128mem>;
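
// Note on the blend immediates above: bit i of $src3 selects element i of
// $src2 when set and element i of $src1 when clear, so a blend is not
// commutable (hence isCommutable = 0), while the dot products are.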
/// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
let Predicates = [HasAVX] in {
multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
                                    RegisterClass RC, X86MemOperand x86memop,
                                    PatFrag mem_frag, Intrinsic IntId> {
  def rr : I<opc, MRMSrcReg, (outs RC:$dst),
          (ins RC:$src1, RC:$src2, RC:$src3),
          !strconcat(OpcodeStr,
              "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
          [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
          SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;

  def rm : I<opc, MRMSrcMem, (outs RC:$dst),
          (ins RC:$src1, x86memop:$src2, RC:$src3),
          !strconcat(OpcodeStr,
              "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
          [(set RC:$dst,
            (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
             RC:$src3))],
          SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
}

defm VBLENDVPD  : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_blendvpd>;
defm VBLENDVPS  : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_blendvps>;
defm VPBLENDVB  : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_pblendvb>;
defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
                                        memopv32i8, int_x86_avx_blendv_pd_256>;
defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
                                        memopv32i8, int_x86_avx_blendv_ps_256>;
}
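
// In the VEX encoding generated above, the mask register $src3 does not fit
// in ModRM; VEX_I8IMM marks it as encoded in the upper four bits of a
// trailing immediate byte.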
/// SS41I_ternary_int - SSE 4.1 ternary operator
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
  multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
    def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr,
                               "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
                    OpSize;

    def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2),
                    !strconcat(OpcodeStr,
                               "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst,
                      (IntId VR128:$src1,
                       (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
  }
}

defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
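
// The legacy (non-VEX) blendv forms read their selection mask from XMM0
// implicitly (hence Uses = [XMM0] above): the sign bit of each element of
// XMM0 picks the corresponding element of $src2 over $src1.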
def : Pat<(X86pblendv VR128:$src1, VR128:$src2, XMM0),
          (PBLENDVBrr0 VR128:$src1, VR128:$src2)>;

let Predicates = [HasAVX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vmovntdqa\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                        OpSize, VEX;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "movntdqa\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                       OpSize;

//===----------------------------------------------------------------------===//
// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//
/// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId128, bit Is2Addr = 1> {
  def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
                  (ins VR128:$src1, VR128:$src2),
                  !if(Is2Addr,
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      !strconcat(OpcodeStr,
                                 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                  [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                  OpSize;
  def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
                  (ins VR128:$src1, i128mem:$src2),
                  !if(Is2Addr,
                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                      !strconcat(OpcodeStr,
                                 "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                  [(set VR128:$dst,
                    (IntId128 VR128:$src1,
                     (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}

let Predicates = [HasAVX] in {
  defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
                                     0>, VEX_4V;

  def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
            (VPCMPGTQrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
            (VPCMPGTQrm VR128:$src1, addr:$src2)>;
}

let Constraints = "$src1 = $dst" in
  defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;

def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
          (PCMPGTQrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
          (PCMPGTQrm VR128:$src1, addr:$src2)>;
//===----------------------------------------------------------------------===//
// SSE4.2 - String/text Processing Instructions
//===----------------------------------------------------------------------===//

// Packed Compare Implicit Length Strings, Return Mask
multiclass pseudo_pcmpistrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2, i8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
                                                  imm:$src3))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
                       VR128:$src1, (load addr:$src2), imm:$src3))]>;
}

let Defs = [EFLAGS], usesCustomInserter = 1 in {
  defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
  defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
}

let Defs = [XMM0, EFLAGS], Predicates = [HasAVX] in {
  def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
  def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
}

let Defs = [XMM0, EFLAGS] in {
  def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
  def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
}
// Packed Compare Explicit Length Strings, Return Mask
multiclass pseudo_pcmpestrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src3, i8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                       VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                       VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
}

let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
  defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
  defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
}

let Predicates = [HasAVX],
    Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
  def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
  def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
}

let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
  def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
  def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
}
// Packed Compare Implicit Length Strings, Return Index
let Defs = [ECX, EFLAGS] in {
  multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
    def rr : SS42AI<0x63, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
      [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
       (implicit EFLAGS)]>, OpSize;
    def rm : SS42AI<0x63, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
      [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
       (implicit EFLAGS)]>, OpSize;
  }
}

let Predicates = [HasAVX] in {
defm VPCMPISTRI  : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
                                    VEX;
}

defm PCMPISTRI  : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
// Packed Compare Explicit Length Strings, Return Index
let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
  multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
    def rr : SS42AI<0x61, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
      [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
       (implicit EFLAGS)]>, OpSize;
    def rm : SS42AI<0x61, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
      [(set ECX,
            (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
       (implicit EFLAGS)]>, OpSize;
  }
}

let Predicates = [HasAVX] in {
defm VPCMPESTRI  : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
                                    VEX;
}

defm PCMPESTRI  : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
//===----------------------------------------------------------------------===//
// SSE4.2 - CRC Instructions
//===----------------------------------------------------------------------===//

// No CRC instructions have AVX equivalents

// crc intrinsic instruction
// These instructions differ only in the sizes of their r and m operands.
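// Note that crc32 computes the CRC-32C (Castagnoli) checksum, using the
// polynomial 0x11EDC6F41, not the CRC-32 polynomial used by zip/zlib.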
let Constraints = "$src1 = $dst" in {
  def CRC32r32m8  : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i8mem:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_8 GR32:$src1,
                                                  (load addr:$src2)))]>;
  def CRC32r32r8  : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR8:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_8 GR32:$src1, GR8:$src2))]>;
  def CRC32r32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i16mem:$src2),
                      "crc32{w} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_16 GR32:$src1,
                                                   (load addr:$src2)))]>,
                      OpSize;
  def CRC32r32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR16:$src2),
                      "crc32{w} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_16 GR32:$src1, GR16:$src2))]>,
                      OpSize;
  def CRC32r32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i32mem:$src2),
                      "crc32{l} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_32 GR32:$src1,
                                                   (load addr:$src2)))]>;
  def CRC32r32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR32:$src2),
                      "crc32{l} \t{$src2, $src1|$src1, $src2}",
                      [(set GR32:$dst,
                        (int_x86_sse42_crc32_32_32 GR32:$src1, GR32:$src2))]>;
  def CRC32r64m8  : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
                      (ins GR64:$src1, i8mem:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                      [(set GR64:$dst,
                        (int_x86_sse42_crc32_64_8 GR64:$src1,
                                                  (load addr:$src2)))]>,
                      REX_W;
  def CRC32r64r8  : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
                      (ins GR64:$src1, GR8:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                      [(set GR64:$dst,
                        (int_x86_sse42_crc32_64_8 GR64:$src1, GR8:$src2))]>,
                      REX_W;
  def CRC32r64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
                      (ins GR64:$src1, i64mem:$src2),
                      "crc32{q} \t{$src2, $src1|$src1, $src2}",
                      [(set GR64:$dst,
                        (int_x86_sse42_crc32_64_64 GR64:$src1,
                                                   (load addr:$src2)))]>,
                      REX_W;
  def CRC32r64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2),
                      "crc32{q} \t{$src2, $src1|$src1, $src2}",
                      [(set GR64:$dst,
                        (int_x86_sse42_crc32_64_64 GR64:$src1, GR64:$src2))]>,
                      REX_W;
}
//===----------------------------------------------------------------------===//
// AES-NI Instructions
//===----------------------------------------------------------------------===//

multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
                             Intrinsic IntId128, bit Is2Addr = 1> {
  def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src1, VR128:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr,
                                "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
                 OpSize;
  def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
                 (ins VR128:$src1, i128mem:$src2),
                 !if(Is2Addr,
                     !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                     !strconcat(OpcodeStr,
                                "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                 [(set VR128:$dst,
                   (IntId128 VR128:$src1,
                    (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}

// Perform One Round of an AES Encryption/Decryption Flow
let Predicates = [HasAVX, HasAES] in {
  defm VAESENC     : AESI_binop_rm_int<0xDC, "vaesenc",
                                       int_x86_aesni_aesenc, 0>, VEX_4V;
  defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
                                       int_x86_aesni_aesenclast, 0>, VEX_4V;
  defm VAESDEC     : AESI_binop_rm_int<0xDE, "vaesdec",
                                       int_x86_aesni_aesdec, 0>, VEX_4V;
  defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
                                       int_x86_aesni_aesdeclast, 0>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
  defm AESENC     : AESI_binop_rm_int<0xDC, "aesenc",
                                      int_x86_aesni_aesenc>;
  defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
                                      int_x86_aesni_aesenclast>;
  defm AESDEC     : AESI_binop_rm_int<0xDE, "aesdec",
                                      int_x86_aesni_aesdec>;
  defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
                                      int_x86_aesni_aesdeclast>;
}

def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
          (AESENCrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
          (AESENCrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
          (AESENCLASTrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
          (AESENCLASTrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
          (AESDECrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
          (AESDECrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
          (AESDECLASTrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
          (AESDECLASTrm VR128:$src1, addr:$src2)>;
// Perform the AES InvMixColumn Transformation
let Predicates = [HasAVX, HasAES] in {
  def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst,
        (int_x86_aesni_aesimc VR128:$src1))]>,
      OpSize, VEX;
  def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst,
        (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
      OpSize, VEX;
}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
  (ins VR128:$src1),
  "aesimc\t{$src1, $dst|$dst, $src1}",
  [(set VR128:$dst,
    (int_x86_aesni_aesimc VR128:$src1))]>,
  OpSize;
def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
  (ins i128mem:$src1),
  "aesimc\t{$src1, $dst|$dst, $src1}",
  [(set VR128:$dst,
    (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
  OpSize;
// AES Round Key Generation Assist
let Predicates = [HasAVX, HasAES] in {
  def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
      OpSize, VEX;
  def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
                                       imm:$src2))]>,
      OpSize, VEX;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
  (ins VR128:$src1, i8imm:$src2),
  "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
  [(set VR128:$dst,
    (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
  OpSize;
def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
  (ins i128mem:$src1, i8imm:$src2),
  "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
  [(set VR128:$dst,
    (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
                                   imm:$src2))]>,
  OpSize;
//===----------------------------------------------------------------------===//
// CLMUL Instructions
//===----------------------------------------------------------------------===//

// Carry-less Multiplication instructions
let Constraints = "$src1 = $dst" in {
def PCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           []>;

def PCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           []>;
}

// AVX carry-less Multiplication instructions
def VPCLMULQDQrr : AVXCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           []>;

def VPCLMULQDQrm : AVXCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           []>;

multiclass pclmul_alias<string asm, int immop> {
  def : InstAlias<!strconcat("pclmul", asm,
                             "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrr VR128:$dst, VR128:$src, immop)>;

  def : InstAlias<!strconcat("pclmul", asm,
                             "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrm VR128:$dst, i128mem:$src, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop)>;
}
defm : pclmul_alias<"hqhq", 0x11>;
defm : pclmul_alias<"hqlq", 0x01>;
defm : pclmul_alias<"lqhq", 0x10>;
defm : pclmul_alias<"lqlq", 0x00>;
//===----------------------------------------------------------------------===//
// AVX Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VBROADCAST - Load from memory and broadcast to all elements of the
//              destination operand
//
class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
                    X86MemOperand x86memop, Intrinsic Int> :
  AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
        !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
        [(set RC:$dst, (Int addr:$src))]>, VEX;

def VBROADCASTSS   : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
                                   int_x86_avx_vbroadcastss>;
def VBROADCASTSSY  : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
                                   int_x86_avx_vbroadcastss_256>;
def VBROADCASTSD   : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
                                   int_x86_avx_vbroadcast_sd_256>;
def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
                                   int_x86_avx_vbroadcastf128_pd_256>;

def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
          (VBROADCASTF128 addr:$src)>;

def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
          (VBROADCASTSSY addr:$src)>;
def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
          (VBROADCASTSD addr:$src)>;
def : Pat<(v8f32 (X86VBroadcast (loadf32 addr:$src))),
          (VBROADCASTSSY addr:$src)>;
def : Pat<(v4f64 (X86VBroadcast (loadf64 addr:$src))),
          (VBROADCASTSD addr:$src)>;

def : Pat<(v4f32 (X86VBroadcast (loadf32 addr:$src))),
          (VBROADCASTSS addr:$src)>;
def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
          (VBROADCASTSS addr:$src)>;
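
// vbroadcastss/sd load one scalar and replicate it into every destination
// element, so a plain scalar load feeding an X86VBroadcast node can be
// matched directly by the patterns above.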
//===----------------------------------------------------------------------===//
// VINSERTF128 - Insert packed floating-point values
//
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;

def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;

def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
// Special COPY patterns
def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
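
// Inserting a 128-bit value into lane 0 of an otherwise-undefined 256-bit
// register is just a subregister write, so the patterns above avoid a real
// vinsertf128; the copy is usually coalesced away entirely.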
//===----------------------------------------------------------------------===//
// VEXTRACTF128 - Extract packed floating-point values
//
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;

def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;

def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v4f32 (VEXTRACTF128rr
                    (v8f32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v2f64 (VEXTRACTF128rr
                    (v4f64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v4i32 (VEXTRACTF128rr
                    (v8i32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v2i64 (VEXTRACTF128rr
                    (v4i64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v8i16 (VEXTRACTF128rr
                    (v16i16 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v16i8 (VEXTRACTF128rr
                    (v32i8 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
// Special COPY patterns
def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (i32 0))),
          (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (i32 0))),
          (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;

def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (i32 0))),
          (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (i32 0))),
          (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;

def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (i32 0))),
          (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (i32 0))),
          (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;
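
// Likewise, extracting lane 0 of a 256-bit register is just a subregister
// read, so no vextractf128 is emitted for the low half.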
//===----------------------------------------------------------------------===//
// VMASKMOV - Conditional SIMD Packed Loads and Stores
//
multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
                          Intrinsic IntLd, Intrinsic IntLd256,
                          Intrinsic IntSt, Intrinsic IntSt256,
                          PatFrag pf128, PatFrag pf256> {
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, f128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
             VEX_4V;
  def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, f256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V;
  def mr  : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
}

defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
                                 int_x86_avx_maskload_ps,
                                 int_x86_avx_maskload_ps_256,
                                 int_x86_avx_maskstore_ps,
                                 int_x86_avx_maskstore_ps_256,
                                 memopv4f32, memopv8f32>;
defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
                                 int_x86_avx_maskload_pd,
                                 int_x86_avx_maskload_pd_256,
                                 int_x86_avx_maskstore_pd,
                                 int_x86_avx_maskstore_pd_256,
                                 memopv2f64, memopv4f64>;
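
// For the conditional loads above, only lanes whose mask element (the first
// register operand) has its sign bit set are read, and unselected lanes are
// zeroed; the store forms likewise write only the selected lanes and leave
// the rest of memory untouched.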
//===----------------------------------------------------------------------===//
// VPERMIL - Permute Single and Double Floating-Point Values
//
multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
                      RegisterClass RC, X86MemOperand x86memop_f,
                      X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
                      Intrinsic IntVar, Intrinsic IntImm> {
  def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
  def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop_i:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;

  def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
  def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
             (ins x86memop_f:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
}

defm VPERMILPS  : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
                             memopv4f32, memopv4i32,
                             int_x86_avx_vpermilvar_ps,
                             int_x86_avx_vpermil_ps>;
defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
                             memopv8f32, memopv8i32,
                             int_x86_avx_vpermilvar_ps_256,
                             int_x86_avx_vpermil_ps_256>;
defm VPERMILPD  : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
                             memopv2f64, memopv2i64,
                             int_x86_avx_vpermilvar_pd,
                             int_x86_avx_vpermil_pd>;
defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
                             memopv4f64, memopv4i64,
                             int_x86_avx_vpermilvar_pd_256,
                             int_x86_avx_vpermil_pd_256>;

def : Pat<(v8f32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPSYri VR256:$src1, imm:$imm)>;
def : Pat<(v4f64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPDYri VR256:$src1, imm:$imm)>;
def : Pat<(v8i32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPSYri VR256:$src1, imm:$imm)>;
def : Pat<(v4i64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPDYri VR256:$src1, imm:$imm)>;
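
// In the immediate forms, vpermilps consumes a 2-bit selector per element
// and vpermilpd a 1-bit selector; each selector picks a source element from
// within the same 128-bit lane only.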
//===----------------------------------------------------------------------===//
// VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
//
def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;

def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;

def : Pat<(int_x86_avx_vperm2f128_ps_256
                  VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256
                  VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256
                  VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;

def : Pat<(v8f32 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v8i32 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4f64 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
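
// The vperm2f128 immediate picks each destination lane independently: bits
// 1:0 choose the low lane and bits 5:4 the high lane from the four source
// lanes, while bits 3 and 7 zero the corresponding lane instead.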
//===----------------------------------------------------------------------===//
// VZERO - Zero YMM registers
//
let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
            YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
  // Zero All YMM registers
  def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
                   [(int_x86_avx_vzeroall)]>, VEX, VEX_L, Requires<[HasAVX]>;

  // Zero Upper bits of YMM registers
  def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
                     [(int_x86_avx_vzeroupper)]>, VEX, Requires<[HasAVX]>;
}
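
// vzeroupper is typically emitted before calling legacy SSE code: mixing
// dirty 256-bit YMM state with non-VEX SSE instructions incurs a state
// transition penalty on some implementations.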
//===----------------------------------------------------------------------===//
// SSE Shuffle pattern fragments
//===----------------------------------------------------------------------===//

// This is part of a "work in progress" refactoring. The idea is that all
// vector shuffles will be translated into target-specific nodes and directly
// matched by the patterns below (which can be changed along the way). The
// AVX versions of some, but not all, of them are described here, and more
// should come in the near future.
// Shuffle with MOVDDUP instruction
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (memopv2f64 addr:$src)),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
          (MOVDDUPrm addr:$src)>;

def : Pat<(X86Movddup (bc_v2f64
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
          (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
def : Pat<(X86Movddup (bc_v2f64
                       (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
          (MOVDDUPrm addr:$src)>;

// Shuffle with UNPCKLPS
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
          (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
          (UNPCKLPSrm VR128:$src1, addr:$src2)>;

def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
          (VUNPCKLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
          (UNPCKLPSrr VR128:$src1, VR128:$src2)>;
// Shuffle with VUNPCKLPSY
def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, (memopv8f32 addr:$src2))),
          (VUNPCKLPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
          (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
def : Pat<(v8i32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
          (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
def : Pat<(v8i32 (X86Unpcklpsy VR256:$src1, (memopv8i32 addr:$src2))),
          (VUNPCKLPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
// Shuffle with UNPCKHPS
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
          (VUNPCKHPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
          (UNPCKHPSrm VR128:$src1, addr:$src2)>;

def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
          (VUNPCKHPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
          (UNPCKHPSrr VR128:$src1, VR128:$src2)>;

// Shuffle with VUNPCKHPSY
def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, (memopv8f32 addr:$src2))),
          (VUNPCKHPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
          (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;

def : Pat<(v8i32 (X86Unpckhpsy VR256:$src1, (memopv8i32 addr:$src2))),
          (VUNPCKHPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v8i32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
          (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;

// Shuffle with UNPCKLPD
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
          (VUNPCKLPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
          (UNPCKLPDrm VR128:$src1, addr:$src2)>;

def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
          (VUNPCKLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
          (UNPCKLPDrr VR128:$src1, VR128:$src2)>;

// Shuffle with VUNPCKLPDY
def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, (memopv4f64 addr:$src2))),
          (VUNPCKLPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
          (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;

def : Pat<(v4i64 (X86Unpcklpdy VR256:$src1, (memopv4i64 addr:$src2))),
          (VUNPCKLPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4i64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
          (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;

// Shuffle with UNPCKHPD
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
          (VUNPCKHPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
          (UNPCKHPDrm VR128:$src1, addr:$src2)>;

def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
          (VUNPCKHPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
          (UNPCKHPDrr VR128:$src1, VR128:$src2)>;

// Shuffle with VUNPCKHPDY
def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, (memopv4f64 addr:$src2))),
          (VUNPCKHPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
          (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4i64 (X86Unpckhpdy VR256:$src1, (memopv4i64 addr:$src2))),
          (VUNPCKHPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
def : Pat<(v4i64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
          (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
// FIXME: Instead of X86Movddup, there should be an X86Unpcklpd here. The
// problem is during lowering, where it's not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(v2f64 (X86Movddup VR128:$src)),
          (UNPCKLPDrr VR128:$src, VR128:$src)>;

// Shuffle with MOVLHPD
def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
                             (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVHPDrm VR128:$src1, addr:$src2)>;

// FIXME: Instead of X86Unpcklpd, there should be an X86Movlhpd here. The
// problem is during lowering, where it's not possible to recognize the load
// fold because it has two uses through a bitcast. One use disappears at isel
// time and the fold opportunity reappears.
def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
                              (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVHPDrm VR128:$src1, addr:$src2)>;
// Shuffle with MOVSS
def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
          (MOVSSrr VR128:$src1, FR32:$src2)>;
def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4i32 VR128:$src1),
                   (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
          (MOVSSrr (v4f32 VR128:$src1),
                   (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;

// Shuffle with MOVSD
def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
          (MOVSDrr VR128:$src1, FR64:$src2)>;
def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2i64 VR128:$src1),
                   (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr (v2f64 VR128:$src1),
                   (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;

// Shuffle with MOVLPS
def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
def : Pat<(X86Movlps VR128:$src1,
                     (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
          (MOVLPSrm VR128:$src1, addr:$src2)>;
// FIXME: Instead of X86Movlps, there should be an X86Movsd here. The problem
// is during lowering, where it's not possible to recognize the load fold
// because it has two uses through a bitcast. One use disappears at isel time
// and the fold opportunity reappears.
def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;

def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
// Shuffle with MOVLPD
def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;
def : Pat<(v2f64 (X86Movlpd VR128:$src1,
                            (scalar_to_vector (loadf64 addr:$src2)))),
          (MOVLPDrm VR128:$src1, addr:$src2)>;

// Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD
def : Pat<(store (f64 (vector_extract
          (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))), addr:$dst),
          (MOVHPSmr addr:$dst, VR128:$src)>;
def : Pat<(store (f64 (vector_extract
          (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))), addr:$dst),
          (MOVHPDmr addr:$dst, VR128:$src)>;

def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v4i32 (X86Movlps
                 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
          (MOVLPSmr addr:$src1, VR128:$src2)>;

def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;
def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)), addr:$src1),
          (MOVLPDmr addr:$src1, VR128:$src2)>;