//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instructions Classes
//===----------------------------------------------------------------------===//
/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           bit Is2Addr = 1> {
  let isCommutable = 1 in {
    def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                !if(Is2Addr,
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
}
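// Illustrative usage (hypothetical, not a definition from this section): a
// scalar binary FP op would typically be produced from this class along the
// lines of
//   defm FOO : sse12_fp_scalar<0x58, "foo", fadd, FR32, f32mem>;
// which expands to FOOrr/FOOrm, selecting the two-address SSE asm string or
// the three-address AVX string according to Is2Addr.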
39 /// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
40 multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
41 string asm, string SSEVer, string FPSizeStr,
42 Operand memopr, ComplexPattern mem_cpat,
44 def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
46 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
47 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
48 [(set RC:$dst, (!cast<Intrinsic>(
49 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
50 RC:$src1, RC:$src2))]>;
51 def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
53 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
54 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
55 [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
56 SSEVer, "_", OpcodeStr, FPSizeStr))
57 RC:$src1, mem_cpat:$src2))]>;
60 /// sse12_fp_packed - SSE 1 & 2 packed instructions class
61 multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
62 RegisterClass RC, ValueType vt,
63 X86MemOperand x86memop, PatFrag mem_frag,
64 Domain d, bit Is2Addr = 1> {
65 let isCommutable = 1 in
66 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
68 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
69 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
70 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
72 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
74 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
75 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
76 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
79 /// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
80 multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
81 string OpcodeStr, X86MemOperand x86memop,
82 list<dag> pat_rr, list<dag> pat_rm,
84 let isCommutable = 1 in
85 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
87 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
88 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
90 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
92 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
93 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
97 /// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
98 multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
99 string asm, string SSEVer, string FPSizeStr,
100 X86MemOperand x86memop, PatFrag mem_frag,
101 Domain d, bit Is2Addr = 1> {
102 def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
104 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
105 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
106 [(set RC:$dst, (!cast<Intrinsic>(
107 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
108 RC:$src1, RC:$src2))], d>;
109 def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
111 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
112 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
113 [(set RC:$dst, (!cast<Intrinsic>(
114 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
115 RC:$src1, (mem_frag addr:$src2)))], d>;
118 //===----------------------------------------------------------------------===//
119 // SSE 1 & 2 - Move Instructions
120 //===----------------------------------------------------------------------===//
122 class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
123 SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
124 [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;
// Loads from memory automatically zero the upper bits.
127 class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
128 PatFrag mem_pat, string OpcodeStr> :
129 SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
130 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
131 [(set RC:$dst, (mem_pat addr:$src))]>;
// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
// is used instead. Register-to-register movss/movsd is not modeled as an
// INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
// in terms of a copy, and, as just mentioned, we don't use movss/movsd for copies.
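// (Informal note: a register-to-register movss/movsd writes only the low
// 32/64 bits and so depends on the previous contents of the destination's
// upper bits; that is the partial register update hazard mentioned above.
// The full-register FsMOVAPSrr/FsMOVAPDrr copies carry no such dependency.)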
138 def VMOVSSrr : sse12_move_rr<FR32, v4f32,
139 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
140 def VMOVSDrr : sse12_move_rr<FR64, v2f64,
141 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;
143 let canFoldAsLoad = 1, isReMaterializable = 1 in {
144 def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;
146 let AddedComplexity = 20 in
147 def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
150 let Constraints = "$src1 = $dst" in {
151 def MOVSSrr : sse12_move_rr<FR32, v4f32,
152 "movss\t{$src2, $dst|$dst, $src2}">, XS;
153 def MOVSDrr : sse12_move_rr<FR64, v2f64,
154 "movsd\t{$src2, $dst|$dst, $src2}">, XD;
157 let canFoldAsLoad = 1, isReMaterializable = 1 in {
158 def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;
160 let AddedComplexity = 20 in
161 def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
164 let AddedComplexity = 15 in {
165 // Extract the low 32-bit value from one vector and insert it into another.
166 def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
167 (MOVSSrr (v4f32 VR128:$src1),
168 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
169 // Extract the low 64-bit value from one vector and insert it into another.
170 def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
171 (MOVSDrr (v2f64 VR128:$src1),
172 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
175 // Implicitly promote a 32-bit scalar to a vector.
176 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
177 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
178 // Implicitly promote a 64-bit scalar to a vector.
179 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
180 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
181 // Implicitly promote a 32-bit scalar to a vector.
182 def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
183 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
184 // Implicitly promote a 64-bit scalar to a vector.
185 def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
186 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
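// (Informal note on the patterns above: IMPLICIT_DEF materializes a full-width
// vector register with undefined contents, and INSERT_SUBREG places the
// FR32/FR64 value in its low subregister, so the upper lanes of the promoted
// vector are undefined rather than zeroed.)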
188 let AddedComplexity = 20 in {
189 let Predicates = [HasSSE1] in {
190 // MOVSSrm zeros the high parts of the register; represent this
191 // with SUBREG_TO_REG.
192 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
193 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
194 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
195 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
196 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
197 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
199 let Predicates = [HasSSE2] in {
200 // MOVSDrm zeros the high parts of the register; represent this
201 // with SUBREG_TO_REG.
202 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
203 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
204 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
205 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
206 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
207 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
208 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
209 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
210 def : Pat<(v2f64 (X86vzload addr:$src)),
211 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
215 let AddedComplexity = 20, Predicates = [HasAVX] in {
216 // MOVSSrm zeros the high parts of the register; represent this
217 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
218 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
219 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
220 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
221 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
222 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
223 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
224 // MOVSDrm zeros the high parts of the register; represent this
225 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
226 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
227 (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
228 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
229 (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
230 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
231 (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
232 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
233 (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
234 def : Pat<(v2f64 (X86vzload addr:$src)),
235 (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
// Represent the same patterns above but in the form they appear for
// 256-bit types.
238 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
239 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (i32 0)))),
240 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
241 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
242 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (i32 0)))),
243 (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_sd)>;
246 // Store scalar value to memory.
247 def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
248 "movss\t{$src, $dst|$dst, $src}",
249 [(store FR32:$src, addr:$dst)]>;
250 def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
251 "movsd\t{$src, $dst|$dst, $src}",
252 [(store FR64:$src, addr:$dst)]>;
254 def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
255 "movss\t{$src, $dst|$dst, $src}",
256 [(store FR32:$src, addr:$dst)]>, XS, VEX;
257 def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
258 "movsd\t{$src, $dst|$dst, $src}",
259 [(store FR64:$src, addr:$dst)]>, XD, VEX;
// Extract and store.
def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSSmr addr:$dst,
                   (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                 addr:$dst),
          (MOVSDmr addr:$dst,
                   (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
271 // Move Aligned/Unaligned floating point values
272 multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
273 X86MemOperand x86memop, PatFrag ld_frag,
274 string asm, Domain d,
275 bit IsReMaterializable = 1> {
276 let neverHasSideEffects = 1 in
277 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
278 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
279 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
280 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
281 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
282 [(set RC:$dst, (ld_frag addr:$src))], d>;
285 defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
286 "movaps", SSEPackedSingle>, VEX;
287 defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
288 "movapd", SSEPackedDouble>, OpSize, VEX;
289 defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
290 "movups", SSEPackedSingle>, VEX;
291 defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
292 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
294 defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
295 "movaps", SSEPackedSingle>, VEX;
296 defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
297 "movapd", SSEPackedDouble>, OpSize, VEX;
298 defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
299 "movups", SSEPackedSingle>, VEX;
300 defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
301 "movupd", SSEPackedDouble, 0>, OpSize, VEX;
302 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
303 "movaps", SSEPackedSingle>, TB;
304 defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
305 "movapd", SSEPackedDouble>, TB, OpSize;
306 defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
307 "movups", SSEPackedSingle>, TB;
308 defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
309 "movupd", SSEPackedDouble, 0>, TB, OpSize;
311 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
312 "movaps\t{$src, $dst|$dst, $src}",
313 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
314 def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
315 "movapd\t{$src, $dst|$dst, $src}",
316 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
317 def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
318 "movups\t{$src, $dst|$dst, $src}",
319 [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
320 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
321 "movupd\t{$src, $dst|$dst, $src}",
322 [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
323 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
324 "movaps\t{$src, $dst|$dst, $src}",
325 [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
326 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
327 "movapd\t{$src, $dst|$dst, $src}",
328 [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
329 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
330 "movups\t{$src, $dst|$dst, $src}",
331 [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
332 def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
333 "movupd\t{$src, $dst|$dst, $src}",
334 [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
336 def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
337 def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
338 (VMOVUPSYmr addr:$dst, VR256:$src)>;
340 def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
341 def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
342 (VMOVUPDYmr addr:$dst, VR256:$src)>;
344 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
345 "movaps\t{$src, $dst|$dst, $src}",
346 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
347 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
348 "movapd\t{$src, $dst|$dst, $src}",
349 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
350 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
351 "movups\t{$src, $dst|$dst, $src}",
352 [(store (v4f32 VR128:$src), addr:$dst)]>;
353 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
354 "movupd\t{$src, $dst|$dst, $src}",
355 [(store (v2f64 VR128:$src), addr:$dst)]>;
357 // Intrinsic forms of MOVUPS/D load and store
358 def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
359 (ins f128mem:$dst, VR128:$src),
360 "movups\t{$src, $dst|$dst, $src}",
361 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
362 def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
363 (ins f128mem:$dst, VR128:$src),
364 "movupd\t{$src, $dst|$dst, $src}",
365 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
367 def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
368 "movups\t{$src, $dst|$dst, $src}",
369 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
370 def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
371 "movupd\t{$src, $dst|$dst, $src}",
372 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
// Move Low/High packed floating point values
multiclass sse12_mov_hilo_packed<bits<8> opc, RegisterClass RC,
                                 PatFrag mov_frag, string base_opc,
                                 string asm_opr> {
  def PSrm : PI<opc, MRMSrcMem,
         (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
         !strconcat(base_opc, "s", asm_opr),
     [(set RC:$dst,
       (mov_frag RC:$src1,
          (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
              SSEPackedSingle>, TB;

  def PDrm : PI<opc, MRMSrcMem,
         (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
         !strconcat(base_opc, "d", asm_opr),
     [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
                            (scalar_to_vector (loadf64 addr:$src2)))))],
              SSEPackedDouble>, TB, OpSize;
}
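// (Informal note: the "lp" forms load 64 bits from memory into the low half of
// the destination while preserving the high half, and the "hp" forms do the
// opposite; the defm instantiations below produce both the packed-single and
// packed-double variants from this one class.)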
394 let AddedComplexity = 20 in {
395 defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
396 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
397 defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
398 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
400 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
401 defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
402 "\t{$src2, $dst|$dst, $src2}">;
403 defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
404 "\t{$src2, $dst|$dst, $src2}">;
407 def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
408 "movlps\t{$src, $dst|$dst, $src}",
409 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
410 (iPTR 0))), addr:$dst)]>, VEX;
411 def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
412 "movlpd\t{$src, $dst|$dst, $src}",
413 [(store (f64 (vector_extract (v2f64 VR128:$src),
414 (iPTR 0))), addr:$dst)]>, VEX;
415 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
416 "movlps\t{$src, $dst|$dst, $src}",
417 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
418 (iPTR 0))), addr:$dst)]>;
419 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
420 "movlpd\t{$src, $dst|$dst, $src}",
421 [(store (f64 (vector_extract (v2f64 VR128:$src),
422 (iPTR 0))), addr:$dst)]>;
424 // v2f64 extract element 1 is always custom lowered to unpack high to low
425 // and extract element 0 so the non-store version isn't too horrible.
426 def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
427 "movhps\t{$src, $dst|$dst, $src}",
428 [(store (f64 (vector_extract
429 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
430 (undef)), (iPTR 0))), addr:$dst)]>,
432 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
433 "movhpd\t{$src, $dst|$dst, $src}",
434 [(store (f64 (vector_extract
435 (v2f64 (unpckh VR128:$src, (undef))),
436 (iPTR 0))), addr:$dst)]>,
438 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
439 "movhps\t{$src, $dst|$dst, $src}",
440 [(store (f64 (vector_extract
441 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
442 (undef)), (iPTR 0))), addr:$dst)]>;
443 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
444 "movhpd\t{$src, $dst|$dst, $src}",
445 [(store (f64 (vector_extract
446 (v2f64 (unpckh VR128:$src, (undef))),
447 (iPTR 0))), addr:$dst)]>;
449 let AddedComplexity = 20 in {
450 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
451 (ins VR128:$src1, VR128:$src2),
452 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
454 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
456 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
457 (ins VR128:$src1, VR128:$src2),
458 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
460 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
463 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
464 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
465 (ins VR128:$src1, VR128:$src2),
466 "movlhps\t{$src2, $dst|$dst, $src2}",
468 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
469 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
470 (ins VR128:$src1, VR128:$src2),
471 "movhlps\t{$src2, $dst|$dst, $src2}",
473 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
476 def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
477 (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
478 let AddedComplexity = 20 in {
479 def : Pat<(v4f32 (movddup VR128:$src, (undef))),
480 (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
481 def : Pat<(v2i64 (movddup VR128:$src, (undef))),
482 (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
485 //===----------------------------------------------------------------------===//
486 // SSE 1 & 2 - Conversion Instructions
487 //===----------------------------------------------------------------------===//
489 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
490 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
492 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
493 [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
494 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
495 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
498 multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
499 X86MemOperand x86memop, string asm> {
500 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
502 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
506 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
507 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
508 string asm, Domain d> {
509 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
510 [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
511 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
512 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
515 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
516 X86MemOperand x86memop, string asm> {
517 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
518 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
519 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
520 (ins DstRC:$src1, x86memop:$src),
521 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
524 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
525 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
526 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
527 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
529 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
530 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
531 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
532 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
// The assembler can recognize rr 64-bit instructions by seeing an rXX
// register, but the same isn't true when only using memory operands;
// provide other assembly "l" and "q" forms to address this explicitly
// where appropriate to do so.
539 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
541 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
543 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
545 defm VCVTSI2SDL : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
547 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
550 let Predicates = [HasAVX] in {
551 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
552 (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
553 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
554 (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
555 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
556 (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
557 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
558 (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;
560 def : Pat<(f32 (sint_to_fp GR32:$src)),
561 (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
562 def : Pat<(f32 (sint_to_fp GR64:$src)),
563 (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
564 def : Pat<(f64 (sint_to_fp GR32:$src)),
565 (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
def : Pat<(f64 (sint_to_fp GR64:$src)),
          (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
}
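// (Informal note: in the AVX three-operand encoding the $src1 operand supplies
// the untouched upper elements of the destination; for a plain scalar
// sint_to_fp those bits are don't-care, so the patterns above simply pass an
// IMPLICIT_DEF.)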
570 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
571 "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
572 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
573 "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
574 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
575 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
576 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
577 "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
578 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
579 "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
580 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
581 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
582 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
583 "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
584 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
585 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
587 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
588 // and/or XMM operand(s).
590 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
591 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
593 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
594 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
595 [(set DstRC:$dst, (Int SrcRC:$src))]>;
596 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
597 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
598 [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
601 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
602 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
603 PatFrag ld_frag, string asm, bit Is2Addr = 1> {
604 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
606 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
607 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
608 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
609 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
610 (ins DstRC:$src1, x86memop:$src2),
612 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
613 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
614 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
617 defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
618 f32mem, load, "cvtss2si">, XS, VEX;
619 defm Int_VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
620 int_x86_sse_cvtss2si64, f32mem, load, "cvtss2si">,
622 defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
623 f128mem, load, "cvtsd2si">, XD, VEX;
624 defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
625 int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
// FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_.
// Get rid of this hack or rename the intrinsics; there are several
// instructions that only match with the intrinsic form. Why create duplicates
// just to let them be recognized by the assembler?
632 defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
633 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
634 defm VCVTSD2SI64 : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
635 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
636 defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
637 f32mem, load, "cvtss2si">, XS;
638 defm Int_CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
639 f32mem, load, "cvtss2si{q}">, XS, REX_W;
640 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
641 f128mem, load, "cvtsd2si{l}">, XD;
642 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
643 f128mem, load, "cvtsd2si{q}">, XD, REX_W;
646 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
647 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
648 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
649 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
651 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
652 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
653 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
654 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
657 let Constraints = "$src1 = $dst" in {
658 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
659 int_x86_sse_cvtsi2ss, i32mem, loadi32,
661 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
662 int_x86_sse_cvtsi642ss, i64mem, loadi64,
663 "cvtsi2ss{q}">, XS, REX_W;
664 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
665 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
667 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
668 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
669 "cvtsi2sd">, XD, REX_W;
674 // Aliases for intrinsics
675 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
676 f32mem, load, "cvttss2si">, XS, VEX;
677 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
678 int_x86_sse_cvttss2si64, f32mem, load,
679 "cvttss2si">, XS, VEX, VEX_W;
680 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
681 f128mem, load, "cvttsd2si">, XD, VEX;
682 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
683 int_x86_sse2_cvttsd2si64, f128mem, load,
684 "cvttsd2si">, XD, VEX, VEX_W;
685 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
686 f32mem, load, "cvttss2si">, XS;
687 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
688 int_x86_sse_cvttss2si64, f32mem, load,
689 "cvttss2si{q}">, XS, REX_W;
690 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
691 f128mem, load, "cvttsd2si">, XD;
692 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
693 int_x86_sse2_cvttsd2si64, f128mem, load,
694 "cvttsd2si{q}">, XD, REX_W;
696 let Pattern = []<dag> in {
697 defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
698 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
699 defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
700 "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
702 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
703 "cvtdq2ps\t{$src, $dst|$dst, $src}",
704 SSEPackedSingle>, TB, VEX;
705 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
706 "cvtdq2ps\t{$src, $dst|$dst, $src}",
707 SSEPackedSingle>, TB, VEX;
709 let Pattern = []<dag> in {
710 defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
711 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
712 defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
713 "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
714 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
715 "cvtdq2ps\t{$src, $dst|$dst, $src}",
                            SSEPackedSingle>, TB; /* PD SSE3 form is available */
721 // Convert scalar double to scalar single
722 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
723 (ins FR64:$src1, FR64:$src2),
724 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
726 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
727 (ins FR64:$src1, f64mem:$src2),
728 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
729 []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
730 def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
733 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
734 "cvtsd2ss\t{$src, $dst|$dst, $src}",
735 [(set FR32:$dst, (fround FR64:$src))]>;
736 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
737 "cvtsd2ss\t{$src, $dst|$dst, $src}",
738 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
739 Requires<[HasSSE2, OptForSize]>;
741 defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
742 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
744 let Constraints = "$src1 = $dst" in
745 defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
746 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;
748 // Convert scalar single to scalar double
749 // SSE2 instructions with XS prefix
750 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
751 (ins FR32:$src1, FR32:$src2),
752 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
753 []>, XS, Requires<[HasAVX]>, VEX_4V;
754 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
755 (ins FR32:$src1, f32mem:$src2),
756 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
757 []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;
759 let Predicates = [HasAVX] in {
760 def : Pat<(f64 (fextend FR32:$src)),
761 (VCVTSS2SDrr FR32:$src, FR32:$src)>;
762 def : Pat<(fextend (loadf32 addr:$src)),
763 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
764 def : Pat<(extloadf32 addr:$src),
765 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
768 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
769 "cvtss2sd\t{$src, $dst|$dst, $src}",
770 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
772 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
773 "cvtss2sd\t{$src, $dst|$dst, $src}",
774 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
775 Requires<[HasSSE2, OptForSize]>;
777 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
778 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
779 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
780 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
781 VR128:$src2))]>, XS, VEX_4V,
783 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
784 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
785 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
786 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
787 (load addr:$src2)))]>, XS, VEX_4V,
789 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
790 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
791 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
792 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
793 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
796 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
797 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
798 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
799 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
800 (load addr:$src2)))]>, XS,
804 def : Pat<(extloadf32 addr:$src),
805 (CVTSS2SDrr (MOVSSrm addr:$src))>,
806 Requires<[HasSSE2, OptForSpeed]>;
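// (Informal note: when optimizing for speed the extending f32->f64 load is
// matched as an explicit MOVSSrm followed by a register-to-register cvtss2sd;
// the load-folding CVTSS2SDrm form is only selected under OptForSize, per the
// Requires<[HasSSE2, OptForSize]> pattern above.)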
808 // Convert doubleword to packed single/double fp
809 // SSE2 instructions without OpSize prefix
810 def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
811 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
812 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
813 TB, VEX, Requires<[HasAVX]>;
814 def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
815 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
816 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
817 (bitconvert (memopv2i64 addr:$src))))]>,
818 TB, VEX, Requires<[HasAVX]>;
819 def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
820 "cvtdq2ps\t{$src, $dst|$dst, $src}",
821 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
822 TB, Requires<[HasSSE2]>;
823 def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
824 "cvtdq2ps\t{$src, $dst|$dst, $src}",
825 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
826 (bitconvert (memopv2i64 addr:$src))))]>,
827 TB, Requires<[HasSSE2]>;
// FIXME: why is the non-intrinsic version described as SSE3?
830 // SSE2 instructions with XS prefix
831 def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
832 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
833 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
834 XS, VEX, Requires<[HasAVX]>;
835 def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
836 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
837 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
838 (bitconvert (memopv2i64 addr:$src))))]>,
839 XS, VEX, Requires<[HasAVX]>;
840 def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
841 "cvtdq2pd\t{$src, $dst|$dst, $src}",
842 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
843 XS, Requires<[HasSSE2]>;
844 def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
845 "cvtdq2pd\t{$src, $dst|$dst, $src}",
846 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
847 (bitconvert (memopv2i64 addr:$src))))]>,
848 XS, Requires<[HasSSE2]>;
851 // Convert packed single/double fp to doubleword
852 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
853 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
854 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
855 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
856 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
857 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
858 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
859 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
860 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
861 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
862 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
863 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
865 def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
866 "cvtps2dq\t{$src, $dst|$dst, $src}",
867 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
869 def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
871 "cvtps2dq\t{$src, $dst|$dst, $src}",
872 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
873 (memop addr:$src)))]>, VEX;
874 def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
875 "cvtps2dq\t{$src, $dst|$dst, $src}",
876 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
877 def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
878 "cvtps2dq\t{$src, $dst|$dst, $src}",
879 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
880 (memop addr:$src)))]>;
882 // SSE2 packed instructions with XD prefix
883 def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
884 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
885 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
886 XD, VEX, Requires<[HasAVX]>;
887 def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
888 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
889 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
890 (memop addr:$src)))]>,
891 XD, VEX, Requires<[HasAVX]>;
892 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
893 "cvtpd2dq\t{$src, $dst|$dst, $src}",
894 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
895 XD, Requires<[HasSSE2]>;
896 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
897 "cvtpd2dq\t{$src, $dst|$dst, $src}",
898 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
899 (memop addr:$src)))]>,
900 XD, Requires<[HasSSE2]>;
903 // Convert with truncation packed single/double fp to doubleword
904 // SSE2 packed instructions with XS prefix
905 def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
906 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
907 def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
908 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
909 def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
910 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
911 def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
912 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
913 def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
914 "cvttps2dq\t{$src, $dst|$dst, $src}",
916 (int_x86_sse2_cvttps2dq VR128:$src))]>;
917 def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
918 "cvttps2dq\t{$src, $dst|$dst, $src}",
920 (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;
922 def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
923 "vcvttps2dq\t{$src, $dst|$dst, $src}",
925 (int_x86_sse2_cvttps2dq VR128:$src))]>,
926 XS, VEX, Requires<[HasAVX]>;
927 def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
928 "vcvttps2dq\t{$src, $dst|$dst, $src}",
929 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
930 (memop addr:$src)))]>,
931 XS, VEX, Requires<[HasAVX]>;
933 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
934 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
935 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
936 (CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
938 def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
939 (VCVTDQ2PSYrr VR256:$src)>, Requires<[HasAVX]>;
940 def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
941 (VCVTTPS2DQYrr VR256:$src)>, Requires<[HasAVX]>;
943 def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
945 "cvttpd2dq\t{$src, $dst|$dst, $src}",
946 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
948 def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
950 "cvttpd2dq\t{$src, $dst|$dst, $src}",
951 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
952 (memop addr:$src)))]>, VEX;
953 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
954 "cvttpd2dq\t{$src, $dst|$dst, $src}",
955 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
956 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
957 "cvttpd2dq\t{$src, $dst|$dst, $src}",
958 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
959 (memop addr:$src)))]>;
961 // The assembler can recognize rr 256-bit instructions by seeing a ymm
962 // register, but the same isn't true when using memory operands instead.
963 // Provide other assembly rr and rm forms to address this explicitly.
964 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
965 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
966 def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
967 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
970 def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
971 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
972 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
973 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
976 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
977 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
978 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
979 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
981 // Convert packed single to packed double
982 let Predicates = [HasAVX] in {
983 // SSE2 instructions without OpSize prefix
984 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
985 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
986 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
987 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
988 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
989 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
990 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
991 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, VEX;
993 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
994 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
995 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
996 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
998 def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
999 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1000 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1001 VEX, Requires<[HasAVX]>;
1002 def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1003 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1004 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1005 (load addr:$src)))]>,
1006 VEX, Requires<[HasAVX]>;
1007 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1008 "cvtps2pd\t{$src, $dst|$dst, $src}",
1009 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1010 TB, Requires<[HasSSE2]>;
1011 def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1012 "cvtps2pd\t{$src, $dst|$dst, $src}",
1013 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1014 (load addr:$src)))]>,
1015 TB, Requires<[HasSSE2]>;
1017 // Convert packed double to packed single
1018 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1019 // register, but the same isn't true when using memory operands instead.
1020 // Provide other assembly rr and rm forms to address this explicitly.
1021 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1022 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1023 def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1024 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1027 def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1028 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1029 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1030 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1033 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1034 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
1035 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1036 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
1037 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1038 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1039 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1040 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1043 def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1044 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1045 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1046 def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
1048 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1049 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1050 (memop addr:$src)))]>;
1051 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1052 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1053 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1054 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1055 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1056 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1057 (memop addr:$src)))]>;
1059 // AVX 256-bit register conversion intrinsics
1060 // FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
1061 // whenever possible to avoid declaring two versions of each one.
1062 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
1063 (VCVTDQ2PSYrr VR256:$src)>;
1064 def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
1065 (VCVTDQ2PSYrm addr:$src)>;
1067 def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
1068 (VCVTPD2PSYrr VR256:$src)>;
1069 def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
1070 (VCVTPD2PSYrm addr:$src)>;
1072 def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
1073 (VCVTPS2DQYrr VR256:$src)>;
1074 def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
1075 (VCVTPS2DQYrm addr:$src)>;
1077 def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
1078 (VCVTPS2PDYrr VR128:$src)>;
1079 def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
1080 (VCVTPS2PDYrm addr:$src)>;
1082 def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
1083 (VCVTTPD2DQYrr VR256:$src)>;
1084 def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
1085 (VCVTTPD2DQYrm addr:$src)>;
1087 def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
1088 (VCVTTPS2DQYrr VR256:$src)>;
1089 def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
1090 (VCVTTPS2DQYrm addr:$src)>;
1092 // Match fround for 128/256-bit conversions
1093 def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
1094 (VCVTPD2PSYrr VR256:$src)>;
1095 def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
1096 (VCVTPD2PSYrm addr:$src)>;
1098 //===----------------------------------------------------------------------===//
1099 // SSE 1 & 2 - Compare Instructions
1100 //===----------------------------------------------------------------------===//
1102 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
1103 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
1104 string asm, string asm_alt> {
1105 let isAsmParserOnly = 1 in {
1106 def rr : SIi8<0xC2, MRMSrcReg,
1107 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
1110 def rm : SIi8<0xC2, MRMSrcMem,
1111 (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
1115 // Accept explicit immediate argument form instead of comparison code.
1116 def rr_alt : SIi8<0xC2, MRMSrcReg,
1117 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1120 def rm_alt : SIi8<0xC2, MRMSrcMem,
1121 (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
1125 let neverHasSideEffects = 1 in {
1126 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
1127 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1128 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1130 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
1131 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1132 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1136 let Constraints = "$src1 = $dst" in {
1137 def CMPSSrr : SIi8<0xC2, MRMSrcReg,
1138 (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, SSECC:$cc),
1139 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
1140 [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), FR32:$src2, imm:$cc))]>, XS;
1141 def CMPSSrm : SIi8<0xC2, MRMSrcMem,
1142 (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2, SSECC:$cc),
1143 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
1144 [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), (loadf32 addr:$src2), imm:$cc))]>, XS;
1145 def CMPSDrr : SIi8<0xC2, MRMSrcReg,
1146 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, SSECC:$cc),
1147 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
1148 [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), FR64:$src2, imm:$cc))]>, XD;
1149 def CMPSDrm : SIi8<0xC2, MRMSrcMem,
1150 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2, SSECC:$cc),
1151 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
1152 [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), (loadf64 addr:$src2), imm:$cc))]>, XD;
1154 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1155 def CMPSSrr_alt : SIi8<0xC2, MRMSrcReg,
1156 (outs FR32:$dst), (ins FR32:$src1, FR32:$src, i8imm:$src2),
1157 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
1158 def CMPSSrm_alt : SIi8<0xC2, MRMSrcMem,
1159 (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, i8imm:$src2),
1160 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
1161 def CMPSDrr_alt : SIi8<0xC2, MRMSrcReg,
1162 (outs FR64:$dst), (ins FR64:$src1, FR64:$src, i8imm:$src2),
1163 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
1164 def CMPSDrm_alt : SIi8<0xC2, MRMSrcMem,
1165 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, i8imm:$src2),
1166 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
1169 multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
1170 Intrinsic Int, string asm> {
1171 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1172 (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
1173 [(set VR128:$dst, (Int VR128:$src1,
1174 VR128:$src, imm:$cc))]>;
1175 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1176 (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
1177 [(set VR128:$dst, (Int VR128:$src1,
1178 (load addr:$src), imm:$cc))]>;
1181 // Aliases to match intrinsics which expect XMM operand(s).
1182 defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1183 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
1185 defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1186 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
1188 let Constraints = "$src1 = $dst" in {
1189 defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1190 "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
1191 defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1192 "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
1196 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
1197 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1198 ValueType vt, X86MemOperand x86memop,
1199 PatFrag ld_frag, string OpcodeStr, Domain d> {
1200 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1201 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1202 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
1203 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1204 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1205 [(set EFLAGS, (OpNode (vt RC:$src1),
1206 (ld_frag addr:$src2)))], d>;
1209 let Defs = [EFLAGS] in {
1210 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1211 "ucomiss", SSEPackedSingle>, VEX;
1212 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1213 "ucomisd", SSEPackedDouble>, OpSize, VEX;
1214 let Pattern = []<dag> in {
1215 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1216 "comiss", SSEPackedSingle>, VEX;
1217 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1218 "comisd", SSEPackedDouble>, OpSize, VEX;
1221 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1222 load, "ucomiss", SSEPackedSingle>, VEX;
1223 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1224 load, "ucomisd", SSEPackedDouble>, OpSize, VEX;
1226 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
1227 load, "comiss", SSEPackedSingle>, VEX;
1228 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
1229 load, "comisd", SSEPackedDouble>, OpSize, VEX;
1230 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1231 "ucomiss", SSEPackedSingle>, TB;
1232 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1233 "ucomisd", SSEPackedDouble>, TB, OpSize;
1235 let Pattern = []<dag> in {
1236 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1237 "comiss", SSEPackedSingle>, TB;
1238 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1239 "comisd", SSEPackedDouble>, TB, OpSize;
1242 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1243 load, "ucomiss", SSEPackedSingle>, TB;
1244 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1245 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
1247 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
1248 "comiss", SSEPackedSingle>, TB;
1249 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
1250 "comisd", SSEPackedDouble>, TB, OpSize;
1251 } // Defs = [EFLAGS]
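// (Informal note: ucomiss/ucomisd perform an unordered compare that raises #IA
// only for a signaling NaN, while comiss/comisd also fault on quiet NaNs; both
// set ZF/PF/CF from the comparison result.)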
// sse12_cmp_packed - sse 1 & 2 compare packed instructions
1254 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1255 Intrinsic Int, string asm, string asm_alt,
1257 let isAsmParserOnly = 1 in {
1258 def rri : PIi8<0xC2, MRMSrcReg,
1259 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1260 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1261 def rmi : PIi8<0xC2, MRMSrcMem,
1262 (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
1263 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1266 // Accept explicit immediate argument form instead of comparison code.
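// (Illustrative note: with this alternate form the assembler also accepts an
//  explicit imm8, e.g. "cmpps $3, %xmm1, %xmm0" as an alternative spelling of
//  "cmpunordps %xmm1, %xmm0", since compare predicate 3 is UNORD.)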
1267 def rri_alt : PIi8<0xC2, MRMSrcReg,
1268 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1270 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1271 (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
1275 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1276 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1277 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1278 SSEPackedSingle>, VEX_4V;
1279 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1280 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1281 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1282 SSEPackedDouble>, OpSize, VEX_4V;
1283 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
1284 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1285 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1286 SSEPackedSingle>, VEX_4V;
1287 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
1288 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1289 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1290 SSEPackedDouble>, OpSize, VEX_4V;
1291 let Constraints = "$src1 = $dst" in {
1292 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1293 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1294 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1295 SSEPackedSingle>, TB;
1296 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1297 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1298 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1299 SSEPackedDouble>, TB, OpSize;
1302 let Predicates = [HasSSE1] in {
1303 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1304 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1305 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1306 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1309 let Predicates = [HasSSE2] in {
1310 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1311 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1312 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1313 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1316 let Predicates = [HasAVX] in {
1317 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1318 (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1319 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1320 (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1321 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1322 (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1323 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1324 (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1326 def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
1327 (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
1328 def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), (memop addr:$src2), imm:$cc)),
1329 (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
1330 def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
1331 (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
1332 def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), (memop addr:$src2), imm:$cc)),
1333 (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
1336 //===----------------------------------------------------------------------===//
1337 // SSE 1 & 2 - Shuffle Instructions
1338 //===----------------------------------------------------------------------===//
1340 /// sse12_shuffle - sse 1 & 2 shuffle instructions
1341 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1342 ValueType vt, string asm, PatFrag mem_frag,
1343 Domain d, bit IsConvertibleToThreeAddress = 0> {
1344 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
1345 (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
1346 [(set RC:$dst, (vt (shufp:$src3
1347 RC:$src1, (mem_frag addr:$src2))))], d>;
1348 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1349 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
1350 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
1352 (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
1355 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1356 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1357 memopv4f32, SSEPackedSingle>, TB, VEX_4V;
1358 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
1359 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1360 memopv8f32, SSEPackedSingle>, TB, VEX_4V;
1361 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1362 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1363 memopv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
1364 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
1365 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1366 memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
1368 let Constraints = "$src1 = $dst" in {
1369 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1370 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1371 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1373 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1374 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1375 memopv2f64, SSEPackedDouble>, TB, OpSize;
1378 //===----------------------------------------------------------------------===//
1379 // SSE 1 & 2 - Unpack Instructions
1380 //===----------------------------------------------------------------------===//
1382 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1383 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1384 PatFrag mem_frag, RegisterClass RC,
1385 X86MemOperand x86memop, string asm,
1387 def rr : PI<opc, MRMSrcReg,
1388 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1390 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1391 def rm : PI<opc, MRMSrcMem,
1392 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1394 (vt (OpNode RC:$src1,
1395 (mem_frag addr:$src2))))], d>;
1398 let AddedComplexity = 10 in {
1399 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1400 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1401 SSEPackedSingle>, VEX_4V;
1402 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1403 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1404 SSEPackedDouble>, OpSize, VEX_4V;
1405 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1406 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1407 SSEPackedSingle>, VEX_4V;
1408 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1409 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1410 SSEPackedDouble>, OpSize, VEX_4V;
1412 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1413 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1414 SSEPackedSingle>, VEX_4V;
1415 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1416 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1417 SSEPackedDouble>, OpSize, VEX_4V;
1418 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1419 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1420 SSEPackedSingle>, VEX_4V;
1421 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1422 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1423 SSEPackedDouble>, OpSize, VEX_4V;
1425 let Constraints = "$src1 = $dst" in {
1426 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1427 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1428 SSEPackedSingle>, TB;
1429 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1430 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1431 SSEPackedDouble>, TB, OpSize;
1432 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1433 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1434 SSEPackedSingle>, TB;
1435 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1436 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1437 SSEPackedDouble>, TB, OpSize;
1438 } // Constraints = "$src1 = $dst"
1439 } // AddedComplexity
1441 //===----------------------------------------------------------------------===//
1442 // SSE 1 & 2 - Extract Floating-Point Sign Mask
1443 //===----------------------------------------------------------------------===//
1445 /// sse12_extr_sign_mask - sse 1 & 2 sign mask extraction
1446 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1448 def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1449 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1450 [(set GR32:$dst, (Int RC:$src))], d>;
1451 def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
1452 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
1456 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1457 "movmskps", SSEPackedSingle>, VEX;
1458 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1459 "movmskpd", SSEPackedDouble>, OpSize,
1461 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
1462 "movmskps", SSEPackedSingle>, VEX;
1463 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
1464 "movmskpd", SSEPackedDouble>, OpSize,
1466 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1467 SSEPackedSingle>, TB;
1468 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1469 SSEPackedDouble>, TB, OpSize;
1472 def MOVMSKPDrr32_alt : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src),
1473 "movmskpd\t{$src, $dst|$dst, $src}",
1474 [(set GR32:$dst, (X86fgetsign FR64:$src))], SSEPackedDouble>, TB, OpSize;
1475 def MOVMSKPDrr64_alt : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src),
1476 "movmskpd\t{$src, $dst|$dst, $src}",
1477 [(set GR64:$dst, (X86fgetsign FR64:$src))], SSEPackedDouble>, TB, OpSize;
1478 def MOVMSKPSrr32_alt : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
1479 "movmskps\t{$src, $dst|$dst, $src}",
1480 [(set GR32:$dst, (X86fgetsign FR32:$src))], SSEPackedSingle>, TB;
1481 def MOVMSKPSrr64_alt : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src),
1482 "movmskps\t{$src, $dst|$dst, $src}",
1483 [(set GR64:$dst, (X86fgetsign FR32:$src))], SSEPackedSingle>, TB;
1486 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1487 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1488 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1489 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1491 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1492 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX;
1493 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1494 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize,
1497 //===----------------------------------------------------------------------===//
1498 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1499 //===----------------------------------------------------------------------===//
1501 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1502 // names that start with 'Fs'.
1504 // Alias instructions that map fld0 to pxor for sse.
1505 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1506 canFoldAsLoad = 1 in {
1507 // FIXME: Set encoding to pseudo!
1508 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1509 [(set FR32:$dst, fp32imm0)]>,
1510 Requires<[HasSSE1]>, TB, OpSize;
1511 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1512 [(set FR64:$dst, fpimm0)]>,
1513 Requires<[HasSSE2]>, TB, OpSize;
1514 def VFsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1515 [(set FR32:$dst, fp32imm0)]>,
1516 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
1517 def VFsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1518 [(set FR64:$dst, fpimm0)]>,
1519 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
1522 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
1523 // bits are disregarded.
1524 let neverHasSideEffects = 1 in {
1525 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1526 "movaps\t{$src, $dst|$dst, $src}", []>;
1527 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1528 "movapd\t{$src, $dst|$dst, $src}", []>;
1531 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
1532 // bits are disregarded.
1533 let canFoldAsLoad = 1, isReMaterializable = 1 in {
1534 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
1535 "movaps\t{$src, $dst|$dst, $src}",
1536 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
1537 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
1538 "movapd\t{$src, $dst|$dst, $src}",
1539 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
1542 //===----------------------------------------------------------------------===//
1543 // SSE 1 & 2 - Logical Instructions
1544 //===----------------------------------------------------------------------===//
1546 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
1548 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
1550 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
1551 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, VEX_4V;
1553 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
1554 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, OpSize, VEX_4V;
1556 let Constraints = "$src1 = $dst" in {
1557 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
1558 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
1560 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
1561 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
1565 // Alias bitwise logical operations using SSE logical ops on packed FP values.
1566 let mayLoad = 0 in {
1567 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
1568 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
1569 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
1572 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
1573 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
1575 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
1577 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
1579 let Pattern = []<dag> in {
1580 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1581 !strconcat(OpcodeStr, "ps"), f128mem,
1582 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
1583 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1584 (memopv2i64 addr:$src2)))], 0>, VEX_4V;
1586 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1587 !strconcat(OpcodeStr, "pd"), f128mem,
1588 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1589 (bc_v2i64 (v2f64 VR128:$src2))))],
1590 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1591 (memopv2i64 addr:$src2)))], 0>,
1594 let Constraints = "$src1 = $dst" in {
1595 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
1596 !strconcat(OpcodeStr, "ps"), f128mem,
1597 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
1598 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
1599 (memopv2i64 addr:$src2)))]>, TB;
1601 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
1602 !strconcat(OpcodeStr, "pd"), f128mem,
1603 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1604 (bc_v2i64 (v2f64 VR128:$src2))))],
1605 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
1606 (memopv2i64 addr:$src2)))]>, TB, OpSize;
1610 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical op forms
1612 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr,
1614 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
1615 !strconcat(OpcodeStr, "ps"), f256mem,
1616 [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
1617 [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
1618 (memopv4i64 addr:$src2)))], 0>, VEX_4V;
1620 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
1621 !strconcat(OpcodeStr, "pd"), f256mem,
1622 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
1623 (bc_v4i64 (v4f64 VR256:$src2))))],
1624 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
1625 (memopv4i64 addr:$src2)))], 0>,
1629 // AVX 256-bit packed logical op forms
1630 defm VAND : sse12_fp_packed_logical_y<0x54, "and", and>;
1631 defm VOR : sse12_fp_packed_logical_y<0x56, "or", or>;
1632 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor", xor>;
1633 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn", X86andnp>;
1635 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
1636 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
1637 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
1638 let isCommutable = 0 in
1639 defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
1641 //===----------------------------------------------------------------------===//
1642 // SSE 1 & 2 - Arithmetic Instructions
1643 //===----------------------------------------------------------------------===//
1645 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and vector forms.
1648 /// In addition, we also have a special variant of the scalar form here to
1649 /// represent the associated intrinsic operation. This form is unlike the
1650 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
1651 /// and leaves the top elements unmodified (therefore these cannot be commuted).
1653 /// These three forms can each be reg+reg or reg+mem.
1656 /// FIXME: once all 256-bit intrinsics are matched, clean up and refactor those classes below.
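/// Illustrative sketch, not one of the definitions below: the intrinsic
/// scalar form matches patterns of the shape
///   (set VR128:$dst, (int_x86_sse_add_ss VR128:$src1, VR128:$src2))
/// where only element 0 is operated on and elements 1-3 of the result are
/// taken from $src1, which is why these forms are never marked commutable.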
1658 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
1660 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
1661 OpNode, FR32, f32mem, Is2Addr>, XS;
1662 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
1663 OpNode, FR64, f64mem, Is2Addr>, XD;
1666 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
1668 let mayLoad = 0 in {
1669 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
1670 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
1671 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
1672 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
1676 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
1678 let mayLoad = 0 in {
1679 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
1680 v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
1681 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
1682 v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
1686 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
1688 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1689 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
1690 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
1691 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
1694 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
1696 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1697 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
1698 SSEPackedSingle, Is2Addr>, TB;
1700 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
1701 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
1702 SSEPackedDouble, Is2Addr>, TB, OpSize;
1705 multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
1706 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1707 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
1708 SSEPackedSingle, 0>, TB;
1710 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
1711 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
1712 SSEPackedDouble, 0>, TB, OpSize;
1715 // Binary Arithmetic instructions
1716 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
1717 basic_sse12_fp_binop_s_int<0x58, "add", 0>,
1718 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
1719 basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
1720 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
1721 basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
1722 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
1723 basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
1725 let isCommutable = 0 in {
1726 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
1727 basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
1728 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
1729 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
1730 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
1731 basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
1732 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
1733 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
1734 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
1735 basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
1736 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
1737 basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
1738 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
1739 basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
1740 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
1741 basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
1742 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
1743 basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
1744 basic_sse12_fp_binop_p_y_int<0x5D, "min">,
1745 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
1748 let Constraints = "$src1 = $dst" in {
1749 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
1750 basic_sse12_fp_binop_p<0x58, "add", fadd>,
1751 basic_sse12_fp_binop_s_int<0x58, "add">;
1752 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
1753 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
1754 basic_sse12_fp_binop_s_int<0x59, "mul">;
1756 let isCommutable = 0 in {
1757 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
1758 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
1759 basic_sse12_fp_binop_s_int<0x5C, "sub">;
1760 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
1761 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
1762 basic_sse12_fp_binop_s_int<0x5E, "div">;
1763 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
1764 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
1765 basic_sse12_fp_binop_s_int<0x5F, "max">,
1766 basic_sse12_fp_binop_p_int<0x5F, "max">;
1767 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
1768 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
1769 basic_sse12_fp_binop_s_int<0x5D, "min">,
1770 basic_sse12_fp_binop_p_int<0x5D, "min">;
1775 /// In addition, we also have a special variant of the scalar form here to
1776 /// represent the associated intrinsic operation. This form is unlike the
1777 /// plain scalar form, in that it takes an entire vector (instead of a
1778 /// scalar) and leaves the top elements undefined.
1780 /// And, we have a special variant form for a full-vector intrinsic form.
1782 /// sse1_fp_unop_s - SSE1 unops in scalar form.
1783 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
1784 SDNode OpNode, Intrinsic F32Int> {
1785 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
1786 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1787 [(set FR32:$dst, (OpNode FR32:$src))]>;
1788 // For scalar unary operations, fold a load into the operation
1789 // only in OptForSize mode. It eliminates an instruction, but it also
1790 // eliminates a whole-register clobber (the load), so it introduces a
1791 // partial register update condition.
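// (Illustrative, in AT&T syntax: "sqrtss (%rdi), %xmm0" writes only the low
// 32 bits of %xmm0 and therefore depends on the previous contents of %xmm0;
// outside OptForSize a separate load plus a reg-reg sqrtss avoids that
// partial update.)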
1792 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
1793 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1794 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
1795 Requires<[HasSSE1, OptForSize]>;
1796 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1797 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1798 [(set VR128:$dst, (F32Int VR128:$src))]>;
1799 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1800 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
1801 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1804 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
1805 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1806 SDNode OpNode, Intrinsic F32Int> {
1807 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
1808 !strconcat(OpcodeStr,
1809 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1810 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2),
1811 !strconcat(OpcodeStr,
1812 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
1813 []>, XS, Requires<[HasAVX, OptForSize]>;
1814 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1815 !strconcat(OpcodeStr,
1816 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1817 [(set VR128:$dst, (F32Int VR128:$src))]>;
1818 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
1819 !strconcat(OpcodeStr,
1820 "ss\t{$src, $dst, $dst|$dst, $dst, $src}"),
1821 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
1824 /// sse1_fp_unop_p - SSE1 unops in packed form.
1825 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1826 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1827 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1828 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
1829 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1830 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1831 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
1834 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
1835 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1836 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1837 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1838 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
1839 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1840 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1841 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
1844 /// sse1_fp_unop_p_int - SSE1 intrinsic unops in packed forms.
1845 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1846 Intrinsic V4F32Int> {
1847 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1848 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1849 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
1850 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1851 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1852 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
1855 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsic unops in packed forms.
1856 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1857 Intrinsic V4F32Int> {
1858 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1859 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1860 [(set VR256:$dst, (V4F32Int VR256:$src))]>;
1861 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1862 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
1863 [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
1866 /// sse2_fp_unop_s - SSE2 unops in scalar form.
1867 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
1868 SDNode OpNode, Intrinsic F64Int> {
1869 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
1870 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1871 [(set FR64:$dst, (OpNode FR64:$src))]>;
1872 // See the comments in sse1_fp_unop_s for why this is OptForSize.
1873 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
1874 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1875 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
1876 Requires<[HasSSE2, OptForSize]>;
1877 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1878 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1879 [(set VR128:$dst, (F64Int VR128:$src))]>;
1880 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1881 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
1882 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1885 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
1886 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr,
1887 SDNode OpNode, Intrinsic F64Int> {
1888 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
1889 !strconcat(OpcodeStr,
1890 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1891 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
1892 (ins FR64:$src1, f64mem:$src2),
1893 !strconcat(OpcodeStr,
1894 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
1895 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1896 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1897 [(set VR128:$dst, (F64Int VR128:$src))]>;
1898 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
1899 !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"),
1900 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
1903 /// sse2_fp_unop_p - SSE2 unops in vector forms.
1904 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
1906 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1907 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1908 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
1909 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1910 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1911 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
1914 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
1915 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
1916 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1917 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1918 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
1919 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1920 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1921 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
1924 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
1925 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
1926 Intrinsic V2F64Int> {
1927 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1928 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1929 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
1930 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1931 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1932 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
1935 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
1936 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
1937 Intrinsic V2F64Int> {
1938 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1939 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1940 [(set VR256:$dst, (V2F64Int VR256:$src))]>;
1941 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1942 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
1943 [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
1946 let Predicates = [HasAVX] in {
1948 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse_sqrt_ss>,
1949 sse2_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1952 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
1953 sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
1954 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1955 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
1956 sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
1957 sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
1958 sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
1959 sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
1962 // Reciprocal approximations. Note that these typically require refinement
1963 // in order to obtain suitable precision.
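// (Illustrative only: a single Newton-Raphson step can refine the estimate,
//  e.g. rsqrt: x1 = x0 * (1.5 - 0.5 * a * x0 * x0) and rcp: x1 = x0 * (2.0 - a * x0);
//  that refinement is left to the code that uses these instructions.)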
1964 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt", X86frsqrt,
1965 int_x86_sse_rsqrt_ss>, VEX_4V;
1966 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
1967 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
1968 sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
1969 sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
1971 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp", X86frcp, int_x86_sse_rcp_ss>,
1973 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
1974 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
1975 sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
1976 sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
1979 def : Pat<(f32 (fsqrt FR32:$src)),
1980 (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
1981 def : Pat<(f64 (fsqrt FR64:$src)),
1982 (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
1983 def : Pat<(f64 (fsqrt (load addr:$src))),
1984 (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
1985 Requires<[HasAVX, OptForSize]>;
1986 def : Pat<(f32 (fsqrt (load addr:$src))),
1987 (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
1988 Requires<[HasAVX, OptForSize]>;
1991 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
1992 sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
1993 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
1994 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
1995 sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
1996 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
1998 // Reciprocal approximations. Note that these typically require refinement
1999 // in order to obtain suitable precision.
2000 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
2001 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
2002 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
2003 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
2004 sse1_fp_unop_p<0x53, "rcp", X86frcp>,
2005 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
2007 // There is no f64 version of the reciprocal approximation instructions.
2009 //===----------------------------------------------------------------------===//
2010 // SSE 1 & 2 - Non-temporal stores
2011 //===----------------------------------------------------------------------===//
2013 let AddedComplexity = 400 in { // Prefer non-temporal versions
2014 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
2015 (ins f128mem:$dst, VR128:$src),
2016 "movntps\t{$src, $dst|$dst, $src}",
2017 [(alignednontemporalstore (v4f32 VR128:$src),
2019 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
2020 (ins f128mem:$dst, VR128:$src),
2021 "movntpd\t{$src, $dst|$dst, $src}",
2022 [(alignednontemporalstore (v2f64 VR128:$src),
2024 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
2025 (ins f128mem:$dst, VR128:$src),
2026 "movntdq\t{$src, $dst|$dst, $src}",
2027 [(alignednontemporalstore (v2f64 VR128:$src),
2030 let ExeDomain = SSEPackedInt in
2031 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
2032 (ins f128mem:$dst, VR128:$src),
2033 "movntdq\t{$src, $dst|$dst, $src}",
2034 [(alignednontemporalstore (v4f32 VR128:$src),
2037 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
2038 (VMOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasAVX]>;
2040 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
2041 (ins f256mem:$dst, VR256:$src),
2042 "movntps\t{$src, $dst|$dst, $src}",
2043 [(alignednontemporalstore (v8f32 VR256:$src),
2045 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
2046 (ins f256mem:$dst, VR256:$src),
2047 "movntpd\t{$src, $dst|$dst, $src}",
2048 [(alignednontemporalstore (v4f64 VR256:$src),
2050 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
2051 (ins f256mem:$dst, VR256:$src),
2052 "movntdq\t{$src, $dst|$dst, $src}",
2053 [(alignednontemporalstore (v4f64 VR256:$src),
2055 let ExeDomain = SSEPackedInt in
2056 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
2057 (ins f256mem:$dst, VR256:$src),
2058 "movntdq\t{$src, $dst|$dst, $src}",
2059 [(alignednontemporalstore (v8f32 VR256:$src),
2063 def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
2064 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
2065 def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
2066 (VMOVNTPDYmr addr:$dst, VR256:$src)>;
2067 def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
2068 (VMOVNTPSYmr addr:$dst, VR256:$src)>;
2070 let AddedComplexity = 400 in { // Prefer non-temporal versions
2071 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2072 "movntps\t{$src, $dst|$dst, $src}",
2073 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2074 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2075 "movntpd\t{$src, $dst|$dst, $src}",
2076 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
2078 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2079 "movntdq\t{$src, $dst|$dst, $src}",
2080 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2082 let ExeDomain = SSEPackedInt in
2083 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2084 "movntdq\t{$src, $dst|$dst, $src}",
2085 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2087 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
2088 (MOVNTDQmr addr:$dst, VR128:$src)>;
2090 // There is no AVX form for instructions below this point
2091 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2092 "movnti{l}\t{$src, $dst|$dst, $src}",
2093 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2094 TB, Requires<[HasSSE2]>;
2095 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2096 "movnti{q}\t{$src, $dst|$dst, $src}",
2097 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2098 TB, Requires<[HasSSE2]>;
2101 //===----------------------------------------------------------------------===//
2102 // SSE 1 & 2 - Misc Instructions (No AVX form)
2103 //===----------------------------------------------------------------------===//
2105 // Prefetch intrinsic.
2106 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2107 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))]>;
2108 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2109 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))]>;
2110 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2111 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))]>;
2112 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2113 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))]>;
2115 // Load, store, and memory fence
2116 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2117 TB, Requires<[HasSSE1]>;
2118 def : Pat<(X86SFence), (SFENCE)>;
2120 // Alias instructions that map zero vector to pxor / xorp* for sse.
2121 // We set canFoldAsLoad because this can be converted to a constant-pool
2122 // load of an all-zeros value if folding it would be beneficial.
2123 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2124 // JIT implementation, which does not expand the instructions below the way
2125 // X86MCInstLower does.
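// (Illustrative: V_SET0PS typically materializes as "xorps %xmm0, %xmm0";
//  with canFoldAsLoad set, the zero may instead be folded into a user as a
//  load of an all-zeros constant-pool value when that is profitable.)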
2126 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2127 isCodeGenOnly = 1 in {
2128 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2129 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
2130 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2131 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
2132 let ExeDomain = SSEPackedInt in
2133 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2134 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2137 // The same as above, but for AVX. The 128-bit versions are the same but
2138 // re-encoded. The 256-bit versions have no PI form and do not need one: on
2139 // Sandy Bridge the register is zeroed at the rename stage without using any
2140 // execution unit, so SET0PSY and SET0PDY can be used for vector int
2141 // instructions without penalty.
2142 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
2143 // JIT implementation, which does not expand the instructions below the way
2144 // X86MCInstLower does.
2145 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
2146 isCodeGenOnly = 1, Predicates = [HasAVX] in {
2147 def AVX_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2148 [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
2149 def AVX_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
2150 [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
2151 def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2152 [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
2153 def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
2154 [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
2155 let ExeDomain = SSEPackedInt in
2156 def AVX_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
2157 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
2160 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
2161 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
2162 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
2164 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
2165 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
2167 // AVX has no support for 256-bit integer instructions, but since the 128-bit
2168 // VPXOR instruction writes zero to its upper part, it's safe to build zeros.
2169 def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;
2170 def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
2171 (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;
2173 def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
2174 def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
2175 (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
2177 //===----------------------------------------------------------------------===//
2178 // SSE 1 & 2 - Load/Store MXCSR register
2179 //===----------------------------------------------------------------------===//
2181 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2182 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2183 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2184 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2186 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2187 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2188 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2189 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2191 //===---------------------------------------------------------------------===//
2192 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2193 //===---------------------------------------------------------------------===//
2195 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2197 let neverHasSideEffects = 1 in {
2198 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2199 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2200 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2201 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2203 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2204 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2205 def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2206 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2208 let canFoldAsLoad = 1, mayLoad = 1 in {
2209 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2210 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2211 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2212 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2213 let Predicates = [HasAVX] in {
2214 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2215 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2216 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2217 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2221 let mayStore = 1 in {
2222 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2223 (ins i128mem:$dst, VR128:$src),
2224 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2225 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
2226 (ins i256mem:$dst, VR256:$src),
2227 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2228 let Predicates = [HasAVX] in {
2229 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2230 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2231 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
2232 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2236 let neverHasSideEffects = 1 in
2237 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2238 "movdqa\t{$src, $dst|$dst, $src}", []>;
2240 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2241 "movdqu\t{$src, $dst|$dst, $src}",
2242 []>, XS, Requires<[HasSSE2]>;
2244 let canFoldAsLoad = 1, mayLoad = 1 in {
2245 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2246 "movdqa\t{$src, $dst|$dst, $src}",
2247 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2248 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2249 "movdqu\t{$src, $dst|$dst, $src}",
2250 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2251 XS, Requires<[HasSSE2]>;
2254 let mayStore = 1 in {
2255 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2256 "movdqa\t{$src, $dst|$dst, $src}",
2257 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2258 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2259 "movdqu\t{$src, $dst|$dst, $src}",
2260 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2261 XS, Requires<[HasSSE2]>;
2264 // Intrinsic forms of MOVDQU load and store
2265 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2266 "vmovdqu\t{$src, $dst|$dst, $src}",
2267 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2268 XS, VEX, Requires<[HasAVX]>;
2270 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2271 "movdqu\t{$src, $dst|$dst, $src}",
2272 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2273 XS, Requires<[HasSSE2]>;
2275 } // ExeDomain = SSEPackedInt
2277 def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
2278 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
2279 (VMOVDQUYmr addr:$dst, VR256:$src)>;
2281 //===---------------------------------------------------------------------===//
2282 // SSE2 - Packed Integer Arithmetic Instructions
2283 //===---------------------------------------------------------------------===//
2285 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2287 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
2288 bit IsCommutable = 0, bit Is2Addr = 1> {
2289 let isCommutable = IsCommutable in
2290 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2291 (ins VR128:$src1, VR128:$src2),
2293 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2294 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2295 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2296 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2297 (ins VR128:$src1, i128mem:$src2),
2299 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2300 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2301 [(set VR128:$dst, (IntId VR128:$src1,
2302 (bitconvert (memopv2i64 addr:$src2))))]>;
2305 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
2306 string OpcodeStr, Intrinsic IntId,
2307 Intrinsic IntId2, bit Is2Addr = 1> {
2308 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2309 (ins VR128:$src1, VR128:$src2),
2311 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2312 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2313 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2314 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2315 (ins VR128:$src1, i128mem:$src2),
2317 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2318 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2319 [(set VR128:$dst, (IntId VR128:$src1,
2320 (bitconvert (memopv2i64 addr:$src2))))]>;
2321 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
2322 (ins VR128:$src1, i32i8imm:$src2),
2324 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2325 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2326 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
2329 /// PDI_binop_rm - Simple SSE2 binary operator.
2330 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2331 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
2332 let isCommutable = IsCommutable in
2333 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2334 (ins VR128:$src1, VR128:$src2),
2336 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2337 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2338 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
2339 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2340 (ins VR128:$src1, i128mem:$src2),
2342 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2343 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2344 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
2345 (bitconvert (memopv2i64 addr:$src2)))))]>;
2348 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
2350 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
2351 /// to collapse (bitconvert VT to VT) into its operand.
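/// (Illustrative: with OpVT = v2i64 the generic PDI_binop_rm memory pattern
///  would be (OpNode VR128:$src1, (bitconvert (memopv2i64 addr:$src2))), a
///  v2i64-to-v2i64 bitconvert that tblgen does not currently fold away, hence
///  this separate multiclass that simply drops the bitconvert.)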
2353 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
2354 bit IsCommutable = 0, bit Is2Addr = 1> {
2355 let isCommutable = IsCommutable in
2356 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2357 (ins VR128:$src1, VR128:$src2),
2359 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2360 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2361 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
2362 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2363 (ins VR128:$src1, i128mem:$src2),
2365 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2366 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2367 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
2370 } // ExeDomain = SSEPackedInt
2372 // 128-bit Integer Arithmetic
2374 let Predicates = [HasAVX] in {
2375 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
2376 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
2377 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
2378 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
2379 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
2380 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
2381 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
2382 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
2383 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
2386 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
2388 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
2390 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
2392 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
2394 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
2396 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
2398 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
2400 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
2402 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
2404 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
2406 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
2408 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
2410 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
2412 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
2414 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
2416 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
2418 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
2420 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
2422 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
2426 let Constraints = "$src1 = $dst" in {
2427 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
2428 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
2429 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
2430 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
2431 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
2432 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
2433 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
2434 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
2435 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
2438 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
2439 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
2440 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
2441 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
2442 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
2443 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
2444 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
2445 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
2446 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
2447 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
2448 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
2449 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
2450 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
2451 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
2452 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
2453 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
2454 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
2455 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
2456 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
2458 } // Constraints = "$src1 = $dst"
2460 //===---------------------------------------------------------------------===//
2461 // SSE2 - Packed Integer Logical Instructions
2462 //===---------------------------------------------------------------------===//
2464 let Predicates = [HasAVX] in {
2465 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
2466 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
2468 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
2469 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
2471 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
2472 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
2475 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
2476 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
2478 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
2479 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
2481 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
2482 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
2485 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
2486 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
2488 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
2489 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
2492 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
2493 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
2494 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
2496 let ExeDomain = SSEPackedInt in {
2497 let neverHasSideEffects = 1 in {
2498 // 128-bit logical shifts.
2499 def VPSLLDQri : PDIi8<0x73, MRM7r,
2500 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2501 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2503 def VPSRLDQri : PDIi8<0x73, MRM3r,
2504 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2505 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2507 // PSRADQri doesn't exist in SSE[1-3].
2509 def VPANDNrr : PDI<0xDF, MRMSrcReg,
2510 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2511 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2512 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2513 VR128:$src2)))]>, VEX_4V;
2515 def VPANDNrm : PDI<0xDF, MRMSrcMem,
2516 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2517 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2518 [(set VR128:$dst, (v2i64 (and (vnot VR128:$src1),
2519 (memopv2i64 addr:$src2))))]>,
2524 let Constraints = "$src1 = $dst" in {
2525 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
2526 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
2527 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
2528 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
2529 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
2530 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
2532 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
2533 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
2534 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
2535 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
2536 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
2537 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
2539 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
2540 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
2541 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
2542 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
2544 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
2545 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
2546 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
2548 let ExeDomain = SSEPackedInt in {
2549 let neverHasSideEffects = 1 in {
2550 // 128-bit logical shifts.
2551 def PSLLDQri : PDIi8<0x73, MRM7r,
2552 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2553 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
2554 def PSRLDQri : PDIi8<0x73, MRM3r,
2555 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2556 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
2557 // PSRADQri doesn't exist in SSE[1-3].
2559 def PANDNrr : PDI<0xDF, MRMSrcReg,
2560 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2561 "pandn\t{$src2, $dst|$dst, $src2}", []>;
2563 def PANDNrm : PDI<0xDF, MRMSrcMem,
2564 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2565 "pandn\t{$src2, $dst|$dst, $src2}", []>;
2567 } // Constraints = "$src1 = $dst"
2569 let Predicates = [HasAVX] in {
2570 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2571 (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2572 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2573 (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2574 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2575 (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
2576 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2577 (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
2578 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2579 (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2581 // Shift up / down and insert zeros.
2582 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2583 (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2584 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2585 (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2588 let Predicates = [HasSSE2] in {
2589 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
2590 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2591 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
2592 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2593 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
2594 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
2595 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
2596 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
2597 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
2598 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
2600 // Shift up / down and insert zeros.
2601 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
2602 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
2603 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
2604 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
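// Illustrative note (not part of the definitions above; the C helper below is
// just a sketch): the psll_dq/psrl_dq intrinsics express the shift amount in
// bits, while PSLLDQ/PSRLDQ shift the whole register by bytes, which is why the
// patterns above wrap the immediate in BYTE_imm. Using the standard
// <emmintrin.h> intrinsics:
//
//   #include <emmintrin.h>
//   __m128i shift_left_64_bits(__m128i v) {
//     return _mm_slli_si128(v, 8);   // byte count: 8 bytes == 64 bits
//   }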
2607 //===---------------------------------------------------------------------===//
2608 // SSE2 - Packed Integer Comparison Instructions
2609 //===---------------------------------------------------------------------===//
2611 let Predicates = [HasAVX] in {
2612 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
2614 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
2616 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
2618 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
2620 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
2622 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
2626 let Constraints = "$src1 = $dst" in {
2627 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
2628 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
2629 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
2630 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
2631 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
2632 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
2633 } // Constraints = "$src1 = $dst"
2635 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
2636 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
2637 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
2638 (PCMPEQBrm VR128:$src1, addr:$src2)>;
2639 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
2640 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
2641 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
2642 (PCMPEQWrm VR128:$src1, addr:$src2)>;
2643 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
2644 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
2645 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
2646 (PCMPEQDrm VR128:$src1, addr:$src2)>;
2648 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
2649 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
2650 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
2651 (PCMPGTBrm VR128:$src1, addr:$src2)>;
2652 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
2653 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
2654 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
2655 (PCMPGTWrm VR128:$src1, addr:$src2)>;
2656 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
2657 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
2658 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
2659 (PCMPGTDrm VR128:$src1, addr:$src2)>;
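// Illustrative note (not TableGen; helper name is made up): the packed compares
// produce a per-element mask, all-ones on true and all-zeros on false. A
// minimal C sketch using the standard <emmintrin.h> intrinsics:
//
//   #include <emmintrin.h>
//   __m128i equal_bytes(__m128i a, __m128i b) {
//     return _mm_cmpeq_epi8(a, b);   // 0xFF where a[i] == b[i], else 0x00
//   }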
2661 //===---------------------------------------------------------------------===//
2662 // SSE2 - Packed Integer Pack Instructions
2663 //===---------------------------------------------------------------------===//
2665 let Predicates = [HasAVX] in {
2666 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
2668 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
2670 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
2674 let Constraints = "$src1 = $dst" in {
2675 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
2676 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
2677 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
2678 } // Constraints = "$src1 = $dst"
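// Illustrative note (not TableGen; helper name is made up): PACKSSWB narrows
// two v8i16 inputs into one v16i8 result with signed saturation. A minimal C
// sketch using <emmintrin.h>:
//
//   #include <emmintrin.h>
//   __m128i narrow_words(__m128i lo, __m128i hi) {
//     // Each 16-bit lane is clamped to [-128, 127]; 'lo' fills bytes 0..7,
//     // 'hi' fills bytes 8..15.
//     return _mm_packs_epi16(lo, hi);
//   }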
2680 //===---------------------------------------------------------------------===//
2681 // SSE2 - Packed Integer Shuffle Instructions
2682 //===---------------------------------------------------------------------===//
2684 let ExeDomain = SSEPackedInt in {
2685 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
2687 def ri : Ii8<0x70, MRMSrcReg,
2688 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
2689 !strconcat(OpcodeStr,
2690 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2691 [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
2693 def mi : Ii8<0x70, MRMSrcMem,
2694 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
2695 !strconcat(OpcodeStr,
2696 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
2697 [(set VR128:$dst, (vt (pshuf_frag:$src2
2698 (bc_frag (memopv2i64 addr:$src1)),
2701 } // ExeDomain = SSEPackedInt
2703 let Predicates = [HasAVX] in {
2704 let AddedComplexity = 5 in
2705 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, OpSize,
2708 // SSE2 with ImmT == Imm8 and XS prefix.
2709 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
2712 // SSE2 with ImmT == Imm8 and XD prefix.
2713 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
2717 let Predicates = [HasSSE2] in {
2718 let AddedComplexity = 5 in
2719 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
2721 // SSE2 with ImmT == Imm8 and XS prefix.
2722 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
2724 // SSE2 with ImmT == Imm8 and XD prefix.
2725 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
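// Illustrative note (not TableGen; helper name is made up): the PSHUF*
// immediate selects a source element for each destination element, two bits
// per lane. A minimal C sketch using <emmintrin.h>:
//
//   #include <emmintrin.h>
//   __m128i reverse_dwords(__m128i v) {
//     // _MM_SHUFFLE(0,1,2,3) == 0x1B: dst[0]=src[3], ..., dst[3]=src[0].
//     return _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 1, 2, 3));
//   }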
2728 //===---------------------------------------------------------------------===//
2729 // SSE2 - Packed Integer Unpack Instructions
2730 //===---------------------------------------------------------------------===//
2732 let ExeDomain = SSEPackedInt in {
2733 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
2734 SDNode OpNode, PatFrag bc_frag, bit Is2Addr = 1> {
2735 def rr : PDI<opc, MRMSrcReg,
2736 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2738 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2739 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2740 [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))]>;
2741 def rm : PDI<opc, MRMSrcMem,
2742 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2744 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
2745 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2746 [(set VR128:$dst, (OpNode VR128:$src1,
2747 (bc_frag (memopv2i64
2751 let Predicates = [HasAVX] in {
2752 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Punpcklbw,
2753 bc_v16i8, 0>, VEX_4V;
2754 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Punpcklwd,
2755 bc_v8i16, 0>, VEX_4V;
2756 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Punpckldq,
2757 bc_v4i32, 0>, VEX_4V;
2759 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2760 /// knew to collapse (bitconvert VT to VT) into its operand.
2761 def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2762 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2763 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2764 [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
2765 VR128:$src2)))]>, VEX_4V;
2766 def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2767 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2768 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2769 [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
2770 (memopv2i64 addr:$src2))))]>, VEX_4V;
2772 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Punpckhbw,
2773 bc_v16i8, 0>, VEX_4V;
2774 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Punpckhwd,
2775 bc_v8i16, 0>, VEX_4V;
2776 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Punpckhdq,
2777 bc_v4i32, 0>, VEX_4V;
2779 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2780 /// knew to collapse (bitconvert VT to VT) into its operand.
2781 def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2782 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2783 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2784 [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
2785 VR128:$src2)))]>, VEX_4V;
2786 def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2787 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2788 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2789 [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
2790 (memopv2i64 addr:$src2))))]>, VEX_4V;
2793 let Constraints = "$src1 = $dst" in {
2794 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Punpcklbw, bc_v16i8>;
2795 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Punpcklwd, bc_v8i16>;
2796 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Punpckldq, bc_v4i32>;
2798 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2799 /// knew to collapse (bitconvert VT to VT) into its operand.
2800 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
2801 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2802 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2804 (v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)))]>;
2805 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
2806 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2807 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
2809 (v2i64 (X86Punpcklqdq VR128:$src1,
2810 (memopv2i64 addr:$src2))))]>;
2812 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Punpckhbw, bc_v16i8>;
2813 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Punpckhwd, bc_v8i16>;
2814 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Punpckhdq, bc_v4i32>;
2816 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
2817 /// knew to collapse (bitconvert VT to VT) into its operand.
2818 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
2819 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2820 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2822 (v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)))]>;
2823 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
2824 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2825 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
2827 (v2i64 (X86Punpckhqdq VR128:$src1,
2828 (memopv2i64 addr:$src2))))]>;
2831 } // ExeDomain = SSEPackedInt
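// Illustrative note (not TableGen; helper name is made up): the unpack
// instructions interleave the low (or high) halves of the two sources. A
// minimal C sketch using <emmintrin.h>:
//
//   #include <emmintrin.h>
//   __m128i interleave_low_bytes(__m128i a, __m128i b) {
//     // Result is { a0, b0, a1, b1, ..., a7, b7 }.
//     return _mm_unpacklo_epi8(a, b);
//   }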
2833 //===---------------------------------------------------------------------===//
2834 // SSE2 - Packed Integer Extract and Insert
2835 //===---------------------------------------------------------------------===//
2837 let ExeDomain = SSEPackedInt in {
2838 multiclass sse2_pinsrw<bit Is2Addr = 1> {
2839 def rri : Ii8<0xC4, MRMSrcReg,
2840 (outs VR128:$dst), (ins VR128:$src1,
2841 GR32:$src2, i32i8imm:$src3),
2843 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2844 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2846 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
2847 def rmi : Ii8<0xC4, MRMSrcMem,
2848 (outs VR128:$dst), (ins VR128:$src1,
2849 i16mem:$src2, i32i8imm:$src3),
2851 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
2852 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
2854 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
2859 let Predicates = [HasAVX] in
2860 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
2861 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2862 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2863 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2864 imm:$src2))]>, OpSize, VEX;
2865 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
2866 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
2867 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2868 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
2872 let Predicates = [HasAVX] in {
2873 defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V;
2874 def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
2875 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
2876 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
2877 []>, OpSize, VEX_4V;
2880 let Constraints = "$src1 = $dst" in
2881 defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
2883 } // ExeDomain = SSEPackedInt
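// Illustrative note (not TableGen; helper name is made up): PINSRW replaces one
// 16-bit element with the low half of a GPR, and PEXTRW zero-extends one
// element into a GPR. A minimal C sketch using <emmintrin.h>:
//
//   #include <emmintrin.h>
//   int replace_and_read(__m128i v, int x) {
//     __m128i t = _mm_insert_epi16(v, x, 3);  // write element 3
//     return _mm_extract_epi16(t, 3);         // read it back, zero-extended
//   }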
2885 //===---------------------------------------------------------------------===//
2886 // SSE2 - Packed Mask Creation
2887 //===---------------------------------------------------------------------===//
2889 let ExeDomain = SSEPackedInt in {
2891 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2892 "pmovmskb\t{$src, $dst|$dst, $src}",
2893 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
2894 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
2895 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
2896 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
2897 "pmovmskb\t{$src, $dst|$dst, $src}",
2898 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
2900 } // ExeDomain = SSEPackedInt
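// Illustrative note (not TableGen; helper name is made up): PMOVMSKB gathers
// the sign bit of each of the 16 bytes into the low 16 bits of a GPR. A
// minimal C sketch using <emmintrin.h>:
//
//   #include <emmintrin.h>
//   int any_byte_negative(__m128i v) {
//     return _mm_movemask_epi8(v) != 0;   // bit i == sign bit of byte i
//   }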
2902 //===---------------------------------------------------------------------===//
2903 // SSE2 - Conditional Store
2904 //===---------------------------------------------------------------------===//
2906 let ExeDomain = SSEPackedInt in {
2909 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
2910 (ins VR128:$src, VR128:$mask),
2911 "maskmovdqu\t{$mask, $src|$src, $mask}",
2912 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
2914 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
2915 (ins VR128:$src, VR128:$mask),
2916 "maskmovdqu\t{$mask, $src|$src, $mask}",
2917 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
2920 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2921 "maskmovdqu\t{$mask, $src|$src, $mask}",
2922 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
2924 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
2925 "maskmovdqu\t{$mask, $src|$src, $mask}",
2926 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
2928 } // ExeDomain = SSEPackedInt
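// Illustrative note (not TableGen; helper name is made up): MASKMOVDQU stores
// only the bytes whose corresponding mask byte has its high bit set; the
// destination address is implicitly EDI/RDI, which is why the definitions
// above list EDI or RDI. A minimal C sketch using <emmintrin.h>:
//
//   #include <emmintrin.h>
//   void masked_store(__m128i data, __m128i mask, char *dst) {
//     _mm_maskmoveu_si128(data, mask, dst);
//   }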
2930 //===---------------------------------------------------------------------===//
2931 // SSE2 - Move Doubleword
2932 //===---------------------------------------------------------------------===//
2934 //===---------------------------------------------------------------------===//
2935 // Move Int Doubleword to Packed Double Int
2937 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2938 "movd\t{$src, $dst|$dst, $src}",
2940 (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
2941 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2942 "movd\t{$src, $dst|$dst, $src}",
2944 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
2946 def VMOV64toPQIrr : VRPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2947 "mov{d|q}\t{$src, $dst|$dst, $src}",
2949 (v2i64 (scalar_to_vector GR64:$src)))]>, VEX;
2950 def VMOV64toSDrr : VRPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
2951 "mov{d|q}\t{$src, $dst|$dst, $src}",
2952 [(set FR64:$dst, (bitconvert GR64:$src))]>, VEX;
2954 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
2955 "movd\t{$src, $dst|$dst, $src}",
2957 (v4i32 (scalar_to_vector GR32:$src)))]>;
2958 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
2959 "movd\t{$src, $dst|$dst, $src}",
2961 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
2962 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
2963 "mov{d|q}\t{$src, $dst|$dst, $src}",
2965 (v2i64 (scalar_to_vector GR64:$src)))]>;
2966 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
2967 "mov{d|q}\t{$src, $dst|$dst, $src}",
2968 [(set FR64:$dst, (bitconvert GR64:$src))]>;
2970 //===---------------------------------------------------------------------===//
2971 // Move Int Doubleword to Single Scalar
2973 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2974 "movd\t{$src, $dst|$dst, $src}",
2975 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
2977 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2978 "movd\t{$src, $dst|$dst, $src}",
2979 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
2981 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
2982 "movd\t{$src, $dst|$dst, $src}",
2983 [(set FR32:$dst, (bitconvert GR32:$src))]>;
2985 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
2986 "movd\t{$src, $dst|$dst, $src}",
2987 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
2989 //===---------------------------------------------------------------------===//
2990 // Move Packed Doubleword Int to Packed Double Int
2992 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
2993 "movd\t{$src, $dst|$dst, $src}",
2994 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
2996 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
2997 (ins i32mem:$dst, VR128:$src),
2998 "movd\t{$src, $dst|$dst, $src}",
2999 [(store (i32 (vector_extract (v4i32 VR128:$src),
3000 (iPTR 0))), addr:$dst)]>, VEX;
3001 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
3002 "movd\t{$src, $dst|$dst, $src}",
3003 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
3005 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
3006 "movd\t{$src, $dst|$dst, $src}",
3007 [(store (i32 (vector_extract (v4i32 VR128:$src),
3008 (iPTR 0))), addr:$dst)]>;
3010 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3011 "mov{d|q}\t{$src, $dst|$dst, $src}",
3012 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
3014 def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
3015 "movq\t{$src, $dst|$dst, $src}",
3016 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
3018 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
3019 "mov{d|q}\t{$src, $dst|$dst, $src}",
3020 [(set GR64:$dst, (bitconvert FR64:$src))]>;
3021 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
3022 "movq\t{$src, $dst|$dst, $src}",
3023 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
3025 //===---------------------------------------------------------------------===//
3026 // Move Scalar Single to Double Int
3028 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3029 "movd\t{$src, $dst|$dst, $src}",
3030 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
3031 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3032 "movd\t{$src, $dst|$dst, $src}",
3033 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
3034 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3035 "movd\t{$src, $dst|$dst, $src}",
3036 [(set GR32:$dst, (bitconvert FR32:$src))]>;
3037 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3038 "movd\t{$src, $dst|$dst, $src}",
3039 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
3041 //===---------------------------------------------------------------------===//
3042 // Patterns and instructions to describe movd/movq to XMM register zero-extends
3044 let AddedComplexity = 15 in {
3045 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3046 "movd\t{$src, $dst|$dst, $src}",
3047 [(set VR128:$dst, (v4i32 (X86vzmovl
3048 (v4i32 (scalar_to_vector GR32:$src)))))]>,
3050 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3051 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3052 [(set VR128:$dst, (v2i64 (X86vzmovl
3053 (v2i64 (scalar_to_vector GR64:$src)))))]>,
3056 let AddedComplexity = 15 in {
3057 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3058 "movd\t{$src, $dst|$dst, $src}",
3059 [(set VR128:$dst, (v4i32 (X86vzmovl
3060 (v4i32 (scalar_to_vector GR32:$src)))))]>;
3061 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3062 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3063 [(set VR128:$dst, (v2i64 (X86vzmovl
3064 (v2i64 (scalar_to_vector GR64:$src)))))]>;
3067 let AddedComplexity = 20 in {
3068 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3069 "movd\t{$src, $dst|$dst, $src}",
3071 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3072 (loadi32 addr:$src))))))]>,
3074 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3075 "movd\t{$src, $dst|$dst, $src}",
3077 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3078 (loadi32 addr:$src))))))]>;
3080 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
3081 (MOVZDI2PDIrm addr:$src)>;
3082 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3083 (MOVZDI2PDIrm addr:$src)>;
3084 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3085 (MOVZDI2PDIrm addr:$src)>;
3088 // AVX 128-bit movd/movq instructions zero the upper 128 bits of the ymm register.
3089 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
3090 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
3091 (v4i32 (scalar_to_vector GR32:$src)), (i32 0)))),
3092 (SUBREG_TO_REG (i32 0), (VMOVZDI2PDIrr GR32:$src), sub_xmm)>;
3093 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
3094 (v2i64 (scalar_to_vector GR64:$src)), (i32 0)))),
3095 (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
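// Illustrative note (not TableGen; helper name is made up): the X86vzmovl
// patterns above model the fact that movd/movq from a GPR writes the scalar
// into the low element and zeros the rest of the vector. A minimal C sketch
// using <emmintrin.h>:
//
//   #include <emmintrin.h>
//   __m128i widen_int(int x) {
//     return _mm_cvtsi32_si128(x);   // { x, 0, 0, 0 }
//   }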
3097 // These are the correct encodings of the instructions so that we know how to
3098 // read correct assembly, even though we continue to emit the wrong ones for
3099 // compatibility with Darwin's buggy assembler.
3100 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3101 (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
3102 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3103 (MOV64toSDrr FR64:$dst, GR64:$src), 0>;
3104 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3105 (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
3106 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3107 (MOVSDto64rr GR64:$dst, FR64:$src), 0>;
3108 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3109 (VMOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
3110 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3111 (MOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
3113 //===---------------------------------------------------------------------===//
3114 // SSE2 - Move Quadword
3115 //===---------------------------------------------------------------------===//
3117 //===---------------------------------------------------------------------===//
3118 // Move Quadword Int to Packed Quadword Int
3120 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3121 "vmovq\t{$src, $dst|$dst, $src}",
3123 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3124 VEX, Requires<[HasAVX]>;
3125 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3126 "movq\t{$src, $dst|$dst, $src}",
3128 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3129 Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
3131 //===---------------------------------------------------------------------===//
3132 // Move Packed Quadword Int to Quadword Int
3134 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3135 "movq\t{$src, $dst|$dst, $src}",
3136 [(store (i64 (vector_extract (v2i64 VR128:$src),
3137 (iPTR 0))), addr:$dst)]>, VEX;
3138 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3139 "movq\t{$src, $dst|$dst, $src}",
3140 [(store (i64 (vector_extract (v2i64 VR128:$src),
3141 (iPTR 0))), addr:$dst)]>;
3143 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
3144 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
3146 //===---------------------------------------------------------------------===//
3147 // Store / copy lower 64-bits of a XMM register.
3149 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3150 "movq\t{$src, $dst|$dst, $src}",
3151 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
3152 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3153 "movq\t{$src, $dst|$dst, $src}",
3154 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
3156 let AddedComplexity = 20 in
3157 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3158 "vmovq\t{$src, $dst|$dst, $src}",
3160 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3161 (loadi64 addr:$src))))))]>,
3162 XS, VEX, Requires<[HasAVX]>;
3164 let AddedComplexity = 20 in {
3165 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3166 "movq\t{$src, $dst|$dst, $src}",
3168 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3169 (loadi64 addr:$src))))))]>,
3170 XS, Requires<[HasSSE2]>;
3172 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3173 (MOVZQI2PQIrm addr:$src)>;
3174 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
3175 (MOVZQI2PQIrm addr:$src)>;
3176 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
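// Illustrative note (not TableGen; helper name is made up): a 64-bit movq load
// fills the low quadword and zeros the high quadword, which is what the
// X86vzmovl/X86vzload patterns above capture. A minimal C sketch using
// <emmintrin.h>:
//
//   #include <emmintrin.h>
//   __m128i load_low_qword(const __m128i *p) {
//     return _mm_loadl_epi64(p);     // { low 64 bits of *p, 0 }
//   }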
3179 //===---------------------------------------------------------------------===//
3180 // Move from XMM to XMM, clearing the upper 64 bits. Note: there is a bug in
3181 // the IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
3183 let AddedComplexity = 15 in
3184 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3185 "vmovq\t{$src, $dst|$dst, $src}",
3186 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3187 XS, VEX, Requires<[HasAVX]>;
3188 let AddedComplexity = 15 in
3189 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3190 "movq\t{$src, $dst|$dst, $src}",
3191 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3192 XS, Requires<[HasSSE2]>;
3194 let AddedComplexity = 20 in
3195 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3196 "vmovq\t{$src, $dst|$dst, $src}",
3197 [(set VR128:$dst, (v2i64 (X86vzmovl
3198 (loadv2i64 addr:$src))))]>,
3199 XS, VEX, Requires<[HasAVX]>;
3200 let AddedComplexity = 20 in {
3201 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3202 "movq\t{$src, $dst|$dst, $src}",
3203 [(set VR128:$dst, (v2i64 (X86vzmovl
3204 (loadv2i64 addr:$src))))]>,
3205 XS, Requires<[HasSSE2]>;
3207 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
3208 (MOVZPQILo2PQIrm addr:$src)>;
3211 // Instructions to match in the assembler
3212 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3213 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3214 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3215 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3216 // Recognize "movd" with GR64 destination, but encode as a "movq"
3217 def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3218 "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3220 // Instructions for the disassembler
3221 // xr = XMM register
3224 let Predicates = [HasAVX] in
3225 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3226 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3227 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3228 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3230 //===---------------------------------------------------------------------===//
3231 // SSE2 - Misc Instructions
3232 //===---------------------------------------------------------------------===//
3235 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3236 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3237 TB, Requires<[HasSSE2]>;
3239 // Load, store, and memory fence
3240 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3241 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3242 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3243 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3244 def : Pat<(X86LFence), (LFENCE)>;
3245 def : Pat<(X86MFence), (MFENCE)>;
3248 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3249 // was introduced with SSE2, it's backward compatible.
3250 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
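// Illustrative note (not TableGen; helper name is made up): because PAUSE
// decodes as a plain NOP on pre-SSE2 processors, it can be used unconditionally
// in spin-wait loops. A minimal C sketch using <emmintrin.h>:
//
//   #include <emmintrin.h>
//   void spin_until(volatile int *flag) {
//     while (!*flag)
//       _mm_pause();   // reduces power and pipeline pressure while spinning
//   }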
3252 // Alias instructions that map the all-ones vector to pcmpeqd for SSE.
3253 // We set canFoldAsLoad because this can be converted to a constant-pool
3254 // load of an all-ones value if folding it would be beneficial.
3255 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
3256 // JIT implementation, which does not expand the instructions below like
3257 // X86MCInstLower does.
3258 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3259 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3260 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3261 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
3262 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3263 isCodeGenOnly = 1, ExeDomain = SSEPackedInt, Predicates = [HasAVX] in
3264 def AVX_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3265 [(set VR128:$dst, (v4i32 immAllOnesV))]>, VEX_4V;
3267 //===---------------------------------------------------------------------===//
3268 // SSE3 - Conversion Instructions
3269 //===---------------------------------------------------------------------===//
3271 // Convert Packed Double FP to Packed DW Integers
3272 let Predicates = [HasAVX] in {
3273 // The assembler can recognize rr 256-bit instructions by seeing a ymm
3274 // register, but it cannot do the same when a memory operand is used, so
3275 // provide separate rr and rm assembly forms to address this explicitly.
3276 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3277 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3278 def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3279 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3282 def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3283 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3284 def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3285 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3288 def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3289 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
3290 def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
3291 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
3294 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3295 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3296 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3297 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3299 // Convert Packed DW Integers to Packed Double FP
3300 let Predicates = [HasAVX] in {
3301 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3302 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3303 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3304 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3305 def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
3306 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3307 def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
3308 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3311 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3312 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3313 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3314 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3316 // AVX 256-bit register conversion intrinsics
3317 def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
3318 (VCVTDQ2PDYrr VR128:$src)>;
3319 def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
3320 (VCVTDQ2PDYrm addr:$src)>;
3322 def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
3323 (VCVTPD2DQYrr VR256:$src)>;
3324 def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
3325 (VCVTPD2DQYrm addr:$src)>;
3327 //===---------------------------------------------------------------------===//
3328 // SSE3 - Move Instructions
3329 //===---------------------------------------------------------------------===//
3331 //===---------------------------------------------------------------------===//
3332 // Replicate Single FP - MOVSHDUP and MOVSLDUP
3334 multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
3335 ValueType vt, RegisterClass RC, PatFrag mem_frag,
3336 X86MemOperand x86memop> {
3337 def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
3338 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3339 [(set RC:$dst, (vt (OpNode RC:$src)))]>;
3340 def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
3341 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3342 [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>;
3345 let Predicates = [HasAVX] in {
3346 defm VMOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
3347 v4f32, VR128, memopv4f32, f128mem>, VEX;
3348 defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
3349 v4f32, VR128, memopv4f32, f128mem>, VEX;
3350 defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
3351 v8f32, VR256, memopv8f32, f256mem>, VEX;
3352 defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
3353 v8f32, VR256, memopv8f32, f256mem>, VEX;
3355 defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
3356 memopv4f32, f128mem>;
3357 defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
3358 memopv4f32, f128mem>;
3360 let Predicates = [HasSSE3] in {
3361 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
3362 (MOVSHDUPrr VR128:$src)>;
3363 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
3364 (MOVSHDUPrm addr:$src)>;
3365 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
3366 (MOVSLDUPrr VR128:$src)>;
3367 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
3368 (MOVSLDUPrm addr:$src)>;
3371 let Predicates = [HasAVX] in {
3372 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
3373 (VMOVSHDUPrr VR128:$src)>;
3374 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
3375 (VMOVSHDUPrm addr:$src)>;
3376 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
3377 (VMOVSLDUPrr VR128:$src)>;
3378 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
3379 (VMOVSLDUPrm addr:$src)>;
3380 def : Pat<(v8i32 (X86Movshdup VR256:$src)),
3381 (VMOVSHDUPYrr VR256:$src)>;
3382 def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (memopv4i64 addr:$src)))),
3383 (VMOVSHDUPYrm addr:$src)>;
3384 def : Pat<(v8i32 (X86Movsldup VR256:$src)),
3385 (VMOVSLDUPYrr VR256:$src)>;
3386 def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (memopv4i64 addr:$src)))),
3387 (VMOVSLDUPYrm addr:$src)>;
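// Illustrative note (not TableGen; helper names are made up): MOVSHDUP
// duplicates the odd-indexed single-precision elements and MOVSLDUP the
// even-indexed ones. A minimal C sketch using the SSE3 intrinsics from
// <pmmintrin.h>:
//
//   #include <pmmintrin.h>
//   __m128 dup_odd(__m128 v)  { return _mm_movehdup_ps(v); } // { v1,v1,v3,v3 }
//   __m128 dup_even(__m128 v) { return _mm_moveldup_ps(v); } // { v0,v0,v2,v2 }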
3390 //===---------------------------------------------------------------------===//
3391 // Replicate Double FP - MOVDDUP
3393 multiclass sse3_replicate_dfp<string OpcodeStr> {
3394 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3395 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3396 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
3397 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
3398 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3400 (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
3404 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
3405 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3406 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3408 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3409 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3413 let Predicates = [HasAVX] in {
3414 // FIXME: Merge above classes when we have patterns for the ymm version
3415 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
3416 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
3418 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
3420 // Move Unaligned Integer
3421 let Predicates = [HasAVX] in {
3422 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3423 "vlddqu\t{$src, $dst|$dst, $src}",
3424 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
3425 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
3426 "vlddqu\t{$src, $dst|$dst, $src}",
3427 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
3429 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3430 "lddqu\t{$src, $dst|$dst, $src}",
3431 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
3433 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
3435 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3437 // Additional MOVDDUP patterns that fold loads.
3438 let AddedComplexity = 5 in {
3439 def : Pat<(movddup (memopv2f64 addr:$src), (undef)),
3440 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3441 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3442 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3443 def : Pat<(movddup (memopv2i64 addr:$src), (undef)),
3444 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
3445 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
3446 (MOVDDUPrm addr:$src)>, Requires<[HasSSE3]>;
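// Illustrative note (not TableGen; helper name is made up): MOVDDUP broadcasts
// the low double to both elements, and the patterns above fold a 64-bit load
// into it. A minimal C sketch using the SSE3 intrinsics from <pmmintrin.h>:
//
//   #include <pmmintrin.h>
//   __m128d splat_double(const double *p) {
//     return _mm_loaddup_pd(p);    // { *p, *p }, a single movddup from memory
//   }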
3449 //===---------------------------------------------------------------------===//
3450 // SSE3 - Arithmetic
3451 //===---------------------------------------------------------------------===//
3453 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
3454 X86MemOperand x86memop, bit Is2Addr = 1> {
3455 def rr : I<0xD0, MRMSrcReg,
3456 (outs RC:$dst), (ins RC:$src1, RC:$src2),
3458 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3459 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3460 [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
3461 def rm : I<0xD0, MRMSrcMem,
3462 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3464 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3465 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3466 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
3469 let Predicates = [HasAVX],
3470 ExeDomain = SSEPackedDouble in {
3471 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
3472 f128mem, 0>, TB, XD, VEX_4V;
3473 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
3474 f128mem, 0>, TB, OpSize, VEX_4V;
3475 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
3476 f256mem, 0>, TB, XD, VEX_4V;
3477 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
3478 f256mem, 0>, TB, OpSize, VEX_4V;
3480 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
3481 ExeDomain = SSEPackedDouble in {
3482 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
3484 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
3485 f128mem>, TB, OpSize;
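// Illustrative note (not TableGen; helper name is made up): ADDSUBPS subtracts
// in the even lanes and adds in the odd lanes, which is convenient for
// interleaved complex arithmetic. A minimal C sketch using <pmmintrin.h>:
//
//   #include <pmmintrin.h>
//   __m128 addsub(__m128 a, __m128 b) {
//     return _mm_addsub_ps(a, b);  // { a0-b0, a1+b1, a2-b2, a3+b3 }
//   }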
3488 //===---------------------------------------------------------------------===//
3489 // SSE3 Instructions
3490 //===---------------------------------------------------------------------===//
3493 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3494 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3495 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3497 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3498 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3499 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3501 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3503 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3504 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3505 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3507 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
3508 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
3509 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
3511 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3512 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3513 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
3515 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
3517 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3518 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3519 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
3522 let Predicates = [HasAVX] in {
3523 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
3524 int_x86_sse3_hadd_ps, 0>, VEX_4V;
3525 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
3526 int_x86_sse3_hadd_pd, 0>, VEX_4V;
3527 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
3528 int_x86_sse3_hsub_ps, 0>, VEX_4V;
3529 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
3530 int_x86_sse3_hsub_pd, 0>, VEX_4V;
3531 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
3532 int_x86_avx_hadd_ps_256, 0>, VEX_4V;
3533 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
3534 int_x86_avx_hadd_pd_256, 0>, VEX_4V;
3535 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
3536 int_x86_avx_hsub_ps_256, 0>, VEX_4V;
3537 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
3538 int_x86_avx_hsub_pd_256, 0>, VEX_4V;
3541 let Constraints = "$src1 = $dst" in {
3542 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
3543 int_x86_sse3_hadd_ps>;
3544 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
3545 int_x86_sse3_hadd_pd>;
3546 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
3547 int_x86_sse3_hsub_ps>;
3548 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
3549 int_x86_sse3_hsub_pd>;
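// Illustrative note (not TableGen; helper name is made up): the horizontal ops
// combine adjacent pairs within each source operand. A minimal C sketch using
// <pmmintrin.h>:
//
//   #include <pmmintrin.h>
//   __m128 pairwise_sums(__m128 a, __m128 b) {
//     return _mm_hadd_ps(a, b);    // { a0+a1, a2+a3, b0+b1, b2+b3 }
//   }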
3552 //===---------------------------------------------------------------------===//
3553 // SSSE3 - Packed Absolute Instructions
3554 //===---------------------------------------------------------------------===//
3557 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
3558 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
3559 PatFrag mem_frag128, Intrinsic IntId128> {
3560 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3562 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3563 [(set VR128:$dst, (IntId128 VR128:$src))]>,
3566 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3568 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3571 (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
3574 let Predicates = [HasAVX] in {
3575 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
3576 int_x86_ssse3_pabs_b_128>, VEX;
3577 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
3578 int_x86_ssse3_pabs_w_128>, VEX;
3579 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
3580 int_x86_ssse3_pabs_d_128>, VEX;
3583 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
3584 int_x86_ssse3_pabs_b_128>;
3585 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
3586 int_x86_ssse3_pabs_w_128>;
3587 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
3588 int_x86_ssse3_pabs_d_128>;
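// Illustrative note (not TableGen; helper name is made up): PABS* computes the
// per-element absolute value of a signed integer vector. A minimal C sketch
// using the SSSE3 intrinsics from <tmmintrin.h>:
//
//   #include <tmmintrin.h>
//   __m128i abs_bytes(__m128i v) {
//     return _mm_abs_epi8(v);      // |v[i]| for each signed byte
//   }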
3590 //===---------------------------------------------------------------------===//
3591 // SSSE3 - Packed Binary Operator Instructions
3592 //===---------------------------------------------------------------------===//
3594 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
3595 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
3596 PatFrag mem_frag128, Intrinsic IntId128,
3598 let isCommutable = 1 in
3599 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
3600 (ins VR128:$src1, VR128:$src2),
3602 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3603 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3604 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
3606 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
3607 (ins VR128:$src1, i128mem:$src2),
3609 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
3610 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3612 (IntId128 VR128:$src1,
3613 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
3616 let Predicates = [HasAVX] in {
3617 let isCommutable = 0 in {
3618 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
3619 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
3620 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
3621 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
3622 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
3623 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
3624 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
3625 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
3626 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
3627 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
3628 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
3629 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
3630 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
3631 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
3632 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
3633 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
3634 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
3635 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
3636 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
3637 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
3638 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
3639 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
3641 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
3642 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
3645 // None of these have i8 immediate fields.
3646 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
3647 let isCommutable = 0 in {
3648 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
3649 int_x86_ssse3_phadd_w_128>;
3650 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
3651 int_x86_ssse3_phadd_d_128>;
3652 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
3653 int_x86_ssse3_phadd_sw_128>;
3654 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
3655 int_x86_ssse3_phsub_w_128>;
3656 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
3657 int_x86_ssse3_phsub_d_128>;
3658 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
3659 int_x86_ssse3_phsub_sw_128>;
3660 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
3661 int_x86_ssse3_pmadd_ub_sw_128>;
3662 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
3663 int_x86_ssse3_pshuf_b_128>;
3664 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
3665 int_x86_ssse3_psign_b_128>;
3666 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
3667 int_x86_ssse3_psign_w_128>;
3668 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
3669 int_x86_ssse3_psign_d_128>;
3671 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
3672 int_x86_ssse3_pmul_hr_sw_128>;
3675 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
3676 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
3677 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
3678 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
3680 def : Pat<(X86psignb VR128:$src1, VR128:$src2),
3681 (PSIGNBrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
3682 def : Pat<(X86psignw VR128:$src1, VR128:$src2),
3683 (PSIGNWrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
3684 def : Pat<(X86psignd VR128:$src1, VR128:$src2),
3685 (PSIGNDrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
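// Illustrative note (not TableGen; helper name is made up): PSHUFB is a
// byte-granular table lookup; each mask byte selects a source byte, and a mask
// byte with its high bit set yields zero. A minimal C sketch using
// <tmmintrin.h>:
//
//   #include <tmmintrin.h>
//   __m128i permute_bytes(__m128i v, __m128i mask) {
//     return _mm_shuffle_epi8(v, mask);
//   }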
3687 //===---------------------------------------------------------------------===//
3688 // SSSE3 - Packed Align Instruction Patterns
3689 //===---------------------------------------------------------------------===//
3691 multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
3692 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
3693 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
3695 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3697 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3699 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
3700 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
3702 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
3704 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
3708 let Predicates = [HasAVX] in
3709 defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
3710 let Constraints = "$src1 = $dst", Predicates = [HasSSSE3] in
3711 defm PALIGN : ssse3_palign<"palignr">;
3713 let Predicates = [HasSSSE3] in {
3714 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3715 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3716 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3717 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3718 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3719 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3720 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3721 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3724 let Predicates = [HasAVX] in {
3725 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3726 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3727 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3728 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3729 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3730 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
3731 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
3732 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
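// Illustrative note (not TableGen; helper name is made up): PALIGNR
// concatenates the two sources (first operand in the high half) and extracts
// 16 bytes starting at the byte offset given by the immediate. A minimal C
// sketch using <tmmintrin.h>:
//
//   #include <tmmintrin.h>
//   __m128i align_by_4(__m128i hi, __m128i lo) {
//     return _mm_alignr_epi8(hi, lo, 4);  // bytes 4..19 of the 32-byte pair
//   }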
3735 //===---------------------------------------------------------------------===//
3736 // SSSE3 Misc Instructions
3737 //===---------------------------------------------------------------------===//
3739 // Thread synchronization
3740 let usesCustomInserter = 1 in {
3741 def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
3742 [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>;
3743 def MWAIT : PseudoI<(outs), (ins GR32:$src1, GR32:$src2),
3744 [(int_x86_sse3_mwait GR32:$src1, GR32:$src2)]>;
3747 let Uses = [EAX, ECX, EDX] in
3748 def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>, TB,
3749 Requires<[HasSSE3]>;
3750 let Uses = [ECX, EAX] in
3751 def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", []>, TB,
3752 Requires<[HasSSE3]>;
3754 def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
3755 def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;
3757 def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>,
3758 Requires<[In32BitMode]>;
3759 def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>,
3760 Requires<[In64BitMode]>;
3762 //===---------------------------------------------------------------------===//
3763 // Non-Instruction Patterns
3764 //===---------------------------------------------------------------------===//
3766 // extload f32 -> f64. This matches load+fextend because we have a hack in
3767 // the isel (PreprocessForFPConvert) that can introduce loads after dag combine.
3769 // Since these loads aren't folded into the fextend, we have to match it explicitly here.
3771 let Predicates = [HasSSE2] in
3772 def : Pat<(fextend (loadf32 addr:$src)),
3773 (CVTSS2SDrm addr:$src)>;
3775 // Bitcasts between 128-bit vector types. Return the original type since
3776 // no instruction is needed for the conversion
3777 let Predicates = [HasXMMInt] in {
3778 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
3779 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
3780 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
3781 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
3782 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
3783 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
3784 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
3785 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
3786 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
3787 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
3788 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
3789 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
3790 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
3791 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
3792 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
3793 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
3794 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
3795 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
3796 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
3797 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
3798 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
3799 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
3800 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
3801 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
3802 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
3803 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
3804 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
3805 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
3806 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
3807 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
3810 // Bitcasts between 256-bit vector types. Return the original type since
3811 // no instruction is needed for the conversion
3812 let Predicates = [HasAVX] in {
3813 def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
3814 def : Pat<(v4f64 (bitconvert (v8i32 VR256:$src))), (v4f64 VR256:$src)>;
3815 def : Pat<(v4f64 (bitconvert (v4i64 VR256:$src))), (v4f64 VR256:$src)>;
3816 def : Pat<(v4f64 (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
3817 def : Pat<(v4f64 (bitconvert (v32i8 VR256:$src))), (v4f64 VR256:$src)>;
3818 def : Pat<(v8f32 (bitconvert (v8i32 VR256:$src))), (v8f32 VR256:$src)>;
3819 def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
3820 def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
3821 def : Pat<(v8f32 (bitconvert (v32i8 VR256:$src))), (v8f32 VR256:$src)>;
3822 def : Pat<(v8f32 (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
3823 def : Pat<(v4i64 (bitconvert (v8f32 VR256:$src))), (v4i64 VR256:$src)>;
3824 def : Pat<(v4i64 (bitconvert (v8i32 VR256:$src))), (v4i64 VR256:$src)>;
3825 def : Pat<(v4i64 (bitconvert (v4f64 VR256:$src))), (v4i64 VR256:$src)>;
3826 def : Pat<(v4i64 (bitconvert (v32i8 VR256:$src))), (v4i64 VR256:$src)>;
3827 def : Pat<(v4i64 (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
3828 def : Pat<(v32i8 (bitconvert (v4f64 VR256:$src))), (v32i8 VR256:$src)>;
3829 def : Pat<(v32i8 (bitconvert (v4i64 VR256:$src))), (v32i8 VR256:$src)>;
3830 def : Pat<(v32i8 (bitconvert (v8f32 VR256:$src))), (v32i8 VR256:$src)>;
3831 def : Pat<(v32i8 (bitconvert (v8i32 VR256:$src))), (v32i8 VR256:$src)>;
3832 def : Pat<(v32i8 (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
3833 def : Pat<(v8i32 (bitconvert (v32i8 VR256:$src))), (v8i32 VR256:$src)>;
3834 def : Pat<(v8i32 (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
3835 def : Pat<(v8i32 (bitconvert (v8f32 VR256:$src))), (v8i32 VR256:$src)>;
3836 def : Pat<(v8i32 (bitconvert (v4i64 VR256:$src))), (v8i32 VR256:$src)>;
3837 def : Pat<(v8i32 (bitconvert (v4f64 VR256:$src))), (v8i32 VR256:$src)>;
3838 def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))), (v16i16 VR256:$src)>;
3839 def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))), (v16i16 VR256:$src)>;
3840 def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))), (v16i16 VR256:$src)>;
3841 def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))), (v16i16 VR256:$src)>;
3842 def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))), (v16i16 VR256:$src)>;
3845 // Move a scalar to an XMM register, zero-extending the upper elements
3846 // (a movd to an XMM register zero-extends).
3847 let AddedComplexity = 15 in {
3848 // Zero a VR128, then do a MOVS{S|D} to the lower bits.
3849 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
3850 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
3851 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
3852 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
3853 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
3854 (MOVSSrr (v4f32 (V_SET0PS)),
3855 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
3856 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
3857 (MOVSSrr (v4i32 (V_SET0PI)),
3858 (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
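// These correspond roughly to building a vector with only the low element
// live, e.g. _mm_set_ss(x) / _mm_set_sd(x): zero an XMM register and then
// movss / movsd the scalar into the low lane, leaving the upper lanes zero.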
3861 // Splat v2f64 / v2i64
3862 let AddedComplexity = 10 in {
3863 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
3864 (UNPCKLPDrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3865 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
3866 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
3869 // Special unary SHUFPSrri case.
3870 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
3871 (SHUFPSrri VR128:$src1, VR128:$src1,
3872 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3873 let AddedComplexity = 5 in
3874 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3875 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3876 Requires<[HasSSE2]>;
3877 // Special unary SHUFPDrri case.
3878 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
3879 (SHUFPDrri VR128:$src1, VR128:$src1,
3880 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3881 Requires<[HasSSE2]>;
3882 // Special unary SHUFPDrri case.
3883 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
3884 (SHUFPDrri VR128:$src1, VR128:$src1,
3885 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3886 Requires<[HasSSE2]>;
3887 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3888 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3889 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>,
3890 Requires<[HasSSE2]>;
3892 // Special binary v4i32 shuffle cases with SHUFPS.
3893 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
3894 (SHUFPSrri VR128:$src1, VR128:$src2,
3895 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3896 Requires<[HasSSE2]>;
3897 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (bc_v4i32 (memopv2i64 addr:$src2)))),
3898 (SHUFPSrmi VR128:$src1, addr:$src2,
3899 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3900 Requires<[HasSSE2]>;
3901 // Special binary v2i64 shuffle cases using SHUFPDrri.
3902 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
3903 (SHUFPDrri VR128:$src1, VR128:$src2,
3904 (SHUFFLE_get_shuf_imm VR128:$src3))>,
3905 Requires<[HasSSE2]>;
3907 let AddedComplexity = 20 in {
3908 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
3909 def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
3910 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
3912 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
3913 def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
3914 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
3916 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
3917 def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
3918 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3919 def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
3920 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
3923 let AddedComplexity = 20 in {
3924 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
3925 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
3926 (MOVLPSrm VR128:$src1, addr:$src2)>;
3927 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
3928 (MOVLPDrm VR128:$src1, addr:$src2)>;
3929 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
3930 (MOVLPSrm VR128:$src1, addr:$src2)>;
3931 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
3932 (MOVLPDrm VR128:$src1, addr:$src2)>;
3935 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
3936 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3937 (MOVLPSmr addr:$src1, VR128:$src2)>;
3938 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3939 (MOVLPDmr addr:$src1, VR128:$src2)>;
3940 def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
3942 (MOVLPSmr addr:$src1, VR128:$src2)>;
3943 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
3944 (MOVLPDmr addr:$src1, VR128:$src2)>;
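// The store patterns above rely on the fact that only the low 64 bits of the
// shuffled value differ from what is already in memory, so the whole
// load+shuffle+store sequence collapses into a single MOVLP{S,D} store of
// $src2's low half to the same address.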
3946 let AddedComplexity = 15 in {
3947 // Setting the lowest element in the vector.
3948 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
3949 (MOVSSrr (v4i32 VR128:$src1),
3950 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
3951 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
3952 (MOVSDrr (v2i64 VR128:$src1),
3953 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
3955 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
3956 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
3957 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3958 Requires<[HasSSE2]>;
3959 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
3960 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
3961 Requires<[HasSSE2]>;
3964 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
3965 // fall back to this for SSE1)
3966 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
3967 (SHUFPSrri VR128:$src2, VR128:$src1,
3968 (SHUFFLE_get_shuf_imm VR128:$src3))>;
3970 // Set lowest element and zero upper elements.
3971 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
3972 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
3974 // Use movaps / movups for SSE integer load / store (one byte shorter).
3975 // The instructions selected below are then converted to MOVDQA/MOVDQU
3976 // during the SSE domain pass.
3977 let Predicates = [HasSSE1] in {
3978 def : Pat<(alignedloadv4i32 addr:$src),
3979 (MOVAPSrm addr:$src)>;
3980 def : Pat<(loadv4i32 addr:$src),
3981 (MOVUPSrm addr:$src)>;
3982 def : Pat<(alignedloadv2i64 addr:$src),
3983 (MOVAPSrm addr:$src)>;
3984 def : Pat<(loadv2i64 addr:$src),
3985 (MOVUPSrm addr:$src)>;
3987 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
3988 (MOVAPSmr addr:$dst, VR128:$src)>;
3989 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
3990 (MOVAPSmr addr:$dst, VR128:$src)>;
3991 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
3992 (MOVAPSmr addr:$dst, VR128:$src)>;
3993 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
3994 (MOVAPSmr addr:$dst, VR128:$src)>;
3995 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
3996 (MOVUPSmr addr:$dst, VR128:$src)>;
3997 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
3998 (MOVUPSmr addr:$dst, VR128:$src)>;
3999 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
4000 (MOVUPSmr addr:$dst, VR128:$src)>;
4001 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
4002 (MOVUPSmr addr:$dst, VR128:$src)>;
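// The byte saving comes from the encoding: movaps/movups (0F 28/29, 0F 10/11)
// have no mandatory prefix, while movdqa/movdqu need a 66/F3 prefix byte. For
// example, storing a v4i32 here selects MOVAPSmr; if the surrounding uses are
// in the integer domain, the SSE domain pass may convert it to MOVDQA to
// avoid a domain-crossing penalty.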
4005 // Use vmovaps/vmovups for AVX integer load/store.
4006 let Predicates = [HasAVX] in {
4007 // 128-bit load/store
4008 def : Pat<(alignedloadv4i32 addr:$src),
4009 (VMOVAPSrm addr:$src)>;
4010 def : Pat<(loadv4i32 addr:$src),
4011 (VMOVUPSrm addr:$src)>;
4012 def : Pat<(alignedloadv2i64 addr:$src),
4013 (VMOVAPSrm addr:$src)>;
4014 def : Pat<(loadv2i64 addr:$src),
4015 (VMOVUPSrm addr:$src)>;
4017 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
4018 (VMOVAPSmr addr:$dst, VR128:$src)>;
4019 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
4020 (VMOVAPSmr addr:$dst, VR128:$src)>;
4021 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
4022 (VMOVAPSmr addr:$dst, VR128:$src)>;
4023 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
4024 (VMOVAPSmr addr:$dst, VR128:$src)>;
4025 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
4026 (VMOVUPSmr addr:$dst, VR128:$src)>;
4027 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
4028 (VMOVUPSmr addr:$dst, VR128:$src)>;
4029 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
4030 (VMOVUPSmr addr:$dst, VR128:$src)>;
4031 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
4032 (VMOVUPSmr addr:$dst, VR128:$src)>;
4034 // 256-bit load/store
4035 def : Pat<(alignedloadv4i64 addr:$src),
4036 (VMOVAPSYrm addr:$src)>;
4037 def : Pat<(loadv4i64 addr:$src),
4038 (VMOVUPSYrm addr:$src)>;
4039 def : Pat<(alignedloadv8i32 addr:$src),
4040 (VMOVAPSYrm addr:$src)>;
4041 def : Pat<(loadv8i32 addr:$src),
4042 (VMOVUPSYrm addr:$src)>;
4043 def : Pat<(alignedstore (v4i64 VR256:$src), addr:$dst),
4044 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4045 def : Pat<(alignedstore (v8i32 VR256:$src), addr:$dst),
4046 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4047 def : Pat<(store (v4i64 VR256:$src), addr:$dst),
4048 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4049 def : Pat<(store (v8i32 VR256:$src), addr:$dst),
4050 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4053 //===----------------------------------------------------------------------===//
4054 // SSE4.1 - Packed Move with Sign/Zero Extend
4055 //===----------------------------------------------------------------------===//
4057 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4058 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4059 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4060 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4062 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4063 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4065 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
4069 let Predicates = [HasAVX] in {
4070 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
4072 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
4074 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
4076 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
4078 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
4080 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
4084 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
4085 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
4086 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
4087 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
4088 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
4089 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
4091 // Common patterns involving scalar load.
4092 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
4093 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4094 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
4095 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4097 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
4098 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4099 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
4100 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4102 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
4103 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4104 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
4105 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4107 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
4108 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4109 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
4110 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4112 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
4113 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4114 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
4115 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4117 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
4118 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4119 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
4120 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
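// In intrinsic terms these let a zero-extended 64-bit scalar load feed the
// extension directly, e.g. roughly:
//   _mm_cvtepu8_epi16(_mm_loadl_epi64(p))  ==>  pmovzxbw (mem), %xmm
// instead of a separate movq load followed by a register-form pmovzxbw.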
4123 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4124 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4125 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4126 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4128 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4129 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4131 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
4135 let Predicates = [HasAVX] in {
4136 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
4138 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
4140 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
4142 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
4146 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
4147 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
4148 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
4149 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
4151 // Common patterns involving scalar load
4152 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
4153 (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
4154 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
4155 (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
4157 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
4158 (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
4159 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
4160 (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
4163 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4164 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4165 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4166 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4168 // Expecting an i16 load any-extended to an i32 value.
4169 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
4170 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4171 [(set VR128:$dst, (IntId (bitconvert
4172 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
4176 let Predicates = [HasAVX] in {
4177 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
4179 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
4182 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
4183 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
4185 // Common patterns involving scalar load
4186 def : Pat<(int_x86_sse41_pmovsxbq
4187 (bitconvert (v4i32 (X86vzmovl
4188 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4189 (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
4191 def : Pat<(int_x86_sse41_pmovzxbq
4192 (bitconvert (v4i32 (X86vzmovl
4193 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4194 (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
4196 //===----------------------------------------------------------------------===//
4197 // SSE4.1 - Extract Instructions
4198 //===----------------------------------------------------------------------===//
4200 /// SS41I_extract8 - SSE 4.1 extract 8 bits to a 32-bit reg or 8-bit mem
4201 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
4202 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4203 (ins VR128:$src1, i32i8imm:$src2),
4204 !strconcat(OpcodeStr,
4205 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4206 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
4208 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4209 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
4210 !strconcat(OpcodeStr,
4211 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4214 // There's an AssertZext in the way of writing the store pattern
4215 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4218 let Predicates = [HasAVX] in {
4219 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
4220 def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
4221 (ins VR128:$src1, i32i8imm:$src2),
4222 "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
4225 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
4228 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
4229 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
4230 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4231 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
4232 !strconcat(OpcodeStr,
4233 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4236 // There's an AssertZext in the way of writing the store pattern
4237 // (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4240 let Predicates = [HasAVX] in
4241 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
4243 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
4246 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4247 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
4248 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4249 (ins VR128:$src1, i32i8imm:$src2),
4250 !strconcat(OpcodeStr,
4251 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4253 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
4254 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4255 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
4256 !strconcat(OpcodeStr,
4257 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4258 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
4259 addr:$dst)]>, OpSize;
4262 let Predicates = [HasAVX] in
4263 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
4265 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
4267 /// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
4268 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
4269 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
4270 (ins VR128:$src1, i32i8imm:$src2),
4271 !strconcat(OpcodeStr,
4272 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4274 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
4275 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4276 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
4277 !strconcat(OpcodeStr,
4278 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4279 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
4280 addr:$dst)]>, OpSize, REX_W;
4283 let Predicates = [HasAVX] in
4284 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
4286 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
4288 /// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
4289 /// destination
4290 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
4291 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4292 (ins VR128:$src1, i32i8imm:$src2),
4293 !strconcat(OpcodeStr,
4294 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4296 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
4298 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4299 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
4300 !strconcat(OpcodeStr,
4301 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4302 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
4303 addr:$dst)]>, OpSize;
4306 let Predicates = [HasAVX] in {
4307 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
4308 def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
4309 (ins VR128:$src1, i32i8imm:$src2),
4310 "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
4313 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
4315 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
4316 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
4319 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
4320 Requires<[HasSSE41]>;
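// For example, storing lane 2 of a v4f32 straight to memory can select
// "extractps $2, %xmm, (mem)" here instead of a shuffle followed by a movss.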
4322 //===----------------------------------------------------------------------===//
4323 // SSE4.1 - Insert Instructions
4324 //===----------------------------------------------------------------------===//
4326 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
4327 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4328 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4330 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4332 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4334 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
4335 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4336 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
4338 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4340 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4342 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
4343 imm:$src3))]>, OpSize;
4346 let Predicates = [HasAVX] in
4347 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
4348 let Constraints = "$src1 = $dst" in
4349 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
4351 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
4352 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4353 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4355 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4357 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4359 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
4361 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4362 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
4364 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4366 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4368 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
4369 imm:$src3)))]>, OpSize;
4372 let Predicates = [HasAVX] in
4373 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
4374 let Constraints = "$src1 = $dst" in
4375 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
4377 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
4378 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4379 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4381 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4383 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4385 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
4387 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4388 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
4390 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4392 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4394 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
4395 imm:$src3)))]>, OpSize;
4398 let Predicates = [HasAVX] in
4399 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
4400 let Constraints = "$src1 = $dst" in
4401 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
4403 // insertps has a few different modes; the first two below are optimized
4404 // inserts that won't zero arbitrary elements in the destination vector. The
4405 // next one matches the intrinsic and may zero arbitrary elements in the
4406 // target vector.
4407 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
4408 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4409 (ins VR128:$src1, VR128:$src2, u32u8imm:$src3),
4411 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4413 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4415 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
4417 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4418 (ins VR128:$src1, f32mem:$src2, u32u8imm:$src3),
4420 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4422 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4424 (X86insrtps VR128:$src1,
4425 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
4426 imm:$src3))]>, OpSize;
4429 let Constraints = "$src1 = $dst" in
4430 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
4431 let Predicates = [HasAVX] in
4432 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
4434 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4435 (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4437 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4438 (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4439 Requires<[HasSSE41]>;
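// The insertps immediate packs three fields: bits [7:6] select the source
// lane of $src2, bits [5:4] select the destination lane, and bits [3:0] form
// a zero mask for the result. E.g. _mm_insert_ps(a, b, 0x10) inserts b[0]
// into lane 1 of a without zeroing any other lane.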
4441 //===----------------------------------------------------------------------===//
4442 // SSE4.1 - Round Instructions
4443 //===----------------------------------------------------------------------===//
4445 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
4446 X86MemOperand x86memop, RegisterClass RC,
4447 PatFrag mem_frag32, PatFrag mem_frag64,
4448 Intrinsic V4F32Int, Intrinsic V2F64Int> {
4449 // Intrinsic operation, reg.
4450 // Vector intrinsic operation, reg
4451 def PSr : SS4AIi8<opcps, MRMSrcReg,
4452 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4453 !strconcat(OpcodeStr,
4454 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4455 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
4458 // Vector intrinsic operation, mem
4459 def PSm : Ii8<opcps, MRMSrcMem,
4460 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4461 !strconcat(OpcodeStr,
4462 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4464 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
4466 Requires<[HasSSE41]>;
4468 // Vector intrinsic operation, reg
4469 def PDr : SS4AIi8<opcpd, MRMSrcReg,
4470 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4471 !strconcat(OpcodeStr,
4472 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4473 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
4476 // Vector intrinsic operation, mem
4477 def PDm : SS4AIi8<opcpd, MRMSrcMem,
4478 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4479 !strconcat(OpcodeStr,
4480 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4482 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
4486 multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
4487 RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
4488 // Intrinsic operation, reg.
4489 // Vector intrinsic operation, reg
4490 def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
4491 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4492 !strconcat(OpcodeStr,
4493 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4496 // Vector intrinsic operation, mem
4497 def PSm_AVX : Ii8<opcps, MRMSrcMem,
4498 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4499 !strconcat(OpcodeStr,
4500 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4501 []>, TA, OpSize, Requires<[HasSSE41]>;
4503 // Vector intrinsic operation, reg
4504 def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
4505 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4506 !strconcat(OpcodeStr,
4507 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4510 // Vector intrinsic operation, mem
4511 def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
4512 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4513 !strconcat(OpcodeStr,
4514 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4518 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4521 Intrinsic F64Int, bit Is2Addr = 1> {
4522 // Intrinsic operation, reg.
4523 def SSr : SS4AIi8<opcss, MRMSrcReg,
4524 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4526 !strconcat(OpcodeStr,
4527 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4528 !strconcat(OpcodeStr,
4529 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4530 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4533 // Intrinsic operation, mem.
4534 def SSm : SS4AIi8<opcss, MRMSrcMem,
4535 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4537 !strconcat(OpcodeStr,
4538 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4539 !strconcat(OpcodeStr,
4540 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4542 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
4545 // Intrinsic operation, reg.
4546 def SDr : SS4AIi8<opcsd, MRMSrcReg,
4547 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4549 !strconcat(OpcodeStr,
4550 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4551 !strconcat(OpcodeStr,
4552 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4553 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4556 // Intrinsic operation, mem.
4557 def SDm : SS4AIi8<opcsd, MRMSrcMem,
4558 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4560 !strconcat(OpcodeStr,
4561 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4562 !strconcat(OpcodeStr,
4563 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4565 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
4569 multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
4571 // Intrinsic operation, reg.
4572 def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
4573 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4574 !strconcat(OpcodeStr,
4575 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4578 // Intrinsic operation, mem.
4579 def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
4580 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4581 !strconcat(OpcodeStr,
4582 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4585 // Intrinsic operation, reg.
4586 def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
4587 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4588 !strconcat(OpcodeStr,
4589 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4592 // Intrinsic operation, mem.
4593 def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
4594 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
4595 !strconcat(OpcodeStr,
4596 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4600 // FP round - roundss, roundps, roundsd, roundpd
4601 let Predicates = [HasAVX] in {
4603 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
4604 memopv4f32, memopv2f64,
4605 int_x86_sse41_round_ps,
4606 int_x86_sse41_round_pd>, VEX;
4607 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
4608 memopv8f32, memopv4f64,
4609 int_x86_avx_round_ps_256,
4610 int_x86_avx_round_pd_256>, VEX;
4611 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
4612 int_x86_sse41_round_ss,
4613 int_x86_sse41_round_sd, 0>, VEX_4V;
4615 // Instructions for the assembler
4616 defm VROUND : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
4618 defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
4620 defm VROUND : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
4623 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
4624 memopv4f32, memopv2f64,
4625 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
4626 let Constraints = "$src1 = $dst" in
4627 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
4628 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
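// The 8-bit immediate selects the rounding mode (and exception suppression),
// so e.g. _mm_floor_ps(x), i.e. _mm_round_ps(x, _MM_FROUND_FLOOR), comes in
// through int_x86_sse41_round_ps and selects roundps with immediate 1.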
4630 //===----------------------------------------------------------------------===//
4631 // SSE4.1 - Packed Bit Test
4632 //===----------------------------------------------------------------------===//
4634 // ptest instruction: we lower to this in X86ISelLowering, primarily from the
4635 // Intel intrinsic that corresponds to it.
4636 let Defs = [EFLAGS], Predicates = [HasAVX] in {
4637 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4638 "vptest\t{$src2, $src1|$src1, $src2}",
4639 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4641 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4642 "vptest\t{$src2, $src1|$src1, $src2}",
4643 [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
4646 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
4647 "vptest\t{$src2, $src1|$src1, $src2}",
4648 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
4650 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
4651 "vptest\t{$src2, $src1|$src1, $src2}",
4652 [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
4656 let Defs = [EFLAGS] in {
4657 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
4658 "ptest \t{$src2, $src1|$src1, $src2}",
4659 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
4661 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
4662 "ptest \t{$src2, $src1|$src1, $src2}",
4663 [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
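// In practice these come from the SSE4.1 test intrinsics, e.g.
// _mm_testz_si128(a, b) becomes a ptest followed by a sete of the ZF result.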
4667 // The bit test instructions below are AVX only
4668 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
4669 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
4670 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
4671 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4672 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
4673 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
4674 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
4675 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
4679 let Defs = [EFLAGS], Predicates = [HasAVX] in {
4680 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
4681 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
4682 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
4683 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
4686 //===----------------------------------------------------------------------===//
4687 // SSE4.1 - Misc Instructions
4688 //===----------------------------------------------------------------------===//
4690 def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
4691 "popcnt{w}\t{$src, $dst|$dst, $src}",
4692 [(set GR16:$dst, (ctpop GR16:$src))]>, OpSize, XS;
4693 def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
4694 "popcnt{w}\t{$src, $dst|$dst, $src}",
4695 [(set GR16:$dst, (ctpop (loadi16 addr:$src)))]>, OpSize, XS;
4697 def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
4698 "popcnt{l}\t{$src, $dst|$dst, $src}",
4699 [(set GR32:$dst, (ctpop GR32:$src))]>, XS;
4700 def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
4701 "popcnt{l}\t{$src, $dst|$dst, $src}",
4702 [(set GR32:$dst, (ctpop (loadi32 addr:$src)))]>, XS;
4704 def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
4705 "popcnt{q}\t{$src, $dst|$dst, $src}",
4706 [(set GR64:$dst, (ctpop GR64:$src))]>, XS;
4707 def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
4708 "popcnt{q}\t{$src, $dst|$dst, $src}",
4709 [(set GR64:$dst, (ctpop (loadi64 addr:$src)))]>, XS;
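// For example, __builtin_popcount / llvm.ctpop on a GR32 value selects
// POPCNT32rr (or POPCNT32rm when the operand is a load) on targets that
// support the POPCNT instruction.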
4713 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
4714 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
4715 Intrinsic IntId128> {
4716 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4718 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4719 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
4720 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4722 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4725 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
4728 let Predicates = [HasAVX] in
4729 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
4730 int_x86_sse41_phminposuw>, VEX;
4731 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
4732 int_x86_sse41_phminposuw>;
4734 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
4735 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
4736 Intrinsic IntId128, bit Is2Addr = 1> {
4737 let isCommutable = 1 in
4738 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4739 (ins VR128:$src1, VR128:$src2),
4741 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4742 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4743 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
4744 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4745 (ins VR128:$src1, i128mem:$src2),
4747 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4748 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4750 (IntId128 VR128:$src1,
4751 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4754 let Predicates = [HasAVX] in {
4755 let isCommutable = 0 in
4756 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
4758 defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
4760 defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
4762 defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
4764 defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
4766 defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
4768 defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
4770 defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
4772 defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
4774 defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
4776 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
4780 let Constraints = "$src1 = $dst" in {
4781 let isCommutable = 0 in
4782 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
4783 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
4784 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
4785 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
4786 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
4787 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
4788 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
4789 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
4790 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
4791 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
4792 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
4795 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
4796 (PCMPEQQrr VR128:$src1, VR128:$src2)>;
4797 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
4798 (PCMPEQQrm VR128:$src1, addr:$src2)>;
4800 /// SS48I_binop_rm - Simple SSE41 binary operator.
4801 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
4802 ValueType OpVT, bit Is2Addr = 1> {
4803 let isCommutable = 1 in
4804 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4805 (ins VR128:$src1, VR128:$src2),
4807 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4808 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4809 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
4811 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4812 (ins VR128:$src1, i128mem:$src2),
4814 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4815 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4816 [(set VR128:$dst, (OpNode VR128:$src1,
4817 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
4821 let Predicates = [HasAVX] in
4822 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
4823 let Constraints = "$src1 = $dst" in
4824 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
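// SSE2 has no full 32-bit vector multiply, so a plain v4i32 "mul" (e.g.
// element-wise a * b on two integer vectors) is only matched here, selecting
// pmulld under SSE4.1 or vpmulld under AVX.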
4826 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
4827 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
4828 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
4829 X86MemOperand x86memop, bit Is2Addr = 1> {
4830 let isCommutable = 1 in
4831 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
4832 (ins RC:$src1, RC:$src2, u32u8imm:$src3),
4834 !strconcat(OpcodeStr,
4835 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4836 !strconcat(OpcodeStr,
4837 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4838 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
4840 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
4841 (ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
4843 !strconcat(OpcodeStr,
4844 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4845 !strconcat(OpcodeStr,
4846 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4849 (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
4853 let Predicates = [HasAVX] in {
4854 let isCommutable = 0 in {
4855 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
4856 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4857 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
4858 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4859 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
4860 int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4861 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
4862 int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
4863 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
4864 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4865 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
4866 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4868 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
4869 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4870 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
4871 VR128, memopv16i8, i128mem, 0>, VEX_4V;
4872 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
4873 VR256, memopv32i8, i256mem, 0>, VEX_4V;
4876 let Constraints = "$src1 = $dst" in {
4877 let isCommutable = 0 in {
4878 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
4879 VR128, memopv16i8, i128mem>;
4880 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
4881 VR128, memopv16i8, i128mem>;
4882 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
4883 VR128, memopv16i8, i128mem>;
4884 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
4885 VR128, memopv16i8, i128mem>;
4887 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
4888 VR128, memopv16i8, i128mem>;
4889 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
4890 VR128, memopv16i8, i128mem>;
4893 /// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operands
4894 let Predicates = [HasAVX] in {
4895 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
4896 RegisterClass RC, X86MemOperand x86memop,
4897 PatFrag mem_frag, Intrinsic IntId> {
4898 def rr : I<opc, MRMSrcReg, (outs RC:$dst),
4899 (ins RC:$src1, RC:$src2, RC:$src3),
4900 !strconcat(OpcodeStr,
4901 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4902 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
4903 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4905 def rm : I<opc, MRMSrcMem, (outs RC:$dst),
4906 (ins RC:$src1, x86memop:$src2, RC:$src3),
4907 !strconcat(OpcodeStr,
4908 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
4910 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
4912 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
4916 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
4917 memopv16i8, int_x86_sse41_blendvpd>;
4918 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
4919 memopv16i8, int_x86_sse41_blendvps>;
4920 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
4921 memopv16i8, int_x86_sse41_pblendvb>;
4922 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
4923 memopv32i8, int_x86_avx_blendv_pd_256>;
4924 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
4925 memopv32i8, int_x86_avx_blendv_ps_256>;
4927 /// SS41I_ternary_int - SSE 4.1 ternary operator
4928 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
4929 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4930 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
4931 (ins VR128:$src1, VR128:$src2),
4932 !strconcat(OpcodeStr,
4933 "\t{$src2, $dst|$dst, $src2}"),
4934 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
4937 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
4938 (ins VR128:$src1, i128mem:$src2),
4939 !strconcat(OpcodeStr,
4940 "\t{$src2, $dst|$dst, $src2}"),
4943 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
4947 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
4948 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
4949 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
4951 def : Pat<(X86pblendv VR128:$src1, VR128:$src2, XMM0),
4952 (PBLENDVBrr0 VR128:$src1, VR128:$src2)>;
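// The SSE4.1 variable-blend forms read their mask implicitly from XMM0
// (hence Uses = [XMM0] above); the AVX forms instead take the mask as an
// explicit fourth operand. So e.g. _mm_blendv_epi8(a, b, mask) requires the
// mask to be copied into XMM0 before PBLENDVBrr0 is emitted.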
4954 let Predicates = [HasAVX] in
4955 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4956 "vmovntdqa\t{$src, $dst|$dst, $src}",
4957 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4959 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4960 "movntdqa\t{$src, $dst|$dst, $src}",
4961 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
4964 //===----------------------------------------------------------------------===//
4965 // SSE4.2 - Compare Instructions
4966 //===----------------------------------------------------------------------===//
4968 /// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
4969 multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
4970 Intrinsic IntId128, bit Is2Addr = 1> {
4971 def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
4972 (ins VR128:$src1, VR128:$src2),
4974 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4975 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4976 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
4978 def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
4979 (ins VR128:$src1, i128mem:$src2),
4981 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4982 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4984 (IntId128 VR128:$src1,
4985 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4988 let Predicates = [HasAVX] in
4989 defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
4991 let Constraints = "$src1 = $dst" in
4992 defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
4994 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
4995 (PCMPGTQrr VR128:$src1, VR128:$src2)>;
4996 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
4997 (PCMPGTQrm VR128:$src1, addr:$src2)>;
4999 //===----------------------------------------------------------------------===//
5000 // SSE4.2 - String/text Processing Instructions
5001 //===----------------------------------------------------------------------===//
5003 // Packed Compare Implicit Length Strings, Return Mask
5004 multiclass pseudo_pcmpistrm<string asm> {
5005 def REG : PseudoI<(outs VR128:$dst),
5006 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5007 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
5009 def MEM : PseudoI<(outs VR128:$dst),
5010 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5011 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
5012 VR128:$src1, (load addr:$src2), imm:$src3))]>;
5015 let Defs = [EFLAGS], usesCustomInserter = 1 in {
5016 defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
5017 defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
5020 let Defs = [XMM0, EFLAGS], Predicates = [HasAVX] in {
5021 def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
5022 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5023 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
5024 def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
5025 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5026 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
5029 let Defs = [XMM0, EFLAGS] in {
5030 def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
5031 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5032 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
5033 def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
5034 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5035 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
5038 // Packed Compare Explicit Length Strings, Return Mask
5039 multiclass pseudo_pcmpestrm<string asm> {
5040 def REG : PseudoI<(outs VR128:$dst),
5041 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5042 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
5043 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
5044 def MEM : PseudoI<(outs VR128:$dst),
5045 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5046 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
5047 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
5050 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
5051 defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
5052 defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
5055 let Predicates = [HasAVX],
5056 Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
5057 def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
5058 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5059 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
5060 def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
5061 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5062 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
5065 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
5066 def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
5067 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5068 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
5069 def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
5070 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5071 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
5074 // Packed Compare Implicit Length Strings, Return Index
5075 let Defs = [ECX, EFLAGS] in {
5076 multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
5077 def rr : SS42AI<0x63, MRMSrcReg, (outs),
5078 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5079 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
5080 [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
5081 (implicit EFLAGS)]>, OpSize;
5082 def rm : SS42AI<0x63, MRMSrcMem, (outs),
5083 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5084 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
5085 [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
5086 (implicit EFLAGS)]>, OpSize;
5090 let Predicates = [HasAVX] in {
5091 defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
5093 defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
5095 defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
5097 defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
5099 defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
5101 defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
5105 defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
5106 defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
5107 defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
5108 defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
5109 defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
5110 defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
5112 // Packed Compare Explicit Length Strings, Return Index
5113 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
5114 multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
5115 def rr : SS42AI<0x61, MRMSrcReg, (outs),
5116 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5117 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5118 [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
5119 (implicit EFLAGS)]>, OpSize;
5120 def rm : SS42AI<0x61, MRMSrcMem, (outs),
5121 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5122 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5124 (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
5125 (implicit EFLAGS)]>, OpSize;
5129 let Predicates = [HasAVX] in {
5130 defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
5132 defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
5134 defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
5136 defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
5138 defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
5140 defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
5144 defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
5145 defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
5146 defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
5147 defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
5148 defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
5149 defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
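// Note: for the explicit-length forms, EAX and EDX implicitly hold the lengths
// of the first and second string operands respectively, and the resulting
// index is returned in ECX, as reflected in the Defs/Uses lists above.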
5151 //===----------------------------------------------------------------------===//
5152 // SSE4.2 - CRC Instructions
5153 //===----------------------------------------------------------------------===//
5155 // No CRC instructions have AVX equivalents
5157 // CRC intrinsic instructions
5158 // The instructions in this set differ only in the size of their register
5159 // and memory (r and m) operands.
5160 let Constraints = "$src1 = $dst" in {
5161 def CRC32r32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
5162 (ins GR32:$src1, i8mem:$src2),
5163                       "crc32{b} \t{$src2, $src1|$src1, $src2}",
5164                        [(set GR32:$dst,
5165                          (int_x86_sse42_crc32_32_8 GR32:$src1,
5166                          (load addr:$src2)))]>;
5167   def CRC32r32r8  : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
5168                       (ins GR32:$src1, GR8:$src2),
5169                       "crc32{b} \t{$src2, $src1|$src1, $src2}",
5170                        [(set GR32:$dst,
5171                          (int_x86_sse42_crc32_32_8 GR32:$src1, GR8:$src2))]>;
5172   def CRC32r32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5173                       (ins GR32:$src1, i16mem:$src2),
5174                       "crc32{w} \t{$src2, $src1|$src1, $src2}",
5175                        [(set GR32:$dst,
5176                          (int_x86_sse42_crc32_32_16 GR32:$src1,
5177                          (load addr:$src2)))]>,
5178                         OpSize;
5179   def CRC32r32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5180                       (ins GR32:$src1, GR16:$src2),
5181                       "crc32{w} \t{$src2, $src1|$src1, $src2}",
5182                        [(set GR32:$dst,
5183                          (int_x86_sse42_crc32_32_16 GR32:$src1, GR16:$src2))]>,
5184                         OpSize;
5185   def CRC32r32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5186                       (ins GR32:$src1, i32mem:$src2),
5187                       "crc32{l} \t{$src2, $src1|$src1, $src2}",
5188                        [(set GR32:$dst,
5189                          (int_x86_sse42_crc32_32_32 GR32:$src1,
5190                          (load addr:$src2)))]>;
5191   def CRC32r32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5192                       (ins GR32:$src1, GR32:$src2),
5193                       "crc32{l} \t{$src2, $src1|$src1, $src2}",
5194                        [(set GR32:$dst,
5195                          (int_x86_sse42_crc32_32_32 GR32:$src1, GR32:$src2))]>;
5196   def CRC32r64m8  : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
5197                       (ins GR64:$src1, i8mem:$src2),
5198                       "crc32{b} \t{$src2, $src1|$src1, $src2}",
5199                        [(set GR64:$dst,
5200                          (int_x86_sse42_crc32_64_8 GR64:$src1,
5201                          (load addr:$src2)))]>,
5202                         REX_W;
5203   def CRC32r64r8  : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
5204                       (ins GR64:$src1, GR8:$src2),
5205                       "crc32{b} \t{$src2, $src1|$src1, $src2}",
5206                        [(set GR64:$dst,
5207                          (int_x86_sse42_crc32_64_8 GR64:$src1, GR8:$src2))]>,
5208                         REX_W;
5209   def CRC32r64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
5210                       (ins GR64:$src1, i64mem:$src2),
5211                       "crc32{q} \t{$src2, $src1|$src1, $src2}",
5212                        [(set GR64:$dst,
5213                          (int_x86_sse42_crc32_64_64 GR64:$src1,
5214                          (load addr:$src2)))]>,
5215                         REX_W;
5216   def CRC32r64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
5217                       (ins GR64:$src1, GR64:$src2),
5218                       "crc32{q} \t{$src2, $src1|$src1, $src2}",
5219                        [(set GR64:$dst,
5220                          (int_x86_sse42_crc32_64_64 GR64:$src1, GR64:$src2))]>,
5221                         REX_W;
5222 }
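// Note: these instructions accumulate a CRC-32C (Castagnoli polynomial)
// checksum into the destination register; e.g. "crc32b %cl, %eax" folds the
// byte in CL into the running checksum held in EAX.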
5224 //===----------------------------------------------------------------------===//
5225 // AES-NI Instructions
5226 //===----------------------------------------------------------------------===//
5228 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
5229 Intrinsic IntId128, bit Is2Addr = 1> {
5230 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
5231                  (ins VR128:$src1, VR128:$src2),
5232                  !if(Is2Addr,
5233                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5234                      !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5235                  [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5236                  OpSize;
5237   def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
5238                  (ins VR128:$src1, i128mem:$src2),
5239                  !if(Is2Addr,
5240                      !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5241                      !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5242                  [(set VR128:$dst,
5243                    (IntId128 VR128:$src1,
5244                     (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5245 }
5247 // Perform One Round of an AES Encryption/Decryption Flow
5248 let Predicates = [HasAVX, HasAES] in {
5249 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
5250 int_x86_aesni_aesenc, 0>, VEX_4V;
5251 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
5252 int_x86_aesni_aesenclast, 0>, VEX_4V;
5253 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
5254 int_x86_aesni_aesdec, 0>, VEX_4V;
5255 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
5256                                      int_x86_aesni_aesdeclast, 0>, VEX_4V;
5257 }
5259 let Constraints = "$src1 = $dst" in {
5260 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
5261 int_x86_aesni_aesenc>;
5262 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
5263 int_x86_aesni_aesenclast>;
5264 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
5265 int_x86_aesni_aesdec>;
5266 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
5267                                int_x86_aesni_aesdeclast>;
5268 }
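// Note: each aesenc/aesenclast/aesdec/aesdeclast executes a single round of
// the AES flow, with the state in the first operand and the round key in the
// second.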
5270 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
5271 (AESENCrr VR128:$src1, VR128:$src2)>;
5272 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
5273 (AESENCrm VR128:$src1, addr:$src2)>;
5274 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
5275 (AESENCLASTrr VR128:$src1, VR128:$src2)>;
5276 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
5277 (AESENCLASTrm VR128:$src1, addr:$src2)>;
5278 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
5279 (AESDECrr VR128:$src1, VR128:$src2)>;
5280 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
5281 (AESDECrm VR128:$src1, addr:$src2)>;
5282 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
5283 (AESDECLASTrr VR128:$src1, VR128:$src2)>;
5284 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
5285 (AESDECLASTrm VR128:$src1, addr:$src2)>;
5287 // Perform the AES InvMixColumn Transformation
5288 let Predicates = [HasAVX, HasAES] in {
5289 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5290     (ins VR128:$src1),
5291     "vaesimc\t{$src1, $dst|$dst, $src1}",
5292     [(set VR128:$dst,
5293       (int_x86_aesni_aesimc VR128:$src1))]>,
5294     OpSize, VEX;
5295 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5296     (ins i128mem:$src1),
5297     "vaesimc\t{$src1, $dst|$dst, $src1}",
5298     [(set VR128:$dst,
5299       (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5300     OpSize, VEX;
5301 }
5302 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5303     (ins VR128:$src1),
5304     "aesimc\t{$src1, $dst|$dst, $src1}",
5305     [(set VR128:$dst,
5306       (int_x86_aesni_aesimc VR128:$src1))]>,
5307     OpSize;
5308 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5309     (ins i128mem:$src1),
5310     "aesimc\t{$src1, $dst|$dst, $src1}",
5311     [(set VR128:$dst,
5312       (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5313     OpSize;
5315 // AES Round Key Generation Assist
5316 let Predicates = [HasAVX, HasAES] in {
5317 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5318     (ins VR128:$src1, i8imm:$src2),
5319     "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5320     [(set VR128:$dst,
5321       (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5322     OpSize, VEX;
5323 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5324     (ins i128mem:$src1, i8imm:$src2),
5325     "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5326     [(set VR128:$dst,
5327       (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5328        imm:$src2))]>,
5329     OpSize, VEX;
5330 }
5331 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5332     (ins VR128:$src1, i8imm:$src2),
5333     "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5334     [(set VR128:$dst,
5335       (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5336     OpSize;
5337 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5338     (ins i128mem:$src1, i8imm:$src2),
5339     "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5340     [(set VR128:$dst,
5341       (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5342        imm:$src2))]>,
5343     OpSize;
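// Note: aeskeygenassist is used when expanding the AES key schedule; the
// immediate operand supplies the round constant (RCON) for that expansion step.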
5345 //===----------------------------------------------------------------------===//
5346 // CLMUL Instructions
5347 //===----------------------------------------------------------------------===//
5349 // Carry-less Multiplication instructions
5350 let Constraints = "$src1 = $dst" in {
5351 def PCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
5352 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5353 "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
5356 def PCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
5357 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5358 "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
5362 // AVX carry-less Multiplication instructions
5363 def VPCLMULQDQrr : AVXCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
5364 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5365 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5368 def VPCLMULQDQrm : AVXCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
5369 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5370 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5374 multiclass pclmul_alias<string asm, int immop> {
5375 def : InstAlias<!strconcat("pclmul", asm,
5376 "dq {$src, $dst|$dst, $src}"),
5377 (PCLMULQDQrr VR128:$dst, VR128:$src, immop)>;
5379 def : InstAlias<!strconcat("pclmul", asm,
5380 "dq {$src, $dst|$dst, $src}"),
5381 (PCLMULQDQrm VR128:$dst, i128mem:$src, immop)>;
5383 def : InstAlias<!strconcat("vpclmul", asm,
5384 "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
5385 (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop)>;
5387 def : InstAlias<!strconcat("vpclmul", asm,
5388 "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
5389                   (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop)>;
5390 }
5391 defm : pclmul_alias<"hqhq", 0x11>;
5392 defm : pclmul_alias<"hqlq", 0x01>;
5393 defm : pclmul_alias<"lqhq", 0x10>;
5394 defm : pclmul_alias<"lqlq", 0x00>;
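// The lq/hq pairs in the alias mnemonics above name the quadword half taken
// from each source operand, which is exactly what bits 0 and 4 of the
// immediate encode; e.g. pclmullqlqdq multiplies the two low quadwords
// (immediate 0x00).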
5396 //===----------------------------------------------------------------------===//
5397 // AVX Instructions
5398 //===----------------------------------------------------------------------===//
5400 //===----------------------------------------------------------------------===//
5401 // VBROADCAST - Load from memory and broadcast to all elements of the
5402 // destination operand
5404 class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
5405 X86MemOperand x86memop, Intrinsic Int> :
5406 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
5407 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5408 [(set RC:$dst, (Int addr:$src))]>, VEX;
5410 def VBROADCASTSS : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
5411 int_x86_avx_vbroadcastss>;
5412 def VBROADCASTSSY : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
5413 int_x86_avx_vbroadcastss_256>;
5414 def VBROADCASTSD : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
5415 int_x86_avx_vbroadcast_sd_256>;
5416 def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
5417 int_x86_avx_vbroadcastf128_pd_256>;
5419 def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
5420 (VBROADCASTF128 addr:$src)>;
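// Note: vbroadcastss/vbroadcastsd replicate a single scalar loaded from memory
// into every element of the destination, and vbroadcastf128 replicates a
// 128-bit block into both halves of a YMM register; the AVX forms here are
// load-only.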
5422 //===----------------------------------------------------------------------===//
5423 // VINSERTF128 - Insert packed floating-point values
5425 def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
5426 (ins VR256:$src1, VR128:$src2, i8imm:$src3),
5427           "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5428           []>, VEX_4V;
5429 def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
5430           (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
5431           "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5432           []>, VEX_4V;
5434 def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
5435 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5436 def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
5437 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5438 def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
5439 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5441 def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
5442                                    (i32 imm)),
5443           (VINSERTF128rr VR256:$src1, VR128:$src2,
5444                          (INSERT_get_vinsertf128_imm VR256:$ins))>;
5445 def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
5446                                    (i32 imm)),
5447           (VINSERTF128rr VR256:$src1, VR128:$src2,
5448                          (INSERT_get_vinsertf128_imm VR256:$ins))>;
5449 def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
5450                                    (i32 imm)),
5451           (VINSERTF128rr VR256:$src1, VR128:$src2,
5452                          (INSERT_get_vinsertf128_imm VR256:$ins))>;
5453 def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
5454                                    (i32 imm)),
5455           (VINSERTF128rr VR256:$src1, VR128:$src2,
5456                          (INSERT_get_vinsertf128_imm VR256:$ins))>;
5457 def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
5458                                    (i32 imm)),
5459           (VINSERTF128rr VR256:$src1, VR128:$src2,
5460                          (INSERT_get_vinsertf128_imm VR256:$ins))>;
5461 def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
5462                                    (i32 imm)),
5463           (VINSERTF128rr VR256:$src1, VR128:$src2,
5464                          (INSERT_get_vinsertf128_imm VR256:$ins))>;
5466 // Special COPY patterns
5467 def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (i32 0)),
5468 (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5469 def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (i32 0)),
5470 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5471 def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (i32 0)),
5472 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5473 def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (i32 0)),
5474 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5475 def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (i32 0)),
5476 (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5477 def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (i32 0)),
5478 (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
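// Note: inserting a 128-bit value into the low half of an otherwise undefined
// 256-bit vector needs no vinsertf128; it is just a copy into the xmm
// subregister.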
5480 //===----------------------------------------------------------------------===//
5481 // VEXTRACTF128 - Extract packed floating-point values
5483 def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
5484 (ins VR256:$src1, i8imm:$src2),
5485           "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5486           []>, VEX;
5487 def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
5488           (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
5489           "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5490           []>, VEX;
5492 def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
5493 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5494 def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
5495 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5496 def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
5497 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5499 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5500 (v4f32 (VEXTRACTF128rr
5501 (v8f32 VR256:$src1),
5502 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5503 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5504 (v2f64 (VEXTRACTF128rr
5505 (v4f64 VR256:$src1),
5506 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5507 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5508 (v4i32 (VEXTRACTF128rr
5509 (v8i32 VR256:$src1),
5510 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5511 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5512 (v2i64 (VEXTRACTF128rr
5513 (v4i64 VR256:$src1),
5514 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5515 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5516 (v8i16 (VEXTRACTF128rr
5517 (v16i16 VR256:$src1),
5518 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5519 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5520 (v16i8 (VEXTRACTF128rr
5521 (v32i8 VR256:$src1),
5522 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5524 // Special COPY patterns
5525 def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (i32 0))),
5526 (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
5527 def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (i32 0))),
5528 (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;
5530 def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (i32 0))),
5531 (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
5532 def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (i32 0))),
5533 (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;
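// Note: likewise, extracting the low 128 bits of a 256-bit vector is handled
// as a plain xmm subregister extraction rather than a vextractf128.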
5536 //===----------------------------------------------------------------------===//
5537 // VMASKMOV - Conditional SIMD Packed Loads and Stores
5539 multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
5540 Intrinsic IntLd, Intrinsic IntLd256,
5541 Intrinsic IntSt, Intrinsic IntSt256,
5542 PatFrag pf128, PatFrag pf256> {
5543 def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
5544 (ins VR128:$src1, f128mem:$src2),
5545 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5546              [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
5547              VEX_4V;
5548   def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
5549              (ins VR256:$src1, f256mem:$src2),
5550              !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5551              [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
5552              VEX_4V;
5553 def mr : AVX8I<opc_mr, MRMDestMem, (outs),
5554 (ins f128mem:$dst, VR128:$src1, VR128:$src2),
5555 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5556 [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
5557 def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
5558 (ins f256mem:$dst, VR256:$src1, VR256:$src2),
5559 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5560              [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
5561 }
5563 defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
5564 int_x86_avx_maskload_ps,
5565 int_x86_avx_maskload_ps_256,
5566 int_x86_avx_maskstore_ps,
5567 int_x86_avx_maskstore_ps_256,
5568 memopv4f32, memopv8f32>;
5569 defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
5570 int_x86_avx_maskload_pd,
5571 int_x86_avx_maskload_pd_256,
5572 int_x86_avx_maskstore_pd,
5573 int_x86_avx_maskstore_pd_256,
5574 memopv2f64, memopv4f64>;
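// Note: for vmaskmovps/pd the sign bit of each mask element selects whether
// that element is transferred; masked-out elements read as zero on loads and
// leave memory untouched on stores.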
5576 //===----------------------------------------------------------------------===//
5577 // VPERM - Permute Floating-Point Values
5579 multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
5580 RegisterClass RC, X86MemOperand x86memop_f,
5581 X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
5582 Intrinsic IntVar, Intrinsic IntImm> {
5583 def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
5584 (ins RC:$src1, RC:$src2),
5585 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5586 [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
5587 def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
5588 (ins RC:$src1, x86memop_i:$src2),
5589 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5590 [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;
5592 def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
5593 (ins RC:$src1, i8imm:$src2),
5594 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5595 [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
5596 def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
5597 (ins x86memop_f:$src1, i8imm:$src2),
5598 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
5599              [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
5600 }
5602 defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
5603 memopv4f32, memopv4i32,
5604 int_x86_avx_vpermilvar_ps,
5605 int_x86_avx_vpermil_ps>;
5606 defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
5607 memopv8f32, memopv8i32,
5608 int_x86_avx_vpermilvar_ps_256,
5609 int_x86_avx_vpermil_ps_256>;
5610 defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
5611 memopv2f64, memopv2i64,
5612 int_x86_avx_vpermilvar_pd,
5613 int_x86_avx_vpermil_pd>;
5614 defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
5615 memopv4f64, memopv4i64,
5616 int_x86_avx_vpermilvar_pd_256,
5617 int_x86_avx_vpermil_pd_256>;
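// Note: the "var" (rr/rm) forms take per-element selectors from a second
// vector operand, while the ri/mi forms encode them in imm8; vpermilpd uses
// one selector bit per double and vpermilps two bits per float.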
5619 def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
5620 (ins VR256:$src1, VR256:$src2, i8imm:$src3),
5621           "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5622           []>, VEX_4V;
5623 def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
5624           (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
5625           "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5626           []>, VEX_4V;
5628 def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
5629 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5630 def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
5631 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5632 def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
5633 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
5635 def : Pat<(int_x86_avx_vperm2f128_ps_256
5636 VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
5637 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5638 def : Pat<(int_x86_avx_vperm2f128_pd_256
5639 VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
5640 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
5641 def : Pat<(int_x86_avx_vperm2f128_si_256
5642 VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
5643 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
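// Note: for vperm2f128, immediate bits 1:0 and 5:4 choose which 128-bit half
// of the two concatenated sources feeds each destination lane, and bits 3 and
// 7 zero the corresponding lane instead.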
5645 // Shuffle with VPERMIL instructions
5646 def : Pat<(v8f32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
5647 (VPERMILPSYri VR256:$src1, imm:$imm)>;
5648 def : Pat<(v4f64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
5649 (VPERMILPDYri VR256:$src1, imm:$imm)>;
5650 def : Pat<(v8i32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
5651 (VPERMILPSYri VR256:$src1, imm:$imm)>;
5652 def : Pat<(v4i64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
5653 (VPERMILPDYri VR256:$src1, imm:$imm)>;
5655 //===----------------------------------------------------------------------===//
5656 // VZERO - Zero YMM registers
5658 // Zero All YMM registers
5659 def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
5660 [(int_x86_avx_vzeroall)]>, VEX, VEX_L, Requires<[HasAVX]>;
5662 // Zero Upper bits of YMM registers
5663 def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
5664 [(int_x86_avx_vzeroupper)]>, VEX, Requires<[HasAVX]>;
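// Note: vzeroupper clears only the upper 128 bits of the YMM registers,
// preserving the low halves; it is typically emitted before calling legacy SSE
// code to avoid AVX/SSE transition penalties.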
5666 //===----------------------------------------------------------------------===//
5667 // SSE Shuffle pattern fragments
5668 //===----------------------------------------------------------------------===//
5670 // This is part of a "work in progress" refactoring. The idea is that all
5671 // vector shuffles are going to be translated into target-specific nodes and
5672 // directly matched by the patterns below (which can be changed along the way).
5673 // The AVX versions of some but not all of them are described here, and more
5674 // should come in the near future.
5676 // Shuffle with PSHUFD instruction folding loads. The first two patterns match
5677 // SSE2 loads, which are always promoted to v2i64. The last one should match
5678 // the SSE1 case, where the only legal load is v4f32, but PSHUFD is not
5679 // available before SSE2, so how did this ever work? Anyway, the pattern will
5680 // remain here until we investigate further.
5681 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
5682                                    (i8 imm:$imm))),
5683           (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>;
5684 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
5685                                    (i8 imm:$imm))),
5686           (PSHUFDmi addr:$src1, imm:$imm)>;
5687 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
5688                                    (i8 imm:$imm))),
5689           (PSHUFDmi addr:$src1, imm:$imm)>; // FIXME: has this ever worked?
5691 // Shuffle with PSHUFD instruction.
5692 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5693 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
5694 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5695 (PSHUFDri VR128:$src1, imm:$imm)>;
5697 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5698 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
5699 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
5700 (PSHUFDri VR128:$src1, imm:$imm)>;
5702 // Shuffle with SHUFPD instruction.
5703 def : Pat<(v2f64 (X86Shufps VR128:$src1,
5704 (memopv2f64 addr:$src2), (i8 imm:$imm))),
5705 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5706 def : Pat<(v2f64 (X86Shufps VR128:$src1,
5707 (memopv2f64 addr:$src2), (i8 imm:$imm))),
5708 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
5710 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5711 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5712 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5713 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
5715 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5716 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5717 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5718 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
5720 // Shuffle with SHUFPS instruction.
5721 def : Pat<(v4f32 (X86Shufps VR128:$src1,
5722 (memopv4f32 addr:$src2), (i8 imm:$imm))),
5723 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5724 def : Pat<(v4f32 (X86Shufps VR128:$src1,
5725 (memopv4f32 addr:$src2), (i8 imm:$imm))),
5726 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
5728 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5729 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5730 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5731 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
5733 def : Pat<(v4i32 (X86Shufps VR128:$src1,
5734 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
5735 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>;
5736 def : Pat<(v4i32 (X86Shufps VR128:$src1,
5737 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
5738 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
5740 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5741 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>;
5742 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
5743 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
5745 // Shuffle with MOVHLPS instruction
5746 def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
5747 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
5748 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
5749 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
5751 // Shuffle with MOVDDUP instruction
5752 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5753 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5754 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
5755 (MOVDDUPrm addr:$src)>;
5757 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
5758 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5759 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
5760 (MOVDDUPrm addr:$src)>;
5762 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
5763 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5764 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
5765 (MOVDDUPrm addr:$src)>;
5767 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
5768 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5769 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
5770 (MOVDDUPrm addr:$src)>;
5772 def : Pat<(X86Movddup (bc_v2f64
5773 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5774 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
5775 def : Pat<(X86Movddup (bc_v2f64
5776 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
5777 (MOVDDUPrm addr:$src)>;
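// Note: movddup duplicates the low double into both elements, so any of the
// 64-bit load shapes above can be folded directly into the rm form.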
5780 // Shuffle with UNPCKLPS
5781 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
5782 (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5783 def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, (memopv8f32 addr:$src2))),
5784 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
5785 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
5786 (UNPCKLPSrm VR128:$src1, addr:$src2)>;
5788 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
5789 (VUNPCKLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5790 def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
5791 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
5792 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
5793 (UNPCKLPSrr VR128:$src1, VR128:$src2)>;
5795 // Shuffle with UNPCKHPS
5796 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
5797 (VUNPCKHPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5798 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
5799 (UNPCKHPSrm VR128:$src1, addr:$src2)>;
5801 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
5802 (VUNPCKHPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5803 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
5804 (UNPCKHPSrr VR128:$src1, VR128:$src2)>;
5806 // Shuffle with VUNPCKHPSY
5807 def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, (memopv8f32 addr:$src2))),
5808 (VUNPCKHPSYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
5809 def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
5810 (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
5812 // Shuffle with UNPCKLPD
5813 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
5814 (VUNPCKLPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5815 def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, (memopv4f64 addr:$src2))),
5816 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
5817 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
5818 (UNPCKLPDrm VR128:$src1, addr:$src2)>;
5820 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
5821 (VUNPCKLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5822 def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
5823 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
5824 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
5825 (UNPCKLPDrr VR128:$src1, VR128:$src2)>;
5827 // Shuffle with UNPCKHPD
5828 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
5829 (VUNPCKHPDrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>;
5830 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
5831 (UNPCKHPDrm VR128:$src1, addr:$src2)>;
5833 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
5834 (VUNPCKHPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>;
5835 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
5836 (UNPCKHPDrr VR128:$src1, VR128:$src2)>;
5838 // Shuffle with VUNPCKHPDY
5839 def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, (memopv4f64 addr:$src2))),
5840 (VUNPCKHPDYrm VR256:$src1, addr:$src2)>, Requires<[HasAVX]>;
5841 def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
5842 (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>, Requires<[HasAVX]>;
5844 // Shuffle with MOVLHPS
5845 def : Pat<(X86Movlhps VR128:$src1,
5846 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
5847 (MOVHPSrm VR128:$src1, addr:$src2)>;
5848 def : Pat<(X86Movlhps VR128:$src1,
5849 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
5850 (MOVHPSrm VR128:$src1, addr:$src2)>;
5851 def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
5852 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
5853 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
5854 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
5855 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
5856 (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
5858 // FIXME: Instead of X86Movddup, there should be an X86Unpcklpd here. The
5859 // problem is during lowering, where it's not possible to recognize the load
5860 // fold because it has two uses through a bitcast. One use disappears at isel
5861 // time and the fold opportunity reappears.
5862 def : Pat<(v2f64 (X86Movddup VR128:$src)),
5863 (UNPCKLPDrr VR128:$src, VR128:$src)>;
5865 // Shuffle with MOVLHPD
5866 def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
5867 (scalar_to_vector (loadf64 addr:$src2)))),
5868 (MOVHPDrm VR128:$src1, addr:$src2)>;
5870 // FIXME: Instead of X86Unpcklpd, there should be an X86Movlhpd here. The
5871 // problem is during lowering, where it's not possible to recognize the load
5872 // fold because it has two uses through a bitcast. One use disappears at isel
5873 // time and the fold opportunity reappears.
5874 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
5875 (scalar_to_vector (loadf64 addr:$src2)))),
5876 (MOVHPDrm VR128:$src1, addr:$src2)>;
5878 // Shuffle with MOVSS
5879 def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
5880 (MOVSSrr VR128:$src1, FR32:$src2)>;
5881 def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
5882 (MOVSSrr (v4i32 VR128:$src1),
5883 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
5884 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
5885 (MOVSSrr (v4f32 VR128:$src1),
5886 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
5887 // FIXME: Instead of an X86Movss, there should be an X86Movlps here. The
5888 // problem is during lowering, where it's not possible to recognize the load
5889 // fold because it has two uses through a bitcast. One use disappears at isel
5890 // time and the fold opportunity reappears.
5891 def : Pat<(X86Movss VR128:$src1,
5892 (bc_v4i32 (v2i64 (load addr:$src2)))),
5893 (MOVLPSrm VR128:$src1, addr:$src2)>;
5895 // Shuffle with MOVSD
5896 def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
5897 (MOVSDrr VR128:$src1, FR64:$src2)>;
5898 def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
5899 (MOVSDrr (v2i64 VR128:$src1),
5900 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
5901 def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
5902 (MOVSDrr (v2f64 VR128:$src1),
5903 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
5904 def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
5905 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
5906 def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
5907 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
5909 // Shuffle with PSHUFHW
5910 def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
5911 (PSHUFHWri VR128:$src, imm:$imm)>;
5912 def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
5913 (PSHUFHWmi addr:$src, imm:$imm)>;
5915 // Shuffle with PSHUFLW
5916 def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
5917 (PSHUFLWri VR128:$src, imm:$imm)>;
5918 def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))),
5919 (PSHUFLWmi addr:$src, imm:$imm)>;
5921 // Shuffle with MOVLPS
5922 def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
5923 (MOVLPSrm VR128:$src1, addr:$src2)>;
5924 def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
5925 (MOVLPSrm VR128:$src1, addr:$src2)>;
5926 def : Pat<(X86Movlps VR128:$src1,
5927 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
5928 (MOVLPSrm VR128:$src1, addr:$src2)>;
5929 // FIXME: Instead of an X86Movlps, there should be an X86Movsd here. The
5930 // problem is during lowering, where it's not possible to recognize the load
5931 // fold because it has two uses through a bitcast. One use disappears at isel
5932 // time and the fold opportunity reappears.
5933 def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
5934 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
5936 def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
5937 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
5939 // Shuffle with MOVLPD
5940 def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
5941 (MOVLPDrm VR128:$src1, addr:$src2)>;
5942 def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
5943 (MOVLPDrm VR128:$src1, addr:$src2)>;
5944 def : Pat<(v2f64 (X86Movlpd VR128:$src1,
5945 (scalar_to_vector (loadf64 addr:$src2)))),
5946 (MOVLPDrm VR128:$src1, addr:$src2)>;
5948 // Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD
5949 def : Pat<(store (f64 (vector_extract
5950 (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))),addr:$dst),
5951 (MOVHPSmr addr:$dst, VR128:$src)>;
5952 def : Pat<(store (f64 (vector_extract
5953 (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))),addr:$dst),
5954 (MOVHPDmr addr:$dst, VR128:$src)>;
5956 def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),addr:$src1),
5957 (MOVLPSmr addr:$src1, VR128:$src2)>;
5958 def : Pat<(store (v4i32 (X86Movlps
5959 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
5960 (MOVLPSmr addr:$src1, VR128:$src2)>;
5962 def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
5963 (MOVLPDmr addr:$src1, VR128:$src2)>;
5964 def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
5965 (MOVLPDmr addr:$src1, VR128:$src2)>;