//====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the X86 SSE instruction set, defining the instructions,
// and properties of the instructions which are needed for code generation,
// machine code emission, and analysis.
//
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
// SSE 1 & 2 Instruction Classes
//===----------------------------------------------------------------------===//
/// sse12_fp_scalar - SSE 1 & 2 scalar instructions class
multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           RegisterClass RC, X86MemOperand x86memop,
                           bit Is2Addr = 1> {
  let isCommutable = 1 in {
    def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                !if(Is2Addr,
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
  }
  def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
}
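// As an illustrative (hypothetical) instantiation:
//   defm ADD : sse12_fp_scalar<0x58, "addss", fadd, FR32, f32mem>;
// would create ADDrr/ADDrm with the two-address asm string
// "addss\t{$src2, $dst|$dst, $src2}"; the AVX users instead pass Is2Addr = 0,
// so the !if in the multiclass above picks the three-operand
// "$src2, $src1, $dst" string.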
/// sse12_fp_scalar_int - SSE 1 & 2 scalar instructions intrinsics class
multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               Operand memopr, ComplexPattern mem_cpat,
                               bit Is2Addr = 1> {
  def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))]>;
  def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
                                          SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, mem_cpat:$src2))]>;
}
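// How the intrinsic name is assembled (the parameter values here are only
// for illustration): with SSEVer = "", OpcodeStr = "add" and FPSizeStr =
// "_ss", the !strconcat yields "int_x86_sse_add_ss", which !cast<Intrinsic>
// resolves to llvm.x86.sse.add.ss; SSEVer = "2" with FPSizeStr = "_sd" would
// name llvm.x86.sse2.add.sd instead.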
60 /// sse12_fp_packed - SSE 1 & 2 packed instructions class
61 multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
62 RegisterClass RC, ValueType vt,
63 X86MemOperand x86memop, PatFrag mem_frag,
64 Domain d, bit Is2Addr = 1> {
65 let isCommutable = 1 in
66 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
68 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
69 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
70 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
72 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
74 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
75 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
76 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
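// A hypothetical usage sketch: sse12_fp_packed<0x58, "addps", fadd, VR128,
// v4f32, f128mem, memopv4f32, SSEPackedSingle> would yield the packed
// single-precision rr/rm pair; the Domain operand d is what the SSE
// execution-domain fix-up pass keys on.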
/// sse12_fp_packed_logical_rm - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
                                      string OpcodeStr, X86MemOperand x86memop,
                                      list<dag> pat_rr, list<dag> pat_rm,
                                      bit Is2Addr = 1> {
  let isCommutable = 1 in
    def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
                !if(Is2Addr,
                    !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                    !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
                pat_rr, d>;
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              pat_rm, d>;
}
/// sse12_fp_packed_int - SSE 1 & 2 packed instructions intrinsics class
multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
                               string asm, string SSEVer, string FPSizeStr,
                               X86MemOperand x86memop, PatFrag mem_frag,
                               Domain d, bit Is2Addr = 1> {
  def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                     !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, RC:$src2))], d>;
  def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
       !if(Is2Addr,
           !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (!cast<Intrinsic>(
                     !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
             RC:$src1, (mem_frag addr:$src2)))], d>;
}
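// Unlike the scalar class above, the stem here is just "int_x86_" and SSEVer
// is spelled out in full: e.g. SSEVer = "sse", OpcodeStr = "add",
// FPSizeStr = "_ps" (illustrative values) assembles "int_x86_sse_add_ps",
// i.e. llvm.x86.sse.add.ps.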
//===----------------------------------------------------------------------===//
// Non-instruction patterns
//===----------------------------------------------------------------------===//

// A vector extract of the first f32/f64 position is a subregister copy
def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
          (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
          (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;

// A 128-bit subvector extract from the first 256-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (i32 0))),
          (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (i32 0))),
          (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;

def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (i32 0))),
          (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (i32 0))),
          (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;

def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (i32 0))),
          (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (i32 0))),
          (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;

// A 128-bit subvector insert to the first 256-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (i32 0)),
          (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
// Implicitly promote a 32-bit scalar to a vector.
def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
// Implicitly promote a 64-bit scalar to a vector.
def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
// Bitcasts between 128-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasXMMInt] in {
  def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
}
// Bitcasts between 256-bit vector types. Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasAVX] in {
  def : Pat<(v4f64  (bitconvert (v8f32 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v8i32 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v4i64 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
  def : Pat<(v4f64  (bitconvert (v32i8 VR256:$src))),  (v4f64 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v8i32 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v4i64 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v4f64 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v32i8 VR256:$src))),  (v8f32 VR256:$src)>;
  def : Pat<(v8f32  (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v8f32 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v8i32 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v4f64 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v32i8 VR256:$src))),  (v4i64 VR256:$src)>;
  def : Pat<(v4i64  (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v4f64 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v4i64 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v8f32 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v8i32 VR256:$src))),  (v32i8 VR256:$src)>;
  def : Pat<(v32i8  (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v32i8 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v8f32 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v4i64 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v8i32  (bitconvert (v4f64 VR256:$src))),  (v8i32 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))),  (v16i16 VR256:$src)>;
  def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))),  (v16i16 VR256:$src)>;
}
// Alias instructions that map fld0 to pxor for sse.
// FIXME: Set encoding to pseudo!
let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
    canFoldAsLoad = 1 in {
  def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
                   [(set FR32:$dst, fp32imm0)]>,
                   Requires<[HasSSE1]>, TB, OpSize;
  def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
                   [(set FR64:$dst, fpimm0)]>,
                   Requires<[HasSSE2]>, TB, OpSize;
  def VFsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
                    [(set FR32:$dst, fp32imm0)]>,
                    Requires<[HasAVX]>, TB, OpSize, VEX_4V;
  def VFsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
                    [(set FR64:$dst, fpimm0)]>,
                    Requires<[HasAVX]>, TB, OpSize, VEX_4V;
}
//===----------------------------------------------------------------------===//
// AVX & SSE - Zero/One Vectors
//===----------------------------------------------------------------------===//

// Alias instructions that map zero vector to pxor / xorp* for sse.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
// FIXME: Change encoding to pseudo! This is blocked right now by the x86
// JIT implementation, which does not expand the instructions below like
// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1 in {
def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
                   [(set VR128:$dst, (v4f32 immAllZerosV))]>;
def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
                   [(set VR128:$dst, (v2f64 immAllZerosV))]>;
let ExeDomain = SSEPackedInt in
def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
                   [(set VR128:$dst, (v4i32 immAllZerosV))]>;
}
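// For reference: these MRMInitReg "instructions" carry an empty asm string
// because X86MCInstLower rewrites them into a self-xor of the destination
// register (e.g. V_SET0PS becomes "xorps %xmm0, %xmm0" when $dst is %xmm0),
// which is exactly what the JIT path mentioned above fails to do.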
// The same as done above but for AVX. The 128-bit versions are the
// same, but re-encoded. The 256-bit ones do not support a PI version, and
// do not need one: on Sandy Bridge the register is set to zero at the rename
// stage without using any execution unit, so SET0PSY and SET0PDY can be used
// for vector int instructions without penalty.
// FIXME: Change encoding to pseudo! This is blocked right now by the x86
// JIT implementation, which does not expand the instructions below like
// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1, Predicates = [HasAVX] in {
def AVX_SET0PS  : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
                      [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
def AVX_SET0PD  : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
                      [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
                      [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
                      [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
let ExeDomain = SSEPackedInt in
def AVX_SET0PI  : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
                      [(set VR128:$dst, (v4i32 immAllZerosV))]>;
}
def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;

// AVX has no support for 256-bit integer instructions, but since the 128-bit
// VPXOR instruction writes zero to its upper part, it's safe to build zeros.
def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;
def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;

def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
          (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-ones value if folding it would be beneficial.
// FIXME: Change encoding to pseudo! This is blocked right now by the x86
// JIT implementation, which does not expand the instructions below like
// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
  def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
                         [(set VR128:$dst, (v4i32 immAllOnesV))]>;
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1, ExeDomain = SSEPackedInt, Predicates = [HasAVX] in
  def AVX_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
                           [(set VR128:$dst, (v4i32 immAllOnesV))]>, VEX_4V;
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move FP Scalar Instructions
//
// Move Instructions. Register-to-register movss/movsd is not used for FR32/64
// register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
// is used instead. Register-to-register movss/movsd is not modeled as an
// INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
// in terms of a copy, and, as just mentioned, we don't use movss/movsd for
// copies.
//===----------------------------------------------------------------------===//
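// (Concretely: "movss %xmm1, %xmm0" writes only bits 31:0 of %xmm0 and leaves
// bits 127:32 unchanged, so the result depends on — and may stall on — the old
// value of %xmm0; movaps rewrites the full register and carries no such
// dependence.)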
class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
      SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
         [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;

// Loading from memory automatically zeros the upper bits.
class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
                    PatFrag mem_pat, string OpcodeStr> :
      SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
         !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
         [(set RC:$dst, (mem_pat addr:$src))]>;
def VMOVSSrr : sse12_move_rr<FR32, v4f32,
                "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
def VMOVSDrr : sse12_move_rr<FR64, v2f64,
                "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;

  let AddedComplexity = 20 in
    def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
}

def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>, XS, VEX;
def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>, XD, VEX;
let Constraints = "$src1 = $dst" in {
  def MOVSSrr : sse12_move_rr<FR32, v4f32,
                "movss\t{$src2, $dst|$dst, $src2}">, XS;
  def MOVSDrr : sse12_move_rr<FR64, v2f64,
                "movsd\t{$src2, $dst|$dst, $src2}">, XD;
}

let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;

  let AddedComplexity = 20 in
    def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
}

def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
                  "movss\t{$src, $dst|$dst, $src}",
                  [(store FR32:$src, addr:$dst)]>;
def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
                  "movsd\t{$src, $dst|$dst, $src}",
                  [(store FR64:$src, addr:$dst)]>;
let Predicates = [HasSSE1] in {
  let AddedComplexity = 15 in {
  // Extract the low 32-bit value from one vector and insert it into another.
  def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
            (MOVSSrr (v4f32 VR128:$src1),
                     (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
  def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
            (MOVSSrr (v4i32 VR128:$src1),
                     (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;

  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSS to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
            (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (MOVSSrr (v4f32 (V_SET0PS)),
                     (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (MOVSSrr (v4i32 (V_SET0PI)),
                     (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
  }

  let AddedComplexity = 20 in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
  }

  // Extract and store.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSSmr addr:$dst,
                     (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;

  // Shuffle with MOVSS
  def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
            (MOVSSrr VR128:$src1, FR32:$src2)>;
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr (v4i32 VR128:$src1),
                     (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (MOVSSrr (v4f32 VR128:$src1),
                     (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
}
let Predicates = [HasSSE2] in {
  let AddedComplexity = 15 in {
  // Extract the low 64-bit value from one vector and insert it into another.
  def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
            (MOVSDrr (v2f64 VR128:$src1),
                     (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
  def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
            (MOVSDrr (v2i64 VR128:$src1),
                     (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;

  // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
  def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>;
  def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>;

  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVSD to the lower bits.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
  }

  let AddedComplexity = 20 in {
  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG.
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
  }

  // Extract and store.
  def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (MOVSDmr addr:$dst,
                     (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;

  // Shuffle with MOVSD
  def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
            (MOVSDrr VR128:$src1, FR64:$src2)>;
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr (v2i64 VR128:$src1),
                     (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr (v2f64 VR128:$src1),
                     (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2),sub_sd))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2),sub_sd))>;

  // FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
  // is during lowering, where it's not possible to recognize the fold because
  // it has two uses through a bitcast. One use disappears at isel time and the
  // fold opportunity reappears.
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2),sub_sd))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2),sub_sd))>;
}
let Predicates = [HasAVX] in {
  let AddedComplexity = 15 in {
  // Extract the low 32-bit value from one vector and insert it into another.
  def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4f32 VR128:$src1),
                      (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
  def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4i32 VR128:$src1),
                      (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;

  // Extract the low 64-bit value from one vector and insert it into another.
  def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
            (VMOVSDrr (v2f64 VR128:$src1),
                      (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
  def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
            (VMOVSDrr (v2i64 VR128:$src1),
                      (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;

  // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
  def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>;
  def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>;

  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
  // MOVS{S,D} to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
            (VMOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (VMOVSSrr (v4f32 (V_SET0PS)),
                      (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
            (VMOVSSrr (v4i32 (V_SET0PI)),
                      (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
            (VMOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
  }

  let AddedComplexity = 20 in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;

  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;

  // Represent the same patterns above but in the form they appear for
  // 256-bit types
  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector (loadf32 addr:$src))), (i32 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector (loadf64 addr:$src))), (i32 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_sd)>;
  }

  // Extract and store.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSSmr addr:$dst,
                      (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSDmr addr:$dst,
                      (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;

  // Shuffle with VMOVSS
  def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
            (VMOVSSrr VR128:$src1, FR32:$src2)>;
  def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4i32 VR128:$src1),
                      (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
  def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
            (VMOVSSrr (v4f32 VR128:$src1),
                      (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;

  // Shuffle with VMOVSD
  def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
            (VMOVSDrr VR128:$src1, FR64:$src2)>;
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr (v2i64 VR128:$src1),
                      (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
  def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr (v2f64 VR128:$src1),
                      (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
  def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2),
                                                   sub_sd))>;
  def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2),
                                                   sub_sd))>;

  // FIXME: Instead of a X86Movlps there should be a X86Movsd here, the problem
  // is during lowering, where it's not possible to recognize the fold because
  // it has two uses through a bitcast. One use disappears at isel time and the
  // fold opportunity reappears.
  def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2),
                                                   sub_sd))>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
            (VMOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2),
                                                   sub_sd))>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
                            X86MemOperand x86memop, PatFrag ld_frag,
                            string asm, Domain d,
                            bit IsReMaterializable = 1> {
let neverHasSideEffects = 1 in
  def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
  def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (ld_frag addr:$src))], d>;
}
defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                              "movaps", SSEPackedSingle>, TB, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                              "movapd", SSEPackedDouble>, TB, OpSize, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                              "movups", SSEPackedSingle>, TB, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                              "movupd", SSEPackedDouble, 0>, TB, OpSize, VEX;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
                              "movaps", SSEPackedSingle>, TB, VEX;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
                              "movapd", SSEPackedDouble>, TB, OpSize, VEX;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
                              "movups", SSEPackedSingle>, TB, VEX;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
                              "movupd", SSEPackedDouble, 0>, TB, OpSize, VEX;
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
                              "movaps", SSEPackedSingle>, TB;
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
                              "movapd", SSEPackedDouble>, TB, OpSize;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
                              "movups", SSEPackedSingle>, TB;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
                              "movupd", SSEPackedDouble, 0>, TB, OpSize;
def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;

def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
          (VMOVUPSYmr addr:$dst, VR256:$src)>;

def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
          (VMOVUPDYmr addr:$dst, VR256:$src)>;
def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movaps\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movapd\t{$src, $dst|$dst, $src}",
                   [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movups\t{$src, $dst|$dst, $src}",
                   [(store (v4f32 VR128:$src), addr:$dst)]>;
def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                   "movupd\t{$src, $dst|$dst, $src}",
                   [(store (v2f64 VR128:$src), addr:$dst)]>;
let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
            (VMOVUPDmr addr:$dst, VR128:$src)>;
}

let Predicates = [HasSSE1] in
  def : Pat<(int_x86_sse_storeu_ps addr:$dst, VR128:$src),
            (MOVUPSmr addr:$dst, VR128:$src)>;
let Predicates = [HasSSE2] in
  def : Pat<(int_x86_sse2_storeu_pd addr:$dst, VR128:$src),
            (MOVUPDmr addr:$dst, VR128:$src)>;
// Use movaps / movups for SSE integer load / store (one byte shorter).
// The instructions selected below are then converted to MOVDQA/MOVDQU
// during the SSE domain pass.
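// For example, (store (v4i32 VR128:$src), addr:$dst) selects to MOVUPSmr
// below; if the surrounding instructions keep the value in the integer
// domain, the domain-fixing pass is then free to rewrite that MOVUPS into
// MOVDQU.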
let Predicates = [HasSSE1] in {
  def : Pat<(alignedloadv4i32 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv4i32 addr:$src),
            (MOVUPSrm addr:$src)>;
  def : Pat<(alignedloadv2i64 addr:$src),
            (MOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (MOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (MOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (MOVUPSmr addr:$dst, VR128:$src)>;
}
// Use vmovaps/vmovups for AVX integer load/store.
let Predicates = [HasAVX] in {
  // 128-bit load/store
  def : Pat<(alignedloadv4i32 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv4i32 addr:$src),
            (VMOVUPSrm addr:$src)>;
  def : Pat<(alignedloadv2i64 addr:$src),
            (VMOVAPSrm addr:$src)>;
  def : Pat<(loadv2i64 addr:$src),
            (VMOVUPSrm addr:$src)>;

  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
            (VMOVAPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
            (VMOVUPSmr addr:$dst, VR128:$src)>;

  // 256-bit load/store
  def : Pat<(alignedloadv4i64 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv4i64 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedloadv8i32 addr:$src),
            (VMOVAPSYrm addr:$src)>;
  def : Pat<(loadv8i32 addr:$src),
            (VMOVUPSYrm addr:$src)>;
  def : Pat<(alignedstore (v4i64 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore (v8i32 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore (v16i16 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(alignedstore (v32i8 VR256:$src), addr:$dst),
            (VMOVAPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v4i64 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v8i32 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v16i16 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
  def : Pat<(store (v32i8 VR256:$src), addr:$dst),
            (VMOVUPSYmr addr:$dst, VR256:$src)>;
}
// Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
// bits are disregarded. FIXME: Set encoding to pseudo!
let neverHasSideEffects = 1 in {
  def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                       "movaps\t{$src, $dst|$dst, $src}", []>;
  def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                       "movapd\t{$src, $dst|$dst, $src}", []>;
  def FsVMOVAPSrr : VPSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                         "movaps\t{$src, $dst|$dst, $src}", []>, VEX;
  def FsVMOVAPDrr : VPDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                         "movapd\t{$src, $dst|$dst, $src}", []>, VEX;
}
// Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
// bits are disregarded. FIXME: Set encoding to pseudo!
let canFoldAsLoad = 1, isReMaterializable = 1 in {
  def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
                       "movaps\t{$src, $dst|$dst, $src}",
                       [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
  def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
                       "movapd\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
  let isCodeGenOnly = 1 in {
    def FsVMOVAPSrm : VPSI<0x28, MRMSrcMem, (outs FR32:$dst),
                           (ins f128mem:$src),
                           "movaps\t{$src, $dst|$dst, $src}",
                           [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>, VEX;
    def FsVMOVAPDrm : VPDI<0x28, MRMSrcMem, (outs FR64:$dst),
                           (ins f128mem:$src),
                           "movapd\t{$src, $dst|$dst, $src}",
                           [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>, VEX;
  }
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low packed FP Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_mov_hilo_packed<bits<8> opc, RegisterClass RC,
                                 PatFrag mov_frag, string base_opc,
                                 string asm_opr> {
  def PSrm : PI<opc, MRMSrcMem,
         (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
         !strconcat(base_opc, "s", asm_opr),
     [(set VR128:$dst,
       (mov_frag VR128:$src1,
        (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
      SSEPackedSingle>, TB;

  def PDrm : PI<opc, MRMSrcMem,
         (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
         !strconcat(base_opc, "d", asm_opr),
     [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
                            (scalar_to_vector (loadf64 addr:$src2)))))],
      SSEPackedDouble>, TB, OpSize;
}
let AddedComplexity = 20 in {
  defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
                                   "\t{$src2, $dst|$dst, $src2}">;
}

def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>, VEX;
def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>, VEX;
def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
                                 (iPTR 0))), addr:$dst)]>;
def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movlpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract (v2f64 VR128:$src),
                                 (iPTR 0))), addr:$dst)]>;
let Predicates = [HasAVX] in {
  let AddedComplexity = 20 in {
    // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
    def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
              (VMOVLPSrm VR128:$src1, addr:$src2)>;
    def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
              (VMOVLPSrm VR128:$src1, addr:$src2)>;
    // vector_shuffle v1, (load v2) <2, 1> using MOVLPS
    def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
              (VMOVLPDrm VR128:$src1, addr:$src2)>;
    def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
              (VMOVLPDrm VR128:$src1, addr:$src2)>;
  }

  // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
  def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)),
                                 VR128:$src2)), addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;

  // (store (vector_shuffle (load addr), v2, <2, 1>), addr) using MOVLPS
  def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;

  // Shuffle with VMOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlps VR128:$src1,
                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (VMOVLPSrm VR128:$src1, addr:$src2)>;

  // Shuffle with VMOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2f64 (X86Movlpd VR128:$src1,
                      (scalar_to_vector (loadf64 addr:$src2)))),
            (VMOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
            (VMOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (VMOVLPDmr addr:$src1, VR128:$src2)>;
}
let Predicates = [HasSSE1] in {
  let AddedComplexity = 20 in {
    // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
    def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
              (MOVLPSrm VR128:$src1, addr:$src2)>;
    def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
              (MOVLPSrm VR128:$src1, addr:$src2)>;
  }

  // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
  def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)),
                                 VR128:$src2)), addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;

  // Shuffle with MOVLPS
  def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlps VR128:$src1,
                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (MOVLPSrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v4i32 (X86Movlps
                 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
                   addr:$src1),
            (MOVLPSmr addr:$src1, VR128:$src2)>;
}
let Predicates = [HasSSE2] in {
  let AddedComplexity = 20 in {
    // vector_shuffle v1, (load v2) <2, 1> using MOVLPS
    def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
              (MOVLPDrm VR128:$src1, addr:$src2)>;
    def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
              (MOVLPDrm VR128:$src1, addr:$src2)>;
  }

  // (store (vector_shuffle (load addr), v2, <2, 1>), addr) using MOVLPS
  def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;

  // Shuffle with MOVLPD
  def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2f64 (X86Movlpd VR128:$src1,
                      (scalar_to_vector (loadf64 addr:$src2)))),
            (MOVLPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
  def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),
                   addr:$src1),
            (MOVLPDmr addr:$src1, VR128:$src2)>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Hi packed FP Instructions
//===----------------------------------------------------------------------===//

let AddedComplexity = 20 in {
  defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                     "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
                                   "\t{$src2, $dst|$dst, $src2}">;
}

// v2f64 extract element 1 is always custom lowered to unpack high to low
// and extract element 0 so the non-store version isn't too horrible.
def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>,
                   VEX;
def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>,
                   VEX;
def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhps\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
                                         (undef)), (iPTR 0))), addr:$dst)]>;
def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
                   "movhpd\t{$src, $dst|$dst, $src}",
                   [(store (f64 (vector_extract
                                 (v2f64 (unpckh VR128:$src, (undef))),
                                 (iPTR 0))), addr:$dst)]>;
let Predicates = [HasAVX] in {
  // MOVHPS patterns
  def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (VMOVHPSrm VR128:$src1, addr:$src2)>;

  // FIXME: Instead of X86Unpcklpd, there should be a X86Movlhpd here, the
  // problem is during lowering, where it's not possible to recognize the load
  // fold because it has two uses through a bitcast. One use disappears at isel
  // time and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
                     (scalar_to_vector (loadf64 addr:$src2)))),
            (VMOVHPDrm VR128:$src1, addr:$src2)>;

  // FIXME: This should be matched by a X86Movhpd instead. Same as above
  def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
                     (scalar_to_vector (loadf64 addr:$src2)))),
            (VMOVHPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (f64 (vector_extract
              (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))), addr:$dst),
            (VMOVHPSmr addr:$dst, VR128:$src)>;
  def : Pat<(store (f64 (vector_extract
              (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))), addr:$dst),
            (VMOVHPDmr addr:$dst, VR128:$src)>;
}
let Predicates = [HasSSE1] in {
  // MOVHPS patterns
  def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(X86Movlhps VR128:$src1,
                 (bc_v4f32 (v2i64 (X86vzload addr:$src2)))),
            (MOVHPSrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (f64 (vector_extract
              (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))), addr:$dst),
            (MOVHPSmr addr:$dst, VR128:$src)>;
}
let Predicates = [HasSSE2] in {
  // FIXME: Instead of X86Unpcklpd, there should be a X86Movlhpd here, the
  // problem is during lowering, where it's not possible to recognize the load
  // fold because it has two uses through a bitcast. One use disappears at isel
  // time and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
                     (scalar_to_vector (loadf64 addr:$src2)))),
            (MOVHPDrm VR128:$src1, addr:$src2)>;

  // FIXME: This should be matched by a X86Movhpd instead. Same as above
  def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
                     (scalar_to_vector (loadf64 addr:$src2)))),
            (MOVHPDrm VR128:$src1, addr:$src2)>;

  // Store patterns
  def : Pat<(store (f64 (vector_extract
              (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))),addr:$dst),
            (MOVHPDmr addr:$dst, VR128:$src)>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Low to High and High to Low packed FP Instructions
//===----------------------------------------------------------------------===//

let AddedComplexity = 20 in {
  def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                         (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
                      VEX_4V;
  def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                         (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
                      VEX_4V;
}
let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
  def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movlhps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
  def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
                                       (ins VR128:$src1, VR128:$src2),
                      "movhlps\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst,
                        (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
}
let Predicates = [HasAVX] in {
  // MOVLHPS patterns
  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (movddup VR128:$src, (undef))),
              (VMOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
    def : Pat<(v2i64 (movddup VR128:$src, (undef))),
              (VMOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;

    // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
    def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
              (VMOVLHPSrr VR128:$src1, VR128:$src2)>;

    def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
              (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
              (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
              (VMOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
  }

  // MOVHLPS patterns
  let AddedComplexity = 20 in {
    // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
    def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
              (VMOVHLPSrr VR128:$src1, VR128:$src2)>;

    // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
    def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
              (VMOVHLPSrr VR128:$src1, VR128:$src1)>;
    def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
              (VMOVHLPSrr VR128:$src1, VR128:$src1)>;
  }

  def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
}
let Predicates = [HasSSE1] in {
  // MOVLHPS patterns
  let AddedComplexity = 20 in {
    def : Pat<(v4f32 (movddup VR128:$src, (undef))),
              (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
    def : Pat<(v2i64 (movddup VR128:$src, (undef))),
              (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;

    // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
    def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
              (MOVLHPSrr VR128:$src1, VR128:$src2)>;

    def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
              (MOVLHPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
              (MOVLHPSrr VR128:$src1, VR128:$src2)>;
    def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
              (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
  }

  // MOVHLPS patterns
  let AddedComplexity = 20 in {
    // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
    def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
              (MOVHLPSrr VR128:$src1, VR128:$src2)>;

    // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
    def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
              (MOVHLPSrr VR128:$src1, VR128:$src1)>;
    def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
              (MOVHLPSrr VR128:$src1, VR128:$src1)>;
  }

  def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (MOVHLPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
            (MOVHLPSrr VR128:$src1, VR128:$src2)>;
}
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Conversion Instructions
//===----------------------------------------------------------------------===//

multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                       SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                       string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
}
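
// Editor's note: an illustrative sketch of how sse12_cvt_s expands (FOO is a
// hypothetical name used only in this comment, not defined in this file):
//   defm FOO : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
//                          "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
// would produce FOOrr (register source) and FOOrm (the same conversion with
// the load folded through ld_frag), both sharing one asm string.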

multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                       SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
                       string asm, Domain d> {
  def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
              [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
  def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
              [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
}

multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          X86MemOperand x86memop, string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src),
              !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
}
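
// Editor's note (illustration, not from the original source): in these AVX
// scalar converts $src1 exists only to supply the untouched upper elements of
// the destination, e.g. "vcvtsi2ss %eax, %xmm1, %xmm0" converts EAX into the
// low element and copies the remaining elements from %xmm1. That is why the
// HasAVX patterns below pass (IMPLICIT_DEF) as $src1 when the upper bits are
// don't-care.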

defm VCVTTSS2SI   : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                                "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                                "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
                                VEX_W;
defm VCVTTSD2SI   : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                                "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                                "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
                                VEX, VEX_W;

// The assembler can recognize rr 64-bit instructions by seeing an rxx
// register, but the same isn't true when only memory operands are used;
// provide explicit "l" and "q" assembly forms to address this where
// appropriate.
defm VCVTSI2SS   : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
                                  VEX_4V;
defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
                                  VEX_4V, VEX_W;
defm VCVTSI2SD   : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
                                  VEX_4V;
defm VCVTSI2SDL  : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
                                  VEX_4V;
defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
                                  VEX_4V, VEX_W;

let Predicates = [HasAVX] in {
  def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
            (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
            (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
            (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;

  def : Pat<(f32 (sint_to_fp GR32:$src)),
            (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f32 (sint_to_fp GR64:$src)),
            (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
  def : Pat<(f64 (sint_to_fp GR32:$src)),
            (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
  def : Pat<(f64 (sint_to_fp GR64:$src)),
            (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
}

defm CVTTSS2SI   : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
                               "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
                               "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTTSD2SI   : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
                               "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
                               "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
defm CVTSI2SS    : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
                               "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
defm CVTSI2SS64  : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
                               "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTSI2SD    : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
                               "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
defm CVTSI2SD64  : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
                               "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;

// Conversion Instructions Intrinsics - Match intrinsics which expect MM
// and/or XMM operand(s).

multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
                          Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
                          string asm> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int SrcRC:$src))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
}

multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
                    RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
                    PatFrag ld_frag, string asm, bit Is2Addr = 1> {
  def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
  def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
              (ins DstRC:$src1, x86memop:$src2),
              !if(Is2Addr,
                  !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
                  !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
              [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
}
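
// Editor's note (illustration, assuming the usual Is2Addr convention in this
// file): with the default Is2Addr = 1 the !if above picks the tied two-operand
// string, e.g. "cvtsi2ss\t{$src2, $dst|$dst, $src2}", and instantiations are
// wrapped in "let Constraints = "$src1 = $dst"". Passing Is2Addr = 0, as the
// VEX_4V forms below do, selects the three-operand AVX spelling
// "cvtsi2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}".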

defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                                    f128mem, load, "cvtsd2si">, XD, VEX;
defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
                       int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
                       XD, VEX, VEX_W;

// FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_
// prefixes. Get rid of this hack or rename the intrinsics; several
// instructions only match in their intrinsic form, so why create duplicates
// just to let the assembler recognize them?
let Pattern = []<dag> in {
defm VCVTSD2SI   : sse12_cvt_s<0x2D, FR64, GR32, undef, f64mem, load,
                               "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
defm VCVTSD2SI64 : sse12_cvt_s<0x2D, FR64, GR64, undef, f64mem, load,
                               "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;

defm CVTSD2SI    : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
                                  f128mem, load, "cvtsd2si{l}">, XD;
defm CVTSD2SI64  : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
                                  f128mem, load, "cvtsd2si{q}">, XD, REX_W;
}

defm Int_VCVTSI2SS   : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
          int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
          int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
          VEX_W;
defm Int_VCVTSI2SD   : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
          int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
          int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
          VEX_4V, VEX_W;

let Constraints = "$src1 = $dst" in {
  defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse_cvtsi2ss, i32mem, loadi32,
                        "cvtsi2ss">, XS;
  defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                        int_x86_sse_cvtsi642ss, i64mem, loadi64,
                        "cvtsi2ss{q}">, XS, REX_W;
  defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse2_cvtsi2sd, i32mem, loadi32,
                        "cvtsi2sd">, XD;
  defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
                        int_x86_sse2_cvtsi642sd, i64mem, loadi64,
                        "cvtsi2sd">, XD, REX_W;
}

// Aliases for intrinsics
defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                     f32mem, load, "cvttss2si">, XS, VEX;
defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                       int_x86_sse_cvttss2si64, f32mem, load,
                                       "cvttss2si">, XS, VEX, VEX_W;
defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                     f128mem, load, "cvttsd2si">, XD, VEX;
defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                       int_x86_sse2_cvttsd2si64, f128mem, load,
                                       "cvttsd2si">, XD, VEX, VEX_W;
defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
                                    f32mem, load, "cvttss2si">, XS;
defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                      int_x86_sse_cvttss2si64, f32mem, load,
                                      "cvttss2si{q}">, XS, REX_W;
defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
                                    f128mem, load, "cvttsd2si">, XD;
defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
                                      int_x86_sse2_cvttsd2si64, f128mem, load,
                                      "cvttsd2si{q}">, XD, REX_W;

let Pattern = []<dag> in {
defm VCVTSS2SI   : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
                               "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
                               "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
                               VEX_W;
defm VCVTDQ2PS   : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
                               "cvtdq2ps\t{$src, $dst|$dst, $src}",
                               SSEPackedSingle>, TB, VEX;
defm VCVTDQ2PSY  : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
                               "cvtdq2ps\t{$src, $dst|$dst, $src}",
                               SSEPackedSingle>, TB, VEX;
}

let Pattern = []<dag> in {
defm CVTSS2SI   : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
                              "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
                              "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
defm CVTDQ2PS   : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
                              "cvtdq2ps\t{$src, $dst|$dst, $src}",
                              SSEPackedSingle>, TB; /* PD SSE3 form is available */
}

let Predicates = [HasSSE1] in {
  def : Pat<(int_x86_sse_cvtss2si VR128:$src),
            (CVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
            (CVTSS2SIrm addr:$src)>;
  def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
            (CVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
            (CVTSS2SI64rm addr:$src)>;
}

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse_cvtss2si VR128:$src),
            (VCVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
            (VCVTSS2SIrm addr:$src)>;
  def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
            (VCVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
  def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
            (VCVTSS2SI64rm addr:$src)>;
}

// Convert scalar double to scalar single
def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
                       (ins FR64:$src1, FR64:$src2),
                       "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                       VEX_4V;
def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
                    (ins FR64:$src1, f64mem:$src2),
                    "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;

def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
          Requires<[HasAVX]>;

def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
                     "cvtsd2ss\t{$src, $dst|$dst, $src}",
                     [(set FR32:$dst, (fround FR64:$src))]>;
def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
                   "cvtsd2ss\t{$src, $dst|$dst, $src}",
                   [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
                   Requires<[HasSSE2, OptForSize]>;

defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
                      int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
                      XS, VEX_4V;
let Constraints = "$src1 = $dst" in
defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
                      int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;

// Convert scalar single to scalar double
// SSE2 instructions with XS prefix
def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
                    (ins FR32:$src1, FR32:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XS, Requires<[HasAVX]>, VEX_4V;
def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
                    (ins FR32:$src1, f32mem:$src2),
                    "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;

let Predicates = [HasAVX] in {
  def : Pat<(f64 (fextend FR32:$src)),
            (VCVTSS2SDrr FR32:$src, FR32:$src)>;
  def : Pat<(fextend (loadf32 addr:$src)),
            (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
  def : Pat<(extloadf32 addr:$src),
            (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
}

def : Pat<(extloadf32 addr:$src),
          (VCVTSS2SDrr (f32 (IMPLICIT_DEF)), (MOVSSrm addr:$src))>,
      Requires<[HasAVX, OptForSpeed]>;

def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (fextend FR32:$src))]>, XS,
                   Requires<[HasSSE2]>;
def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
                   "cvtss2sd\t{$src, $dst|$dst, $src}",
                   [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
                   Requires<[HasSSE2, OptForSize]>;

// extload f32 -> f64.  This matches load+fextend because we have a hack in
// the isel (PreprocessForFPConvert) that can introduce loads after dag
// combine.
// Since these loads aren't folded into the fextend, we have to match it
// explicitly here.
def : Pat<(fextend (loadf32 addr:$src)),
          (CVTSS2SDrm addr:$src)>, Requires<[HasSSE2]>;
def : Pat<(extloadf32 addr:$src),
          (CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[HasSSE2, OptForSpeed]>;
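
// Editor's note (illustrative, assumed codegen): for a speed-optimized
// function the pattern above turns "(f64 (extloadf32 addr))" into two
// instructions,
//   movss    (%rax), %xmm0
//   cvtss2sd %xmm0, %xmm0
// rather than the single folded CVTSS2SDrm used under OptForSize, trading
// code size for a register-register convert.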

def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
                       (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                       "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                          VR128:$src2))]>, XS, VEX_4V,
                       Requires<[HasAVX]>;
def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
                       (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                       "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                       [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                          (load addr:$src2)))]>, XS, VEX_4V,
                       Requires<[HasAVX]>;
let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
                      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                      "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                         VR128:$src2))]>, XS,
                      Requires<[HasSSE2]>;
def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
                      (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
                      "cvtss2sd\t{$src2, $dst|$dst, $src2}",
                      [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
                                         (load addr:$src2)))]>, XS,
                      Requires<[HasSSE2]>;
}

// Convert doubleword to packed single/double fp
// SSE2 instructions without OpSize prefix
def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                      TB, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vcvtdq2ps\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                      TB, VEX, Requires<[HasAVX]>;
def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "cvtdq2ps\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     TB, Requires<[HasSSE2]>;

// FIXME: why is the non-intrinsic version described as SSE3?
// SSE2 instructions with XS prefix
def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                      XS, VEX, Requires<[HasAVX]>;
def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                        "vcvtdq2pd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                      XS, VEX, Requires<[HasAVX]>;
def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
                     XS, Requires<[HasSSE2]>;
def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                       "cvtdq2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
                                        (bitconvert (memopv2i64 addr:$src))))]>,
                     XS, Requires<[HasSSE2]>;

// Convert packed single/double fp to doubleword
def VCVTPS2DQrr  : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQrm  : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def CVTPS2DQrr   : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
def CVTPS2DQrm   : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtps2dq\t{$src, $dst|$dst, $src}", []>;

def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtps2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
                        VEX;
def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
                           (ins f128mem:$src),
                           "cvtps2dq\t{$src, $dst|$dst, $src}",
                           [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                              (memop addr:$src)))]>, VEX;
def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtps2dq
                                            (memop addr:$src)))]>;

// SSE2 packed instructions with XD prefix
def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                      XD, VEX, Requires<[HasAVX]>;
def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "vcvtpd2dq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                           (memop addr:$src)))]>,
                      XD, VEX, Requires<[HasAVX]>;
def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
                     XD, Requires<[HasSSE2]>;
def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                       "cvtpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
                                          (memop addr:$src)))]>,
                     XD, Requires<[HasSSE2]>;

// Convert with truncation packed single/double fp to doubleword
// SSE2 packed instructions with XS prefix
def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                         "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                            (int_x86_sse2_cvttps2dq VR128:$src))]>;
def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttps2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                            (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;

def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "vcvttps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst,
                               (int_x86_sse2_cvttps2dq VR128:$src))]>,
                       XS, VEX, Requires<[HasAVX]>;
def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "vcvttps2dq\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvttps2dq
                                            (memop addr:$src)))]>,
                       XS, VEX, Requires<[HasAVX]>;

let Predicates = [HasSSE2] in {
  def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
            (Int_CVTDQ2PSrr VR128:$src)>;
  def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
            (CVTTPS2DQrr VR128:$src)>;
}

let Predicates = [HasAVX] in {
  def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
            (Int_VCVTDQ2PSrr VR128:$src)>;
  def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
            (VCVTTPS2DQrr VR128:$src)>;
  def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
            (VCVTDQ2PSYrr VR256:$src)>;
  def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
            (VCVTTPS2DQYrr VR256:$src)>;
}

def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
                            (ins VR128:$src),
                            "cvttpd2dq\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
                       VEX;
def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
                            (ins f128mem:$src),
                            "cvttpd2dq\t{$src, $dst|$dst, $src}",
                            [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                               (memop addr:$src)))]>, VEX;
def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvttpd2dq\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
                                         (memop addr:$src)))]>;

// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                          "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                         "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                         "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;

// Convert packed single to packed double
let Predicates = [HasAVX] in {
  // SSE2 instructions without OpSize prefix
def VCVTPS2PDrr  : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
def VCVTPS2PDrm  : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
                     "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
}
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                   "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;

def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vcvtps2pd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                      TB, VEX, Requires<[HasAVX]>;
def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                        "vcvtps2pd\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                           (load addr:$src)))]>,
                      TB, VEX, Requires<[HasAVX]>;
def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
                     TB, Requires<[HasSSE2]>;
def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                       "cvtps2pd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse2_cvtps2pd
                                          (load addr:$src)))]>,
                     TB, Requires<[HasSSE2]>;

// Convert packed double to packed single
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
def VCVTPD2PSrr   : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                        "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                        "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                        "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;

def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "cvtpd2ps\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
                           (ins f128mem:$src),
                           "cvtpd2ps\t{$src, $dst|$dst, $src}",
                           [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                              (memop addr:$src)))]>;
def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                         "cvtpd2ps\t{$src, $dst|$dst, $src}",
                         [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
                                            (memop addr:$src)))]>;

// AVX 256-bit register conversion intrinsics
// FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
// whenever possible to avoid declaring two versions of each one.
def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
          (VCVTDQ2PSYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
          (VCVTDQ2PSYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
          (VCVTPD2PSYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
          (VCVTPD2PSYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
          (VCVTPS2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
          (VCVTPS2DQYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
          (VCVTPS2PDYrr VR128:$src)>;
def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
          (VCVTPS2PDYrm addr:$src)>;

def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
          (VCVTTPD2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
          (VCVTTPD2DQYrm addr:$src)>;

def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
          (VCVTTPS2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
          (VCVTTPS2DQYrm addr:$src)>;

// Match fround and fextend for 128/256-bit conversions
def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
          (VCVTPD2PSYrr VR256:$src)>;
def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
          (VCVTPD2PSYrm addr:$src)>;

def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
          (VCVTPS2PDYrr VR128:$src)>;
def : Pat<(v4f64 (fextend (loadv4f32 addr:$src))),
          (VCVTPS2PDYrm addr:$src)>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Compare Instructions
//===----------------------------------------------------------------------===//

// sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
                            string asm, string asm_alt> {
  let isAsmParserOnly = 1 in {
    def rr : SIi8<0xC2, MRMSrcReg,
                  (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
                  asm, []>;
    let mayLoad = 1 in
    def rm : SIi8<0xC2, MRMSrcMem,
                  (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
                  asm, []>;
  }

  // Accept explicit immediate argument form instead of comparison code.
  def rr_alt : SIi8<0xC2, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
                    asm_alt, []>;
  let mayLoad = 1 in
  def rm_alt : SIi8<0xC2, MRMSrcMem,
                    (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
                    asm_alt, []>;
}

let neverHasSideEffects = 1 in {
  defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
                "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
                "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
                XS, VEX_4V;
  defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
                "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
                "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
                XD, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
def CMPSSrr : SIi8<0xC2, MRMSrcReg,
                  (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, SSECC:$cc),
                  "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), FR32:$src2, imm:$cc))]>, XS;
def CMPSSrm : SIi8<0xC2, MRMSrcMem,
                  (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2, SSECC:$cc),
                  "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
                  [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), (loadf32 addr:$src2), imm:$cc))]>, XS;
def CMPSDrr : SIi8<0xC2, MRMSrcReg,
                  (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, SSECC:$cc),
                  "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), FR64:$src2, imm:$cc))]>, XD;
def CMPSDrm : SIi8<0xC2, MRMSrcMem,
                  (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2, SSECC:$cc),
                  "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
                  [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), (loadf64 addr:$src2), imm:$cc))]>, XD;
}
let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
def CMPSSrr_alt : SIi8<0xC2, MRMSrcReg,
                  (outs FR32:$dst), (ins FR32:$src1, FR32:$src, i8imm:$src2),
                  "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
def CMPSSrm_alt : SIi8<0xC2, MRMSrcMem,
                  (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, i8imm:$src2),
                  "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
def CMPSDrr_alt : SIi8<0xC2, MRMSrcReg,
                  (outs FR64:$dst), (ins FR64:$src1, FR64:$src, i8imm:$src2),
                  "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
def CMPSDrm_alt : SIi8<0xC2, MRMSrcMem,
                  (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, i8imm:$src2),
                  "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
}
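
// Editor's note (hypothetical assembly, for illustration only): the ${cc}
// forms above print one pseudo-mnemonic per SSECC value, so condition 0 is
// written "cmpeqss %xmm1, %xmm0", while the *_alt forms accept the raw
// immediate spelling "cmpss $0, %xmm1, %xmm0", letting the asm parser handle
// either syntax.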

multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
                                Intrinsic Int, string asm> {
  def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
                (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
                [(set VR128:$dst, (Int VR128:$src1,
                                       VR128:$src, imm:$cc))]>;
  def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
                (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
                [(set VR128:$dst, (Int VR128:$src1,
                                       (load addr:$src), imm:$cc))]>;
}

// Aliases to match intrinsics which expect XMM operand(s).
defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
                    "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
                    XS, VEX_4V;
defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
                    "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
                    XD, VEX_4V;
let Constraints = "$src1 = $dst" in {
  defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
                     "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
  defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
                     "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
}

// sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
                         ValueType vt, X86MemOperand x86memop,
                         PatFrag ld_frag, string OpcodeStr, Domain d> {
  def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
             [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
  def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
             [(set EFLAGS, (OpNode (vt RC:$src1),
                                   (ld_frag addr:$src2)))], d>;
}
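
// Editor's note: a sketch of an instantiation (mirroring the UCOMISS defm
// below; FOO is a hypothetical name used only in this comment):
//   defm FOO : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
//                            "ucomiss", SSEPackedSingle>, TB;
// expands to FOOrr/FOOrm with no register results; the comparison is only
// observable through EFLAGS, set under "let Defs = [EFLAGS]" below.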

let Defs = [EFLAGS] in {
  defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
                                "ucomiss", SSEPackedSingle>, TB, VEX;
  defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
                                "ucomisd", SSEPackedDouble>, TB, OpSize, VEX;
  let Pattern = []<dag> in {
    defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
                                 "comiss", SSEPackedSingle>, TB, VEX;
    defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
                                 "comisd", SSEPackedDouble>, TB, OpSize, VEX;
  }

  defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
                                    load, "ucomiss", SSEPackedSingle>, TB, VEX;
  defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
                                    load, "ucomisd", SSEPackedDouble>, TB, OpSize, VEX;

  defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
                                   load, "comiss", SSEPackedSingle>, TB, VEX;
  defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
                                   load, "comisd", SSEPackedDouble>, TB, OpSize, VEX;
  defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
                               "ucomiss", SSEPackedSingle>, TB;
  defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
                               "ucomisd", SSEPackedDouble>, TB, OpSize;

  let Pattern = []<dag> in {
    defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
                                "comiss", SSEPackedSingle>, TB;
    defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
                                "comisd", SSEPackedDouble>, TB, OpSize;
  }

  defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
                                   load, "ucomiss", SSEPackedSingle>, TB;
  defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
                                   load, "ucomisd", SSEPackedDouble>, TB, OpSize;

  defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
                                  "comiss", SSEPackedSingle>, TB;
  defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
                                  "comisd", SSEPackedDouble>, TB, OpSize;
} // Defs = [EFLAGS]

// sse12_cmp_packed - sse 1 & 2 compare packed instructions
multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
                            Intrinsic Int, string asm, string asm_alt,
                            Domain d> {
  let isAsmParserOnly = 1 in {
    def rri : PIi8<0xC2, MRMSrcReg,
               (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
               [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
    def rmi : PIi8<0xC2, MRMSrcMem,
               (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
               [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
  }

  // Accept explicit immediate argument form instead of comparison code.
  def rri_alt : PIi8<0xC2, MRMSrcReg,
                 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
                 asm_alt, [], d>;
  def rmi_alt : PIi8<0xC2, MRMSrcMem,
                 (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
                 asm_alt, [], d>;
}

defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
               "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
               "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
               SSEPackedSingle>, TB, VEX_4V;
defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
               "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
               "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
               SSEPackedDouble>, TB, OpSize, VEX_4V;
defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
               "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
               "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
               SSEPackedSingle>, TB, VEX_4V;
defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
               "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
               "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
               SSEPackedDouble>, TB, OpSize, VEX_4V;
let Constraints = "$src1 = $dst" in {
  defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
                 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
                 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
                 SSEPackedSingle>, TB;
  defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
                 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
                 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
                 SSEPackedDouble>, TB, OpSize;
}

let Predicates = [HasSSE1] in {
def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
          (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
          (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
}

let Predicates = [HasSSE2] in {
def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
          (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
          (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
}

let Predicates = [HasAVX] in {
def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
          (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
          (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
          (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
          (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;

def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
          (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), (memop addr:$src2), imm:$cc)),
          (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
          (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), (memop addr:$src2), imm:$cc)),
          (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Shuffle Instructions
//===----------------------------------------------------------------------===//

/// sse12_shuffle - sse 1 & 2 shuffle instructions
multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
                         ValueType vt, string asm, PatFrag mem_frag,
                         Domain d, bit IsConvertibleToThreeAddress = 0> {
  def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
                 (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
                 [(set RC:$dst, (vt (shufp:$src3
                                     RC:$src1, (mem_frag addr:$src2))))], d>;
  let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
  def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
                 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
                 [(set RC:$dst,
                       (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
}
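
// Editor's note (assumed immediate semantics, shown for clarity): in
//   shufps $0xe4, %xmm1, %xmm0
// each 2-bit field of the immediate selects one result element, the low two
// fields indexing $src1 and the high two indexing $src2; 0xe4 = 0b11100100
// therefore yields element selection <0, 1, 2, 3> (low half from $src1, high
// half from $src2).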

defm VSHUFPS  : sse12_shuffle<VR128, f128mem, v4f32,
           "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           memopv4f32, SSEPackedSingle>, TB, VEX_4V;
defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
           "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           memopv8f32, SSEPackedSingle>, TB, VEX_4V;
defm VSHUFPD  : sse12_shuffle<VR128, f128mem, v2f64,
           "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           memopv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
           "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V;

let Constraints = "$src1 = $dst" in {
  defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
                    "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
                    TB;
  defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
                    "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
                    memopv2f64, SSEPackedDouble>, TB, OpSize;
}

let Predicates = [HasSSE1] in {
  def : Pat<(v4f32 (X86Shufps VR128:$src1,
                       (memopv4f32 addr:$src2), (i8 imm:$imm))),
            (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
  def : Pat<(v4i32 (X86Shufps VR128:$src1,
                       (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
            (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
  // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
  // fall back to this for SSE1)
  def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
            (SHUFPSrri VR128:$src2, VR128:$src1,
                       (SHUFFLE_get_shuf_imm VR128:$src3))>;
  // Special unary SHUFPSrri case.
  def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
            (SHUFPSrri VR128:$src1, VR128:$src1,
                       (SHUFFLE_get_shuf_imm VR128:$src3))>;
}

let Predicates = [HasSSE2] in {
  // Special binary v4i32 shuffle cases with SHUFPS.
  def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
            (SHUFPSrri VR128:$src1, VR128:$src2,
                       (SHUFFLE_get_shuf_imm VR128:$src3))>;
  def : Pat<(v4i32 (shufp:$src3 VR128:$src1,
                       (bc_v4i32 (memopv2i64 addr:$src2)))),
            (SHUFPSrmi VR128:$src1, addr:$src2,
                       (SHUFFLE_get_shuf_imm VR128:$src3))>;
  // Special unary SHUFPDrri cases.
  def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
            (SHUFPDrri VR128:$src1, VR128:$src1,
                       (SHUFFLE_get_shuf_imm VR128:$src3))>;
  def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
            (SHUFPDrri VR128:$src1, VR128:$src1,
                       (SHUFFLE_get_shuf_imm VR128:$src3))>;
  // Special binary v2i64 shuffle cases using SHUFPDrri.
  def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
            (SHUFPDrri VR128:$src1, VR128:$src2,
                       (SHUFFLE_get_shuf_imm VR128:$src3))>;
  // Generic SHUFPD patterns
  def : Pat<(v2f64 (X86Shufps VR128:$src1,
                       (memopv2f64 addr:$src2), (i8 imm:$imm))),
            (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
  def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
}

let Predicates = [HasAVX] in {
  def : Pat<(v4f32 (X86Shufps VR128:$src1,
                       (memopv4f32 addr:$src2), (i8 imm:$imm))),
            (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
  def : Pat<(v4i32 (X86Shufps VR128:$src1,
                       (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
            (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
  // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
  // fall back to this for SSE1)
  def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
            (VSHUFPSrri VR128:$src2, VR128:$src1,
                        (SHUFFLE_get_shuf_imm VR128:$src3))>;
  // Special unary SHUFPSrri case.
  def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
            (VSHUFPSrri VR128:$src1, VR128:$src1,
                        (SHUFFLE_get_shuf_imm VR128:$src3))>;
  // Special binary v4i32 shuffle cases with SHUFPS.
  def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
            (VSHUFPSrri VR128:$src1, VR128:$src2,
                        (SHUFFLE_get_shuf_imm VR128:$src3))>;
  def : Pat<(v4i32 (shufp:$src3 VR128:$src1,
                       (bc_v4i32 (memopv2i64 addr:$src2)))),
            (VSHUFPSrmi VR128:$src1, addr:$src2,
                        (SHUFFLE_get_shuf_imm VR128:$src3))>;
  // Special unary SHUFPDrri cases.
  def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
            (VSHUFPDrri VR128:$src1, VR128:$src1,
                        (SHUFFLE_get_shuf_imm VR128:$src3))>;
  def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
            (VSHUFPDrri VR128:$src1, VR128:$src1,
                        (SHUFFLE_get_shuf_imm VR128:$src3))>;
  // Special binary v2i64 shuffle cases using SHUFPDrri.
  def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
            (VSHUFPDrri VR128:$src1, VR128:$src2,
                        (SHUFFLE_get_shuf_imm VR128:$src3))>;

  def : Pat<(v2f64 (X86Shufps VR128:$src1,
                       (memopv2f64 addr:$src2), (i8 imm:$imm))),
            (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
  def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
  def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
            (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;

  def : Pat<(v8i32 (X86Shufps VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v8i32 (X86Shufps VR256:$src1,
                       (bc_v8i32 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
            (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;

  def : Pat<(v8f32 (X86Shufps VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v8f32 (X86Shufps VR256:$src1,
                       (memopv8f32 addr:$src2), (i8 imm:$imm))),
            (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;

  def : Pat<(v4i64 (X86Shufpd VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v4i64 (X86Shufpd VR256:$src1,
                       (memopv4i64 addr:$src2), (i8 imm:$imm))),
            (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;

  def : Pat<(v4f64 (X86Shufpd VR256:$src1, VR256:$src2, (i8 imm:$imm))),
            (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
  def : Pat<(v4f64 (X86Shufpd VR256:$src1,
                       (memopv4f64 addr:$src2), (i8 imm:$imm))),
            (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Unpack Instructions
//===----------------------------------------------------------------------===//

/// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
                                   PatFrag mem_frag, RegisterClass RC,
                                   X86MemOperand x86memop, string asm,
                                   Domain d> {
  def rr : PI<opc, MRMSrcReg,
              (outs RC:$dst), (ins RC:$src1, RC:$src2),
              asm, [(set RC:$dst,
                         (vt (OpNode RC:$src1, RC:$src2)))], d>;
  def rm : PI<opc, MRMSrcMem,
              (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
              asm, [(set RC:$dst,
                         (vt (OpNode RC:$src1,
                                     (mem_frag addr:$src2))))], d>;
}
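
// Editor's note (assumed element flow, documentation only): for the rr form,
//   unpcklps %xmm1, %xmm0
// interleaves the low halves, producing
//   xmm0 = { xmm0[0], xmm1[0], xmm0[1], xmm1[1] }
// and the rm form is the same operation with $src2 folded through mem_frag.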

let AddedComplexity = 10 in {
  defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
        VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        SSEPackedSingle>, TB, VEX_4V;
  defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
        VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        SSEPackedDouble>, TB, OpSize, VEX_4V;
  defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
        VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        SSEPackedSingle>, TB, VEX_4V;
  defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
        VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        SSEPackedDouble>, TB, OpSize, VEX_4V;

  defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
        VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        SSEPackedSingle>, TB, VEX_4V;
  defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
        VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        SSEPackedDouble>, TB, OpSize, VEX_4V;
  defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
        VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        SSEPackedSingle>, TB, VEX_4V;
  defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
        VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        SSEPackedDouble>, TB, OpSize, VEX_4V;

  let Constraints = "$src1 = $dst" in {
    defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
          VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
                          SSEPackedSingle>, TB;
    defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
          VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
                          SSEPackedDouble>, TB, OpSize;
    defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
          VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
                          SSEPackedSingle>, TB;
    defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
          VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
                          SSEPackedDouble>, TB, OpSize;
  } // Constraints = "$src1 = $dst"
} // AddedComplexity

let Predicates = [HasSSE1] in {
  def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
            (UNPCKLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
            (UNPCKLPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
            (UNPCKHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
            (UNPCKHPSrr VR128:$src1, VR128:$src2)>;
}

let Predicates = [HasSSE2] in {
  def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
            (UNPCKLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
            (UNPCKLPDrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
            (UNPCKHPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
            (UNPCKHPDrr VR128:$src1, VR128:$src2)>;

  // FIXME: Instead of X86Movddup, there should be an X86Unpcklpd here; the
  // problem is during lowering, where it's not possible to recognize the load
  // fold because it has two uses through a bitcast. One use disappears at isel
  // time and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Movddup VR128:$src)),
            (UNPCKLPDrr VR128:$src, VR128:$src)>;

  let AddedComplexity = 10 in
  def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
            (UNPCKLPDrr VR128:$src, VR128:$src)>;
}

let Predicates = [HasAVX] in {
  def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
            (VUNPCKLPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
            (VUNPCKLPSrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
            (VUNPCKHPSrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
            (VUNPCKHPSrr VR128:$src1, VR128:$src2)>;

  def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, (memopv8f32 addr:$src2))),
            (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
            (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v8i32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
            (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v8i32 (X86Unpcklpsy VR256:$src1, (memopv8i32 addr:$src2))),
            (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, (memopv8f32 addr:$src2))),
            (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
            (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v8i32 (X86Unpckhpsy VR256:$src1, (memopv8i32 addr:$src2))),
            (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v8i32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
            (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;

  def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
            (VUNPCKLPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
            (VUNPCKLPDrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
            (VUNPCKHPDrm VR128:$src1, addr:$src2)>;
  def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
            (VUNPCKHPDrr VR128:$src1, VR128:$src2)>;

  def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, (memopv4f64 addr:$src2))),
            (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
            (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v4i64 (X86Unpcklpdy VR256:$src1, (memopv4i64 addr:$src2))),
            (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v4i64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
            (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, (memopv4f64 addr:$src2))),
            (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
            (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
  def : Pat<(v4i64 (X86Unpckhpdy VR256:$src1, (memopv4i64 addr:$src2))),
            (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
  def : Pat<(v4i64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
            (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;

  // FIXME: Instead of X86Movddup, there should be an X86Unpcklpd here; the
  // problem is during lowering, where it's not possible to recognize the load
  // fold because it has two uses through a bitcast. One use disappears at isel
  // time and the fold opportunity reappears.
  def : Pat<(v2f64 (X86Movddup VR128:$src)),
            (VUNPCKLPDrr VR128:$src, VR128:$src)>;
  let AddedComplexity = 10 in
  def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
            (VUNPCKLPDrr VR128:$src, VR128:$src)>;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Extract Floating-Point Sign mask
//===----------------------------------------------------------------------===//

/// sse12_extr_sign_mask - sse 1 & 2 sign mask extraction
multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
                                Domain d> {
  def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
                !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
                [(set GR32:$dst, (Int RC:$src))], d>;
  def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
                !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
}
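
// Editor's note (hypothetical uses, for illustration): each instantiation
// yields a GR32 and a GR64 destination form of opcode 0x50, e.g.
//   movmskps %xmm0, %eax    (rr32: packs the four f32 sign bits into eax)
//   movmskps %xmm0, %rax    (rr64: same result, REX.W-encoded destination)
// Only rr32 carries a pattern; rr64 exists for the assembler.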
2470 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
2471 SSEPackedSingle>, TB;
2472 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
2473 SSEPackedDouble>, TB, OpSize;

def : Pat<(i32 (X86fgetsign FR32:$src)),
          (MOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
                         sub_ss))>, Requires<[HasSSE1]>;
def : Pat<(i64 (X86fgetsign FR32:$src)),
          (MOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
                         sub_ss))>, Requires<[HasSSE1]>;
def : Pat<(i32 (X86fgetsign FR64:$src)),
          (MOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
                         sub_sd))>, Requires<[HasSSE2]>;
def : Pat<(i64 (X86fgetsign FR64:$src)),
          (MOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
                         sub_sd))>, Requires<[HasSSE2]>;
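
// Sketch of the selected sequence (assuming the f32 lives in %xmm0):
//   fgetsign f32  ->  movmskps %xmm0, %eax
// MOVMSKPS copies the sign bit of lane i into bit i of the result; only
// bit 0 is meaningful here, since the other lanes come from IMPLICIT_DEF
// and are expected to be masked off by the consumers of X86fgetsign.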

let Predicates = [HasAVX] in {
  defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
                                        "movmskps", SSEPackedSingle>, TB, VEX;
  defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
                                        "movmskpd", SSEPackedDouble>, TB,
                                        OpSize, VEX;
  defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
                                         "movmskps", SSEPackedSingle>, TB, VEX;
  defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
                                         "movmskpd", SSEPackedDouble>, TB,
                                         OpSize, VEX;

  def : Pat<(i32 (X86fgetsign FR32:$src)),
            (VMOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
                            sub_ss))>;
  def : Pat<(i64 (X86fgetsign FR32:$src)),
            (VMOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
                            sub_ss))>;
  def : Pat<(i32 (X86fgetsign FR64:$src)),
            (VMOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
                            sub_sd))>;
  def : Pat<(i64 (X86fgetsign FR64:$src)),
            (VMOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
                            sub_sd))>;

  // Assembler Only
  def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
        "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, TB, VEX;
  def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
        "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, TB,
        OpSize, VEX;
  def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
        "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, TB, VEX;
  def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
        "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, TB,
        OpSize, VEX;
}

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Logical Instructions
//===----------------------------------------------------------------------===//

/// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
///
multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
                                       SDNode OpNode> {
  defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
              FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, TB, VEX_4V;

  defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
        FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, TB, OpSize, VEX_4V;

  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
                f32, f128mem, memopfsf32, SSEPackedSingle>, TB;

    defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
                f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
  }
}

// Alias bitwise logical operations using SSE logical ops on packed FP values.
let mayLoad = 0 in {
  defm FsAND  : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
  defm FsOR   : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
  defm FsXOR  : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
}
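
// Sketch of the intended use (the lowering happens elsewhere in the backend):
//   fneg f32:$x  ->  (X86fxor $x, sign-bit mask 0x80000000)  ->  FsXORPSrr
//   fabs f32:$x  ->  (X86fand $x, sign-clear mask 0x7fffffff) ->  FsANDPSrr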

let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
  defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;

/// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
///
multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
                                   SDNode OpNode> {
  // In AVX there is no need to add a pattern for the 128-bit logical rr ps
  // form, because these ops are all promoted to v2i64 and the patterns are
  // covered by the integer version. The extra pattern is needed only in SSE,
  // because v2i64 is supported on SSE2 but not on SSE1.
  defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
        !strconcat(OpcodeStr, "ps"), f128mem, [],
        [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
                                  (memopv2i64 addr:$src2)))], 0>, TB, VEX_4V;

  defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
        !strconcat(OpcodeStr, "pd"), f128mem,
        [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                  (bc_v2i64 (v2f64 VR128:$src2))))],
        [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                  (memopv2i64 addr:$src2)))], 0>,
                                                        TB, OpSize, VEX_4V;
  let Constraints = "$src1 = $dst" in {
    defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
          !strconcat(OpcodeStr, "ps"), f128mem,
          [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
          [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
                                    (memopv2i64 addr:$src2)))]>, TB;

    defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
          !strconcat(OpcodeStr, "pd"), f128mem,
          [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                    (bc_v2i64 (v2f64 VR128:$src2))))],
          [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
                                    (memopv2i64 addr:$src2)))]>, TB, OpSize;
  }
}

/// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
///
multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr,
                                     SDNode OpNode> {
  defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
        !strconcat(OpcodeStr, "ps"), f256mem,
        [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
        [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
                                  (memopv4i64 addr:$src2)))], 0>, TB, VEX_4V;

  defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
        !strconcat(OpcodeStr, "pd"), f256mem,
        [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
                                  (bc_v4i64 (v4f64 VR256:$src2))))],
        [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
                                  (memopv4i64 addr:$src2)))], 0>,
                                                        TB, OpSize, VEX_4V;
}

// AVX 256-bit packed logical ops forms
defm VAND  : sse12_fp_packed_logical_y<0x54, "and", and>;
defm VOR   : sse12_fp_packed_logical_y<0x56, "or", or>;
defm VXOR  : sse12_fp_packed_logical_y<0x57, "xor", xor>;
defm VANDN : sse12_fp_packed_logical_y<0x55, "andn", X86andnp>;

defm AND : sse12_fp_packed_logical<0x54, "and", and>;
defm OR  : sse12_fp_packed_logical<0x56, "or", or>;
defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
let isCommutable = 0 in
  defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Arithmetic Instructions
//===----------------------------------------------------------------------===//

/// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
/// vector forms.
///
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a scalar)
/// and leaves the top elements unmodified (therefore these cannot be commuted).
///
/// These three forms can each be reg+reg or reg+mem.
///
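/// For example (expansion sketch), the non-AVX instantiation
///   defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>;
/// produces ADDSSrr/ADDSSrm for f32 and ADDSDrr/ADDSDrm for f64, matching the
/// reg+reg form of fadd and its load-folding reg+mem variant respectively.
///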
/// FIXME: once all 256-bit intrinsics are matched, clean up and refactor the
/// classes below.
multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                  bit Is2Addr = 1> {
  defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
                            OpNode, FR32, f32mem, Is2Addr>, XS;
  defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
                            OpNode, FR64, f64mem, Is2Addr>, XD;
}

multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                  bit Is2Addr = 1> {
  let mayLoad = 0 in {
  defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
              v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
  defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
              v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
  }
}

multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
                                    SDNode OpNode> {
  let mayLoad = 0 in {
  defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
               v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
  defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
               v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
  }
}

multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
                                      bit Is2Addr = 1> {
  defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
     !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
  defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
     !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
}

multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
                                      bit Is2Addr = 1> {
  defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
       SSEPackedSingle, Is2Addr>, TB;

  defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
       !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
       SSEPackedDouble, Is2Addr>, TB, OpSize;
}

multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
  defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
       !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
       SSEPackedSingle, 0>, TB;

  defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
       !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
       SSEPackedDouble, 0>, TB, OpSize;
}

// Binary Arithmetic instructions
defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
            basic_sse12_fp_binop_s_int<0x58, "add", 0>,
            basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
            basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
            basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
            basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
            basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;

let isCommutable = 0 in {
  defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
              basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
              basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
              basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
  defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
              basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
              basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
              basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
  defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
              basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
              basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
              basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
              basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
              basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
  defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
              basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
              basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
              basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
              basic_sse12_fp_binop_p_y_int<0x5D, "min">,
              basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
             basic_sse12_fp_binop_p<0x58, "add", fadd>,
             basic_sse12_fp_binop_s_int<0x58, "add">;
  defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
             basic_sse12_fp_binop_p<0x59, "mul", fmul>,
             basic_sse12_fp_binop_s_int<0x59, "mul">;

  let isCommutable = 0 in {
    defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
               basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
               basic_sse12_fp_binop_s_int<0x5C, "sub">;
    defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
               basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
               basic_sse12_fp_binop_s_int<0x5E, "div">;
    defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
               basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
               basic_sse12_fp_binop_s_int<0x5F, "max">,
               basic_sse12_fp_binop_p_int<0x5F, "max">;
    defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
               basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
               basic_sse12_fp_binop_s_int<0x5D, "min">,
               basic_sse12_fp_binop_p_int<0x5D, "min">;
  }
}

/// Unop Arithmetic
/// In addition, we also have a special variant of the scalar form here to
/// represent the associated intrinsic operation. This form is unlike the
/// plain scalar form, in that it takes an entire vector (instead of a
/// scalar) and leaves the top elements undefined.
///
/// And, we have a special variant form for a full-vector intrinsic form.

/// sse1_fp_unop_s - SSE1 unops in scalar form.
multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
                          SDNode OpNode, Intrinsic F32Int> {
  def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
                !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                [(set FR32:$dst, (OpNode FR32:$src))]>;
  // For scalar unary operations, fold a load into the operation only in
  // OptForSize mode. Folding eliminates an instruction, but it also
  // eliminates a whole-register clobber (the load), so it introduces a
  // partial register update condition.
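  // Illustrative tradeoff (AT&T syntax, sqrtss as the example op):
  //   movss  (%rax), %xmm0    # full def of %xmm0 - no false dependency
  //   sqrtss %xmm0, %xmm0
  // versus the folded form preferred under OptForSize:
  //   sqrtss (%rax), %xmm0    # merges into %xmm0 - partial register update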
  def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
              !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
              [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
            Requires<[HasSSE1, OptForSize]>;
  def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int VR128:$src))]>;
  def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
                    !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
}

/// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
  def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
                !strconcat(OpcodeStr,
                           "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
  def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst),
                (ins FR32:$src1, f32mem:$src2),
                !strconcat(OpcodeStr,
                           "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
  def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins ssmem:$src1, VR128:$src2),
                    !strconcat(OpcodeStr,
                               "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>;
}

/// sse1_fp_unop_p - SSE1 unops in packed form.
multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
  def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
  def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
}

/// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
  def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
  def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
}

/// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
                              Intrinsic V4F32Int> {
  def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int VR128:$src))]>;
  def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
}

/// sse1_fp_unop_p_y_int - AVX 256-bit intrinsics unops in packed forms.
multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
                                Intrinsic V4F32Int> {
  def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                     !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst, (V4F32Int VR256:$src))]>;
  def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                     !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
}

/// sse2_fp_unop_s - SSE2 unops in scalar form.
multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
                          SDNode OpNode, Intrinsic F64Int> {
  def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
                !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                [(set FR64:$dst, (OpNode FR64:$src))]>;
  // See the comments in sse1_fp_unop_s for why this is OptForSize.
  def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
              !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
              [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
            Requires<[HasSSE2, OptForSize]>;
  def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F64Int VR128:$src))]>;
  def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
                    !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
}

/// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
  def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
                !strconcat(OpcodeStr,
                           "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
  def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst),
                (ins FR64:$src1, f64mem:$src2),
                !strconcat(OpcodeStr,
                           "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
  def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, sdmem:$src2),
                    !strconcat(OpcodeStr,
                               "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>;
}

/// sse2_fp_unop_p - SSE2 unops in vector forms.
multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
                          SDNode OpNode> {
  def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
  def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
}

/// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
  def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
  def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
}

/// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
                              Intrinsic V2F64Int> {
  def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V2F64Int VR128:$src))]>;
  def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                    !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
}

/// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
                                Intrinsic V2F64Int> {
  def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                     !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst, (V2F64Int VR256:$src))]>;
  def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                     !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
                     [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
}

let Predicates = [HasAVX] in {
  // Square root.
  defm VSQRT  : sse1_fp_unop_s_avx<0x51, "vsqrt">,
                sse2_fp_unop_s_avx<0x51, "vsqrt">, VEX_4V;

  defm VSQRT  : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
                sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
                sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
                sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
                sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
                sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
                sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
                sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
                VEX;

  // Reciprocal approximations. Note that these typically require refinement
  // in order to obtain suitable precision.
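  // For example, one Newton-Raphson step (emitted by the users of these
  // instructions, not by these patterns) roughly doubles the ~12 valid bits
  // of the hardware estimate:
  //   rcpps:   x1 = x0 * (2 - a * x0)
  //   rsqrtps: x1 = 0.5 * x0 * (3 - a * x0 * x0)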
  defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt">, VEX_4V;
  defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
                sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
                sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
                sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;

  defm VRCP   : sse1_fp_unop_s_avx<0x53, "vrcp">, VEX_4V;
  defm VRCP   : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
                sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
                sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
                sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
}

def : Pat<(f32 (fsqrt FR32:$src)),
          (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
def : Pat<(f32 (fsqrt (load addr:$src))),
          (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
          Requires<[HasAVX, OptForSize]>;
def : Pat<(f64 (fsqrt FR64:$src)),
          (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
def : Pat<(f64 (fsqrt (load addr:$src))),
          (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
          Requires<[HasAVX, OptForSize]>;

def : Pat<(f32 (X86frsqrt FR32:$src)),
          (VRSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
def : Pat<(f32 (X86frsqrt (load addr:$src))),
          (VRSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
          Requires<[HasAVX, OptForSize]>;

def : Pat<(f32 (X86frcp FR32:$src)),
          (VRCPSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
def : Pat<(f32 (X86frcp (load addr:$src))),
          (VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
          Requires<[HasAVX, OptForSize]>;

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
            (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
                (VSQRTSSr (f32 (IMPLICIT_DEF)),
                          (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
                sub_ss)>;
  def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
            (VSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;

  def : Pat<(int_x86_sse2_sqrt_sd VR128:$src),
            (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)),
                (VSQRTSDr (f64 (IMPLICIT_DEF)),
                          (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd)),
                sub_sd)>;
  def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
            (VSQRTSDm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;

  def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
            (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
                (VRSQRTSSr (f32 (IMPLICIT_DEF)),
                           (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
                sub_ss)>;
  def : Pat<(int_x86_sse_rsqrt_ss sse_load_f32:$src),
            (VRSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;

  def : Pat<(int_x86_sse_rcp_ss VR128:$src),
            (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
                (VRCPSSr (f32 (IMPLICIT_DEF)),
                         (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
                sub_ss)>;
  def : Pat<(int_x86_sse_rcp_ss sse_load_f32:$src),
            (VRCPSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
}

// Square root.
defm SQRT  : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
             sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
             sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
             sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
             sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
             sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;

// Reciprocal approximations. Note that these typically require refinement
// in order to obtain suitable precision.
defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
             sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
             sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
defm RCP   : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
             sse1_fp_unop_p<0x53, "rcp", X86frcp>,
             sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;

// There is no f64 version of the reciprocal approximation instructions.

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Non-temporal stores
//===----------------------------------------------------------------------===//

let AddedComplexity = 400 in { // Prefer non-temporal versions
  def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
                        (ins f128mem:$dst, VR128:$src),
                        "movntps\t{$src, $dst|$dst, $src}",
                        [(alignednontemporalstore (v4f32 VR128:$src),
                                                  addr:$dst)]>, VEX;
  def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
                        (ins f128mem:$dst, VR128:$src),
                        "movntpd\t{$src, $dst|$dst, $src}",
                        [(alignednontemporalstore (v2f64 VR128:$src),
                                                  addr:$dst)]>, VEX;
  def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
                           (ins f128mem:$dst, VR128:$src),
                           "movntdq\t{$src, $dst|$dst, $src}",
                           [(alignednontemporalstore (v2f64 VR128:$src),
                                                     addr:$dst)]>, VEX;

  let ExeDomain = SSEPackedInt in
  def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
                        (ins f128mem:$dst, VR128:$src),
                        "movntdq\t{$src, $dst|$dst, $src}",
                        [(alignednontemporalstore (v4f32 VR128:$src),
                                                  addr:$dst)]>, VEX;

  def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
            (VMOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasAVX]>;

  def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
                         (ins f256mem:$dst, VR256:$src),
                         "movntps\t{$src, $dst|$dst, $src}",
                         [(alignednontemporalstore (v8f32 VR256:$src),
                                                   addr:$dst)]>, VEX;
  def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
                         (ins f256mem:$dst, VR256:$src),
                         "movntpd\t{$src, $dst|$dst, $src}",
                         [(alignednontemporalstore (v4f64 VR256:$src),
                                                   addr:$dst)]>, VEX;
  def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
                            (ins f256mem:$dst, VR256:$src),
                            "movntdq\t{$src, $dst|$dst, $src}",
                            [(alignednontemporalstore (v4f64 VR256:$src),
                                                      addr:$dst)]>, VEX;
  let ExeDomain = SSEPackedInt in
  def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
                         (ins f256mem:$dst, VR256:$src),
                         "movntdq\t{$src, $dst|$dst, $src}",
                         [(alignednontemporalstore (v8f32 VR256:$src),
                                                   addr:$dst)]>, VEX;
}

def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
          (VMOVNTDQYmr addr:$dst, VR256:$src)>;
def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
          (VMOVNTPDYmr addr:$dst, VR256:$src)>;
def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
          (VMOVNTPSYmr addr:$dst, VR256:$src)>;

let AddedComplexity = 400 in { // Prefer non-temporal versions
def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                    "movntps\t{$src, $dst|$dst, $src}",
                    [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                    "movntpd\t{$src, $dst|$dst, $src}",
                    [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;

def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                       "movntdq\t{$src, $dst|$dst, $src}",
                       [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;

let ExeDomain = SSEPackedInt in
def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
                    "movntdq\t{$src, $dst|$dst, $src}",
                    [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;

def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
          (MOVNTDQmr addr:$dst, VR128:$src)>;

// There is no AVX form for the instructions below this point.
def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
                 "movnti{l}\t{$src, $dst|$dst, $src}",
                 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
               TB, Requires<[HasSSE2]>;
def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                     "movnti{q}\t{$src, $dst|$dst, $src}",
                     [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
                  TB, Requires<[HasSSE2]>;
}
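
// Usage sketch: these correspond to the streaming-store intrinsics, e.g.
// _mm_stream_ps -> MOVNTPS and _mm_stream_si32 -> MOVNTI. Non-temporal
// stores bypass the cache hierarchy and are weakly ordered, so an SFENCE
// is required before the data can be assumed visible to other processors.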

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Prefetch and memory fence
//===----------------------------------------------------------------------===//

// Prefetch intrinsic.
def PREFETCHT0  : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
    "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))]>;
def PREFETCHT1  : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
    "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))]>;
def PREFETCHT2  : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
    "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))]>;
def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
    "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))]>;

// Flush cache
def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
                "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
              TB, Requires<[HasSSE2]>;

// Pause. This "instruction" is encoded as "rep; nop", so even though it
// was introduced with SSE2, it's backward compatible.
def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
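// Encoding note: "pause" assembles to the bytes F3 90 (a REP prefix on NOP),
// which pre-SSE2 processors simply execute as a plain NOP.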

// Load, store, and memory fence
def SFENCE : I<0xAE, MRM_F8, (outs), (ins),
               "sfence", [(int_x86_sse_sfence)]>, TB, Requires<[HasSSE1]>;
def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
               "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
               "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;

def : Pat<(X86SFence), (SFENCE)>;
def : Pat<(X86LFence), (LFENCE)>;
def : Pat<(X86MFence), (MFENCE)>;

//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Load/Store MXCSR register
//===----------------------------------------------------------------------===//

def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
                    "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
                    "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;

def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
                  "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
                  "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;

//===---------------------------------------------------------------------===//
// SSE2 - Move Aligned/Unaligned Packed Integer Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in { // SSE integer instructions

let neverHasSideEffects = 1 in {
def VMOVDQArr  : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
}
def VMOVDQUrr  : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                      "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;

let canFoldAsLoad = 1, mayLoad = 1 in {
def VMOVDQArm  : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
let Predicates = [HasAVX] in {
  def VMOVDQUrm  : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
  def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
}
}

let mayStore = 1 in {
def VMOVDQAmr  : VPDI<0x7F, MRMDestMem, (outs),
                      (ins i128mem:$dst, VR128:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
                      (ins i256mem:$dst, VR256:$src),
                      "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
let Predicates = [HasAVX] in {
  def VMOVDQUmr  : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
  def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
                     "vmovdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
}
}

let neverHasSideEffects = 1 in
def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}", []>;

def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 "movdqu\t{$src, $dst|$dst, $src}", []>,
               XS, Requires<[HasSSE2]>;

let canFoldAsLoad = 1, mayLoad = 1 in {
def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                 "movdqu\t{$src, $dst|$dst, $src}",
                 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
               XS, Requires<[HasSSE2]>;
}

let mayStore = 1 in {
def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                   "movdqa\t{$src, $dst|$dst, $src}",
                   [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                 "movdqu\t{$src, $dst|$dst, $src}",
                 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
               XS, Requires<[HasSSE2]>;
}

// Intrinsic forms of MOVDQU load and store
def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                      "vmovdqu\t{$src, $dst|$dst, $src}",
                      [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
                    XS, VEX, Requires<[HasAVX]>;

def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                     "movdqu\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
                   XS, Requires<[HasSSE2]>;

} // ExeDomain = SSEPackedInt

def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
          (VMOVDQUYmr addr:$dst, VR256:$src)>;

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Arithmetic Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in { // SSE integer instructions

multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
                            bit IsCommutable = 0, bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId VR128:$src1,
                          (bitconvert (memopv2i64 addr:$src2))))]>;
}

multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
                             string OpcodeStr, Intrinsic IntId,
                             Intrinsic IntId2, bit Is2Addr = 1> {
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId VR128:$src1,
                          (bitconvert (memopv2i64 addr:$src2))))]>;
  def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
       (ins VR128:$src1, i32i8imm:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
}

/// PDI_binop_rm - Simple SSE2 binary operator.
multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                        ValueType OpVT, bit IsCommutable = 0,
                        bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
                                (bitconvert (memopv2i64 addr:$src2)))))]>;
}

/// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
///
/// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
/// to collapse (bitconvert VT to VT) into its operand.
///
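/// For a v2i64 operation the load pattern here is
///   (OpNode VR128:$src1, (memopv2i64 addr:$src2))
/// directly; routing it through PDI_binop_rm would wrap the load in an
/// identity (bitconvert v2i64 -> v2i64) node that tblgen cannot collapse.
///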
multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
                              bit IsCommutable = 0, bit Is2Addr = 1> {
  let isCommutable = IsCommutable in
  def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
  def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
}

} // ExeDomain = SSEPackedInt

// 128-bit Integer Arithmetic

let Predicates = [HasAVX] in {
defm VPADDB  : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
defm VPADDW  : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
defm VPADDD  : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
defm VPADDQ  : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
defm VPSUBB  : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
defm VPSUBW  : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
defm VPSUBD  : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
defm VPSUBQ  : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;

// Intrinsic forms
defm VPSUBSB  : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
                                 VEX_4V;
defm VPSUBSW  : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
                                 VEX_4V;
defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
                                 VEX_4V;
defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
                                 VEX_4V;
defm VPADDSB  : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
                                 VEX_4V;
defm VPADDSW  : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
                                 VEX_4V;
defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
                                 VEX_4V;
defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
                                 VEX_4V;
defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
                                 VEX_4V;
defm VPMULHW  : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
                                 VEX_4V;
defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
                                 VEX_4V;
defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
                                 VEX_4V;
defm VPAVGB   : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
                                 VEX_4V;
defm VPAVGW   : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
                                 VEX_4V;
defm VPMINUB  : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
                                 VEX_4V;
defm VPMINSW  : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
                                 VEX_4V;
defm VPMAXUB  : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
                                 VEX_4V;
defm VPMAXSW  : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
                                 VEX_4V;
defm VPSADBW  : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
                                 VEX_4V;
}

let Constraints = "$src1 = $dst" in {
defm PADDB  : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
defm PADDW  : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
defm PADDD  : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
defm PADDQ  : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
defm PSUBB  : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
defm PSUBW  : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
defm PSUBD  : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
defm PSUBQ  : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;

// Intrinsic forms
defm PSUBSB  : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
defm PSUBSW  : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
defm PADDSB  : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
defm PADDSW  : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
defm PMULHW  : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
defm PAVGB   : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
defm PAVGW   : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
defm PMINUB  : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
defm PMINSW  : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
defm PMAXUB  : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
defm PMAXSW  : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
defm PSADBW  : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;

} // Constraints = "$src1 = $dst"

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Logical Instructions
//===---------------------------------------------------------------------===//

let Predicates = [HasAVX] in {
defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
                                int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
                                VEX_4V;
defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
                                int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
                                VEX_4V;
defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
                                int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
                                VEX_4V;

defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
                                int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
                                VEX_4V;
defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
                                int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
                                VEX_4V;
defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
                                int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
                                VEX_4V;

defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
                                int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
                                VEX_4V;
defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
                                int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
                                VEX_4V;

defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
defm VPOR  : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;

let ExeDomain = SSEPackedInt in {
  let neverHasSideEffects = 1 in {
  // 128-bit logical shifts.
  def VPSLLDQri : PDIi8<0x73, MRM7r,
                    (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                    VEX_4V;
  def VPSRLDQri : PDIi8<0x73, MRM3r,
                    (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
                    VEX_4V;
  // PSRADQri doesn't exist in SSE[1-3].
  }
  def VPANDNrr : PDI<0xDF, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst,
                          (v2i64 (X86andnp VR128:$src1, VR128:$src2)))]>, VEX_4V;

  def VPANDNrm : PDI<0xDF, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                    "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set VR128:$dst, (X86andnp VR128:$src1,
                                       (memopv2i64 addr:$src2)))]>, VEX_4V;
}
}

let Constraints = "$src1 = $dst" in {
defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
                               int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
                               int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
                               int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;

defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
                               int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
                               int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
                               int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;

defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
                               int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
                               int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;

defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
defm POR  : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;

let ExeDomain = SSEPackedInt in {
  let neverHasSideEffects = 1 in {
  // 128-bit logical shifts.
  def PSLLDQri : PDIi8<0x73, MRM7r,
                       (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                       "pslldq\t{$src2, $dst|$dst, $src2}", []>;
  def PSRLDQri : PDIi8<0x73, MRM3r,
                       (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
                       "psrldq\t{$src2, $dst|$dst, $src2}", []>;
  // PSRADQri doesn't exist in SSE[1-3].
  }
  def PANDNrr : PDI<0xDF, MRMSrcReg,
                    (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                    "pandn\t{$src2, $dst|$dst, $src2}", []>;

  def PANDNrm : PDI<0xDF, MRMSrcMem,
                    (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                    "pandn\t{$src2, $dst|$dst, $src2}", []>;
}
} // Constraints = "$src1 = $dst"

let Predicates = [HasAVX] in {
  def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
            (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
            (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
            (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
  def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
            (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
  def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
            (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;

  // Shift up / down and insert zeros.
  def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
            (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
  def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
            (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
}
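
// Note on BYTE_imm above: the psll_dq/psrl_dq intrinsics express the shift
// amount in bits, while PSLLDQ/PSRLDQ encode a byte count, so BYTE_imm
// divides the immediate by 8; the *_bs ("byte shift") intrinsic variants
// already carry a byte count and are passed through unchanged.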

let Predicates = [HasSSE2] in {
  def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
            (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
            (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
  def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
            (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
  def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
            (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
  def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
            (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;

  // Shift up / down and insert zeros.
  def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
            (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
  def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
            (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
}

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Comparison Instructions
//===---------------------------------------------------------------------===//

let Predicates = [HasAVX] in {
  defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
                                   0>, VEX_4V;
  defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
                                   0>, VEX_4V;
  defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
                                   0>, VEX_4V;
  defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
                                   0>, VEX_4V;
  defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
                                   0>, VEX_4V;
  defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
                                   0>, VEX_4V;

  def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
            (VPCMPEQBrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
            (VPCMPEQBrm VR128:$src1, addr:$src2)>;
  def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
            (VPCMPEQWrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
            (VPCMPEQWrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
            (VPCMPEQDrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
            (VPCMPEQDrm VR128:$src1, addr:$src2)>;

  def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
            (VPCMPGTBrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
            (VPCMPGTBrm VR128:$src1, addr:$src2)>;
  def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
            (VPCMPGTWrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
            (VPCMPGTWrm VR128:$src1, addr:$src2)>;
  def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
            (VPCMPGTDrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
            (VPCMPGTDrm VR128:$src1, addr:$src2)>;
}

let Constraints = "$src1 = $dst" in {
  defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
  defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
  defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
  defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
  defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
  defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
} // Constraints = "$src1 = $dst"

def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
          (PCMPEQBrr VR128:$src1, VR128:$src2)>;
def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
          (PCMPEQBrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
          (PCMPEQWrr VR128:$src1, VR128:$src2)>;
def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
          (PCMPEQWrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
          (PCMPEQDrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
          (PCMPEQDrm VR128:$src1, addr:$src2)>;

def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
          (PCMPGTBrr VR128:$src1, VR128:$src2)>;
def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
          (PCMPGTBrm VR128:$src1, addr:$src2)>;
def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
          (PCMPGTWrr VR128:$src1, VR128:$src2)>;
def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
          (PCMPGTWrm VR128:$src1, addr:$src2)>;
def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
          (PCMPGTDrr VR128:$src1, VR128:$src2)>;
def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
          (PCMPGTDrm VR128:$src1, addr:$src2)>;

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Pack Instructions
//===---------------------------------------------------------------------===//

let Predicates = [HasAVX] in {
defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
                                  0, 0>, VEX_4V;
defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
                                  0, 0>, VEX_4V;
defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
                                  0, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
} // Constraints = "$src1 = $dst"
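
// Illustrative semantics: PACKSSWB narrows the eight i16 elements of each
// operand to i8 with signed saturation, concatenating the results:
//   $dst = <sat8(a0)..sat8(a7), sat8(b0)..sat8(b7)>
// PACKUSWB is the same but saturates the signed inputs to unsigned i8.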

//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Shuffle Instructions
//===---------------------------------------------------------------------===//

let ExeDomain = SSEPackedInt in {
multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
                         PatFrag bc_frag> {
def ri : Ii8<0x70, MRMSrcReg,
             (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
             !strconcat(OpcodeStr,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
                                                     (undef))))]>;
def mi : Ii8<0x70, MRMSrcMem,
             (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
             !strconcat(OpcodeStr,
                        "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (vt (pshuf_frag:$src2
                                    (bc_frag (memopv2i64 addr:$src1)),
                                    (undef))))]>;
}
} // ExeDomain = SSEPackedInt

let Predicates = [HasAVX] in {
  let AddedComplexity = 5 in
  defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize,
                               VEX;

  // SSE2 with ImmT == Imm8 and XS prefix.
  defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
                                VEX;

  // SSE2 with ImmT == Imm8 and XD prefix.
  defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
                                VEX;

  let AddedComplexity = 5 in
  def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
            (VPSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
  // Unary v4f32 shuffle with VPSHUF* in order to fold a load.
  def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
            (VPSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;

  def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
                              (i8 imm:$imm))),
            (VPSHUFDmi addr:$src1, imm:$imm)>;
  def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
                              (i8 imm:$imm))),
            (VPSHUFDmi addr:$src1, imm:$imm)>;
  def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
            (VPSHUFDri VR128:$src1, imm:$imm)>;
  def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
            (VPSHUFDri VR128:$src1, imm:$imm)>;
  def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
            (VPSHUFHWri VR128:$src, imm:$imm)>;
  def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)),
                               (i8 imm:$imm))),
            (VPSHUFHWmi addr:$src, imm:$imm)>;
  def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
            (VPSHUFLWri VR128:$src, imm:$imm)>;
  def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)),
                               (i8 imm:$imm))),
            (VPSHUFLWmi addr:$src, imm:$imm)>;
}

let Predicates = [HasSSE2] in {
  let AddedComplexity = 5 in
  defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;

  // SSE2 with ImmT == Imm8 and XS prefix.
  defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;

  // SSE2 with ImmT == Imm8 and XD prefix.
  defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
3757 let AddedComplexity = 5 in
3758 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3759 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
3760 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3761 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3762 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
3764 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
3766 (PSHUFDmi addr:$src1, imm:$imm)>;
3767 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
3769 (PSHUFDmi addr:$src1, imm:$imm)>;
3770 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
3771 (PSHUFDri VR128:$src1, imm:$imm)>;
3772 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
3773 (PSHUFDri VR128:$src1, imm:$imm)>;
3774 def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
3775 (PSHUFHWri VR128:$src, imm:$imm)>;
3776 def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)),
3778 (PSHUFHWmi addr:$src, imm:$imm)>;
3779 def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
3780 (PSHUFLWri VR128:$src, imm:$imm)>;
3781 def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)),
3783 (PSHUFLWmi addr:$src, imm:$imm)>;
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Unpack Instructions
//===---------------------------------------------------------------------===//
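
// Note: punpckl* interleave the low halves of the two sources element by
// element and punpckh* interleave the high halves; e.g. punpcklbw of
// {a0..a15} and {b0..b15} yields {a0,b0,a1,b1,...,a7,b7}.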
let ExeDomain = SSEPackedInt in {
multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
                       SDNode OpNode, PatFrag bc_frag, bit Is2Addr = 1> {
  def rr : PDI<opc, MRMSrcReg,
      (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
      !if(Is2Addr,
          !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
          !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))]>;
  def rm : PDI<opc, MRMSrcMem,
      (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
      !if(Is2Addr,
          !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
          !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set VR128:$dst, (OpNode VR128:$src1,
                                (bc_frag (memopv2i64
                                             addr:$src2))))]>;
}
let Predicates = [HasAVX] in {
  defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Punpcklbw,
                                bc_v16i8, 0>, VEX_4V;
  defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Punpcklwd,
                                bc_v8i16, 0>, VEX_4V;
  defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Punpckldq,
                                bc_v4i32, 0>, VEX_4V;

  /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
  /// knew to collapse (bitconvert VT to VT) into its operand.
  def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
                                                  VR128:$src2)))]>, VEX_4V;
  def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
                                          (memopv2i64 addr:$src2))))]>, VEX_4V;

  defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Punpckhbw,
                                bc_v16i8, 0>, VEX_4V;
  defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Punpckhwd,
                                bc_v8i16, 0>, VEX_4V;
  defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Punpckhdq,
                                bc_v4i32, 0>, VEX_4V;

  /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
  /// knew to collapse (bitconvert VT to VT) into its operand.
  def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
                                                  VR128:$src2)))]>, VEX_4V;
  def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                        [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
                                          (memopv2i64 addr:$src2))))]>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
  defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Punpcklbw, bc_v16i8>;
  defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Punpcklwd, bc_v8i16>;
  defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Punpckldq, bc_v4i32>;

  /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
  /// knew to collapse (bitconvert VT to VT) into its operand.
  def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)))]>;
  def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpcklqdq\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v2i64 (X86Punpcklqdq VR128:$src1,
                                  (memopv2i64 addr:$src2))))]>;

  defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Punpckhbw, bc_v16i8>;
  defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Punpckhwd, bc_v8i16>;
  defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Punpckhdq, bc_v4i32>;

  /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
  /// knew to collapse (bitconvert VT to VT) into its operand.
  def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
                        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
                        "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)))]>;
  def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
                        (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
                        "punpckhqdq\t{$src2, $dst|$dst, $src2}",
                        [(set VR128:$dst,
                          (v2i64 (X86Punpckhqdq VR128:$src1,
                                  (memopv2i64 addr:$src2))))]>;
}

} // ExeDomain = SSEPackedInt
//===---------------------------------------------------------------------===//
// SSE2 - Packed Integer Extract and Insert
//===---------------------------------------------------------------------===//
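
// Note: pinsrw replaces one selected word of the vector with the low word of
// a GPR (or a 16-bit load); pextrw zero-extends a selected word into a GPR.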
let ExeDomain = SSEPackedInt in {
multiclass sse2_pinsrw<bit Is2Addr = 1> {
  def rri : Ii8<0xC4, MRMSrcReg,
       (outs VR128:$dst), (ins VR128:$src1,
        GR32:$src2, i32i8imm:$src3),
       !if(Is2Addr,
           "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
       [(set VR128:$dst,
         (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
  def rmi : Ii8<0xC4, MRMSrcMem,
       (outs VR128:$dst), (ins VR128:$src1,
        i16mem:$src2, i32i8imm:$src3),
       !if(Is2Addr,
           "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
       [(set VR128:$dst,
         (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
                    imm:$src3))]>;
}

// Extract
let Predicates = [HasAVX] in
def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
                    (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
                                                imm:$src2))]>, TB, OpSize, VEX;
def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
                    (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
                    "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
                    [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
                                                imm:$src2))]>;

// Insert
let Predicates = [HasAVX] in {
  defm VPINSRW : sse2_pinsrw<0>, TB, OpSize, VEX_4V;
  def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
       "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
       []>, TB, OpSize, VEX_4V;
}

let Constraints = "$src1 = $dst" in
  defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;

} // ExeDomain = SSEPackedInt
//===---------------------------------------------------------------------===//
// SSE2 - Packed Mask Creation
//===---------------------------------------------------------------------===//
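
// Note: pmovmskb gathers the most-significant bit of each of the 16 source
// bytes into the low 16 bits of the GPR destination and zeroes the rest.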
let ExeDomain = SSEPackedInt in {

def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
           "pmovmskb\t{$src, $dst|$dst, $src}",
           [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
           "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
           "pmovmskb\t{$src, $dst|$dst, $src}",
           [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;

} // ExeDomain = SSEPackedInt
//===---------------------------------------------------------------------===//
// SSE2 - Conditional Store
//===---------------------------------------------------------------------===//
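
// Note: maskmovdqu stores only the bytes of $src whose corresponding mask
// byte has its high bit set, to the implicit destination address in EDI/RDI;
// the store carries a non-temporal hint.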
let ExeDomain = SSEPackedInt in {

let Uses = [EDI] in
def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
           (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
let Uses = [RDI] in
def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
           (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;

let Uses = [EDI] in
def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
let Uses = [RDI] in
def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
           "maskmovdqu\t{$mask, $src|$src, $mask}",
           [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;

} // ExeDomain = SSEPackedInt
//===---------------------------------------------------------------------===//
// SSE2 - Move Doubleword
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//
// Move Int Doubleword to Packed Double Int
//
def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
                      VEX;
def VMOV64toPQIrr : VRPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>, VEX;
def VMOV64toSDrr : VRPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>, VEX;

def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))]>;
def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set VR128:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst,
                          (v2i64 (scalar_to_vector GR64:$src)))]>;
def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert GR64:$src))]>;

//===---------------------------------------------------------------------===//
// Move Int Doubleword to Single Scalar
//
def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;

def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
                      VEX;
def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (bitconvert GR32:$src))]>;

def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;

//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int to Packed Double Int
//
def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                        (iPTR 0)))]>, VEX;
def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
                       (ins i32mem:$dst, VR128:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(store (i32 (vector_extract (v4i32 VR128:$src),
                                     (iPTR 0))), addr:$dst)]>, VEX;
def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
                                        (iPTR 0)))]>;
def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(store (i32 (vector_extract (v4i32 VR128:$src),
                                     (iPTR 0))), addr:$dst)]>;

def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                        "mov{d|q}\t{$src, $dst|$dst, $src}",
                        [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
                                         (iPTR 0)))]>;
def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;

def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}",
                       [(set GR64:$dst, (bitconvert FR64:$src))]>;
def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                       "movq\t{$src, $dst|$dst, $src}",
                       [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;

//===---------------------------------------------------------------------===//
// Move Scalar Single to Double Int
//
def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (bitconvert FR32:$src))]>;
def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
                      "movd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;

//===---------------------------------------------------------------------===//
// Patterns and instructions to describe movd/movq to XMM register zero-extends
//
let AddedComplexity = 15 in {
def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v4i32 (X86vzmovl
                                      (v4i32 (scalar_to_vector GR32:$src)))))]>,
                                      VEX;
def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
                       [(set VR128:$dst, (v2i64 (X86vzmovl
                                      (v2i64 (scalar_to_vector GR64:$src)))))]>,
                                      VEX, VEX_W;
}
let AddedComplexity = 15 in {
def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (v4i32 (X86vzmovl
                                      (v4i32 (scalar_to_vector GR32:$src)))))]>;
def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                       "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
                       [(set VR128:$dst, (v2i64 (X86vzmovl
                                      (v2i64 (scalar_to_vector GR64:$src)))))]>;
}

let AddedComplexity = 20 in {
def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
                                                   (loadi32 addr:$src))))))]>,
                                                   VEX;
def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                       "movd\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst,
                         (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
                                                   (loadi32 addr:$src))))))]>;

def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
            (MOVZDI2PDIrm addr:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
            (MOVZDI2PDIrm addr:$src)>;
def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
            (MOVZDI2PDIrm addr:$src)>;
}

// AVX 128-bit movd/movq instructions write zeros in the high 128-bit part.
// Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
                              (v4i32 (scalar_to_vector GR32:$src)), (i32 0)))),
          (SUBREG_TO_REG (i32 0), (VMOVZDI2PDIrr GR32:$src), sub_xmm)>;
def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
                              (v2i64 (scalar_to_vector GR64:$src)), (i32 0)))),
          (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;

// These are the correct encodings of the instructions so that we know how to
// read correct assembly, even though we continue to emit the wrong ones for
// compatibility with Darwin's buggy assembler.
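// For example, "movq %rax, %xmm0" is accepted by the parser and mapped to
// MOV64toPQIrr below, even though the printer still emits the "movd" spelling
// (the trailing 0 keeps the alias from being used for printing).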
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOV64toSDrr FR64:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOVSDto64rr GR64:$dst, FR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (VMOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
                (MOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;

//===---------------------------------------------------------------------===//
// SSE2 - Move Quadword
//===---------------------------------------------------------------------===//

//===---------------------------------------------------------------------===//
// Move Quadword Int to Packed Quadword Int
//
def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                    "vmovq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst,
                      (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                    VEX, Requires<[HasAVX]>;
def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                    "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst,
                      (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
                    Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix

//===---------------------------------------------------------------------===//
// Move Packed Quadword Int to Quadword Int
//
def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}",
                      [(store (i64 (vector_extract (v2i64 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>, VEX;
def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}",
                      [(store (i64 (vector_extract (v2i64 VR128:$src),
                                    (iPTR 0))), addr:$dst)]>;

//===---------------------------------------------------------------------===//
// Store / copy lower 64-bits of an XMM register.
//
def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;

let AddedComplexity = 20 in
def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "vmovq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
                                                 (loadi64 addr:$src))))))]>,
                     XS, VEX, Requires<[HasAVX]>;

let AddedComplexity = 20 in {
def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                     "movq\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst,
                       (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
                                                 (loadi64 addr:$src))))))]>,
                     XS, Requires<[HasSSE2]>;

def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
            (MOVZQI2PQIrm addr:$src)>;
def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
            (MOVZQI2PQIrm addr:$src)>;
def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
}

//===---------------------------------------------------------------------===//
// Moving from XMM to XMM and clear upper 64 bits. Note: there is a bug in the
// IA-32 documentation; movq xmm1, xmm2 does clear the high bits.
//
let AddedComplexity = 15 in
def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "vmovq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
                        XS, VEX, Requires<[HasAVX]>;
let AddedComplexity = 15 in
def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                    [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
                        XS, Requires<[HasSSE2]>;

let AddedComplexity = 20 in
def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vmovq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (v2i64 (X86vzmovl
                                                  (loadv2i64 addr:$src))))]>,
                        XS, VEX, Requires<[HasAVX]>;
let AddedComplexity = 20 in {
def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "movq\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (v2i64 (X86vzmovl
                                                  (loadv2i64 addr:$src))))]>,
                        XS, Requires<[HasSSE2]>;

def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
            (MOVZPQILo2PQIrm addr:$src)>;
}

// Instructions to match in the assembler
def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
                      "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                      "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
// Recognize "movd" with GR64 destination, but encode as a "movq"
def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
                          "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;

// Instructions for the disassembler
// xr = XMM register

let Predicates = [HasAVX] in
def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 "movq\t{$src, $dst|$dst, $src}", []>, XS;

//===---------------------------------------------------------------------===//
// SSE3 - Conversion Instructions
//===---------------------------------------------------------------------===//

// Convert Packed Double FP to Packed DW Integers
let Predicates = [HasAVX] in {
// The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true when using memory operands instead.
// Provide other assembly rr and rm forms to address this explicitly.
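// For example, "vcvtpd2dqx (%rax), %xmm0" and "vcvtpd2dqy (%rax), %xmm0"
// spell out whether the memory source is 128 or 256 bits wide, since a plain
// "vcvtpd2dq" mnemonic would be ambiguous with a memory operand.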
def VCVTPD2DQrr   : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                       "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;

// XMM only
def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;

// YMM only
def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
                      "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
                      "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
}

def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;

def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
          (VCVTPD2DQYrr VR256:$src)>;
def : Pat<(v4i32 (fp_to_sint (memopv4f64 addr:$src))),
          (VCVTPD2DQYrm addr:$src)>;

// Convert Packed DW Integers to Packed Double FP
let Predicates = [HasAVX] in {
def VCVTDQ2PDrm  : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                     "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTDQ2PDrr  : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                     "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
                     "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
                     "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
}

def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
                      "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;

// AVX 256-bit register conversion intrinsics
def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
          (VCVTDQ2PDYrr VR128:$src)>;
def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
          (VCVTDQ2PDYrm addr:$src)>;

def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
          (VCVTPD2DQYrr VR256:$src)>;
def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
          (VCVTPD2DQYrm addr:$src)>;

def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
          (VCVTDQ2PDYrr VR128:$src)>;
def : Pat<(v4f64 (sint_to_fp (memopv4i32 addr:$src))),
          (VCVTDQ2PDYrm addr:$src)>;

//===---------------------------------------------------------------------===//
// SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
//===---------------------------------------------------------------------===//
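// Note: movshdup duplicates the odd-indexed floats of each 128-bit lane
// ({1,1,3,3}) and movsldup the even-indexed ones ({0,0,2,2}).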
multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
                              ValueType vt, RegisterClass RC, PatFrag mem_frag,
                              X86MemOperand x86memop> {
def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
                      !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                      [(set RC:$dst, (vt (OpNode RC:$src)))]>;
def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
                      !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                      [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>;
}

let Predicates = [HasAVX] in {
  defm VMOVSHDUP  : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
                                       v4f32, VR128, memopv4f32, f128mem>, VEX;
  defm VMOVSLDUP  : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
                                       v4f32, VR128, memopv4f32, f128mem>, VEX;
  defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
                                       v8f32, VR256, memopv8f32, f256mem>, VEX;
  defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
                                       v8f32, VR256, memopv8f32, f256mem>, VEX;
}
defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
                                   memopv4f32, f128mem>;
defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
                                   memopv4f32, f128mem>;

let Predicates = [HasSSE3] in {
  def : Pat<(v4i32 (X86Movshdup VR128:$src)),
            (MOVSHDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
            (MOVSHDUPrm addr:$src)>;
  def : Pat<(v4i32 (X86Movsldup VR128:$src)),
            (MOVSLDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
            (MOVSLDUPrm addr:$src)>;
}

let Predicates = [HasAVX] in {
  def : Pat<(v4i32 (X86Movshdup VR128:$src)),
            (VMOVSHDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
            (VMOVSHDUPrm addr:$src)>;
  def : Pat<(v4i32 (X86Movsldup VR128:$src)),
            (VMOVSLDUPrr VR128:$src)>;
  def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
            (VMOVSLDUPrm addr:$src)>;
  def : Pat<(v8i32 (X86Movshdup VR256:$src)),
            (VMOVSHDUPYrr VR256:$src)>;
  def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (memopv4i64 addr:$src)))),
            (VMOVSHDUPYrm addr:$src)>;
  def : Pat<(v8i32 (X86Movsldup VR256:$src)),
            (VMOVSLDUPYrr VR256:$src)>;
  def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (memopv4i64 addr:$src)))),
            (VMOVSLDUPYrm addr:$src)>;
}

//===---------------------------------------------------------------------===//
// SSE3 - Replicate Double FP - MOVDDUP
//===---------------------------------------------------------------------===//
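
// Note: movddup broadcasts the low double of the source to both halves of
// the destination; the memory form needs only a 64-bit load.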
multiclass sse3_replicate_dfp<string OpcodeStr> {
def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
                                      (undef))))]>;
}

// FIXME: Merge with the above class when there are patterns for the ymm
// version.
multiclass sse3_replicate_dfp_y<string OpcodeStr> {
let Predicates = [HasAVX] in {
  def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
                      !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                      []>;
  def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
                      !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                      []>;
}
}

defm MOVDDUP   : sse3_replicate_dfp<"movddup">;
defm VMOVDDUP  : sse3_replicate_dfp<"vmovddup">, VEX;
defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;

let Predicates = [HasSSE3] in {
  def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
                     (undef)),
            (MOVDDUPrm addr:$src)>;
  let AddedComplexity = 5 in {
  def : Pat<(movddup (memopv2f64 addr:$src), (undef)), (MOVDDUPrm addr:$src)>;
  def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
            (MOVDDUPrm addr:$src)>;
  def : Pat<(movddup (memopv2i64 addr:$src), (undef)), (MOVDDUPrm addr:$src)>;
  def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
            (MOVDDUPrm addr:$src)>;
  }
  def : Pat<(X86Movddup (memopv2f64 addr:$src)),
            (MOVDDUPrm addr:$src)>;
  def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
            (MOVDDUPrm addr:$src)>;
  def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
            (MOVDDUPrm addr:$src)>;
  def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
            (MOVDDUPrm addr:$src)>;
  def : Pat<(X86Movddup (bc_v2f64
                             (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
            (MOVDDUPrm addr:$src)>;
}

let Predicates = [HasAVX] in {
  def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
                     (undef)),
            (VMOVDDUPrm addr:$src)>;
  let AddedComplexity = 5 in {
  def : Pat<(movddup (memopv2f64 addr:$src), (undef)), (VMOVDDUPrm addr:$src)>;
  def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
            (VMOVDDUPrm addr:$src)>;
  def : Pat<(movddup (memopv2i64 addr:$src), (undef)), (VMOVDDUPrm addr:$src)>;
  def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
            (VMOVDDUPrm addr:$src)>;
  }
  def : Pat<(X86Movddup (memopv2f64 addr:$src)),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
  def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
  def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
  def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
  def : Pat<(X86Movddup (bc_v2f64
                             (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
            (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;

  // 256-bit version
  def : Pat<(X86Movddup (memopv4f64 addr:$src)),
            (VMOVDDUPYrm addr:$src)>;
  def : Pat<(X86Movddup (memopv4i64 addr:$src)),
            (VMOVDDUPYrm addr:$src)>;
  def : Pat<(X86Movddup (v4f64 (scalar_to_vector (loadf64 addr:$src)))),
            (VMOVDDUPYrm addr:$src)>;
  def : Pat<(X86Movddup (v4i64 (scalar_to_vector (loadi64 addr:$src)))),
            (VMOVDDUPYrm addr:$src)>;
  def : Pat<(X86Movddup (v4f64 VR256:$src)),
            (VMOVDDUPYrr VR256:$src)>;
  def : Pat<(X86Movddup (v4i64 VR256:$src)),
            (VMOVDDUPYrr VR256:$src)>;
}

//===---------------------------------------------------------------------===//
// SSE3 - Move Unaligned Integer
//===---------------------------------------------------------------------===//
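
// Note: lddqu performs an unaligned 128-bit integer load that may over-read
// and shift internally to avoid cache-line splits; it is functionally
// equivalent to movdqu on write-back memory.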
let Predicates = [HasAVX] in {
  def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                     "vlddqu\t{$src, $dst|$dst, $src}",
                     [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
  def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                     "vlddqu\t{$src, $dst|$dst, $src}",
                     [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>,
                     VEX;
}
def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                   "lddqu\t{$src, $dst|$dst, $src}",
                   [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;

//===---------------------------------------------------------------------===//
// SSE3 - Arithmetic
//===---------------------------------------------------------------------===//
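
// Note: addsub alternates subtract/add across lanes: even-indexed result
// elements are src1[i] - src2[i] and odd-indexed ones are src1[i] + src2[i],
// e.g. addsubps yields {a0-b0, a1+b1, a2-b2, a3+b3}.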
multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, bit Is2Addr = 1> {
  def rr : I<0xD0, MRMSrcReg,
       (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
  def rm : I<0xD0, MRMSrcMem,
       (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
}

let Predicates = [HasAVX],
  ExeDomain = SSEPackedDouble in {
  defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
                               f128mem, 0>, TB, XD, VEX_4V;
  defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
                               f128mem, 0>, TB, OpSize, VEX_4V;
  defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
                                f256mem, 0>, TB, XD, VEX_4V;
  defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
                                f256mem, 0>, TB, OpSize, VEX_4V;
}
let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
  ExeDomain = SSEPackedDouble in {
  defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
                              f128mem>, TB, XD;
  defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
                              f128mem>, TB, OpSize;
}

//===---------------------------------------------------------------------===//
// SSE3 Instructions
//===---------------------------------------------------------------------===//
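
// Note: the horizontal ops pairwise combine adjacent elements within each
// source: haddps computes {a0+a1, a2+a3, b0+b1, b2+b3}, and hsubps the
// corresponding differences.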
// Horizontal ops
multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
                   X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
  def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;

  def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
}
multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
                  X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
  def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;

  def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
      [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
}

let Predicates = [HasAVX] in {
  defm VHADDPS  : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
                          int_x86_sse3_hadd_ps, 0>, VEX_4V;
  defm VHADDPD  : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
                          int_x86_sse3_hadd_pd, 0>, VEX_4V;
  defm VHSUBPS  : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
                          int_x86_sse3_hsub_ps, 0>, VEX_4V;
  defm VHSUBPD  : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
                          int_x86_sse3_hsub_pd, 0>, VEX_4V;
  defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
                          int_x86_avx_hadd_ps_256, 0>, VEX_4V;
  defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
                          int_x86_avx_hadd_pd_256, 0>, VEX_4V;
  defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
                          int_x86_avx_hsub_ps_256, 0>, VEX_4V;
  defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
                          int_x86_avx_hsub_pd_256, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
                        int_x86_sse3_hadd_ps>;
  defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
                       int_x86_sse3_hadd_pd>;
  defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
                        int_x86_sse3_hsub_ps>;
  defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
                       int_x86_sse3_hsub_pd>;
}

//===---------------------------------------------------------------------===//
// SSSE3 - Packed Absolute Instructions
//===---------------------------------------------------------------------===//
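
// Note: pabsb/pabsw/pabsd write the per-element absolute value; the most
// negative input (e.g. -128 for pabsb) maps to itself since the result is
// not saturated.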
/// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
                            PatFrag mem_frag128, Intrinsic IntId128> {
  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>,
                    OpSize;

  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
}

let Predicates = [HasAVX] in {
  defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
                                 int_x86_ssse3_pabs_b_128>, VEX;
  defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
                                 int_x86_ssse3_pabs_w_128>, VEX;
  defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
                                 int_x86_ssse3_pabs_d_128>, VEX;
}

defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
                              int_x86_ssse3_pabs_b_128>;
defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
                              int_x86_ssse3_pabs_w_128>;
defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
                              int_x86_ssse3_pabs_d_128>;

//===---------------------------------------------------------------------===//
// SSSE3 - Packed Binary Operator Instructions
//===---------------------------------------------------------------------===//

/// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
                             PatFrag mem_frag128, Intrinsic IntId128,
                             bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       OpSize;
  def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
         !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
         !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}

let Predicates = [HasAVX] in {
let isCommutable = 0 in {
  defm VPHADDW    : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
                                      int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
  defm VPHADDD    : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
                                      int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
  defm VPHADDSW   : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
                                      int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
  defm VPHSUBW    : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
                                      int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
  defm VPHSUBD    : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
                                      int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
  defm VPHSUBSW   : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
                                      int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
  defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
                                      int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
  defm VPSHUFB    : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
                                      int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
  defm VPSIGNB    : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
                                      int_x86_ssse3_psign_b_128, 0>, VEX_4V;
  defm VPSIGNW    : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
                                      int_x86_ssse3_psign_w_128, 0>, VEX_4V;
  defm VPSIGND    : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
                                      int_x86_ssse3_psign_d_128, 0>, VEX_4V;
}
defm VPMULHRSW    : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
                                      int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
}

// None of these have i8 immediate fields.
let ImmT = NoImm, Constraints = "$src1 = $dst" in {
let isCommutable = 0 in {
  defm PHADDW    : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
                                     int_x86_ssse3_phadd_w_128>;
  defm PHADDD    : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
                                     int_x86_ssse3_phadd_d_128>;
  defm PHADDSW   : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
                                     int_x86_ssse3_phadd_sw_128>;
  defm PHSUBW    : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
                                     int_x86_ssse3_phsub_w_128>;
  defm PHSUBD    : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
                                     int_x86_ssse3_phsub_d_128>;
  defm PHSUBSW   : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
                                     int_x86_ssse3_phsub_sw_128>;
  defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
                                     int_x86_ssse3_pmadd_ub_sw_128>;
  defm PSHUFB    : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
                                     int_x86_ssse3_pshuf_b_128>;
  defm PSIGNB    : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
                                     int_x86_ssse3_psign_b_128>;
  defm PSIGNW    : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
                                     int_x86_ssse3_psign_w_128>;
  defm PSIGND    : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
                                     int_x86_ssse3_psign_d_128>;
}
defm PMULHRSW    : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
                                     int_x86_ssse3_pmul_hr_sw_128>;
}
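
// Note: pshufb uses the low four bits of each mask byte as a source-byte
// index and zeroes any destination byte whose mask byte has its high bit set;
// psign{b,w,d} negates, keeps, or zeroes each element of the first operand
// depending on whether the matching element of the second is <0, >0, or 0.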
def : Pat<(X86pshufb VR128:$src, VR128:$mask),
          (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
          (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;

def : Pat<(X86psignb VR128:$src1, VR128:$src2),
          (PSIGNBrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
def : Pat<(X86psignw VR128:$src1, VR128:$src2),
          (PSIGNWrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
def : Pat<(X86psignd VR128:$src1, VR128:$src2),
          (PSIGNDrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;

//===---------------------------------------------------------------------===//
// SSSE3 - Packed Align Instruction Patterns
//===---------------------------------------------------------------------===//
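
// Note: palignr forms the 32-byte value {$src1, $src2} (tied destination in
// the upper half), shifts it right by $src3 bytes, and keeps the low 16
// bytes; the patterns below therefore swap the two vector operands when
// lowering X86PAlign.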
multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
  def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      []>, OpSize;
  def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      []>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
let Constraints = "$src1 = $dst", Predicates = [HasSSSE3] in
  defm PALIGN : ssse3_palign<"palignr">;

let Predicates = [HasSSSE3] in {
def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
}

let Predicates = [HasAVX] in {
def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
          (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
}

//===---------------------------------------------------------------------===//
// SSE3 - Thread synchronization
//===---------------------------------------------------------------------===//
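
// Note: monitor takes the linear address to arm in EAX/RAX, extensions in
// ECX, and hints in EDX; mwait takes hints in EAX and extensions in ECX.
// The pseudos below carry explicit operands so the custom inserter can move
// them into the required registers before emitting the real instructions.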
let usesCustomInserter = 1 in {
def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
                [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>;
def MWAIT : PseudoI<(outs), (ins GR32:$src1, GR32:$src2),
                [(int_x86_sse3_mwait GR32:$src1, GR32:$src2)]>;
}

let Uses = [EAX, ECX, EDX] in
def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>, TB,
                 Requires<[HasSSE3]>;
let Uses = [ECX, EAX] in
def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", []>, TB,
              Requires<[HasSSE3]>;

def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;

def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>,
      Requires<[In32BitMode]>;
def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>,
      Requires<[In64BitMode]>;

// Splat v2f64 / v2i64
let AddedComplexity = 10 in {
  def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
            (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
}

// Set lowest element and zero upper elements.
def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
          (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;

//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Move with Sign/Zero Extend
//===----------------------------------------------------------------------===//
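
// Note: pmovsx*/pmovzx* widen the low elements of the source by sign or zero
// extension, e.g. pmovsxbw sign-extends the low 8 bytes to 8 words; the
// memory forms load only as many bits as they consume (64, 32, or 16).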
multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
       [(set VR128:$dst,
         (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
       OpSize;
}

let Predicates = [HasAVX] in {
defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
                                     VEX;
defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
                                     VEX;
defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
                                     VEX;
defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
                                     VEX;
defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
                                     VEX;
defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
                                     VEX;
}

defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;

// Common patterns involving scalar load.
def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
          (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
          (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
          (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
          (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
          (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
          (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
          (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
          (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
          (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
          (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
          (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
          (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;

multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
       [(set VR128:$dst,
         (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
       OpSize;
}

let Predicates = [HasAVX] in {
defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
                                     VEX;
defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
                                     VEX;
defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
                                     VEX;
defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
                                     VEX;
}

defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;

// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
          (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
          (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
          (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
          (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;

multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;

  // Expecting an i16 load any-extended to an i32 value.
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
                 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                 [(set VR128:$dst, (IntId (bitconvert
                     (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
                 OpSize;
}

let Predicates = [HasAVX] in {
defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
                                     VEX;
defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
                                     VEX;
}

defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;

// Common patterns involving scalar load
def : Pat<(int_x86_sse41_pmovsxbq
            (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
          (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;

def : Pat<(int_x86_sse41_pmovzxbq
            (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
          (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;

//===----------------------------------------------------------------------===//
// SSE4.1 - Extract Instructions
//===----------------------------------------------------------------------===//

/// SS41I_extract8 - SSE 4.1 extract 8 bits to 32 bit reg or 8 bit mem
multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
                 OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 []>, OpSize;
// FIXME:
// There's an AssertZext in the way of writing the store pattern
// (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}

let Predicates = [HasAVX] in {
  defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
  def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
         (ins VR128:$src1, i32i8imm:$src2),
         "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
}

defm PEXTRB : SS41I_extract8<0x14, "pextrb">;

/// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 []>, OpSize;
// FIXME:
// There's an AssertZext in the way of writing the store pattern
// (store (i16 (trunc (X86pextrw (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
}

let Predicates = [HasAVX] in
  defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;

defm PEXTRW : SS41I_extract16<0x15, "pextrw">;

/// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst,
                   (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
                          addr:$dst)]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;

defm PEXTRD : SS41I_extract32<0x16, "pextrd">;

/// SS41I_extract64 - SSE 4.1 extract 64 bits to int reg or memory destination
multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR64:$dst,
                   (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
                          addr:$dst)]>, OpSize, REX_W;
}

let Predicates = [HasAVX] in
  defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;

defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;

/// SS41I_extractf32 - SSE 4.1 extract 32 bits fp value to int reg or memory
/// destination
multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
  def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
                 (ins VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(set GR32:$dst,
                  (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
                 OpSize;
  def mr : SS4AIi8<opc, MRMDestMem, (outs),
                 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
                 !strconcat(OpcodeStr,
                  "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
                          addr:$dst)]>, OpSize;
}

let Predicates = [HasAVX] in {
  defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
  def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
                  (ins VR128:$src1, i32i8imm:$src2),
                  "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
                  []>, OpSize, VEX;
}

defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;

// Also match an EXTRACTPS store when the store is done as f32 instead of i32.
def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
                                              imm:$src2))),
                 addr:$dst),
          (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
          Requires<[HasSSE41]>;
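// Editorial usage sketch (illustrative only): "extractps $1, %xmm0, %eax"
// moves the raw bits of element 1 into a GPR, and the f32 pattern above
// additionally matches stores such as "extractps $1, %xmm0, 4(%rdi)".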
//===----------------------------------------------------------------------===//
// SSE4.1 - Insert Instructions
//===----------------------------------------------------------------------===//

multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
                   imm:$src3))]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
      OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
                          imm:$src3)))]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
      OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
                          imm:$src3)))]>, OpSize;
}

let Predicates = [HasAVX] in
  defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
let Constraints = "$src1 = $dst" in
  defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
// insertps has a few different modes. The first two below are optimized
// inserts that won't zero arbitrary elements in the destination vector; the
// intrinsic form that follows them can zero arbitrary elements in the target
// vector.
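// Editorial summary of the immediate (from the ISA manual, for reference):
// bits [7:6] select the source element (COUNT_S), bits [5:4] the destination
// element (COUNT_D), and bits [3:0] form a zero mask (ZMASK) applied to the
// result after the insert.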
multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
  def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, VR128:$src2, u32u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
      OpSize;
  def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
      (ins VR128:$src1, f32mem:$src2, u32u8imm:$src3),
      !if(Is2Addr,
        !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
        !strconcat(asm,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
      [(set VR128:$dst,
        (X86insrtps VR128:$src1,
                    (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
                    imm:$src3))]>, OpSize;
}

let Constraints = "$src1 = $dst" in
  defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
let Predicates = [HasAVX] in
  defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;

def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
          (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
          Requires<[HasAVX]>;
def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
          (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
          Requires<[HasSSE41]>;
//===----------------------------------------------------------------------===//
// SSE4.1 - Round Instructions
//===----------------------------------------------------------------------===//

multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
                            X86MemOperand x86memop, RegisterClass RC,
                            PatFrag mem_frag32, PatFrag mem_frag64,
                            Intrinsic V4F32Int, Intrinsic V2F64Int> {
  // Vector intrinsic operation, reg
  def PSr : SS4AIi8<opcps, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, mem
  def PSm : Ii8<opcps, MRMSrcMem,
                    (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                          (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
                    TA, OpSize,
                    Requires<[HasSSE41]>;

  // Vector intrinsic operation, reg
  def PDr : SS4AIi8<opcpd, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
                    OpSize;

  // Vector intrinsic operation, mem
  def PDm : SS4AIi8<opcpd, MRMSrcMem,
                    (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    [(set RC:$dst,
                          (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
                    OpSize;
}
multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
                RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
  // Vector intrinsic operation, reg
  def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, OpSize;

  // Vector intrinsic operation, mem
  def PSm_AVX : Ii8<opcps, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, TA, OpSize, Requires<[HasSSE41]>;

  // Vector intrinsic operation, reg
  def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
                    (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, OpSize;

  // Vector intrinsic operation, mem
  def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
                    (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
                    !strconcat(OpcodeStr,
                    "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                    []>, OpSize;
}
multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
                             string OpcodeStr,
                             Intrinsic F32Int,
                             Intrinsic F64Int, bit Is2Addr = 1> {
  // Intrinsic operation, reg.
  def SSr : SS4AIi8<opcss, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
        OpSize;

  // Intrinsic operation, mem.
  def SSm : SS4AIi8<opcss, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst,
             (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
        OpSize;

  // Intrinsic operation, reg.
  def SDr : SS4AIi8<opcsd, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
        OpSize;

  // Intrinsic operation, mem.
  def SDm : SS4AIi8<opcsd, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set VR128:$dst,
              (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
        OpSize;
}
multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
                                   string OpcodeStr> {
  // Intrinsic operation, reg.
  def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
            "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;

  // Intrinsic operation, mem.
  def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
            "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;

  // Intrinsic operation, reg.
  def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
        (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
            "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;

  // Intrinsic operation, mem.
  def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
        (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
        !strconcat(OpcodeStr,
            "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
        []>, OpSize;
}
// FP round - roundss, roundps, roundsd, roundpd
let Predicates = [HasAVX] in {
  // Intrinsic form
  defm VROUND  : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
                                  memopv4f32, memopv2f64,
                                  int_x86_sse41_round_ps,
                                  int_x86_sse41_round_pd>, VEX;
  defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
                                  memopv8f32, memopv4f64,
                                  int_x86_avx_round_ps_256,
                                  int_x86_avx_round_pd_256>, VEX;
  defm VROUND  : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
                                   int_x86_sse41_round_ss,
                                   int_x86_sse41_round_sd, 0>, VEX_4V;

  // Instructions for the assembler
  defm VROUND  : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
                 VEX;
  defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
                 VEX;
  defm VROUND  : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
}

defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
                              memopv4f32, memopv2f64,
                              int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
let Constraints = "$src1 = $dst" in
defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
                               int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
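// Editorial summary of the round immediate (for reference): bits [1:0] give
// the rounding mode (00 nearest, 01 down, 10 up, 11 truncate), bit 2 selects
// the current MXCSR.RC mode instead, and bit 3 suppresses the precision
// (inexact) exception.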
//===----------------------------------------------------------------------===//
// SSE4.1 - Packed Bit Test
//===----------------------------------------------------------------------===//

// ptest instruction: we lower to this in X86ISelLowering, primarily from the
// Intel intrinsic that corresponds to it.
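// Editorial example (illustrative): _mm_testz_si128(a, b) becomes an X86ptest
// node, and the ZF it produces is read back with a setcc on EFLAGS.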
let Defs = [EFLAGS], Predicates = [HasAVX] in {
def VPTESTrr  : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
                OpSize, VEX;
def VPTESTrm  : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
                OpSize, VEX;

def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
                OpSize, VEX;
def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
                "vptest\t{$src2, $src1|$src1, $src2}",
                [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
                OpSize, VEX;
}

let Defs = [EFLAGS] in {
def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
              "ptest \t{$src2, $src1|$src1, $src2}",
              [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
              OpSize;
def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
              "ptest \t{$src2, $src1|$src1, $src2}",
              [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
              OpSize;
}
// The bit test instructions below are AVX only
multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
                       X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
  def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
            [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
  def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
            !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
            [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
            OpSize, VEX;
}

let Defs = [EFLAGS], Predicates = [HasAVX] in {
defm VTESTPS  : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
defm VTESTPD  : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
}
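// Editorial semantics note (for reference): vtestps/vtestpd look only at the
// element sign bits - ZF reports whether the AND of the two operands' sign
// bits is all zero, and CF does the same for the AND-NOT.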
//===----------------------------------------------------------------------===//
// SSE4.1 - Misc Instructions
//===----------------------------------------------------------------------===//

def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
                   "popcnt{w}\t{$src, $dst|$dst, $src}",
                   [(set GR16:$dst, (ctpop GR16:$src))]>, OpSize, XS;
def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
                   "popcnt{w}\t{$src, $dst|$dst, $src}",
                   [(set GR16:$dst, (ctpop (loadi16 addr:$src)))]>, OpSize, XS;

def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
                   "popcnt{l}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (ctpop GR32:$src))]>, XS;
def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
                   "popcnt{l}\t{$src, $dst|$dst, $src}",
                   [(set GR32:$dst, (ctpop (loadi32 addr:$src)))]>, XS;

def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (ctpop GR64:$src))]>, XS;
def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
                    "popcnt{q}\t{$src, $dst|$dst, $src}",
                    [(set GR64:$dst, (ctpop (loadi64 addr:$src)))]>, XS;
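// Editorial note (illustrative): these defs make popcnt the lowering of the
// generic ctpop node, so e.g. llvm.ctpop.i32 from __builtin_popcount becomes
// a single "popcntl" when the feature is available.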
// SS41I_unop_rm_int_v16 - SSE 4.1 unary operator whose type is v8i16.
multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
                                 Intrinsic IntId128> {
  def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
  def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins i128mem:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set VR128:$dst,
                      (IntId128
                       (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
}

let Predicates = [HasAVX] in
defm VPHMINPOSUW : SS41I_unop_rm_int_v16<0x41, "vphminposuw",
                                         int_x86_sse41_phminposuw>, VEX;
defm PHMINPOSUW  : SS41I_unop_rm_int_v16<0x41, "phminposuw",
                                         int_x86_sse41_phminposuw>;
/// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId128, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}
let Predicates = [HasAVX] in {
  let isCommutable = 0 in
  defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
                                                         0>, VEX_4V;
  defm VPCMPEQQ  : SS41I_binop_rm_int<0x29, "vpcmpeqq",  int_x86_sse41_pcmpeqq,
                                                         0>, VEX_4V;
  defm VPMINSB   : SS41I_binop_rm_int<0x38, "vpminsb",   int_x86_sse41_pminsb,
                                                         0>, VEX_4V;
  defm VPMINSD   : SS41I_binop_rm_int<0x39, "vpminsd",   int_x86_sse41_pminsd,
                                                         0>, VEX_4V;
  defm VPMINUD   : SS41I_binop_rm_int<0x3B, "vpminud",   int_x86_sse41_pminud,
                                                         0>, VEX_4V;
  defm VPMINUW   : SS41I_binop_rm_int<0x3A, "vpminuw",   int_x86_sse41_pminuw,
                                                         0>, VEX_4V;
  defm VPMAXSB   : SS41I_binop_rm_int<0x3C, "vpmaxsb",   int_x86_sse41_pmaxsb,
                                                         0>, VEX_4V;
  defm VPMAXSD   : SS41I_binop_rm_int<0x3D, "vpmaxsd",   int_x86_sse41_pmaxsd,
                                                         0>, VEX_4V;
  defm VPMAXUD   : SS41I_binop_rm_int<0x3F, "vpmaxud",   int_x86_sse41_pmaxud,
                                                         0>, VEX_4V;
  defm VPMAXUW   : SS41I_binop_rm_int<0x3E, "vpmaxuw",   int_x86_sse41_pmaxuw,
                                                         0>, VEX_4V;
  defm VPMULDQ   : SS41I_binop_rm_int<0x28, "vpmuldq",   int_x86_sse41_pmuldq,
                                                         0>, VEX_4V;

  def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
            (VPCMPEQQrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
            (VPCMPEQQrm VR128:$src1, addr:$src2)>;
}
let Constraints = "$src1 = $dst" in {
  let isCommutable = 0 in
  defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
  defm PCMPEQQ  : SS41I_binop_rm_int<0x29, "pcmpeqq",  int_x86_sse41_pcmpeqq>;
  defm PMINSB   : SS41I_binop_rm_int<0x38, "pminsb",   int_x86_sse41_pminsb>;
  defm PMINSD   : SS41I_binop_rm_int<0x39, "pminsd",   int_x86_sse41_pminsd>;
  defm PMINUD   : SS41I_binop_rm_int<0x3B, "pminud",   int_x86_sse41_pminud>;
  defm PMINUW   : SS41I_binop_rm_int<0x3A, "pminuw",   int_x86_sse41_pminuw>;
  defm PMAXSB   : SS41I_binop_rm_int<0x3C, "pmaxsb",   int_x86_sse41_pmaxsb>;
  defm PMAXSD   : SS41I_binop_rm_int<0x3D, "pmaxsd",   int_x86_sse41_pmaxsd>;
  defm PMAXUD   : SS41I_binop_rm_int<0x3F, "pmaxud",   int_x86_sse41_pmaxud>;
  defm PMAXUW   : SS41I_binop_rm_int<0x3E, "pmaxuw",   int_x86_sse41_pmaxuw>;
  defm PMULDQ   : SS41I_binop_rm_int<0x28, "pmuldq",   int_x86_sse41_pmuldq>;
}

def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
          (PCMPEQQrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
          (PCMPEQQrm VR128:$src1, addr:$src2)>;
/// SS48I_binop_rm - Simple SSE41 binary operator.
multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                          ValueType OpVT, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
       OpSize;
  def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (OpNode VR128:$src1,
                                 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
       OpSize;
}

let Predicates = [HasAVX] in
  defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
let Constraints = "$src1 = $dst" in
  defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
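// Editorial design note: unlike the widening SSE2 pmuludq, pmulld is a true
// v4i32 low-half multiply, which is why it can implement the generic "mul"
// SDNode directly above.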
/// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
                 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
                 X86MemOperand x86memop, bit Is2Addr = 1> {
  let isCommutable = 1 in
  def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
        (ins RC:$src1, RC:$src2, u32u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
        OpSize;
  def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
        (ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
        !if(Is2Addr,
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
            !strconcat(OpcodeStr,
                "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
        [(set RC:$dst,
          (IntId RC:$src1,
           (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
        OpSize;
}
let Predicates = [HasAVX] in {
  let isCommutable = 0 in {
  defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
                                      VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
                                      VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
            int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
  defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
            int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
  defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
                                      VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
                                      VR128, memopv16i8, i128mem, 0>, VEX_4V;
  }
  defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
                                   VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
                                   VR128, memopv16i8, i128mem, 0>, VEX_4V;
  defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
                                    VR256, memopv32i8, i256mem, 0>, VEX_4V;
}
let Constraints = "$src1 = $dst" in {
  let isCommutable = 0 in {
  defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
                                     VR128, memopv16i8, i128mem>;
  defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
                                     VR128, memopv16i8, i128mem>;
  defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
                                     VR128, memopv16i8, i128mem>;
  defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
                                     VR128, memopv16i8, i128mem>;
  }
  defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
                                  VR128, memopv16i8, i128mem>;
  defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
                                  VR128, memopv16i8, i128mem>;
}
/// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operators
let Predicates = [HasAVX] in {
multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
                                    RegisterClass RC, X86MemOperand x86memop,
                                    PatFrag mem_frag, Intrinsic IntId> {
  def rr : I<opc, MRMSrcReg, (outs RC:$dst),
                  (ins RC:$src1, RC:$src2, RC:$src3),
                  !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                  [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
                  SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;

  def rm : I<opc, MRMSrcMem, (outs RC:$dst),
                  (ins RC:$src1, x86memop:$src2, RC:$src3),
                  !strconcat(OpcodeStr,
                    "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                  [(set RC:$dst,
                        (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
                               RC:$src3))],
                  SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
}

defm VBLENDVPD  : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_blendvpd>;
defm VBLENDVPS  : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_blendvps>;
defm VPBLENDVB  : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
                                           memopv16i8, int_x86_sse41_pblendvb>;
defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
                                          memopv32i8, int_x86_avx_blendv_pd_256>;
defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
                                          memopv32i8, int_x86_avx_blendv_ps_256>;
}
/// SS41I_ternary_int - SSE 4.1 ternary operator
let Uses = [XMM0], Constraints = "$src1 = $dst" in {
  multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
    def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2),
                    !strconcat(OpcodeStr,
                     "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
                    OpSize;

    def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2),
                    !strconcat(OpcodeStr,
                     "\t{$src2, $dst|$dst, $src2}"),
                    [(set VR128:$dst,
                      (IntId VR128:$src1,
                       (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
  }
}

defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;

def : Pat<(X86pblendv VR128:$src1, VR128:$src2, XMM0),
          (PBLENDVBrr0 VR128:$src1, VR128:$src2)>;
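// Editorial semantics note (for reference): the blendv forms pick each
// element from $src2 when the sign bit of the corresponding element of the
// implicit XMM0 mask is set, and keep the $src1 element otherwise.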
let Predicates = [HasAVX] in
def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                        "vmovntdqa\t{$src, $dst|$dst, $src}",
                        [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                        OpSize, VEX;
def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
                       "movntdqa\t{$src, $dst|$dst, $src}",
                       [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
                       OpSize;
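// Editorial usage sketch (illustrative): movntdqa is _mm_stream_load_si128,
// an aligned 16-byte load with a non-temporal hint, mainly profitable when
// reading from write-combining memory.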
//===----------------------------------------------------------------------===//
// SSE4.2 - Compare Instructions
//===----------------------------------------------------------------------===//

/// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
                              Intrinsic IntId128, bit Is2Addr = 1> {
  def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       OpSize;
  def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}

let Predicates = [HasAVX] in {
  defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
                                     0>, VEX_4V;

  def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
            (VPCMPGTQrr VR128:$src1, VR128:$src2)>;
  def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
            (VPCMPGTQrm VR128:$src1, addr:$src2)>;
}

let Constraints = "$src1 = $dst" in
  defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;

def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
          (PCMPGTQrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
          (PCMPGTQrm VR128:$src1, addr:$src2)>;
//===----------------------------------------------------------------------===//
// SSE4.2 - String/text Processing Instructions
//===----------------------------------------------------------------------===//

// Packed Compare Implicit Length Strings, Return Mask
multiclass pseudo_pcmpistrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src2, i8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
                                                  imm:$src3))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
    [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
                       VR128:$src1, (load addr:$src2), imm:$src3))]>;
}

let Defs = [EFLAGS], usesCustomInserter = 1 in {
  defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
  defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
}
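// Editorial design note: the pseudos exist because the real instruction
// defines both XMM0 and EFLAGS; the custom inserter emits the real pcmpistrm
// and then copies XMM0 into the virtual $dst register.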
let Defs = [XMM0, EFLAGS], Predicates = [HasAVX] in {
  def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
  def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
}

let Defs = [XMM0, EFLAGS] in {
  def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
  def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
}
// Packed Compare Explicit Length Strings, Return Mask
multiclass pseudo_pcmpestrm<string asm> {
  def REG : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, VR128:$src3, i8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                       VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
  def MEM : PseudoI<(outs VR128:$dst),
                    (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
    [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
                       VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
}

let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
  defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
  defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
}
let Predicates = [HasAVX],
    Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
  def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
  def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
}

let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
  def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
  def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
}
// Packed Compare Implicit Length Strings, Return Index
let Defs = [ECX, EFLAGS] in {
  multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
    def rr : SS42AI<0x63, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src2, i8imm:$src3),
      !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
      [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
       (implicit EFLAGS)]>, OpSize;
    def rm : SS42AI<0x63, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
      !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
      [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
       (implicit EFLAGS)]>, OpSize;
  }
}

let Predicates = [HasAVX] in {
defm VPCMPISTRI  : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
                                    VEX;
defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
                                    VEX;
}

defm PCMPISTRI  : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
// Packed Compare Explicit Length Strings, Return Index
let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
  multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
    def rr : SS42AI<0x61, MRMSrcReg, (outs),
      (ins VR128:$src1, VR128:$src3, i8imm:$src5),
      !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
      [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
       (implicit EFLAGS)]>, OpSize;
    def rm : SS42AI<0x61, MRMSrcMem, (outs),
      (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
      !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
      [(set ECX,
            (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
       (implicit EFLAGS)]>, OpSize;
  }
}

let Predicates = [HasAVX] in {
defm VPCMPESTRI  : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
                                    VEX;
defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
                                    VEX;
}

defm PCMPESTRI  : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
//===----------------------------------------------------------------------===//
// SSE4.2 - CRC Instructions
//===----------------------------------------------------------------------===//

// No CRC instructions have AVX equivalents

// crc intrinsic instruction
// These instructions have only rm forms; the only difference is the size
// of r and m.
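// Editorial background (for reference): crc32 accumulates CRC-32C (the
// Castagnoli polynomial 0x11EDC6F41, as used by iSCSI), not zlib's CRC-32;
// the C-level entry points are _mm_crc32_u8/u16/u32/u64.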
let Constraints = "$src1 = $dst" in {
  def CRC32r32m8  : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i8mem:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_8 GR32:$src1,
                         (load addr:$src2)))]>;
  def CRC32r32r8  : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR8:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_8 GR32:$src1, GR8:$src2))]>;
  def CRC32r32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i16mem:$src2),
                      "crc32{w} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_16 GR32:$src1,
                         (load addr:$src2)))]>,
                         OpSize;
  def CRC32r32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR16:$src2),
                      "crc32{w} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_16 GR32:$src1, GR16:$src2))]>,
                         OpSize;
  def CRC32r32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
                      (ins GR32:$src1, i32mem:$src2),
                      "crc32{l} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_32 GR32:$src1,
                         (load addr:$src2)))]>;
  def CRC32r32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
                      (ins GR32:$src1, GR32:$src2),
                      "crc32{l} \t{$src2, $src1|$src1, $src2}",
                       [(set GR32:$dst,
                         (int_x86_sse42_crc32_32_32 GR32:$src1, GR32:$src2))]>;
  def CRC32r64m8  : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
                      (ins GR64:$src1, i8mem:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_8 GR64:$src1,
                         (load addr:$src2)))]>,
                         REX_W;
  def CRC32r64r8  : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
                      (ins GR64:$src1, GR8:$src2),
                      "crc32{b} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_8 GR64:$src1, GR8:$src2))]>,
                         REX_W;
  def CRC32r64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
                      (ins GR64:$src1, i64mem:$src2),
                      "crc32{q} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_64 GR64:$src1,
                         (load addr:$src2)))]>,
                         REX_W;
  def CRC32r64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
                      (ins GR64:$src1, GR64:$src2),
                      "crc32{q} \t{$src2, $src1|$src1, $src2}",
                       [(set GR64:$dst,
                         (int_x86_sse42_crc32_64_64 GR64:$src1, GR64:$src2))]>,
                         REX_W;
}
//===----------------------------------------------------------------------===//
// AES-NI Instructions
//===----------------------------------------------------------------------===//

multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
                             Intrinsic IntId128, bit Is2Addr = 1> {
  def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
       (ins VR128:$src1, VR128:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
       OpSize;
  def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
       (ins VR128:$src1, i128mem:$src2),
       !if(Is2Addr,
           !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
           !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
       [(set VR128:$dst,
         (IntId128 VR128:$src1,
          (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
}

// Perform One Round of an AES Encryption/Decryption Flow
let Predicates = [HasAVX, HasAES] in {
  defm VAESENC     : AESI_binop_rm_int<0xDC, "vaesenc",
                       int_x86_aesni_aesenc, 0>, VEX_4V;
  defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
                       int_x86_aesni_aesenclast, 0>, VEX_4V;
  defm VAESDEC     : AESI_binop_rm_int<0xDE, "vaesdec",
                       int_x86_aesni_aesdec, 0>, VEX_4V;
  defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
                       int_x86_aesni_aesdeclast, 0>, VEX_4V;
}

let Constraints = "$src1 = $dst" in {
  defm AESENC     : AESI_binop_rm_int<0xDC, "aesenc",
                      int_x86_aesni_aesenc>;
  defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
                      int_x86_aesni_aesenclast>;
  defm AESDEC     : AESI_binop_rm_int<0xDE, "aesdec",
                      int_x86_aesni_aesdec>;
  defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
                      int_x86_aesni_aesdeclast>;
}
def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
          (AESENCrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
          (AESENCrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
          (AESENCLASTrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
          (AESENCLASTrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
          (AESDECrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
          (AESDECrm VR128:$src1, addr:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
          (AESDECLASTrr VR128:$src1, VR128:$src2)>;
def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
          (AESDECLASTrm VR128:$src1, addr:$src2)>;
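// Editorial background (for reference): aesenc performs one full round
// (ShiftRows, SubBytes, MixColumns, AddRoundKey) and aesenclast the final
// round without MixColumns; the C entry points are _mm_aesenc_si128 and
// friends.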
// Perform the AES InvMixColumn Transformation
let Predicates = [HasAVX, HasAES] in {
  def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst,
        (int_x86_aesni_aesimc VR128:$src1))]>,
      OpSize, VEX;
  def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1),
      "vaesimc\t{$src1, $dst|$dst, $src1}",
      [(set VR128:$dst,
        (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
      OpSize, VEX;
}
def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
  (ins VR128:$src1),
  "aesimc\t{$src1, $dst|$dst, $src1}",
  [(set VR128:$dst,
    (int_x86_aesni_aesimc VR128:$src1))]>,
  OpSize;
def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
  (ins i128mem:$src1),
  "aesimc\t{$src1, $dst|$dst, $src1}",
  [(set VR128:$dst,
    (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
  OpSize;
// AES Round Key Generation Assist
let Predicates = [HasAVX, HasAES] in {
  def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
      (ins VR128:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
      OpSize, VEX;
  def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
      (ins i128mem:$src1, i8imm:$src2),
      "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set VR128:$dst,
        (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
                                        imm:$src2))]>,
      OpSize, VEX;
}
def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
  (ins VR128:$src1, i8imm:$src2),
  "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
  [(set VR128:$dst,
    (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
  OpSize;
def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
  (ins i128mem:$src1, i8imm:$src2),
  "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
  [(set VR128:$dst,
    (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
                                    imm:$src2))]>,
  OpSize;
//===----------------------------------------------------------------------===//
// CLMUL Instructions
//===----------------------------------------------------------------------===//

// Carry-less Multiplication instructions
let Constraints = "$src1 = $dst" in {
def PCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           []>;

def PCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
           []>;
}

// AVX carry-less Multiplication instructions
def VPCLMULQDQrr : AVXCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           []>;

def VPCLMULQDQrm : AVXCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
           "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
           []>;
multiclass pclmul_alias<string asm, int immop> {
  def : InstAlias<!strconcat("pclmul", asm,
                             "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrr VR128:$dst, VR128:$src, immop)>;

  def : InstAlias<!strconcat("pclmul", asm,
                             "dq {$src, $dst|$dst, $src}"),
                  (PCLMULQDQrm VR128:$dst, i128mem:$src, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop)>;

  def : InstAlias<!strconcat("vpclmul", asm,
                             "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
                  (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop)>;
}
defm : pclmul_alias<"hqhq", 0x11>;
defm : pclmul_alias<"hqlq", 0x01>;
defm : pclmul_alias<"lqhq", 0x10>;
defm : pclmul_alias<"lqlq", 0x00>;
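// Editorial note on the immediate (assumption from the ISA manual): bit 0
// selects the quadword of the first source and bit 4 that of the second,
// which is how the lq/hq alias names above map onto 0x00-0x11.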
//===----------------------------------------------------------------------===//
// AVX Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VBROADCAST - Load from memory and broadcast to all elements of the
//              destination operand
//
class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
                    X86MemOperand x86memop, Intrinsic Int> :
  AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
        !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
        [(set RC:$dst, (Int addr:$src))]>, VEX;

def VBROADCASTSS   : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
                                   int_x86_avx_vbroadcastss>;
def VBROADCASTSSY  : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
                                   int_x86_avx_vbroadcastss_256>;
def VBROADCASTSD   : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
                                   int_x86_avx_vbroadcast_sd_256>;
def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
                                   int_x86_avx_vbroadcastf128_pd_256>;

def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
          (VBROADCASTF128 addr:$src)>;

def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
          (VBROADCASTSSY addr:$src)>;
def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
          (VBROADCASTSD addr:$src)>;
def : Pat<(v8f32 (X86VBroadcast (loadf32 addr:$src))),
          (VBROADCASTSSY addr:$src)>;
def : Pat<(v4f64 (X86VBroadcast (loadf64 addr:$src))),
          (VBROADCASTSD addr:$src)>;

def : Pat<(v4f32 (X86VBroadcast (loadf32 addr:$src))),
          (VBROADCASTSS addr:$src)>;
def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
          (VBROADCASTSS addr:$src)>;
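// Editorial usage sketch (illustrative): _mm256_broadcast_ss(p) becomes
// "vbroadcastss (%rax), %ymm0"; note the AVX1 forms are memory-only, which
// is why every def above takes an x86memop source.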
//===----------------------------------------------------------------------===//
// VINSERTF128 - Insert packed floating-point values
//
def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR128:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
          "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;

def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
          (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;

def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
                                   (i32 imm)),
          (VINSERTF128rr VR256:$src1, VR128:$src2,
                         (INSERT_get_vinsertf128_imm VR256:$ins))>;
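// Editorial note (for reference): only bit 0 of the vinsertf128 immediate is
// meaningful - it selects whether $src2 replaces the low or high 128-bit
// half of $src1.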
//===----------------------------------------------------------------------===//
// VEXTRACTF128 - Extract packed floating-point values
//
def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
          (ins VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;
def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
          (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
          "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
          []>, VEX;

def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
          (VEXTRACTF128rr VR256:$src1, imm:$src2)>;

def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v4f32 (VEXTRACTF128rr
                    (v8f32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v2f64 (VEXTRACTF128rr
                    (v4f64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v4i32 (VEXTRACTF128rr
                    (v8i32 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v2i64 (VEXTRACTF128rr
                    (v4i64 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v8i16 (VEXTRACTF128rr
                    (v16i16 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
          (v16i8 (VEXTRACTF128rr
                    (v32i8 VR256:$src1),
                    (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
//===----------------------------------------------------------------------===//
// VMASKMOV - Conditional SIMD Packed Loads and Stores
//
multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
                          Intrinsic IntLd, Intrinsic IntLd256,
                          Intrinsic IntSt, Intrinsic IntSt256,
                          PatFrag pf128, PatFrag pf256> {
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
             (ins VR128:$src1, f128mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
             VEX_4V;
  def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
             (ins VR256:$src1, f256mem:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
             VEX_4V;
  def mr  : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f128mem:$dst, VR128:$src1, VR128:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
  def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
             (ins f256mem:$dst, VR256:$src1, VR256:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
}

defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
                                 int_x86_avx_maskload_ps,
                                 int_x86_avx_maskload_ps_256,
                                 int_x86_avx_maskstore_ps,
                                 int_x86_avx_maskstore_ps_256,
                                 memopv4f32, memopv8f32>;
defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
                                 int_x86_avx_maskload_pd,
                                 int_x86_avx_maskload_pd_256,
                                 int_x86_avx_maskstore_pd,
                                 int_x86_avx_maskstore_pd_256,
                                 memopv2f64, memopv4f64>;
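// Editorial semantics note (for reference): each element is loaded or stored
// only when the sign bit of the matching mask element in $src1 is set;
// masked-off load elements become zero and masked-off store lanes are left
// untouched in memory.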
//===----------------------------------------------------------------------===//
// VPERMIL - Permute Single and Double Floating-Point Values
//
multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
                      RegisterClass RC, X86MemOperand x86memop_f,
                      X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
                      Intrinsic IntVar, Intrinsic IntImm> {
  def rr  : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, RC:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
  def rm  : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
             (ins RC:$src1, x86memop_i:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;

  def ri  : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
             (ins RC:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
  def mi  : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
             (ins x86memop_f:$src1, i8imm:$src2),
             !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
             [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
}

defm VPERMILPS  : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
                             memopv4f32, memopv4i32,
                             int_x86_avx_vpermilvar_ps,
                             int_x86_avx_vpermil_ps>;
defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
                             memopv8f32, memopv8i32,
                             int_x86_avx_vpermilvar_ps_256,
                             int_x86_avx_vpermil_ps_256>;
defm VPERMILPD  : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
                             memopv2f64, memopv2i64,
                             int_x86_avx_vpermilvar_pd,
                             int_x86_avx_vpermil_pd>;
defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
                             memopv4f64, memopv4i64,
                             int_x86_avx_vpermilvar_pd_256,
                             int_x86_avx_vpermil_pd_256>;

def : Pat<(v8f32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPSYri VR256:$src1, imm:$imm)>;
def : Pat<(v4f64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPDYri VR256:$src1, imm:$imm)>;
def : Pat<(v8i32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPSYri VR256:$src1, imm:$imm)>;
def : Pat<(v4i64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
          (VPERMILPDYri VR256:$src1, imm:$imm)>;
//===----------------------------------------------------------------------===//
// VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
//
def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
          (ins VR256:$src1, VR256:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;
def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
          (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
          "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
          []>, VEX_4V;

def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;

def : Pat<(int_x86_avx_vperm2f128_ps_256
                  VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_pd_256
                  VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
def : Pat<(int_x86_avx_vperm2f128_si_256
                  VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
          (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;

def : Pat<(v8f32 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v8i32 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4i64 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v4f64 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v32i8 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
def : Pat<(v16i16 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
          (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
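// Editorial summary of the immediate (for reference): bits [1:0] pick which
// source half supplies the low 128 bits of the result and bits [5:4] the
// high 128 bits; bits 3 and 7 zero the corresponding half instead.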
//===----------------------------------------------------------------------===//
// VZERO - Zero YMM registers
//
let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
            YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
  // Zero All YMM registers
  def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
                  [(int_x86_avx_vzeroall)]>, TB, VEX, VEX_L, Requires<[HasAVX]>;

  // Zero Upper bits of YMM registers
  def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
                     [(int_x86_avx_vzeroupper)]>, TB, VEX, Requires<[HasAVX]>;
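  // Editorial usage note: compilers emit vzeroupper before calling legacy SSE
  // code - it clears only the upper YMM state (unlike vzeroall) and so avoids
  // AVX-to-SSE transition penalties.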