1 //====- X86InstrSSE.td - Describe the X86 Instruction Set --*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the X86 SSE instruction set, defining the instructions
11 // and the instruction properties that are needed for code generation, machine
12 // code emission, and analysis.
14 //===----------------------------------------------------------------------===//
17 //===----------------------------------------------------------------------===//
18 // SSE 1 & 2 Instruction Classes
19 //===----------------------------------------------------------------------===//
21 /// sse12_fp_scalar - SSE 1 & 2 scalar instruction class
22 multiclass sse12_fp_scalar<bits<8> opc, string OpcodeStr, SDNode OpNode,
23 RegisterClass RC, X86MemOperand x86memop,
25 let isCommutable = 1 in {
26 def rr : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
28 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
29 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
30 [(set RC:$dst, (OpNode RC:$src1, RC:$src2))]>;
32 def rm : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
34 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
35 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
36 [(set RC:$dst, (OpNode RC:$src1, (load addr:$src2)))]>;
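// Illustrative sketch only (not part of the build): a hypothetical
// instantiation of the class above for a scalar single-precision add could be
//   defm ADDSS_EXAMPLE : sse12_fp_scalar<0x58, "addss", fadd, FR32, f32mem>, XS;
// yielding ...rr and ...rm defs that use the two-address asm string when
// Is2Addr keeps its default of 1. The opcode, mnemonic and name here are
// assumptions chosen purely for illustration.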
39 /// sse12_fp_scalar_int - SSE 1 & 2 scalar instruction intrinsics class
40 multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
41 string asm, string SSEVer, string FPSizeStr,
42 Operand memopr, ComplexPattern mem_cpat,
44 def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
46 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
47 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
48 [(set RC:$dst, (!cast<Intrinsic>(
49 !strconcat("int_x86_sse", SSEVer, "_", OpcodeStr, FPSizeStr))
50 RC:$src1, RC:$src2))]>;
51 def rm_Int : SI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, memopr:$src2),
53 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
54 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
55 [(set RC:$dst, (!cast<Intrinsic>(!strconcat("int_x86_sse",
56 SSEVer, "_", OpcodeStr, FPSizeStr))
57 RC:$src1, mem_cpat:$src2))]>;
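// For reference, a sketch of how the intrinsic name is assembled above: with,
// e.g., SSEVer = "2", OpcodeStr = "add" and FPSizeStr = "_sd" (illustrative
// values only), the !strconcat yields "int_x86_sse2_add_sd" and
// !cast<Intrinsic> resolves it to the record for llvm.x86.sse2.add.sd.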
60 /// sse12_fp_packed - SSE 1 & 2 packed instruction class
61 multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
62 RegisterClass RC, ValueType vt,
63 X86MemOperand x86memop, PatFrag mem_frag,
64 Domain d, bit Is2Addr = 1> {
65 let isCommutable = 1 in
66 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
68 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
69 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
70 [(set RC:$dst, (vt (OpNode RC:$src1, RC:$src2)))], d>;
72 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
74 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
75 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
76 [(set RC:$dst, (OpNode RC:$src1, (mem_frag addr:$src2)))], d>;
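// Illustrative sketch only: a hypothetical instantiation of sse12_fp_packed
// for a packed single-precision add might look like
//   defm ADDPS_EXAMPLE : sse12_fp_packed<0x58, "addps", fadd, VR128, v4f32,
//                                        f128mem, memopv4f32, SSEPackedSingle>, TB;
// where the opcode, mnemonic, memory fragment and name are assumptions for
// illustration; the real instantiations appear later in this file.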
79 /// sse12_fp_packed_logical_rm - SSE 1 & 2 packed logical instruction class
80 multiclass sse12_fp_packed_logical_rm<bits<8> opc, RegisterClass RC, Domain d,
81 string OpcodeStr, X86MemOperand x86memop,
82 list<dag> pat_rr, list<dag> pat_rm,
84 let isCommutable = 1 in
85 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
87 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
88 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
90 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
92 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
93 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
97 /// sse12_fp_packed_int - SSE 1 & 2 packed instruction intrinsics class
98 multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
99 string asm, string SSEVer, string FPSizeStr,
100 X86MemOperand x86memop, PatFrag mem_frag,
101 Domain d, bit Is2Addr = 1> {
102 def rr_Int : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
104 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
105 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
106 [(set RC:$dst, (!cast<Intrinsic>(
107 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
108 RC:$src1, RC:$src2))], d>;
109 def rm_Int : PI<opc, MRMSrcMem, (outs RC:$dst), (ins RC:$src1,x86memop:$src2),
111 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
112 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
113 [(set RC:$dst, (!cast<Intrinsic>(
114 !strconcat("int_x86_", SSEVer, "_", OpcodeStr, FPSizeStr))
115 RC:$src1, (mem_frag addr:$src2)))], d>;
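// Note on the name assembly above: unlike the scalar intrinsics class, this
// one concatenates "int_x86_" directly with SSEVer, so SSEVer carries the full
// feature string. For example (illustrative values only), SSEVer = "sse2",
// OpcodeStr = "max" and FPSizeStr = "_pd" resolves to int_x86_sse2_max_pd.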
118 //===----------------------------------------------------------------------===//
119 // Non-instruction patterns
120 //===----------------------------------------------------------------------===//
122 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
123 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
125 // Implicitly promote a 32-bit scalar to a vector.
126 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
127 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
128 def : Pat<(v8f32 (scalar_to_vector FR32:$src)),
129 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;
130 // Implicitly promote a 64-bit scalar to a vector.
131 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
132 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
133 def : Pat<(v4f64 (scalar_to_vector FR64:$src)),
134 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;
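// A sketch of the IR these promotion patterns cover (illustrative only):
//   %v = insertelement <4 x float> undef, float %x, i32 0
// is selected as a scalar_to_vector node and becomes an INSERT_SUBREG of the
// FR32 value into an IMPLICIT_DEF vector register, so typically no instruction
// is emitted for the promotion itself.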
136 // Bitcasts between 128-bit vector types. Return the original type since
137 // no instruction is needed for the conversion
138 let Predicates = [HasXMMInt] in {
139 def : Pat<(v2i64 (bitconvert (v4i32 VR128:$src))), (v2i64 VR128:$src)>;
140 def : Pat<(v2i64 (bitconvert (v8i16 VR128:$src))), (v2i64 VR128:$src)>;
141 def : Pat<(v2i64 (bitconvert (v16i8 VR128:$src))), (v2i64 VR128:$src)>;
142 def : Pat<(v2i64 (bitconvert (v2f64 VR128:$src))), (v2i64 VR128:$src)>;
143 def : Pat<(v2i64 (bitconvert (v4f32 VR128:$src))), (v2i64 VR128:$src)>;
144 def : Pat<(v4i32 (bitconvert (v2i64 VR128:$src))), (v4i32 VR128:$src)>;
145 def : Pat<(v4i32 (bitconvert (v8i16 VR128:$src))), (v4i32 VR128:$src)>;
146 def : Pat<(v4i32 (bitconvert (v16i8 VR128:$src))), (v4i32 VR128:$src)>;
147 def : Pat<(v4i32 (bitconvert (v2f64 VR128:$src))), (v4i32 VR128:$src)>;
148 def : Pat<(v4i32 (bitconvert (v4f32 VR128:$src))), (v4i32 VR128:$src)>;
149 def : Pat<(v8i16 (bitconvert (v2i64 VR128:$src))), (v8i16 VR128:$src)>;
150 def : Pat<(v8i16 (bitconvert (v4i32 VR128:$src))), (v8i16 VR128:$src)>;
151 def : Pat<(v8i16 (bitconvert (v16i8 VR128:$src))), (v8i16 VR128:$src)>;
152 def : Pat<(v8i16 (bitconvert (v2f64 VR128:$src))), (v8i16 VR128:$src)>;
153 def : Pat<(v8i16 (bitconvert (v4f32 VR128:$src))), (v8i16 VR128:$src)>;
154 def : Pat<(v16i8 (bitconvert (v2i64 VR128:$src))), (v16i8 VR128:$src)>;
155 def : Pat<(v16i8 (bitconvert (v4i32 VR128:$src))), (v16i8 VR128:$src)>;
156 def : Pat<(v16i8 (bitconvert (v8i16 VR128:$src))), (v16i8 VR128:$src)>;
157 def : Pat<(v16i8 (bitconvert (v2f64 VR128:$src))), (v16i8 VR128:$src)>;
158 def : Pat<(v16i8 (bitconvert (v4f32 VR128:$src))), (v16i8 VR128:$src)>;
159 def : Pat<(v4f32 (bitconvert (v2i64 VR128:$src))), (v4f32 VR128:$src)>;
160 def : Pat<(v4f32 (bitconvert (v4i32 VR128:$src))), (v4f32 VR128:$src)>;
161 def : Pat<(v4f32 (bitconvert (v8i16 VR128:$src))), (v4f32 VR128:$src)>;
162 def : Pat<(v4f32 (bitconvert (v16i8 VR128:$src))), (v4f32 VR128:$src)>;
163 def : Pat<(v4f32 (bitconvert (v2f64 VR128:$src))), (v4f32 VR128:$src)>;
164 def : Pat<(v2f64 (bitconvert (v2i64 VR128:$src))), (v2f64 VR128:$src)>;
165 def : Pat<(v2f64 (bitconvert (v4i32 VR128:$src))), (v2f64 VR128:$src)>;
166 def : Pat<(v2f64 (bitconvert (v8i16 VR128:$src))), (v2f64 VR128:$src)>;
167 def : Pat<(v2f64 (bitconvert (v16i8 VR128:$src))), (v2f64 VR128:$src)>;
168 def : Pat<(v2f64 (bitconvert (v4f32 VR128:$src))), (v2f64 VR128:$src)>;
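// A sketch of what these patterns handle (illustrative only): IR such as
//   %w = bitcast <4 x i32> %v to <2 x i64>
// matches the first v2i64 pattern above and is folded away entirely, since
// both types live in the same VR128 register.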
171 // Bitcasts between 256-bit vector types. Return the original type since
172 // no instruction is needed for the conversion
173 let Predicates = [HasAVX] in {
174 def : Pat<(v4f64 (bitconvert (v8f32 VR256:$src))), (v4f64 VR256:$src)>;
175 def : Pat<(v4f64 (bitconvert (v8i32 VR256:$src))), (v4f64 VR256:$src)>;
176 def : Pat<(v4f64 (bitconvert (v4i64 VR256:$src))), (v4f64 VR256:$src)>;
177 def : Pat<(v4f64 (bitconvert (v16i16 VR256:$src))), (v4f64 VR256:$src)>;
178 def : Pat<(v4f64 (bitconvert (v32i8 VR256:$src))), (v4f64 VR256:$src)>;
179 def : Pat<(v8f32 (bitconvert (v8i32 VR256:$src))), (v8f32 VR256:$src)>;
180 def : Pat<(v8f32 (bitconvert (v4i64 VR256:$src))), (v8f32 VR256:$src)>;
181 def : Pat<(v8f32 (bitconvert (v4f64 VR256:$src))), (v8f32 VR256:$src)>;
182 def : Pat<(v8f32 (bitconvert (v32i8 VR256:$src))), (v8f32 VR256:$src)>;
183 def : Pat<(v8f32 (bitconvert (v16i16 VR256:$src))), (v8f32 VR256:$src)>;
184 def : Pat<(v4i64 (bitconvert (v8f32 VR256:$src))), (v4i64 VR256:$src)>;
185 def : Pat<(v4i64 (bitconvert (v8i32 VR256:$src))), (v4i64 VR256:$src)>;
186 def : Pat<(v4i64 (bitconvert (v4f64 VR256:$src))), (v4i64 VR256:$src)>;
187 def : Pat<(v4i64 (bitconvert (v32i8 VR256:$src))), (v4i64 VR256:$src)>;
188 def : Pat<(v4i64 (bitconvert (v16i16 VR256:$src))), (v4i64 VR256:$src)>;
189 def : Pat<(v32i8 (bitconvert (v4f64 VR256:$src))), (v32i8 VR256:$src)>;
190 def : Pat<(v32i8 (bitconvert (v4i64 VR256:$src))), (v32i8 VR256:$src)>;
191 def : Pat<(v32i8 (bitconvert (v8f32 VR256:$src))), (v32i8 VR256:$src)>;
192 def : Pat<(v32i8 (bitconvert (v8i32 VR256:$src))), (v32i8 VR256:$src)>;
193 def : Pat<(v32i8 (bitconvert (v16i16 VR256:$src))), (v32i8 VR256:$src)>;
194 def : Pat<(v8i32 (bitconvert (v32i8 VR256:$src))), (v8i32 VR256:$src)>;
195 def : Pat<(v8i32 (bitconvert (v16i16 VR256:$src))), (v8i32 VR256:$src)>;
196 def : Pat<(v8i32 (bitconvert (v8f32 VR256:$src))), (v8i32 VR256:$src)>;
197 def : Pat<(v8i32 (bitconvert (v4i64 VR256:$src))), (v8i32 VR256:$src)>;
198 def : Pat<(v8i32 (bitconvert (v4f64 VR256:$src))), (v8i32 VR256:$src)>;
199 def : Pat<(v16i16 (bitconvert (v8f32 VR256:$src))), (v16i16 VR256:$src)>;
200 def : Pat<(v16i16 (bitconvert (v8i32 VR256:$src))), (v16i16 VR256:$src)>;
201 def : Pat<(v16i16 (bitconvert (v4i64 VR256:$src))), (v16i16 VR256:$src)>;
202 def : Pat<(v16i16 (bitconvert (v4f64 VR256:$src))), (v16i16 VR256:$src)>;
203 def : Pat<(v16i16 (bitconvert (v32i8 VR256:$src))), (v16i16 VR256:$src)>;
206 //===----------------------------------------------------------------------===//
207 // AVX & SSE - Zero/One Vectors
208 //===----------------------------------------------------------------------===//
210 // Alias instructions that map zero vector to pxor / xorp* for sse.
211 // We set canFoldAsLoad because this can be converted to a constant-pool
212 // load of an all-zeros value if folding it would be beneficial.
213 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
214 // JIT implementation, which does not expand the instructions below like
215 // X86MCInstLower does.
216 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
217 isCodeGenOnly = 1 in {
218 def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
219 [(set VR128:$dst, (v4f32 immAllZerosV))]>;
220 def V_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
221 [(set VR128:$dst, (v2f64 immAllZerosV))]>;
222 let ExeDomain = SSEPackedInt in
223 def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
224 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
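// A sketch of how these are used (illustrative only): materializing a v4f32
// all-zeros constant selects V_SET0PS, and X86MCInstLower later expands it to
// a real "xorps %xmm0, %xmm0" (register chosen arbitrarily here); as the FIXME
// notes, the old JIT path does not perform this expansion.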
227 // The same as above, but for AVX. The 128-bit versions are the same, just
228 // re-encoded. There is no 256-bit PI version, and none is needed: on Sandy
229 // Bridge the register is set to zero at the rename stage without using any
230 // execution unit, so SET0PSY and SET0PDY can be used for vector integer
231 // instructions without penalty.
232 // FIXME: Change encoding to pseudo! This is blocked right now by the x86
233 // JIT implementation, which does not expand the instructions below like
234 // X86MCInstLower does.
235 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
236 isCodeGenOnly = 1, Predicates = [HasAVX] in {
237 def AVX_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
238 [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V;
239 def AVX_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
240 [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V;
241 def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
242 [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V;
243 def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "",
244 [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V;
245 let ExeDomain = SSEPackedInt in
246 def AVX_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
247 [(set VR128:$dst, (v4i32 immAllZerosV))]>;
250 def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
251 def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
252 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
254 // AVX has no support for 256-bit integer instructions, but since the 128-bit
255 // VPXOR instruction writes zero to its upper part, it's safe to build zeros.
256 def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;
257 def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
258 (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;
260 def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
261 def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
262 (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
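// A sketch of the expansion above (illustrative only): a v8i32 all-zeros
// constant becomes (SUBREG_TO_REG 0, (AVX_SET0PI), sub_xmm), i.e. a 128-bit
// pxor-style zero placed in the low xmm half of the ymm register, relying on
// the fact noted above that the 128-bit VPXOR also zeroes the upper part.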
264 //===----------------------------------------------------------------------===//
265 // SSE 1 & 2 - Move Instructions
266 //===----------------------------------------------------------------------===//
268 class sse12_move_rr<RegisterClass RC, ValueType vt, string asm> :
269 SI<0x10, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, RC:$src2), asm,
270 [(set (vt VR128:$dst), (movl VR128:$src1, (scalar_to_vector RC:$src2)))]>;
272 // Loading from memory automatically zeroes the upper bits.
273 class sse12_move_rm<RegisterClass RC, X86MemOperand x86memop,
274 PatFrag mem_pat, string OpcodeStr> :
275 SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
276 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
277 [(set RC:$dst, (mem_pat addr:$src))]>;
279 // Move Instructions. Register-to-register movss/movsd is not used for FR32/64
280 // register copies because it's a partial register update; FsMOVAPSrr/FsMOVAPDrr
281 // is used instead. Register-to-register movss/movsd is not modeled as an
282 // INSERT_SUBREG because INSERT_SUBREG requires that the insert be implementable
283 // in terms of a copy, and, as just mentioned, we don't use movss/movsd for copies.
284 def VMOVSSrr : sse12_move_rr<FR32, v4f32,
285 "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS, VEX_4V;
286 def VMOVSDrr : sse12_move_rr<FR64, v2f64,
287 "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD, VEX_4V;
289 let canFoldAsLoad = 1, isReMaterializable = 1 in {
290 def VMOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS, VEX;
292 let AddedComplexity = 20 in
293 def VMOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD, VEX;
296 let Constraints = "$src1 = $dst" in {
297 def MOVSSrr : sse12_move_rr<FR32, v4f32,
298 "movss\t{$src2, $dst|$dst, $src2}">, XS;
299 def MOVSDrr : sse12_move_rr<FR64, v2f64,
300 "movsd\t{$src2, $dst|$dst, $src2}">, XD;
303 let canFoldAsLoad = 1, isReMaterializable = 1 in {
304 def MOVSSrm : sse12_move_rm<FR32, f32mem, loadf32, "movss">, XS;
306 let AddedComplexity = 20 in
307 def MOVSDrm : sse12_move_rm<FR64, f64mem, loadf64, "movsd">, XD;
310 let AddedComplexity = 15 in {
311 // Extract the low 32-bit value from one vector and insert it into another.
312 def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
313 (MOVSSrr (v4f32 VR128:$src1),
314 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
315 // Extract the low 64-bit value from one vector and insert it into another.
316 def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
317 (MOVSDrr (v2f64 VR128:$src1),
318 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
321 let AddedComplexity = 20 in {
322 let Predicates = [HasSSE1] in {
323 // MOVSSrm zeros the high parts of the register; represent this
324 // with SUBREG_TO_REG.
325 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
326 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
327 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
328 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
329 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
330 (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
332 let Predicates = [HasSSE2] in {
333 // MOVSDrm zeros the high parts of the register; represent this
334 // with SUBREG_TO_REG.
335 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
336 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
337 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
338 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
339 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
340 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
341 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
342 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
343 def : Pat<(v2f64 (X86vzload addr:$src)),
344 (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
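// A sketch of the IR shape these patterns cover (illustrative only):
//   %s = load double* %p
//   %v = insertelement <2 x double> zeroinitializer, double %s, i32 0
// becomes an X86vzmovl of a scalar_to_vector load, which the patterns above
// fold into a single MOVSDrm wrapped in SUBREG_TO_REG, relying on movsd's
// implicit zeroing of the upper half when loading from memory.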
348 let AddedComplexity = 20, Predicates = [HasAVX] in {
349 // MOVSSrm zeros the high parts of the register; represent this
350 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
351 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
352 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
353 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
354 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
355 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
356 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
357 // MOVSDrm zeros the high parts of the register; represent this
358 // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
359 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
360 (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
361 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
362 (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
363 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
364 (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
365 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
366 (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
367 def : Pat<(v2f64 (X86vzload addr:$src)),
368 (SUBREG_TO_REG (i64 0), (VMOVSDrm addr:$src), sub_sd)>;
369 // Represent the same patterns above but in the form they appear for 256-bit types.
371 def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
372 (v4f32 (scalar_to_vector (loadf32 addr:$src))), (i32 0)))),
373 (SUBREG_TO_REG (i32 0), (VMOVSSrm addr:$src), sub_ss)>;
374 def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
375 (v2f64 (scalar_to_vector (loadf64 addr:$src))), (i32 0)))),
376 (SUBREG_TO_REG (i32 0), (VMOVSDrm addr:$src), sub_sd)>;
379 // Store scalar value to memory.
380 def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
381 "movss\t{$src, $dst|$dst, $src}",
382 [(store FR32:$src, addr:$dst)]>;
383 def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
384 "movsd\t{$src, $dst|$dst, $src}",
385 [(store FR64:$src, addr:$dst)]>;
387 def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
388 "movss\t{$src, $dst|$dst, $src}",
389 [(store FR32:$src, addr:$dst)]>, XS, VEX;
390 def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
391 "movsd\t{$src, $dst|$dst, $src}",
392 [(store FR64:$src, addr:$dst)]>, XD, VEX;
394 // Extract and store.
395 def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
398 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
399 def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
402 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
404 // Move Aligned/Unaligned floating point values
405 multiclass sse12_mov_packed<bits<8> opc, RegisterClass RC,
406 X86MemOperand x86memop, PatFrag ld_frag,
407 string asm, Domain d,
408 bit IsReMaterializable = 1> {
409 let neverHasSideEffects = 1 in
410 def rr : PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
411 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>;
412 let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in
413 def rm : PI<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
414 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
415 [(set RC:$dst, (ld_frag addr:$src))], d>;
418 defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
419 "movaps", SSEPackedSingle>, TB, VEX;
420 defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
421 "movapd", SSEPackedDouble>, TB, OpSize, VEX;
422 defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
423 "movups", SSEPackedSingle>, TB, VEX;
424 defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
425 "movupd", SSEPackedDouble, 0>, TB, OpSize, VEX;
427 defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
428 "movaps", SSEPackedSingle>, TB, VEX;
429 defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
430 "movapd", SSEPackedDouble>, TB, OpSize, VEX;
431 defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
432 "movups", SSEPackedSingle>, TB, VEX;
433 defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
434 "movupd", SSEPackedDouble, 0>, TB, OpSize, VEX;
435 defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
436 "movaps", SSEPackedSingle>, TB;
437 defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
438 "movapd", SSEPackedDouble>, TB, OpSize;
439 defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
440 "movups", SSEPackedSingle>, TB;
441 defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
442 "movupd", SSEPackedDouble, 0>, TB, OpSize;
444 def VMOVAPSmr : VPSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
445 "movaps\t{$src, $dst|$dst, $src}",
446 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>, VEX;
447 def VMOVAPDmr : VPDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
448 "movapd\t{$src, $dst|$dst, $src}",
449 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>, VEX;
450 def VMOVUPSmr : VPSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
451 "movups\t{$src, $dst|$dst, $src}",
452 [(store (v4f32 VR128:$src), addr:$dst)]>, VEX;
453 def VMOVUPDmr : VPDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
454 "movupd\t{$src, $dst|$dst, $src}",
455 [(store (v2f64 VR128:$src), addr:$dst)]>, VEX;
456 def VMOVAPSYmr : VPSI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
457 "movaps\t{$src, $dst|$dst, $src}",
458 [(alignedstore (v8f32 VR256:$src), addr:$dst)]>, VEX;
459 def VMOVAPDYmr : VPDI<0x29, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
460 "movapd\t{$src, $dst|$dst, $src}",
461 [(alignedstore (v4f64 VR256:$src), addr:$dst)]>, VEX;
462 def VMOVUPSYmr : VPSI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
463 "movups\t{$src, $dst|$dst, $src}",
464 [(store (v8f32 VR256:$src), addr:$dst)]>, VEX;
465 def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src),
466 "movupd\t{$src, $dst|$dst, $src}",
467 [(store (v4f64 VR256:$src), addr:$dst)]>, VEX;
469 def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>;
470 def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
471 (VMOVUPSYmr addr:$dst, VR256:$src)>;
473 def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>;
474 def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
475 (VMOVUPDYmr addr:$dst, VR256:$src)>;
477 def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
478 "movaps\t{$src, $dst|$dst, $src}",
479 [(alignedstore (v4f32 VR128:$src), addr:$dst)]>;
480 def MOVAPDmr : PDI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
481 "movapd\t{$src, $dst|$dst, $src}",
482 [(alignedstore (v2f64 VR128:$src), addr:$dst)]>;
483 def MOVUPSmr : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
484 "movups\t{$src, $dst|$dst, $src}",
485 [(store (v4f32 VR128:$src), addr:$dst)]>;
486 def MOVUPDmr : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
487 "movupd\t{$src, $dst|$dst, $src}",
488 [(store (v2f64 VR128:$src), addr:$dst)]>;
490 // Intrinsic forms of MOVUPS/D load and store
491 def VMOVUPSmr_Int : VPSI<0x11, MRMDestMem, (outs),
492 (ins f128mem:$dst, VR128:$src),
493 "movups\t{$src, $dst|$dst, $src}",
494 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>, VEX;
495 def VMOVUPDmr_Int : VPDI<0x11, MRMDestMem, (outs),
496 (ins f128mem:$dst, VR128:$src),
497 "movupd\t{$src, $dst|$dst, $src}",
498 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>, VEX;
500 def MOVUPSmr_Int : PSI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
501 "movups\t{$src, $dst|$dst, $src}",
502 [(int_x86_sse_storeu_ps addr:$dst, VR128:$src)]>;
503 def MOVUPDmr_Int : PDI<0x11, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
504 "movupd\t{$src, $dst|$dst, $src}",
505 [(int_x86_sse2_storeu_pd addr:$dst, VR128:$src)]>;
507 // Move Low/High packed floating point values
508 multiclass sse12_mov_hilo_packed<bits<8>opc, RegisterClass RC,
509 PatFrag mov_frag, string base_opc,
511 def PSrm : PI<opc, MRMSrcMem,
512 (outs VR128:$dst), (ins VR128:$src1, f64mem:$src2),
513 !strconcat(base_opc, "s", asm_opr),
516 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
517 SSEPackedSingle>, TB;
519 def PDrm : PI<opc, MRMSrcMem,
520 (outs RC:$dst), (ins RC:$src1, f64mem:$src2),
521 !strconcat(base_opc, "d", asm_opr),
522 [(set RC:$dst, (v2f64 (mov_frag RC:$src1,
523 (scalar_to_vector (loadf64 addr:$src2)))))],
524 SSEPackedDouble>, TB, OpSize;
527 let AddedComplexity = 20 in {
528 defm VMOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
529 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
530 defm VMOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
531 "\t{$src2, $src1, $dst|$dst, $src1, $src2}">, VEX_4V;
533 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
534 defm MOVL : sse12_mov_hilo_packed<0x12, VR128, movlp, "movlp",
535 "\t{$src2, $dst|$dst, $src2}">;
536 defm MOVH : sse12_mov_hilo_packed<0x16, VR128, movlhps, "movhp",
537 "\t{$src2, $dst|$dst, $src2}">;
540 def VMOVLPSmr : VPSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
541 "movlps\t{$src, $dst|$dst, $src}",
542 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
543 (iPTR 0))), addr:$dst)]>, VEX;
544 def VMOVLPDmr : VPDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
545 "movlpd\t{$src, $dst|$dst, $src}",
546 [(store (f64 (vector_extract (v2f64 VR128:$src),
547 (iPTR 0))), addr:$dst)]>, VEX;
548 def MOVLPSmr : PSI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
549 "movlps\t{$src, $dst|$dst, $src}",
550 [(store (f64 (vector_extract (bc_v2f64 (v4f32 VR128:$src)),
551 (iPTR 0))), addr:$dst)]>;
552 def MOVLPDmr : PDI<0x13, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
553 "movlpd\t{$src, $dst|$dst, $src}",
554 [(store (f64 (vector_extract (v2f64 VR128:$src),
555 (iPTR 0))), addr:$dst)]>;
557 // v2f64 extract element 1 is always custom lowered to unpack high to low
558 // and extract element 0 so the non-store version isn't too horrible.
559 def VMOVHPSmr : VPSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
560 "movhps\t{$src, $dst|$dst, $src}",
561 [(store (f64 (vector_extract
562 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
563 (undef)), (iPTR 0))), addr:$dst)]>,
565 def VMOVHPDmr : VPDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
566 "movhpd\t{$src, $dst|$dst, $src}",
567 [(store (f64 (vector_extract
568 (v2f64 (unpckh VR128:$src, (undef))),
569 (iPTR 0))), addr:$dst)]>,
571 def MOVHPSmr : PSI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
572 "movhps\t{$src, $dst|$dst, $src}",
573 [(store (f64 (vector_extract
574 (unpckh (bc_v2f64 (v4f32 VR128:$src)),
575 (undef)), (iPTR 0))), addr:$dst)]>;
576 def MOVHPDmr : PDI<0x17, MRMDestMem, (outs), (ins f64mem:$dst, VR128:$src),
577 "movhpd\t{$src, $dst|$dst, $src}",
578 [(store (f64 (vector_extract
579 (v2f64 (unpckh VR128:$src, (undef))),
580 (iPTR 0))), addr:$dst)]>;
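// A sketch of what the comment above describes (illustrative only): for
//   %e = extractelement <2 x double> %v, i32 1
// the access to lane 1 is custom lowered to an unpckhpd (second operand undef)
// followed by an extract of lane 0; when the result is only stored, the
// MOVHPDmr pattern above folds that sequence into a single movhpd store.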
582 let AddedComplexity = 20 in {
583 def VMOVLHPSrr : VPSI<0x16, MRMSrcReg, (outs VR128:$dst),
584 (ins VR128:$src1, VR128:$src2),
585 "movlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
587 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>,
589 def VMOVHLPSrr : VPSI<0x12, MRMSrcReg, (outs VR128:$dst),
590 (ins VR128:$src1, VR128:$src2),
591 "movhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
593 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>,
596 let Constraints = "$src1 = $dst", AddedComplexity = 20 in {
597 def MOVLHPSrr : PSI<0x16, MRMSrcReg, (outs VR128:$dst),
598 (ins VR128:$src1, VR128:$src2),
599 "movlhps\t{$src2, $dst|$dst, $src2}",
601 (v4f32 (movlhps VR128:$src1, VR128:$src2)))]>;
602 def MOVHLPSrr : PSI<0x12, MRMSrcReg, (outs VR128:$dst),
603 (ins VR128:$src1, VR128:$src2),
604 "movhlps\t{$src2, $dst|$dst, $src2}",
606 (v4f32 (movhlps VR128:$src1, VR128:$src2)))]>;
609 let Predicates = [HasAVX] in {
611 def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
612 (VMOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
613 def : Pat<(X86Movlhps VR128:$src1,
614 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
615 (VMOVHPSrm VR128:$src1, addr:$src2)>;
616 def : Pat<(X86Movlhps VR128:$src1,
617 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
618 (VMOVHPSrm VR128:$src1, addr:$src2)>;
621 let AddedComplexity = 20 in {
622 def : Pat<(v4f32 (movddup VR128:$src, (undef))),
623 (VMOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
624 def : Pat<(v2i64 (movddup VR128:$src, (undef))),
625 (VMOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
627 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
628 def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
629 (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
631 def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
632 (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
633 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
634 (VMOVLHPSrr VR128:$src1, VR128:$src2)>;
635 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
636 (VMOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
639 let AddedComplexity = 20 in {
640 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
641 def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
642 (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
644 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
645 def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
646 (VMOVHLPSrr VR128:$src1, VR128:$src1)>;
647 def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
648 (VMOVHLPSrr VR128:$src1, VR128:$src1)>;
651 def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
652 (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
653 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
654 (VMOVHLPSrr VR128:$src1, VR128:$src2)>;
657 let Predicates = [HasSSE1] in {
659 def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
660 (MOVHPSrm (v4i32 VR128:$src1), addr:$src2)>;
661 def : Pat<(X86Movlhps VR128:$src1,
662 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
663 (MOVHPSrm VR128:$src1, addr:$src2)>;
664 def : Pat<(X86Movlhps VR128:$src1,
665 (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
666 (MOVHPSrm VR128:$src1, addr:$src2)>;
669 let AddedComplexity = 20 in {
670 def : Pat<(v4f32 (movddup VR128:$src, (undef))),
671 (MOVLHPSrr (v4f32 VR128:$src), (v4f32 VR128:$src))>;
672 def : Pat<(v2i64 (movddup VR128:$src, (undef))),
673 (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
675 // vector_shuffle v1, v2 <0, 1, 4, 5> using MOVLHPS
676 def : Pat<(v4i32 (movlhps VR128:$src1, VR128:$src2)),
677 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
679 def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)),
680 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
681 def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)),
682 (MOVLHPSrr VR128:$src1, VR128:$src2)>;
683 def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)),
684 (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>;
687 let AddedComplexity = 20 in {
688 // vector_shuffle v1, v2 <6, 7, 2, 3> using MOVHLPS
689 def : Pat<(v4i32 (movhlps VR128:$src1, VR128:$src2)),
690 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
692 // vector_shuffle v1, undef <2, ?, ?, ?> using MOVHLPS
693 def : Pat<(v4f32 (movhlps_undef VR128:$src1, (undef))),
694 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
695 def : Pat<(v4i32 (movhlps_undef VR128:$src1, (undef))),
696 (MOVHLPSrr VR128:$src1, VR128:$src1)>;
699 def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)),
700 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
701 def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)),
702 (MOVHLPSrr VR128:$src1, VR128:$src2)>;
705 //===----------------------------------------------------------------------===//
706 // SSE 1 & 2 - Conversion Instructions
707 //===----------------------------------------------------------------------===//
709 multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
710 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
712 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
713 [(set DstRC:$dst, (OpNode SrcRC:$src))]>;
714 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
715 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>;
718 multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
719 SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
720 string asm, Domain d> {
721 def rr : PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm,
722 [(set DstRC:$dst, (OpNode SrcRC:$src))], d>;
723 def rm : PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm,
724 [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))], d>;
727 multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
728 X86MemOperand x86memop, string asm> {
729 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
730 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
731 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
732 (ins DstRC:$src1, x86memop:$src),
733 !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>;
736 defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
737 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX;
738 defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
739 "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
741 defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
742 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
743 defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
744 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD,
747 // The assembler can recognize rr 64-bit instructions by seeing an rxx
748 // register, but the same isn't true when only memory operands are used, so
749 // we provide other assembly "l" and "q" forms to address this explicitly
750 // where appropriate.
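// A sketch of the ambiguity described above (illustrative AT&T asm):
//   vcvtsi2sd  %rax, %xmm1, %xmm0    (64-bit form inferred from the %rax operand)
//   vcvtsi2sdq (%rdi), %xmm1, %xmm0  (memory operand: the "q" suffix is needed)
// With only a memory operand the assembler cannot tell the 32-bit and 64-bit
// forms apart, hence the explicit {l} and {q} mnemonic variants below.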
751 defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS,
753 defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS,
755 defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD,
757 defm VCVTSI2SDL : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD,
759 defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD,
762 let Predicates = [HasAVX] in {
763 def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
764 (VCVTSI2SSrm (f32 (IMPLICIT_DEF)), addr:$src)>;
765 def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
766 (VCVTSI2SS64rm (f32 (IMPLICIT_DEF)), addr:$src)>;
767 def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
768 (VCVTSI2SDrm (f64 (IMPLICIT_DEF)), addr:$src)>;
769 def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
770 (VCVTSI2SD64rm (f64 (IMPLICIT_DEF)), addr:$src)>;
772 def : Pat<(f32 (sint_to_fp GR32:$src)),
773 (VCVTSI2SSrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
774 def : Pat<(f32 (sint_to_fp GR64:$src)),
775 (VCVTSI2SS64rr (f32 (IMPLICIT_DEF)), GR64:$src)>;
776 def : Pat<(f64 (sint_to_fp GR32:$src)),
777 (VCVTSI2SDrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
778 def : Pat<(f64 (sint_to_fp GR64:$src)),
779 (VCVTSI2SD64rr (f64 (IMPLICIT_DEF)), GR64:$src)>;
782 defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32,
783 "cvttss2si\t{$src, $dst|$dst, $src}">, XS;
784 defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32,
785 "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
786 defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64,
787 "cvttsd2si\t{$src, $dst|$dst, $src}">, XD;
788 defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64,
789 "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
790 defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32,
791 "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS;
792 defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64,
793 "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
794 defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32,
795 "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD;
796 defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64,
797 "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W;
799 // Conversion Instructions Intrinsics - Match intrinsics which expect MM
800 // and/or XMM operand(s).
802 multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
803 Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag,
805 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
806 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
807 [(set DstRC:$dst, (Int SrcRC:$src))]>;
808 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
809 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
810 [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>;
813 multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC,
814 RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop,
815 PatFrag ld_frag, string asm, bit Is2Addr = 1> {
816 def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2),
818 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
819 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
820 [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>;
821 def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
822 (ins DstRC:$src1, x86memop:$src2),
824 !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
825 !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
826 [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>;
829 defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
830 f128mem, load, "cvtsd2si">, XD, VEX;
831 defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
832 int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
835 // FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_
836 // in their names. Get rid of this hack or rename the intrinsics; there are
837 // several instructions that only match the intrinsic form, so why create
838 // duplicates just to let them be recognized by the assembler?
839 let Pattern = []<dag> in {
840 defm VCVTSD2SI : sse12_cvt_s<0x2D, FR64, GR32, undef, f64mem, load,
841 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
842 defm VCVTSD2SI64 : sse12_cvt_s<0x2D, FR64, GR64, undef, f64mem, load,
843 "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
845 defm CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
846 f128mem, load, "cvtsd2si{l}">, XD;
847 defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
848 f128mem, load, "cvtsd2si{q}">, XD, REX_W;
851 defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
852 int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
853 defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
854 int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
856 defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
857 int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
858 defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
859 int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
862 let Constraints = "$src1 = $dst" in {
863 defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
864 int_x86_sse_cvtsi2ss, i32mem, loadi32,
866 defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
867 int_x86_sse_cvtsi642ss, i64mem, loadi64,
868 "cvtsi2ss{q}">, XS, REX_W;
869 defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
870 int_x86_sse2_cvtsi2sd, i32mem, loadi32,
872 defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
873 int_x86_sse2_cvtsi642sd, i64mem, loadi64,
874 "cvtsi2sd">, XD, REX_W;
879 // Aliases for intrinsics
880 defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
881 f32mem, load, "cvttss2si">, XS, VEX;
882 defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
883 int_x86_sse_cvttss2si64, f32mem, load,
884 "cvttss2si">, XS, VEX, VEX_W;
885 defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
886 f128mem, load, "cvttsd2si">, XD, VEX;
887 defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
888 int_x86_sse2_cvttsd2si64, f128mem, load,
889 "cvttsd2si">, XD, VEX, VEX_W;
890 defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
891 f32mem, load, "cvttss2si">, XS;
892 defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
893 int_x86_sse_cvttss2si64, f32mem, load,
894 "cvttss2si{q}">, XS, REX_W;
895 defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
896 f128mem, load, "cvttsd2si">, XD;
897 defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
898 int_x86_sse2_cvttsd2si64, f128mem, load,
899 "cvttsd2si{q}">, XD, REX_W;
901 let Pattern = []<dag> in {
902 defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
903 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
904 defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
905 "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
907 defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
908 "cvtdq2ps\t{$src, $dst|$dst, $src}",
909 SSEPackedSingle>, TB, VEX;
910 defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
911 "cvtdq2ps\t{$src, $dst|$dst, $src}",
912 SSEPackedSingle>, TB, VEX;
915 let Pattern = []<dag> in {
916 defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
917 "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
918 defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
919 "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
920 defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
921 "cvtdq2ps\t{$src, $dst|$dst, $src}",
922 SSEPackedSingle>, TB; /* PD SSE3 form is available */
925 let Predicates = [HasSSE1] in {
926 def : Pat<(int_x86_sse_cvtss2si VR128:$src),
927 (CVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
928 def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
929 (CVTSS2SIrm addr:$src)>;
930 def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
931 (CVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
932 def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
933 (CVTSS2SI64rm addr:$src)>;
936 let Predicates = [HasAVX] in {
937 def : Pat<(int_x86_sse_cvtss2si VR128:$src),
938 (VCVTSS2SIrr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
939 def : Pat<(int_x86_sse_cvtss2si (load addr:$src)),
940 (VCVTSS2SIrm addr:$src)>;
941 def : Pat<(int_x86_sse_cvtss2si64 VR128:$src),
942 (VCVTSS2SI64rr (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
943 def : Pat<(int_x86_sse_cvtss2si64 (load addr:$src)),
944 (VCVTSS2SI64rm addr:$src)>;
949 // Convert scalar double to scalar single
950 def VCVTSD2SSrr : VSDI<0x5A, MRMSrcReg, (outs FR32:$dst),
951 (ins FR64:$src1, FR64:$src2),
952 "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
954 def VCVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst),
955 (ins FR64:$src1, f64mem:$src2),
956 "vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
957 []>, XD, Requires<[HasAVX, OptForSize]>, VEX_4V;
958 def : Pat<(f32 (fround FR64:$src)), (VCVTSD2SSrr FR64:$src, FR64:$src)>,
961 def CVTSD2SSrr : SDI<0x5A, MRMSrcReg, (outs FR32:$dst), (ins FR64:$src),
962 "cvtsd2ss\t{$src, $dst|$dst, $src}",
963 [(set FR32:$dst, (fround FR64:$src))]>;
964 def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
965 "cvtsd2ss\t{$src, $dst|$dst, $src}",
966 [(set FR32:$dst, (fround (loadf64 addr:$src)))]>, XD,
967 Requires<[HasSSE2, OptForSize]>;
969 defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
970 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
972 let Constraints = "$src1 = $dst" in
973 defm Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
974 int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS;
976 // Convert scalar single to scalar double
977 // SSE2 instructions with XS prefix
978 def VCVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst),
979 (ins FR32:$src1, FR32:$src2),
980 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
981 []>, XS, Requires<[HasAVX]>, VEX_4V;
982 def VCVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst),
983 (ins FR32:$src1, f32mem:$src2),
984 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
985 []>, XS, VEX_4V, Requires<[HasAVX, OptForSize]>;
987 let Predicates = [HasAVX] in {
988 def : Pat<(f64 (fextend FR32:$src)),
989 (VCVTSS2SDrr FR32:$src, FR32:$src)>;
990 def : Pat<(fextend (loadf32 addr:$src)),
991 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
992 def : Pat<(extloadf32 addr:$src),
993 (VCVTSS2SDrm (f32 (IMPLICIT_DEF)), addr:$src)>;
996 def CVTSS2SDrr : I<0x5A, MRMSrcReg, (outs FR64:$dst), (ins FR32:$src),
997 "cvtss2sd\t{$src, $dst|$dst, $src}",
998 [(set FR64:$dst, (fextend FR32:$src))]>, XS,
1000 def CVTSS2SDrm : I<0x5A, MRMSrcMem, (outs FR64:$dst), (ins f32mem:$src),
1001 "cvtss2sd\t{$src, $dst|$dst, $src}",
1002 [(set FR64:$dst, (extloadf32 addr:$src))]>, XS,
1003 Requires<[HasSSE2, OptForSize]>;
1005 def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
1006 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1007 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1008 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1009 VR128:$src2))]>, XS, VEX_4V,
1011 def Int_VCVTSS2SDrm: I<0x5A, MRMSrcMem,
1012 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
1013 "vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1014 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1015 (load addr:$src2)))]>, XS, VEX_4V,
1017 let Constraints = "$src1 = $dst" in { // SSE2 instructions with XS prefix
1018 def Int_CVTSS2SDrr: I<0x5A, MRMSrcReg,
1019 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
1020 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1021 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1022 VR128:$src2))]>, XS,
1023 Requires<[HasSSE2]>;
1024 def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
1025 (outs VR128:$dst), (ins VR128:$src1, f32mem:$src2),
1026 "cvtss2sd\t{$src2, $dst|$dst, $src2}",
1027 [(set VR128:$dst, (int_x86_sse2_cvtss2sd VR128:$src1,
1028 (load addr:$src2)))]>, XS,
1029 Requires<[HasSSE2]>;
1032 def : Pat<(extloadf32 addr:$src),
1033 (CVTSS2SDrr (MOVSSrm addr:$src))>,
1034 Requires<[HasSSE2, OptForSpeed]>;
1036 // Convert doubleword to packed single/double fp
1037 // SSE2 instructions without OpSize prefix
1038 def Int_VCVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1039 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1040 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
1041 TB, VEX, Requires<[HasAVX]>;
1042 def Int_VCVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1043 "vcvtdq2ps\t{$src, $dst|$dst, $src}",
1044 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
1045 (bitconvert (memopv2i64 addr:$src))))]>,
1046 TB, VEX, Requires<[HasAVX]>;
1047 def Int_CVTDQ2PSrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1048 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1049 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps VR128:$src))]>,
1050 TB, Requires<[HasSSE2]>;
1051 def Int_CVTDQ2PSrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
1052 "cvtdq2ps\t{$src, $dst|$dst, $src}",
1053 [(set VR128:$dst, (int_x86_sse2_cvtdq2ps
1054 (bitconvert (memopv2i64 addr:$src))))]>,
1055 TB, Requires<[HasSSE2]>;
1057 // FIXME: why is the non-intrinsic version described as SSE3?
1058 // SSE2 instructions with XS prefix
1059 def Int_VCVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1060 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
1061 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
1062 XS, VEX, Requires<[HasAVX]>;
1063 def Int_VCVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1064 "vcvtdq2pd\t{$src, $dst|$dst, $src}",
1065 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
1066 (bitconvert (memopv2i64 addr:$src))))]>,
1067 XS, VEX, Requires<[HasAVX]>;
1068 def Int_CVTDQ2PDrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1069 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1070 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd VR128:$src))]>,
1071 XS, Requires<[HasSSE2]>;
1072 def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
1073 "cvtdq2pd\t{$src, $dst|$dst, $src}",
1074 [(set VR128:$dst, (int_x86_sse2_cvtdq2pd
1075 (bitconvert (memopv2i64 addr:$src))))]>,
1076 XS, Requires<[HasSSE2]>;
1079 // Convert packed single/double fp to doubleword
1080 def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1081 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1082 def VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1083 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1084 def VCVTPS2DQYrr : VPDI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1085 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1086 def VCVTPS2DQYrm : VPDI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1087 "cvtps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1088 def CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1089 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
1090 def CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1091 "cvtps2dq\t{$src, $dst|$dst, $src}", []>;
1093 def Int_VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1094 "cvtps2dq\t{$src, $dst|$dst, $src}",
1095 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>,
1097 def Int_VCVTPS2DQrm : VPDI<0x5B, MRMSrcMem, (outs VR128:$dst),
1099 "cvtps2dq\t{$src, $dst|$dst, $src}",
1100 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
1101 (memop addr:$src)))]>, VEX;
1102 def Int_CVTPS2DQrr : PDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1103 "cvtps2dq\t{$src, $dst|$dst, $src}",
1104 [(set VR128:$dst, (int_x86_sse2_cvtps2dq VR128:$src))]>;
1105 def Int_CVTPS2DQrm : PDI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1106 "cvtps2dq\t{$src, $dst|$dst, $src}",
1107 [(set VR128:$dst, (int_x86_sse2_cvtps2dq
1108 (memop addr:$src)))]>;
1110 // SSE2 packed instructions with XD prefix
1111 def Int_VCVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1112 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
1113 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1114 XD, VEX, Requires<[HasAVX]>;
1115 def Int_VCVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1116 "vcvtpd2dq\t{$src, $dst|$dst, $src}",
1117 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
1118 (memop addr:$src)))]>,
1119 XD, VEX, Requires<[HasAVX]>;
1120 def Int_CVTPD2DQrr : I<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1121 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1122 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq VR128:$src))]>,
1123 XD, Requires<[HasSSE2]>;
1124 def Int_CVTPD2DQrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1125 "cvtpd2dq\t{$src, $dst|$dst, $src}",
1126 [(set VR128:$dst, (int_x86_sse2_cvtpd2dq
1127 (memop addr:$src)))]>,
1128 XD, Requires<[HasSSE2]>;
1131 // Convert packed single/double fp to doubleword, with truncation
1132 // SSE2 packed instructions with XS prefix
1133 def VCVTTPS2DQrr : VSSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1134 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1135 def VCVTTPS2DQrm : VSSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1136 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1137 def VCVTTPS2DQYrr : VSSI<0x5B, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
1138 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1139 def VCVTTPS2DQYrm : VSSI<0x5B, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
1140 "cvttps2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1141 def CVTTPS2DQrr : SSI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1142 "cvttps2dq\t{$src, $dst|$dst, $src}",
1144 (int_x86_sse2_cvttps2dq VR128:$src))]>;
1145 def CVTTPS2DQrm : SSI<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1146 "cvttps2dq\t{$src, $dst|$dst, $src}",
1148 (int_x86_sse2_cvttps2dq (memop addr:$src)))]>;
1150 def Int_VCVTTPS2DQrr : I<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1151 "vcvttps2dq\t{$src, $dst|$dst, $src}",
1153 (int_x86_sse2_cvttps2dq VR128:$src))]>,
1154 XS, VEX, Requires<[HasAVX]>;
1155 def Int_VCVTTPS2DQrm : I<0x5B, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1156 "vcvttps2dq\t{$src, $dst|$dst, $src}",
1157 [(set VR128:$dst, (int_x86_sse2_cvttps2dq
1158 (memop addr:$src)))]>,
1159 XS, VEX, Requires<[HasAVX]>;
1161 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
1162 (Int_CVTDQ2PSrr VR128:$src)>, Requires<[HasSSE2]>;
1163 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
1164 (CVTTPS2DQrr VR128:$src)>, Requires<[HasSSE2]>;
1166 def : Pat<(v4f32 (sint_to_fp (v4i32 VR128:$src))),
1167 (Int_VCVTDQ2PSrr VR128:$src)>, Requires<[HasAVX]>;
1168 def : Pat<(v4i32 (fp_to_sint (v4f32 VR128:$src))),
1169 (VCVTTPS2DQrr VR128:$src)>, Requires<[HasAVX]>;
1170 def : Pat<(v8f32 (sint_to_fp (v8i32 VR256:$src))),
1171 (VCVTDQ2PSYrr VR256:$src)>, Requires<[HasAVX]>;
1172 def : Pat<(v8i32 (fp_to_sint (v8f32 VR256:$src))),
1173 (VCVTTPS2DQYrr VR256:$src)>, Requires<[HasAVX]>;
1175 def Int_VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst),
1177 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1178 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>,
1180 def Int_VCVTTPD2DQrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst),
1182 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1183 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1184 (memop addr:$src)))]>, VEX;
1185 def CVTTPD2DQrr : PDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1186 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1187 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq VR128:$src))]>;
1188 def CVTTPD2DQrm : PDI<0xE6, MRMSrcMem, (outs VR128:$dst),(ins f128mem:$src),
1189 "cvttpd2dq\t{$src, $dst|$dst, $src}",
1190 [(set VR128:$dst, (int_x86_sse2_cvttpd2dq
1191 (memop addr:$src)))]>;
1193 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1194 // register, but the same isn't true when using memory operands instead.
1195 // Provide other assembly rr and rm forms to address this explicitly.
1196 def VCVTTPD2DQrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1197 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1198 def VCVTTPD2DQXrYr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1199 "cvttpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
1202 def VCVTTPD2DQXrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1203 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
1204 def VCVTTPD2DQXrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1205 "cvttpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
1208 def VCVTTPD2DQYrr : VPDI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1209 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
1210 def VCVTTPD2DQYrm : VPDI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1211 "cvttpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
1213 // Convert packed single to packed double
1214 let Predicates = [HasAVX] in {
1215 // SSE2 instructions without OpSize prefix
1216 def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1217 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
1218 def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1219 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
1220 def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
1221 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
1222 def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
1223 "vcvtps2pd\t{$src, $dst|$dst, $src}", []>, TB, VEX;
1225 def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1226 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
1227 def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1228 "cvtps2pd\t{$src, $dst|$dst, $src}", []>, TB;
1230 def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1231 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1232 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1233 TB, VEX, Requires<[HasAVX]>;
1234 def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1235 "vcvtps2pd\t{$src, $dst|$dst, $src}",
1236 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1237 (load addr:$src)))]>,
1238 TB, VEX, Requires<[HasAVX]>;
1239 def Int_CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1240 "cvtps2pd\t{$src, $dst|$dst, $src}",
1241 [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>,
1242 TB, Requires<[HasSSE2]>;
1243 def Int_CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
1244 "cvtps2pd\t{$src, $dst|$dst, $src}",
1245 [(set VR128:$dst, (int_x86_sse2_cvtps2pd
1246 (load addr:$src)))]>,
1247 TB, Requires<[HasSSE2]>;
1249 // Convert packed double to packed single
1250 // The assembler can recognize rr 256-bit instructions by seeing a ymm
1251 // register, but the same isn't true when using memory operands instead.
1252 // Provide other assembly rr and rm forms to address this explicitly.
1253 def VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1254 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1255 def VCVTPD2PSXrYr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1256 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>, VEX;
1259 def VCVTPD2PSXrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1260 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1261 def VCVTPD2PSXrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1262 "cvtpd2psx\t{$src, $dst|$dst, $src}", []>, VEX;
1265 def VCVTPD2PSYrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
1266 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX;
1267 def VCVTPD2PSYrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
1268 "cvtpd2psy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
1269 def CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1270 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1271 def CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1272 "cvtpd2ps\t{$src, $dst|$dst, $src}", []>;
1275 def Int_VCVTPD2PSrr : VPDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1276 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1277 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1278 def Int_VCVTPD2PSrm : VPDI<0x5A, MRMSrcMem, (outs VR128:$dst),
1279 (ins f128mem:$src),
1280 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1281 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1282 (memop addr:$src)))]>;
1283 def Int_CVTPD2PSrr : PDI<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
1284 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1285 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps VR128:$src))]>;
1286 def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
1287 "cvtpd2ps\t{$src, $dst|$dst, $src}",
1288 [(set VR128:$dst, (int_x86_sse2_cvtpd2ps
1289 (memop addr:$src)))]>;
1291 // AVX 256-bit register conversion intrinsics
1292 // FIXME: Migrate SSE conversion intrinsics matching to use patterns as below
1293 // whenever possible to avoid declaring two versions of each one.
1294 def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src),
1295 (VCVTDQ2PSYrr VR256:$src)>;
1296 def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)),
1297 (VCVTDQ2PSYrm addr:$src)>;
1299 def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src),
1300 (VCVTPD2PSYrr VR256:$src)>;
1301 def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)),
1302 (VCVTPD2PSYrm addr:$src)>;
1304 def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src),
1305 (VCVTPS2DQYrr VR256:$src)>;
1306 def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)),
1307 (VCVTPS2DQYrm addr:$src)>;
1309 def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src),
1310 (VCVTPS2PDYrr VR128:$src)>;
1311 def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)),
1312 (VCVTPS2PDYrm addr:$src)>;
1314 def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src),
1315 (VCVTTPD2DQYrr VR256:$src)>;
1316 def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)),
1317 (VCVTTPD2DQYrm addr:$src)>;
1319 def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src),
1320 (VCVTTPS2DQYrr VR256:$src)>;
1321 def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)),
1322 (VCVTTPS2DQYrm addr:$src)>;
1324 // Match fround and fextend for 128/256-bit conversions
1325 def : Pat<(v4f32 (fround (v4f64 VR256:$src))),
1326 (VCVTPD2PSYrr VR256:$src)>;
1327 def : Pat<(v4f32 (fround (loadv4f64 addr:$src))),
1328 (VCVTPD2PSYrm addr:$src)>;
1330 def : Pat<(v4f64 (fextend (v4f32 VR128:$src))),
1331 (VCVTPS2PDYrr VR128:$src)>;
1332 def : Pat<(v4f64 (fextend (loadv4f32 addr:$src))),
1333 (VCVTPS2PDYrm addr:$src)>;
1335 //===----------------------------------------------------------------------===//
1336 // SSE 1 & 2 - Compare Instructions
1337 //===----------------------------------------------------------------------===//
1339 // sse12_cmp_scalar - sse 1 & 2 compare scalar instructions
1340 multiclass sse12_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
1341 string asm, string asm_alt> {
1342 let isAsmParserOnly = 1 in {
1343 def rr : SIi8<0xC2, MRMSrcReg,
1344 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc),
1345 asm, []>;
1347 def rm : SIi8<0xC2, MRMSrcMem,
1348 (outs RC:$dst), (ins RC:$src1, x86memop:$src, SSECC:$cc),
1349 asm, []>;
1350 }
1352 // Accept explicit immediate argument form instead of comparison code.
1353 def rr_alt : SIi8<0xC2, MRMSrcReg,
1354 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1355 asm_alt, []>;
1357 def rm_alt : SIi8<0xC2, MRMSrcMem,
1358 (outs RC:$dst), (ins RC:$src1, x86memop:$src, i8imm:$src2),
1359 asm_alt, []>;
1360 }
1362 let neverHasSideEffects = 1 in {
1363 defm VCMPSS : sse12_cmp_scalar<FR32, f32mem,
1364 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
1365 "cmpss\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1367 defm VCMPSD : sse12_cmp_scalar<FR64, f64mem,
1368 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
1369 "cmpsd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}">,
1373 let Constraints = "$src1 = $dst" in {
1374 def CMPSSrr : SIi8<0xC2, MRMSrcReg,
1375 (outs FR32:$dst), (ins FR32:$src1, FR32:$src2, SSECC:$cc),
1376 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
1377 [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), FR32:$src2, imm:$cc))]>, XS;
1378 def CMPSSrm : SIi8<0xC2, MRMSrcMem,
1379 (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2, SSECC:$cc),
1380 "cmp${cc}ss\t{$src2, $dst|$dst, $src2}",
1381 [(set FR32:$dst, (X86cmpss (f32 FR32:$src1), (loadf32 addr:$src2), imm:$cc))]>, XS;
1382 def CMPSDrr : SIi8<0xC2, MRMSrcReg,
1383 (outs FR64:$dst), (ins FR64:$src1, FR64:$src2, SSECC:$cc),
1384 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
1385 [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), FR64:$src2, imm:$cc))]>, XD;
1386 def CMPSDrm : SIi8<0xC2, MRMSrcMem,
1387 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src2, SSECC:$cc),
1388 "cmp${cc}sd\t{$src2, $dst|$dst, $src2}",
1389 [(set FR64:$dst, (X86cmpsd (f64 FR64:$src1), (loadf64 addr:$src2), imm:$cc))]>, XD;
1390 }
1391 let Constraints = "$src1 = $dst", neverHasSideEffects = 1 in {
1392 def CMPSSrr_alt : SIi8<0xC2, MRMSrcReg,
1393 (outs FR32:$dst), (ins FR32:$src1, FR32:$src, i8imm:$src2),
1394 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
1395 def CMPSSrm_alt : SIi8<0xC2, MRMSrcMem,
1396 (outs FR32:$dst), (ins FR32:$src1, f32mem:$src, i8imm:$src2),
1397 "cmpss\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XS;
1398 def CMPSDrr_alt : SIi8<0xC2, MRMSrcReg,
1399 (outs FR64:$dst), (ins FR64:$src1, FR64:$src, i8imm:$src2),
1400 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
1401 def CMPSDrm_alt : SIi8<0xC2, MRMSrcMem,
1402 (outs FR64:$dst), (ins FR64:$src1, f64mem:$src, i8imm:$src2),
1403 "cmpsd\t{$src2, $src, $dst|$dst, $src, $src2}", []>, XD;
1406 multiclass sse12_cmp_scalar_int<RegisterClass RC, X86MemOperand x86memop,
1407 Intrinsic Int, string asm> {
1408 def rr : SIi8<0xC2, MRMSrcReg, (outs VR128:$dst),
1409 (ins VR128:$src1, VR128:$src, SSECC:$cc), asm,
1410 [(set VR128:$dst, (Int VR128:$src1,
1411 VR128:$src, imm:$cc))]>;
1412 def rm : SIi8<0xC2, MRMSrcMem, (outs VR128:$dst),
1413 (ins VR128:$src1, f32mem:$src, SSECC:$cc), asm,
1414 [(set VR128:$dst, (Int VR128:$src1,
1415 (load addr:$src), imm:$cc))]>;
1416 }
1418 // Aliases to match intrinsics which expect XMM operand(s).
1419 defm Int_VCMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1420 "cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}">,
1422 defm Int_VCMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1423 "cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}">,
1425 let Constraints = "$src1 = $dst" in {
1426 defm Int_CMPSS : sse12_cmp_scalar_int<VR128, f32mem, int_x86_sse_cmp_ss,
1427 "cmp${cc}ss\t{$src, $dst|$dst, $src}">, XS;
1428 defm Int_CMPSD : sse12_cmp_scalar_int<VR128, f64mem, int_x86_sse2_cmp_sd,
1429 "cmp${cc}sd\t{$src, $dst|$dst, $src}">, XD;
1433 // sse12_ord_cmp - Unordered/Ordered scalar fp compare and set EFLAGS
1434 multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,
1435 ValueType vt, X86MemOperand x86memop,
1436 PatFrag ld_frag, string OpcodeStr, Domain d> {
1437 def rr: PI<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
1438 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1439 [(set EFLAGS, (OpNode (vt RC:$src1), RC:$src2))], d>;
1440 def rm: PI<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
1441 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
1442 [(set EFLAGS, (OpNode (vt RC:$src1),
1443 (ld_frag addr:$src2)))], d>;
1444 }
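// A usage note for the ordered/unordered compares instantiated below:
// (u)comiss/(u)comisd compare only the low element and write ZF/PF/CF directly
// (an unordered result sets all three), which is why these defs produce EFLAGS
// rather than a register value, e.g.
//   ucomiss %xmm1, %xmm0
//   jae     .Lxmm0_ge_xmm1      // CF==0 <=> low float of %xmm0 >= %xmm1, ordered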
1446 let Defs = [EFLAGS] in {
1447 defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1448 "ucomiss", SSEPackedSingle>, TB, VEX;
1449 defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1450 "ucomisd", SSEPackedDouble>, TB, OpSize, VEX;
1451 let Pattern = []<dag> in {
1452 defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1453 "comiss", SSEPackedSingle>, TB, VEX;
1454 defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1455 "comisd", SSEPackedDouble>, TB, OpSize, VEX;
1458 defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1459 load, "ucomiss", SSEPackedSingle>, TB, VEX;
1460 defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1461 load, "ucomisd", SSEPackedDouble>, TB, OpSize, VEX;
1463 defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
1464 load, "comiss", SSEPackedSingle>, TB, VEX;
1465 defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
1466 load, "comisd", SSEPackedDouble>, TB, OpSize, VEX;
1467 defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
1468 "ucomiss", SSEPackedSingle>, TB;
1469 defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
1470 "ucomisd", SSEPackedDouble>, TB, OpSize;
1472 let Pattern = []<dag> in {
1473 defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
1474 "comiss", SSEPackedSingle>, TB;
1475 defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
1476 "comisd", SSEPackedDouble>, TB, OpSize;
1479 defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
1480 load, "ucomiss", SSEPackedSingle>, TB;
1481 defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
1482 load, "ucomisd", SSEPackedDouble>, TB, OpSize;
1484 defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
1485 "comiss", SSEPackedSingle>, TB;
1486 defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
1487 "comisd", SSEPackedDouble>, TB, OpSize;
1488 } // Defs = [EFLAGS]
1490 // sse12_cmp_packed - sse 1 & 2 compared packed instructions
1491 multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
1492 Intrinsic Int, string asm, string asm_alt,
1493 Domain d> {
1494 let isAsmParserOnly = 1 in {
1495 def rri : PIi8<0xC2, MRMSrcReg,
1496 (outs RC:$dst), (ins RC:$src1, RC:$src, SSECC:$cc), asm,
1497 [(set RC:$dst, (Int RC:$src1, RC:$src, imm:$cc))], d>;
1498 def rmi : PIi8<0xC2, MRMSrcMem,
1499 (outs RC:$dst), (ins RC:$src1, f128mem:$src, SSECC:$cc), asm,
1500 [(set RC:$dst, (Int RC:$src1, (memop addr:$src), imm:$cc))], d>;
1501 }
1503 // Accept explicit immediate argument form instead of comparison code.
1504 def rri_alt : PIi8<0xC2, MRMSrcReg,
1505 (outs RC:$dst), (ins RC:$src1, RC:$src, i8imm:$src2),
1506 asm_alt, [], d>;
1507 def rmi_alt : PIi8<0xC2, MRMSrcMem,
1508 (outs RC:$dst), (ins RC:$src1, f128mem:$src, i8imm:$src2),
1509 asm_alt, [], d>;
1510 }
1512 defm VCMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1513 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1514 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1515 SSEPackedSingle>, TB, VEX_4V;
1516 defm VCMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1517 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1518 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1519 SSEPackedDouble>, TB, OpSize, VEX_4V;
1520 defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256,
1521 "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}",
1522 "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1523 SSEPackedSingle>, TB, VEX_4V;
1524 defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256,
1525 "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}",
1526 "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}",
1527 SSEPackedDouble>, TB, OpSize, VEX_4V;
1528 let Constraints = "$src1 = $dst" in {
1529 defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps,
1530 "cmp${cc}ps\t{$src, $dst|$dst, $src}",
1531 "cmpps\t{$src2, $src, $dst|$dst, $src, $src2}",
1532 SSEPackedSingle>, TB;
1533 defm CMPPD : sse12_cmp_packed<VR128, f128mem, int_x86_sse2_cmp_pd,
1534 "cmp${cc}pd\t{$src, $dst|$dst, $src}",
1535 "cmppd\t{$src2, $src, $dst|$dst, $src, $src2}",
1536 SSEPackedDouble>, TB, OpSize;
1537 }
1539 let Predicates = [HasSSE1] in {
1540 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1541 (CMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1542 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1543 (CMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1544 }
1546 let Predicates = [HasSSE2] in {
1547 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1548 (CMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1549 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1550 (CMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1551 }
1553 let Predicates = [HasAVX] in {
1554 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), VR128:$src2, imm:$cc)),
1555 (VCMPPSrri (v4f32 VR128:$src1), (v4f32 VR128:$src2), imm:$cc)>;
1556 def : Pat<(v4i32 (X86cmpps (v4f32 VR128:$src1), (memop addr:$src2), imm:$cc)),
1557 (VCMPPSrmi (v4f32 VR128:$src1), addr:$src2, imm:$cc)>;
1558 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
1559 (VCMPPDrri VR128:$src1, VR128:$src2, imm:$cc)>;
1560 def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
1561 (VCMPPDrmi VR128:$src1, addr:$src2, imm:$cc)>;
1563 def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), VR256:$src2, imm:$cc)),
1564 (VCMPPSYrri (v8f32 VR256:$src1), (v8f32 VR256:$src2), imm:$cc)>;
1565 def : Pat<(v8i32 (X86cmpps (v8f32 VR256:$src1), (memop addr:$src2), imm:$cc)),
1566 (VCMPPSYrmi (v8f32 VR256:$src1), addr:$src2, imm:$cc)>;
1567 def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), VR256:$src2, imm:$cc)),
1568 (VCMPPDYrri VR256:$src1, VR256:$src2, imm:$cc)>;
1569 def : Pat<(v4i64 (X86cmppd (v4f64 VR256:$src1), (memop addr:$src2), imm:$cc)),
1570 (VCMPPDYrmi VR256:$src1, addr:$src2, imm:$cc)>;
1571 }
1573 //===----------------------------------------------------------------------===//
1574 // SSE 1 & 2 - Shuffle Instructions
1575 //===----------------------------------------------------------------------===//
1577 /// sse12_shuffle - sse 1 & 2 shuffle instructions
1578 multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,
1579 ValueType vt, string asm, PatFrag mem_frag,
1580 Domain d, bit IsConvertibleToThreeAddress = 0> {
1581 def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
1582 (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm,
1583 [(set RC:$dst, (vt (shufp:$src3
1584 RC:$src1, (mem_frag addr:$src2))))], d>;
1585 let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in
1586 def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
1587 (ins RC:$src1, RC:$src2, i8imm:$src3), asm,
1588 [(set RC:$dst,
1589 (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>;
1590 }
1592 defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1593 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1594 memopv4f32, SSEPackedSingle>, TB, VEX_4V;
1595 defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
1596 "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
1597 memopv8f32, SSEPackedSingle>, TB, VEX_4V;
1598 defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1599 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1600 memopv2f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
1601 defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
1602 "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}",
1603 memopv4f64, SSEPackedDouble>, TB, OpSize, VEX_4V;
1605 let Constraints = "$src1 = $dst" in {
1606 defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
1607 "shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1608 memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>,
1609 TB;
1610 defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
1611 "shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
1612 memopv2f64, SSEPackedDouble>, TB, OpSize;
1613 }
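// To illustrate the shuffle immediate handled by the patterns below: shufps
// interprets $src3 as four 2-bit fields; the low two fields pick result lanes
// 0-1 from $src1 and the high two pick lanes 2-3 from $src2. For example, an
// immediate of 0xE4 (0b11100100) selects {src1[0], src1[1], src2[2], src2[3]}.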
1615 let Predicates = [HasSSE1] in {
1616 def : Pat<(v4f32 (X86Shufps VR128:$src1,
1617 (memopv4f32 addr:$src2), (i8 imm:$imm))),
1618 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
1619 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1620 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
1621 def : Pat<(v4i32 (X86Shufps VR128:$src1,
1622 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
1623 (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
1624 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1625 (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
1626 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
1627 // fall back to this for SSE1)
1628 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
1629 (SHUFPSrri VR128:$src2, VR128:$src1,
1630 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1631 // Special unary SHUFPSrri case.
1632 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
1633 (SHUFPSrri VR128:$src1, VR128:$src1,
1634 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1635 }
1637 let Predicates = [HasSSE2] in {
1638 // Special binary v4i32 shuffle cases with SHUFPS.
1639 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
1640 (SHUFPSrri VR128:$src1, VR128:$src2,
1641 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1642 def : Pat<(v4i32 (shufp:$src3 VR128:$src1,
1643 (bc_v4i32 (memopv2i64 addr:$src2)))),
1644 (SHUFPSrmi VR128:$src1, addr:$src2,
1645 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1646 // Special unary SHUFPDrri cases.
1647 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
1648 (SHUFPDrri VR128:$src1, VR128:$src1,
1649 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1650 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
1651 (SHUFPDrri VR128:$src1, VR128:$src1,
1652 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1653 // Special binary v2i64 shuffle cases using SHUFPDrri.
1654 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
1655 (SHUFPDrri VR128:$src1, VR128:$src2,
1656 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1657 // Generic SHUFPD patterns
1658 def : Pat<(v2f64 (X86Shufps VR128:$src1,
1659 (memopv2f64 addr:$src2), (i8 imm:$imm))),
1660 (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
1661 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1662 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
1663 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1664 (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
1665 }
1667 let Predicates = [HasAVX] in {
1668 def : Pat<(v4f32 (X86Shufps VR128:$src1,
1669 (memopv4f32 addr:$src2), (i8 imm:$imm))),
1670 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
1671 def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1672 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
1673 def : Pat<(v4i32 (X86Shufps VR128:$src1,
1674 (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))),
1675 (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>;
1676 def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1677 (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>;
1678 // vector_shuffle v1, v2 <4, 5, 2, 3> using SHUFPSrri (we prefer movsd, but
1679 // fall back to this for SSE1)
1680 def : Pat<(v4f32 (movlp:$src3 VR128:$src1, (v4f32 VR128:$src2))),
1681 (VSHUFPSrri VR128:$src2, VR128:$src1,
1682 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1683 // Special unary SHUFPSrri case.
1684 def : Pat<(v4f32 (pshufd:$src3 VR128:$src1, (undef))),
1685 (VSHUFPSrri VR128:$src1, VR128:$src1,
1686 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1687 // Special binary v4i32 shuffle cases with SHUFPS.
1688 def : Pat<(v4i32 (shufp:$src3 VR128:$src1, (v4i32 VR128:$src2))),
1689 (VSHUFPSrri VR128:$src1, VR128:$src2,
1690 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1691 def : Pat<(v4i32 (shufp:$src3 VR128:$src1,
1692 (bc_v4i32 (memopv2i64 addr:$src2)))),
1693 (VSHUFPSrmi VR128:$src1, addr:$src2,
1694 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1695 // Special unary SHUFPDrri cases.
1696 def : Pat<(v2i64 (pshufd:$src3 VR128:$src1, (undef))),
1697 (VSHUFPDrri VR128:$src1, VR128:$src1,
1698 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1699 def : Pat<(v2f64 (pshufd:$src3 VR128:$src1, (undef))),
1700 (VSHUFPDrri VR128:$src1, VR128:$src1,
1701 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1702 // Special binary v2i64 shuffle cases using SHUFPDrri.
1703 def : Pat<(v2i64 (shufp:$src3 VR128:$src1, VR128:$src2)),
1704 (VSHUFPDrri VR128:$src1, VR128:$src2,
1705 (SHUFFLE_get_shuf_imm VR128:$src3))>;
1707 def : Pat<(v2f64 (X86Shufps VR128:$src1,
1708 (memopv2f64 addr:$src2), (i8 imm:$imm))),
1709 (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>;
1710 def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1711 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
1712 def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))),
1713 (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>;
1716 def : Pat<(v8i32 (X86Shufps VR256:$src1, VR256:$src2, (i8 imm:$imm))),
1717 (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
1718 def : Pat<(v8i32 (X86Shufps VR256:$src1,
1719 (bc_v8i32 (memopv4i64 addr:$src2)), (i8 imm:$imm))),
1720 (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;
1722 def : Pat<(v8f32 (X86Shufps VR256:$src1, VR256:$src2, (i8 imm:$imm))),
1723 (VSHUFPSYrri VR256:$src1, VR256:$src2, imm:$imm)>;
1724 def : Pat<(v8f32 (X86Shufps VR256:$src1,
1725 (memopv8f32 addr:$src2), (i8 imm:$imm))),
1726 (VSHUFPSYrmi VR256:$src1, addr:$src2, imm:$imm)>;
1728 def : Pat<(v4i64 (X86Shufpd VR256:$src1, VR256:$src2, (i8 imm:$imm))),
1729 (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
1730 def : Pat<(v4i64 (X86Shufpd VR256:$src1,
1731 (memopv4i64 addr:$src2), (i8 imm:$imm))),
1732 (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
1734 def : Pat<(v4f64 (X86Shufpd VR256:$src1, VR256:$src2, (i8 imm:$imm))),
1735 (VSHUFPDYrri VR256:$src1, VR256:$src2, imm:$imm)>;
1736 def : Pat<(v4f64 (X86Shufpd VR256:$src1,
1737 (memopv4f64 addr:$src2), (i8 imm:$imm))),
1738 (VSHUFPDYrmi VR256:$src1, addr:$src2, imm:$imm)>;
1739 }
1741 //===----------------------------------------------------------------------===//
1742 // SSE 1 & 2 - Unpack Instructions
1743 //===----------------------------------------------------------------------===//
1745 /// sse12_unpack_interleave - sse 1 & 2 unpack and interleave
1746 multiclass sse12_unpack_interleave<bits<8> opc, PatFrag OpNode, ValueType vt,
1747 PatFrag mem_frag, RegisterClass RC,
1748 X86MemOperand x86memop, string asm,
1749 Domain d> {
1750 def rr : PI<opc, MRMSrcReg,
1751 (outs RC:$dst), (ins RC:$src1, RC:$src2),
1752 asm, [(set RC:$dst,
1753 (vt (OpNode RC:$src1, RC:$src2)))], d>;
1754 def rm : PI<opc, MRMSrcMem,
1755 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
1756 asm, [(set RC:$dst,
1757 (vt (OpNode RC:$src1,
1758 (mem_frag addr:$src2))))], d>;
1759 }
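// As a concrete example of the interleave semantics instantiated below,
// "unpcklps %xmm1, %xmm0" yields {xmm0[0], xmm1[0], xmm0[1], xmm1[1]}, and
// "unpckhps %xmm1, %xmm0" yields {xmm0[2], xmm1[2], xmm0[3], xmm1[3]}.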
1761 let AddedComplexity = 10 in {
1762 defm VUNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1763 VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1764 SSEPackedSingle>, TB, VEX_4V;
1765 defm VUNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1766 VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1767 SSEPackedDouble>, TB, OpSize, VEX_4V;
1768 defm VUNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1769 VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1770 SSEPackedSingle>, TB, VEX_4V;
1771 defm VUNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1772 VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1773 SSEPackedDouble>, TB, OpSize, VEX_4V;
1775 defm VUNPCKHPSY: sse12_unpack_interleave<0x15, unpckh, v8f32, memopv8f32,
1776 VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1777 SSEPackedSingle>, TB, VEX_4V;
1778 defm VUNPCKHPDY: sse12_unpack_interleave<0x15, unpckh, v4f64, memopv4f64,
1779 VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1780 SSEPackedDouble>, TB, OpSize, VEX_4V;
1781 defm VUNPCKLPSY: sse12_unpack_interleave<0x14, unpckl, v8f32, memopv8f32,
1782 VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1783 SSEPackedSingle>, TB, VEX_4V;
1784 defm VUNPCKLPDY: sse12_unpack_interleave<0x14, unpckl, v4f64, memopv4f64,
1785 VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
1786 SSEPackedDouble>, TB, OpSize, VEX_4V;
1788 let Constraints = "$src1 = $dst" in {
1789 defm UNPCKHPS: sse12_unpack_interleave<0x15, unpckh, v4f32, memopv4f32,
1790 VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
1791 SSEPackedSingle>, TB;
1792 defm UNPCKHPD: sse12_unpack_interleave<0x15, unpckh, v2f64, memopv2f64,
1793 VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
1794 SSEPackedDouble>, TB, OpSize;
1795 defm UNPCKLPS: sse12_unpack_interleave<0x14, unpckl, v4f32, memopv4f32,
1796 VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
1797 SSEPackedSingle>, TB;
1798 defm UNPCKLPD: sse12_unpack_interleave<0x14, unpckl, v2f64, memopv2f64,
1799 VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
1800 SSEPackedDouble>, TB, OpSize;
1801 } // Constraints = "$src1 = $dst"
1802 } // AddedComplexity
1804 let Predicates = [HasSSE1] in {
1805 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
1806 (UNPCKLPSrm VR128:$src1, addr:$src2)>;
1807 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
1808 (UNPCKLPSrr VR128:$src1, VR128:$src2)>;
1809 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
1810 (UNPCKHPSrm VR128:$src1, addr:$src2)>;
1811 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
1812 (UNPCKHPSrr VR128:$src1, VR128:$src2)>;
1813 }
1815 let Predicates = [HasSSE2] in {
1816 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
1817 (UNPCKLPDrm VR128:$src1, addr:$src2)>;
1818 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
1819 (UNPCKLPDrr VR128:$src1, VR128:$src2)>;
1820 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
1821 (UNPCKHPDrm VR128:$src1, addr:$src2)>;
1822 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
1823 (UNPCKHPDrr VR128:$src1, VR128:$src2)>;
1825 // FIXME: Instead of X86Movddup, there should be a X86Unpcklpd here, the
1826 // problem is during lowering, where it's not possible to recognize the load
1827 // fold cause it has two uses through a bitcast. One use disappears at isel
1828 // time and the fold opportunity reappears.
1829 def : Pat<(v2f64 (X86Movddup VR128:$src)),
1830 (UNPCKLPDrr VR128:$src, VR128:$src)>;
1832 let AddedComplexity = 10 in
1833 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
1834 (UNPCKLPDrr VR128:$src, VR128:$src)>;
1835 }
1837 let Predicates = [HasAVX] in {
1838 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))),
1839 (VUNPCKLPSrm VR128:$src1, addr:$src2)>;
1840 def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)),
1841 (VUNPCKLPSrr VR128:$src1, VR128:$src2)>;
1842 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))),
1843 (VUNPCKHPSrm VR128:$src1, addr:$src2)>;
1844 def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)),
1845 (VUNPCKHPSrr VR128:$src1, VR128:$src2)>;
1847 def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, (memopv8f32 addr:$src2))),
1848 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
1849 def : Pat<(v8f32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
1850 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
1851 def : Pat<(v8i32 (X86Unpcklpsy VR256:$src1, VR256:$src2)),
1852 (VUNPCKLPSYrr VR256:$src1, VR256:$src2)>;
1853 def : Pat<(v8i32 (X86Unpcklpsy VR256:$src1, (memopv8i32 addr:$src2))),
1854 (VUNPCKLPSYrm VR256:$src1, addr:$src2)>;
1855 def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, (memopv8f32 addr:$src2))),
1856 (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
1857 def : Pat<(v8f32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
1858 (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
1859 def : Pat<(v8i32 (X86Unpckhpsy VR256:$src1, (memopv8i32 addr:$src2))),
1860 (VUNPCKHPSYrm VR256:$src1, addr:$src2)>;
1861 def : Pat<(v8i32 (X86Unpckhpsy VR256:$src1, VR256:$src2)),
1862 (VUNPCKHPSYrr VR256:$src1, VR256:$src2)>;
1864 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))),
1865 (VUNPCKLPDrm VR128:$src1, addr:$src2)>;
1866 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)),
1867 (VUNPCKLPDrr VR128:$src1, VR128:$src2)>;
1868 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))),
1869 (VUNPCKHPDrm VR128:$src1, addr:$src2)>;
1870 def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)),
1871 (VUNPCKHPDrr VR128:$src1, VR128:$src2)>;
1873 def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, (memopv4f64 addr:$src2))),
1874 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
1875 def : Pat<(v4f64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
1876 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
1877 def : Pat<(v4i64 (X86Unpcklpdy VR256:$src1, (memopv4i64 addr:$src2))),
1878 (VUNPCKLPDYrm VR256:$src1, addr:$src2)>;
1879 def : Pat<(v4i64 (X86Unpcklpdy VR256:$src1, VR256:$src2)),
1880 (VUNPCKLPDYrr VR256:$src1, VR256:$src2)>;
1881 def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, (memopv4f64 addr:$src2))),
1882 (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
1883 def : Pat<(v4f64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
1884 (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
1885 def : Pat<(v4i64 (X86Unpckhpdy VR256:$src1, (memopv4i64 addr:$src2))),
1886 (VUNPCKHPDYrm VR256:$src1, addr:$src2)>;
1887 def : Pat<(v4i64 (X86Unpckhpdy VR256:$src1, VR256:$src2)),
1888 (VUNPCKHPDYrr VR256:$src1, VR256:$src2)>;
1890 // FIXME: Instead of X86Movddup, there should be a X86Unpcklpd here, the
1891 // problem is during lowering, where it's not possible to recognize the load
1892 // fold cause it has two uses through a bitcast. One use disappears at isel
1893 // time and the fold opportunity reappears.
1894 def : Pat<(v2f64 (X86Movddup VR128:$src)),
1895 (VUNPCKLPDrr VR128:$src, VR128:$src)>;
1896 let AddedComplexity = 10 in
1897 def : Pat<(splat_lo (v2f64 VR128:$src), (undef)),
1898 (VUNPCKLPDrr VR128:$src, VR128:$src)>;
1899 }
1901 //===----------------------------------------------------------------------===//
1902 // SSE 1 & 2 - Extract Floating-Point Sign mask
1903 //===----------------------------------------------------------------------===//
1905 /// sse12_extr_sign_mask - sse 1 & 2 unpack and interleave
1906 multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,
1907 Domain d> {
1908 def rr32 : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins RC:$src),
1909 !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
1910 [(set GR32:$dst, (Int RC:$src))], d>;
1911 def rr64 : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins RC:$src),
1912 !strconcat(asm, "\t{$src, $dst|$dst, $src}"), [], d>, REX_W;
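// For example, "movmskps %xmm0, %eax" copies the sign bit of each of the four
// packed singles into bits 3:0 of %eax and clears the remaining bits; the rr64
// form defined above only widens the destination register, the value is the same.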
1915 defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
1916 SSEPackedSingle>, TB;
1917 defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
1918 SSEPackedDouble>, TB, OpSize;
1920 def : Pat<(i32 (X86fgetsign FR32:$src)),
1921 (MOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1922 sub_ss))>, Requires<[HasSSE1]>;
1923 def : Pat<(i64 (X86fgetsign FR32:$src)),
1924 (MOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1925 sub_ss))>, Requires<[HasSSE1]>;
1926 def : Pat<(i32 (X86fgetsign FR64:$src)),
1927 (MOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1928 sub_sd))>, Requires<[HasSSE2]>;
1929 def : Pat<(i64 (X86fgetsign FR64:$src)),
1930 (MOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1931 sub_sd))>, Requires<[HasSSE2]>;
1933 let Predicates = [HasAVX] in {
1934 defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
1935 "movmskps", SSEPackedSingle>, TB, VEX;
1936 defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
1937 "movmskpd", SSEPackedDouble>, TB, OpSize,
1939 defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
1940 "movmskps", SSEPackedSingle>, TB, VEX;
1941 defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
1942 "movmskpd", SSEPackedDouble>, TB, OpSize,
1945 def : Pat<(i32 (X86fgetsign FR32:$src)),
1946 (VMOVMSKPSrr32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1947 sub_ss))>;
1948 def : Pat<(i64 (X86fgetsign FR32:$src)),
1949 (VMOVMSKPSrr64 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src,
1950 sub_ss))>;
1951 def : Pat<(i32 (X86fgetsign FR64:$src)),
1952 (VMOVMSKPDrr32 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1953 sub_sd))>;
1954 def : Pat<(i64 (X86fgetsign FR64:$src)),
1955 (VMOVMSKPDrr64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src,
1956 sub_sd))>;
1958 // Assembler Only
1959 def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1960 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, TB, VEX;
1961 def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
1962 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, TB, OpSize,
1964 def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1965 "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, TB, VEX;
1966 def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src),
1967 "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, TB, OpSize,
1971 //===----------------------------------------------------------------------===//
1972 // SSE 1 & 2 - Misc aliasing of packed SSE 1 & 2 instructions
1973 //===----------------------------------------------------------------------===//
1975 // Aliases of packed SSE1 & SSE2 instructions for scalar use. These all have
1976 // names that start with 'Fs'.
1978 // Alias instructions that map fld0 to pxor for sse.
1979 let isReMaterializable = 1, isAsCheapAsAMove = 1, isCodeGenOnly = 1,
1980 canFoldAsLoad = 1 in {
1981 // FIXME: Set encoding to pseudo!
1982 def FsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1983 [(set FR32:$dst, fp32imm0)]>,
1984 Requires<[HasSSE1]>, TB, OpSize;
1985 def FsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1986 [(set FR64:$dst, fpimm0)]>,
1987 Requires<[HasSSE2]>, TB, OpSize;
1988 def VFsFLD0SS : I<0xEF, MRMInitReg, (outs FR32:$dst), (ins), "",
1989 [(set FR32:$dst, fp32imm0)]>,
1990 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
1991 def VFsFLD0SD : I<0xEF, MRMInitReg, (outs FR64:$dst), (ins), "",
1992 [(set FR64:$dst, fpimm0)]>,
1993 Requires<[HasAVX]>, TB, OpSize, VEX_4V;
1994 }
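// The 0xEF/MRMInitReg encoding used above is effectively "pxor %xmmN, %xmmN",
// so FsFLD0SS/FsFLD0SD materialize +0.0 without touching the constant pool,
// which is why they can be marked rematerializable and as cheap as a move.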
1996 // Alias instruction to do FR32 or FR64 reg-to-reg copy using movaps. Upper
1997 // bits are disregarded.
1998 let neverHasSideEffects = 1 in {
1999 def FsMOVAPSrr : PSI<0x28, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
2000 "movaps\t{$src, $dst|$dst, $src}", []>;
2001 def FsMOVAPDrr : PDI<0x28, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
2002 "movapd\t{$src, $dst|$dst, $src}", []>;
2005 // Alias instruction to load FR32 or FR64 from f128mem using movaps. Upper
2006 // bits are disregarded.
2007 let canFoldAsLoad = 1, isReMaterializable = 1 in {
2008 def FsMOVAPSrm : PSI<0x28, MRMSrcMem, (outs FR32:$dst), (ins f128mem:$src),
2009 "movaps\t{$src, $dst|$dst, $src}",
2010 [(set FR32:$dst, (alignedloadfsf32 addr:$src))]>;
2011 def FsMOVAPDrm : PDI<0x28, MRMSrcMem, (outs FR64:$dst), (ins f128mem:$src),
2012 "movapd\t{$src, $dst|$dst, $src}",
2013 [(set FR64:$dst, (alignedloadfsf64 addr:$src))]>;
2014 }
2016 //===----------------------------------------------------------------------===//
2017 // SSE 1 & 2 - Logical Instructions
2018 //===----------------------------------------------------------------------===//
2020 /// sse12_fp_alias_pack_logical - SSE 1 & 2 aliased packed FP logical ops
2022 multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
2023 SDNode OpNode> {
2024 defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
2025 FR32, f32, f128mem, memopfsf32, SSEPackedSingle, 0>, TB, VEX_4V;
2027 defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
2028 FR64, f64, f128mem, memopfsf64, SSEPackedDouble, 0>, TB, OpSize, VEX_4V;
2030 let Constraints = "$src1 = $dst" in {
2031 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
2032 f32, f128mem, memopfsf32, SSEPackedSingle>, TB;
2034 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
2035 f64, f128mem, memopfsf64, SSEPackedDouble>, TB, OpSize;
2036 }
2037 }
2039 // Alias bitwise logical operations using SSE logical ops on packed FP values.
2040 let mayLoad = 0 in {
2041 defm FsAND : sse12_fp_alias_pack_logical<0x54, "and", X86fand>;
2042 defm FsOR : sse12_fp_alias_pack_logical<0x56, "or", X86for>;
2043 defm FsXOR : sse12_fp_alias_pack_logical<0x57, "xor", X86fxor>;
2046 let neverHasSideEffects = 1, Pattern = []<dag>, isCommutable = 0 in
2047 defm FsANDN : sse12_fp_alias_pack_logical<0x55, "andn", undef>;
2049 /// sse12_fp_packed_logical - SSE 1 & 2 packed FP logical ops
2051 multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
2052 SDNode OpNode> {
2053 // In AVX no need to add a pattern for 128-bit logical rr ps, because they
2054 // are all promoted to v2i64, and the patterns are covered by the int
2055 // version. This is needed in SSE only, because v2i64 isn't supported on
2056 // SSE1, but only on SSE2.
2057 defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2058 !strconcat(OpcodeStr, "ps"), f128mem, [],
2059 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2060 (memopv2i64 addr:$src2)))], 0>, TB, VEX_4V;
2062 defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2063 !strconcat(OpcodeStr, "pd"), f128mem,
2064 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2065 (bc_v2i64 (v2f64 VR128:$src2))))],
2066 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2067 (memopv2i64 addr:$src2)))], 0>,
2068 TB, OpSize, VEX_4V;
2069 let Constraints = "$src1 = $dst" in {
2070 defm PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
2071 !strconcat(OpcodeStr, "ps"), f128mem,
2072 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
2073 [(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
2074 (memopv2i64 addr:$src2)))]>, TB;
2076 defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
2077 !strconcat(OpcodeStr, "pd"), f128mem,
2078 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2079 (bc_v2i64 (v2f64 VR128:$src2))))],
2080 [(set VR128:$dst, (OpNode (bc_v2i64 (v2f64 VR128:$src1)),
2081 (memopv2i64 addr:$src2)))]>, TB, OpSize;
2082 }
2083 }
2085 /// sse12_fp_packed_logical_y - AVX 256-bit SSE 1 & 2 logical ops forms
2087 multiclass sse12_fp_packed_logical_y<bits<8> opc, string OpcodeStr,
2088 SDNode OpNode> {
2089 defm PSY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedSingle,
2090 !strconcat(OpcodeStr, "ps"), f256mem,
2091 [(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
2092 [(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
2093 (memopv4i64 addr:$src2)))], 0>, TB, VEX_4V;
2095 defm PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
2096 !strconcat(OpcodeStr, "pd"), f256mem,
2097 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2098 (bc_v4i64 (v4f64 VR256:$src2))))],
2099 [(set VR256:$dst, (OpNode (bc_v4i64 (v4f64 VR256:$src1)),
2100 (memopv4i64 addr:$src2)))], 0>,
2101 TB, OpSize, VEX_4V;
2102 }
2104 // AVX 256-bit packed logical ops forms
2105 defm VAND : sse12_fp_packed_logical_y<0x54, "and", and>;
2106 defm VOR : sse12_fp_packed_logical_y<0x56, "or", or>;
2107 defm VXOR : sse12_fp_packed_logical_y<0x57, "xor", xor>;
2108 defm VANDN : sse12_fp_packed_logical_y<0x55, "andn", X86andnp>;
2110 defm AND : sse12_fp_packed_logical<0x54, "and", and>;
2111 defm OR : sse12_fp_packed_logical<0x56, "or", or>;
2112 defm XOR : sse12_fp_packed_logical<0x57, "xor", xor>;
2113 let isCommutable = 0 in
2114 defm ANDN : sse12_fp_packed_logical<0x55, "andn", X86andnp>;
2116 //===----------------------------------------------------------------------===//
2117 // SSE 1 & 2 - Arithmetic Instructions
2118 //===----------------------------------------------------------------------===//
2120 /// basic_sse12_fp_binop_xxx - SSE 1 & 2 binops come in both scalar and
2123 /// In addition, we also have a special variant of the scalar form here to
2124 /// represent the associated intrinsic operation. This form is unlike the
2125 /// plain scalar form, in that it takes an entire vector (instead of a scalar)
2126 /// and leaves the top elements unmodified (therefore these cannot be commuted).
2128 /// These three forms can each be reg+reg or reg+mem.
2131 /// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those
2133 multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
2134 bit Is2Addr = 1> {
2135 defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"),
2136 OpNode, FR32, f32mem, Is2Addr>, XS;
2137 defm SD : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"),
2138 OpNode, FR64, f64mem, Is2Addr>, XD;
2139 }
2141 multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
2142 bit Is2Addr = 1> {
2143 let mayLoad = 0 in {
2144 defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
2145 v4f32, f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB;
2146 defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
2147 v2f64, f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize;
2148 }
2149 }
2151 multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr,
2152 SDNode OpNode> {
2153 let mayLoad = 0 in {
2154 defm PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR256,
2155 v8f32, f256mem, memopv8f32, SSEPackedSingle, 0>, TB;
2156 defm PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR256,
2157 v4f64, f256mem, memopv4f64, SSEPackedDouble, 0>, TB, OpSize;
2158 }
2159 }
2161 multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr,
2162 bit Is2Addr = 1> {
2163 defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
2164 !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS;
2165 defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128,
2166 !strconcat(OpcodeStr, "sd"), "2", "_sd", sdmem, sse_load_f64, Is2Addr>, XD;
2169 multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr,
2170 bit Is2Addr = 1> {
2171 defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128,
2172 !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32,
2173 SSEPackedSingle, Is2Addr>, TB;
2175 defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128,
2176 !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64,
2177 SSEPackedDouble, Is2Addr>, TB, OpSize;
2178 }
2180 multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> {
2181 defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
2182 !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32,
2183 SSEPackedSingle, 0>, TB;
2185 defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256,
2186 !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64,
2187 SSEPackedDouble, 0>, TB, OpSize;
2188 }
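// To make the stacked defm clauses below easier to follow: within "defm VADD",
// basic_sse12_fp_binop_s<0x58, "add", fadd, 0> expands (via sse12_fp_scalar)
// into records named VADDSSrr/VADDSSrm and VADDSDrr/VADDSDrm, and each further
// multiclass in the same defm appends the packed, intrinsic and 256-bit
// variants under the same prefix.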
2190 // Binary Arithmetic instructions
2191 defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>,
2192 basic_sse12_fp_binop_s_int<0x58, "add", 0>,
2193 basic_sse12_fp_binop_p<0x58, "add", fadd, 0>,
2194 basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V;
2195 defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>,
2196 basic_sse12_fp_binop_s_int<0x59, "mul", 0>,
2197 basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>,
2198 basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V;
2200 let isCommutable = 0 in {
2201 defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>,
2202 basic_sse12_fp_binop_s_int<0x5C, "sub", 0>,
2203 basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>,
2204 basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V;
2205 defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>,
2206 basic_sse12_fp_binop_s_int<0x5E, "div", 0>,
2207 basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>,
2208 basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V;
2209 defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>,
2210 basic_sse12_fp_binop_s_int<0x5F, "max", 0>,
2211 basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>,
2212 basic_sse12_fp_binop_p_int<0x5F, "max", 0>,
2213 basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>,
2214 basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V;
2215 defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>,
2216 basic_sse12_fp_binop_s_int<0x5D, "min", 0>,
2217 basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>,
2218 basic_sse12_fp_binop_p_int<0x5D, "min", 0>,
2219 basic_sse12_fp_binop_p_y_int<0x5D, "min">,
2220 basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V;
2223 let Constraints = "$src1 = $dst" in {
2224 defm ADD : basic_sse12_fp_binop_s<0x58, "add", fadd>,
2225 basic_sse12_fp_binop_p<0x58, "add", fadd>,
2226 basic_sse12_fp_binop_s_int<0x58, "add">;
2227 defm MUL : basic_sse12_fp_binop_s<0x59, "mul", fmul>,
2228 basic_sse12_fp_binop_p<0x59, "mul", fmul>,
2229 basic_sse12_fp_binop_s_int<0x59, "mul">;
2231 let isCommutable = 0 in {
2232 defm SUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub>,
2233 basic_sse12_fp_binop_p<0x5C, "sub", fsub>,
2234 basic_sse12_fp_binop_s_int<0x5C, "sub">;
2235 defm DIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv>,
2236 basic_sse12_fp_binop_p<0x5E, "div", fdiv>,
2237 basic_sse12_fp_binop_s_int<0x5E, "div">;
2238 defm MAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax>,
2239 basic_sse12_fp_binop_p<0x5F, "max", X86fmax>,
2240 basic_sse12_fp_binop_s_int<0x5F, "max">,
2241 basic_sse12_fp_binop_p_int<0x5F, "max">;
2242 defm MIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin>,
2243 basic_sse12_fp_binop_p<0x5D, "min", X86fmin>,
2244 basic_sse12_fp_binop_s_int<0x5D, "min">,
2245 basic_sse12_fp_binop_p_int<0x5D, "min">;
2250 /// In addition, we also have a special variant of the scalar form here to
2251 /// represent the associated intrinsic operation. This form is unlike the
2252 /// plain scalar form, in that it takes an entire vector (instead of a
2253 /// scalar) and leaves the top elements undefined.
2255 /// And, we have a special variant form for a full-vector intrinsic form.
2257 /// sse1_fp_unop_s - SSE1 unops in scalar form.
2258 multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr,
2259 SDNode OpNode, Intrinsic F32Int> {
2260 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src),
2261 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2262 [(set FR32:$dst, (OpNode FR32:$src))]>;
2263 // For scalar unary operations, fold a load into the operation
2264 // only in OptForSize mode. It eliminates an instruction, but it also
2265 // eliminates a whole-register clobber (the load), so it introduces a
2266 // partial register update condition.
2267 def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
2268 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2269 [(set FR32:$dst, (OpNode (load addr:$src)))]>, XS,
2270 Requires<[HasSSE1, OptForSize]>;
2271 def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2272 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2273 [(set VR128:$dst, (F32Int VR128:$src))]>;
2274 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src),
2275 !strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
2276 [(set VR128:$dst, (F32Int sse_load_f32:$src))]>;
2277 }
2279 /// sse1_fp_unop_s_avx - AVX SSE1 unops in scalar form.
2280 multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
2281 def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2),
2282 !strconcat(OpcodeStr,
2283 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2284 def SSm : SSI<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1,f32mem:$src2),
2285 !strconcat(OpcodeStr,
2286 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2287 def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
2288 (ins ssmem:$src1, VR128:$src2),
2289 !strconcat(OpcodeStr,
2290 "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2293 /// sse1_fp_unop_p - SSE1 unops in packed form.
2294 multiclass sse1_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
2295 def PSr : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2296 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2297 [(set VR128:$dst, (v4f32 (OpNode VR128:$src)))]>;
2298 def PSm : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2299 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2300 [(set VR128:$dst, (OpNode (memopv4f32 addr:$src)))]>;
2301 }
2303 /// sse1_fp_unop_p_y - AVX 256-bit SSE1 unops in packed form.
2304 multiclass sse1_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
2305 def PSYr : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2306 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2307 [(set VR256:$dst, (v8f32 (OpNode VR256:$src)))]>;
2308 def PSYm : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2309 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2310 [(set VR256:$dst, (OpNode (memopv8f32 addr:$src)))]>;
2311 }
2313 /// sse1_fp_unop_p_int - SSE1 intrinsics unops in packed forms.
2314 multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
2315 Intrinsic V4F32Int> {
2316 def PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2317 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2318 [(set VR128:$dst, (V4F32Int VR128:$src))]>;
2319 def PSm_Int : PSI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2320 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2321 [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>;
2322 }
2324 /// sse1_fp_unop_p_y_int - AVX 256-bit intrinsics unops in packed forms.
2325 multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
2326 Intrinsic V4F32Int> {
2327 def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2328 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2329 [(set VR256:$dst, (V4F32Int VR256:$src))]>;
2330 def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2331 !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
2332 [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>;
2333 }
2335 /// sse2_fp_unop_s - SSE2 unops in scalar form.
2336 multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr,
2337 SDNode OpNode, Intrinsic F64Int> {
2338 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src),
2339 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2340 [(set FR64:$dst, (OpNode FR64:$src))]>;
2341 // See the comments in sse1_fp_unop_s for why this is OptForSize.
2342 def SDm : I<opc, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
2343 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2344 [(set FR64:$dst, (OpNode (load addr:$src)))]>, XD,
2345 Requires<[HasSSE2, OptForSize]>;
2346 def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2347 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2348 [(set VR128:$dst, (F64Int VR128:$src))]>;
2349 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src),
2350 !strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
2351 [(set VR128:$dst, (F64Int sse_load_f64:$src))]>;
2352 }
2354 /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form.
2355 multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr> {
2356 def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2),
2357 !strconcat(OpcodeStr,
2358 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2359 def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), (ins FR64:$src1,f64mem:$src2),
2360 !strconcat(OpcodeStr,
2361 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2362 def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
2363 (ins VR128:$src1, sdmem:$src2),
2364 !strconcat(OpcodeStr,
2365 "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
2368 /// sse2_fp_unop_p - SSE2 unops in vector forms.
2369 multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
2370 SDNode OpNode> {
2371 def PDr : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2372 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2373 [(set VR128:$dst, (v2f64 (OpNode VR128:$src)))]>;
2374 def PDm : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2375 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2376 [(set VR128:$dst, (OpNode (memopv2f64 addr:$src)))]>;
2377 }
2379 /// sse2_fp_unop_p_y - AVX SSE2 256-bit unops in vector forms.
2380 multiclass sse2_fp_unop_p_y<bits<8> opc, string OpcodeStr, SDNode OpNode> {
2381 def PDYr : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2382 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2383 [(set VR256:$dst, (v4f64 (OpNode VR256:$src)))]>;
2384 def PDYm : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2385 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2386 [(set VR256:$dst, (OpNode (memopv4f64 addr:$src)))]>;
2387 }
2389 /// sse2_fp_unop_p_int - SSE2 intrinsic unops in vector forms.
2390 multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr,
2391 Intrinsic V2F64Int> {
2392 def PDr_Int : PDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2393 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2394 [(set VR128:$dst, (V2F64Int VR128:$src))]>;
2395 def PDm_Int : PDI<opc, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
2396 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2397 [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>;
2398 }
2400 /// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms.
2401 multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr,
2402 Intrinsic V2F64Int> {
2403 def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2404 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2405 [(set VR256:$dst, (V2F64Int VR256:$src))]>;
2406 def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
2407 !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"),
2408 [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>;
2409 }
2411 let Predicates = [HasAVX] in {
2412 // Square root.
2413 defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt">,
2414 sse2_fp_unop_s_avx<0x51, "vsqrt">, VEX_4V;
2416 defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>,
2417 sse2_fp_unop_p<0x51, "vsqrt", fsqrt>,
2418 sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
2419 sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>,
2420 sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>,
2421 sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>,
2422 sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>,
2423 sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>,
2426 // Reciprocal approximations. Note that these typically require refinement
2427 // in order to obtain suitable precision.
2428 defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt">, VEX_4V;
2429 defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
2430 sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
2431 sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
2432 sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
2434 defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp">, VEX_4V;
2435 defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
2436 sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
2437 sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
2438 sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
2441 def : Pat<(f32 (fsqrt FR32:$src)),
2442 (VSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2443 def : Pat<(f32 (fsqrt (load addr:$src))),
2444 (VSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2445 Requires<[HasAVX, OptForSize]>;
2446 def : Pat<(f64 (fsqrt FR64:$src)),
2447 (VSQRTSDr (f64 (IMPLICIT_DEF)), FR64:$src)>, Requires<[HasAVX]>;
2448 def : Pat<(f64 (fsqrt (load addr:$src))),
2449 (VSQRTSDm (f64 (IMPLICIT_DEF)), addr:$src)>,
2450 Requires<[HasAVX, OptForSize]>;
2452 def : Pat<(f32 (X86frsqrt FR32:$src)),
2453 (VRSQRTSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2454 def : Pat<(f32 (X86frsqrt (load addr:$src))),
2455 (VRSQRTSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2456 Requires<[HasAVX, OptForSize]>;
2458 def : Pat<(f32 (X86frcp FR32:$src)),
2459 (VRCPSSr (f32 (IMPLICIT_DEF)), FR32:$src)>, Requires<[HasAVX]>;
2460 def : Pat<(f32 (X86frcp (load addr:$src))),
2461 (VRCPSSm (f32 (IMPLICIT_DEF)), addr:$src)>,
2462 Requires<[HasAVX, OptForSize]>;
2464 let Predicates = [HasAVX] in {
2465 def : Pat<(int_x86_sse_sqrt_ss VR128:$src),
2466 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2467 (VSQRTSSr (f32 (IMPLICIT_DEF)),
2468 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2469 sub_ss)>;
2470 def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
2471 (VSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2473 def : Pat<(int_x86_sse2_sqrt_sd VR128:$src),
2474 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)),
2475 (VSQRTSDr (f64 (IMPLICIT_DEF)),
2476 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd)),
2477 sub_sd)>;
2478 def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
2479 (VSQRTSDm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
2481 def : Pat<(int_x86_sse_rsqrt_ss VR128:$src),
2482 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2483 (VRSQRTSSr (f32 (IMPLICIT_DEF)),
2484 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2485 sub_ss)>;
2486 def : Pat<(int_x86_sse_rsqrt_ss sse_load_f32:$src),
2487 (VRSQRTSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2489 def : Pat<(int_x86_sse_rcp_ss VR128:$src),
2490 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)),
2491 (VRCPSSr (f32 (IMPLICIT_DEF)),
2492 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)),
2493 sub_ss)>;
2494 def : Pat<(int_x86_sse_rcp_ss sse_load_f32:$src),
2495 (VRCPSSm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
2496 }
2498 // Square root.
2499 defm SQRT : sse1_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>,
2500 sse1_fp_unop_p<0x51, "sqrt", fsqrt>,
2501 sse1_fp_unop_p_int<0x51, "sqrt", int_x86_sse_sqrt_ps>,
2502 sse2_fp_unop_s<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>,
2503 sse2_fp_unop_p<0x51, "sqrt", fsqrt>,
2504 sse2_fp_unop_p_int<0x51, "sqrt", int_x86_sse2_sqrt_pd>;
2506 // Reciprocal approximations. Note that these typically require refinement
2507 // in order to obtain suitable precision.
2508 defm RSQRT : sse1_fp_unop_s<0x52, "rsqrt", X86frsqrt, int_x86_sse_rsqrt_ss>,
2509 sse1_fp_unop_p<0x52, "rsqrt", X86frsqrt>,
2510 sse1_fp_unop_p_int<0x52, "rsqrt", int_x86_sse_rsqrt_ps>;
2511 defm RCP : sse1_fp_unop_s<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
2512 sse1_fp_unop_p<0x53, "rcp", X86frcp>,
2513 sse1_fp_unop_p_int<0x53, "rcp", int_x86_sse_rcp_ps>;
2515 // There is no f64 version of the reciprocal approximation instructions.
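// The hardware only provides single-precision approximations (rcpss/rcpps,
// rsqrtss/rsqrtps); double-precision reciprocals and reciprocal square roots
// have to go through the full-precision DIVSD/SQRTSD path instead.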
2517 //===----------------------------------------------------------------------===//
2518 // SSE 1 & 2 - Non-temporal stores
2519 //===----------------------------------------------------------------------===//
2521 let AddedComplexity = 400 in { // Prefer non-temporal versions
2522 def VMOVNTPSmr : VPSI<0x2B, MRMDestMem, (outs),
2523 (ins f128mem:$dst, VR128:$src),
2524 "movntps\t{$src, $dst|$dst, $src}",
2525 [(alignednontemporalstore (v4f32 VR128:$src),
2526 addr:$dst)]>, VEX;
2527 def VMOVNTPDmr : VPDI<0x2B, MRMDestMem, (outs),
2528 (ins f128mem:$dst, VR128:$src),
2529 "movntpd\t{$src, $dst|$dst, $src}",
2530 [(alignednontemporalstore (v2f64 VR128:$src),
2531 addr:$dst)]>, VEX;
2532 def VMOVNTDQ_64mr : VPDI<0xE7, MRMDestMem, (outs),
2533 (ins f128mem:$dst, VR128:$src),
2534 "movntdq\t{$src, $dst|$dst, $src}",
2535 [(alignednontemporalstore (v2f64 VR128:$src),
2536 addr:$dst)]>, VEX;
2538 let ExeDomain = SSEPackedInt in
2539 def VMOVNTDQmr : VPDI<0xE7, MRMDestMem, (outs),
2540 (ins f128mem:$dst, VR128:$src),
2541 "movntdq\t{$src, $dst|$dst, $src}",
2542 [(alignednontemporalstore (v4f32 VR128:$src),
2543 addr:$dst)]>, VEX;
2545 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
2546 (VMOVNTDQmr addr:$dst, VR128:$src)>, Requires<[HasAVX]>;
2548 def VMOVNTPSYmr : VPSI<0x2B, MRMDestMem, (outs),
2549 (ins f256mem:$dst, VR256:$src),
2550 "movntps\t{$src, $dst|$dst, $src}",
2551 [(alignednontemporalstore (v8f32 VR256:$src),
2552 addr:$dst)]>, VEX;
2553 def VMOVNTPDYmr : VPDI<0x2B, MRMDestMem, (outs),
2554 (ins f256mem:$dst, VR256:$src),
2555 "movntpd\t{$src, $dst|$dst, $src}",
2556 [(alignednontemporalstore (v4f64 VR256:$src),
2557 addr:$dst)]>, VEX;
2558 def VMOVNTDQY_64mr : VPDI<0xE7, MRMDestMem, (outs),
2559 (ins f256mem:$dst, VR256:$src),
2560 "movntdq\t{$src, $dst|$dst, $src}",
2561 [(alignednontemporalstore (v4f64 VR256:$src),
2562 addr:$dst)]>, VEX;
2563 let ExeDomain = SSEPackedInt in
2564 def VMOVNTDQYmr : VPDI<0xE7, MRMDestMem, (outs),
2565 (ins f256mem:$dst, VR256:$src),
2566 "movntdq\t{$src, $dst|$dst, $src}",
2567 [(alignednontemporalstore (v8f32 VR256:$src),
2568 addr:$dst)]>, VEX;
2569 }
2571 def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
2572 (VMOVNTDQYmr addr:$dst, VR256:$src)>;
2573 def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
2574 (VMOVNTPDYmr addr:$dst, VR256:$src)>;
2575 def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
2576 (VMOVNTPSYmr addr:$dst, VR256:$src)>;
2578 let AddedComplexity = 400 in { // Prefer non-temporal versions
2579 def MOVNTPSmr : PSI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2580 "movntps\t{$src, $dst|$dst, $src}",
2581 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2582 def MOVNTPDmr : PDI<0x2B, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2583 "movntpd\t{$src, $dst|$dst, $src}",
2584 [(alignednontemporalstore(v2f64 VR128:$src), addr:$dst)]>;
2586 def MOVNTDQ_64mr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2587 "movntdq\t{$src, $dst|$dst, $src}",
2588 [(alignednontemporalstore (v2f64 VR128:$src), addr:$dst)]>;
2590 let ExeDomain = SSEPackedInt in
2591 def MOVNTDQmr : PDI<0xE7, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src),
2592 "movntdq\t{$src, $dst|$dst, $src}",
2593 [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]>;
2595 def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
2596 (MOVNTDQmr addr:$dst, VR128:$src)>;
2598 // There is no AVX form for instructions below this point
2599 def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
2600 "movnti{l}\t{$src, $dst|$dst, $src}",
2601 [(nontemporalstore (i32 GR32:$src), addr:$dst)]>,
2602 TB, Requires<[HasSSE2]>;
2603 def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
2604 "movnti{q}\t{$src, $dst|$dst, $src}",
2605 [(nontemporalstore (i64 GR64:$src), addr:$dst)]>,
2606 TB, Requires<[HasSSE2]>;
2607 }
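// Non-temporal stores write around the cache hierarchy and are weakly
// ordered; code that needs the data to be globally visible in order must
// follow them with an SFENCE. The packed forms also require 16-byte (32-byte
// for the YMM variants) aligned addresses, which is why the patterns above
// only match alignednontemporalstore.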
2609 //===----------------------------------------------------------------------===//
2610 // SSE 1 & 2 - Prefetch and memory fence
2611 //===----------------------------------------------------------------------===//
2613 // Prefetch intrinsic.
2614 def PREFETCHT0 : PSI<0x18, MRM1m, (outs), (ins i8mem:$src),
2615 "prefetcht0\t$src", [(prefetch addr:$src, imm, (i32 3), (i32 1))]>;
2616 def PREFETCHT1 : PSI<0x18, MRM2m, (outs), (ins i8mem:$src),
2617 "prefetcht1\t$src", [(prefetch addr:$src, imm, (i32 2), (i32 1))]>;
2618 def PREFETCHT2 : PSI<0x18, MRM3m, (outs), (ins i8mem:$src),
2619 "prefetcht2\t$src", [(prefetch addr:$src, imm, (i32 1), (i32 1))]>;
2620 def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
2621 "prefetchnta\t$src", [(prefetch addr:$src, imm, (i32 0), (i32 1))]>;
2623 // Load, store, and memory fence
2624 def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>,
2625 TB, Requires<[HasSSE1]>;
2626 def : Pat<(X86SFence), (SFENCE)>;
2628 //===----------------------------------------------------------------------===//
2629 // SSE 1 & 2 - Load/Store XCSR register
2630 //===----------------------------------------------------------------------===//
2632 def VLDMXCSR : VPSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2633 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>, VEX;
2634 def VSTMXCSR : VPSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2635 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>, VEX;
2637 def LDMXCSR : PSI<0xAE, MRM2m, (outs), (ins i32mem:$src),
2638 "ldmxcsr\t$src", [(int_x86_sse_ldmxcsr addr:$src)]>;
2639 def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst),
2640 "stmxcsr\t$dst", [(int_x86_sse_stmxcsr addr:$dst)]>;
2642 //===---------------------------------------------------------------------===//
2643 // SSE2 - Move Aligned/Unaligned Packed Integer Instructions
2644 //===---------------------------------------------------------------------===//
2646 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2648 let neverHasSideEffects = 1 in {
2649 def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2650 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2651 def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2652 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2654 def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2655 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2656 def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
2657 "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX;
2659 let canFoldAsLoad = 1, mayLoad = 1 in {
2660 def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2661 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2662 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2663 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2664 let Predicates = [HasAVX] in {
2665 def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2666 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2667 def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
2668 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2672 let mayStore = 1 in {
2673 def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs),
2674 (ins i128mem:$dst, VR128:$src),
2675 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2676 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
2677 (ins i256mem:$dst, VR256:$src),
2678 "movdqa\t{$src, $dst|$dst, $src}", []>, VEX;
2679 let Predicates = [HasAVX] in {
2680 def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2681 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2682 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
2683 "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX;
2687 let neverHasSideEffects = 1 in
2688 def MOVDQArr : PDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2689 "movdqa\t{$src, $dst|$dst, $src}", []>;
2691 def MOVDQUrr : I<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
2692 "movdqu\t{$src, $dst|$dst, $src}",
2693 []>, XS, Requires<[HasSSE2]>;
2695 let canFoldAsLoad = 1, mayLoad = 1 in {
2696 def MOVDQArm : PDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2697 "movdqa\t{$src, $dst|$dst, $src}",
2698 [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>;
2699 def MOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
2700 "movdqu\t{$src, $dst|$dst, $src}",
2701 [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>,
2702 XS, Requires<[HasSSE2]>;
2703 }
2705 let mayStore = 1 in {
2706 def MOVDQAmr : PDI<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2707 "movdqa\t{$src, $dst|$dst, $src}",
2708 [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>;
2709 def MOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2710 "movdqu\t{$src, $dst|$dst, $src}",
2711 [/*(store (v2i64 VR128:$src), addr:$dst)*/]>,
2712 XS, Requires<[HasSSE2]>;
2713 }
2715 // Intrinsic forms of MOVDQU load and store
2716 def VMOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2717 "vmovdqu\t{$src, $dst|$dst, $src}",
2718 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2719 XS, VEX, Requires<[HasAVX]>;
2721 def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
2722 "movdqu\t{$src, $dst|$dst, $src}",
2723 [(int_x86_sse2_storeu_dq addr:$dst, VR128:$src)]>,
2724 XS, Requires<[HasSSE2]>;
2726 } // ExeDomain = SSEPackedInt
2728 def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>;
2729 def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src),
2730 (VMOVDQUYmr addr:$dst, VR256:$src)>;
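// MOVDQA faults (#GP) on addresses that are not 16-byte aligned, while MOVDQU
// accepts any alignment; that is why the aligned and unaligned moves above are
// kept as separate defs rather than folded into one form.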
2732 //===---------------------------------------------------------------------===//
2733 // SSE2 - Packed Integer Arithmetic Instructions
2734 //===---------------------------------------------------------------------===//
2736 let ExeDomain = SSEPackedInt in { // SSE integer instructions
2738 multiclass PDI_binop_rm_int<bits<8> opc, string OpcodeStr, Intrinsic IntId,
2739 bit IsCommutable = 0, bit Is2Addr = 1> {
2740 let isCommutable = IsCommutable in
2741 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2742 (ins VR128:$src1, VR128:$src2),
2743 !if(Is2Addr,
2744 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2745 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2746 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2747 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2748 (ins VR128:$src1, i128mem:$src2),
2749 !if(Is2Addr,
2750 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2751 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2752 [(set VR128:$dst, (IntId VR128:$src1,
2753 (bitconvert (memopv2i64 addr:$src2))))]>;
2754 }
2756 multiclass PDI_binop_rmi_int<bits<8> opc, bits<8> opc2, Format ImmForm,
2757 string OpcodeStr, Intrinsic IntId,
2758 Intrinsic IntId2, bit Is2Addr = 1> {
2759 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2760 (ins VR128:$src1, VR128:$src2),
2761 !if(Is2Addr,
2762 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2763 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2764 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2))]>;
2765 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2766 (ins VR128:$src1, i128mem:$src2),
2767 !if(Is2Addr,
2768 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2769 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2770 [(set VR128:$dst, (IntId VR128:$src1,
2771 (bitconvert (memopv2i64 addr:$src2))))]>;
2772 def ri : PDIi8<opc2, ImmForm, (outs VR128:$dst),
2773 (ins VR128:$src1, i32i8imm:$src2),
2774 !if(Is2Addr,
2775 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2776 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2777 [(set VR128:$dst, (IntId2 VR128:$src1, (i32 imm:$src2)))]>;
2778 }
2780 /// PDI_binop_rm - Simple SSE2 binary operator.
2781 multiclass PDI_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
2782 ValueType OpVT, bit IsCommutable = 0, bit Is2Addr = 1> {
2783 let isCommutable = IsCommutable in
2784 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2785 (ins VR128:$src1, VR128:$src2),
2786 !if(Is2Addr,
2787 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2788 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2789 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>;
2790 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2791 (ins VR128:$src1, i128mem:$src2),
2792 !if(Is2Addr,
2793 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2794 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2795 [(set VR128:$dst, (OpVT (OpNode VR128:$src1,
2796 (bitconvert (memopv2i64 addr:$src2)))))]>;
2797 }
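// As a sketch of how these multiclasses are used: a single
// "defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;" below expands to
// a register-register form (PADDBrr) and a register-memory form (PADDBrm),
// with the memory operand matched through bitconvert of a memopv2i64 load.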
2799 /// PDI_binop_rm_v2i64 - Simple SSE2 binary operator whose type is v2i64.
2801 /// FIXME: we could eliminate this and use PDI_binop_rm instead if tblgen knew
2802 /// to collapse (bitconvert VT to VT) into its operand.
2804 multiclass PDI_binop_rm_v2i64<bits<8> opc, string OpcodeStr, SDNode OpNode,
2805 bit IsCommutable = 0, bit Is2Addr = 1> {
2806 let isCommutable = IsCommutable in
2807 def rr : PDI<opc, MRMSrcReg, (outs VR128:$dst),
2808 (ins VR128:$src1, VR128:$src2),
2809 !if(Is2Addr,
2810 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2811 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2812 [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))]>;
2813 def rm : PDI<opc, MRMSrcMem, (outs VR128:$dst),
2814 (ins VR128:$src1, i128mem:$src2),
2815 !if(Is2Addr,
2816 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
2817 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
2818 [(set VR128:$dst, (OpNode VR128:$src1, (memopv2i64 addr:$src2)))]>;
2819 }
2821 } // ExeDomain = SSEPackedInt
2823 // 128-bit Integer Arithmetic
2825 let Predicates = [HasAVX] in {
2826 defm VPADDB : PDI_binop_rm<0xFC, "vpaddb", add, v16i8, 1, 0 /*3addr*/>, VEX_4V;
2827 defm VPADDW : PDI_binop_rm<0xFD, "vpaddw", add, v8i16, 1, 0>, VEX_4V;
2828 defm VPADDD : PDI_binop_rm<0xFE, "vpaddd", add, v4i32, 1, 0>, VEX_4V;
2829 defm VPADDQ : PDI_binop_rm_v2i64<0xD4, "vpaddq", add, 1, 0>, VEX_4V;
2830 defm VPMULLW : PDI_binop_rm<0xD5, "vpmullw", mul, v8i16, 1, 0>, VEX_4V;
2831 defm VPSUBB : PDI_binop_rm<0xF8, "vpsubb", sub, v16i8, 0, 0>, VEX_4V;
2832 defm VPSUBW : PDI_binop_rm<0xF9, "vpsubw", sub, v8i16, 0, 0>, VEX_4V;
2833 defm VPSUBD : PDI_binop_rm<0xFA, "vpsubd", sub, v4i32, 0, 0>, VEX_4V;
2834 defm VPSUBQ : PDI_binop_rm_v2i64<0xFB, "vpsubq", sub, 0, 0>, VEX_4V;
2836 // Intrinsic forms
2837 defm VPSUBSB : PDI_binop_rm_int<0xE8, "vpsubsb" , int_x86_sse2_psubs_b, 0, 0>,
2838 VEX_4V;
2839 defm VPSUBSW : PDI_binop_rm_int<0xE9, "vpsubsw" , int_x86_sse2_psubs_w, 0, 0>,
2840 VEX_4V;
2841 defm VPSUBUSB : PDI_binop_rm_int<0xD8, "vpsubusb", int_x86_sse2_psubus_b, 0, 0>,
2842 VEX_4V;
2843 defm VPSUBUSW : PDI_binop_rm_int<0xD9, "vpsubusw", int_x86_sse2_psubus_w, 0, 0>,
2844 VEX_4V;
2845 defm VPADDSB : PDI_binop_rm_int<0xEC, "vpaddsb" , int_x86_sse2_padds_b, 1, 0>,
2846 VEX_4V;
2847 defm VPADDSW : PDI_binop_rm_int<0xED, "vpaddsw" , int_x86_sse2_padds_w, 1, 0>,
2848 VEX_4V;
2849 defm VPADDUSB : PDI_binop_rm_int<0xDC, "vpaddusb", int_x86_sse2_paddus_b, 1, 0>,
2850 VEX_4V;
2851 defm VPADDUSW : PDI_binop_rm_int<0xDD, "vpaddusw", int_x86_sse2_paddus_w, 1, 0>,
2852 VEX_4V;
2853 defm VPMULHUW : PDI_binop_rm_int<0xE4, "vpmulhuw", int_x86_sse2_pmulhu_w, 1, 0>,
2854 VEX_4V;
2855 defm VPMULHW : PDI_binop_rm_int<0xE5, "vpmulhw" , int_x86_sse2_pmulh_w, 1, 0>,
2856 VEX_4V;
2857 defm VPMULUDQ : PDI_binop_rm_int<0xF4, "vpmuludq", int_x86_sse2_pmulu_dq, 1, 0>,
2858 VEX_4V;
2859 defm VPMADDWD : PDI_binop_rm_int<0xF5, "vpmaddwd", int_x86_sse2_pmadd_wd, 1, 0>,
2860 VEX_4V;
2861 defm VPAVGB : PDI_binop_rm_int<0xE0, "vpavgb", int_x86_sse2_pavg_b, 1, 0>,
2862 VEX_4V;
2863 defm VPAVGW : PDI_binop_rm_int<0xE3, "vpavgw", int_x86_sse2_pavg_w, 1, 0>,
2864 VEX_4V;
2865 defm VPMINUB : PDI_binop_rm_int<0xDA, "vpminub", int_x86_sse2_pminu_b, 1, 0>,
2866 VEX_4V;
2867 defm VPMINSW : PDI_binop_rm_int<0xEA, "vpminsw", int_x86_sse2_pmins_w, 1, 0>,
2868 VEX_4V;
2869 defm VPMAXUB : PDI_binop_rm_int<0xDE, "vpmaxub", int_x86_sse2_pmaxu_b, 1, 0>,
2870 VEX_4V;
2871 defm VPMAXSW : PDI_binop_rm_int<0xEE, "vpmaxsw", int_x86_sse2_pmaxs_w, 1, 0>,
2872 VEX_4V;
2873 defm VPSADBW : PDI_binop_rm_int<0xF6, "vpsadbw", int_x86_sse2_psad_bw, 1, 0>,
2874 VEX_4V;
2875 }
2877 let Constraints = "$src1 = $dst" in {
2878 defm PADDB : PDI_binop_rm<0xFC, "paddb", add, v16i8, 1>;
2879 defm PADDW : PDI_binop_rm<0xFD, "paddw", add, v8i16, 1>;
2880 defm PADDD : PDI_binop_rm<0xFE, "paddd", add, v4i32, 1>;
2881 defm PADDQ : PDI_binop_rm_v2i64<0xD4, "paddq", add, 1>;
2882 defm PMULLW : PDI_binop_rm<0xD5, "pmullw", mul, v8i16, 1>;
2883 defm PSUBB : PDI_binop_rm<0xF8, "psubb", sub, v16i8>;
2884 defm PSUBW : PDI_binop_rm<0xF9, "psubw", sub, v8i16>;
2885 defm PSUBD : PDI_binop_rm<0xFA, "psubd", sub, v4i32>;
2886 defm PSUBQ : PDI_binop_rm_v2i64<0xFB, "psubq", sub>;
2889 defm PSUBSB : PDI_binop_rm_int<0xE8, "psubsb" , int_x86_sse2_psubs_b>;
2890 defm PSUBSW : PDI_binop_rm_int<0xE9, "psubsw" , int_x86_sse2_psubs_w>;
2891 defm PSUBUSB : PDI_binop_rm_int<0xD8, "psubusb", int_x86_sse2_psubus_b>;
2892 defm PSUBUSW : PDI_binop_rm_int<0xD9, "psubusw", int_x86_sse2_psubus_w>;
2893 defm PADDSB : PDI_binop_rm_int<0xEC, "paddsb" , int_x86_sse2_padds_b, 1>;
2894 defm PADDSW : PDI_binop_rm_int<0xED, "paddsw" , int_x86_sse2_padds_w, 1>;
2895 defm PADDUSB : PDI_binop_rm_int<0xDC, "paddusb", int_x86_sse2_paddus_b, 1>;
2896 defm PADDUSW : PDI_binop_rm_int<0xDD, "paddusw", int_x86_sse2_paddus_w, 1>;
2897 defm PMULHUW : PDI_binop_rm_int<0xE4, "pmulhuw", int_x86_sse2_pmulhu_w, 1>;
2898 defm PMULHW : PDI_binop_rm_int<0xE5, "pmulhw" , int_x86_sse2_pmulh_w, 1>;
2899 defm PMULUDQ : PDI_binop_rm_int<0xF4, "pmuludq", int_x86_sse2_pmulu_dq, 1>;
2900 defm PMADDWD : PDI_binop_rm_int<0xF5, "pmaddwd", int_x86_sse2_pmadd_wd, 1>;
2901 defm PAVGB : PDI_binop_rm_int<0xE0, "pavgb", int_x86_sse2_pavg_b, 1>;
2902 defm PAVGW : PDI_binop_rm_int<0xE3, "pavgw", int_x86_sse2_pavg_w, 1>;
2903 defm PMINUB : PDI_binop_rm_int<0xDA, "pminub", int_x86_sse2_pminu_b, 1>;
2904 defm PMINSW : PDI_binop_rm_int<0xEA, "pminsw", int_x86_sse2_pmins_w, 1>;
2905 defm PMAXUB : PDI_binop_rm_int<0xDE, "pmaxub", int_x86_sse2_pmaxu_b, 1>;
2906 defm PMAXSW : PDI_binop_rm_int<0xEE, "pmaxsw", int_x86_sse2_pmaxs_w, 1>;
2907 defm PSADBW : PDI_binop_rm_int<0xF6, "psadbw", int_x86_sse2_psad_bw, 1>;
2909 } // Constraints = "$src1 = $dst"
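// The legacy SSE forms above are two-address: the Constraints line ties $src1
// to $dst, and the multiclasses pick the "$src2, $dst" assembly string when
// Is2Addr is 1. The VEX-encoded AVX forms earlier pass Is2Addr = 0 and take a
// separate destination operand via VEX_4V instead.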
2911 //===---------------------------------------------------------------------===//
2912 // SSE2 - Packed Integer Logical Instructions
2913 //===---------------------------------------------------------------------===//
2915 let Predicates = [HasAVX] in {
2916 defm VPSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "vpsllw",
2917 int_x86_sse2_psll_w, int_x86_sse2_pslli_w, 0>,
2918 VEX_4V;
2919 defm VPSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "vpslld",
2920 int_x86_sse2_psll_d, int_x86_sse2_pslli_d, 0>,
2921 VEX_4V;
2922 defm VPSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "vpsllq",
2923 int_x86_sse2_psll_q, int_x86_sse2_pslli_q, 0>,
2924 VEX_4V;
2926 defm VPSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "vpsrlw",
2927 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w, 0>,
2928 VEX_4V;
2929 defm VPSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "vpsrld",
2930 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d, 0>,
2931 VEX_4V;
2932 defm VPSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "vpsrlq",
2933 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q, 0>,
2934 VEX_4V;
2936 defm VPSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "vpsraw",
2937 int_x86_sse2_psra_w, int_x86_sse2_psrai_w, 0>,
2938 VEX_4V;
2939 defm VPSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "vpsrad",
2940 int_x86_sse2_psra_d, int_x86_sse2_psrai_d, 0>,
2941 VEX_4V;
2943 defm VPAND : PDI_binop_rm_v2i64<0xDB, "vpand", and, 1, 0>, VEX_4V;
2944 defm VPOR : PDI_binop_rm_v2i64<0xEB, "vpor" , or, 1, 0>, VEX_4V;
2945 defm VPXOR : PDI_binop_rm_v2i64<0xEF, "vpxor", xor, 1, 0>, VEX_4V;
2947 let ExeDomain = SSEPackedInt in {
2948 let neverHasSideEffects = 1 in {
2949 // 128-bit logical shifts.
2950 def VPSLLDQri : PDIi8<0x73, MRM7r,
2951 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2952 "vpslldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2954 def VPSRLDQri : PDIi8<0x73, MRM3r,
2955 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
2956 "vpsrldq\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>,
2958 // PSRADQri doesn't exist in SSE[1-3].
2960 def VPANDNrr : PDI<0xDF, MRMSrcReg,
2961 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
2962 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2964 (v2i64 (X86andnp VR128:$src1, VR128:$src2)))]>,VEX_4V;
2966 def VPANDNrm : PDI<0xDF, MRMSrcMem,
2967 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
2968 "vpandn\t{$src2, $src1, $dst|$dst, $src1, $src2}",
2969 [(set VR128:$dst, (X86andnp VR128:$src1,
2970 (memopv2i64 addr:$src2)))]>, VEX_4V;
2971 }
2972 }
2974 let Constraints = "$src1 = $dst" in {
2975 defm PSLLW : PDI_binop_rmi_int<0xF1, 0x71, MRM6r, "psllw",
2976 int_x86_sse2_psll_w, int_x86_sse2_pslli_w>;
2977 defm PSLLD : PDI_binop_rmi_int<0xF2, 0x72, MRM6r, "pslld",
2978 int_x86_sse2_psll_d, int_x86_sse2_pslli_d>;
2979 defm PSLLQ : PDI_binop_rmi_int<0xF3, 0x73, MRM6r, "psllq",
2980 int_x86_sse2_psll_q, int_x86_sse2_pslli_q>;
2982 defm PSRLW : PDI_binop_rmi_int<0xD1, 0x71, MRM2r, "psrlw",
2983 int_x86_sse2_psrl_w, int_x86_sse2_psrli_w>;
2984 defm PSRLD : PDI_binop_rmi_int<0xD2, 0x72, MRM2r, "psrld",
2985 int_x86_sse2_psrl_d, int_x86_sse2_psrli_d>;
2986 defm PSRLQ : PDI_binop_rmi_int<0xD3, 0x73, MRM2r, "psrlq",
2987 int_x86_sse2_psrl_q, int_x86_sse2_psrli_q>;
2989 defm PSRAW : PDI_binop_rmi_int<0xE1, 0x71, MRM4r, "psraw",
2990 int_x86_sse2_psra_w, int_x86_sse2_psrai_w>;
2991 defm PSRAD : PDI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
2992 int_x86_sse2_psra_d, int_x86_sse2_psrai_d>;
2994 defm PAND : PDI_binop_rm_v2i64<0xDB, "pand", and, 1>;
2995 defm POR : PDI_binop_rm_v2i64<0xEB, "por" , or, 1>;
2996 defm PXOR : PDI_binop_rm_v2i64<0xEF, "pxor", xor, 1>;
2998 let ExeDomain = SSEPackedInt in {
2999 let neverHasSideEffects = 1 in {
3000 // 128-bit logical shifts.
3001 def PSLLDQri : PDIi8<0x73, MRM7r,
3002 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3003 "pslldq\t{$src2, $dst|$dst, $src2}", []>;
3004 def PSRLDQri : PDIi8<0x73, MRM3r,
3005 (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2),
3006 "psrldq\t{$src2, $dst|$dst, $src2}", []>;
3007 // PSRADQri doesn't exist in SSE[1-3].
3008 }
3009 def PANDNrr : PDI<0xDF, MRMSrcReg,
3010 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3011 "pandn\t{$src2, $dst|$dst, $src2}", []>;
3013 def PANDNrm : PDI<0xDF, MRMSrcMem,
3014 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
3015 "pandn\t{$src2, $dst|$dst, $src2}", []>;
3017 } // Constraints = "$src1 = $dst"
3019 let Predicates = [HasAVX] in {
3020 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
3021 (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3022 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
3023 (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3024 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
3025 (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>;
3026 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
3027 (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>;
3028 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
3029 (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3031 // Shift up / down and insert zeros.
3032 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
3033 (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
3034 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
3035 (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
3036 }
3038 let Predicates = [HasSSE2] in {
3039 def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2),
3040 (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3041 def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2),
3042 (v2i64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3043 def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2),
3044 (v2i64 (PSLLDQri VR128:$src1, imm:$src2))>;
3045 def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2),
3046 (v2i64 (PSRLDQri VR128:$src1, imm:$src2))>;
3047 def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)),
3048 (v2f64 (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>;
3050 // Shift up / down and insert zeros.
3051 def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))),
3052 (v2i64 (PSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
3053 def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))),
3054 (v2i64 (PSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>;
3055 }
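// PSLLDQ/PSRLDQ shift the whole 128-bit register by a byte count. The plain
// psll_dq/psrl_dq intrinsics take their shift amount in bits, so BYTE_imm
// rewrites the immediate (divides it by 8) before encoding, while the *_dq_bs
// ("byte shift") intrinsics pass the immediate through unchanged.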
3057 //===---------------------------------------------------------------------===//
3058 // SSE2 - Packed Integer Comparison Instructions
3059 //===---------------------------------------------------------------------===//
3061 let Predicates = [HasAVX] in {
3062 defm VPCMPEQB : PDI_binop_rm_int<0x74, "vpcmpeqb", int_x86_sse2_pcmpeq_b, 1,
3063 0>, VEX_4V;
3064 defm VPCMPEQW : PDI_binop_rm_int<0x75, "vpcmpeqw", int_x86_sse2_pcmpeq_w, 1,
3065 0>, VEX_4V;
3066 defm VPCMPEQD : PDI_binop_rm_int<0x76, "vpcmpeqd", int_x86_sse2_pcmpeq_d, 1,
3067 0>, VEX_4V;
3068 defm VPCMPGTB : PDI_binop_rm_int<0x64, "vpcmpgtb", int_x86_sse2_pcmpgt_b, 0,
3069 0>, VEX_4V;
3070 defm VPCMPGTW : PDI_binop_rm_int<0x65, "vpcmpgtw", int_x86_sse2_pcmpgt_w, 0,
3071 0>, VEX_4V;
3072 defm VPCMPGTD : PDI_binop_rm_int<0x66, "vpcmpgtd", int_x86_sse2_pcmpgt_d, 0,
3073 0>, VEX_4V;
3075 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
3076 (VPCMPEQBrr VR128:$src1, VR128:$src2)>;
3077 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
3078 (VPCMPEQBrm VR128:$src1, addr:$src2)>;
3079 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
3080 (VPCMPEQWrr VR128:$src1, VR128:$src2)>;
3081 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
3082 (VPCMPEQWrm VR128:$src1, addr:$src2)>;
3083 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
3084 (VPCMPEQDrr VR128:$src1, VR128:$src2)>;
3085 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
3086 (VPCMPEQDrm VR128:$src1, addr:$src2)>;
3088 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
3089 (VPCMPGTBrr VR128:$src1, VR128:$src2)>;
3090 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
3091 (VPCMPGTBrm VR128:$src1, addr:$src2)>;
3092 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
3093 (VPCMPGTWrr VR128:$src1, VR128:$src2)>;
3094 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
3095 (VPCMPGTWrm VR128:$src1, addr:$src2)>;
3096 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
3097 (VPCMPGTDrr VR128:$src1, VR128:$src2)>;
3098 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
3099 (VPCMPGTDrm VR128:$src1, addr:$src2)>;
3100 }
3102 let Constraints = "$src1 = $dst" in {
3103 defm PCMPEQB : PDI_binop_rm_int<0x74, "pcmpeqb", int_x86_sse2_pcmpeq_b, 1>;
3104 defm PCMPEQW : PDI_binop_rm_int<0x75, "pcmpeqw", int_x86_sse2_pcmpeq_w, 1>;
3105 defm PCMPEQD : PDI_binop_rm_int<0x76, "pcmpeqd", int_x86_sse2_pcmpeq_d, 1>;
3106 defm PCMPGTB : PDI_binop_rm_int<0x64, "pcmpgtb", int_x86_sse2_pcmpgt_b>;
3107 defm PCMPGTW : PDI_binop_rm_int<0x65, "pcmpgtw", int_x86_sse2_pcmpgt_w>;
3108 defm PCMPGTD : PDI_binop_rm_int<0x66, "pcmpgtd", int_x86_sse2_pcmpgt_d>;
3109 } // Constraints = "$src1 = $dst"
3111 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, VR128:$src2)),
3112 (PCMPEQBrr VR128:$src1, VR128:$src2)>;
3113 def : Pat<(v16i8 (X86pcmpeqb VR128:$src1, (memop addr:$src2))),
3114 (PCMPEQBrm VR128:$src1, addr:$src2)>;
3115 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, VR128:$src2)),
3116 (PCMPEQWrr VR128:$src1, VR128:$src2)>;
3117 def : Pat<(v8i16 (X86pcmpeqw VR128:$src1, (memop addr:$src2))),
3118 (PCMPEQWrm VR128:$src1, addr:$src2)>;
3119 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, VR128:$src2)),
3120 (PCMPEQDrr VR128:$src1, VR128:$src2)>;
3121 def : Pat<(v4i32 (X86pcmpeqd VR128:$src1, (memop addr:$src2))),
3122 (PCMPEQDrm VR128:$src1, addr:$src2)>;
3124 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, VR128:$src2)),
3125 (PCMPGTBrr VR128:$src1, VR128:$src2)>;
3126 def : Pat<(v16i8 (X86pcmpgtb VR128:$src1, (memop addr:$src2))),
3127 (PCMPGTBrm VR128:$src1, addr:$src2)>;
3128 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, VR128:$src2)),
3129 (PCMPGTWrr VR128:$src1, VR128:$src2)>;
3130 def : Pat<(v8i16 (X86pcmpgtw VR128:$src1, (memop addr:$src2))),
3131 (PCMPGTWrm VR128:$src1, addr:$src2)>;
3132 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, VR128:$src2)),
3133 (PCMPGTDrr VR128:$src1, VR128:$src2)>;
3134 def : Pat<(v4i32 (X86pcmpgtd VR128:$src1, (memop addr:$src2))),
3135 (PCMPGTDrm VR128:$src1, addr:$src2)>;
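// pcmpeq*/pcmpgt* set each destination element to all ones when the compare
// is true and to zero otherwise; pcmpgt* is a signed comparison. Equality is
// symmetric, which is why the pcmpeq defms are marked commutable above while
// the pcmpgt defms are not.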
3137 //===---------------------------------------------------------------------===//
3138 // SSE2 - Packed Integer Pack Instructions
3139 //===---------------------------------------------------------------------===//
3141 let Predicates = [HasAVX] in {
3142 defm VPACKSSWB : PDI_binop_rm_int<0x63, "vpacksswb", int_x86_sse2_packsswb_128,
3143 0, 0>, VEX_4V;
3144 defm VPACKSSDW : PDI_binop_rm_int<0x6B, "vpackssdw", int_x86_sse2_packssdw_128,
3145 0, 0>, VEX_4V;
3146 defm VPACKUSWB : PDI_binop_rm_int<0x67, "vpackuswb", int_x86_sse2_packuswb_128,
3147 0, 0>, VEX_4V;
3148 }
3150 let Constraints = "$src1 = $dst" in {
3151 defm PACKSSWB : PDI_binop_rm_int<0x63, "packsswb", int_x86_sse2_packsswb_128>;
3152 defm PACKSSDW : PDI_binop_rm_int<0x6B, "packssdw", int_x86_sse2_packssdw_128>;
3153 defm PACKUSWB : PDI_binop_rm_int<0x67, "packuswb", int_x86_sse2_packuswb_128>;
3154 } // Constraints = "$src1 = $dst"
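// The pack instructions narrow two source vectors into one destination:
// packsswb/packssdw clamp each signed element into the signed range of the
// narrower type, while packuswb clamps signed words into the unsigned byte
// range 0..255.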
3156 //===---------------------------------------------------------------------===//
3157 // SSE2 - Packed Integer Shuffle Instructions
3158 //===---------------------------------------------------------------------===//
3160 let ExeDomain = SSEPackedInt in {
3161 multiclass sse2_pshuffle<string OpcodeStr, ValueType vt, PatFrag pshuf_frag,
3162 PatFrag bc_frag> {
3163 def ri : Ii8<0x70, MRMSrcReg,
3164 (outs VR128:$dst), (ins VR128:$src1, i8imm:$src2),
3165 !strconcat(OpcodeStr,
3166 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3167 [(set VR128:$dst, (vt (pshuf_frag:$src2 VR128:$src1,
3168 (undef))))]>;
3169 def mi : Ii8<0x70, MRMSrcMem,
3170 (outs VR128:$dst), (ins i128mem:$src1, i8imm:$src2),
3171 !strconcat(OpcodeStr,
3172 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
3173 [(set VR128:$dst, (vt (pshuf_frag:$src2
3174 (bc_frag (memopv2i64 addr:$src1)),
3175 (undef))))]>;
3176 }
3177 } // ExeDomain = SSEPackedInt
3179 let Predicates = [HasAVX] in {
3180 let AddedComplexity = 5 in
3181 defm VPSHUFD : sse2_pshuffle<"vpshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize,
3182 VEX;
3184 // SSE2 with ImmT == Imm8 and XS prefix.
3185 defm VPSHUFHW : sse2_pshuffle<"vpshufhw", v8i16, pshufhw, bc_v8i16>, XS,
3186 VEX;
3188 // SSE2 with ImmT == Imm8 and XD prefix.
3189 defm VPSHUFLW : sse2_pshuffle<"vpshuflw", v8i16, pshuflw, bc_v8i16>, XD,
3190 VEX;
3192 let AddedComplexity = 5 in
3193 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3194 (VPSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
3195 // Unary v4f32 shuffle with VPSHUF* in order to fold a load.
3196 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3197 (VPSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
3199 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
3200 (i8 imm:$imm))),
3201 (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>;
3202 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
3203 (i8 imm:$imm))),
3204 (VPSHUFDmi addr:$src1, imm:$imm)>;
3205 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
3206 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
3207 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
3208 (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>;
3209 def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
3210 (VPSHUFHWri VR128:$src, imm:$imm)>;
3211 def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)),
3212 (i8 imm:$imm))),
3213 (VPSHUFHWmi addr:$src, imm:$imm)>;
3214 def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
3215 (VPSHUFLWri VR128:$src, imm:$imm)>;
3216 def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)),
3217 (i8 imm:$imm))),
3218 (VPSHUFLWmi addr:$src, imm:$imm)>;
3219 }
3221 let Predicates = [HasSSE2] in {
3222 let AddedComplexity = 5 in
3223 defm PSHUFD : sse2_pshuffle<"pshufd", v4i32, pshufd, bc_v4i32>, TB, OpSize;
3225 // SSE2 with ImmT == Imm8 and XS prefix.
3226 defm PSHUFHW : sse2_pshuffle<"pshufhw", v8i16, pshufhw, bc_v8i16>, XS;
3228 // SSE2 with ImmT == Imm8 and XD prefix.
3229 defm PSHUFLW : sse2_pshuffle<"pshuflw", v8i16, pshuflw, bc_v8i16>, XD;
3231 let AddedComplexity = 5 in
3232 def : Pat<(v4f32 (pshufd:$src2 VR128:$src1, (undef))),
3233 (PSHUFDri VR128:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
3234 // Unary v4f32 shuffle with PSHUF* in order to fold a load.
3235 def : Pat<(pshufd:$src2 (bc_v4i32 (memopv4f32 addr:$src1)), (undef)),
3236 (PSHUFDmi addr:$src1, (SHUFFLE_get_shuf_imm VR128:$src2))>;
3238 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)),
3239 (i8 imm:$imm))),
3240 (PSHUFDmi addr:$src1, imm:$imm)>;
3241 def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)),
3242 (i8 imm:$imm))),
3243 (PSHUFDmi addr:$src1, imm:$imm)>;
3244 def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
3245 (PSHUFDri VR128:$src1, imm:$imm)>;
3246 def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))),
3247 (PSHUFDri VR128:$src1, imm:$imm)>;
3248 def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))),
3249 (PSHUFHWri VR128:$src, imm:$imm)>;
3250 def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)),
3251 (i8 imm:$imm))),
3252 (PSHUFHWmi addr:$src, imm:$imm)>;
3253 def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))),
3254 (PSHUFLWri VR128:$src, imm:$imm)>;
3255 def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)),
3256 (i8 imm:$imm))),
3257 (PSHUFLWmi addr:$src, imm:$imm)>;
3258 }
3260 //===---------------------------------------------------------------------===//
3261 // SSE2 - Packed Integer Unpack Instructions
3262 //===---------------------------------------------------------------------===//
3264 let ExeDomain = SSEPackedInt in {
3265 multiclass sse2_unpack<bits<8> opc, string OpcodeStr, ValueType vt,
3266 SDNode OpNode, PatFrag bc_frag, bit Is2Addr = 1> {
3267 def rr : PDI<opc, MRMSrcReg,
3268 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3270 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
3271 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3272 [(set VR128:$dst, (vt (OpNode VR128:$src1, VR128:$src2)))]>;
3273 def rm : PDI<opc, MRMSrcMem,
3274 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
3276 !strconcat(OpcodeStr,"\t{$src2, $dst|$dst, $src2}"),
3277 !strconcat(OpcodeStr,"\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
3278 [(set VR128:$dst, (OpNode VR128:$src1,
3279 (bc_frag (memopv2i64
3283 let Predicates = [HasAVX] in {
3284 defm VPUNPCKLBW : sse2_unpack<0x60, "vpunpcklbw", v16i8, X86Punpcklbw,
3285 bc_v16i8, 0>, VEX_4V;
3286 defm VPUNPCKLWD : sse2_unpack<0x61, "vpunpcklwd", v8i16, X86Punpcklwd,
3287 bc_v8i16, 0>, VEX_4V;
3288 defm VPUNPCKLDQ : sse2_unpack<0x62, "vpunpckldq", v4i32, X86Punpckldq,
3289 bc_v4i32, 0>, VEX_4V;
3291 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
3292 /// knew to collapse (bitconvert VT to VT) into its operand.
3293 def VPUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
3294 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3295 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3296 [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
3297 VR128:$src2)))]>, VEX_4V;
3298 def VPUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
3299 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
3300 "vpunpcklqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3301 [(set VR128:$dst, (v2i64 (X86Punpcklqdq VR128:$src1,
3302 (memopv2i64 addr:$src2))))]>, VEX_4V;
3304 defm VPUNPCKHBW : sse2_unpack<0x68, "vpunpckhbw", v16i8, X86Punpckhbw,
3305 bc_v16i8, 0>, VEX_4V;
3306 defm VPUNPCKHWD : sse2_unpack<0x69, "vpunpckhwd", v8i16, X86Punpckhwd,
3307 bc_v8i16, 0>, VEX_4V;
3308 defm VPUNPCKHDQ : sse2_unpack<0x6A, "vpunpckhdq", v4i32, X86Punpckhdq,
3309 bc_v4i32, 0>, VEX_4V;
3311 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
3312 /// knew to collapse (bitconvert VT to VT) into its operand.
3313 def VPUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
3314 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3315 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3316 [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
3317 VR128:$src2)))]>, VEX_4V;
3318 def VPUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
3319 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
3320 "vpunpckhqdq\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3321 [(set VR128:$dst, (v2i64 (X86Punpckhqdq VR128:$src1,
3322 (memopv2i64 addr:$src2))))]>, VEX_4V;
3323 }
3325 let Constraints = "$src1 = $dst" in {
3326 defm PUNPCKLBW : sse2_unpack<0x60, "punpcklbw", v16i8, X86Punpcklbw, bc_v16i8>;
3327 defm PUNPCKLWD : sse2_unpack<0x61, "punpcklwd", v8i16, X86Punpcklwd, bc_v8i16>;
3328 defm PUNPCKLDQ : sse2_unpack<0x62, "punpckldq", v4i32, X86Punpckldq, bc_v4i32>;
3330 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
3331 /// knew to collapse (bitconvert VT to VT) into its operand.
3332 def PUNPCKLQDQrr : PDI<0x6C, MRMSrcReg,
3333 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3334 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
3336 (v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)))]>;
3337 def PUNPCKLQDQrm : PDI<0x6C, MRMSrcMem,
3338 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
3339 "punpcklqdq\t{$src2, $dst|$dst, $src2}",
3341 (v2i64 (X86Punpcklqdq VR128:$src1,
3342 (memopv2i64 addr:$src2))))]>;
3344 defm PUNPCKHBW : sse2_unpack<0x68, "punpckhbw", v16i8, X86Punpckhbw, bc_v16i8>;
3345 defm PUNPCKHWD : sse2_unpack<0x69, "punpckhwd", v8i16, X86Punpckhwd, bc_v8i16>;
3346 defm PUNPCKHDQ : sse2_unpack<0x6A, "punpckhdq", v4i32, X86Punpckhdq, bc_v4i32>;
3348 /// FIXME: we could eliminate this and use sse2_unpack instead if tblgen
3349 /// knew to collapse (bitconvert VT to VT) into its operand.
3350 def PUNPCKHQDQrr : PDI<0x6D, MRMSrcReg,
3351 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
3352 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
3354 (v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)))]>;
3355 def PUNPCKHQDQrm : PDI<0x6D, MRMSrcMem,
3356 (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2),
3357 "punpckhqdq\t{$src2, $dst|$dst, $src2}",
3359 (v2i64 (X86Punpckhqdq VR128:$src1,
3360 (memopv2i64 addr:$src2))))]>;
3361 }
3363 } // ExeDomain = SSEPackedInt
3365 //===---------------------------------------------------------------------===//
3366 // SSE2 - Packed Integer Extract and Insert
3367 //===---------------------------------------------------------------------===//
3369 let ExeDomain = SSEPackedInt in {
3370 multiclass sse2_pinsrw<bit Is2Addr = 1> {
3371 def rri : Ii8<0xC4, MRMSrcReg,
3372 (outs VR128:$dst), (ins VR128:$src1,
3373 GR32:$src2, i32i8imm:$src3),
3374 !if(Is2Addr,
3375 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
3376 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3378 (X86pinsrw VR128:$src1, GR32:$src2, imm:$src3))]>;
3379 def rmi : Ii8<0xC4, MRMSrcMem,
3380 (outs VR128:$dst), (ins VR128:$src1,
3381 i16mem:$src2, i32i8imm:$src3),
3382 !if(Is2Addr,
3383 "pinsrw\t{$src3, $src2, $dst|$dst, $src2, $src3}",
3384 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
3386 (X86pinsrw VR128:$src1, (extloadi16 addr:$src2),
3391 let Predicates = [HasAVX] in
3392 def VPEXTRWri : Ii8<0xC5, MRMSrcReg,
3393 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
3394 "vpextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3395 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
3396 imm:$src2))]>, TB, OpSize, VEX;
3397 def PEXTRWri : PDIi8<0xC5, MRMSrcReg,
3398 (outs GR32:$dst), (ins VR128:$src1, i32i8imm:$src2),
3399 "pextrw\t{$src2, $src1, $dst|$dst, $src1, $src2}",
3400 [(set GR32:$dst, (X86pextrw (v8i16 VR128:$src1),
3401 imm:$src2))]>;
3403 // Insert
3404 let Predicates = [HasAVX] in {
3405 defm VPINSRW : sse2_pinsrw<0>, TB, OpSize, VEX_4V;
3406 def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst),
3407 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
3408 "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
3409 []>, TB, OpSize, VEX_4V;
3410 }
3412 let Constraints = "$src1 = $dst" in
3413 defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>;
3415 } // ExeDomain = SSEPackedInt
3417 //===---------------------------------------------------------------------===//
3418 // SSE2 - Packed Mask Creation
3419 //===---------------------------------------------------------------------===//
3421 let ExeDomain = SSEPackedInt in {
3423 def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
3424 "pmovmskb\t{$src, $dst|$dst, $src}",
3425 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX;
3426 def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src),
3427 "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX;
3428 def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src),
3429 "pmovmskb\t{$src, $dst|$dst, $src}",
3430 [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>;
3432 } // ExeDomain = SSEPackedInt
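// pmovmskb gathers the most significant bit of each of the 16 bytes in the
// source XMM register into the low 16 bits of the GR32 destination (the upper
// bits are zeroed), which turns a byte-compare result into a scalar bitmask.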
3434 //===---------------------------------------------------------------------===//
3435 // SSE2 - Conditional Store
3436 //===---------------------------------------------------------------------===//
3438 let ExeDomain = SSEPackedInt in {
3440 let Uses = [EDI] in
3441 def VMASKMOVDQU : VPDI<0xF7, MRMSrcReg, (outs),
3442 (ins VR128:$src, VR128:$mask),
3443 "maskmovdqu\t{$mask, $src|$src, $mask}",
3444 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>, VEX;
3445 let Uses = [RDI] in
3446 def VMASKMOVDQU64 : VPDI<0xF7, MRMSrcReg, (outs),
3447 (ins VR128:$src, VR128:$mask),
3448 "maskmovdqu\t{$mask, $src|$src, $mask}",
3449 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>, VEX;
3451 let Uses = [EDI] in
3452 def MASKMOVDQU : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
3453 "maskmovdqu\t{$mask, $src|$src, $mask}",
3454 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, EDI)]>;
3455 let Uses = [RDI] in
3456 def MASKMOVDQU64 : PDI<0xF7, MRMSrcReg, (outs), (ins VR128:$src, VR128:$mask),
3457 "maskmovdqu\t{$mask, $src|$src, $mask}",
3458 [(int_x86_sse2_maskmov_dqu VR128:$src, VR128:$mask, RDI)]>;
3460 } // ExeDomain = SSEPackedInt
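// maskmovdqu stores only the bytes of $src whose corresponding mask byte has
// its most significant bit set, to the address implicitly held in EDI/RDI,
// and the store carries a non-temporal hint. The 32-bit and 64-bit defs
// differ only in which register supplies the destination pointer.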
3462 //===---------------------------------------------------------------------===//
3463 // SSE2 - Move Doubleword
3464 //===---------------------------------------------------------------------===//
3466 //===---------------------------------------------------------------------===//
3467 // Move Int Doubleword to Packed Double Int
3469 def VMOVDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3470 "movd\t{$src, $dst|$dst, $src}",
3472 (v4i32 (scalar_to_vector GR32:$src)))]>, VEX;
3473 def VMOVDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3474 "movd\t{$src, $dst|$dst, $src}",
3476 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>,
3478 def VMOV64toPQIrr : VRPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3479 "mov{d|q}\t{$src, $dst|$dst, $src}",
3481 (v2i64 (scalar_to_vector GR64:$src)))]>, VEX;
3482 def VMOV64toSDrr : VRPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
3483 "mov{d|q}\t{$src, $dst|$dst, $src}",
3484 [(set FR64:$dst, (bitconvert GR64:$src))]>, VEX;
3486 def MOVDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3487 "movd\t{$src, $dst|$dst, $src}",
3489 (v4i32 (scalar_to_vector GR32:$src)))]>;
3490 def MOVDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3491 "movd\t{$src, $dst|$dst, $src}",
3493 (v4i32 (scalar_to_vector (loadi32 addr:$src))))]>;
3494 def MOV64toPQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3495 "mov{d|q}\t{$src, $dst|$dst, $src}",
3497 (v2i64 (scalar_to_vector GR64:$src)))]>;
3498 def MOV64toSDrr : RPDI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
3499 "mov{d|q}\t{$src, $dst|$dst, $src}",
3500 [(set FR64:$dst, (bitconvert GR64:$src))]>;
3502 //===---------------------------------------------------------------------===//
3503 // Move Int Doubleword to Single Scalar
3505 def VMOVDI2SSrr : VPDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
3506 "movd\t{$src, $dst|$dst, $src}",
3507 [(set FR32:$dst, (bitconvert GR32:$src))]>, VEX;
3509 def VMOVDI2SSrm : VPDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
3510 "movd\t{$src, $dst|$dst, $src}",
3511 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
3512 VEX;
3513 def MOVDI2SSrr : PDI<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
3514 "movd\t{$src, $dst|$dst, $src}",
3515 [(set FR32:$dst, (bitconvert GR32:$src))]>;
3517 def MOVDI2SSrm : PDI<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
3518 "movd\t{$src, $dst|$dst, $src}",
3519 [(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>;
3521 //===---------------------------------------------------------------------===//
3522 // Move Packed Doubleword Int to Packed Double Int
3524 def VMOVPDI2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
3525 "movd\t{$src, $dst|$dst, $src}",
3526 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
3527 (iPTR 0)))]>, VEX;
3528 def VMOVPDI2DImr : VPDI<0x7E, MRMDestMem, (outs),
3529 (ins i32mem:$dst, VR128:$src),
3530 "movd\t{$src, $dst|$dst, $src}",
3531 [(store (i32 (vector_extract (v4i32 VR128:$src),
3532 (iPTR 0))), addr:$dst)]>, VEX;
3533 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
3534 "movd\t{$src, $dst|$dst, $src}",
3535 [(set GR32:$dst, (vector_extract (v4i32 VR128:$src),
3536 (iPTR 0)))]>;
3537 def MOVPDI2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
3538 "movd\t{$src, $dst|$dst, $src}",
3539 [(store (i32 (vector_extract (v4i32 VR128:$src),
3540 (iPTR 0))), addr:$dst)]>;
3542 def MOVPQIto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3543 "mov{d|q}\t{$src, $dst|$dst, $src}",
3544 [(set GR64:$dst, (vector_extract (v2i64 VR128:$src),
3545 (iPTR 0)))]>;
3546 def MOV64toSDrm : S3SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
3547 "movq\t{$src, $dst|$dst, $src}",
3548 [(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>;
3550 def MOVSDto64rr : RPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
3551 "mov{d|q}\t{$src, $dst|$dst, $src}",
3552 [(set GR64:$dst, (bitconvert FR64:$src))]>;
3553 def MOVSDto64mr : RPDI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
3554 "movq\t{$src, $dst|$dst, $src}",
3555 [(store (i64 (bitconvert FR64:$src)), addr:$dst)]>;
3557 //===---------------------------------------------------------------------===//
3558 // Move Scalar Single to Double Int
3560 def VMOVSS2DIrr : VPDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3561 "movd\t{$src, $dst|$dst, $src}",
3562 [(set GR32:$dst, (bitconvert FR32:$src))]>, VEX;
3563 def VMOVSS2DImr : VPDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3564 "movd\t{$src, $dst|$dst, $src}",
3565 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>, VEX;
3566 def MOVSS2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
3567 "movd\t{$src, $dst|$dst, $src}",
3568 [(set GR32:$dst, (bitconvert FR32:$src))]>;
3569 def MOVSS2DImr : PDI<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
3570 "movd\t{$src, $dst|$dst, $src}",
3571 [(store (i32 (bitconvert FR32:$src)), addr:$dst)]>;
3573 //===---------------------------------------------------------------------===//
3574 // Patterns and instructions to describe movd/movq to XMM register zero-extends
3576 let AddedComplexity = 15 in {
3577 def VMOVZDI2PDIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3578 "movd\t{$src, $dst|$dst, $src}",
3579 [(set VR128:$dst, (v4i32 (X86vzmovl
3580 (v4i32 (scalar_to_vector GR32:$src)))))]>,
3581 VEX;
3582 def VMOVZQI2PQIrr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3583 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3584 [(set VR128:$dst, (v2i64 (X86vzmovl
3585 (v2i64 (scalar_to_vector GR64:$src)))))]>,
3586 VEX, VEX_W;
3587 }
3588 let AddedComplexity = 15 in {
3589 def MOVZDI2PDIrr : PDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
3590 "movd\t{$src, $dst|$dst, $src}",
3591 [(set VR128:$dst, (v4i32 (X86vzmovl
3592 (v4i32 (scalar_to_vector GR32:$src)))))]>;
3593 def MOVZQI2PQIrr : RPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3594 "mov{d|q}\t{$src, $dst|$dst, $src}", // X86-64 only
3595 [(set VR128:$dst, (v2i64 (X86vzmovl
3596 (v2i64 (scalar_to_vector GR64:$src)))))]>;
3597 }
3599 let AddedComplexity = 20 in {
3600 def VMOVZDI2PDIrm : VPDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3601 "movd\t{$src, $dst|$dst, $src}",
3603 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3604 (loadi32 addr:$src))))))]>,
3605 VEX;
3606 def MOVZDI2PDIrm : PDI<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
3607 "movd\t{$src, $dst|$dst, $src}",
3609 (v4i32 (X86vzmovl (v4i32 (scalar_to_vector
3610 (loadi32 addr:$src))))))]>;
3612 def : Pat<(v4i32 (X86vzmovl (loadv4i32 addr:$src))),
3613 (MOVZDI2PDIrm addr:$src)>;
3614 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
3615 (MOVZDI2PDIrm addr:$src)>;
3616 def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
3617 (MOVZDI2PDIrm addr:$src)>;
3618 }
3620 // AVX 128-bit movd/movq instructions write zeros in the high 128-bit part.
3621 // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
3622 def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
3623 (v4i32 (scalar_to_vector GR32:$src)), (i32 0)))),
3624 (SUBREG_TO_REG (i32 0), (VMOVZDI2PDIrr GR32:$src), sub_xmm)>;
3625 def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
3626 (v2i64 (scalar_to_vector GR64:$src)), (i32 0)))),
3627 (SUBREG_TO_REG (i64 0), (VMOVZQI2PQIrr GR64:$src), sub_xmm)>;
3629 // These are the correct encodings of the instructions so that we know how to
3630 // read correct assembly, even though we continue to emit the wrong ones for
3631 // compatibility with Darwin's buggy assembler.
3632 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3633 (MOV64toPQIrr VR128:$dst, GR64:$src), 0>;
3634 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3635 (MOV64toSDrr FR64:$dst, GR64:$src), 0>;
3636 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3637 (MOVPQIto64rr GR64:$dst, VR128:$src), 0>;
3638 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3639 (MOVSDto64rr GR64:$dst, FR64:$src), 0>;
3640 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3641 (VMOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
3642 def : InstAlias<"movq\t{$src, $dst|$dst, $src}",
3643 (MOVZQI2PQIrr VR128:$dst, GR64:$src), 0>;
3645 //===---------------------------------------------------------------------===//
3646 // SSE2 - Move Quadword
3647 //===---------------------------------------------------------------------===//
3649 //===---------------------------------------------------------------------===//
3650 // Move Quadword Int to Packed Quadword Int
3652 def VMOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3653 "vmovq\t{$src, $dst|$dst, $src}",
3655 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3656 VEX, Requires<[HasAVX]>;
3657 def MOVQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3658 "movq\t{$src, $dst|$dst, $src}",
3660 (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>, XS,
3661 Requires<[HasSSE2]>; // SSE2 instruction with XS Prefix
3663 //===---------------------------------------------------------------------===//
3664 // Move Packed Quadword Int to Quadword Int
3666 def VMOVPQI2QImr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3667 "movq\t{$src, $dst|$dst, $src}",
3668 [(store (i64 (vector_extract (v2i64 VR128:$src),
3669 (iPTR 0))), addr:$dst)]>, VEX;
3670 def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3671 "movq\t{$src, $dst|$dst, $src}",
3672 [(store (i64 (vector_extract (v2i64 VR128:$src),
3673 (iPTR 0))), addr:$dst)]>;
3675 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
3676 (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;
3678 //===---------------------------------------------------------------------===//
3679 // Store / copy lower 64-bits of a XMM register.
3681 def VMOVLQ128mr : VPDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3682 "movq\t{$src, $dst|$dst, $src}",
3683 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>, VEX;
3684 def MOVLQ128mr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
3685 "movq\t{$src, $dst|$dst, $src}",
3686 [(int_x86_sse2_storel_dq addr:$dst, VR128:$src)]>;
3688 let AddedComplexity = 20 in
3689 def VMOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3690 "vmovq\t{$src, $dst|$dst, $src}",
3692 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3693 (loadi64 addr:$src))))))]>,
3694 XS, VEX, Requires<[HasAVX]>;
3696 let AddedComplexity = 20 in {
3697 def MOVZQI2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
3698 "movq\t{$src, $dst|$dst, $src}",
3700 (v2i64 (X86vzmovl (v2i64 (scalar_to_vector
3701 (loadi64 addr:$src))))))]>,
3702 XS, Requires<[HasSSE2]>;
3704 def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
3705 (MOVZQI2PQIrm addr:$src)>;
3706 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4f32 addr:$src)))),
3707 (MOVZQI2PQIrm addr:$src)>;
3708 def : Pat<(v2i64 (X86vzload addr:$src)), (MOVZQI2PQIrm addr:$src)>;
3709 }
3711 //===---------------------------------------------------------------------===//
3712 // Moving from XMM to XMM and clearing the upper 64 bits. Note that the IA-32
3713 // documentation is wrong here: movq xmm1, xmm2 does clear the high bits.
3715 let AddedComplexity = 15 in
3716 def VMOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3717 "vmovq\t{$src, $dst|$dst, $src}",
3718 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3719 XS, VEX, Requires<[HasAVX]>;
3720 let AddedComplexity = 15 in
3721 def MOVZPQILo2PQIrr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3722 "movq\t{$src, $dst|$dst, $src}",
3723 [(set VR128:$dst, (v2i64 (X86vzmovl (v2i64 VR128:$src))))]>,
3724 XS, Requires<[HasSSE2]>;
3726 let AddedComplexity = 20 in
3727 def VMOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3728 "vmovq\t{$src, $dst|$dst, $src}",
3729 [(set VR128:$dst, (v2i64 (X86vzmovl
3730 (loadv2i64 addr:$src))))]>,
3731 XS, VEX, Requires<[HasAVX]>;
3732 let AddedComplexity = 20 in {
3733 def MOVZPQILo2PQIrm : I<0x7E, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
3734 "movq\t{$src, $dst|$dst, $src}",
3735 [(set VR128:$dst, (v2i64 (X86vzmovl
3736 (loadv2i64 addr:$src))))]>,
3737 XS, Requires<[HasSSE2]>;
3739 def : Pat<(v2i64 (X86vzmovl (bc_v2i64 (loadv4i32 addr:$src)))),
3740 (MOVZPQILo2PQIrm addr:$src)>;
3741 }
3743 // Instructions to match in the assembler
3744 def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src),
3745 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3746 def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3747 "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3748 // Recognize "movd" with GR64 destination, but encode as a "movq"
3749 def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src),
3750 "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W;
3752 // Instructions for the disassembler
3753 // xr = XMM register
3756 let Predicates = [HasAVX] in
3757 def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3758 "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS;
3759 def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3760 "movq\t{$src, $dst|$dst, $src}", []>, XS;
3762 //===---------------------------------------------------------------------===//
3763 // SSE2 - Misc Instructions
3764 //===---------------------------------------------------------------------===//
3767 def CLFLUSH : I<0xAE, MRM7m, (outs), (ins i8mem:$src),
3768 "clflush\t$src", [(int_x86_sse2_clflush addr:$src)]>,
3769 TB, Requires<[HasSSE2]>;
3771 // Load, store, and memory fence
3772 def LFENCE : I<0xAE, MRM_E8, (outs), (ins),
3773 "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>;
3774 def MFENCE : I<0xAE, MRM_F0, (outs), (ins),
3775 "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>;
3776 def : Pat<(X86LFence), (LFENCE)>;
3777 def : Pat<(X86MFence), (MFENCE)>;
3780 // Pause. This "instruction" is encoded as "rep; nop", so even though it
3781 // was introduced with SSE2, it's backward compatible.
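// (Per the encoding below, that is the REP prefix 0xF3 followed by NOP 0x90,
// which pre-SSE2 processors simply execute as a plain nop.)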
3782 def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP;
// Alias instructions that materialize an all-ones vector (via pcmpeqd).
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-ones value if folding it would be beneficial.
// FIXME: Change encoding to pseudo! This is blocked right now by the x86
// JIT implementation, which does not expand the instructions below the way
// X86MCInstLower does.
3790 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3791 isCodeGenOnly = 1, ExeDomain = SSEPackedInt in
3792 def V_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3793 [(set VR128:$dst, (v4i32 immAllOnesV))]>;
3794 let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
3795 isCodeGenOnly = 1, ExeDomain = SSEPackedInt, Predicates = [HasAVX] in
3796 def AVX_SETALLONES : PDI<0x76, MRMInitReg, (outs VR128:$dst), (ins), "",
3797 [(set VR128:$dst, (v4i32 immAllOnesV))]>, VEX_4V;
3799 //===---------------------------------------------------------------------===//
3800 // SSE3 - Conversion Instructions
3801 //===---------------------------------------------------------------------===//
3803 // Convert Packed Double FP to Packed DW Integers
3804 let Predicates = [HasAVX] in {
3805 // The assembler can recognize rr 256-bit instructions by seeing a ymm
// register, but the same isn't true for memory operands. Provide other
// assembly rr and rm forms to address this explicitly.
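// (The "x"/"y"-suffixed mnemonics below, e.g. vcvtpd2dqx and vcvtpd2dqy,
// give the assembler an unambiguous way to select the 128-bit or 256-bit
// memory form.)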
3808 def VCVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3809 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3810 def VCVTPD2DQXrYr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3811 "vcvtpd2dq\t{$src, $dst|$dst, $src}", []>, VEX;
3814 def VCVTPD2DQXrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3815 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3816 def VCVTPD2DQXrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3817 "vcvtpd2dqx\t{$src, $dst|$dst, $src}", []>, VEX;
3820 def VCVTPD2DQYrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR256:$src),
3821 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX;
3822 def VCVTPD2DQYrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f256mem:$src),
3823 "vcvtpd2dqy\t{$src, $dst|$dst, $src}", []>, VEX, VEX_L;
3826 def CVTPD2DQrm : S3DI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3827 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3828 def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3829 "cvtpd2dq\t{$src, $dst|$dst, $src}", []>;
3831 def : Pat<(v4i32 (fp_to_sint (v4f64 VR256:$src))),
3832 (VCVTPD2DQYrr VR256:$src)>;
3833 def : Pat<(v4i32 (fp_to_sint (memopv4f64 addr:$src))),
3834 (VCVTPD2DQYrm addr:$src)>;
3836 // Convert Packed DW Integers to Packed Double FP
3837 let Predicates = [HasAVX] in {
3838 def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3839 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3840 def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3841 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3842 def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
3843 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3844 def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
3845 "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX;
3848 def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src),
3849 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3850 def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3851 "cvtdq2pd\t{$src, $dst|$dst, $src}", []>;
3853 // AVX 256-bit register conversion intrinsics
3854 def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src),
3855 (VCVTDQ2PDYrr VR128:$src)>;
3856 def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)),
3857 (VCVTDQ2PDYrm addr:$src)>;
3859 def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src),
3860 (VCVTPD2DQYrr VR256:$src)>;
3861 def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)),
3862 (VCVTPD2DQYrm addr:$src)>;
3864 def : Pat<(v4f64 (sint_to_fp (v4i32 VR128:$src))),
3865 (VCVTDQ2PDYrr VR128:$src)>;
3866 def : Pat<(v4f64 (sint_to_fp (memopv4i32 addr:$src))),
3867 (VCVTDQ2PDYrm addr:$src)>;
3869 //===---------------------------------------------------------------------===//
3870 // SSE3 - Replicate Single FP - MOVSHDUP and MOVSLDUP
3871 //===---------------------------------------------------------------------===//
3872 multiclass sse3_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
3873 ValueType vt, RegisterClass RC, PatFrag mem_frag,
3874 X86MemOperand x86memop> {
3875 def rr : S3SI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
3876 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3877 [(set RC:$dst, (vt (OpNode RC:$src)))]>;
3878 def rm : S3SI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
3879 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3880 [(set RC:$dst, (OpNode (mem_frag addr:$src)))]>;
3883 let Predicates = [HasAVX] in {
3884 defm VMOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
3885 v4f32, VR128, memopv4f32, f128mem>, VEX;
3886 defm VMOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
3887 v4f32, VR128, memopv4f32, f128mem>, VEX;
3888 defm VMOVSHDUPY : sse3_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
3889 v8f32, VR256, memopv8f32, f256mem>, VEX;
3890 defm VMOVSLDUPY : sse3_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
3891 v8f32, VR256, memopv8f32, f256mem>, VEX;
3893 defm MOVSHDUP : sse3_replicate_sfp<0x16, X86Movshdup, "movshdup", v4f32, VR128,
3894 memopv4f32, f128mem>;
3895 defm MOVSLDUP : sse3_replicate_sfp<0x12, X86Movsldup, "movsldup", v4f32, VR128,
3896 memopv4f32, f128mem>;
3898 let Predicates = [HasSSE3] in {
3899 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
3900 (MOVSHDUPrr VR128:$src)>;
3901 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
3902 (MOVSHDUPrm addr:$src)>;
3903 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
3904 (MOVSLDUPrr VR128:$src)>;
3905 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
3906 (MOVSLDUPrm addr:$src)>;
3909 let Predicates = [HasAVX] in {
3910 def : Pat<(v4i32 (X86Movshdup VR128:$src)),
3911 (VMOVSHDUPrr VR128:$src)>;
3912 def : Pat<(v4i32 (X86Movshdup (bc_v4i32 (memopv2i64 addr:$src)))),
3913 (VMOVSHDUPrm addr:$src)>;
3914 def : Pat<(v4i32 (X86Movsldup VR128:$src)),
3915 (VMOVSLDUPrr VR128:$src)>;
3916 def : Pat<(v4i32 (X86Movsldup (bc_v4i32 (memopv2i64 addr:$src)))),
3917 (VMOVSLDUPrm addr:$src)>;
3918 def : Pat<(v8i32 (X86Movshdup VR256:$src)),
3919 (VMOVSHDUPYrr VR256:$src)>;
3920 def : Pat<(v8i32 (X86Movshdup (bc_v8i32 (memopv4i64 addr:$src)))),
3921 (VMOVSHDUPYrm addr:$src)>;
3922 def : Pat<(v8i32 (X86Movsldup VR256:$src)),
3923 (VMOVSLDUPYrr VR256:$src)>;
3924 def : Pat<(v8i32 (X86Movsldup (bc_v8i32 (memopv4i64 addr:$src)))),
3925 (VMOVSLDUPYrm addr:$src)>;
3928 //===---------------------------------------------------------------------===//
3929 // SSE3 - Replicate Double FP - MOVDDUP
3930 //===---------------------------------------------------------------------===//
3932 multiclass sse3_replicate_dfp<string OpcodeStr> {
3933 def rr : S3DI<0x12, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
3934 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3935 [(set VR128:$dst,(v2f64 (movddup VR128:$src, (undef))))]>;
3936 def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
3937 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3939 (v2f64 (movddup (scalar_to_vector (loadf64 addr:$src)),
// FIXME: Merge with the class above once there are patterns for the ymm version
3944 multiclass sse3_replicate_dfp_y<string OpcodeStr> {
3945 let Predicates = [HasAVX] in {
3946 def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src),
3947 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3949 def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src),
3950 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
3955 defm MOVDDUP : sse3_replicate_dfp<"movddup">;
3956 defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX;
3957 defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX;
3959 let Predicates = [HasSSE3] in {
3960 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
3962 (MOVDDUPrm addr:$src)>;
3963 let AddedComplexity = 5 in {
3964 def : Pat<(movddup (memopv2f64 addr:$src), (undef)), (MOVDDUPrm addr:$src)>;
3965 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3966 (MOVDDUPrm addr:$src)>;
3967 def : Pat<(movddup (memopv2i64 addr:$src), (undef)), (MOVDDUPrm addr:$src)>;
3968 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
3969 (MOVDDUPrm addr:$src)>;
3971 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
3972 (MOVDDUPrm addr:$src)>;
3973 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
3974 (MOVDDUPrm addr:$src)>;
3975 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
3976 (MOVDDUPrm addr:$src)>;
3977 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
3978 (MOVDDUPrm addr:$src)>;
3979 def : Pat<(X86Movddup (bc_v2f64
3980 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
3981 (MOVDDUPrm addr:$src)>;
3984 let Predicates = [HasAVX] in {
3985 def : Pat<(movddup (bc_v2f64 (v2i64 (scalar_to_vector (loadi64 addr:$src)))),
3987 (VMOVDDUPrm addr:$src)>;
3988 let AddedComplexity = 5 in {
3989 def : Pat<(movddup (memopv2f64 addr:$src), (undef)), (VMOVDDUPrm addr:$src)>;
3990 def : Pat<(movddup (bc_v4f32 (memopv2f64 addr:$src)), (undef)),
3991 (VMOVDDUPrm addr:$src)>;
3992 def : Pat<(movddup (memopv2i64 addr:$src), (undef)), (VMOVDDUPrm addr:$src)>;
3993 def : Pat<(movddup (bc_v4i32 (memopv2i64 addr:$src)), (undef)),
3994 (VMOVDDUPrm addr:$src)>;
3996 def : Pat<(X86Movddup (memopv2f64 addr:$src)),
3997 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
3998 def : Pat<(X86Movddup (bc_v2f64 (memopv4f32 addr:$src))),
3999 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
4000 def : Pat<(X86Movddup (bc_v2f64 (memopv2i64 addr:$src))),
4001 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
4002 def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))),
4003 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
4004 def : Pat<(X86Movddup (bc_v2f64
4005 (v2i64 (scalar_to_vector (loadi64 addr:$src))))),
4006 (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>;
4009 def : Pat<(X86Movddup (memopv4f64 addr:$src)),
4010 (VMOVDDUPYrm addr:$src)>;
4011 def : Pat<(X86Movddup (memopv4i64 addr:$src)),
4012 (VMOVDDUPYrm addr:$src)>;
4013 def : Pat<(X86Movddup (v4f64 (scalar_to_vector (loadf64 addr:$src)))),
4014 (VMOVDDUPYrm addr:$src)>;
4015 def : Pat<(X86Movddup (v4i64 (scalar_to_vector (loadi64 addr:$src)))),
4016 (VMOVDDUPYrm addr:$src)>;
4017 def : Pat<(X86Movddup (v4f64 VR256:$src)),
4018 (VMOVDDUPYrr VR256:$src)>;
4019 def : Pat<(X86Movddup (v4i64 VR256:$src)),
4020 (VMOVDDUPYrr VR256:$src)>;
4023 //===---------------------------------------------------------------------===//
4024 // SSE3 - Move Unaligned Integer
4025 //===---------------------------------------------------------------------===//
4027 let Predicates = [HasAVX] in {
4028 def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4029 "vlddqu\t{$src, $dst|$dst, $src}",
4030 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX;
4031 def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
4032 "vlddqu\t{$src, $dst|$dst, $src}",
4033 [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX;
4035 def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
4036 "lddqu\t{$src, $dst|$dst, $src}",
4037 [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>;
4039 //===---------------------------------------------------------------------===//
4040 // SSE3 - Arithmetic
4041 //===---------------------------------------------------------------------===//
4043 multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC,
4044 X86MemOperand x86memop, bit Is2Addr = 1> {
4045 def rr : I<0xD0, MRMSrcReg,
4046 (outs RC:$dst), (ins RC:$src1, RC:$src2),
4048 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4049 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4050 [(set RC:$dst, (Int RC:$src1, RC:$src2))]>;
4051 def rm : I<0xD0, MRMSrcMem,
4052 (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
4054 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4055 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4056 [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>;
4059 let Predicates = [HasAVX],
4060 ExeDomain = SSEPackedDouble in {
4061 defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128,
4062 f128mem, 0>, TB, XD, VEX_4V;
4063 defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128,
4064 f128mem, 0>, TB, OpSize, VEX_4V;
4065 defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256,
4066 f256mem, 0>, TB, XD, VEX_4V;
4067 defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256,
4068 f256mem, 0>, TB, OpSize, VEX_4V;
4070 let Constraints = "$src1 = $dst", Predicates = [HasSSE3],
4071 ExeDomain = SSEPackedDouble in {
4072 defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128,
4074 defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128,
4075 f128mem>, TB, OpSize;
4078 //===---------------------------------------------------------------------===//
4079 // SSE3 Instructions
4080 //===---------------------------------------------------------------------===//
4083 multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
4084 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
4085 def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
4087 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4088 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4089 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
4091 def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
4093 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4094 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4095 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
4097 multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC,
4098 X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> {
4099 def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
4101 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4102 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4103 [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>;
4105 def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2),
4107 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4108 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4109 [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>;
4112 let Predicates = [HasAVX] in {
4113 defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem,
4114 int_x86_sse3_hadd_ps, 0>, VEX_4V;
4115 defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem,
4116 int_x86_sse3_hadd_pd, 0>, VEX_4V;
4117 defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem,
4118 int_x86_sse3_hsub_ps, 0>, VEX_4V;
4119 defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem,
4120 int_x86_sse3_hsub_pd, 0>, VEX_4V;
4121 defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem,
4122 int_x86_avx_hadd_ps_256, 0>, VEX_4V;
4123 defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem,
4124 int_x86_avx_hadd_pd_256, 0>, VEX_4V;
4125 defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem,
4126 int_x86_avx_hsub_ps_256, 0>, VEX_4V;
4127 defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem,
4128 int_x86_avx_hsub_pd_256, 0>, VEX_4V;
4131 let Constraints = "$src1 = $dst" in {
4132 defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem,
4133 int_x86_sse3_hadd_ps>;
4134 defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem,
4135 int_x86_sse3_hadd_pd>;
4136 defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem,
4137 int_x86_sse3_hsub_ps>;
4138 defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem,
4139 int_x86_sse3_hsub_pd>;
4142 //===---------------------------------------------------------------------===//
4143 // SSSE3 - Packed Absolute Instructions
4144 //===---------------------------------------------------------------------===//
4147 /// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}.
4148 multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr,
4149 PatFrag mem_frag128, Intrinsic IntId128> {
4150 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
4152 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4153 [(set VR128:$dst, (IntId128 VR128:$src))]>,
4156 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
4158 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4161 (bitconvert (mem_frag128 addr:$src))))]>, OpSize;
4164 let Predicates = [HasAVX] in {
4165 defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8,
4166 int_x86_ssse3_pabs_b_128>, VEX;
4167 defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16,
4168 int_x86_ssse3_pabs_w_128>, VEX;
4169 defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32,
4170 int_x86_ssse3_pabs_d_128>, VEX;
4173 defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8,
4174 int_x86_ssse3_pabs_b_128>;
4175 defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16,
4176 int_x86_ssse3_pabs_w_128>;
4177 defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32,
4178 int_x86_ssse3_pabs_d_128>;
4180 //===---------------------------------------------------------------------===//
4181 // SSSE3 - Packed Binary Operator Instructions
4182 //===---------------------------------------------------------------------===//
4184 /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}.
4185 multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr,
4186 PatFrag mem_frag128, Intrinsic IntId128,
4188 let isCommutable = 1 in
4189 def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst),
4190 (ins VR128:$src1, VR128:$src2),
4192 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4193 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4194 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
4196 def rm128 : SS38I<opc, MRMSrcMem, (outs VR128:$dst),
4197 (ins VR128:$src1, i128mem:$src2),
4199 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
4200 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
4202 (IntId128 VR128:$src1,
4203 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
4206 let Predicates = [HasAVX] in {
4207 let isCommutable = 0 in {
4208 defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16,
4209 int_x86_ssse3_phadd_w_128, 0>, VEX_4V;
4210 defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32,
4211 int_x86_ssse3_phadd_d_128, 0>, VEX_4V;
4212 defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16,
4213 int_x86_ssse3_phadd_sw_128, 0>, VEX_4V;
4214 defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16,
4215 int_x86_ssse3_phsub_w_128, 0>, VEX_4V;
4216 defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32,
4217 int_x86_ssse3_phsub_d_128, 0>, VEX_4V;
4218 defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16,
4219 int_x86_ssse3_phsub_sw_128, 0>, VEX_4V;
4220 defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8,
4221 int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V;
4222 defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8,
4223 int_x86_ssse3_pshuf_b_128, 0>, VEX_4V;
4224 defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv16i8,
4225 int_x86_ssse3_psign_b_128, 0>, VEX_4V;
4226 defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16,
4227 int_x86_ssse3_psign_w_128, 0>, VEX_4V;
4228 defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32,
4229 int_x86_ssse3_psign_d_128, 0>, VEX_4V;
4231 defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16,
4232 int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V;
4235 // None of these have i8 immediate fields.
4236 let ImmT = NoImm, Constraints = "$src1 = $dst" in {
4237 let isCommutable = 0 in {
4238 defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16,
4239 int_x86_ssse3_phadd_w_128>;
4240 defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32,
4241 int_x86_ssse3_phadd_d_128>;
4242 defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16,
4243 int_x86_ssse3_phadd_sw_128>;
4244 defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16,
4245 int_x86_ssse3_phsub_w_128>;
4246 defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32,
4247 int_x86_ssse3_phsub_d_128>;
4248 defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16,
4249 int_x86_ssse3_phsub_sw_128>;
4250 defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8,
4251 int_x86_ssse3_pmadd_ub_sw_128>;
4252 defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv16i8,
4253 int_x86_ssse3_pshuf_b_128>;
4254 defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8,
4255 int_x86_ssse3_psign_b_128>;
4256 defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16,
4257 int_x86_ssse3_psign_w_128>;
4258 defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32,
4259 int_x86_ssse3_psign_d_128>;
4261 defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16,
4262 int_x86_ssse3_pmul_hr_sw_128>;
4265 def : Pat<(X86pshufb VR128:$src, VR128:$mask),
4266 (PSHUFBrr128 VR128:$src, VR128:$mask)>, Requires<[HasSSSE3]>;
4267 def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))),
4268 (PSHUFBrm128 VR128:$src, addr:$mask)>, Requires<[HasSSSE3]>;
4270 def : Pat<(X86psignb VR128:$src1, VR128:$src2),
4271 (PSIGNBrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
4272 def : Pat<(X86psignw VR128:$src1, VR128:$src2),
4273 (PSIGNWrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
4274 def : Pat<(X86psignd VR128:$src1, VR128:$src2),
4275 (PSIGNDrr128 VR128:$src1, VR128:$src2)>, Requires<[HasSSSE3]>;
4277 //===---------------------------------------------------------------------===//
4278 // SSSE3 - Packed Align Instruction Patterns
4279 //===---------------------------------------------------------------------===//
4281 multiclass ssse3_palign<string asm, bit Is2Addr = 1> {
4282 def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst),
4283 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
4285 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4287 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4289 def R128rm : SS3AI<0x0F, MRMSrcMem, (outs VR128:$dst),
4290 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
4292 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4294 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4298 let Predicates = [HasAVX] in
4299 defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V;
4300 let Constraints = "$src1 = $dst", Predicates = [HasSSSE3] in
4301 defm PALIGN : ssse3_palign<"palignr">;
4303 let Predicates = [HasSSSE3] in {
4304 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4305 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4306 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4307 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4308 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4309 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4310 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4311 (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4314 let Predicates = [HasAVX] in {
4315 def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4316 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4317 def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4318 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4319 def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4320 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4321 def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))),
4322 (VPALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>;
4325 //===---------------------------------------------------------------------===//
4326 // SSSE3 Misc Instructions
4327 //===---------------------------------------------------------------------===//
4329 // Thread synchronization
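// The MONITOR/MWAIT pseudos below take virtual-register operands; the custom
// inserter is expected to copy them into the fixed registers used by the real
// instructions further down (EAX/ECX/EDX for monitor, ECX/EAX for mwait).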
4330 let usesCustomInserter = 1 in {
4331 def MONITOR : PseudoI<(outs), (ins i32mem:$src1, GR32:$src2, GR32:$src3),
4332 [(int_x86_sse3_monitor addr:$src1, GR32:$src2, GR32:$src3)]>;
4333 def MWAIT : PseudoI<(outs), (ins GR32:$src1, GR32:$src2),
4334 [(int_x86_sse3_mwait GR32:$src1, GR32:$src2)]>;
4337 let Uses = [EAX, ECX, EDX] in
4338 def MONITORrrr : I<0x01, MRM_C8, (outs), (ins), "monitor", []>, TB,
4339 Requires<[HasSSE3]>;
4340 let Uses = [ECX, EAX] in
4341 def MWAITrr : I<0x01, MRM_C9, (outs), (ins), "mwait", []>, TB,
4342 Requires<[HasSSE3]>;
4344 def : InstAlias<"mwait %eax, %ecx", (MWAITrr)>, Requires<[In32BitMode]>;
4345 def : InstAlias<"mwait %rax, %rcx", (MWAITrr)>, Requires<[In64BitMode]>;
4347 def : InstAlias<"monitor %eax, %ecx, %edx", (MONITORrrr)>,
4348 Requires<[In32BitMode]>;
4349 def : InstAlias<"monitor %rax, %rcx, %rdx", (MONITORrrr)>,
4350 Requires<[In64BitMode]>;
// extload f32 -> f64. This matches load+fextend because we have a hack in
// the isel (PreprocessForFPConvert) that can introduce loads after dag
// combine. Since these loads aren't folded into the fextend, we have to match
// it explicitly here.
4357 let Predicates = [HasSSE2] in
4358 def : Pat<(fextend (loadf32 addr:$src)),
4359 (CVTSS2SDrm addr:$src)>;
// Move a scalar to XMM zero-extended (movd to an XMM register zero-extends).
let AddedComplexity = 15 in {
// Zero a VR128, then do a MOVS{S|D} into the lower bits.
4365 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64:$src)))),
4366 (MOVSDrr (v2f64 (V_SET0PS)), FR64:$src)>;
4367 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
4368 (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
4369 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
4370 (MOVSSrr (v4f32 (V_SET0PS)),
4371 (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
4372 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
4373 (MOVSSrr (v4i32 (V_SET0PI)),
4374 (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
4377 // Splat v2f64 / v2i64
4378 let AddedComplexity = 10 in {
4379 def : Pat<(splat_lo (v2i64 VR128:$src), (undef)),
4380 (PUNPCKLQDQrr VR128:$src, VR128:$src)>, Requires<[HasSSE2]>;
4383 let AddedComplexity = 20 in {
4384 // vector_shuffle v1, (load v2) <4, 5, 2, 3> using MOVLPS
4385 def : Pat<(v4f32 (movlp VR128:$src1, (load addr:$src2))),
4386 (MOVLPSrm VR128:$src1, addr:$src2)>;
4387 def : Pat<(v2f64 (movlp VR128:$src1, (load addr:$src2))),
4388 (MOVLPDrm VR128:$src1, addr:$src2)>;
4389 def : Pat<(v4i32 (movlp VR128:$src1, (load addr:$src2))),
4390 (MOVLPSrm VR128:$src1, addr:$src2)>;
4391 def : Pat<(v2i64 (movlp VR128:$src1, (load addr:$src2))),
4392 (MOVLPDrm VR128:$src1, addr:$src2)>;
4395 // (store (vector_shuffle (load addr), v2, <4, 5, 2, 3>), addr) using MOVLPS
4396 def : Pat<(store (v4f32 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
4397 (MOVLPSmr addr:$src1, VR128:$src2)>;
4398 def : Pat<(store (v2f64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
4399 (MOVLPDmr addr:$src1, VR128:$src2)>;
4400 def : Pat<(store (v4i32 (movlp (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)),
4402 (MOVLPSmr addr:$src1, VR128:$src2)>;
4403 def : Pat<(store (v2i64 (movlp (load addr:$src1), VR128:$src2)), addr:$src1),
4404 (MOVLPDmr addr:$src1, VR128:$src2)>;
4406 let AddedComplexity = 15 in {
4407 // Setting the lowest element in the vector.
4408 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
4409 (MOVSSrr (v4i32 VR128:$src1),
4410 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
4411 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
4412 (MOVSDrr (v2i64 VR128:$src1),
4413 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
4415 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
4416 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
4417 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
4418 Requires<[HasSSE2]>;
4419 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
4420 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
4421 Requires<[HasSSE2]>;
4424 // Set lowest element and zero upper elements.
4425 def : Pat<(v2f64 (X86vzmovl (v2f64 VR128:$src))),
4426 (MOVZPQILo2PQIrr VR128:$src)>, Requires<[HasSSE2]>;
4428 // Use movaps / movups for SSE integer load / store (one byte shorter).
4429 // The instructions selected below are then converted to MOVDQA/MOVDQU
4430 // during the SSE domain pass.
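// (movaps/movups are a byte shorter because, unlike movdqa/movdqu, they carry
// no 0x66 operand-size prefix.)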
4431 let Predicates = [HasSSE1] in {
4432 def : Pat<(alignedloadv4i32 addr:$src),
4433 (MOVAPSrm addr:$src)>;
4434 def : Pat<(loadv4i32 addr:$src),
4435 (MOVUPSrm addr:$src)>;
4436 def : Pat<(alignedloadv2i64 addr:$src),
4437 (MOVAPSrm addr:$src)>;
4438 def : Pat<(loadv2i64 addr:$src),
4439 (MOVUPSrm addr:$src)>;
4441 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
4442 (MOVAPSmr addr:$dst, VR128:$src)>;
4443 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
4444 (MOVAPSmr addr:$dst, VR128:$src)>;
4445 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
4446 (MOVAPSmr addr:$dst, VR128:$src)>;
4447 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
4448 (MOVAPSmr addr:$dst, VR128:$src)>;
4449 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
4450 (MOVUPSmr addr:$dst, VR128:$src)>;
4451 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
4452 (MOVUPSmr addr:$dst, VR128:$src)>;
4453 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
4454 (MOVUPSmr addr:$dst, VR128:$src)>;
4455 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
4456 (MOVUPSmr addr:$dst, VR128:$src)>;
4459 // Use vmovaps/vmovups for AVX integer load/store.
4460 let Predicates = [HasAVX] in {
4461 // 128-bit load/store
4462 def : Pat<(alignedloadv4i32 addr:$src),
4463 (VMOVAPSrm addr:$src)>;
4464 def : Pat<(loadv4i32 addr:$src),
4465 (VMOVUPSrm addr:$src)>;
4466 def : Pat<(alignedloadv2i64 addr:$src),
4467 (VMOVAPSrm addr:$src)>;
4468 def : Pat<(loadv2i64 addr:$src),
4469 (VMOVUPSrm addr:$src)>;
4471 def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
4472 (VMOVAPSmr addr:$dst, VR128:$src)>;
4473 def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
4474 (VMOVAPSmr addr:$dst, VR128:$src)>;
4475 def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
4476 (VMOVAPSmr addr:$dst, VR128:$src)>;
4477 def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
4478 (VMOVAPSmr addr:$dst, VR128:$src)>;
4479 def : Pat<(store (v2i64 VR128:$src), addr:$dst),
4480 (VMOVUPSmr addr:$dst, VR128:$src)>;
4481 def : Pat<(store (v4i32 VR128:$src), addr:$dst),
4482 (VMOVUPSmr addr:$dst, VR128:$src)>;
4483 def : Pat<(store (v8i16 VR128:$src), addr:$dst),
4484 (VMOVUPSmr addr:$dst, VR128:$src)>;
4485 def : Pat<(store (v16i8 VR128:$src), addr:$dst),
4486 (VMOVUPSmr addr:$dst, VR128:$src)>;
4488 // 256-bit load/store
4489 def : Pat<(alignedloadv4i64 addr:$src),
4490 (VMOVAPSYrm addr:$src)>;
4491 def : Pat<(loadv4i64 addr:$src),
4492 (VMOVUPSYrm addr:$src)>;
4493 def : Pat<(alignedloadv8i32 addr:$src),
4494 (VMOVAPSYrm addr:$src)>;
4495 def : Pat<(loadv8i32 addr:$src),
4496 (VMOVUPSYrm addr:$src)>;
4497 def : Pat<(alignedstore (v4i64 VR256:$src), addr:$dst),
4498 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4499 def : Pat<(alignedstore (v8i32 VR256:$src), addr:$dst),
4500 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4501 def : Pat<(alignedstore (v16i16 VR256:$src), addr:$dst),
4502 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4503 def : Pat<(alignedstore (v32i8 VR256:$src), addr:$dst),
4504 (VMOVAPSYmr addr:$dst, VR256:$src)>;
4505 def : Pat<(store (v4i64 VR256:$src), addr:$dst),
4506 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4507 def : Pat<(store (v8i32 VR256:$src), addr:$dst),
4508 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4509 def : Pat<(store (v16i16 VR256:$src), addr:$dst),
4510 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4511 def : Pat<(store (v32i8 VR256:$src), addr:$dst),
4512 (VMOVUPSYmr addr:$dst, VR256:$src)>;
4515 //===----------------------------------------------------------------------===//
4516 // SSE4.1 - Packed Move with Sign/Zero Extend
4517 //===----------------------------------------------------------------------===//
4519 multiclass SS41I_binop_rm_int8<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4520 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4521 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4522 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4524 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
4525 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4527 (IntId (bitconvert (v2i64 (scalar_to_vector (loadi64 addr:$src))))))]>,
4531 let Predicates = [HasAVX] in {
4532 defm VPMOVSXBW : SS41I_binop_rm_int8<0x20, "vpmovsxbw", int_x86_sse41_pmovsxbw>,
4534 defm VPMOVSXWD : SS41I_binop_rm_int8<0x23, "vpmovsxwd", int_x86_sse41_pmovsxwd>,
4536 defm VPMOVSXDQ : SS41I_binop_rm_int8<0x25, "vpmovsxdq", int_x86_sse41_pmovsxdq>,
4538 defm VPMOVZXBW : SS41I_binop_rm_int8<0x30, "vpmovzxbw", int_x86_sse41_pmovzxbw>,
4540 defm VPMOVZXWD : SS41I_binop_rm_int8<0x33, "vpmovzxwd", int_x86_sse41_pmovzxwd>,
4542 defm VPMOVZXDQ : SS41I_binop_rm_int8<0x35, "vpmovzxdq", int_x86_sse41_pmovzxdq>,
4546 defm PMOVSXBW : SS41I_binop_rm_int8<0x20, "pmovsxbw", int_x86_sse41_pmovsxbw>;
4547 defm PMOVSXWD : SS41I_binop_rm_int8<0x23, "pmovsxwd", int_x86_sse41_pmovsxwd>;
4548 defm PMOVSXDQ : SS41I_binop_rm_int8<0x25, "pmovsxdq", int_x86_sse41_pmovsxdq>;
4549 defm PMOVZXBW : SS41I_binop_rm_int8<0x30, "pmovzxbw", int_x86_sse41_pmovzxbw>;
4550 defm PMOVZXWD : SS41I_binop_rm_int8<0x33, "pmovzxwd", int_x86_sse41_pmovzxwd>;
4551 defm PMOVZXDQ : SS41I_binop_rm_int8<0x35, "pmovzxdq", int_x86_sse41_pmovzxdq>;
4553 // Common patterns involving scalar load.
4554 def : Pat<(int_x86_sse41_pmovsxbw (vzmovl_v2i64 addr:$src)),
4555 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4556 def : Pat<(int_x86_sse41_pmovsxbw (vzload_v2i64 addr:$src)),
4557 (PMOVSXBWrm addr:$src)>, Requires<[HasSSE41]>;
4559 def : Pat<(int_x86_sse41_pmovsxwd (vzmovl_v2i64 addr:$src)),
4560 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4561 def : Pat<(int_x86_sse41_pmovsxwd (vzload_v2i64 addr:$src)),
4562 (PMOVSXWDrm addr:$src)>, Requires<[HasSSE41]>;
4564 def : Pat<(int_x86_sse41_pmovsxdq (vzmovl_v2i64 addr:$src)),
4565 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4566 def : Pat<(int_x86_sse41_pmovsxdq (vzload_v2i64 addr:$src)),
4567 (PMOVSXDQrm addr:$src)>, Requires<[HasSSE41]>;
4569 def : Pat<(int_x86_sse41_pmovzxbw (vzmovl_v2i64 addr:$src)),
4570 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4571 def : Pat<(int_x86_sse41_pmovzxbw (vzload_v2i64 addr:$src)),
4572 (PMOVZXBWrm addr:$src)>, Requires<[HasSSE41]>;
4574 def : Pat<(int_x86_sse41_pmovzxwd (vzmovl_v2i64 addr:$src)),
4575 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4576 def : Pat<(int_x86_sse41_pmovzxwd (vzload_v2i64 addr:$src)),
4577 (PMOVZXWDrm addr:$src)>, Requires<[HasSSE41]>;
4579 def : Pat<(int_x86_sse41_pmovzxdq (vzmovl_v2i64 addr:$src)),
4580 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4581 def : Pat<(int_x86_sse41_pmovzxdq (vzload_v2i64 addr:$src)),
4582 (PMOVZXDQrm addr:$src)>, Requires<[HasSSE41]>;
4585 multiclass SS41I_binop_rm_int4<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4586 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4587 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4588 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
4590 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i32mem:$src),
4591 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4593 (IntId (bitconvert (v4i32 (scalar_to_vector (loadi32 addr:$src))))))]>,
4597 let Predicates = [HasAVX] in {
4598 defm VPMOVSXBD : SS41I_binop_rm_int4<0x21, "vpmovsxbd", int_x86_sse41_pmovsxbd>,
4600 defm VPMOVSXWQ : SS41I_binop_rm_int4<0x24, "vpmovsxwq", int_x86_sse41_pmovsxwq>,
4602 defm VPMOVZXBD : SS41I_binop_rm_int4<0x31, "vpmovzxbd", int_x86_sse41_pmovzxbd>,
4604 defm VPMOVZXWQ : SS41I_binop_rm_int4<0x34, "vpmovzxwq", int_x86_sse41_pmovzxwq>,
4608 defm PMOVSXBD : SS41I_binop_rm_int4<0x21, "pmovsxbd", int_x86_sse41_pmovsxbd>;
4609 defm PMOVSXWQ : SS41I_binop_rm_int4<0x24, "pmovsxwq", int_x86_sse41_pmovsxwq>;
4610 defm PMOVZXBD : SS41I_binop_rm_int4<0x31, "pmovzxbd", int_x86_sse41_pmovzxbd>;
4611 defm PMOVZXWQ : SS41I_binop_rm_int4<0x34, "pmovzxwq", int_x86_sse41_pmovzxwq>;
4613 // Common patterns involving scalar load
4614 def : Pat<(int_x86_sse41_pmovsxbd (vzmovl_v4i32 addr:$src)),
4615 (PMOVSXBDrm addr:$src)>, Requires<[HasSSE41]>;
4616 def : Pat<(int_x86_sse41_pmovsxwq (vzmovl_v4i32 addr:$src)),
4617 (PMOVSXWQrm addr:$src)>, Requires<[HasSSE41]>;
4619 def : Pat<(int_x86_sse41_pmovzxbd (vzmovl_v4i32 addr:$src)),
4620 (PMOVZXBDrm addr:$src)>, Requires<[HasSSE41]>;
4621 def : Pat<(int_x86_sse41_pmovzxwq (vzmovl_v4i32 addr:$src)),
4622 (PMOVZXWQrm addr:$src)>, Requires<[HasSSE41]>;
4625 multiclass SS41I_binop_rm_int2<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
4626 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
4627 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4628 [(set VR128:$dst, (IntId VR128:$src))]>, OpSize;
// Expecting an i16 load any-extended to an i32 value.
4631 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst), (ins i16mem:$src),
4632 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
4633 [(set VR128:$dst, (IntId (bitconvert
4634 (v4i32 (scalar_to_vector (loadi16_anyext addr:$src))))))]>,
4638 let Predicates = [HasAVX] in {
4639 defm VPMOVSXBQ : SS41I_binop_rm_int2<0x22, "vpmovsxbq", int_x86_sse41_pmovsxbq>,
4641 defm VPMOVZXBQ : SS41I_binop_rm_int2<0x32, "vpmovzxbq", int_x86_sse41_pmovzxbq>,
4644 defm PMOVSXBQ : SS41I_binop_rm_int2<0x22, "pmovsxbq", int_x86_sse41_pmovsxbq>;
4645 defm PMOVZXBQ : SS41I_binop_rm_int2<0x32, "pmovzxbq", int_x86_sse41_pmovzxbq>;
4647 // Common patterns involving scalar load
4648 def : Pat<(int_x86_sse41_pmovsxbq
4649 (bitconvert (v4i32 (X86vzmovl
4650 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4651 (PMOVSXBQrm addr:$src)>, Requires<[HasSSE41]>;
4653 def : Pat<(int_x86_sse41_pmovzxbq
4654 (bitconvert (v4i32 (X86vzmovl
4655 (v4i32 (scalar_to_vector (loadi32 addr:$src))))))),
4656 (PMOVZXBQrm addr:$src)>, Requires<[HasSSE41]>;
4658 //===----------------------------------------------------------------------===//
4659 // SSE4.1 - Extract Instructions
4660 //===----------------------------------------------------------------------===//
/// SS41I_extract8 - SSE 4.1 extract 8 bits to a 32-bit reg or 8-bit mem
4663 multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> {
4664 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4665 (ins VR128:$src1, i32i8imm:$src2),
4666 !strconcat(OpcodeStr,
4667 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4668 [(set GR32:$dst, (X86pextrb (v16i8 VR128:$src1), imm:$src2))]>,
4670 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4671 (ins i8mem:$dst, VR128:$src1, i32i8imm:$src2),
4672 !strconcat(OpcodeStr,
4673 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4676 // There's an AssertZext in the way of writing the store pattern
4677 // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst)
4680 let Predicates = [HasAVX] in {
4681 defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX;
4682 def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst),
4683 (ins VR128:$src1, i32i8imm:$src2),
4684 "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX;
4687 defm PEXTRB : SS41I_extract8<0x14, "pextrb">;
4690 /// SS41I_extract16 - SSE 4.1 extract 16 bits to memory destination
4691 multiclass SS41I_extract16<bits<8> opc, string OpcodeStr> {
4692 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4693 (ins i16mem:$dst, VR128:$src1, i32i8imm:$src2),
4694 !strconcat(OpcodeStr,
4695 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4698 // There's an AssertZext in the way of writing the store pattern
// (store (i16 (trunc (X86pextrw (v8i16 VR128:$src1), imm:$src2))), addr:$dst)
4702 let Predicates = [HasAVX] in
4703 defm VPEXTRW : SS41I_extract16<0x15, "vpextrw">, VEX;
4705 defm PEXTRW : SS41I_extract16<0x15, "pextrw">;
4708 /// SS41I_extract32 - SSE 4.1 extract 32 bits to int reg or memory destination
4709 multiclass SS41I_extract32<bits<8> opc, string OpcodeStr> {
4710 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4711 (ins VR128:$src1, i32i8imm:$src2),
4712 !strconcat(OpcodeStr,
4713 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4715 (extractelt (v4i32 VR128:$src1), imm:$src2))]>, OpSize;
4716 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4717 (ins i32mem:$dst, VR128:$src1, i32i8imm:$src2),
4718 !strconcat(OpcodeStr,
4719 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4720 [(store (extractelt (v4i32 VR128:$src1), imm:$src2),
4721 addr:$dst)]>, OpSize;
4724 let Predicates = [HasAVX] in
4725 defm VPEXTRD : SS41I_extract32<0x16, "vpextrd">, VEX;
4727 defm PEXTRD : SS41I_extract32<0x16, "pextrd">;
/// SS41I_extract64 - SSE 4.1 extract 64 bits to an int reg or memory destination
4730 multiclass SS41I_extract64<bits<8> opc, string OpcodeStr> {
4731 def rr : SS4AIi8<opc, MRMDestReg, (outs GR64:$dst),
4732 (ins VR128:$src1, i32i8imm:$src2),
4733 !strconcat(OpcodeStr,
4734 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4736 (extractelt (v2i64 VR128:$src1), imm:$src2))]>, OpSize, REX_W;
4737 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4738 (ins i64mem:$dst, VR128:$src1, i32i8imm:$src2),
4739 !strconcat(OpcodeStr,
4740 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4741 [(store (extractelt (v2i64 VR128:$src1), imm:$src2),
4742 addr:$dst)]>, OpSize, REX_W;
4745 let Predicates = [HasAVX] in
4746 defm VPEXTRQ : SS41I_extract64<0x16, "vpextrq">, VEX, VEX_W;
4748 defm PEXTRQ : SS41I_extract64<0x16, "pextrq">;
/// SS41I_extractf32 - SSE 4.1 extract a 32-bit fp value to an int reg or memory
/// destination
4752 multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> {
4753 def rr : SS4AIi8<opc, MRMDestReg, (outs GR32:$dst),
4754 (ins VR128:$src1, i32i8imm:$src2),
4755 !strconcat(OpcodeStr,
4756 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4758 (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2))]>,
4760 def mr : SS4AIi8<opc, MRMDestMem, (outs),
4761 (ins f32mem:$dst, VR128:$src1, i32i8imm:$src2),
4762 !strconcat(OpcodeStr,
4763 "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4764 [(store (extractelt (bc_v4i32 (v4f32 VR128:$src1)), imm:$src2),
4765 addr:$dst)]>, OpSize;
4768 let Predicates = [HasAVX] in {
4769 defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX;
4770 def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst),
4771 (ins VR128:$src1, i32i8imm:$src2),
4772 "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}",
4775 defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">;
4777 // Also match an EXTRACTPS store when the store is done as f32 instead of i32.
4778 def : Pat<(store (f32 (bitconvert (extractelt (bc_v4i32 (v4f32 VR128:$src1)),
4781 (EXTRACTPSmr addr:$dst, VR128:$src1, imm:$src2)>,
4782 Requires<[HasSSE41]>;
4784 //===----------------------------------------------------------------------===//
4785 // SSE4.1 - Insert Instructions
4786 //===----------------------------------------------------------------------===//
4788 multiclass SS41I_insert8<bits<8> opc, string asm, bit Is2Addr = 1> {
4789 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4790 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4792 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4794 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4796 (X86pinsrb VR128:$src1, GR32:$src2, imm:$src3))]>, OpSize;
4797 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4798 (ins VR128:$src1, i8mem:$src2, i32i8imm:$src3),
4800 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4802 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4804 (X86pinsrb VR128:$src1, (extloadi8 addr:$src2),
4805 imm:$src3))]>, OpSize;
4808 let Predicates = [HasAVX] in
4809 defm VPINSRB : SS41I_insert8<0x20, "vpinsrb", 0>, VEX_4V;
4810 let Constraints = "$src1 = $dst" in
4811 defm PINSRB : SS41I_insert8<0x20, "pinsrb">;
4813 multiclass SS41I_insert32<bits<8> opc, string asm, bit Is2Addr = 1> {
4814 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4815 (ins VR128:$src1, GR32:$src2, i32i8imm:$src3),
4817 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4819 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4821 (v4i32 (insertelt VR128:$src1, GR32:$src2, imm:$src3)))]>,
4823 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4824 (ins VR128:$src1, i32mem:$src2, i32i8imm:$src3),
4826 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4828 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4830 (v4i32 (insertelt VR128:$src1, (loadi32 addr:$src2),
4831 imm:$src3)))]>, OpSize;
4834 let Predicates = [HasAVX] in
4835 defm VPINSRD : SS41I_insert32<0x22, "vpinsrd", 0>, VEX_4V;
4836 let Constraints = "$src1 = $dst" in
4837 defm PINSRD : SS41I_insert32<0x22, "pinsrd">;
4839 multiclass SS41I_insert64<bits<8> opc, string asm, bit Is2Addr = 1> {
4840 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4841 (ins VR128:$src1, GR64:$src2, i32i8imm:$src3),
4843 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4845 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4847 (v2i64 (insertelt VR128:$src1, GR64:$src2, imm:$src3)))]>,
4849 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4850 (ins VR128:$src1, i64mem:$src2, i32i8imm:$src3),
4852 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4854 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4856 (v2i64 (insertelt VR128:$src1, (loadi64 addr:$src2),
4857 imm:$src3)))]>, OpSize;
4860 let Predicates = [HasAVX] in
4861 defm VPINSRQ : SS41I_insert64<0x22, "vpinsrq", 0>, VEX_4V, VEX_W;
4862 let Constraints = "$src1 = $dst" in
4863 defm PINSRQ : SS41I_insert64<0x22, "pinsrq">, REX_W;
// insertps has a few different modes; the first two below are optimized
// inserts that won't zero arbitrary elements in the destination vector. The
// next one matches the intrinsic and could zero arbitrary elements in the
// target vector.
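// (In the imm8, bits [7:6] select the source element for the register form,
// bits [5:4] select the destination slot, and bits [3:0] are a zero mask.)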
4869 multiclass SS41I_insertf32<bits<8> opc, string asm, bit Is2Addr = 1> {
4870 def rr : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst),
4871 (ins VR128:$src1, VR128:$src2, u32u8imm:$src3),
4873 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4875 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4877 (X86insrtps VR128:$src1, VR128:$src2, imm:$src3))]>,
4879 def rm : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst),
4880 (ins VR128:$src1, f32mem:$src2, u32u8imm:$src3),
4882 !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4884 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4886 (X86insrtps VR128:$src1,
4887 (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
4888 imm:$src3))]>, OpSize;
4891 let Constraints = "$src1 = $dst" in
4892 defm INSERTPS : SS41I_insertf32<0x21, "insertps">;
4893 let Predicates = [HasAVX] in
4894 defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V;
4896 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4897 (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4899 def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3),
4900 (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>,
4901 Requires<[HasSSE41]>;
4903 //===----------------------------------------------------------------------===//
4904 // SSE4.1 - Round Instructions
4905 //===----------------------------------------------------------------------===//
4907 multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
4908 X86MemOperand x86memop, RegisterClass RC,
4909 PatFrag mem_frag32, PatFrag mem_frag64,
4910 Intrinsic V4F32Int, Intrinsic V2F64Int> {
// Vector intrinsic operation, reg
4913 def PSr : SS4AIi8<opcps, MRMSrcReg,
4914 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4915 !strconcat(OpcodeStr,
4916 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4917 [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>,
4920 // Vector intrinsic operation, mem
4921 def PSm : Ii8<opcps, MRMSrcMem,
4922 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4923 !strconcat(OpcodeStr,
4924 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4926 (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
4928 Requires<[HasSSE41]>;
4930 // Vector intrinsic operation, reg
4931 def PDr : SS4AIi8<opcpd, MRMSrcReg,
4932 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4933 !strconcat(OpcodeStr,
4934 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4935 [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>,
4938 // Vector intrinsic operation, mem
4939 def PDm : SS4AIi8<opcpd, MRMSrcMem,
4940 (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2),
4941 !strconcat(OpcodeStr,
4942 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4944 (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
4948 multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd,
4949 RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> {
// Vector intrinsic operation, reg
4952 def PSr_AVX : SS4AIi8<opcps, MRMSrcReg,
4953 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4954 !strconcat(OpcodeStr,
4955 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4958 // Vector intrinsic operation, mem
4959 def PSm_AVX : Ii8<opcps, MRMSrcMem,
4960 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4961 !strconcat(OpcodeStr,
4962 "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4963 []>, TA, OpSize, Requires<[HasSSE41]>;
4965 // Vector intrinsic operation, reg
4966 def PDr_AVX : SS4AIi8<opcpd, MRMSrcReg,
4967 (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
4968 !strconcat(OpcodeStr,
4969 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4972 // Vector intrinsic operation, mem
4973 def PDm_AVX : SS4AIi8<opcpd, MRMSrcMem,
4974 (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
4975 !strconcat(OpcodeStr,
4976 "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
4980 multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
4983 Intrinsic F64Int, bit Is2Addr = 1> {
4984 // Intrinsic operation, reg.
4985 def SSr : SS4AIi8<opcss, MRMSrcReg,
4986 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
4988 !strconcat(OpcodeStr,
4989 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
4990 !strconcat(OpcodeStr,
4991 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
4992 [(set VR128:$dst, (F32Int VR128:$src1, VR128:$src2, imm:$src3))]>,
4995 // Intrinsic operation, mem.
4996 def SSm : SS4AIi8<opcss, MRMSrcMem,
4997 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
4999 !strconcat(OpcodeStr,
5000 "ss\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5001 !strconcat(OpcodeStr,
5002 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5004 (F32Int VR128:$src1, sse_load_f32:$src2, imm:$src3))]>,
5007 // Intrinsic operation, reg.
5008 def SDr : SS4AIi8<opcsd, MRMSrcReg,
5009 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
5011 !strconcat(OpcodeStr,
5012 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5013 !strconcat(OpcodeStr,
5014 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5015 [(set VR128:$dst, (F64Int VR128:$src1, VR128:$src2, imm:$src3))]>,
5018 // Intrinsic operation, mem.
5019 def SDm : SS4AIi8<opcsd, MRMSrcMem,
5020 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
5022 !strconcat(OpcodeStr,
5023 "sd\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5024 !strconcat(OpcodeStr,
5025 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5027 (F64Int VR128:$src1, sse_load_f64:$src2, imm:$src3))]>,
5031 multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd,
5033 // Intrinsic operation, reg.
5034 def SSr_AVX : SS4AIi8<opcss, MRMSrcReg,
5035 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
5036 !strconcat(OpcodeStr,
5037 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5040 // Intrinsic operation, mem.
5041 def SSm_AVX : SS4AIi8<opcss, MRMSrcMem,
5042 (outs VR128:$dst), (ins VR128:$src1, ssmem:$src2, i32i8imm:$src3),
5043 !strconcat(OpcodeStr,
5044 "ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5047 // Intrinsic operation, reg.
5048 def SDr_AVX : SS4AIi8<opcsd, MRMSrcReg,
5049 (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
5050 !strconcat(OpcodeStr,
5051 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5054 // Intrinsic operation, mem.
5055 def SDm_AVX : SS4AIi8<opcsd, MRMSrcMem,
5056 (outs VR128:$dst), (ins VR128:$src1, sdmem:$src2, i32i8imm:$src3),
5057 !strconcat(OpcodeStr,
5058 "sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5062 // FP round - roundss, roundps, roundsd, roundpd
5063 let Predicates = [HasAVX] in {
5065 defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128,
5066 memopv4f32, memopv2f64,
5067 int_x86_sse41_round_ps,
5068 int_x86_sse41_round_pd>, VEX;
5069 defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256,
5070 memopv8f32, memopv4f64,
5071 int_x86_avx_round_ps_256,
5072 int_x86_avx_round_pd_256>, VEX;
5073 defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround",
5074 int_x86_sse41_round_ss,
5075 int_x86_sse41_round_sd, 0>, VEX_4V;
5077 // Instructions for the assembler
5078 defm VROUND : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">,
5080 defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">,
5082 defm VROUND : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V;
5085 defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128,
5086 memopv4f32, memopv2f64,
5087 int_x86_sse41_round_ps, int_x86_sse41_round_pd>;
5088 let Constraints = "$src1 = $dst" in
5089 defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round",
5090 int_x86_sse41_round_ss, int_x86_sse41_round_sd>;
5092 //===----------------------------------------------------------------------===//
5093 // SSE4.1 - Packed Bit Test
5094 //===----------------------------------------------------------------------===//
// ptest instruction: we'll lower to this in X86ISelLowering, primarily from
// the Intel intrinsic that corresponds to it.
5098 let Defs = [EFLAGS], Predicates = [HasAVX] in {
5099 def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
5100 "vptest\t{$src2, $src1|$src1, $src2}",
5101 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
5103 def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
5104 "vptest\t{$src2, $src1|$src1, $src2}",
5105 [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
5108 def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2),
5109 "vptest\t{$src2, $src1|$src1, $src2}",
5110 [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>,
5112 def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2),
5113 "vptest\t{$src2, $src1|$src1, $src2}",
5114 [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>,
5118 let Defs = [EFLAGS] in {
5119 def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2),
5120 "ptest \t{$src2, $src1|$src1, $src2}",
5121 [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>,
5123 def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2),
5124 "ptest \t{$src2, $src1|$src1, $src2}",
5125 [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>,
5129 // The bit test instructions below are AVX only
5130 multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC,
5131 X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> {
5132 def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2),
5133 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
5134 [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX;
5135 def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2),
5136 !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
5137 [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>,
5141 let Defs = [EFLAGS], Predicates = [HasAVX] in {
5142 defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>;
5143 defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>;
5144 defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>;
5145 defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>;
5148 //===----------------------------------------------------------------------===//
5149 // SSE4.1 - Misc Instructions
5150 //===----------------------------------------------------------------------===//
5152 def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
5153 "popcnt{w}\t{$src, $dst|$dst, $src}",
5154 [(set GR16:$dst, (ctpop GR16:$src))]>, OpSize, XS;
5155 def POPCNT16rm : I<0xB8, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
5156 "popcnt{w}\t{$src, $dst|$dst, $src}",
5157 [(set GR16:$dst, (ctpop (loadi16 addr:$src)))]>, OpSize, XS;
5159 def POPCNT32rr : I<0xB8, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
5160 "popcnt{l}\t{$src, $dst|$dst, $src}",
5161 [(set GR32:$dst, (ctpop GR32:$src))]>, XS;
5162 def POPCNT32rm : I<0xB8, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
5163 "popcnt{l}\t{$src, $dst|$dst, $src}",
5164 [(set GR32:$dst, (ctpop (loadi32 addr:$src)))]>, XS;
5166 def POPCNT64rr : RI<0xB8, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
5167 "popcnt{q}\t{$src, $dst|$dst, $src}",
5168 [(set GR64:$dst, (ctpop GR64:$src))]>, XS;
5169 def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
5170 "popcnt{q}\t{$src, $dst|$dst, $src}",
5171 [(set GR64:$dst, (ctpop (loadi64 addr:$src)))]>, XS;
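// Note: these match the generic ctpop node, so ordinary population-count code
// selects them when POPCNT is available (e.g. -mpopcnt / -msse4.2). Sketch:
//   unsigned ones(unsigned x) { return __builtin_popcount(x); }  // -> popcnt{l}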
5175 // SS41I_unop_rm_int_v16 - SSE 4.1 unary operator on 16-bit elements (v8i16).
5176 multiclass SS41I_unop_rm_int_v16<bits<8> opc, string OpcodeStr,
5177 Intrinsic IntId128> {
5178 def rr128 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
5180 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5181 [(set VR128:$dst, (IntId128 VR128:$src))]>, OpSize;
5182 def rm128 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
5184 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5187 (bitconvert (memopv8i16 addr:$src))))]>, OpSize;
5190 let Predicates = [HasAVX] in
5191 defm VPHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "vphminposuw",
5192 int_x86_sse41_phminposuw>, VEX;
5193 defm PHMINPOSUW : SS41I_unop_rm_int_v16 <0x41, "phminposuw",
5194 int_x86_sse41_phminposuw>;
5196 /// SS41I_binop_rm_int - Simple SSE 4.1 binary operator
5197 multiclass SS41I_binop_rm_int<bits<8> opc, string OpcodeStr,
5198 Intrinsic IntId128, bit Is2Addr = 1> {
5199 let isCommutable = 1 in
5200 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
5201 (ins VR128:$src1, VR128:$src2),
5203 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5204 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5205 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>, OpSize;
5206 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
5207 (ins VR128:$src1, i128mem:$src2),
5209 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5210 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5212 (IntId128 VR128:$src1,
5213 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5216 let Predicates = [HasAVX] in {
5217 let isCommutable = 0 in
5218 defm VPACKUSDW : SS41I_binop_rm_int<0x2B, "vpackusdw", int_x86_sse41_packusdw,
5220 defm VPCMPEQQ : SS41I_binop_rm_int<0x29, "vpcmpeqq", int_x86_sse41_pcmpeqq,
5222 defm VPMINSB : SS41I_binop_rm_int<0x38, "vpminsb", int_x86_sse41_pminsb,
5224 defm VPMINSD : SS41I_binop_rm_int<0x39, "vpminsd", int_x86_sse41_pminsd,
5226 defm VPMINUD : SS41I_binop_rm_int<0x3B, "vpminud", int_x86_sse41_pminud,
5228 defm VPMINUW : SS41I_binop_rm_int<0x3A, "vpminuw", int_x86_sse41_pminuw,
5230 defm VPMAXSB : SS41I_binop_rm_int<0x3C, "vpmaxsb", int_x86_sse41_pmaxsb,
5232 defm VPMAXSD : SS41I_binop_rm_int<0x3D, "vpmaxsd", int_x86_sse41_pmaxsd,
5234 defm VPMAXUD : SS41I_binop_rm_int<0x3F, "vpmaxud", int_x86_sse41_pmaxud,
5236 defm VPMAXUW : SS41I_binop_rm_int<0x3E, "vpmaxuw", int_x86_sse41_pmaxuw,
5238 defm VPMULDQ : SS41I_binop_rm_int<0x28, "vpmuldq", int_x86_sse41_pmuldq,
5241 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
5242 (VPCMPEQQrr VR128:$src1, VR128:$src2)>;
5243 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
5244 (VPCMPEQQrm VR128:$src1, addr:$src2)>;
5247 let Constraints = "$src1 = $dst" in {
5248 let isCommutable = 0 in
5249 defm PACKUSDW : SS41I_binop_rm_int<0x2B, "packusdw", int_x86_sse41_packusdw>;
5250 defm PCMPEQQ : SS41I_binop_rm_int<0x29, "pcmpeqq", int_x86_sse41_pcmpeqq>;
5251 defm PMINSB : SS41I_binop_rm_int<0x38, "pminsb", int_x86_sse41_pminsb>;
5252 defm PMINSD : SS41I_binop_rm_int<0x39, "pminsd", int_x86_sse41_pminsd>;
5253 defm PMINUD : SS41I_binop_rm_int<0x3B, "pminud", int_x86_sse41_pminud>;
5254 defm PMINUW : SS41I_binop_rm_int<0x3A, "pminuw", int_x86_sse41_pminuw>;
5255 defm PMAXSB : SS41I_binop_rm_int<0x3C, "pmaxsb", int_x86_sse41_pmaxsb>;
5256 defm PMAXSD : SS41I_binop_rm_int<0x3D, "pmaxsd", int_x86_sse41_pmaxsd>;
5257 defm PMAXUD : SS41I_binop_rm_int<0x3F, "pmaxud", int_x86_sse41_pmaxud>;
5258 defm PMAXUW : SS41I_binop_rm_int<0x3E, "pmaxuw", int_x86_sse41_pmaxuw>;
5259 defm PMULDQ : SS41I_binop_rm_int<0x28, "pmuldq", int_x86_sse41_pmuldq>;
5262 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, VR128:$src2)),
5263 (PCMPEQQrr VR128:$src1, VR128:$src2)>;
5264 def : Pat<(v2i64 (X86pcmpeqq VR128:$src1, (memop addr:$src2))),
5265 (PCMPEQQrm VR128:$src1, addr:$src2)>;
5267 /// SS48I_binop_rm - Simple SSE 4.1 binary operator.
5268 multiclass SS48I_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
5269 ValueType OpVT, bit Is2Addr = 1> {
5270 let isCommutable = 1 in
5271 def rr : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
5272 (ins VR128:$src1, VR128:$src2),
5274 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5275 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5276 [(set VR128:$dst, (OpVT (OpNode VR128:$src1, VR128:$src2)))]>,
5278 def rm : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
5279 (ins VR128:$src1, i128mem:$src2),
5281 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5282 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5283 [(set VR128:$dst, (OpNode VR128:$src1,
5284 (bc_v4i32 (memopv2i64 addr:$src2))))]>,
5288 let Predicates = [HasAVX] in
5289 defm VPMULLD : SS48I_binop_rm<0x40, "vpmulld", mul, v4i32, 0>, VEX_4V;
5290 let Constraints = "$src1 = $dst" in
5291 defm PMULLD : SS48I_binop_rm<0x40, "pmulld", mul, v4i32>;
5293 /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate
5294 multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr,
5295 Intrinsic IntId, RegisterClass RC, PatFrag memop_frag,
5296 X86MemOperand x86memop, bit Is2Addr = 1> {
5297 let isCommutable = 1 in
5298 def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst),
5299 (ins RC:$src1, RC:$src2, u32u8imm:$src3),
5301 !strconcat(OpcodeStr,
5302 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5303 !strconcat(OpcodeStr,
5304 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5305 [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>,
5307 def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst),
5308 (ins RC:$src1, x86memop:$src2, u32u8imm:$src3),
5310 !strconcat(OpcodeStr,
5311 "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
5312 !strconcat(OpcodeStr,
5313 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")),
5316 (bitconvert (memop_frag addr:$src2)), imm:$src3))]>,
5320 let Predicates = [HasAVX] in {
5321 let isCommutable = 0 in {
5322 defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps,
5323 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5324 defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd,
5325 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5326 defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps",
5327 int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
5328 defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd",
5329 int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V;
5330 defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw,
5331 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5332 defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw,
5333 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5335 defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps,
5336 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5337 defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd,
5338 VR128, memopv16i8, i128mem, 0>, VEX_4V;
5339 defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256,
5340 VR256, memopv32i8, i256mem, 0>, VEX_4V;
5343 let Constraints = "$src1 = $dst" in {
5344 let isCommutable = 0 in {
5345 defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps,
5346 VR128, memopv16i8, i128mem>;
5347 defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd,
5348 VR128, memopv16i8, i128mem>;
5349 defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw,
5350 VR128, memopv16i8, i128mem>;
5351 defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw,
5352 VR128, memopv16i8, i128mem>;
5354 defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps,
5355 VR128, memopv16i8, i128mem>;
5356 defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd,
5357 VR128, memopv16i8, i128mem>;
5360 /// SS41I_quaternary_int_avx - AVX SSE 4.1 instructions with 4 operands
5361 let Predicates = [HasAVX] in {
5362 multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr,
5363 RegisterClass RC, X86MemOperand x86memop,
5364 PatFrag mem_frag, Intrinsic IntId> {
5365 def rr : I<opc, MRMSrcReg, (outs RC:$dst),
5366 (ins RC:$src1, RC:$src2, RC:$src3),
5367 !strconcat(OpcodeStr,
5368 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5369 [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))],
5370 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
5372 def rm : I<opc, MRMSrcMem, (outs RC:$dst),
5373 (ins RC:$src1, x86memop:$src2, RC:$src3),
5374 !strconcat(OpcodeStr,
5375 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
5377 (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)),
5379 SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM;
5383 defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem,
5384 memopv16i8, int_x86_sse41_blendvpd>;
5385 defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem,
5386 memopv16i8, int_x86_sse41_blendvps>;
5387 defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem,
5388 memopv16i8, int_x86_sse41_pblendvb>;
5389 defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem,
5390 memopv32i8, int_x86_avx_blendv_pd_256>;
5391 defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem,
5392 memopv32i8, int_x86_avx_blendv_ps_256>;
5394 /// SS41I_ternary_int - SSE 4.1 ternary operator
5395 let Uses = [XMM0], Constraints = "$src1 = $dst" in {
5396 multiclass SS41I_ternary_int<bits<8> opc, string OpcodeStr, Intrinsic IntId> {
5397 def rr0 : SS48I<opc, MRMSrcReg, (outs VR128:$dst),
5398 (ins VR128:$src1, VR128:$src2),
5399 !strconcat(OpcodeStr,
5400 "\t{$src2, $dst|$dst, $src2}"),
5401 [(set VR128:$dst, (IntId VR128:$src1, VR128:$src2, XMM0))]>,
5404 def rm0 : SS48I<opc, MRMSrcMem, (outs VR128:$dst),
5405 (ins VR128:$src1, i128mem:$src2),
5406 !strconcat(OpcodeStr,
5407 "\t{$src2, $dst|$dst, $src2}"),
5410 (bitconvert (memopv16i8 addr:$src2)), XMM0))]>, OpSize;
5414 defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>;
5415 defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>;
5416 defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>;
5418 def : Pat<(X86pblendv VR128:$src1, VR128:$src2, XMM0),
5419 (PBLENDVBrr0 VR128:$src1, VR128:$src2)>;
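// MOVNTDQA below is the SSE4.1 streaming (non-temporal) 16-byte load; the usual
// user-level source is _mm_stream_load_si128, e.g. (sketch, p 16-byte aligned,
// ideally pointing at write-combining memory):
//   __m128i v = _mm_stream_load_si128((__m128i *)p);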
5421 let Predicates = [HasAVX] in
5422 def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5423 "vmovntdqa\t{$src, $dst|$dst, $src}",
5424 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
5426 def MOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
5427 "movntdqa\t{$src, $dst|$dst, $src}",
5428 [(set VR128:$dst, (int_x86_sse41_movntdqa addr:$src))]>,
5431 //===----------------------------------------------------------------------===//
5432 // SSE4.2 - Compare Instructions
5433 //===----------------------------------------------------------------------===//
5435 /// SS42I_binop_rm_int - Simple SSE 4.2 binary operator
5436 multiclass SS42I_binop_rm_int<bits<8> opc, string OpcodeStr,
5437 Intrinsic IntId128, bit Is2Addr = 1> {
5438 def rr : SS428I<opc, MRMSrcReg, (outs VR128:$dst),
5439 (ins VR128:$src1, VR128:$src2),
5441 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5442 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5443 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5445 def rm : SS428I<opc, MRMSrcMem, (outs VR128:$dst),
5446 (ins VR128:$src1, i128mem:$src2),
5448 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5449 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5451 (IntId128 VR128:$src1,
5452 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5455 let Predicates = [HasAVX] in {
5456 defm VPCMPGTQ : SS42I_binop_rm_int<0x37, "vpcmpgtq", int_x86_sse42_pcmpgtq,
5459 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
5460 (VPCMPGTQrr VR128:$src1, VR128:$src2)>;
5461 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
5462 (VPCMPGTQrm VR128:$src1, addr:$src2)>;
5465 let Constraints = "$src1 = $dst" in
5466 defm PCMPGTQ : SS42I_binop_rm_int<0x37, "pcmpgtq", int_x86_sse42_pcmpgtq>;
5468 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, VR128:$src2)),
5469 (PCMPGTQrr VR128:$src1, VR128:$src2)>;
5470 def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))),
5471 (PCMPGTQrm VR128:$src1, addr:$src2)>;
5473 //===----------------------------------------------------------------------===//
5474 // SSE4.2 - String/text Processing Instructions
5475 //===----------------------------------------------------------------------===//
5477 // Packed Compare Implicit Length Strings, Return Mask
5478 multiclass pseudo_pcmpistrm<string asm> {
5479 def REG : PseudoI<(outs VR128:$dst),
5480 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5481 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2,
5483 def MEM : PseudoI<(outs VR128:$dst),
5484 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5485 [(set VR128:$dst, (int_x86_sse42_pcmpistrm128
5486 VR128:$src1, (load addr:$src2), imm:$src3))]>;
5489 let Defs = [EFLAGS], usesCustomInserter = 1 in {
5490 defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>;
5491 defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>;
5494 let Defs = [XMM0, EFLAGS], Predicates = [HasAVX] in {
5495 def VPCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
5496 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5497 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
5498 def VPCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
5499 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5500 "vpcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize, VEX;
5503 let Defs = [XMM0, EFLAGS] in {
5504 def PCMPISTRM128rr : SS42AI<0x62, MRMSrcReg, (outs),
5505 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5506 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
5507 def PCMPISTRM128rm : SS42AI<0x62, MRMSrcMem, (outs),
5508 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5509 "pcmpistrm\t{$src3, $src2, $src1|$src1, $src2, $src3}", []>, OpSize;
5512 // Packed Compare Explicit Length Strings, Return Mask
5513 multiclass pseudo_pcmpestrm<string asm> {
5514 def REG : PseudoI<(outs VR128:$dst),
5515 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5516 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
5517 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>;
5518 def MEM : PseudoI<(outs VR128:$dst),
5519 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5520 [(set VR128:$dst, (int_x86_sse42_pcmpestrm128
5521 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>;
5524 let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in {
5525 defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>;
5526 defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>;
5529 let Predicates = [HasAVX],
5530 Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
5531 def VPCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
5532 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5533 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
5534 def VPCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
5535 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5536 "vpcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize, VEX;
5539 let Defs = [XMM0, EFLAGS], Uses = [EAX, EDX] in {
5540 def PCMPESTRM128rr : SS42AI<0x60, MRMSrcReg, (outs),
5541 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5542 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
5543 def PCMPESTRM128rm : SS42AI<0x60, MRMSrcMem, (outs),
5544 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5545 "pcmpestrm\t{$src5, $src3, $src1|$src1, $src3, $src5}", []>, OpSize;
5548 // Packed Compare Implicit Length Strings, Return Index
5549 let Defs = [ECX, EFLAGS] in {
5550 multiclass SS42AI_pcmpistri<Intrinsic IntId128, string asm = "pcmpistri"> {
5551 def rr : SS42AI<0x63, MRMSrcReg, (outs),
5552 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5553 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
5554 [(set ECX, (IntId128 VR128:$src1, VR128:$src2, imm:$src3)),
5555 (implicit EFLAGS)]>, OpSize;
5556 def rm : SS42AI<0x63, MRMSrcMem, (outs),
5557 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5558 !strconcat(asm, "\t{$src3, $src2, $src1|$src1, $src2, $src3}"),
5559 [(set ECX, (IntId128 VR128:$src1, (load addr:$src2), imm:$src3)),
5560 (implicit EFLAGS)]>, OpSize;
5564 let Predicates = [HasAVX] in {
5565 defm VPCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128, "vpcmpistri">,
5567 defm VPCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128, "vpcmpistri">,
5569 defm VPCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128, "vpcmpistri">,
5571 defm VPCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128, "vpcmpistri">,
5573 defm VPCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128, "vpcmpistri">,
5575 defm VPCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128, "vpcmpistri">,
5579 defm PCMPISTRI : SS42AI_pcmpistri<int_x86_sse42_pcmpistri128>;
5580 defm PCMPISTRIA : SS42AI_pcmpistri<int_x86_sse42_pcmpistria128>;
5581 defm PCMPISTRIC : SS42AI_pcmpistri<int_x86_sse42_pcmpistric128>;
5582 defm PCMPISTRIO : SS42AI_pcmpistri<int_x86_sse42_pcmpistrio128>;
5583 defm PCMPISTRIS : SS42AI_pcmpistri<int_x86_sse42_pcmpistris128>;
5584 defm PCMPISTRIZ : SS42AI_pcmpistri<int_x86_sse42_pcmpistriz128>;
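// Roughly speaking, the A/C/O/S/Z variants above share the pcmpistri encoding;
// they correspond to the flag-reading intrinsics (_mm_cmpistra, _mm_cmpistrc,
// _mm_cmpistro, _mm_cmpistrs, _mm_cmpistrz), while the plain variant matches
// _mm_cmpistri, which returns the index in ECX.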
5586 // Packed Compare Explicit Length Strings, Return Index
5587 let Defs = [ECX, EFLAGS], Uses = [EAX, EDX] in {
5588 multiclass SS42AI_pcmpestri<Intrinsic IntId128, string asm = "pcmpestri"> {
5589 def rr : SS42AI<0x61, MRMSrcReg, (outs),
5590 (ins VR128:$src1, VR128:$src3, i8imm:$src5),
5591 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5592 [(set ECX, (IntId128 VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5)),
5593 (implicit EFLAGS)]>, OpSize;
5594 def rm : SS42AI<0x61, MRMSrcMem, (outs),
5595 (ins VR128:$src1, i128mem:$src3, i8imm:$src5),
5596 !strconcat(asm, "\t{$src5, $src3, $src1|$src1, $src3, $src5}"),
5598 (IntId128 VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5)),
5599 (implicit EFLAGS)]>, OpSize;
5603 let Predicates = [HasAVX] in {
5604 defm VPCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128, "vpcmpestri">,
5606 defm VPCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128, "vpcmpestri">,
5608 defm VPCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128, "vpcmpestri">,
5610 defm VPCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128, "vpcmpestri">,
5612 defm VPCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128, "vpcmpestri">,
5614 defm VPCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128, "vpcmpestri">,
5618 defm PCMPESTRI : SS42AI_pcmpestri<int_x86_sse42_pcmpestri128>;
5619 defm PCMPESTRIA : SS42AI_pcmpestri<int_x86_sse42_pcmpestria128>;
5620 defm PCMPESTRIC : SS42AI_pcmpestri<int_x86_sse42_pcmpestric128>;
5621 defm PCMPESTRIO : SS42AI_pcmpestri<int_x86_sse42_pcmpestrio128>;
5622 defm PCMPESTRIS : SS42AI_pcmpestri<int_x86_sse42_pcmpestris128>;
5623 defm PCMPESTRIZ : SS42AI_pcmpestri<int_x86_sse42_pcmpestriz128>;
5625 //===----------------------------------------------------------------------===//
5626 // SSE4.2 - CRC Instructions
5627 //===----------------------------------------------------------------------===//
5629 // No CRC instructions have AVX equivalents
5631 // CRC intrinsic instructions.
5632 // These instructions only have rm forms; they differ only in operand size.
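// For reference, a typical user-level CRC32C accumulation that ends up on these
// instructions looks roughly like (sketch, assuming SSE4.2 is available):
//   #include <nmmintrin.h>
//   unsigned crc32c(const unsigned char *p, size_t n, unsigned crc) {
//     for (size_t i = 0; i < n; ++i)
//       crc = _mm_crc32_u8(crc, p[i]);   // crc32{b} with a GR32 accumulator
//     return crc;
//   }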
5634 let Constraints = "$src1 = $dst" in {
5635 def CRC32r32m8 : SS42FI<0xF0, MRMSrcMem, (outs GR32:$dst),
5636 (ins GR32:$src1, i8mem:$src2),
5637 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5639 (int_x86_sse42_crc32_32_8 GR32:$src1,
5640 (load addr:$src2)))]>;
5641 def CRC32r32r8 : SS42FI<0xF0, MRMSrcReg, (outs GR32:$dst),
5642 (ins GR32:$src1, GR8:$src2),
5643 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5645 (int_x86_sse42_crc32_32_8 GR32:$src1, GR8:$src2))]>;
5646 def CRC32r32m16 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5647 (ins GR32:$src1, i16mem:$src2),
5648 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5650 (int_x86_sse42_crc32_32_16 GR32:$src1,
5651 (load addr:$src2)))]>,
5653 def CRC32r32r16 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5654 (ins GR32:$src1, GR16:$src2),
5655 "crc32{w} \t{$src2, $src1|$src1, $src2}",
5657 (int_x86_sse42_crc32_32_16 GR32:$src1, GR16:$src2))]>,
5659 def CRC32r32m32 : SS42FI<0xF1, MRMSrcMem, (outs GR32:$dst),
5660 (ins GR32:$src1, i32mem:$src2),
5661 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5663 (int_x86_sse42_crc32_32_32 GR32:$src1,
5664 (load addr:$src2)))]>;
5665 def CRC32r32r32 : SS42FI<0xF1, MRMSrcReg, (outs GR32:$dst),
5666 (ins GR32:$src1, GR32:$src2),
5667 "crc32{l} \t{$src2, $src1|$src1, $src2}",
5669 (int_x86_sse42_crc32_32_32 GR32:$src1, GR32:$src2))]>;
5670 def CRC32r64m8 : SS42FI<0xF0, MRMSrcMem, (outs GR64:$dst),
5671 (ins GR64:$src1, i8mem:$src2),
5672 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5674 (int_x86_sse42_crc32_64_8 GR64:$src1,
5675 (load addr:$src2)))]>,
5677 def CRC32r64r8 : SS42FI<0xF0, MRMSrcReg, (outs GR64:$dst),
5678 (ins GR64:$src1, GR8:$src2),
5679 "crc32{b} \t{$src2, $src1|$src1, $src2}",
5681 (int_x86_sse42_crc32_64_8 GR64:$src1, GR8:$src2))]>,
5683 def CRC32r64m64 : SS42FI<0xF1, MRMSrcMem, (outs GR64:$dst),
5684 (ins GR64:$src1, i64mem:$src2),
5685 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5687 (int_x86_sse42_crc32_64_64 GR64:$src1,
5688 (load addr:$src2)))]>,
5690 def CRC32r64r64 : SS42FI<0xF1, MRMSrcReg, (outs GR64:$dst),
5691 (ins GR64:$src1, GR64:$src2),
5692 "crc32{q} \t{$src2, $src1|$src1, $src2}",
5694 (int_x86_sse42_crc32_64_64 GR64:$src1, GR64:$src2))]>,
5698 //===----------------------------------------------------------------------===//
5699 // AES-NI Instructions
5700 //===----------------------------------------------------------------------===//
5702 multiclass AESI_binop_rm_int<bits<8> opc, string OpcodeStr,
5703 Intrinsic IntId128, bit Is2Addr = 1> {
5704 def rr : AES8I<opc, MRMSrcReg, (outs VR128:$dst),
5705 (ins VR128:$src1, VR128:$src2),
5707 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5708 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5709 [(set VR128:$dst, (IntId128 VR128:$src1, VR128:$src2))]>,
5711 def rm : AES8I<opc, MRMSrcMem, (outs VR128:$dst),
5712 (ins VR128:$src1, i128mem:$src2),
5714 !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"),
5715 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")),
5717 (IntId128 VR128:$src1,
5718 (bitconvert (memopv16i8 addr:$src2))))]>, OpSize;
5721 // Perform One Round of an AES Encryption/Decryption Flow
5722 let Predicates = [HasAVX, HasAES] in {
5723 defm VAESENC : AESI_binop_rm_int<0xDC, "vaesenc",
5724 int_x86_aesni_aesenc, 0>, VEX_4V;
5725 defm VAESENCLAST : AESI_binop_rm_int<0xDD, "vaesenclast",
5726 int_x86_aesni_aesenclast, 0>, VEX_4V;
5727 defm VAESDEC : AESI_binop_rm_int<0xDE, "vaesdec",
5728 int_x86_aesni_aesdec, 0>, VEX_4V;
5729 defm VAESDECLAST : AESI_binop_rm_int<0xDF, "vaesdeclast",
5730 int_x86_aesni_aesdeclast, 0>, VEX_4V;
5733 let Constraints = "$src1 = $dst" in {
5734 defm AESENC : AESI_binop_rm_int<0xDC, "aesenc",
5735 int_x86_aesni_aesenc>;
5736 defm AESENCLAST : AESI_binop_rm_int<0xDD, "aesenclast",
5737 int_x86_aesni_aesenclast>;
5738 defm AESDEC : AESI_binop_rm_int<0xDE, "aesdec",
5739 int_x86_aesni_aesdec>;
5740 defm AESDECLAST : AESI_binop_rm_int<0xDF, "aesdeclast",
5741 int_x86_aesni_aesdeclast>;
5744 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, VR128:$src2)),
5745 (AESENCrr VR128:$src1, VR128:$src2)>;
5746 def : Pat<(v2i64 (int_x86_aesni_aesenc VR128:$src1, (memop addr:$src2))),
5747 (AESENCrm VR128:$src1, addr:$src2)>;
5748 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, VR128:$src2)),
5749 (AESENCLASTrr VR128:$src1, VR128:$src2)>;
5750 def : Pat<(v2i64 (int_x86_aesni_aesenclast VR128:$src1, (memop addr:$src2))),
5751 (AESENCLASTrm VR128:$src1, addr:$src2)>;
5752 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, VR128:$src2)),
5753 (AESDECrr VR128:$src1, VR128:$src2)>;
5754 def : Pat<(v2i64 (int_x86_aesni_aesdec VR128:$src1, (memop addr:$src2))),
5755 (AESDECrm VR128:$src1, addr:$src2)>;
5756 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, VR128:$src2)),
5757 (AESDECLASTrr VR128:$src1, VR128:$src2)>;
5758 def : Pat<(v2i64 (int_x86_aesni_aesdeclast VR128:$src1, (memop addr:$src2))),
5759 (AESDECLASTrm VR128:$src1, addr:$src2)>;
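// For reference, one AES-128 block encryption built on these instructions looks
// roughly like (sketch; ks[] holds the 11 expanded round keys):
//   #include <wmmintrin.h>
//   __m128i aes128_encrypt_block(__m128i block, const __m128i ks[11]) {
//     block = _mm_xor_si128(block, ks[0]);          // initial AddRoundKey
//     for (int r = 1; r < 10; ++r)
//       block = _mm_aesenc_si128(block, ks[r]);     // aesenc: one full round
//     return _mm_aesenclast_si128(block, ks[10]);   // aesenclast: final round
//   }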
5761 // Perform the AES InvMixColumn Transformation
5762 let Predicates = [HasAVX, HasAES] in {
5763 def VAESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5765 "vaesimc\t{$src1, $dst|$dst, $src1}",
5767 (int_x86_aesni_aesimc VR128:$src1))]>,
5769 def VAESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5770 (ins i128mem:$src1),
5771 "vaesimc\t{$src1, $dst|$dst, $src1}",
5773 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5776 def AESIMCrr : AES8I<0xDB, MRMSrcReg, (outs VR128:$dst),
5778 "aesimc\t{$src1, $dst|$dst, $src1}",
5780 (int_x86_aesni_aesimc VR128:$src1))]>,
5782 def AESIMCrm : AES8I<0xDB, MRMSrcMem, (outs VR128:$dst),
5783 (ins i128mem:$src1),
5784 "aesimc\t{$src1, $dst|$dst, $src1}",
5786 (int_x86_aesni_aesimc (bitconvert (memopv2i64 addr:$src1))))]>,
5789 // AES Round Key Generation Assist
5790 let Predicates = [HasAVX, HasAES] in {
5791 def VAESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5792 (ins VR128:$src1, i8imm:$src2),
5793 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5795 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5797 def VAESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5798 (ins i128mem:$src1, i8imm:$src2),
5799 "vaeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5801 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5805 def AESKEYGENASSIST128rr : AESAI<0xDF, MRMSrcReg, (outs VR128:$dst),
5806 (ins VR128:$src1, i8imm:$src2),
5807 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5809 (int_x86_aesni_aeskeygenassist VR128:$src1, imm:$src2))]>,
5811 def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst),
5812 (ins i128mem:$src1, i8imm:$src2),
5813 "aeskeygenassist\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5815 (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)),
5819 //===----------------------------------------------------------------------===//
5820 // CLMUL Instructions
5821 //===----------------------------------------------------------------------===//
5823 // Carry-less Multiplication instructions
5824 let Constraints = "$src1 = $dst" in {
5825 def PCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
5826 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5827 "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
5830 def PCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
5831 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5832 "pclmulqdq\t{$src3, $src2, $dst|$dst, $src2, $src3}",
5836 // AVX Carry-less Multiplication instructions
5837 def VPCLMULQDQrr : AVXCLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst),
5838 (ins VR128:$src1, VR128:$src2, i8imm:$src3),
5839 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5842 def VPCLMULQDQrm : AVXCLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst),
5843 (ins VR128:$src1, i128mem:$src2, i8imm:$src3),
5844 "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5848 multiclass pclmul_alias<string asm, int immop> {
5849 def : InstAlias<!strconcat("pclmul", asm,
5850 "dq {$src, $dst|$dst, $src}"),
5851 (PCLMULQDQrr VR128:$dst, VR128:$src, immop)>;
5853 def : InstAlias<!strconcat("pclmul", asm,
5854 "dq {$src, $dst|$dst, $src}"),
5855 (PCLMULQDQrm VR128:$dst, i128mem:$src, immop)>;
5857 def : InstAlias<!strconcat("vpclmul", asm,
5858 "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
5859 (VPCLMULQDQrr VR128:$dst, VR128:$src1, VR128:$src2, immop)>;
5861 def : InstAlias<!strconcat("vpclmul", asm,
5862 "dq {$src2, $src1, $dst|$dst, $src1, $src2}"),
5863 (VPCLMULQDQrm VR128:$dst, VR128:$src1, i128mem:$src2, immop)>;
5865 defm : pclmul_alias<"hqhq", 0x11>;
5866 defm : pclmul_alias<"hqlq", 0x01>;
5867 defm : pclmul_alias<"lqhq", 0x10>;
5868 defm : pclmul_alias<"lqlq", 0x00>;
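// Roughly, bit 0 of the pclmulqdq immediate selects the quadword of the
// destination operand and bit 4 selects the quadword of the source, so e.g.
// "pclmulhqhqdq %xmm1, %xmm0" is sugar for "pclmulqdq $0x11, %xmm1, %xmm0"
// (multiply the two high quadwords). The matching intrinsic is (sketch):
//   __m128i r = _mm_clmulepi64_si128(a, b, 0x11);  // carry-less multiply, high halves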
5870 //===----------------------------------------------------------------------===//
5871 // AVX Instructions
5872 //===----------------------------------------------------------------------===//
5874 //===----------------------------------------------------------------------===//
5875 // VBROADCAST - Load from memory and broadcast to all elements of the
5876 // destination operand
5878 class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC,
5879 X86MemOperand x86memop, Intrinsic Int> :
5880 AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
5881 !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
5882 [(set RC:$dst, (Int addr:$src))]>, VEX;
5884 def VBROADCASTSS : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem,
5885 int_x86_avx_vbroadcastss>;
5886 def VBROADCASTSSY : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem,
5887 int_x86_avx_vbroadcastss_256>;
5888 def VBROADCASTSD : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem,
5889 int_x86_avx_vbroadcast_sd_256>;
5890 def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem,
5891 int_x86_avx_vbroadcastf128_pd_256>;
5893 def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src),
5894 (VBROADCASTF128 addr:$src)>;
5896 def : Pat<(v8i32 (X86VBroadcast (loadi32 addr:$src))),
5897 (VBROADCASTSSY addr:$src)>;
5898 def : Pat<(v4i64 (X86VBroadcast (loadi64 addr:$src))),
5899 (VBROADCASTSD addr:$src)>;
5900 def : Pat<(v8f32 (X86VBroadcast (loadf32 addr:$src))),
5901 (VBROADCASTSSY addr:$src)>;
5902 def : Pat<(v4f64 (X86VBroadcast (loadf64 addr:$src))),
5903 (VBROADCASTSD addr:$src)>;
5905 def : Pat<(v4f32 (X86VBroadcast (loadf32 addr:$src))),
5906 (VBROADCASTSS addr:$src)>;
5907 def : Pat<(v4i32 (X86VBroadcast (loadi32 addr:$src))),
5908 (VBROADCASTSS addr:$src)>;
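// The usual user-level source of these is a broadcast intrinsic or a splatted
// load from memory, e.g. (sketch):
//   __m256 v = _mm256_broadcast_ss(&x);   // vbroadcastss from a float in memory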
5910 //===----------------------------------------------------------------------===//
5911 // VINSERTF128 - Insert packed floating-point values
5913 def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst),
5914 (ins VR256:$src1, VR128:$src2, i8imm:$src3),
5915 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5917 def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst),
5918 (ins VR256:$src1, f128mem:$src2, i8imm:$src3),
5919 "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
5922 def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3),
5923 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5924 def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3),
5925 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5926 def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3),
5927 (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>;
5929 def : Pat<(vinsertf128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
5931 (VINSERTF128rr VR256:$src1, VR128:$src2,
5932 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5933 def : Pat<(vinsertf128_insert:$ins (v4f64 VR256:$src1), (v2f64 VR128:$src2),
5935 (VINSERTF128rr VR256:$src1, VR128:$src2,
5936 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5937 def : Pat<(vinsertf128_insert:$ins (v8i32 VR256:$src1), (v4i32 VR128:$src2),
5939 (VINSERTF128rr VR256:$src1, VR128:$src2,
5940 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5941 def : Pat<(vinsertf128_insert:$ins (v4i64 VR256:$src1), (v2i64 VR128:$src2),
5943 (VINSERTF128rr VR256:$src1, VR128:$src2,
5944 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5945 def : Pat<(vinsertf128_insert:$ins (v32i8 VR256:$src1), (v16i8 VR128:$src2),
5947 (VINSERTF128rr VR256:$src1, VR128:$src2,
5948 (INSERT_get_vinsertf128_imm VR256:$ins))>;
5949 def : Pat<(vinsertf128_insert:$ins (v16i16 VR256:$src1), (v8i16 VR128:$src2),
5951 (VINSERTF128rr VR256:$src1, VR128:$src2,
5952 (INSERT_get_vinsertf128_imm VR256:$ins))>;
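// The vinsertf128 immediate selects which 128-bit half of the YMM destination is
// replaced (0 = low, 1 = high), e.g. (sketch):
//   __m256 r = _mm256_insertf128_ps(acc, lo128, 1);  // place lo128 in the upper lane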
5954 // Special COPY patterns
5955 def : Pat<(insert_subvector undef, (v2i64 VR128:$src), (i32 0)),
5956 (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5957 def : Pat<(insert_subvector undef, (v2f64 VR128:$src), (i32 0)),
5958 (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5959 def : Pat<(insert_subvector undef, (v4i32 VR128:$src), (i32 0)),
5960 (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5961 def : Pat<(insert_subvector undef, (v4f32 VR128:$src), (i32 0)),
5962 (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5963 def : Pat<(insert_subvector undef, (v8i16 VR128:$src), (i32 0)),
5964 (INSERT_SUBREG (v16i16 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5965 def : Pat<(insert_subvector undef, (v16i8 VR128:$src), (i32 0)),
5966 (INSERT_SUBREG (v32i8 (IMPLICIT_DEF)), VR128:$src, sub_xmm)>;
5968 //===----------------------------------------------------------------------===//
5969 // VEXTRACTF128 - Extract packed floating-point values
5971 def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst),
5972 (ins VR256:$src1, i8imm:$src2),
5973 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5975 def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs),
5976 (ins f128mem:$dst, VR256:$src1, i8imm:$src2),
5977 "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}",
5980 def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2),
5981 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5982 def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2),
5983 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5984 def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2),
5985 (VEXTRACTF128rr VR256:$src1, imm:$src2)>;
5987 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5988 (v4f32 (VEXTRACTF128rr
5989 (v8f32 VR256:$src1),
5990 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5991 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5992 (v2f64 (VEXTRACTF128rr
5993 (v4f64 VR256:$src1),
5994 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5995 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
5996 (v4i32 (VEXTRACTF128rr
5997 (v8i32 VR256:$src1),
5998 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
5999 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
6000 (v2i64 (VEXTRACTF128rr
6001 (v4i64 VR256:$src1),
6002 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
6003 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
6004 (v8i16 (VEXTRACTF128rr
6005 (v16i16 VR256:$src1),
6006 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
6007 def : Pat<(vextractf128_extract:$ext VR256:$src1, (i32 imm)),
6008 (v16i8 (VEXTRACTF128rr
6009 (v32i8 VR256:$src1),
6010 (EXTRACT_get_vextractf128_imm VR128:$ext)))>;
6012 // Special COPY patterns
6013 def : Pat<(v4i32 (extract_subvector (v8i32 VR256:$src), (i32 0))),
6014 (v4i32 (EXTRACT_SUBREG (v8i32 VR256:$src), sub_xmm))>;
6015 def : Pat<(v4f32 (extract_subvector (v8f32 VR256:$src), (i32 0))),
6016 (v4f32 (EXTRACT_SUBREG (v8f32 VR256:$src), sub_xmm))>;
6018 def : Pat<(v2i64 (extract_subvector (v4i64 VR256:$src), (i32 0))),
6019 (v2i64 (EXTRACT_SUBREG (v4i64 VR256:$src), sub_xmm))>;
6020 def : Pat<(v2f64 (extract_subvector (v4f64 VR256:$src), (i32 0))),
6021 (v2f64 (EXTRACT_SUBREG (v4f64 VR256:$src), sub_xmm))>;
6023 def : Pat<(v8i16 (extract_subvector (v16i16 VR256:$src), (i32 0))),
6024 (v8i16 (EXTRACT_SUBREG (v16i16 VR256:$src), sub_xmm))>;
6025 def : Pat<(v16i8 (extract_subvector (v32i8 VR256:$src), (i32 0))),
6026 (v16i8 (EXTRACT_SUBREG (v32i8 VR256:$src), sub_xmm))>;
6029 //===----------------------------------------------------------------------===//
6030 // VMASKMOV - Conditional SIMD Packed Loads and Stores
6032 multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr,
6033 Intrinsic IntLd, Intrinsic IntLd256,
6034 Intrinsic IntSt, Intrinsic IntSt256,
6035 PatFrag pf128, PatFrag pf256> {
6036 def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst),
6037 (ins VR128:$src1, f128mem:$src2),
6038 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6039 [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>,
6041 def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst),
6042 (ins VR256:$src1, f256mem:$src2),
6043 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6044 [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>,
6046 def mr : AVX8I<opc_mr, MRMDestMem, (outs),
6047 (ins f128mem:$dst, VR128:$src1, VR128:$src2),
6048 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6049 [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V;
6050 def Ymr : AVX8I<opc_mr, MRMDestMem, (outs),
6051 (ins f256mem:$dst, VR256:$src1, VR256:$src2),
6052 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6053 [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V;
6056 defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps",
6057 int_x86_avx_maskload_ps,
6058 int_x86_avx_maskload_ps_256,
6059 int_x86_avx_maskstore_ps,
6060 int_x86_avx_maskstore_ps_256,
6061 memopv4f32, memopv8f32>;
6062 defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd",
6063 int_x86_avx_maskload_pd,
6064 int_x86_avx_maskload_pd_256,
6065 int_x86_avx_maskstore_pd,
6066 int_x86_avx_maskstore_pd_256,
6067 memopv2f64, memopv4f64>;
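// These match the AVX masked load/store intrinsics; roughly, the sign bit of each
// mask element controls whether that element is loaded/stored, e.g. (sketch):
//   __m256 v = _mm256_maskload_ps(p, mask);  // elements with the mask MSB clear read as 0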
6069 //===----------------------------------------------------------------------===//
6070 // VPERMIL - Permute Single and Double Floating-Point Values
6072 multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr,
6073 RegisterClass RC, X86MemOperand x86memop_f,
6074 X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag,
6075 Intrinsic IntVar, Intrinsic IntImm> {
6076 def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst),
6077 (ins RC:$src1, RC:$src2),
6078 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6079 [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V;
6080 def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst),
6081 (ins RC:$src1, x86memop_i:$src2),
6082 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6083 [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V;
6085 def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst),
6086 (ins RC:$src1, i8imm:$src2),
6087 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6088 [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX;
6089 def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst),
6090 (ins x86memop_f:$src1, i8imm:$src2),
6091 !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
6092 [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX;
6095 defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem,
6096 memopv4f32, memopv4i32,
6097 int_x86_avx_vpermilvar_ps,
6098 int_x86_avx_vpermil_ps>;
6099 defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem,
6100 memopv8f32, memopv8i32,
6101 int_x86_avx_vpermilvar_ps_256,
6102 int_x86_avx_vpermil_ps_256>;
6103 defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem,
6104 memopv2f64, memopv2i64,
6105 int_x86_avx_vpermilvar_pd,
6106 int_x86_avx_vpermil_pd>;
6107 defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem,
6108 memopv4f64, memopv4i64,
6109 int_x86_avx_vpermilvar_pd_256,
6110 int_x86_avx_vpermil_pd_256>;
6112 def : Pat<(v8f32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
6113 (VPERMILPSYri VR256:$src1, imm:$imm)>;
6114 def : Pat<(v4f64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
6115 (VPERMILPDYri VR256:$src1, imm:$imm)>;
6116 def : Pat<(v8i32 (X86VPermilpsy VR256:$src1, (i8 imm:$imm))),
6117 (VPERMILPSYri VR256:$src1, imm:$imm)>;
6118 def : Pat<(v4i64 (X86VPermilpdy VR256:$src1, (i8 imm:$imm))),
6119 (VPERMILPDYri VR256:$src1, imm:$imm)>;
6121 //===----------------------------------------------------------------------===//
6122 // VPERM2F128 - Permute Floating-Point Values in 128-bit chunks
6124 def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst),
6125 (ins VR256:$src1, VR256:$src2, i8imm:$src3),
6126 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
6128 def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst),
6129 (ins VR256:$src1, f256mem:$src2, i8imm:$src3),
6130 "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
6133 def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3),
6134 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
6135 def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3),
6136 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
6137 def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3),
6138 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>;
6140 def : Pat<(int_x86_avx_vperm2f128_ps_256
6141 VR256:$src1, (memopv8f32 addr:$src2), imm:$src3),
6142 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
6143 def : Pat<(int_x86_avx_vperm2f128_pd_256
6144 VR256:$src1, (memopv4f64 addr:$src2), imm:$src3),
6145 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
6146 def : Pat<(int_x86_avx_vperm2f128_si_256
6147 VR256:$src1, (memopv8i32 addr:$src2), imm:$src3),
6148 (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>;
6150 def : Pat<(v8f32 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
6151 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
6152 def : Pat<(v8i32 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
6153 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
6154 def : Pat<(v4i64 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
6155 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
6156 def : Pat<(v4f64 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
6157 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
6158 def : Pat<(v32i8 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
6159 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
6160 def : Pat<(v16i16 (X86VPerm2f128 VR256:$src1, VR256:$src2, (i8 imm:$imm))),
6161 (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$imm)>;
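// In the vperm2f128 immediate, bits [1:0] pick the 128-bit source lane for the
// low half of the result and bits [5:4] the lane for the high half (0/1 = $src1
// low/high, 2/3 = $src2 low/high); bits 3 and 7 zero the corresponding half
// instead. E.g. (sketch):
//   __m256 swapped = _mm256_permute2f128_ps(a, a, 0x01);  // swap the two 128-bit halves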
6163 //===----------------------------------------------------------------------===//
6164 // VZERO - Zero YMM registers
6166 let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
6167 YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
6168 // Zero All YMM registers
6169 def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
6170 [(int_x86_avx_vzeroall)]>, TB, VEX, VEX_L, Requires<[HasAVX]>;
6172 // Zero Upper bits of YMM registers
6173 def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
6174 [(int_x86_avx_vzeroupper)]>, TB, VEX, Requires<[HasAVX]>;
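// vzeroupper (via _mm256_zeroupper()) is emitted before calling into legacy SSE
// code to avoid the AVX/SSE transition penalty caused by dirty upper YMM state;
// vzeroall additionally clears the low 128 bits of every YMM register.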
6177 //===----------------------------------------------------------------------===//
6178 // SSE Shuffle pattern fragments
6179 //===----------------------------------------------------------------------===//
6181 // This is part of a "work in progress" refactoring. The idea is that all
6182 // vector shuffles will be translated into target-specific nodes and
6183 // directly matched by the patterns below (which can be changed along the way).
6184 // The AVX versions of some, but not all, of them are described here; more
6185 // should come in the near future.
6187 // Shuffle with MOVLHPD
6188 def : Pat<(v2f64 (X86Movlhpd VR128:$src1,
6189 (scalar_to_vector (loadf64 addr:$src2)))),
6190 (MOVHPDrm VR128:$src1, addr:$src2)>;
6192 // FIXME: Instead of X86Unpcklpd, there should be an X86Movlhpd here. The problem
6193 // arises during lowering, where the load fold cannot be recognized because
6194 // it has two uses through a bitcast. One use disappears at isel time and the
6195 // fold opportunity reappears.
6196 def : Pat<(v2f64 (X86Unpcklpd VR128:$src1,
6197 (scalar_to_vector (loadf64 addr:$src2)))),
6198 (MOVHPDrm VR128:$src1, addr:$src2)>;
6200 // Shuffle with MOVSS
6201 def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))),
6202 (MOVSSrr VR128:$src1, FR32:$src2)>;
6203 def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)),
6204 (MOVSSrr (v4i32 VR128:$src1),
6205 (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
6206 def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)),
6207 (MOVSSrr (v4f32 VR128:$src1),
6208 (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;
6210 // Shuffle with MOVSD
6211 def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))),
6212 (MOVSDrr VR128:$src1, FR64:$src2)>;
6213 def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
6214 (MOVSDrr (v2i64 VR128:$src1),
6215 (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;
6216 def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)),
6217 (MOVSDrr (v2f64 VR128:$src1),
6218 (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;
6219 def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)),
6220 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
6221 def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)),
6222 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
6224 // Shuffle with MOVLPS
6225 def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))),
6226 (MOVLPSrm VR128:$src1, addr:$src2)>;
6227 def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))),
6228 (MOVLPSrm VR128:$src1, addr:$src2)>;
6229 def : Pat<(X86Movlps VR128:$src1,
6230 (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))),
6231 (MOVLPSrm VR128:$src1, addr:$src2)>;
6232 // FIXME: Instead of an X86Movlps there should be an X86Movsd here. The problem
6233 // arises during lowering, where the load fold cannot be recognized because
6234 // it has two uses through a bitcast. One use disappears at isel time and the
6235 // fold opportunity reappears.
6236 def : Pat<(v4f32 (X86Movlps VR128:$src1, VR128:$src2)),
6237 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>;
6239 def : Pat<(v4i32 (X86Movlps VR128:$src1, VR128:$src2)),
6240 (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>;
6242 // Shuffle with MOVLPD
6243 def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))),
6244 (MOVLPDrm VR128:$src1, addr:$src2)>;
6245 def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))),
6246 (MOVLPDrm VR128:$src1, addr:$src2)>;
6247 def : Pat<(v2f64 (X86Movlpd VR128:$src1,
6248 (scalar_to_vector (loadf64 addr:$src2)))),
6249 (MOVLPDrm VR128:$src1, addr:$src2)>;
6251 // Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD
6252 def : Pat<(store (f64 (vector_extract
6253 (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))),addr:$dst),
6254 (MOVHPSmr addr:$dst, VR128:$src)>;
6255 def : Pat<(store (f64 (vector_extract
6256 (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))),addr:$dst),
6257 (MOVHPDmr addr:$dst, VR128:$src)>;
6259 def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),addr:$src1),
6260 (MOVLPSmr addr:$src1, VR128:$src2)>;
6261 def : Pat<(store (v4i32 (X86Movlps
6262 (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1),
6263 (MOVLPSmr addr:$src1, VR128:$src2)>;
6265 def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
6266 (MOVLPDmr addr:$src1, VR128:$src2)>;
6267 def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1),
6268 (MOVLPDmr addr:$src1, VR128:$src2)>;