//===-- X86InstrFragmentsSIMD.td - x86 SIMD ISA ------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides pattern fragments useful for SIMD instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MMX specific DAG Nodes.
//===----------------------------------------------------------------------===//

// Low word of MMX to GPR.
def MMX_X86movd2w : SDNode<"X86ISD::MMX_MOVD2W", SDTypeProfile<1, 1,
                          [SDTCisVT<0, i32>, SDTCisVT<1, x86mmx>]>>;
// GPR to low word of MMX.
def MMX_X86movw2d : SDNode<"X86ISD::MMX_MOVW2D", SDTypeProfile<1, 1,
                          [SDTCisVT<0, x86mmx>, SDTCisVT<1, i32>]>>;

//===----------------------------------------------------------------------===//
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//

def load_mmx : PatFrag<(ops node:$ptr), (x86mmx (load node:$ptr))>;
def load_mvmmx : PatFrag<(ops node:$ptr),
                         (x86mmx (MMX_X86movw2d (load node:$ptr)))>;
def bc_mmx : PatFrag<(ops node:$in), (x86mmx (bitconvert node:$in))>;

//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
                                       SDTCisFP<1>, SDTCisVT<3, i8>,
                                       SDTCisVec<1>]>;

def X86fmin    : SDNode<"X86ISD::FMIN",      SDTFPBinOp>;
def X86fmax    : SDNode<"X86ISD::FMAX",      SDTFPBinOp>;

// Commutative and Associative FMIN and FMAX.
def X86fminc   : SDNode<"X86ISD::FMINC", SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fmaxc   : SDNode<"X86ISD::FMAXC", SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;

def X86fand    : SDNode<"X86ISD::FAND",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86for     : SDNode<"X86ISD::FOR",       SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fxor    : SDNode<"X86ISD::FXOR",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
// Note: FANDN (~a & b) is neither commutative nor associative, so it
// deliberately carries no SDNP flags.
def X86fandn   : SDNode<"X86ISD::FANDN",     SDTFPBinOp>;
def X86frsqrt  : SDNode<"X86ISD::FRSQRT",    SDTFPUnaryOp>;
def X86frcp    : SDNode<"X86ISD::FRCP",      SDTFPUnaryOp>;
def X86fgetsign: SDNode<"X86ISD::FGETSIGNx86", SDTFPToIntOp>;
def X86fhadd   : SDNode<"X86ISD::FHADD",     SDTFPBinOp>;
def X86fhsub   : SDNode<"X86ISD::FHSUB",     SDTFPBinOp>;
def X86hadd    : SDNode<"X86ISD::HADD",      SDTIntBinOp>;
def X86hsub    : SDNode<"X86ISD::HSUB",      SDTIntBinOp>;
def X86comi    : SDNode<"X86ISD::COMI",      SDTX86CmpTest>;
def X86ucomi   : SDNode<"X86ISD::UCOMI",     SDTX86CmpTest>;
def X86cmps    : SDNode<"X86ISD::FSETCC",    SDTX86Cmps>;
//def X86cmpsd : SDNode<"X86ISD::FSETCCsd",  SDTX86Cmpsd>;
def X86cvtdq2pd : SDNode<"X86ISD::CVTDQ2PD",
                         SDTypeProfile<1, 1, [SDTCisVT<0, v2f64>,
                                              SDTCisVT<1, v4i32>]>>;
def X86cvtudq2pd : SDNode<"X86ISD::CVTUDQ2PD",
                          SDTypeProfile<1, 1, [SDTCisVT<0, v2f64>,
                                               SDTCisVT<1, v4i32>]>>;
def X86pshufb  : SDNode<"X86ISD::PSHUFB",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisSameAs<0,2>]>>;
def X86psadbw  : SDNode<"X86ISD::PSADBW",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisSameAs<0,2>]>>;
def X86andnp   : SDNode<"X86ISD::ANDNP",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisSameAs<0,2>]>>;
def X86psign   : SDNode<"X86ISD::PSIGN",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisSameAs<0,2>]>>;
def X86pextrb  : SDNode<"X86ISD::PEXTRB",
                        SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw  : SDNode<"X86ISD::PEXTRW",
                        SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb  : SDNode<"X86ISD::PINSRB",
                        SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                             SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw  : SDNode<"X86ISD::PINSRW",
                        SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                             SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insertps : SDNode<"X86ISD::INSERTPS",
                         SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                              SDTCisVT<2, v4f32>, SDTCisVT<3, i8>]>>;
def X86vzmovl  : SDNode<"X86ISD::VZEXT_MOVL",
                        SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;

def X86vzload  : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                        [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;

def X86vzext   : SDNode<"X86ISD::VZEXT",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisInt<0>, SDTCisInt<1>,
                                             SDTCisOpSmallerThanOp<1, 0>]>>;

def X86vsext   : SDNode<"X86ISD::VSEXT",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisInt<0>, SDTCisInt<1>,
                                             SDTCisOpSmallerThanOp<1, 0>]>>;

def SDTVtrunc  : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                      SDTCisInt<0>, SDTCisInt<1>,
                                      SDTCisOpSmallerThanOp<0, 1>]>;

def X86vtrunc   : SDNode<"X86ISD::VTRUNC",   SDTVtrunc>;
def X86vtruncs  : SDNode<"X86ISD::VTRUNCS",  SDTVtrunc>;
def X86vtruncus : SDNode<"X86ISD::VTRUNCUS", SDTVtrunc>;

def X86trunc   : SDNode<"X86ISD::TRUNC",
                        SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisInt<1>,
                                             SDTCisOpSmallerThanOp<0, 1>]>>;
def X86vfpext  : SDNode<"X86ISD::VFPEXT",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisFP<0>, SDTCisFP<1>,
                                             SDTCisOpSmallerThanOp<1, 0>]>>;
def X86vfpround : SDNode<"X86ISD::VFPROUND",
                         SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                              SDTCisFP<0>, SDTCisFP<1>,
                                              SDTCisOpSmallerThanOp<0, 1>]>>;

def X86vshldq  : SDNode<"X86ISD::VSHLDQ", SDTIntShiftOp>;
def X86vshrdq  : SDNode<"X86ISD::VSRLDQ", SDTIntShiftOp>;
def X86cmpp    : SDNode<"X86ISD::CMPP",   SDTX86VFCMP>;
def X86pcmpeq  : SDNode<"X86ISD::PCMPEQ", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpgt  : SDNode<"X86ISD::PCMPGT", SDTIntBinOp>;

def X86IntCmpMask : SDTypeProfile<1, 2,
    [SDTCisVec<0>, SDTCisSameAs<1, 2>, SDTCisInt<1>]>;
def X86pcmpeqm : SDNode<"X86ISD::PCMPEQM", X86IntCmpMask, [SDNPCommutative]>;
def X86pcmpgtm : SDNode<"X86ISD::PCMPGTM", X86IntCmpMask>;

def X86CmpMaskCC :
      SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCVecEltisVT<0, i1>,
                           SDTCisVec<1>, SDTCisSameAs<2, 1>,
                           SDTCisSameNumEltsAs<0, 1>, SDTCisVT<3, i8>]>;
def X86CmpMaskCCRound :
      SDTypeProfile<1, 4, [SDTCisVec<0>, SDTCVecEltisVT<0, i1>,
                           SDTCisVec<1>, SDTCisSameAs<2, 1>,
                           SDTCisSameNumEltsAs<0, 1>, SDTCisVT<3, i8>,
                           SDTCisInt<4>]>;
def X86CmpMaskCCScalar :
      SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, i8>]>;

def X86cmpm    : SDNode<"X86ISD::CMPM",     X86CmpMaskCC>;
def X86cmpmRnd : SDNode<"X86ISD::CMPM_RND", X86CmpMaskCCRound>;
def X86cmpmu   : SDNode<"X86ISD::CMPMU",    X86CmpMaskCC>;
def X86cmpms   : SDNode<"X86ISD::FSETCC",   X86CmpMaskCCScalar>;

def X86vshl    : SDNode<"X86ISD::VSHL",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisVec<2>]>>;
def X86vsrl    : SDNode<"X86ISD::VSRL",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisVec<2>]>>;
def X86vsra    : SDNode<"X86ISD::VSRA",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisVec<2>]>>;

def X86vshli   : SDNode<"X86ISD::VSHLI", SDTIntShiftOp>;
def X86vsrli   : SDNode<"X86ISD::VSRLI", SDTIntShiftOp>;
def X86vsrai   : SDNode<"X86ISD::VSRAI", SDTIntShiftOp>;

def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                          SDTCisVec<1>,
                                          SDTCisSameAs<2, 1>]>;
def X86addus   : SDNode<"X86ISD::ADDUS", SDTIntBinOp>;
def X86subus   : SDNode<"X86ISD::SUBUS", SDTIntBinOp>;
def X86adds    : SDNode<"X86ISD::ADDS", SDTIntBinOp>;
def X86subs    : SDNode<"X86ISD::SUBS", SDTIntBinOp>;
def X86mulhrs  : SDNode<"X86ISD::MULHRS", SDTIntBinOp>;
def X86avg     : SDNode<"X86ISD::AVG", SDTIntBinOp>;
def X86ptest   : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
def X86testp   : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>;
def X86kortest : SDNode<"X86ISD::KORTEST", SDTX86CmpPTest>;
def X86testm   : SDNode<"X86ISD::TESTM", SDTypeProfile<1, 2, [SDTCisVec<0>,
                                          SDTCisVec<1>, SDTCisSameAs<2, 1>,
                                          SDTCVecEltisVT<0, i1>,
                                          SDTCisSameNumEltsAs<0, 1>]>>;
def X86testnm  : SDNode<"X86ISD::TESTNM", SDTypeProfile<1, 2, [SDTCisVec<0>,
                                           SDTCisVec<1>, SDTCisSameAs<2, 1>,
                                           SDTCVecEltisVT<0, i1>,
                                           SDTCisSameNumEltsAs<0, 1>]>>;
def X86select  : SDNode<"X86ISD::SELECT", SDTSelect>;

def X86pmuludq : SDNode<"X86ISD::PMULUDQ",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisSameAs<1,2>]>>;
def X86pmuldq  : SDNode<"X86ISD::PMULDQ",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisSameAs<1,2>]>>;

def X86extrqi : SDNode<"X86ISD::EXTRQI",
                       SDTypeProfile<1, 3, [SDTCisVT<0, v2i64>, SDTCisSameAs<0,1>,
                                            SDTCisVT<2, i8>, SDTCisVT<3, i8>]>>;
def X86insertqi : SDNode<"X86ISD::INSERTQI",
                         SDTypeProfile<1, 4, [SDTCisVT<0, v2i64>, SDTCisSameAs<0,1>,
                                              SDTCisSameAs<1,2>, SDTCisVT<3, i8>,
                                              SDTCisVT<4, i8>]>>;

// Specific shuffle nodes - At some point ISD::VECTOR_SHUFFLE will always get
// translated into one of the target nodes below during lowering.
// Note: this is a work in progress...
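// For example, a v4i32 shuffle with mask <1,0,3,2> is typically lowered to
// (X86PShufd VR128:$src, (i8 0xB1)) rather than kept as a generic
// ISD::VECTOR_SHUFFLE (an illustrative sketch; the actual lowering decisions
// live in X86ISelLowering.cpp).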
def SDTShuff1Op  : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDTShuff2Op  : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                        SDTCisSameAs<0,2>]>;
def SDTShuff3Op  : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                        SDTCisSameAs<0,2>, SDTCisSameAs<0,3>]>;

def SDTShuff2OpM : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                        SDTCisVec<2>]>;
def SDTShuff2OpI : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                        SDTCisSameAs<0,1>, SDTCisInt<2>]>;
def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                        SDTCisSameAs<0,2>, SDTCisInt<3>]>;
def SDTFPBinOpImmRound : SDTypeProfile<1, 4, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                              SDTCisSameAs<0,2>, SDTCisInt<3>,
                                              SDTCisInt<4>]>;
def SDTFPUnaryOpImmRound : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                                SDTCisInt<2>, SDTCisInt<3>]>;

def SDTVBroadcast  : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDTVBroadcastm : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>]>;

def SDTBlend : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                    SDTCisSameAs<1,2>, SDTCisVT<3, i8>]>;

def SDTFPBinOpRound : SDTypeProfile<1, 3, [ // fadd_round, fmul_round, etc.
  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisFP<0>, SDTCisInt<3>]>;

def SDTFPUnaryOpRound : SDTypeProfile<1, 2, [ // fsqrt_round, fgetexp_round, etc.
  SDTCisSameAs<0, 1>, SDTCisFP<0>, SDTCisInt<2>]>;

def SDTFma : SDTypeProfile<1, 3, [SDTCisSameAs<0,1>,
                                  SDTCisSameAs<1,2>, SDTCisSameAs<1,3>]>;
def SDTFmaRound : SDTypeProfile<1, 4, [SDTCisSameAs<0,1>,
                                       SDTCisSameAs<1,2>, SDTCisSameAs<1,3>,
                                       SDTCisInt<4>]>;
def STDFp1SrcRm : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>,
                                       SDTCisVec<0>, SDTCisInt<2>]>;
def STDFp2SrcRm : SDTypeProfile<1, 3, [SDTCisSameAs<0,1>,
                                       SDTCisVec<0>, SDTCisInt<3>]>;
def STDFp3SrcRm : SDTypeProfile<1, 4, [SDTCisSameAs<0,1>,
                                       SDTCisVec<0>, SDTCisInt<3>, SDTCisInt<4>]>;

def X86PAlignr : SDNode<"X86ISD::PALIGNR", SDTShuff3OpI>;
def X86VAlign  : SDNode<"X86ISD::VALIGN", SDTShuff3OpI>;
def X86Abs     : SDNode<"X86ISD::ABS", SDTIntUnaryOp>;

def X86PShufd  : SDNode<"X86ISD::PSHUFD", SDTShuff2OpI>;
def X86PShufhw : SDNode<"X86ISD::PSHUFHW", SDTShuff2OpI>;
def X86PShuflw : SDNode<"X86ISD::PSHUFLW", SDTShuff2OpI>;

def X86Shufp   : SDNode<"X86ISD::SHUFP", SDTShuff3OpI>;
def X86Shuf128 : SDNode<"X86ISD::SHUF128", SDTShuff3OpI>;

def X86Movddup  : SDNode<"X86ISD::MOVDDUP", SDTShuff1Op>;
def X86Movshdup : SDNode<"X86ISD::MOVSHDUP", SDTShuff1Op>;
def X86Movsldup : SDNode<"X86ISD::MOVSLDUP", SDTShuff1Op>;

def X86Movsd : SDNode<"X86ISD::MOVSD", SDTShuff2Op>;
def X86Movss : SDNode<"X86ISD::MOVSS", SDTShuff2Op>;

def X86Movlhps : SDNode<"X86ISD::MOVLHPS", SDTShuff2Op>;
def X86Movlhpd : SDNode<"X86ISD::MOVLHPD", SDTShuff2Op>;
def X86Movhlps : SDNode<"X86ISD::MOVHLPS", SDTShuff2Op>;

def X86Movlps : SDNode<"X86ISD::MOVLPS", SDTShuff2Op>;
def X86Movlpd : SDNode<"X86ISD::MOVLPD", SDTShuff2Op>;

def SDTPack : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>, SDTCisSameAs<2, 1>]>;
def X86Packss : SDNode<"X86ISD::PACKSS", SDTPack>;
def X86Packus : SDNode<"X86ISD::PACKUS", SDTPack>;

def X86Unpckl : SDNode<"X86ISD::UNPCKL", SDTShuff2Op>;
def X86Unpckh : SDNode<"X86ISD::UNPCKH", SDTShuff2Op>;

def X86vpmaddubsw : SDNode<"X86ISD::VPMADDUBSW", SDTPack>;
def X86vpmaddwd   : SDNode<"X86ISD::VPMADDWD", SDTPack>;

def X86VPermilpv  : SDNode<"X86ISD::VPERMILPV", SDTShuff2OpM>;
def X86VPermilpi  : SDNode<"X86ISD::VPERMILPI", SDTShuff2OpI>;
def X86VPermv     : SDNode<"X86ISD::VPERMV", SDTShuff2Op>;
def X86VPermi     : SDNode<"X86ISD::VPERMI", SDTShuff2OpI>;
def X86VPermv3    : SDNode<"X86ISD::VPERMV3", SDTShuff3Op>;
def X86VPermiv3   : SDNode<"X86ISD::VPERMIV3", SDTShuff3Op>;

def X86VPerm2x128 : SDNode<"X86ISD::VPERM2X128", SDTShuff3OpI>;

def X86VFixupimm  : SDNode<"X86ISD::VFIXUPIMM", SDTFPBinOpImmRound>;
def X86VRange     : SDNode<"X86ISD::VRANGE", SDTFPBinOpImmRound>;
def X86VReduce    : SDNode<"X86ISD::VREDUCE", SDTFPUnaryOpImmRound>;
def X86VRndScale  : SDNode<"X86ISD::VRNDSCALE", SDTFPUnaryOpImmRound>;

def X86SubVBroadcast : SDNode<"X86ISD::SUBV_BROADCAST",
                              SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                                   SDTCisSubVecOfVec<1, 0>]>, []>;
def X86VBroadcast : SDNode<"X86ISD::VBROADCAST", SDTVBroadcast>;
def X86Vinsert    : SDNode<"X86ISD::VINSERT", SDTypeProfile<1, 3,
                              [SDTCisSameAs<0, 1>, SDTCisPtrTy<3>]>, []>;
def X86Vextract   : SDNode<"X86ISD::VEXTRACT", SDTypeProfile<1, 2,
                              [SDTCisVec<1>, SDTCisPtrTy<2>]>, []>;

def X86Blendi : SDNode<"X86ISD::BLENDI", SDTBlend>;

def X86Addsub : SDNode<"X86ISD::ADDSUB", SDTFPBinOp>;

def X86faddRnd   : SDNode<"X86ISD::FADD_RND", SDTFPBinOpRound>;
def X86fsubRnd   : SDNode<"X86ISD::FSUB_RND", SDTFPBinOpRound>;
def X86fmulRnd   : SDNode<"X86ISD::FMUL_RND", SDTFPBinOpRound>;
def X86fdivRnd   : SDNode<"X86ISD::FDIV_RND", SDTFPBinOpRound>;
def X86fmaxRnd   : SDNode<"X86ISD::FMAX_RND", SDTFPBinOpRound>;
def X86scalef    : SDNode<"X86ISD::SCALEF", SDTFPBinOpRound>;
def X86fminRnd   : SDNode<"X86ISD::FMIN_RND", SDTFPBinOpRound>;
def X86fsqrtRnd  : SDNode<"X86ISD::FSQRT_RND", SDTFPUnaryOpRound>;
def X86fgetexpRnd  : SDNode<"X86ISD::FGETEXP_RND", SDTFPUnaryOpRound>;
def X86fgetexpRnds : SDNode<"X86ISD::FGETEXP_RND", STDFp2SrcRm>;

def X86Fmadd    : SDNode<"X86ISD::FMADD", SDTFma>;
def X86Fnmadd   : SDNode<"X86ISD::FNMADD", SDTFma>;
def X86Fmsub    : SDNode<"X86ISD::FMSUB", SDTFma>;
def X86Fnmsub   : SDNode<"X86ISD::FNMSUB", SDTFma>;
def X86Fmaddsub : SDNode<"X86ISD::FMADDSUB", SDTFma>;
def X86Fmsubadd : SDNode<"X86ISD::FMSUBADD", SDTFma>;

def X86FmaddRnd    : SDNode<"X86ISD::FMADD_RND", SDTFmaRound>;
def X86FnmaddRnd   : SDNode<"X86ISD::FNMADD_RND", SDTFmaRound>;
def X86FmsubRnd    : SDNode<"X86ISD::FMSUB_RND", SDTFmaRound>;
def X86FnmsubRnd   : SDNode<"X86ISD::FNMSUB_RND", SDTFmaRound>;
def X86FmaddsubRnd : SDNode<"X86ISD::FMADDSUB_RND", SDTFmaRound>;
def X86FmsubaddRnd : SDNode<"X86ISD::FMSUBADD_RND", SDTFmaRound>;

def X86rsqrt28 : SDNode<"X86ISD::RSQRT28", STDFp1SrcRm>;
def X86rcp28   : SDNode<"X86ISD::RCP28", STDFp1SrcRm>;
def X86exp2    : SDNode<"X86ISD::EXP2", STDFp1SrcRm>;

def X86rsqrt28s  : SDNode<"X86ISD::RSQRT28", STDFp2SrcRm>;
def X86rcp28s    : SDNode<"X86ISD::RCP28", STDFp2SrcRm>;
def X86RndScales : SDNode<"X86ISD::VRNDSCALE", STDFp3SrcRm>;
def X86Reduces   : SDNode<"X86ISD::VREDUCE", STDFp3SrcRm>;

def SDT_PCMPISTRI : SDTypeProfile<2, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
                                         SDTCisVT<2, v16i8>, SDTCisVT<3, v16i8>,
                                         SDTCisVT<4, i8>]>;
def SDT_PCMPESTRI : SDTypeProfile<2, 5, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
                                         SDTCisVT<2, v16i8>, SDTCisVT<3, i32>,
                                         SDTCisVT<4, v16i8>, SDTCisVT<5, i32>,
                                         SDTCisVT<6, i8>]>;

def X86pcmpistri : SDNode<"X86ISD::PCMPISTRI", SDT_PCMPISTRI>;
def X86pcmpestri : SDNode<"X86ISD::PCMPESTRI", SDT_PCMPESTRI>;

def X86compress : SDNode<"X86ISD::COMPRESS", SDTypeProfile<1, 1,
                             [SDTCisSameAs<0, 1>, SDTCisVec<1>]>, []>;
def X86expand   : SDNode<"X86ISD::EXPAND", SDTypeProfile<1, 1,
                             [SDTCisSameAs<0, 1>, SDTCisVec<1>]>, []>;

def SDTintToFPRound : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisFP<0>,
                                           SDTCisSameAs<0,1>, SDTCisInt<2>,
                                           SDTCisInt<3>]>;

def SDTDoubleToInt : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                          SDTCisInt<0>, SDTCVecEltisVT<1, f64>]>;
def SDTFloatToInt : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                         SDTCisInt<0>, SDTCVecEltisVT<1, f32>]>;

def SDTDoubleToIntRnd : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisInt<0>, SDTCVecEltisVT<1, f64>]>;
def SDTFloatToIntRnd : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                            SDTCisInt<0>, SDTCVecEltisVT<1, f32>]>;

def SDTVintToFPRound : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                            SDTCisFP<0>, SDTCVecEltisVT<1, i32>,
                                            SDTCisInt<2>]>;
def SDTVlongToFPRound : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisFP<0>, SDTCVecEltisVT<1, i64>,
                                             SDTCisInt<2>]>;

def SDTVFPToIntRound : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                            SDTCisFP<1>, SDTCVecEltisVT<0, i32>,
                                            SDTCisInt<2>]>;
def SDTVFPToLongRound : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisFP<1>, SDTCVecEltisVT<0, i64>,
                                             SDTCisInt<2>]>;

// Scalar with rounding mode
def X86SintToFpRnd : SDNode<"X86ISD::SINT_TO_FP_RND", SDTintToFPRound>;
def X86UintToFpRnd : SDNode<"X86ISD::UINT_TO_FP_RND", SDTintToFPRound>;

// Vector with rounding mode

// cvtt fp-to-int stuff
def X86VFpToSintRnd  : SDNode<"ISD::FP_TO_SINT", SDTVFPToIntRound>;
def X86VFpToUintRnd  : SDNode<"ISD::FP_TO_UINT", SDTVFPToIntRound>;
def X86VFpToSlongRnd : SDNode<"ISD::FP_TO_SINT", SDTVFPToLongRound>;
def X86VFpToUlongRnd : SDNode<"ISD::FP_TO_UINT", SDTVFPToLongRound>;

def X86VSintToFpRnd  : SDNode<"ISD::SINT_TO_FP", SDTVintToFPRound>;
def X86VUintToFpRnd  : SDNode<"ISD::UINT_TO_FP", SDTVintToFPRound>;
def X86VSlongToFpRnd : SDNode<"ISD::SINT_TO_FP", SDTVlongToFPRound>;
def X86VUlongToFpRnd : SDNode<"ISD::UINT_TO_FP", SDTVlongToFPRound>;

// cvt fp-to-int stuff
def X86cvtps2IntRnd  : SDNode<"X86ISD::FP_TO_SINT_RND", SDTFloatToIntRnd>;
def X86cvtps2UIntRnd : SDNode<"X86ISD::FP_TO_UINT_RND", SDTFloatToIntRnd>;
def X86cvtpd2IntRnd  : SDNode<"X86ISD::FP_TO_SINT_RND", SDTDoubleToIntRnd>;
def X86cvtpd2UIntRnd : SDNode<"X86ISD::FP_TO_UINT_RND", SDTDoubleToIntRnd>;

// Vector without rounding mode
def X86cvtps2Int  : SDNode<"X86ISD::FP_TO_SINT_RND", SDTFloatToInt>;
def X86cvtps2UInt : SDNode<"X86ISD::FP_TO_UINT_RND", SDTFloatToInt>;
def X86cvtpd2Int  : SDNode<"X86ISD::FP_TO_SINT_RND", SDTDoubleToInt>;
def X86cvtpd2UInt : SDNode<"X86ISD::FP_TO_UINT_RND", SDTDoubleToInt>;

def X86vfpextRnd : SDNode<"X86ISD::VFPEXT",
                          SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                               SDTCisFP<0>, SDTCisFP<1>,
                                               SDTCisOpSmallerThanOp<1, 0>,
                                               SDTCisInt<2>]>>;
def X86vfproundRnd : SDNode<"X86ISD::VFPROUND",
                            SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                                 SDTCisFP<0>, SDTCisFP<1>,
                                                 SDTCVecEltisVT<0, f32>,
                                                 SDTCVecEltisVT<1, f64>,
                                                 SDTCisOpSmallerThanOp<0, 1>,
                                                 SDTCisInt<2>]>>;

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand,
                                   SDNPWantRoot]>;
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand,
                                   SDNPWantRoot]>;
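
// Illustrative use (a sketch, not a definition from this file): scalar
// intrinsic patterns accept either a register or a folded load through these,
// along the lines of
//   (X86ucomi (v4f32 VR128:$src1), sse_load_f32:$src2)
// paired with the ssmem/sdmem memory operands defined below.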

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86Mem32AsmOperand;
  let OperandType = "OPERAND_MEMORY";
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86Mem64AsmOperand;
  let OperandType = "OPERAND_MEMORY";
}

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

// 128-bit load pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// 256-bit load pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;

// 512-bit load pattern fragments
def loadv16f32 : PatFrag<(ops node:$ptr), (v16f32 (load node:$ptr))>;
def loadv8f64  : PatFrag<(ops node:$ptr), (v8f64 (load node:$ptr))>;
def loadv64i8  : PatFrag<(ops node:$ptr), (v64i8 (load node:$ptr))>;
def loadv32i16 : PatFrag<(ops node:$ptr), (v32i16 (load node:$ptr))>;
def loadv16i32 : PatFrag<(ops node:$ptr), (v16i32 (load node:$ptr))>;
def loadv8i64  : PatFrag<(ops node:$ptr), (v8i64 (load node:$ptr))>;

// 128-/256-/512-bit extload pattern fragments
def extloadv2f32 : PatFrag<(ops node:$ptr), (v2f64 (extloadvf32 node:$ptr))>;
def extloadv4f32 : PatFrag<(ops node:$ptr), (v4f64 (extloadvf32 node:$ptr))>;
def extloadv8f32 : PatFrag<(ops node:$ptr), (v8f64 (extloadvf32 node:$ptr))>;

// These are needed to match a scalar load that is used in a vector-only
// math instruction such as the FP logical ops: andps, andnps, orps, xorps.
// The memory operand is required to be a 128-bit load, so it must be converted
// from a vector to a scalar.
def loadf32_128 : PatFrag<(ops node:$ptr),
  (f32 (vector_extract (loadv4f32 node:$ptr), (iPTR 0)))>;
def loadf64_128 : PatFrag<(ops node:$ptr),
  (f64 (vector_extract (loadv2f64 node:$ptr), (iPTR 0)))>;
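
// Sketch of a typical consumer (the instruction name here is hypothetical,
// not defined in this file):
//   def : Pat<(f32 (X86fand FR32:$src1, (loadf32_128 addr:$src2))),
//             (SomeANDPSrm FR32:$src1, addr:$src2)>;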

// Like 'store', but always requires 128-bit vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'store', but always requires 256-bit vector alignment.
def alignedstore256 : PatFrag<(ops node:$val, node:$ptr),
                              (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 32;
}]>;

// Like 'store', but always requires 512-bit vector alignment.
def alignedstore512 : PatFrag<(ops node:$val, node:$ptr),
                              (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 64;
}]>;
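
// For reference, MOVAPS-style store patterns elsewhere in the backend use
// these fragments roughly as (illustrative):
//   [(alignedstore (v4f32 VR128:$src), addr:$dst)]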

// Like 'load', but always requires 128-bit vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'X86vzload', but always requires 128-bit vector alignment.
def alignedX86vzload : PatFrag<(ops node:$ptr), (X86vzload node:$ptr), [{
  return cast<MemSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'load', but always requires 256-bit vector alignment.
def alignedload256 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 32;
}]>;

// Like 'load', but always requires 512-bit vector alignment.
def alignedload512 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 64;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr),
                               (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
                               (f64 (alignedload node:$ptr))>;

// 128-bit aligned load pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
                               (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
                               (v2f64 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
                               (v2i64 (alignedload node:$ptr))>;

// 256-bit aligned load pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def alignedloadv8f32 : PatFrag<(ops node:$ptr),
                               (v8f32 (alignedload256 node:$ptr))>;
def alignedloadv4f64 : PatFrag<(ops node:$ptr),
                               (v4f64 (alignedload256 node:$ptr))>;
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
                               (v4i64 (alignedload256 node:$ptr))>;

// 512-bit aligned load pattern fragments
def alignedloadv16f32 : PatFrag<(ops node:$ptr),
                                (v16f32 (alignedload512 node:$ptr))>;
def alignedloadv16i32 : PatFrag<(ops node:$ptr),
                                (v16i32 (alignedload512 node:$ptr))>;
def alignedloadv8f64  : PatFrag<(ops node:$ptr),
                                (v8f64 (alignedload512 node:$ptr))>;
def alignedloadv8i64  : PatFrag<(ops node:$ptr),
                                (v8i64 (alignedload512 node:$ptr))>;

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others. If the subtarget
// allows unaligned accesses, match any load, though this may require
// setting a feature bit in the processor (on startup, for example).
// Opteron 10h and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return Subtarget->hasSSEUnalignedMem()
      || cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;

// 128-bit memop pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
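
// Typical consumer (sketch): non-AVX SSE arithmetic patterns fold loads via
// memop so that a 16-byte access is only matched when the subtarget permits
// it, e.g. something like
//   (v4f32 (fadd VR128:$src1, (memopv4f32 addr:$src2)))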

// These are needed to match a scalar memop that is used in a vector-only
// math instruction such as the FP logical ops: andps, andnps, orps, xorps.
// The memory operand is required to be a 128-bit load, so it must be converted
// from a vector to a scalar.
def memopfsf32_128 : PatFrag<(ops node:$ptr),
  (f32 (vector_extract (memopv4f32 node:$ptr), (iPTR 0)))>;
def memopfsf64_128 : PatFrag<(ops node:$ptr),
  (f64 (vector_extract (memopv2f64 node:$ptr), (iPTR 0)))>;

// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;

def memopmmx : PatFrag<(ops node:$ptr), (x86mmx (memop64 node:$ptr))>;

// MOVNT Support
// Like 'store', but requires the non-temporal bit to be set
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                               (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal();
  return false;
}]>;

def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                      (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() && !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                        (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() &&
           ST->getAlignment() < 16;
  return false;
}]>;
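
// For reference, MOVNTPS-style patterns elsewhere use these roughly as
// (illustrative):
//   [(alignednontemporalstore (v4f32 VR128:$src), addr:$dst)]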

def mgatherv4i32 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                           (masked_gather node:$src1, node:$src2, node:$src3), [{
  if (MaskedGatherSDNode *Mgt = dyn_cast<MaskedGatherSDNode>(N))
    return (Mgt->getIndex().getValueType() == MVT::v4i32 ||
            Mgt->getBasePtr().getValueType() == MVT::v4i32);
  return false;
}]>;

def mgatherv8i32 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                           (masked_gather node:$src1, node:$src2, node:$src3), [{
  if (MaskedGatherSDNode *Mgt = dyn_cast<MaskedGatherSDNode>(N))
    return (Mgt->getIndex().getValueType() == MVT::v8i32 ||
            Mgt->getBasePtr().getValueType() == MVT::v8i32);
  return false;
}]>;

def mgatherv2i64 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                           (masked_gather node:$src1, node:$src2, node:$src3), [{
  if (MaskedGatherSDNode *Mgt = dyn_cast<MaskedGatherSDNode>(N))
    return (Mgt->getIndex().getValueType() == MVT::v2i64 ||
            Mgt->getBasePtr().getValueType() == MVT::v2i64);
  return false;
}]>;

def mgatherv4i64 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                           (masked_gather node:$src1, node:$src2, node:$src3), [{
  if (MaskedGatherSDNode *Mgt = dyn_cast<MaskedGatherSDNode>(N))
    return (Mgt->getIndex().getValueType() == MVT::v4i64 ||
            Mgt->getBasePtr().getValueType() == MVT::v4i64);
  return false;
}]>;

def mgatherv8i64 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                           (masked_gather node:$src1, node:$src2, node:$src3), [{
  if (MaskedGatherSDNode *Mgt = dyn_cast<MaskedGatherSDNode>(N))
    return (Mgt->getIndex().getValueType() == MVT::v8i64 ||
            Mgt->getBasePtr().getValueType() == MVT::v8i64);
  return false;
}]>;

def mgatherv16i32 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                            (masked_gather node:$src1, node:$src2, node:$src3), [{
  if (MaskedGatherSDNode *Mgt = dyn_cast<MaskedGatherSDNode>(N))
    return (Mgt->getIndex().getValueType() == MVT::v16i32 ||
            Mgt->getBasePtr().getValueType() == MVT::v16i32);
  return false;
}]>;

def mscatterv2i64 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                            (masked_scatter node:$src1, node:$src2, node:$src3), [{
  if (MaskedScatterSDNode *Sc = dyn_cast<MaskedScatterSDNode>(N))
    return (Sc->getIndex().getValueType() == MVT::v2i64 ||
            Sc->getBasePtr().getValueType() == MVT::v2i64);
  return false;
}]>;

def mscatterv4i32 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                            (masked_scatter node:$src1, node:$src2, node:$src3), [{
  if (MaskedScatterSDNode *Sc = dyn_cast<MaskedScatterSDNode>(N))
    return (Sc->getIndex().getValueType() == MVT::v4i32 ||
            Sc->getBasePtr().getValueType() == MVT::v4i32);
  return false;
}]>;

def mscatterv4i64 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                            (masked_scatter node:$src1, node:$src2, node:$src3), [{
  if (MaskedScatterSDNode *Sc = dyn_cast<MaskedScatterSDNode>(N))
    return (Sc->getIndex().getValueType() == MVT::v4i64 ||
            Sc->getBasePtr().getValueType() == MVT::v4i64);
  return false;
}]>;

def mscatterv8i32 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                            (masked_scatter node:$src1, node:$src2, node:$src3), [{
  if (MaskedScatterSDNode *Sc = dyn_cast<MaskedScatterSDNode>(N))
    return (Sc->getIndex().getValueType() == MVT::v8i32 ||
            Sc->getBasePtr().getValueType() == MVT::v8i32);
  return false;
}]>;

def mscatterv8i64 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                            (masked_scatter node:$src1, node:$src2, node:$src3), [{
  if (MaskedScatterSDNode *Sc = dyn_cast<MaskedScatterSDNode>(N))
    return (Sc->getIndex().getValueType() == MVT::v8i64 ||
            Sc->getBasePtr().getValueType() == MVT::v8i64);
  return false;
}]>;

def mscatterv16i32 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                             (masked_scatter node:$src1, node:$src2, node:$src3), [{
  if (MaskedScatterSDNode *Sc = dyn_cast<MaskedScatterSDNode>(N))
    return (Sc->getIndex().getValueType() == MVT::v16i32 ||
            Sc->getBasePtr().getValueType() == MVT::v16i32);
  return false;
}]>;
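
// AVX2/AVX-512 gather and scatter instruction definitions use these fragments
// to dispatch on the index vector width: a VGATHERDPS-style pattern matches
// mgatherv4i32 while a VGATHERQPS-style pattern matches mgatherv2i64
// (illustrative; the concrete patterns live in the instruction .td files).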

// 128-bit bitconvert pattern fragments
def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

// 256-bit bitconvert pattern fragments
def bc_v32i8  : PatFrag<(ops node:$in), (v32i8 (bitconvert node:$in))>;
def bc_v16i16 : PatFrag<(ops node:$in), (v16i16 (bitconvert node:$in))>;
def bc_v8i32  : PatFrag<(ops node:$in), (v8i32 (bitconvert node:$in))>;
def bc_v4i64  : PatFrag<(ops node:$in), (v4i64 (bitconvert node:$in))>;
def bc_v8f32  : PatFrag<(ops node:$in), (v8f32 (bitconvert node:$in))>;

// 512-bit bitconvert pattern fragments
def bc_v16i32 : PatFrag<(ops node:$in), (v16i32 (bitconvert node:$in))>;
def bc_v8i64  : PatFrag<(ops node:$in), (v8i64 (bitconvert node:$in))>;
def bc_v8f64  : PatFrag<(ops node:$in), (v8f64 (bitconvert node:$in))>;
def bc_v16f32 : PatFrag<(ops node:$in), (v16f32 (bitconvert node:$in))>;

def vzmovl_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzmovl
                             (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
def vzmovl_v4i32 : PatFrag<(ops node:$src),
                           (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;

def vzload_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzload node:$src)))>;

def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

def I8Imm : SDNodeXForm<imm, [{
  // Transformation function: get the low 8 bits.
  return getI8Imm((uint8_t)N->getZExtValue(), SDLoc(N));
}]>;

def FROUND_NO_EXC : ImmLeaf<i32, [{ return Imm == 8; }]>;
def FROUND_CURRENT : ImmLeaf<i32, [{
  return Imm == X86::STATIC_ROUNDING::CUR_DIRECTION;
}]>;

// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getZExtValue() >> 3, SDLoc(N));
}]>;
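
// Example use (a sketch; the instruction side is defined elsewhere): a
// shift-by-bits node can be selected to a byte-shift instruction by running
// the immediate through BYTE_imm, e.g.
//   def : Pat<(v2i64 (X86vshldq VR128:$src, (i8 imm:$amt))),
//             (PSLLDQri VR128:$src, (BYTE_imm imm:$amt))>;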

// EXTRACT_get_vextract128_imm xform function: convert extract_subvector index
// to VEXTRACTF128/VEXTRACTI128 imm.
def EXTRACT_get_vextract128_imm : SDNodeXForm<extract_subvector, [{
  return getI8Imm(X86::getExtractVEXTRACT128Immediate(N), SDLoc(N));
}]>;

// INSERT_get_vinsert128_imm xform function: convert insert_subvector index to
// VINSERTF128/VINSERTI128 imm.
def INSERT_get_vinsert128_imm : SDNodeXForm<insert_subvector, [{
  return getI8Imm(X86::getInsertVINSERT128Immediate(N), SDLoc(N));
}]>;

// EXTRACT_get_vextract256_imm xform function: convert extract_subvector index
// to VEXTRACTF64x4 imm.
def EXTRACT_get_vextract256_imm : SDNodeXForm<extract_subvector, [{
  return getI8Imm(X86::getExtractVEXTRACT256Immediate(N), SDLoc(N));
}]>;

// INSERT_get_vinsert256_imm xform function: convert insert_subvector index to
// VINSERTF64x4 imm.
def INSERT_get_vinsert256_imm : SDNodeXForm<insert_subvector, [{
  return getI8Imm(X86::getInsertVINSERT256Immediate(N), SDLoc(N));
}]>;

def vextract128_extract : PatFrag<(ops node:$bigvec, node:$index),
                                  (extract_subvector node:$bigvec,
                                                     (iPTR imm)), [{
  return X86::isVEXTRACT128Index(N);
}], EXTRACT_get_vextract128_imm>;

def vinsert128_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
                                     node:$index),
                                (insert_subvector node:$bigvec, node:$smallvec,
                                                  (iPTR imm)), [{
  return X86::isVINSERT128Index(N);
}], INSERT_get_vinsert128_imm>;

def vextract256_extract : PatFrag<(ops node:$bigvec, node:$index),
                                  (extract_subvector node:$bigvec,
                                                     (iPTR imm)), [{
  return X86::isVEXTRACT256Index(N);
}], EXTRACT_get_vextract256_imm>;

def vinsert256_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
                                     node:$index),
                                (insert_subvector node:$bigvec, node:$smallvec,
                                                  (iPTR imm)), [{
  return X86::isVINSERT256Index(N);
}], INSERT_get_vinsert256_imm>;
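
// Consumers pair each fragment with its immediate xform; a VINSERTF128
// selection pattern looks roughly like this (illustrative sketch):
//   def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1),
//                                     (v4f32 VR128:$src2), (iPTR imm)),
//             (VINSERTF128rr VR256:$src1, VR128:$src2,
//                            (INSERT_get_vinsert128_imm VR256:$ins))>;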

def masked_load_aligned128 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                                     (masked_load node:$src1, node:$src2, node:$src3), [{
  if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
    return Load->getAlignment() >= 16;
  return false;
}]>;

def masked_load_aligned256 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                                     (masked_load node:$src1, node:$src2, node:$src3), [{
  if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
    return Load->getAlignment() >= 32;
  return false;
}]>;

def masked_load_aligned512 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                                     (masked_load node:$src1, node:$src2, node:$src3), [{
  if (auto *Load = dyn_cast<MaskedLoadSDNode>(N))
    return Load->getAlignment() >= 64;
  return false;
}]>;

def masked_load_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                                    (masked_load node:$src1, node:$src2, node:$src3), [{
  return isa<MaskedLoadSDNode>(N);
}]>;

// Masked store fragments.
// X86mstore can't be implemented in core DAG files because some targets
// don't support vector types (llvm-tblgen would fail).
def X86mstore : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                        (masked_store node:$src1, node:$src2, node:$src3), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore();
}]>;

def masked_store_aligned128 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                                      (X86mstore node:$src1, node:$src2, node:$src3), [{
  if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
    return Store->getAlignment() >= 16;
  return false;
}]>;

def masked_store_aligned256 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                                      (X86mstore node:$src1, node:$src2, node:$src3), [{
  if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
    return Store->getAlignment() >= 32;
  return false;
}]>;

def masked_store_aligned512 : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                                      (X86mstore node:$src1, node:$src2, node:$src3), [{
  if (auto *Store = dyn_cast<MaskedStoreSDNode>(N))
    return Store->getAlignment() >= 64;
  return false;
}]>;

def masked_store_unaligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                                     (X86mstore node:$src1, node:$src2, node:$src3), [{
  return isa<MaskedStoreSDNode>(N);
}]>;

// Masked truncstore fragments.
// X86mtruncstore can't be implemented in core DAG files because some targets
// don't support vector types (llvm-tblgen would fail).
def X86mtruncstore : PatFrag<(ops node:$src1, node:$src2, node:$src3),
                             (masked_store node:$src1, node:$src2, node:$src3), [{
  return cast<MaskedStoreSDNode>(N)->isTruncatingStore();
}]>;
def masked_truncstorevi8 :
  PatFrag<(ops node:$src1, node:$src2, node:$src3),
          (X86mtruncstore node:$src1, node:$src2, node:$src3), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def masked_truncstorevi16 :
  PatFrag<(ops node:$src1, node:$src2, node:$src3),
          (X86mtruncstore node:$src1, node:$src2, node:$src3), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def masked_truncstorevi32 :
  PatFrag<(ops node:$src1, node:$src2, node:$src3),
          (X86mtruncstore node:$src1, node:$src2, node:$src3), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;
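
// AVX-512 down-converting stores (VPMOVDB and friends) are the intended
// consumers: a masked VPMOVDB-style pattern, for example, matches
// masked_truncstorevi8 with a v16i32 source (illustrative; the concrete
// patterns live in X86InstrAVX512.td).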