//===-- X86InstrFragmentsSIMD.td - x86 SIMD ISA ------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides pattern fragments useful for SIMD instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//

def load_mmx : PatFrag<(ops node:$ptr), (x86mmx (load node:$ptr))>;
def bc_mmx   : PatFrag<(ops node:$in), (x86mmx (bitconvert node:$in))>;

//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;
def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
                                       SDTCisFP<1>, SDTCisVT<3, i8>]>;

def X86umin    : SDNode<"X86ISD::UMIN",      SDTIntBinOp>;
def X86umax    : SDNode<"X86ISD::UMAX",      SDTIntBinOp>;
def X86smin    : SDNode<"X86ISD::SMIN",      SDTIntBinOp>;
def X86smax    : SDNode<"X86ISD::SMAX",      SDTIntBinOp>;

def X86fmin    : SDNode<"X86ISD::FMIN",      SDTFPBinOp>;
def X86fmax    : SDNode<"X86ISD::FMAX",      SDTFPBinOp>;

// Commutative and Associative FMIN and FMAX.
def X86fminc   : SDNode<"X86ISD::FMINC", SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fmaxc   : SDNode<"X86ISD::FMAXC", SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;

def X86fand    : SDNode<"X86ISD::FAND",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86for     : SDNode<"X86ISD::FOR",       SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fxor    : SDNode<"X86ISD::FXOR",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fandn   : SDNode<"X86ISD::FANDN",     SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt  : SDNode<"X86ISD::FRSQRT",    SDTFPUnaryOp>;
def X86frcp    : SDNode<"X86ISD::FRCP",      SDTFPUnaryOp>;
def X86fsrl    : SDNode<"X86ISD::FSRL",      SDTX86FPShiftOp>;
def X86fgetsign: SDNode<"X86ISD::FGETSIGNx86", SDTFPToIntOp>;
def X86fhadd   : SDNode<"X86ISD::FHADD",     SDTFPBinOp>;
def X86fhsub   : SDNode<"X86ISD::FHSUB",     SDTFPBinOp>;
def X86hadd    : SDNode<"X86ISD::HADD",      SDTIntBinOp>;
def X86hsub    : SDNode<"X86ISD::HSUB",      SDTIntBinOp>;
def X86comi    : SDNode<"X86ISD::COMI",      SDTX86CmpTest>;
def X86ucomi   : SDNode<"X86ISD::UCOMI",     SDTX86CmpTest>;
def X86cmpss   : SDNode<"X86ISD::FSETCCss",  SDTX86Cmpss>;
def X86cmpsd   : SDNode<"X86ISD::FSETCCsd",  SDTX86Cmpsd>;
def X86pshufb  : SDNode<"X86ISD::PSHUFB",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86andnp   : SDNode<"X86ISD::ANDNP",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86psign   : SDNode<"X86ISD::PSIGN",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86pextrb  : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw  : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb  : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw  : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insrtps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
def X86vzmovl  : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;

def X86vzmovly : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                      SDTCisOpSmallerThanOp<1, 0> ]>>;

def X86vsmovl  : SDNode<"X86ISD::VSEXT_MOVL",
                 SDTypeProfile<1, 1,
                 [SDTCisVec<0>, SDTCisInt<1>, SDTCisInt<0>]>>;

def X86vzload  : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                        [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;

def X86vzext   : SDNode<"X86ISD::VZEXT",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisInt<0>, SDTCisInt<1>]>>;

def X86vsext   : SDNode<"X86ISD::VSEXT",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisInt<0>, SDTCisInt<1>]>>;

def X86vfpext  : SDNode<"X86ISD::VFPEXT",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisFP<0>, SDTCisFP<1>]>>;
def X86vfpround: SDNode<"X86ISD::VFPROUND",
                        SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisFP<0>, SDTCisFP<1>]>>;

def X86vshldq  : SDNode<"X86ISD::VSHLDQ",    SDTIntShiftOp>;
def X86vshrdq  : SDNode<"X86ISD::VSRLDQ",    SDTIntShiftOp>;
def X86cmpp    : SDNode<"X86ISD::CMPP",      SDTX86VFCMP>;
def X86pcmpeq  : SDNode<"X86ISD::PCMPEQ", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpgt  : SDNode<"X86ISD::PCMPGT", SDTIntBinOp>;

def X86vshl    : SDNode<"X86ISD::VSHL",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisVec<2>]>>;
def X86vsrl    : SDNode<"X86ISD::VSRL",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisVec<2>]>>;
def X86vsra    : SDNode<"X86ISD::VSRA",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisVec<2>]>>;

def X86vshli   : SDNode<"X86ISD::VSHLI", SDTIntShiftOp>;
def X86vsrli   : SDNode<"X86ISD::VSRLI", SDTIntShiftOp>;
def X86vsrai   : SDNode<"X86ISD::VSRAI", SDTIntShiftOp>;

def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                          SDTCisVec<1>,
                                          SDTCisSameAs<2, 1>]>;
def X86subus   : SDNode<"X86ISD::SUBUS", SDTIntBinOp>;
def X86ptest   : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
def X86testp   : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>;
def X86kortest : SDNode<"X86ISD::KORTEST", SDTX86CmpPTest>;
def X86ktest   : SDNode<"X86ISD::KTEST", SDTX86CmpPTest>;

def X86pmuludq : SDNode<"X86ISD::PMULUDQ",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisSameAs<1,2>]>>;

// Specific shuffle nodes - At some point ISD::VECTOR_SHUFFLE will always get
// translated into one of the target nodes below during lowering.
// Note: this is a work in progress...
def SDTShuff1Op : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDTShuff2Op : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                       SDTCisSameAs<0,2>]>;

def SDTShuff2OpI : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                        SDTCisSameAs<0,1>, SDTCisInt<2>]>;
def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                        SDTCisSameAs<0,2>, SDTCisInt<3>]>;

def SDTVBroadcast : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDTBlend : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                    SDTCisSameAs<1,2>, SDTCisVT<3, i32>]>;

def SDTFma : SDTypeProfile<1, 3, [SDTCisSameAs<0,1>,
                                  SDTCisSameAs<1,2>, SDTCisSameAs<1,3>]>;

def X86PAlignr : SDNode<"X86ISD::PALIGNR", SDTShuff3OpI>;

def X86PShufd  : SDNode<"X86ISD::PSHUFD", SDTShuff2OpI>;
def X86PShufhw : SDNode<"X86ISD::PSHUFHW", SDTShuff2OpI>;
def X86PShuflw : SDNode<"X86ISD::PSHUFLW", SDTShuff2OpI>;

def X86Shufp   : SDNode<"X86ISD::SHUFP", SDTShuff3OpI>;

def X86Movddup  : SDNode<"X86ISD::MOVDDUP", SDTShuff1Op>;
def X86Movshdup : SDNode<"X86ISD::MOVSHDUP", SDTShuff1Op>;
def X86Movsldup : SDNode<"X86ISD::MOVSLDUP", SDTShuff1Op>;

def X86Movsd : SDNode<"X86ISD::MOVSD", SDTShuff2Op>;
def X86Movss : SDNode<"X86ISD::MOVSS", SDTShuff2Op>;

def X86Movlhps : SDNode<"X86ISD::MOVLHPS", SDTShuff2Op>;
def X86Movlhpd : SDNode<"X86ISD::MOVLHPD", SDTShuff2Op>;
def X86Movhlps : SDNode<"X86ISD::MOVHLPS", SDTShuff2Op>;

def X86Movlps : SDNode<"X86ISD::MOVLPS", SDTShuff2Op>;
def X86Movlpd : SDNode<"X86ISD::MOVLPD", SDTShuff2Op>;

def X86Unpckl : SDNode<"X86ISD::UNPCKL", SDTShuff2Op>;
def X86Unpckh : SDNode<"X86ISD::UNPCKH", SDTShuff2Op>;

def X86VPermilp : SDNode<"X86ISD::VPERMILP", SDTShuff2OpI>;
def X86VPermv   : SDNode<"X86ISD::VPERMV", SDTShuff2Op>;
def X86VPermi   : SDNode<"X86ISD::VPERMI", SDTShuff2OpI>;

def X86VPerm2x128 : SDNode<"X86ISD::VPERM2X128", SDTShuff3OpI>;

def X86VBroadcast : SDNode<"X86ISD::VBROADCAST", SDTVBroadcast>;

def X86Blendi   : SDNode<"X86ISD::BLENDI", SDTBlend>;
def X86Fmadd    : SDNode<"X86ISD::FMADD",    SDTFma>;
def X86Fnmadd   : SDNode<"X86ISD::FNMADD",   SDTFma>;
def X86Fmsub    : SDNode<"X86ISD::FMSUB",    SDTFma>;
def X86Fnmsub   : SDNode<"X86ISD::FNMSUB",   SDTFma>;
def X86Fmaddsub : SDNode<"X86ISD::FMADDSUB", SDTFma>;
def X86Fmsubadd : SDNode<"X86ISD::FMSUBADD", SDTFma>;

def SDT_PCMPISTRI : SDTypeProfile<2, 3, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
                                         SDTCisVT<2, v16i8>, SDTCisVT<3, v16i8>,
                                         SDTCisVT<4, i8>]>;
def SDT_PCMPESTRI : SDTypeProfile<2, 5, [SDTCisVT<0, i32>, SDTCisVT<1, i32>,
                                         SDTCisVT<2, v16i8>, SDTCisVT<3, i32>,
                                         SDTCisVT<4, v16i8>, SDTCisVT<5, i32>,
                                         SDTCisVT<6, i8>]>;

def X86pcmpistri : SDNode<"X86ISD::PCMPISTRI", SDT_PCMPISTRI>;
def X86pcmpestri : SDNode<"X86ISD::PCMPESTRI", SDT_PCMPESTRI>;

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand,
                                   SDNPWantRoot]>;
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand,
                                   SDNPWantRoot]>;
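
// Illustrative sketch, not part of the original file: a hypothetical pattern in
// the style of the scalar SSE intrinsic definitions, folding a scalar load into
// the low vector element through sse_load_f32 (the instruction and intrinsic
// names below are placeholders; the real patterns live in X86InstrSSE.td):
//   def : Pat<(v4f32 (int_x86_sse_sqrt_ss sse_load_f32:$src)),
//             (SQRTSSm_Int sse_load_f32:$src)>;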

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
  let OperandType = "OPERAND_MEMORY";
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
  let OperandType = "OPERAND_MEMORY";
}
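
// Illustrative note, not part of the original file: ssmem/sdmem serve as the
// scalar f32/f64 memory operands of the 'ss'/'sd' intrinsic instruction forms,
// e.g. a hypothetical operand list of the form (ins VR128:$src1, ssmem:$src2).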

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

// 128-bit load pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def loadv4f32    : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64    : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv2i64    : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// 256-bit load pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def loadv8f32    : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
def loadv4f64    : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
def loadv4i64    : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;

// 128-/256-bit extload pattern fragments
def extloadv2f32 : PatFrag<(ops node:$ptr), (v2f64 (extloadvf32 node:$ptr))>;
def extloadv4f32 : PatFrag<(ops node:$ptr), (v4f64 (extloadvf32 node:$ptr))>;

// Like 'store', but always requires 128-bit vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'store', but always requires 256-bit vector alignment.
def alignedstore256 : PatFrag<(ops node:$val, node:$ptr),
                              (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 32;
}]>;

// Like 'load', but always requires 128-bit vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'X86vzload', but always requires 128-bit vector alignment.
def alignedX86vzload : PatFrag<(ops node:$ptr), (X86vzload node:$ptr), [{
  return cast<MemSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'load', but always requires 256-bit vector alignment.
def alignedload256 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 32;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr),
                               (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
                               (f64 (alignedload node:$ptr))>;

// 128-bit aligned load pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
                               (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
                               (v2f64 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
                               (v2i64 (alignedload node:$ptr))>;

// 256-bit aligned load pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def alignedloadv8f32 : PatFrag<(ops node:$ptr),
                               (v8f32 (alignedload256 node:$ptr))>;
def alignedloadv4f64 : PatFrag<(ops node:$ptr),
                               (v4f64 (alignedload256 node:$ptr))>;
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
                               (v4i64 (alignedload256 node:$ptr))>;

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others. If the subtarget
// allows unaligned accesses, match any load, though this may require
// setting a feature bit in the processor (on startup, for example).
// Opteron 10h and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return Subtarget->hasVectorUAMem()
      || cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;
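
// Illustrative sketch, not part of the original file: memop-based fragments
// such as memopv4f32 are intended to be folded into selection patterns, e.g. a
// hypothetical source pattern of the form
//   (v4f32 (X86fmin VR128:$src1, (memopv4f32 addr:$src2)))
// which matches either an aligned load or, when the subtarget reports
// hasVectorUAMem(), an unaligned one.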

def memopfsf32 : PatFrag<(ops node:$ptr), (f32   (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64   (memop node:$ptr))>;

// 128-bit memop pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;

// 256-bit memop pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
def memopv4i64 : PatFrag<(ops node:$ptr), (v4i64 (memop node:$ptr))>;

// SSSE3 uses MMX registers for some instructions. They aren't aligned on a
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;

def memopmmx : PatFrag<(ops node:$ptr), (x86mmx (memop64 node:$ptr))>;

// Like 'store', but requires the non-temporal bit to be set
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                               (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal();
  return false;
}]>;

def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                      (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() && !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                        (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() &&
           ST->getAlignment() < 16;
  return false;
}]>;
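
// Illustrative sketch, not part of the original file: a hypothetical selection
// pattern routing an aligned non-temporal vector store to a MOVNT-style
// instruction (MOVNTPSmr is assumed here as the SSE store definition):
//   def : Pat<(alignednontemporalstore (v4f32 VR128:$src), addr:$dst),
//             (MOVNTPSmr addr:$dst, VR128:$src)>;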

// 128-bit bitconvert pattern fragments
def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

// 256-bit bitconvert pattern fragments
def bc_v32i8  : PatFrag<(ops node:$in), (v32i8 (bitconvert node:$in))>;
def bc_v16i16 : PatFrag<(ops node:$in), (v16i16 (bitconvert node:$in))>;
def bc_v8i32  : PatFrag<(ops node:$in), (v8i32 (bitconvert node:$in))>;
def bc_v4i64  : PatFrag<(ops node:$in), (v4i64 (bitconvert node:$in))>;

def vzmovl_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzmovl
                             (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
def vzmovl_v4i32 : PatFrag<(ops node:$src),
                           (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;

def vzload_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzload node:$src)))>;

def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getZExtValue() >> 3);
}]>;
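
// Illustrative note, not part of the original file: BYTE_imm rewrites a bit
// count into a byte count (an immediate of 32 bits becomes 4 bytes), so a
// hypothetical pattern can feed it straight into a byte-shift instruction, e.g.
//   (PSRLDQri VR128:$src1, (BYTE_imm imm:$src2))
// where PSRLDQri stands in for the byte-shift instruction definition.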

// EXTRACT_get_vextract128_imm xform function: convert extract_subvector index
// to VEXTRACTF128/VEXTRACTI128 imm.
def EXTRACT_get_vextract128_imm : SDNodeXForm<extract_subvector, [{
  return getI8Imm(X86::getExtractVEXTRACT128Immediate(N));
}]>;

// INSERT_get_vinsert128_imm xform function: convert insert_subvector index to
// VINSERTF128/VINSERTI128 imm.
def INSERT_get_vinsert128_imm : SDNodeXForm<insert_subvector, [{
  return getI8Imm(X86::getInsertVINSERT128Immediate(N));
}]>;

// EXTRACT_get_vextract256_imm xform function: convert extract_subvector index
// to VEXTRACTF64x4 imm.
def EXTRACT_get_vextract256_imm : SDNodeXForm<extract_subvector, [{
  return getI8Imm(X86::getExtractVEXTRACT256Immediate(N));
}]>;

// INSERT_get_vinsert256_imm xform function: convert insert_subvector index to
// VINSERTF64x4 imm.
def INSERT_get_vinsert256_imm : SDNodeXForm<insert_subvector, [{
  return getI8Imm(X86::getInsertVINSERT256Immediate(N));
}]>;

def vextract128_extract : PatFrag<(ops node:$bigvec, node:$index),
                                  (extract_subvector node:$bigvec,
                                                     node:$index), [{
  return X86::isVEXTRACT128Index(N);
}], EXTRACT_get_vextract128_imm>;

def vinsert128_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
                                     node:$index),
                                (insert_subvector node:$bigvec, node:$smallvec,
                                                  node:$index), [{
  return X86::isVINSERT128Index(N);
}], INSERT_get_vinsert128_imm>;

def vextract256_extract : PatFrag<(ops node:$bigvec, node:$index),
                                  (extract_subvector node:$bigvec,
                                                     node:$index), [{
  return X86::isVEXTRACT256Index(N);
}], EXTRACT_get_vextract256_imm>;

def vinsert256_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
                                     node:$index),
                                (insert_subvector node:$bigvec, node:$smallvec,
                                                  node:$index), [{
  return X86::isVINSERT256Index(N);
}], INSERT_get_vinsert256_imm>;
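
// Illustrative sketch, not part of the original file: these fragments pair a
// match predicate with an index xform, so a hypothetical AVX pattern such as
//   def : Pat<(vinsert128_insert:$ins (v8f32 VR256:$src1), (v4f32 VR128:$src2),
//                                     (iPTR imm)),
//             (VINSERTF128rr VR256:$src1, VR128:$src2,
//                            (INSERT_get_vinsert128_imm VR256:$ins))>;
// matches only VINSERTF128-compatible indices and converts the subvector index
// into the instruction immediate (VINSERTF128rr is assumed as the AVX def).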