//===-- X86InstrFragmentsSIMD.td - x86 SIMD ISA ------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides pattern fragments useful for SIMD instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// MMX Pattern Fragments
//===----------------------------------------------------------------------===//

def load_mmx : PatFrag<(ops node:$ptr), (x86mmx (load node:$ptr))>;
def bc_mmx   : PatFrag<(ops node:$in), (x86mmx (bitconvert node:$in))>;

//===----------------------------------------------------------------------===//
// SSE specific DAG Nodes.
//===----------------------------------------------------------------------===//

def SDTX86FPShiftOp : SDTypeProfile<1, 2, [ SDTCisSameAs<0, 1>,
                                            SDTCisFP<0>, SDTCisInt<2> ]>;
def SDTX86VFCMP : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<1, 2>,
                                       SDTCisFP<1>, SDTCisVT<3, i8>]>;
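
// FMIN/FMAX follow the x86 min/max instruction semantics rather than IEEE
// fmin/fmax: when the inputs are NaN or oppositely-signed zeros the result is
// taken from the second operand, so unlike FAND/FOR/FXOR below they are not
// marked commutative.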
def X86fmin    : SDNode<"X86ISD::FMIN",      SDTFPBinOp>;
def X86fmax    : SDNode<"X86ISD::FMAX",      SDTFPBinOp>;
def X86fand    : SDNode<"X86ISD::FAND",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86for     : SDNode<"X86ISD::FOR",       SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86fxor    : SDNode<"X86ISD::FXOR",      SDTFPBinOp,
                        [SDNPCommutative, SDNPAssociative]>;
def X86frsqrt  : SDNode<"X86ISD::FRSQRT",    SDTFPUnaryOp>;
def X86frcp    : SDNode<"X86ISD::FRCP",      SDTFPUnaryOp>;
def X86fsrl    : SDNode<"X86ISD::FSRL",      SDTX86FPShiftOp>;
def X86fgetsign: SDNode<"X86ISD::FGETSIGNx86",SDTFPToIntOp>;
def X86fhadd   : SDNode<"X86ISD::FHADD",     SDTFPBinOp>;
def X86fhsub   : SDNode<"X86ISD::FHSUB",     SDTFPBinOp>;
def X86hadd    : SDNode<"X86ISD::HADD",      SDTIntBinOp>;
def X86hsub    : SDNode<"X86ISD::HSUB",      SDTIntBinOp>;
def X86comi    : SDNode<"X86ISD::COMI",      SDTX86CmpTest>;
def X86ucomi   : SDNode<"X86ISD::UCOMI",     SDTX86CmpTest>;
def X86cmpss   : SDNode<"X86ISD::FSETCCss",  SDTX86Cmpss>;
def X86cmpsd   : SDNode<"X86ISD::FSETCCsd",  SDTX86Cmpsd>;
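
// PSHUFB is the SSSE3/AVX per-byte shuffle: each byte of the second operand
// selects a byte from the first operand, or yields zero when its high bit is
// set.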
def X86pshufb  : SDNode<"X86ISD::PSHUFB",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86andnp   : SDNode<"X86ISD::ANDNP",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86psign   : SDNode<"X86ISD::PSIGN",
                 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisSameAs<0,2>]>>;
def X86pextrb  : SDNode<"X86ISD::PEXTRB",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pextrw  : SDNode<"X86ISD::PEXTRW",
                 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<2>]>>;
def X86pinsrb  : SDNode<"X86ISD::PINSRB",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v16i8>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86pinsrw  : SDNode<"X86ISD::PINSRW",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v8i16>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, i32>, SDTCisPtrTy<3>]>>;
def X86insrtps : SDNode<"X86ISD::INSERTPS",
                 SDTypeProfile<1, 3, [SDTCisVT<0, v4f32>, SDTCisSameAs<0,1>,
                                      SDTCisVT<2, v4f32>, SDTCisPtrTy<3>]>>;
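
// VZEXT_MOVL moves the low element of the source vector into the result and
// zeroes the remaining elements.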
def X86vzmovl  : SDNode<"X86ISD::VZEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisSameAs<0,1>]>>;
def X86vsmovl  : SDNode<"X86ISD::VSEXT_MOVL",
                 SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisInt<1>, SDTCisInt<0>]>>;
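
// VZEXT_LOAD loads a scalar into the low element of a vector and zero-fills
// the upper elements; like a normal load it carries a chain and a memory
// operand.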
def X86vzload  : SDNode<"X86ISD::VZEXT_LOAD", SDTLoad,
                        [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def X86vshldq  : SDNode<"X86ISD::VSHLDQ",    SDTIntShiftOp>;
def X86vshrdq  : SDNode<"X86ISD::VSRLDQ",    SDTIntShiftOp>;
def X86cmpp    : SDNode<"X86ISD::CMPP",      SDTX86VFCMP>;
def X86pcmpeq  : SDNode<"X86ISD::PCMPEQ", SDTIntBinOp, [SDNPCommutative]>;
def X86pcmpgt  : SDNode<"X86ISD::PCMPGT", SDTIntBinOp>;
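
// Vector shifts.  VSHL/VSRL/VSRA take the shift amount as a vector operand
// (the count is taken from its low 64 bits), while the VSHLI/VSRLI/VSRAI
// forms below take an immediate count.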
def X86vshl    : SDNode<"X86ISD::VSHL",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisVec<2>]>>;
def X86vsrl    : SDNode<"X86ISD::VSRL",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisVec<2>]>>;
def X86vsra    : SDNode<"X86ISD::VSRA",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                      SDTCisVec<2>]>>;

def X86vshli   : SDNode<"X86ISD::VSHLI", SDTIntShiftOp>;
def X86vsrli   : SDNode<"X86ISD::VSRLI", SDTIntShiftOp>;
def X86vsrai   : SDNode<"X86ISD::VSRAI", SDTIntShiftOp>;

def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
                                          SDTCisVec<1>,
                                          SDTCisSameAs<2, 1>]>;
def X86ptest   : SDNode<"X86ISD::PTEST",  SDTX86CmpPTest>;
def X86testp   : SDNode<"X86ISD::TESTP",  SDTX86CmpPTest>;

def X86vpcom   : SDNode<"X86ISD::VPCOM",
                        SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisSameAs<0,2>, SDTCisVT<3, i8>]>>;
def X86vpcomu  : SDNode<"X86ISD::VPCOMU",
                        SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisSameAs<0,2>, SDTCisVT<3, i8>]>>;
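
// PMULUDQ multiplies the even (low) 32-bit elements and produces 64-bit
// products, so the result type is only constrained to be a vector rather
// than matching the sources (e.g. a v2i64 result from v4i32 sources).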
def X86pmuludq : SDNode<"X86ISD::PMULUDQ",
                        SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                             SDTCisSameAs<1,2>]>>;

// Specific shuffle nodes - At some point ISD::VECTOR_SHUFFLE will always get
// translated into one of the target nodes below during lowering.
// Note: this is a work in progress...
def SDTShuff1Op : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDTShuff2Op : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                SDTCisSameAs<0,2>]>;

def SDTShuff2OpI : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                 SDTCisSameAs<0,1>, SDTCisInt<2>]>;
def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                 SDTCisSameAs<0,2>, SDTCisInt<3>]>;
def SDTVPermv : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVT<1, v8i32>,
                              SDTCisSameAs<0,2>]>;

def SDTVBroadcast : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDTBlend : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                             SDTCisSameAs<1,2>, SDTCisVT<3, i32>]>;

def X86PAlign : SDNode<"X86ISD::PALIGN", SDTShuff3OpI>;

def X86PShufd  : SDNode<"X86ISD::PSHUFD", SDTShuff2OpI>;
def X86PShufhw : SDNode<"X86ISD::PSHUFHW", SDTShuff2OpI>;
def X86PShuflw : SDNode<"X86ISD::PSHUFLW", SDTShuff2OpI>;

def X86Shufp : SDNode<"X86ISD::SHUFP", SDTShuff3OpI>;

def X86Movddup  : SDNode<"X86ISD::MOVDDUP", SDTShuff1Op>;
def X86Movshdup : SDNode<"X86ISD::MOVSHDUP", SDTShuff1Op>;
def X86Movsldup : SDNode<"X86ISD::MOVSLDUP", SDTShuff1Op>;

def X86Movsd : SDNode<"X86ISD::MOVSD", SDTShuff2Op>;
def X86Movss : SDNode<"X86ISD::MOVSS", SDTShuff2Op>;

def X86Movlhps : SDNode<"X86ISD::MOVLHPS", SDTShuff2Op>;
def X86Movlhpd : SDNode<"X86ISD::MOVLHPD", SDTShuff2Op>;
def X86Movhlps : SDNode<"X86ISD::MOVHLPS", SDTShuff2Op>;

def X86Movlps : SDNode<"X86ISD::MOVLPS", SDTShuff2Op>;
def X86Movlpd : SDNode<"X86ISD::MOVLPD", SDTShuff2Op>;

def X86Unpckl : SDNode<"X86ISD::UNPCKL", SDTShuff2Op>;
def X86Unpckh : SDNode<"X86ISD::UNPCKH", SDTShuff2Op>;

def X86VPermilp : SDNode<"X86ISD::VPERMILP", SDTShuff2OpI>;
def X86VPermv   : SDNode<"X86ISD::VPERMV", SDTVPermv>;
def X86VPermi   : SDNode<"X86ISD::VPERMI", SDTShuff2OpI>;

def X86VPerm2x128 : SDNode<"X86ISD::VPERM2X128", SDTShuff3OpI>;

def X86VBroadcast : SDNode<"X86ISD::VBROADCAST", SDTVBroadcast>;

def X86Blendpw : SDNode<"X86ISD::BLENDPW", SDTBlend>;
def X86Blendps : SDNode<"X86ISD::BLENDPS", SDTBlend>;
def X86Blendpd : SDNode<"X86ISD::BLENDPD", SDTBlend>;

//===----------------------------------------------------------------------===//
// SSE Complex Patterns
//===----------------------------------------------------------------------===//

// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements.  These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand,
                                   SDNPWantRoot]>;
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad, SDNPMemOperand,
                                   SDNPWantRoot]>;
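
// The five address operands of these memory references are the standard x86
// addressing mode: base register, scale, index register, displacement, and
// segment.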
def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
  let OperandType = "OPERAND_MEMORY";
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc_nosp, i32imm, i8imm);
  let ParserMatchClass = X86MemAsmOperand;
  let OperandType = "OPERAND_MEMORY";
}

//===----------------------------------------------------------------------===//
// SSE pattern fragments
//===----------------------------------------------------------------------===//

// 128-bit load pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>;
def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>;
def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;

// 256-bit load pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>;
def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>;
def loadv4i64 : PatFrag<(ops node:$ptr), (v4i64 (load node:$ptr))>;

// Like 'store', but always requires 128-bit vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'store', but always requires 256-bit vector alignment.
def alignedstore256 : PatFrag<(ops node:$val, node:$ptr),
                              (store node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getAlignment() >= 32;
}]>;

// Like 'load', but always requires 128-bit vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'X86vzload', but always requires 128-bit vector alignment.
def alignedX86vzload : PatFrag<(ops node:$ptr), (X86vzload node:$ptr), [{
  return cast<MemSDNode>(N)->getAlignment() >= 16;
}]>;

// Like 'load', but always requires 256-bit vector alignment.
def alignedload256 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 32;
}]>;

def alignedloadfsf32 : PatFrag<(ops node:$ptr),
                               (f32 (alignedload node:$ptr))>;
def alignedloadfsf64 : PatFrag<(ops node:$ptr),
                               (f64 (alignedload node:$ptr))>;

// 128-bit aligned load pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def alignedloadv4f32 : PatFrag<(ops node:$ptr),
                               (v4f32 (alignedload node:$ptr))>;
def alignedloadv2f64 : PatFrag<(ops node:$ptr),
                               (v2f64 (alignedload node:$ptr))>;
def alignedloadv2i64 : PatFrag<(ops node:$ptr),
                               (v2i64 (alignedload node:$ptr))>;

// 256-bit aligned load pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def alignedloadv8f32 : PatFrag<(ops node:$ptr),
                               (v8f32 (alignedload256 node:$ptr))>;
def alignedloadv4f64 : PatFrag<(ops node:$ptr),
                               (v4f64 (alignedload256 node:$ptr))>;
def alignedloadv4i64 : PatFrag<(ops node:$ptr),
                               (v4i64 (alignedload256 node:$ptr))>;

// Like 'load', but uses special alignment checks suitable for use in
// memory operands in most SSE instructions, which are required to
// be naturally aligned on some targets but not on others.  If the subtarget
// allows unaligned accesses, match any load, though this may require
// setting a feature bit in the processor (on startup, for example).
// Opteron 10h and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return Subtarget->hasVectorUAMem()
      || cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>;

// 128-bit memop pattern fragments
// NOTE: all 128-bit integer vector loads are promoted to v2i64
def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>;
def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>;
def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;

// 256-bit memop pattern fragments
// NOTE: all 256-bit integer vector loads are promoted to v4i64
def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>;
def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>;
def memopv4i64 : PatFrag<(ops node:$ptr), (v4i64 (memop node:$ptr))>;

// SSSE3 uses MMX registers for some instructions.  They aren't aligned on a
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
  return cast<LoadSDNode>(N)->getAlignment() >= 8;
}]>;

def memopmmx : PatFrag<(ops node:$ptr), (x86mmx (memop64 node:$ptr))>;

// Like 'store', but requires the non-temporal bit to be set
def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                               (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal();
  return false;
}]>;

def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                      (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() && !ST->isTruncatingStore() &&
           ST->getAddressingMode() == ISD::UNINDEXED &&
           ST->getAlignment() >= 16;
  return false;
}]>;

def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                        (st node:$val, node:$ptr), [{
  if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
    return ST->isNonTemporal() &&
           ST->getAlignment() < 16;
  return false;
}]>;

// 128-bit bitconvert pattern fragments
def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>;
def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>;
def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>;
def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>;
def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>;
def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;

// 256-bit bitconvert pattern fragments
def bc_v32i8  : PatFrag<(ops node:$in), (v32i8 (bitconvert node:$in))>;
def bc_v16i16 : PatFrag<(ops node:$in), (v16i16 (bitconvert node:$in))>;
def bc_v8i32  : PatFrag<(ops node:$in), (v8i32 (bitconvert node:$in))>;
def bc_v4i64  : PatFrag<(ops node:$in), (v4i64 (bitconvert node:$in))>;
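
// Fragments matching a scalar load zero-extended into the low element of a
// 128-bit vector; the bitconvert lets them be used at other vector types.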
def vzmovl_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzmovl
                             (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
def vzmovl_v4i32 : PatFrag<(ops node:$src),
                           (bitconvert (v4i32 (X86vzmovl
                             (v4i32 (scalar_to_vector (loadi32 node:$src))))))>;

def vzload_v2i64 : PatFrag<(ops node:$src),
                           (bitconvert (v2i64 (X86vzload node:$src)))>;

def fp32imm0 : PatLeaf<(f32 fpimm), [{
  return N->isExactlyValue(+0.0);
}]>;

// BYTE_imm - Transform bit immediates into byte immediates.
def BYTE_imm : SDNodeXForm<imm, [{
  // Transformation function: imm >> 3
  return getI32Imm(N->getZExtValue() >> 3);
}]>;

// EXTRACT_get_vextractf128_imm xform function: convert extract_subvector index
// to VEXTRACTF128 imm.
def EXTRACT_get_vextractf128_imm : SDNodeXForm<extract_subvector, [{
  return getI8Imm(X86::getExtractVEXTRACTF128Immediate(N));
}]>;

// INSERT_get_vinsertf128_imm xform function: convert insert_subvector index to
// VINSERTF128 imm.
def INSERT_get_vinsertf128_imm : SDNodeXForm<insert_subvector, [{
  return getI8Imm(X86::getInsertVINSERTF128Immediate(N));
}]>;
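
// Match subvector extracts/inserts whose index is a valid 128-bit lane index,
// converting the element index to the VEXTRACTF128/VINSERTF128 immediate with
// the xforms above.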
def vextractf128_extract : PatFrag<(ops node:$bigvec, node:$index),
                                   (extract_subvector node:$bigvec,
                                                      node:$index), [{
  return X86::isVEXTRACTF128Index(N);
}], EXTRACT_get_vextractf128_imm>;

def vinsertf128_insert : PatFrag<(ops node:$bigvec, node:$smallvec,
                                  node:$index),
                                 (insert_subvector node:$bigvec, node:$smallvec,
                                                   node:$index), [{
  return X86::isVINSERTF128Index(N);
}], INSERT_get_vinsertf128_imm>;