//===-- PPCInstrAltivec.td - The PowerPC Altivec Extension -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the Altivec extension to the PowerPC instruction set.
//
//===----------------------------------------------------------------------===//

// *********************************** NOTE ***********************************
// ** For POWER8 Little Endian, the VSX swap optimization relies on knowing  **
// ** which VMX and VSX instructions are lane-sensitive and which are not.   **
// ** A lane-sensitive instruction relies, implicitly or explicitly, on      **
// ** whether lanes are numbered from left to right.  An instruction like    **
// ** VADDFP is not lane-sensitive, because each lane of the result vector   **
// ** relies only on the corresponding lane of the source vectors.  However, **
// ** an instruction like VMULESB is lane-sensitive, because "even" and      **
// ** "odd" lanes are different for big-endian and little-endian numbering.  **
// **                                                                        **
// ** When adding new VMX and VSX instructions, please consider whether they **
// ** are lane-sensitive.  If so, they must be added to a switch statement   **
// ** in PPCVSXSwapRemoval::gatherVectorInstructions().                      **
// ****************************************************************************
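
// As an illustration of lane sensitivity: vmulesb multiplies the "even" byte
// elements of its sources.  With big-endian numbering those are elements
// 0, 2, ..., 14 counted from the left; with little-endian numbering the same
// physical lanes are elements 15, 13, ..., 1, i.e. the odd ones.  The swap
// optimization must therefore treat VMULESB specially, while element-wise
// operations like VADDFP need no such handling.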
//===----------------------------------------------------------------------===//
// Altivec transformation functions and pattern fragments.
//

// Since we canonicalize buildvectors to v16i8, all vnots "-1" operands will be
// of that type.
def vnot_ppc : PatFrag<(ops node:$in),
                       (xor node:$in, (bitconvert (v16i8 immAllOnesV)))>;
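
// Consequently, a pattern such as (vnot_ppc v4i32:$vA) below matches an xor
// of $vA with an all-ones vector regardless of element type, since the
// all-ones build_vector has been canonicalized to v16i8 and bitconverted.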
def vpkuhum_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                              (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), 0, *CurDAG);
}]>;
def vpkuwum_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                              (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), 0, *CurDAG);
}]>;
def vpkudum_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                              (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUDUMShuffleMask(cast<ShuffleVectorSDNode>(N), 0, *CurDAG);
}]>;
def vpkuhum_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                    (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), 1, *CurDAG);
}]>;
def vpkuwum_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                    (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), 1, *CurDAG);
}]>;
def vpkudum_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                    (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUDUMShuffleMask(cast<ShuffleVectorSDNode>(N), 1, *CurDAG);
}]>;
// These fragments are provided for little-endian, where the inputs must be
// swapped for correct semantics.
def vpkuhum_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                      (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), 2, *CurDAG);
}]>;
def vpkuwum_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                      (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), 2, *CurDAG);
}]>;
def vpkudum_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                      (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVPKUDUMShuffleMask(cast<ShuffleVectorSDNode>(N), 2, *CurDAG);
}]>;
def vmrglb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 0, *CurDAG);
}]>;
def vmrglh_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 0, *CurDAG);
}]>;
def vmrglw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 0, *CurDAG);
}]>;
def vmrghb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 0, *CurDAG);
}]>;
def vmrghh_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 0, *CurDAG);
}]>;
def vmrghw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 0, *CurDAG);
}]>;
def vmrglb_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                   (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 1, *CurDAG);
}]>;
def vmrglh_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                   (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 1, *CurDAG);
}]>;
def vmrglw_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                   (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 1, *CurDAG);
}]>;
def vmrghb_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                   (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 1, *CurDAG);
}]>;
def vmrghh_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                   (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 1, *CurDAG);
}]>;
def vmrghw_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                   (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 1, *CurDAG);
}]>;
// These fragments are provided for little-endian, where the inputs must be
// swapped for correct semantics.
def vmrglb_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                     (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 2, *CurDAG);
}]>;
def vmrglh_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                     (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 2, *CurDAG);
}]>;
def vmrglw_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                     (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 2, *CurDAG);
}]>;
def vmrghb_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                     (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, 2, *CurDAG);
}]>;
def vmrghh_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                     (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, 2, *CurDAG);
}]>;
def vmrghw_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                     (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, 2, *CurDAG);
}]>;
def VSLDOI_get_imm : SDNodeXForm<vector_shuffle, [{
  return getI32Imm(PPC::isVSLDOIShuffleMask(N, 0, *CurDAG), SDLoc(N));
}]>;
def vsldoi_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVSLDOIShuffleMask(N, 0, *CurDAG) != -1;
}], VSLDOI_get_imm>;

/// VSLDOI_unary* - These are used to match vsldoi(X,X), which is turned into
/// vector_shuffle(X,undef,mask) by the dag combiner.
def VSLDOI_unary_get_imm : SDNodeXForm<vector_shuffle, [{
  return getI32Imm(PPC::isVSLDOIShuffleMask(N, 1, *CurDAG), SDLoc(N));
}]>;
def vsldoi_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                   (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVSLDOIShuffleMask(N, 1, *CurDAG) != -1;
}], VSLDOI_unary_get_imm>;

/// VSLDOI_swapped* - These fragments are provided for little-endian, where
/// the inputs must be swapped for correct semantics.
def VSLDOI_swapped_get_imm : SDNodeXForm<vector_shuffle, [{
  return getI32Imm(PPC::isVSLDOIShuffleMask(N, 2, *CurDAG), SDLoc(N));
}]>;
def vsldoi_swapped_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                                     (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isVSLDOIShuffleMask(N, 2, *CurDAG) != -1;
}], VSLDOI_swapped_get_imm>;
// VSPLT*_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
def VSPLTB_get_imm : SDNodeXForm<vector_shuffle, [{
  return getI32Imm(PPC::getVSPLTImmediate(N, 1, *CurDAG), SDLoc(N));
}]>;
def vspltb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 1);
}], VSPLTB_get_imm>;
def VSPLTH_get_imm : SDNodeXForm<vector_shuffle, [{
  return getI32Imm(PPC::getVSPLTImmediate(N, 2, *CurDAG), SDLoc(N));
}]>;
def vsplth_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 2);
}], VSPLTH_get_imm>;
def VSPLTW_get_imm : SDNodeXForm<vector_shuffle, [{
  return getI32Imm(PPC::getVSPLTImmediate(N, 4, *CurDAG), SDLoc(N));
}]>;
def vspltw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
                             (vector_shuffle node:$lhs, node:$rhs), [{
  return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 4);
}], VSPLTW_get_imm>;
// VSPLTISB_get_imm xform function: convert build_vector to VSPLTISB imm.
def VSPLTISB_get_imm : SDNodeXForm<build_vector, [{
  return PPC::get_VSPLTI_elt(N, 1, *CurDAG);
}]>;
def vecspltisb : PatLeaf<(build_vector), [{
  return PPC::get_VSPLTI_elt(N, 1, *CurDAG).getNode() != 0;
}], VSPLTISB_get_imm>;

// VSPLTISH_get_imm xform function: convert build_vector to VSPLTISH imm.
def VSPLTISH_get_imm : SDNodeXForm<build_vector, [{
  return PPC::get_VSPLTI_elt(N, 2, *CurDAG);
}]>;
def vecspltish : PatLeaf<(build_vector), [{
  return PPC::get_VSPLTI_elt(N, 2, *CurDAG).getNode() != 0;
}], VSPLTISH_get_imm>;

// VSPLTISW_get_imm xform function: convert build_vector to VSPLTISW imm.
def VSPLTISW_get_imm : SDNodeXForm<build_vector, [{
  return PPC::get_VSPLTI_elt(N, 4, *CurDAG);
}]>;
def vecspltisw : PatLeaf<(build_vector), [{
  return PPC::get_VSPLTI_elt(N, 4, *CurDAG).getNode() != 0;
}], VSPLTISW_get_imm>;
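
// These PatLeafs succeed only when the build_vector is a splat whose value
// fits in a sign-extended 5-bit immediate, which is exactly what the
// vspltis[bhw] instructions can materialize (e.g., vspltisb 5 writes the
// byte 0x05 into all sixteen elements).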
//===----------------------------------------------------------------------===//
// Helpers for defining instructions that directly correspond to intrinsics.

// VA1a_Int_Ty - A VAForm_1a intrinsic definition of specific type.
class VA1a_Int_Ty<bits<6> xo, string opc, Intrinsic IntID, ValueType Ty>
  : VAForm_1a<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, vrrc:$vC),
              !strconcat(opc, " $vD, $vA, $vB, $vC"), IIC_VecFP,
              [(set Ty:$vD, (IntID Ty:$vA, Ty:$vB, Ty:$vC))]>;

// VA1a_Int_Ty2 - A VAForm_1a intrinsic definition where the type of the
// inputs doesn't match the type of the output.
class VA1a_Int_Ty2<bits<6> xo, string opc, Intrinsic IntID, ValueType OutTy,
                   ValueType InTy>
  : VAForm_1a<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, vrrc:$vC),
              !strconcat(opc, " $vD, $vA, $vB, $vC"), IIC_VecFP,
              [(set OutTy:$vD, (IntID InTy:$vA, InTy:$vB, InTy:$vC))]>;

// VA1a_Int_Ty3 - A VAForm_1a intrinsic definition where there are two
// input types and an output type.
class VA1a_Int_Ty3<bits<6> xo, string opc, Intrinsic IntID, ValueType OutTy,
                   ValueType In1Ty, ValueType In2Ty>
  : VAForm_1a<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, vrrc:$vC),
              !strconcat(opc, " $vD, $vA, $vB, $vC"), IIC_VecFP,
              [(set OutTy:$vD,
                    (IntID In1Ty:$vA, In1Ty:$vB, In2Ty:$vC))]>;
// VX1_Int_Ty - A VXForm_1 intrinsic definition of specific type.
class VX1_Int_Ty<bits<11> xo, string opc, Intrinsic IntID, ValueType Ty>
  : VXForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
             !strconcat(opc, " $vD, $vA, $vB"), IIC_VecFP,
             [(set Ty:$vD, (IntID Ty:$vA, Ty:$vB))]>;

// VX1_Int_Ty2 - A VXForm_1 intrinsic definition where the type of the
// inputs doesn't match the type of the output.
class VX1_Int_Ty2<bits<11> xo, string opc, Intrinsic IntID, ValueType OutTy,
                  ValueType InTy>
  : VXForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
             !strconcat(opc, " $vD, $vA, $vB"), IIC_VecFP,
             [(set OutTy:$vD, (IntID InTy:$vA, InTy:$vB))]>;

// VX1_Int_Ty3 - A VXForm_1 intrinsic definition where there are two
// input types and an output type.
class VX1_Int_Ty3<bits<11> xo, string opc, Intrinsic IntID, ValueType OutTy,
                  ValueType In1Ty, ValueType In2Ty>
  : VXForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
             !strconcat(opc, " $vD, $vA, $vB"), IIC_VecFP,
             [(set OutTy:$vD, (IntID In1Ty:$vA, In2Ty:$vB))]>;

// VX2_Int_SP - A VXForm_2 intrinsic definition of vector single-precision type.
class VX2_Int_SP<bits<11> xo, string opc, Intrinsic IntID>
  : VXForm_2<xo, (outs vrrc:$vD), (ins vrrc:$vB),
             !strconcat(opc, " $vD, $vB"), IIC_VecFP,
             [(set v4f32:$vD, (IntID v4f32:$vB))]>;

// VX2_Int_Ty2 - A VXForm_2 intrinsic definition where the type of the
// inputs doesn't match the type of the output.
class VX2_Int_Ty2<bits<11> xo, string opc, Intrinsic IntID, ValueType OutTy,
                  ValueType InTy>
  : VXForm_2<xo, (outs vrrc:$vD), (ins vrrc:$vB),
             !strconcat(opc, " $vD, $vB"), IIC_VecFP,
             [(set OutTy:$vD, (IntID InTy:$vB))]>;

class VXBX_Int_Ty<bits<11> xo, string opc, Intrinsic IntID, ValueType Ty>
  : VXForm_BX<xo, (outs vrrc:$vD), (ins vrrc:$vA),
              !strconcat(opc, " $vD, $vA"), IIC_VecFP,
              [(set Ty:$vD, (IntID Ty:$vA))]>;

class VXCR_Int_Ty<bits<11> xo, string opc, Intrinsic IntID, ValueType Ty>
  : VXForm_CR<xo, (outs vrrc:$vD), (ins vrrc:$vA, u1imm:$ST, u4imm:$SIX),
              !strconcat(opc, " $vD, $vA, $ST, $SIX"), IIC_VecFP,
              [(set Ty:$vD, (IntID Ty:$vA, imm:$ST, imm:$SIX))]>;
//===----------------------------------------------------------------------===//
// Instruction Definitions.

def HasAltivec : Predicate<"PPCSubTarget->hasAltivec()">;
let Predicates = [HasAltivec] in {

def DSS      : DSS_Form<0, 822, (outs), (ins u5imm:$STRM),
                        "dss $STRM", IIC_LdStLoad /*FIXME*/,
                        [(int_ppc_altivec_dss imm:$STRM)]>,
                        Deprecated<DeprecatedDST> {
  let A = 0;
  let B = 0;
}

def DSSALL   : DSS_Form<1, 822, (outs), (ins),
                        "dssall", IIC_LdStLoad /*FIXME*/,
                        [(int_ppc_altivec_dssall)]>,
                        Deprecated<DeprecatedDST> {
  let STRM = 0;
  let A = 0;
  let B = 0;
}
def DST      : DSS_Form<0, 342, (outs), (ins u5imm:$STRM, gprc:$rA, gprc:$rB),
                        "dst $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/,
                        [(int_ppc_altivec_dst i32:$rA, i32:$rB, imm:$STRM)]>,
                        Deprecated<DeprecatedDST>;

def DSTT     : DSS_Form<1, 342, (outs), (ins u5imm:$STRM, gprc:$rA, gprc:$rB),
                        "dstt $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/,
                        [(int_ppc_altivec_dstt i32:$rA, i32:$rB, imm:$STRM)]>,
                        Deprecated<DeprecatedDST>;

def DSTST    : DSS_Form<0, 374, (outs), (ins u5imm:$STRM, gprc:$rA, gprc:$rB),
                        "dstst $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/,
                        [(int_ppc_altivec_dstst i32:$rA, i32:$rB, imm:$STRM)]>,
                        Deprecated<DeprecatedDST>;

def DSTSTT   : DSS_Form<1, 374, (outs), (ins u5imm:$STRM, gprc:$rA, gprc:$rB),
                        "dststt $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/,
                        [(int_ppc_altivec_dststt i32:$rA, i32:$rB, imm:$STRM)]>,
                        Deprecated<DeprecatedDST>;
let isCodeGenOnly = 1 in {
  // The very same instructions as above, but formally matching 64bit registers.
  def DST64    : DSS_Form<0, 342, (outs), (ins u5imm:$STRM, g8rc:$rA, gprc:$rB),
                          "dst $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/,
                          [(int_ppc_altivec_dst i64:$rA, i32:$rB, imm:$STRM)]>,
                          Deprecated<DeprecatedDST>;

  def DSTT64   : DSS_Form<1, 342, (outs), (ins u5imm:$STRM, g8rc:$rA, gprc:$rB),
                          "dstt $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/,
                          [(int_ppc_altivec_dstt i64:$rA, i32:$rB, imm:$STRM)]>,
                          Deprecated<DeprecatedDST>;

  def DSTST64  : DSS_Form<0, 374, (outs), (ins u5imm:$STRM, g8rc:$rA, gprc:$rB),
                          "dstst $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/,
                          [(int_ppc_altivec_dstst i64:$rA, i32:$rB,
                                                  imm:$STRM)]>,
                          Deprecated<DeprecatedDST>;

  def DSTSTT64 : DSS_Form<1, 374, (outs), (ins u5imm:$STRM, g8rc:$rA, gprc:$rB),
                          "dststt $rA, $rB, $STRM", IIC_LdStLoad /*FIXME*/,
                          [(int_ppc_altivec_dststt i64:$rA, i32:$rB,
                                                   imm:$STRM)]>,
                          Deprecated<DeprecatedDST>;
}
def MFVSCR : VXForm_4<1540, (outs vrrc:$vD), (ins),
                      "mfvscr $vD", IIC_LdStStore,
                      [(set v8i16:$vD, (int_ppc_altivec_mfvscr))]>;
def MTVSCR : VXForm_5<1604, (outs), (ins vrrc:$vB),
                      "mtvscr $vB", IIC_LdStLoad,
                      [(int_ppc_altivec_mtvscr v4i32:$vB)]>;
let PPC970_Unit = 2 in {  // Loads.
def LVEBX: XForm_1<31,   7, (outs vrrc:$vD), (ins memrr:$src),
                   "lvebx $vD, $src", IIC_LdStLoad,
                   [(set v16i8:$vD, (int_ppc_altivec_lvebx xoaddr:$src))]>;
def LVEHX: XForm_1<31,  39, (outs vrrc:$vD), (ins memrr:$src),
                   "lvehx $vD, $src", IIC_LdStLoad,
                   [(set v8i16:$vD, (int_ppc_altivec_lvehx xoaddr:$src))]>;
def LVEWX: XForm_1<31,  71, (outs vrrc:$vD), (ins memrr:$src),
                   "lvewx $vD, $src", IIC_LdStLoad,
                   [(set v4i32:$vD, (int_ppc_altivec_lvewx xoaddr:$src))]>;
def LVX  : XForm_1<31, 103, (outs vrrc:$vD), (ins memrr:$src),
                   "lvx $vD, $src", IIC_LdStLoad,
                   [(set v4i32:$vD, (int_ppc_altivec_lvx xoaddr:$src))]>;
def LVXL : XForm_1<31, 359, (outs vrrc:$vD), (ins memrr:$src),
                   "lvxl $vD, $src", IIC_LdStLoad,
                   [(set v4i32:$vD, (int_ppc_altivec_lvxl xoaddr:$src))]>;
}

def LVSL : XForm_1<31,   6, (outs vrrc:$vD), (ins memrr:$src),
                   "lvsl $vD, $src", IIC_LdStLoad,
                   [(set v16i8:$vD, (int_ppc_altivec_lvsl xoaddr:$src))]>,
                   PPC970_Unit_LSU;
def LVSR : XForm_1<31,  38, (outs vrrc:$vD), (ins memrr:$src),
                   "lvsr $vD, $src", IIC_LdStLoad,
                   [(set v16i8:$vD, (int_ppc_altivec_lvsr xoaddr:$src))]>,
                   PPC970_Unit_LSU;
let PPC970_Unit = 2 in {  // Stores.
def STVEBX: XForm_8<31, 135, (outs), (ins vrrc:$rS, memrr:$dst),
                    "stvebx $rS, $dst", IIC_LdStStore,
                    [(int_ppc_altivec_stvebx v16i8:$rS, xoaddr:$dst)]>;
def STVEHX: XForm_8<31, 167, (outs), (ins vrrc:$rS, memrr:$dst),
                    "stvehx $rS, $dst", IIC_LdStStore,
                    [(int_ppc_altivec_stvehx v8i16:$rS, xoaddr:$dst)]>;
def STVEWX: XForm_8<31, 199, (outs), (ins vrrc:$rS, memrr:$dst),
                    "stvewx $rS, $dst", IIC_LdStStore,
                    [(int_ppc_altivec_stvewx v4i32:$rS, xoaddr:$dst)]>;
def STVX  : XForm_8<31, 231, (outs), (ins vrrc:$rS, memrr:$dst),
                    "stvx $rS, $dst", IIC_LdStStore,
                    [(int_ppc_altivec_stvx v4i32:$rS, xoaddr:$dst)]>;
def STVXL : XForm_8<31, 487, (outs), (ins vrrc:$rS, memrr:$dst),
                    "stvxl $rS, $dst", IIC_LdStStore,
                    [(int_ppc_altivec_stvxl v4i32:$rS, xoaddr:$dst)]>;
}
let PPC970_Unit = 5 in {  // VALU Operations.
// VA-Form instructions.  3-input AltiVec ops.
let isCommutable = 1 in {
def VMADDFP : VAForm_1<46, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vC, vrrc:$vB),
                       "vmaddfp $vD, $vA, $vC, $vB", IIC_VecFP,
                       [(set v4f32:$vD,
                         (fma v4f32:$vA, v4f32:$vC, v4f32:$vB))]>;

// FIXME: The fma+fneg pattern won't match because fneg is not legal.
def VNMSUBFP: VAForm_1<47, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vC, vrrc:$vB),
                       "vnmsubfp $vD, $vA, $vC, $vB", IIC_VecFP,
                       [(set v4f32:$vD, (fneg (fma v4f32:$vA, v4f32:$vC,
                                                   (fneg v4f32:$vB))))]>;

def VMHADDSHS  : VA1a_Int_Ty<32, "vmhaddshs", int_ppc_altivec_vmhaddshs, v8i16>;
def VMHRADDSHS : VA1a_Int_Ty<33, "vmhraddshs", int_ppc_altivec_vmhraddshs,
                             v8i16>;
def VMLADDUHM  : VA1a_Int_Ty<34, "vmladduhm", int_ppc_altivec_vmladduhm, v8i16>;
} // isCommutable

def VPERM : VA1a_Int_Ty3<43, "vperm", int_ppc_altivec_vperm,
                         v4i32, v4i32, v16i8>;
def VSEL  : VA1a_Int_Ty<42, "vsel", int_ppc_altivec_vsel, v4i32>;

// Shuffles.
def VSLDOI : VAForm_2<44, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, u5imm:$SH),
                      "vsldoi $vD, $vA, $vB, $SH", IIC_VecFP,
                      [(set v16i8:$vD,
                        (vsldoi_shuffle:$SH v16i8:$vA, v16i8:$vB))]>;
// VX-Form instructions.  AltiVec arithmetic ops.
let isCommutable = 1 in {
def VADDFP : VXForm_1<10, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                      "vaddfp $vD, $vA, $vB", IIC_VecFP,
                      [(set v4f32:$vD, (fadd v4f32:$vA, v4f32:$vB))]>;

def VADDUBM : VXForm_1<0, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vaddubm $vD, $vA, $vB", IIC_VecGeneral,
                       [(set v16i8:$vD, (add v16i8:$vA, v16i8:$vB))]>;
def VADDUHM : VXForm_1<64, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vadduhm $vD, $vA, $vB", IIC_VecGeneral,
                       [(set v8i16:$vD, (add v8i16:$vA, v8i16:$vB))]>;
def VADDUWM : VXForm_1<128, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vadduwm $vD, $vA, $vB", IIC_VecGeneral,
                       [(set v4i32:$vD, (add v4i32:$vA, v4i32:$vB))]>;

def VADDCUW : VX1_Int_Ty<384, "vaddcuw", int_ppc_altivec_vaddcuw, v4i32>;
def VADDSBS : VX1_Int_Ty<768, "vaddsbs", int_ppc_altivec_vaddsbs, v16i8>;
def VADDSHS : VX1_Int_Ty<832, "vaddshs", int_ppc_altivec_vaddshs, v8i16>;
def VADDSWS : VX1_Int_Ty<896, "vaddsws", int_ppc_altivec_vaddsws, v4i32>;
def VADDUBS : VX1_Int_Ty<512, "vaddubs", int_ppc_altivec_vaddubs, v16i8>;
def VADDUHS : VX1_Int_Ty<576, "vadduhs", int_ppc_altivec_vadduhs, v8i16>;
def VADDUWS : VX1_Int_Ty<640, "vadduws", int_ppc_altivec_vadduws, v4i32>;
} // isCommutable
let isCommutable = 1 in
def VAND : VXForm_1<1028, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                    "vand $vD, $vA, $vB", IIC_VecFP,
                    [(set v4i32:$vD, (and v4i32:$vA, v4i32:$vB))]>;
def VANDC : VXForm_1<1092, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                     "vandc $vD, $vA, $vB", IIC_VecFP,
                     [(set v4i32:$vD, (and v4i32:$vA,
                                           (vnot_ppc v4i32:$vB)))]>;
def VCFSX  : VXForm_1<842, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
                      "vcfsx $vD, $vB, $UIMM", IIC_VecFP,
                      [(set v4f32:$vD,
                        (int_ppc_altivec_vcfsx v4i32:$vB, imm:$UIMM))]>;
def VCFUX  : VXForm_1<778, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
                      "vcfux $vD, $vB, $UIMM", IIC_VecFP,
                      [(set v4f32:$vD,
                        (int_ppc_altivec_vcfux v4i32:$vB, imm:$UIMM))]>;
def VCTSXS : VXForm_1<970, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
                      "vctsxs $vD, $vB, $UIMM", IIC_VecFP,
                      [(set v4i32:$vD,
                        (int_ppc_altivec_vctsxs v4f32:$vB, imm:$UIMM))]>;
def VCTUXS : VXForm_1<906, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
                      "vctuxs $vD, $vB, $UIMM", IIC_VecFP,
                      [(set v4i32:$vD,
                        (int_ppc_altivec_vctuxs v4f32:$vB, imm:$UIMM))]>;
// Defines with the UIM field set to 0 for floating-point
// to integer (fp_to_sint/fp_to_uint) conversions and integer
// to floating-point (sint_to_fp/uint_to_fp) conversions.
let isCodeGenOnly = 1, VA = 0 in {
def VCFSX_0 : VXForm_1<842, (outs vrrc:$vD), (ins vrrc:$vB),
                       "vcfsx $vD, $vB, 0", IIC_VecFP,
                       [(set v4f32:$vD,
                         (int_ppc_altivec_vcfsx v4i32:$vB, 0))]>;
def VCTUXS_0 : VXForm_1<906, (outs vrrc:$vD), (ins vrrc:$vB),
                        "vctuxs $vD, $vB, 0", IIC_VecFP,
                        [(set v4i32:$vD,
                          (int_ppc_altivec_vctuxs v4f32:$vB, 0))]>;
def VCFUX_0 : VXForm_1<778, (outs vrrc:$vD), (ins vrrc:$vB),
                       "vcfux $vD, $vB, 0", IIC_VecFP,
                       [(set v4f32:$vD,
                         (int_ppc_altivec_vcfux v4i32:$vB, 0))]>;
def VCTSXS_0 : VXForm_1<970, (outs vrrc:$vD), (ins vrrc:$vB),
                        "vctsxs $vD, $vB, 0", IIC_VecFP,
                        [(set v4i32:$vD,
                          (int_ppc_altivec_vctsxs v4f32:$vB, 0))]>;
}
def VEXPTEFP : VX2_Int_SP<394, "vexptefp", int_ppc_altivec_vexptefp>;
def VLOGEFP  : VX2_Int_SP<458, "vlogefp",  int_ppc_altivec_vlogefp>;

let isCommutable = 1 in {
def VAVGSB : VX1_Int_Ty<1282, "vavgsb", int_ppc_altivec_vavgsb, v16i8>;
def VAVGSH : VX1_Int_Ty<1346, "vavgsh", int_ppc_altivec_vavgsh, v8i16>;
def VAVGSW : VX1_Int_Ty<1410, "vavgsw", int_ppc_altivec_vavgsw, v4i32>;
def VAVGUB : VX1_Int_Ty<1026, "vavgub", int_ppc_altivec_vavgub, v16i8>;
def VAVGUH : VX1_Int_Ty<1090, "vavguh", int_ppc_altivec_vavguh, v8i16>;
def VAVGUW : VX1_Int_Ty<1154, "vavguw", int_ppc_altivec_vavguw, v4i32>;

def VMAXFP : VX1_Int_Ty<1034, "vmaxfp", int_ppc_altivec_vmaxfp, v4f32>;
def VMAXSB : VX1_Int_Ty< 258, "vmaxsb", int_ppc_altivec_vmaxsb, v16i8>;
def VMAXSH : VX1_Int_Ty< 322, "vmaxsh", int_ppc_altivec_vmaxsh, v8i16>;
def VMAXSW : VX1_Int_Ty< 386, "vmaxsw", int_ppc_altivec_vmaxsw, v4i32>;
def VMAXUB : VX1_Int_Ty<   2, "vmaxub", int_ppc_altivec_vmaxub, v16i8>;
def VMAXUH : VX1_Int_Ty<  66, "vmaxuh", int_ppc_altivec_vmaxuh, v8i16>;
def VMAXUW : VX1_Int_Ty< 130, "vmaxuw", int_ppc_altivec_vmaxuw, v4i32>;
def VMINFP : VX1_Int_Ty<1098, "vminfp", int_ppc_altivec_vminfp, v4f32>;
def VMINSB : VX1_Int_Ty< 770, "vminsb", int_ppc_altivec_vminsb, v16i8>;
def VMINSH : VX1_Int_Ty< 834, "vminsh", int_ppc_altivec_vminsh, v8i16>;
def VMINSW : VX1_Int_Ty< 898, "vminsw", int_ppc_altivec_vminsw, v4i32>;
def VMINUB : VX1_Int_Ty< 514, "vminub", int_ppc_altivec_vminub, v16i8>;
def VMINUH : VX1_Int_Ty< 578, "vminuh", int_ppc_altivec_vminuh, v8i16>;
def VMINUW : VX1_Int_Ty< 642, "vminuw", int_ppc_altivec_vminuw, v4i32>;
} // isCommutable
def VMRGHB : VXForm_1< 12, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                      "vmrghb $vD, $vA, $vB", IIC_VecFP,
                      [(set v16i8:$vD, (vmrghb_shuffle v16i8:$vA, v16i8:$vB))]>;
def VMRGHH : VXForm_1< 76, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                      "vmrghh $vD, $vA, $vB", IIC_VecFP,
                      [(set v16i8:$vD, (vmrghh_shuffle v16i8:$vA, v16i8:$vB))]>;
def VMRGHW : VXForm_1<140, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                      "vmrghw $vD, $vA, $vB", IIC_VecFP,
                      [(set v16i8:$vD, (vmrghw_shuffle v16i8:$vA, v16i8:$vB))]>;
def VMRGLB : VXForm_1<268, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                      "vmrglb $vD, $vA, $vB", IIC_VecFP,
                      [(set v16i8:$vD, (vmrglb_shuffle v16i8:$vA, v16i8:$vB))]>;
def VMRGLH : VXForm_1<332, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                      "vmrglh $vD, $vA, $vB", IIC_VecFP,
                      [(set v16i8:$vD, (vmrglh_shuffle v16i8:$vA, v16i8:$vB))]>;
def VMRGLW : VXForm_1<396, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                      "vmrglw $vD, $vA, $vB", IIC_VecFP,
                      [(set v16i8:$vD, (vmrglw_shuffle v16i8:$vA, v16i8:$vB))]>;
def VMSUMMBM : VA1a_Int_Ty3<37, "vmsummbm", int_ppc_altivec_vmsummbm,
                            v4i32, v16i8, v4i32>;
def VMSUMSHM : VA1a_Int_Ty3<40, "vmsumshm", int_ppc_altivec_vmsumshm,
                            v4i32, v8i16, v4i32>;
def VMSUMSHS : VA1a_Int_Ty3<41, "vmsumshs", int_ppc_altivec_vmsumshs,
                            v4i32, v8i16, v4i32>;
def VMSUMUBM : VA1a_Int_Ty3<36, "vmsumubm", int_ppc_altivec_vmsumubm,
                            v4i32, v16i8, v4i32>;
def VMSUMUHM : VA1a_Int_Ty3<38, "vmsumuhm", int_ppc_altivec_vmsumuhm,
                            v4i32, v8i16, v4i32>;
def VMSUMUHS : VA1a_Int_Ty3<39, "vmsumuhs", int_ppc_altivec_vmsumuhs,
                            v4i32, v8i16, v4i32>;
let isCommutable = 1 in {
def VMULESB : VX1_Int_Ty2<776, "vmulesb", int_ppc_altivec_vmulesb,
                          v8i16, v16i8>;
def VMULESH : VX1_Int_Ty2<840, "vmulesh", int_ppc_altivec_vmulesh,
                          v4i32, v8i16>;
def VMULEUB : VX1_Int_Ty2<520, "vmuleub", int_ppc_altivec_vmuleub,
                          v8i16, v16i8>;
def VMULEUH : VX1_Int_Ty2<584, "vmuleuh", int_ppc_altivec_vmuleuh,
                          v4i32, v8i16>;
def VMULOSB : VX1_Int_Ty2<264, "vmulosb", int_ppc_altivec_vmulosb,
                          v8i16, v16i8>;
def VMULOSH : VX1_Int_Ty2<328, "vmulosh", int_ppc_altivec_vmulosh,
                          v4i32, v8i16>;
def VMULOUB : VX1_Int_Ty2<  8, "vmuloub", int_ppc_altivec_vmuloub,
                          v8i16, v16i8>;
def VMULOUH : VX1_Int_Ty2< 72, "vmulouh", int_ppc_altivec_vmulouh,
                          v4i32, v8i16>;
} // isCommutable
def VREFP     : VX2_Int_SP<266, "vrefp",     int_ppc_altivec_vrefp>;
def VRFIM     : VX2_Int_SP<714, "vrfim",     int_ppc_altivec_vrfim>;
def VRFIN     : VX2_Int_SP<522, "vrfin",     int_ppc_altivec_vrfin>;
def VRFIP     : VX2_Int_SP<650, "vrfip",     int_ppc_altivec_vrfip>;
def VRFIZ     : VX2_Int_SP<586, "vrfiz",     int_ppc_altivec_vrfiz>;
def VRSQRTEFP : VX2_Int_SP<330, "vrsqrtefp", int_ppc_altivec_vrsqrtefp>;
def VSUBCUW : VX1_Int_Ty<1408, "vsubcuw", int_ppc_altivec_vsubcuw, v4i32>;

def VSUBFP  : VXForm_1<74, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vsubfp $vD, $vA, $vB", IIC_VecGeneral,
                       [(set v4f32:$vD, (fsub v4f32:$vA, v4f32:$vB))]>;
def VSUBUBM : VXForm_1<1024, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vsububm $vD, $vA, $vB", IIC_VecGeneral,
                       [(set v16i8:$vD, (sub v16i8:$vA, v16i8:$vB))]>;
def VSUBUHM : VXForm_1<1088, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vsubuhm $vD, $vA, $vB", IIC_VecGeneral,
                       [(set v8i16:$vD, (sub v8i16:$vA, v8i16:$vB))]>;
def VSUBUWM : VXForm_1<1152, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vsubuwm $vD, $vA, $vB", IIC_VecGeneral,
                       [(set v4i32:$vD, (sub v4i32:$vA, v4i32:$vB))]>;

def VSUBSBS : VX1_Int_Ty<1792, "vsubsbs", int_ppc_altivec_vsubsbs, v16i8>;
def VSUBSHS : VX1_Int_Ty<1856, "vsubshs", int_ppc_altivec_vsubshs, v8i16>;
def VSUBSWS : VX1_Int_Ty<1920, "vsubsws", int_ppc_altivec_vsubsws, v4i32>;
def VSUBUBS : VX1_Int_Ty<1536, "vsububs", int_ppc_altivec_vsububs, v16i8>;
def VSUBUHS : VX1_Int_Ty<1600, "vsubuhs", int_ppc_altivec_vsubuhs, v8i16>;
def VSUBUWS : VX1_Int_Ty<1664, "vsubuws", int_ppc_altivec_vsubuws, v4i32>;

def VSUMSWS : VX1_Int_Ty<1928, "vsumsws", int_ppc_altivec_vsumsws, v4i32>;
def VSUM2SWS: VX1_Int_Ty<1672, "vsum2sws", int_ppc_altivec_vsum2sws, v4i32>;

def VSUM4SBS: VX1_Int_Ty3<1800, "vsum4sbs", int_ppc_altivec_vsum4sbs,
                          v4i32, v16i8, v4i32>;
def VSUM4SHS: VX1_Int_Ty3<1608, "vsum4shs", int_ppc_altivec_vsum4shs,
                          v4i32, v8i16, v4i32>;
def VSUM4UBS: VX1_Int_Ty3<1544, "vsum4ubs", int_ppc_altivec_vsum4ubs,
                          v4i32, v16i8, v4i32>;
def VNOR : VXForm_1<1284, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                    "vnor $vD, $vA, $vB", IIC_VecFP,
                    [(set v4i32:$vD, (vnot_ppc (or v4i32:$vA,
                                                   v4i32:$vB)))]>;
let isCommutable = 1 in {
def VOR : VXForm_1<1156, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                   "vor $vD, $vA, $vB", IIC_VecFP,
                   [(set v4i32:$vD, (or v4i32:$vA, v4i32:$vB))]>;
def VXOR : VXForm_1<1220, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                    "vxor $vD, $vA, $vB", IIC_VecFP,
                    [(set v4i32:$vD, (xor v4i32:$vA, v4i32:$vB))]>;
} // isCommutable
def VRLB : VX1_Int_Ty<   4, "vrlb", int_ppc_altivec_vrlb, v16i8>;
def VRLH : VX1_Int_Ty<  68, "vrlh", int_ppc_altivec_vrlh, v8i16>;
def VRLW : VX1_Int_Ty< 132, "vrlw", int_ppc_altivec_vrlw, v4i32>;

def VSL  : VX1_Int_Ty< 452, "vsl" , int_ppc_altivec_vsl,  v4i32>;
def VSLO : VX1_Int_Ty<1036, "vslo", int_ppc_altivec_vslo, v4i32>;

def VSLB : VX1_Int_Ty< 260, "vslb", int_ppc_altivec_vslb, v16i8>;
def VSLH : VX1_Int_Ty< 324, "vslh", int_ppc_altivec_vslh, v8i16>;
def VSLW : VX1_Int_Ty< 388, "vslw", int_ppc_altivec_vslw, v4i32>;
def VSPLTB : VXForm_1<524, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
                      "vspltb $vD, $vB, $UIMM", IIC_VecPerm,
                      [(set v16i8:$vD,
                        (vspltb_shuffle:$UIMM v16i8:$vB, (undef)))]>;
def VSPLTH : VXForm_1<588, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
                      "vsplth $vD, $vB, $UIMM", IIC_VecPerm,
                      [(set v16i8:$vD,
                        (vsplth_shuffle:$UIMM v16i8:$vB, (undef)))]>;
def VSPLTW : VXForm_1<652, (outs vrrc:$vD), (ins u5imm:$UIMM, vrrc:$vB),
                      "vspltw $vD, $vB, $UIMM", IIC_VecPerm,
                      [(set v16i8:$vD,
                        (vspltw_shuffle:$UIMM v16i8:$vB, (undef)))]>;
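
// In each vsplt[bhw] above, the UIMM field selects which element of $vB is
// replicated into every element of $vD; the VSPLT*_get_imm xforms derive
// that index from the splat shuffle mask.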
def VSR   : VX1_Int_Ty< 708, "vsr"  , int_ppc_altivec_vsr,   v4i32>;
def VSRO  : VX1_Int_Ty<1100, "vsro" , int_ppc_altivec_vsro,  v4i32>;

def VSRAB : VX1_Int_Ty< 772, "vsrab", int_ppc_altivec_vsrab, v16i8>;
def VSRAH : VX1_Int_Ty< 836, "vsrah", int_ppc_altivec_vsrah, v8i16>;
def VSRAW : VX1_Int_Ty< 900, "vsraw", int_ppc_altivec_vsraw, v4i32>;
def VSRB  : VX1_Int_Ty< 516, "vsrb" , int_ppc_altivec_vsrb,  v16i8>;
def VSRH  : VX1_Int_Ty< 580, "vsrh" , int_ppc_altivec_vsrh,  v8i16>;
def VSRW  : VX1_Int_Ty< 644, "vsrw" , int_ppc_altivec_vsrw,  v4i32>;
def VSPLTISB : VXForm_3<780, (outs vrrc:$vD), (ins s5imm:$SIMM),
                        "vspltisb $vD, $SIMM", IIC_VecPerm,
                        [(set v16i8:$vD, (v16i8 vecspltisb:$SIMM))]>;
def VSPLTISH : VXForm_3<844, (outs vrrc:$vD), (ins s5imm:$SIMM),
                        "vspltish $vD, $SIMM", IIC_VecPerm,
                        [(set v8i16:$vD, (v8i16 vecspltish:$SIMM))]>;
def VSPLTISW : VXForm_3<908, (outs vrrc:$vD), (ins s5imm:$SIMM),
                        "vspltisw $vD, $SIMM", IIC_VecPerm,
                        [(set v4i32:$vD, (v4i32 vecspltisw:$SIMM))]>;
// Vector Pack.
def VPKPX   : VX1_Int_Ty2<782, "vpkpx", int_ppc_altivec_vpkpx,
                          v8i16, v4i32>;
def VPKSHSS : VX1_Int_Ty2<398, "vpkshss", int_ppc_altivec_vpkshss,
                          v16i8, v8i16>;
def VPKSHUS : VX1_Int_Ty2<270, "vpkshus", int_ppc_altivec_vpkshus,
                          v16i8, v8i16>;
def VPKSWSS : VX1_Int_Ty2<462, "vpkswss", int_ppc_altivec_vpkswss,
                          v8i16, v4i32>;
def VPKSWUS : VX1_Int_Ty2<334, "vpkswus", int_ppc_altivec_vpkswus,
                          v8i16, v4i32>;
def VPKUHUM : VXForm_1<14, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vpkuhum $vD, $vA, $vB", IIC_VecFP,
                       [(set v16i8:$vD,
                         (vpkuhum_shuffle v16i8:$vA, v16i8:$vB))]>;
def VPKUHUS : VX1_Int_Ty2<142, "vpkuhus", int_ppc_altivec_vpkuhus,
                          v16i8, v8i16>;
def VPKUWUM : VXForm_1<78, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vpkuwum $vD, $vA, $vB", IIC_VecFP,
                       [(set v16i8:$vD,
                         (vpkuwum_shuffle v16i8:$vA, v16i8:$vB))]>;
def VPKUWUS : VX1_Int_Ty2<206, "vpkuwus", int_ppc_altivec_vpkuwus,
                          v8i16, v4i32>;

// Vector Unpack.
def VUPKHPX : VX2_Int_Ty2<846, "vupkhpx", int_ppc_altivec_vupkhpx,
                          v4i32, v8i16>;
def VUPKHSB : VX2_Int_Ty2<526, "vupkhsb", int_ppc_altivec_vupkhsb,
                          v8i16, v16i8>;
def VUPKHSH : VX2_Int_Ty2<590, "vupkhsh", int_ppc_altivec_vupkhsh,
                          v4i32, v8i16>;
def VUPKLPX : VX2_Int_Ty2<974, "vupklpx", int_ppc_altivec_vupklpx,
                          v4i32, v8i16>;
def VUPKLSB : VX2_Int_Ty2<654, "vupklsb", int_ppc_altivec_vupklsb,
                          v8i16, v16i8>;
def VUPKLSH : VX2_Int_Ty2<718, "vupklsh", int_ppc_altivec_vupklsh,
                          v4i32, v8i16>;
// Altivec Comparisons.

class VCMP<bits<10> xo, string asmstr, ValueType Ty>
  : VXRForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), asmstr,
              IIC_VecFPCompare,
              [(set Ty:$vD, (Ty (PPCvcmp Ty:$vA, Ty:$vB, xo)))]>;
class VCMPo<bits<10> xo, string asmstr, ValueType Ty>
  : VXRForm_1<xo, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB), asmstr,
              IIC_VecFPCompare,
              [(set Ty:$vD, (Ty (PPCvcmp_o Ty:$vA, Ty:$vB, xo)))]> {
  let Defs = [CR6];
  let RC = 1;
}
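
// The record's own minor opcode (xo) appears as an operand of PPCvcmp in
// the pattern, so each comparison flavor selects only its own instruction.
// The VCMPo ("dot") forms additionally summarize the result in CR6, which
// is why they define CR6 and set the record bit RC.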
// f32 element comparisons.
def VCMPBFP   : VCMP <966, "vcmpbfp $vD, $vA, $vB"  , v4f32>;
def VCMPBFPo  : VCMPo<966, "vcmpbfp. $vD, $vA, $vB" , v4f32>;
def VCMPEQFP  : VCMP <198, "vcmpeqfp $vD, $vA, $vB" , v4f32>;
def VCMPEQFPo : VCMPo<198, "vcmpeqfp. $vD, $vA, $vB", v4f32>;
def VCMPGEFP  : VCMP <454, "vcmpgefp $vD, $vA, $vB" , v4f32>;
def VCMPGEFPo : VCMPo<454, "vcmpgefp. $vD, $vA, $vB", v4f32>;
def VCMPGTFP  : VCMP <710, "vcmpgtfp $vD, $vA, $vB" , v4f32>;
def VCMPGTFPo : VCMPo<710, "vcmpgtfp. $vD, $vA, $vB", v4f32>;
// i8 element comparisons.
def VCMPEQUB  : VCMP <  6, "vcmpequb $vD, $vA, $vB" , v16i8>;
def VCMPEQUBo : VCMPo<  6, "vcmpequb. $vD, $vA, $vB", v16i8>;
def VCMPGTSB  : VCMP <774, "vcmpgtsb $vD, $vA, $vB" , v16i8>;
def VCMPGTSBo : VCMPo<774, "vcmpgtsb. $vD, $vA, $vB", v16i8>;
def VCMPGTUB  : VCMP <518, "vcmpgtub $vD, $vA, $vB" , v16i8>;
def VCMPGTUBo : VCMPo<518, "vcmpgtub. $vD, $vA, $vB", v16i8>;

// i16 element comparisons.
def VCMPEQUH  : VCMP < 70, "vcmpequh $vD, $vA, $vB" , v8i16>;
def VCMPEQUHo : VCMPo< 70, "vcmpequh. $vD, $vA, $vB", v8i16>;
def VCMPGTSH  : VCMP <838, "vcmpgtsh $vD, $vA, $vB" , v8i16>;
def VCMPGTSHo : VCMPo<838, "vcmpgtsh. $vD, $vA, $vB", v8i16>;
def VCMPGTUH  : VCMP <582, "vcmpgtuh $vD, $vA, $vB" , v8i16>;
def VCMPGTUHo : VCMPo<582, "vcmpgtuh. $vD, $vA, $vB", v8i16>;

// i32 element comparisons.
def VCMPEQUW  : VCMP <134, "vcmpequw $vD, $vA, $vB" , v4i32>;
def VCMPEQUWo : VCMPo<134, "vcmpequw. $vD, $vA, $vB", v4i32>;
def VCMPGTSW  : VCMP <902, "vcmpgtsw $vD, $vA, $vB" , v4i32>;
def VCMPGTSWo : VCMPo<902, "vcmpgtsw. $vD, $vA, $vB", v4i32>;
def VCMPGTUW  : VCMP <646, "vcmpgtuw $vD, $vA, $vB" , v4i32>;
def VCMPGTUWo : VCMPo<646, "vcmpgtuw. $vD, $vA, $vB", v4i32>;
let isCodeGenOnly = 1 in {
def V_SET0B : VXForm_setzero<1220, (outs vrrc:$vD), (ins),
                             "vxor $vD, $vD, $vD", IIC_VecFP,
                             [(set v16i8:$vD, (v16i8 immAllZerosV))]>;
def V_SET0H : VXForm_setzero<1220, (outs vrrc:$vD), (ins),
                             "vxor $vD, $vD, $vD", IIC_VecFP,
                             [(set v8i16:$vD, (v8i16 immAllZerosV))]>;
def V_SET0  : VXForm_setzero<1220, (outs vrrc:$vD), (ins),
                             "vxor $vD, $vD, $vD", IIC_VecFP,
                             [(set v4i32:$vD, (v4i32 immAllZerosV))]>;

let IMM=-1 in {
def V_SETALLONESB : VXForm_3<908, (outs vrrc:$vD), (ins),
                             "vspltisw $vD, -1", IIC_VecFP,
                             [(set v16i8:$vD, (v16i8 immAllOnesV))]>;
def V_SETALLONESH : VXForm_3<908, (outs vrrc:$vD), (ins),
                             "vspltisw $vD, -1", IIC_VecFP,
                             [(set v8i16:$vD, (v8i16 immAllOnesV))]>;
def V_SETALLONES  : VXForm_3<908, (outs vrrc:$vD), (ins),
                             "vspltisw $vD, -1", IIC_VecFP,
                             [(set v4i32:$vD, (v4i32 immAllOnesV))]>;
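
// vspltisw -1 sign-extends the immediate into every word, setting every bit
// of the register, so the same encoding serves as the all-ones constant for
// v16i8, v8i16, and v4i32 alike.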
}
}
} // VALU Operations.
//===----------------------------------------------------------------------===//
// Additional Altivec Patterns
//

// Loads.
def : Pat<(v4i32 (load xoaddr:$src)), (LVX xoaddr:$src)>;

// Stores.
def : Pat<(store v4i32:$rS, xoaddr:$dst),
          (STVX $rS, xoaddr:$dst)>;
// Bit conversions.
def : Pat<(v16i8 (bitconvert (v8i16 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v1i128 VRRC:$src))), (v16i8 VRRC:$src)>;

def : Pat<(v8i16 (bitconvert (v16i8 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v1i128 VRRC:$src))), (v8i16 VRRC:$src)>;

def : Pat<(v4i32 (bitconvert (v16i8 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v4f32 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v1i128 VRRC:$src))), (v4i32 VRRC:$src)>;

def : Pat<(v4f32 (bitconvert (v16i8 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v1i128 VRRC:$src))), (v4f32 VRRC:$src)>;

def : Pat<(v2i64 (bitconvert (v16i8 VRRC:$src))), (v2i64 VRRC:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 VRRC:$src))), (v2i64 VRRC:$src)>;
def : Pat<(v2i64 (bitconvert (v4i32 VRRC:$src))), (v2i64 VRRC:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32 VRRC:$src))), (v2i64 VRRC:$src)>;
def : Pat<(v2i64 (bitconvert (v1i128 VRRC:$src))), (v2i64 VRRC:$src)>;

def : Pat<(v1i128 (bitconvert (v16i8 VRRC:$src))), (v1i128 VRRC:$src)>;
def : Pat<(v1i128 (bitconvert (v8i16 VRRC:$src))), (v1i128 VRRC:$src)>;
def : Pat<(v1i128 (bitconvert (v4i32 VRRC:$src))), (v1i128 VRRC:$src)>;
def : Pat<(v1i128 (bitconvert (v4f32 VRRC:$src))), (v1i128 VRRC:$src)>;
def : Pat<(v1i128 (bitconvert (v2i64 VRRC:$src))), (v1i128 VRRC:$src)>;
// Shuffles.

// Match vsldoi(x,x), vpkuwum(x,x), vpkuhum(x,x)
def:Pat<(vsldoi_unary_shuffle:$in v16i8:$vA, undef),
        (VSLDOI $vA, $vA, (VSLDOI_unary_get_imm $in))>;
def:Pat<(vpkuwum_unary_shuffle v16i8:$vA, undef),
        (VPKUWUM $vA, $vA)>;
def:Pat<(vpkuhum_unary_shuffle v16i8:$vA, undef),
        (VPKUHUM $vA, $vA)>;

// Match vsldoi(y,x), vpkuwum(y,x), vpkuhum(y,x), i.e., swapped operands.
// These fragments are matched for little-endian, where the inputs must
// be swapped for correct semantics.
def:Pat<(vsldoi_swapped_shuffle:$in v16i8:$vA, v16i8:$vB),
        (VSLDOI $vB, $vA, (VSLDOI_swapped_get_imm $in))>;
def:Pat<(vpkuwum_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VPKUWUM $vB, $vA)>;
def:Pat<(vpkuhum_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VPKUHUM $vB, $vA)>;
// Match vmrg*(x,x)
def:Pat<(vmrglb_unary_shuffle v16i8:$vA, undef),
        (VMRGLB $vA, $vA)>;
def:Pat<(vmrglh_unary_shuffle v16i8:$vA, undef),
        (VMRGLH $vA, $vA)>;
def:Pat<(vmrglw_unary_shuffle v16i8:$vA, undef),
        (VMRGLW $vA, $vA)>;
def:Pat<(vmrghb_unary_shuffle v16i8:$vA, undef),
        (VMRGHB $vA, $vA)>;
def:Pat<(vmrghh_unary_shuffle v16i8:$vA, undef),
        (VMRGHH $vA, $vA)>;
def:Pat<(vmrghw_unary_shuffle v16i8:$vA, undef),
        (VMRGHW $vA, $vA)>;
// Match vmrg*(y,x), i.e., swapped operands.  These fragments
// are matched for little-endian, where the inputs must be
// swapped for correct semantics.
def:Pat<(vmrglb_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VMRGLB $vB, $vA)>;
def:Pat<(vmrglh_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VMRGLH $vB, $vA)>;
def:Pat<(vmrglw_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VMRGLW $vB, $vA)>;
def:Pat<(vmrghb_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VMRGHB $vB, $vA)>;
def:Pat<(vmrghh_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VMRGHH $vB, $vA)>;
def:Pat<(vmrghw_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VMRGHW $vB, $vA)>;
// Logical Operations
def : Pat<(vnot_ppc v4i32:$vA), (VNOR $vA, $vA)>;

def : Pat<(vnot_ppc (or v4i32:$A, v4i32:$B)),
          (VNOR $A, $B)>;
def : Pat<(and v4i32:$A, (vnot_ppc v4i32:$B)),
          (VANDC $A, $B)>;

def : Pat<(fmul v4f32:$vA, v4f32:$vB),
          (VMADDFP $vA, $vB,
             (v4i32 (VSLW (V_SETALLONES), (V_SETALLONES))))>;
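
// There is no plain vector fp multiply, so fmul is lowered to vmaddfp with
// a third operand of -0.0: vslw of all ones by all ones (shift count 31)
// yields 0x80000000 in each word, and x * y + (-0.0) == x * y for all x, y.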
// Fused multiply add and multiply sub for packed float.  These are represented
// separately from the real instructions above, for operations that must have
// the additional precision, such as Newton-Raphson (used by divide, sqrt)
def : Pat<(PPCvmaddfp v4f32:$A, v4f32:$B, v4f32:$C),
          (VMADDFP $A, $B, $C)>;
def : Pat<(PPCvnmsubfp v4f32:$A, v4f32:$B, v4f32:$C),
          (VNMSUBFP $A, $B, $C)>;

def : Pat<(int_ppc_altivec_vmaddfp v4f32:$A, v4f32:$B, v4f32:$C),
          (VMADDFP $A, $B, $C)>;
def : Pat<(int_ppc_altivec_vnmsubfp v4f32:$A, v4f32:$B, v4f32:$C),
          (VNMSUBFP $A, $B, $C)>;

def : Pat<(PPCvperm v16i8:$vA, v16i8:$vB, v16i8:$vC),
          (VPERM $vA, $vB, $vC)>;

def : Pat<(PPCfre v4f32:$A), (VREFP $A)>;
def : Pat<(PPCfrsqrte v4f32:$A), (VRSQRTEFP $A)>;
// Vector shifts
def : Pat<(v16i8 (shl v16i8:$vA, v16i8:$vB)),
          (v16i8 (VSLB $vA, $vB))>;
def : Pat<(v8i16 (shl v8i16:$vA, v8i16:$vB)),
          (v8i16 (VSLH $vA, $vB))>;
def : Pat<(v4i32 (shl v4i32:$vA, v4i32:$vB)),
          (v4i32 (VSLW $vA, $vB))>;

def : Pat<(v16i8 (srl v16i8:$vA, v16i8:$vB)),
          (v16i8 (VSRB $vA, $vB))>;
def : Pat<(v8i16 (srl v8i16:$vA, v8i16:$vB)),
          (v8i16 (VSRH $vA, $vB))>;
def : Pat<(v4i32 (srl v4i32:$vA, v4i32:$vB)),
          (v4i32 (VSRW $vA, $vB))>;

def : Pat<(v16i8 (sra v16i8:$vA, v16i8:$vB)),
          (v16i8 (VSRAB $vA, $vB))>;
def : Pat<(v8i16 (sra v8i16:$vA, v8i16:$vB)),
          (v8i16 (VSRAH $vA, $vB))>;
def : Pat<(v4i32 (sra v4i32:$vA, v4i32:$vB)),
          (v4i32 (VSRAW $vA, $vB))>;
// Float to integer and integer to float conversions
def : Pat<(v4i32 (fp_to_sint v4f32:$vA)),
          (VCTSXS_0 $vA)>;
def : Pat<(v4i32 (fp_to_uint v4f32:$vA)),
          (VCTUXS_0 $vA)>;
def : Pat<(v4f32 (sint_to_fp v4i32:$vA)),
          (VCFSX_0 $vA)>;
def : Pat<(v4f32 (uint_to_fp v4i32:$vA)),
          (VCFUX_0 $vA)>;
// Floating-point rounding
def : Pat<(v4f32 (ffloor v4f32:$vA)),
          (VRFIM $vA)>;
def : Pat<(v4f32 (fceil v4f32:$vA)),
          (VRFIP $vA)>;
def : Pat<(v4f32 (ftrunc v4f32:$vA)),
          (VRFIZ $vA)>;
def : Pat<(v4f32 (fnearbyint v4f32:$vA)),
          (VRFIN $vA)>;
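
// The vrfi* suffix encodes the rounding mode: M rounds toward minus
// infinity (ffloor), P toward plus infinity (fceil), Z toward zero
// (ftrunc), and N to nearest (fnearbyint).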
} // end HasAltivec

def HasP8Altivec : Predicate<"PPCSubTarget->hasP8Altivec()">;
def HasP8Crypto : Predicate<"PPCSubTarget->hasP8Crypto()">;
let Predicates = [HasP8Altivec] in {
let isCommutable = 1 in {
def VMULESW : VX1_Int_Ty2<904, "vmulesw", int_ppc_altivec_vmulesw,
                          v2i64, v4i32>;
def VMULEUW : VX1_Int_Ty2<648, "vmuleuw", int_ppc_altivec_vmuleuw,
                          v2i64, v4i32>;
def VMULOSW : VX1_Int_Ty2<392, "vmulosw", int_ppc_altivec_vmulosw,
                          v2i64, v4i32>;
def VMULOUW : VX1_Int_Ty2<136, "vmulouw", int_ppc_altivec_vmulouw,
                          v2i64, v4i32>;
def VMULUWM : VXForm_1<137, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vmuluwm $vD, $vA, $vB", IIC_VecGeneral,
                       [(set v4i32:$vD, (mul v4i32:$vA, v4i32:$vB))]>;
def VMAXSD : VX1_Int_Ty<450, "vmaxsd", int_ppc_altivec_vmaxsd, v2i64>;
def VMAXUD : VX1_Int_Ty<194, "vmaxud", int_ppc_altivec_vmaxud, v2i64>;
def VMINSD : VX1_Int_Ty<962, "vminsd", int_ppc_altivec_vminsd, v2i64>;
def VMINUD : VX1_Int_Ty<706, "vminud", int_ppc_altivec_vminud, v2i64>;
} // isCommutable

// Vector shifts
def VRLD : VX1_Int_Ty<196, "vrld", int_ppc_altivec_vrld, v2i64>;
def VSLD : VXForm_1<1476, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                    "vsld $vD, $vA, $vB", IIC_VecGeneral,
                    [(set v2i64:$vD, (shl v2i64:$vA, v2i64:$vB))]>;
def VSRD : VXForm_1<1732, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                    "vsrd $vD, $vA, $vB", IIC_VecGeneral,
                    [(set v2i64:$vD, (srl v2i64:$vA, v2i64:$vB))]>;
def VSRAD : VXForm_1<964, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                     "vsrad $vD, $vA, $vB", IIC_VecGeneral,
                     [(set v2i64:$vD, (sra v2i64:$vA, v2i64:$vB))]>;
// Vector Integer Arithmetic Instructions
let isCommutable = 1 in {
def VADDUDM : VXForm_1<192, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vaddudm $vD, $vA, $vB", IIC_VecGeneral,
                       [(set v2i64:$vD, (add v2i64:$vA, v2i64:$vB))]>;
def VADDUQM : VXForm_1<256, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vadduqm $vD, $vA, $vB", IIC_VecGeneral,
                       [(set v1i128:$vD, (add v1i128:$vA, v1i128:$vB))]>;
} // isCommutable
// Vector Quadword Add
def VADDEUQM : VA1a_Int_Ty<60, "vaddeuqm", int_ppc_altivec_vaddeuqm, v1i128>;
def VADDCUQ  : VX1_Int_Ty<320, "vaddcuq", int_ppc_altivec_vaddcuq, v1i128>;
def VADDECUQ : VA1a_Int_Ty<61, "vaddecuq", int_ppc_altivec_vaddecuq, v1i128>;

// Vector Doubleword Subtract
def VSUBUDM : VXForm_1<1216, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vsubudm $vD, $vA, $vB", IIC_VecGeneral,
                       [(set v2i64:$vD, (sub v2i64:$vA, v2i64:$vB))]>;

// Vector Quadword Subtract
def VSUBUQM : VXForm_1<1280, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vsubuqm $vD, $vA, $vB", IIC_VecGeneral,
                       [(set v1i128:$vD, (sub v1i128:$vA, v1i128:$vB))]>;
def VSUBEUQM : VA1a_Int_Ty<62, "vsubeuqm", int_ppc_altivec_vsubeuqm, v1i128>;
def VSUBCUQ  : VX1_Int_Ty<1344, "vsubcuq", int_ppc_altivec_vsubcuq, v1i128>;
def VSUBECUQ : VA1a_Int_Ty<63, "vsubecuq", int_ppc_altivec_vsubecuq, v1i128>;
// Count Leading Zeros
def VCLZB : VXForm_2<1794, (outs vrrc:$vD), (ins vrrc:$vB),
                     "vclzb $vD, $vB", IIC_VecGeneral,
                     [(set v16i8:$vD, (ctlz v16i8:$vB))]>;
def VCLZH : VXForm_2<1858, (outs vrrc:$vD), (ins vrrc:$vB),
                     "vclzh $vD, $vB", IIC_VecGeneral,
                     [(set v8i16:$vD, (ctlz v8i16:$vB))]>;
def VCLZW : VXForm_2<1922, (outs vrrc:$vD), (ins vrrc:$vB),
                     "vclzw $vD, $vB", IIC_VecGeneral,
                     [(set v4i32:$vD, (ctlz v4i32:$vB))]>;
def VCLZD : VXForm_2<1986, (outs vrrc:$vD), (ins vrrc:$vB),
                     "vclzd $vD, $vB", IIC_VecGeneral,
                     [(set v2i64:$vD, (ctlz v2i64:$vB))]>;

// Population Count
def VPOPCNTB : VXForm_2<1795, (outs vrrc:$vD), (ins vrrc:$vB),
                        "vpopcntb $vD, $vB", IIC_VecGeneral,
                        [(set v16i8:$vD, (ctpop v16i8:$vB))]>;
def VPOPCNTH : VXForm_2<1859, (outs vrrc:$vD), (ins vrrc:$vB),
                        "vpopcnth $vD, $vB", IIC_VecGeneral,
                        [(set v8i16:$vD, (ctpop v8i16:$vB))]>;
def VPOPCNTW : VXForm_2<1923, (outs vrrc:$vD), (ins vrrc:$vB),
                        "vpopcntw $vD, $vB", IIC_VecGeneral,
                        [(set v4i32:$vD, (ctpop v4i32:$vB))]>;
def VPOPCNTD : VXForm_2<1987, (outs vrrc:$vD), (ins vrrc:$vB),
                        "vpopcntd $vD, $vB", IIC_VecGeneral,
                        [(set v2i64:$vD, (ctpop v2i64:$vB))]>;
let isCommutable = 1 in {
// FIXME: Use AddedComplexity > 400 to ensure these patterns match before the
//        VSX equivalents. We need to fix this up at some point. Two possible
//        solutions for this problem:
//        1. Disable Altivec patterns that compete with VSX patterns using the
//           !HasVSX predicate. This essentially favours VSX over Altivec, in
//           hopes of reducing register pressure (larger register set using VSX
//           instructions than VMX instructions)
//        2. Employ a more disciplined use of AddedComplexity, which would
//           provide more fine-grained control than option 1. This would be
//           beneficial if we find situations where Altivec is really preferred
//           over VSX.
def VEQV  : VXForm_1<1668, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                     "veqv $vD, $vA, $vB", IIC_VecGeneral,
                     [(set v4i32:$vD, (vnot_ppc (xor v4i32:$vA, v4i32:$vB)))]>;
def VNAND : VXForm_1<1412, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                     "vnand $vD, $vA, $vB", IIC_VecGeneral,
                     [(set v4i32:$vD, (vnot_ppc (and v4i32:$vA, v4i32:$vB)))]>;
} // isCommutable
def VORC : VXForm_1<1348, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                    "vorc $vD, $vA, $vB", IIC_VecGeneral,
                    [(set v4i32:$vD, (or v4i32:$vA,
                                         (vnot_ppc v4i32:$vB)))]>;

// i64 element comparisons.
def VCMPEQUD  : VCMP <199, "vcmpequd $vD, $vA, $vB" , v2i64>;
def VCMPEQUDo : VCMPo<199, "vcmpequd. $vD, $vA, $vB", v2i64>;
def VCMPGTSD  : VCMP <967, "vcmpgtsd $vD, $vA, $vB" , v2i64>;
def VCMPGTSDo : VCMPo<967, "vcmpgtsd. $vD, $vA, $vB", v2i64>;
def VCMPGTUD  : VCMP <711, "vcmpgtud $vD, $vA, $vB" , v2i64>;
def VCMPGTUDo : VCMPo<711, "vcmpgtud. $vD, $vA, $vB", v2i64>;
// The cryptography instructions that do not require Category:Vector.Crypto
def VPMSUMB : VX1_Int_Ty<1032, "vpmsumb",
                         int_ppc_altivec_crypto_vpmsumb, v16i8>;
def VPMSUMH : VX1_Int_Ty<1096, "vpmsumh",
                         int_ppc_altivec_crypto_vpmsumh, v8i16>;
def VPMSUMW : VX1_Int_Ty<1160, "vpmsumw",
                         int_ppc_altivec_crypto_vpmsumw, v4i32>;
def VPMSUMD : VX1_Int_Ty<1224, "vpmsumd",
                         int_ppc_altivec_crypto_vpmsumd, v2i64>;
def VPERMXOR : VA1a_Int_Ty<45, "vpermxor",
                           int_ppc_altivec_crypto_vpermxor, v16i8>;
// Vector doubleword integer pack and unpack.
def VPKSDSS : VX1_Int_Ty2<1486, "vpksdss", int_ppc_altivec_vpksdss,
                          v4i32, v2i64>;
def VPKSDUS : VX1_Int_Ty2<1358, "vpksdus", int_ppc_altivec_vpksdus,
                          v4i32, v2i64>;
def VPKUDUM : VXForm_1<1102, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
                       "vpkudum $vD, $vA, $vB", IIC_VecFP,
                       [(set v16i8:$vD,
                         (vpkudum_shuffle v16i8:$vA, v16i8:$vB))]>;
def VPKUDUS : VX1_Int_Ty2<1230, "vpkudus", int_ppc_altivec_vpkudus,
                          v4i32, v2i64>;
def VUPKHSW : VX2_Int_Ty2<1614, "vupkhsw", int_ppc_altivec_vupkhsw,
                          v2i64, v4i32>;
def VUPKLSW : VX2_Int_Ty2<1742, "vupklsw", int_ppc_altivec_vupklsw,
                          v2i64, v4i32>;

// Shuffle patterns for unary and swapped (LE) vector pack modulo.
def:Pat<(vpkudum_unary_shuffle v16i8:$vA, undef),
        (VPKUDUM $vA, $vA)>;
def:Pat<(vpkudum_swapped_shuffle v16i8:$vA, v16i8:$vB),
        (VPKUDUM $vB, $vA)>;

} // end HasP8Altivec
// Crypto instructions (from builtins)
let Predicates = [HasP8Crypto] in {
def VSHASIGMAW : VXCR_Int_Ty<1666, "vshasigmaw",
                             int_ppc_altivec_crypto_vshasigmaw, v4i32>;
def VSHASIGMAD : VXCR_Int_Ty<1730, "vshasigmad",
                             int_ppc_altivec_crypto_vshasigmad, v2i64>;
def VCIPHER : VX1_Int_Ty<1288, "vcipher", int_ppc_altivec_crypto_vcipher,
                         v2i64>;
def VCIPHERLAST : VX1_Int_Ty<1289, "vcipherlast",
                             int_ppc_altivec_crypto_vcipherlast, v2i64>;
def VNCIPHER : VX1_Int_Ty<1352, "vncipher",
                          int_ppc_altivec_crypto_vncipher, v2i64>;
def VNCIPHERLAST : VX1_Int_Ty<1353, "vncipherlast",
                              int_ppc_altivec_crypto_vncipherlast, v2i64>;
def VSBOX : VXBX_Int_Ty<1480, "vsbox", int_ppc_altivec_crypto_vsbox, v2i64>;