//===- PPCInstrAltivec.td - The PowerPC Altivec Extension -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the Altivec extension to the PowerPC instruction set.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Altivec transformation functions and pattern fragments.
//
/// VPKUHUM_shuffle_mask/VPKUWUM_shuffle_mask - Return true if this is a valid
/// shuffle mask for the VPKUHUM or VPKUWUM instructions.
def VPKUHUM_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVPKUHUMShuffleMask(N, false);
}]>;
def VPKUWUM_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVPKUWUMShuffleMask(N, false);
}]>;

def VPKUHUM_unary_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVPKUHUMShuffleMask(N, true);
}]>;
def VPKUWUM_unary_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVPKUWUMShuffleMask(N, true);
}]>;
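
/// VMRG*_shuffle_mask - Return true if this build_vector is a valid shuffle
/// mask for the corresponding vmrgl*/vmrgh* instruction (element sizes of
/// 1, 2, or 4 bytes); the *_unary forms accept the single-input (X,X) case.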
def VMRGLB_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVMRGLShuffleMask(N, 1, false);
}]>;
def VMRGLH_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVMRGLShuffleMask(N, 2, false);
}]>;
def VMRGLW_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVMRGLShuffleMask(N, 4, false);
}]>;
def VMRGHB_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVMRGHShuffleMask(N, 1, false);
}]>;
def VMRGHH_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVMRGHShuffleMask(N, 2, false);
}]>;
def VMRGHW_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVMRGHShuffleMask(N, 4, false);
}]>;

def VMRGLB_unary_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVMRGLShuffleMask(N, 1, true);
}]>;
def VMRGLH_unary_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVMRGLShuffleMask(N, 2, true);
}]>;
def VMRGLW_unary_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVMRGLShuffleMask(N, 4, true);
}]>;
def VMRGHB_unary_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVMRGHShuffleMask(N, 1, true);
}]>;
def VMRGHH_unary_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVMRGHShuffleMask(N, 2, true);
}]>;
def VMRGHW_unary_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVMRGHShuffleMask(N, 4, true);
}]>;
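
// VSLDOI_get_imm xform function: convert a vector_shuffle mask into the
// byte-shift immediate carried by vsldoi.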
def VSLDOI_get_imm : SDNodeXForm<build_vector, [{
  return getI32Imm(PPC::isVSLDOIShuffleMask(N, false));
}]>;
def VSLDOI_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVSLDOIShuffleMask(N, false) != -1;
}], VSLDOI_get_imm>;

/// VSLDOI_unary* - These are used to match vsldoi(X,X), which is turned into
/// vector_shuffle(X,undef,mask) by the dag combiner.
def VSLDOI_unary_get_imm : SDNodeXForm<build_vector, [{
  return getI32Imm(PPC::isVSLDOIShuffleMask(N, true));
}]>;
def VSLDOI_unary_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isVSLDOIShuffleMask(N, true) != -1;
}], VSLDOI_unary_get_imm>;

// VSPLT*_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
def VSPLTB_get_imm : SDNodeXForm<build_vector, [{
  return getI32Imm(PPC::getVSPLTImmediate(N, 1));
}]>;
def VSPLTB_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isSplatShuffleMask(N, 1);
}], VSPLTB_get_imm>;
def VSPLTH_get_imm : SDNodeXForm<build_vector, [{
  return getI32Imm(PPC::getVSPLTImmediate(N, 2));
}]>;
def VSPLTH_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isSplatShuffleMask(N, 2);
}], VSPLTH_get_imm>;
def VSPLTW_get_imm : SDNodeXForm<build_vector, [{
  return getI32Imm(PPC::getVSPLTImmediate(N, 4));
}]>;
def VSPLTW_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isSplatShuffleMask(N, 4);
}], VSPLTW_get_imm>;

// VSPLTISB_get_imm xform function: convert build_vector to VSPLTISB imm.
def VSPLTISB_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 1, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisb : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 1);
}], VSPLTISB_get_imm>;

// VSPLTISH_get_imm xform function: convert build_vector to VSPLTISH imm.
def VSPLTISH_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 2, &Val);
  return getI32Imm(Val);
}]>;
def vecspltish : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 2);
}], VSPLTISH_get_imm>;

// VSPLTISW_get_imm xform function: convert build_vector to VSPLTISW imm.
def VSPLTISW_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 4, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisw : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 4);
}], VSPLTISW_get_imm>;
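
// For example, a v16i8 build_vector whose elements are all -1 is matched by
// vecspltisb; the xform above recovers the sign-extended 5-bit immediate, so
// the node is selected as a single "vspltisb vD, -1".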

//===----------------------------------------------------------------------===//
// Helpers for defining instructions that directly correspond to intrinsics.

// VA1a_Int - A VAForm_1a intrinsic definition.
class VA1a_Int<bits<6> xo, string opc, Intrinsic IntID>
  : VAForm_1a<xo, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, VRRC:$vC),
              !strconcat(opc, " $vD, $vA, $vB, $vC"), VecFP,
              [(set VRRC:$vD, (IntID VRRC:$vA, VRRC:$vB, VRRC:$vC))]>;

// VX1_Int - A VXForm_1 intrinsic definition.
class VX1_Int<bits<11> xo, string opc, Intrinsic IntID>
  : VXForm_1<xo, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
             !strconcat(opc, " $vD, $vA, $vB"), VecFP,
             [(set VRRC:$vD, (IntID VRRC:$vA, VRRC:$vB))]>;

// VX2_Int - A VXForm_2 intrinsic definition.
class VX2_Int<bits<11> xo, string opc, Intrinsic IntID>
  : VXForm_2<xo, (ops VRRC:$vD, VRRC:$vB),
             !strconcat(opc, " $vD, $vB"), VecFP,
             [(set VRRC:$vD, (IntID VRRC:$vB))]>;
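
// For example, "def VADDCUW : VX1_Int<384, "vaddcuw", int_ppc_altivec_vaddcuw>;"
// below expands to a VXForm_1 record whose pattern maps the vaddcuw intrinsic
// directly onto the vaddcuw instruction with extended opcode 384.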

//===----------------------------------------------------------------------===//
// Instruction Definitions.

def IMPLICIT_DEF_VRRC : Pseudo<(ops VRRC:$rD), "; $rD = IMPLICIT_DEF_VRRC",
                               [(set VRRC:$rD, (v4f32 (undef)))]>;

let noResults = 1 in {
def DSS   : DSS_Form<822, (ops u5imm:$A, u5imm:$STRM, u5imm:$ZERO1, u5imm:$ZERO2),
                     "dss $STRM, $A", LdStGeneral /*FIXME*/, []>;
def DST   : DSS_Form<342, (ops u5imm:$T, u5imm:$STRM, GPRC:$rA, GPRC:$rB),
                     "dst $rA, $rB, $STRM, $T", LdStGeneral /*FIXME*/, []>;
def DSTST : DSS_Form<374, (ops u5imm:$T, u5imm:$STRM, GPRC:$rA, GPRC:$rB),
                     "dstst $rA, $rB, $STRM, $T", LdStGeneral /*FIXME*/, []>;
}

def MFVSCR : VXForm_4<1540, (ops VRRC:$vD),
                      "mfvscr $vD", LdStGeneral,
                      [(set VRRC:$vD, (int_ppc_altivec_mfvscr))]>;
def MTVSCR : VXForm_5<1604, (ops VRRC:$vB),
                      "mtvscr $vB", LdStGeneral,
                      [(int_ppc_altivec_mtvscr VRRC:$vB)]>;

let isLoad = 1, PPC970_Unit = 2 in {  // Loads.
def LVEBX: XForm_1<31,   7, (ops VRRC:$vD, memrr:$src),
                   "lvebx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvebx xoaddr:$src))]>;
def LVEHX: XForm_1<31,  39, (ops VRRC:$vD, memrr:$src),
                   "lvehx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvehx xoaddr:$src))]>;
def LVEWX: XForm_1<31,  71, (ops VRRC:$vD, memrr:$src),
                   "lvewx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvewx xoaddr:$src))]>;
def LVX  : XForm_1<31, 103, (ops VRRC:$vD, memrr:$src),
                   "lvx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvx xoaddr:$src))]>;
def LVXL : XForm_1<31, 359, (ops VRRC:$vD, memrr:$src),
                   "lvxl $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvxl xoaddr:$src))]>;
}

def LVSL : XForm_1<31,   6, (ops VRRC:$vD, memrr:$src),
                   "lvsl $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvsl xoaddr:$src))]>,
                   PPC970_Unit_LSU;
def LVSR : XForm_1<31,  38, (ops VRRC:$vD, memrr:$src),
                   "lvsr $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvsr xoaddr:$src))]>,
                   PPC970_Unit_LSU;
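
// lvsl/lvsr produce the permute control vector used with vperm to realign
// unaligned vector data; here they are only reachable through their intrinsics.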

let isStore = 1, noResults = 1, PPC970_Unit = 2 in {  // Stores.
def STVEBX: XForm_8<31, 135, (ops VRRC:$rS, memrr:$dst),
                    "stvebx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvebx VRRC:$rS, xoaddr:$dst)]>;
def STVEHX: XForm_8<31, 167, (ops VRRC:$rS, memrr:$dst),
                    "stvehx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvehx VRRC:$rS, xoaddr:$dst)]>;
def STVEWX: XForm_8<31, 199, (ops VRRC:$rS, memrr:$dst),
                    "stvewx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvewx VRRC:$rS, xoaddr:$dst)]>;
def STVX  : XForm_8<31, 231, (ops VRRC:$rS, memrr:$dst),
                    "stvx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvx VRRC:$rS, xoaddr:$dst)]>;
def STVXL : XForm_8<31, 487, (ops VRRC:$rS, memrr:$dst),
                    "stvxl $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvxl VRRC:$rS, xoaddr:$dst)]>;
}

let PPC970_Unit = 5 in {  // VALU Operations.
// VA-Form instructions.  3-input AltiVec ops.
def VMADDFP : VAForm_1<46, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vmaddfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fadd (fmul VRRC:$vA, VRRC:$vC),
                                             VRRC:$vB))]>,
                       Requires<[FPContractions]>;
def VNMSUBFP: VAForm_1<47, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vnmsubfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fneg (fsub (fmul VRRC:$vA, VRRC:$vC),
                                                   VRRC:$vB)))]>,
                       Requires<[FPContractions]>;

def VMHADDSHS  : VA1a_Int<32, "vmhaddshs", int_ppc_altivec_vmhaddshs>;
def VMHRADDSHS : VA1a_Int<33, "vmhraddshs", int_ppc_altivec_vmhraddshs>;
def VMLADDUHM  : VA1a_Int<34, "vmladduhm", int_ppc_altivec_vmladduhm>;
def VPERM      : VA1a_Int<43, "vperm", int_ppc_altivec_vperm>;
def VSEL       : VA1a_Int<42, "vsel", int_ppc_altivec_vsel>;

// Shuffles.
def VSLDOI : VAForm_2<44, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, u5imm:$SH),
                      "vsldoi $vD, $vA, $vB, $SH", VecFP,
                      [(set VRRC:$vD,
                        (vector_shuffle (v16i8 VRRC:$vA), VRRC:$vB,
                                        VSLDOI_shuffle_mask:$SH))]>;

// VX-Form instructions.  AltiVec arithmetic ops.
def VADDFP : VXForm_1<10, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vaddfp $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (fadd VRRC:$vA, VRRC:$vB))]>;

def VADDUBM : VXForm_1<0, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddubm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VADDUHM : VXForm_1<64, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduhm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v8i16 VRRC:$vA), VRRC:$vB))]>;
def VADDUWM : VXForm_1<128, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduwm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VADDCUW : VX1_Int<384, "vaddcuw", int_ppc_altivec_vaddcuw>;
def VADDSBS : VX1_Int<768, "vaddsbs", int_ppc_altivec_vaddsbs>;
def VADDSHS : VX1_Int<832, "vaddshs", int_ppc_altivec_vaddshs>;
def VADDSWS : VX1_Int<896, "vaddsws", int_ppc_altivec_vaddsws>;
def VADDUBS : VX1_Int<512, "vaddubs", int_ppc_altivec_vaddubs>;
def VADDUHS : VX1_Int<576, "vadduhs", int_ppc_altivec_vadduhs>;
def VADDUWS : VX1_Int<640, "vadduws", int_ppc_altivec_vadduws>;

def VAND  : VXForm_1<1028, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vand $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD, (and (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VANDC : VXForm_1<1092, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vandc $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD, (and (v4i32 VRRC:$vA), (vnot VRRC:$vB)))]>;

def VCFSX  : VXForm_1<842, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vcfsx $vD, $vB, $UIMM", VecFP,
                      [(set VRRC:$vD,
                            (int_ppc_altivec_vcfsx VRRC:$vB, imm:$UIMM))]>;
def VCFUX  : VXForm_1<778, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vcfux $vD, $vB, $UIMM", VecFP,
                      [(set VRRC:$vD,
                            (int_ppc_altivec_vcfux VRRC:$vB, imm:$UIMM))]>;
def VCTSXS : VXForm_1<970, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctsxs $vD, $vB, $UIMM", VecFP,
                      [(set VRRC:$vD,
                            (int_ppc_altivec_vctsxs VRRC:$vB, imm:$UIMM))]>;
def VCTUXS : VXForm_1<906, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctuxs $vD, $vB, $UIMM", VecFP,
                      [(set VRRC:$vD,
                            (int_ppc_altivec_vctuxs VRRC:$vB, imm:$UIMM))]>;
def VEXPTEFP : VX2_Int<394, "vexptefp", int_ppc_altivec_vexptefp>;
def VLOGEFP  : VX2_Int<458, "vlogefp", int_ppc_altivec_vlogefp>;

def VAVGSB : VX1_Int<1282, "vavgsb", int_ppc_altivec_vavgsb>;
def VAVGSH : VX1_Int<1346, "vavgsh", int_ppc_altivec_vavgsh>;
def VAVGSW : VX1_Int<1410, "vavgsw", int_ppc_altivec_vavgsw>;
def VAVGUB : VX1_Int<1026, "vavgub", int_ppc_altivec_vavgub>;
def VAVGUH : VX1_Int<1090, "vavguh", int_ppc_altivec_vavguh>;
def VAVGUW : VX1_Int<1154, "vavguw", int_ppc_altivec_vavguw>;

def VMAXFP : VX1_Int<1034, "vmaxfp", int_ppc_altivec_vmaxfp>;
def VMAXSB : VX1_Int< 258, "vmaxsb", int_ppc_altivec_vmaxsb>;
def VMAXSH : VX1_Int< 322, "vmaxsh", int_ppc_altivec_vmaxsh>;
def VMAXSW : VX1_Int< 386, "vmaxsw", int_ppc_altivec_vmaxsw>;
def VMAXUB : VX1_Int<   2, "vmaxub", int_ppc_altivec_vmaxub>;
def VMAXUH : VX1_Int<  66, "vmaxuh", int_ppc_altivec_vmaxuh>;
def VMAXUW : VX1_Int< 130, "vmaxuw", int_ppc_altivec_vmaxuw>;
def VMINFP : VX1_Int<1098, "vminfp", int_ppc_altivec_vminfp>;
def VMINSB : VX1_Int< 770, "vminsb", int_ppc_altivec_vminsb>;
def VMINSH : VX1_Int< 834, "vminsh", int_ppc_altivec_vminsh>;
def VMINSW : VX1_Int< 898, "vminsw", int_ppc_altivec_vminsw>;
def VMINUB : VX1_Int< 514, "vminub", int_ppc_altivec_vminub>;
def VMINUH : VX1_Int< 578, "vminuh", int_ppc_altivec_vminuh>;
def VMINUW : VX1_Int< 642, "vminuw", int_ppc_altivec_vminuw>;

def VMRGHB : VXForm_1< 12, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vmrghb $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
                                       VRRC:$vB, VMRGHB_shuffle_mask))]>;
def VMRGHH : VXForm_1< 76, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vmrghh $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
                                       VRRC:$vB, VMRGHH_shuffle_mask))]>;
def VMRGHW : VXForm_1<140, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vmrghw $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
                                       VRRC:$vB, VMRGHW_shuffle_mask))]>;
def VMRGLB : VXForm_1<268, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vmrglb $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
                                       VRRC:$vB, VMRGLB_shuffle_mask))]>;
def VMRGLH : VXForm_1<332, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vmrglh $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
                                       VRRC:$vB, VMRGLH_shuffle_mask))]>;
def VMRGLW : VXForm_1<396, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vmrglw $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
                                       VRRC:$vB, VMRGLW_shuffle_mask))]>;

def VMSUMMBM : VA1a_Int<37, "vmsummbm", int_ppc_altivec_vmsummbm>;
def VMSUMSHM : VA1a_Int<40, "vmsumshm", int_ppc_altivec_vmsumshm>;
def VMSUMSHS : VA1a_Int<41, "vmsumshs", int_ppc_altivec_vmsumshs>;
def VMSUMUBM : VA1a_Int<36, "vmsumubm", int_ppc_altivec_vmsumubm>;
def VMSUMUHM : VA1a_Int<38, "vmsumuhm", int_ppc_altivec_vmsumuhm>;
def VMSUMUHS : VA1a_Int<39, "vmsumuhs", int_ppc_altivec_vmsumuhs>;

def VMULESB : VX1_Int<776, "vmulesb", int_ppc_altivec_vmulesb>;
def VMULESH : VX1_Int<840, "vmulesh", int_ppc_altivec_vmulesh>;
def VMULEUB : VX1_Int<520, "vmuleub", int_ppc_altivec_vmuleub>;
def VMULEUH : VX1_Int<584, "vmuleuh", int_ppc_altivec_vmuleuh>;
def VMULOSB : VX1_Int<264, "vmulosb", int_ppc_altivec_vmulosb>;
def VMULOSH : VX1_Int<328, "vmulosh", int_ppc_altivec_vmulosh>;
def VMULOUB : VX1_Int<  8, "vmuloub", int_ppc_altivec_vmuloub>;
def VMULOUH : VX1_Int< 72, "vmulouh", int_ppc_altivec_vmulouh>;

def VREFP     : VX2_Int<266, "vrefp",     int_ppc_altivec_vrefp>;
def VRFIM     : VX2_Int<714, "vrfim",     int_ppc_altivec_vrfim>;
def VRFIN     : VX2_Int<522, "vrfin",     int_ppc_altivec_vrfin>;
def VRFIP     : VX2_Int<650, "vrfip",     int_ppc_altivec_vrfip>;
def VRFIZ     : VX2_Int<586, "vrfiz",     int_ppc_altivec_vrfiz>;
def VRSQRTEFP : VX2_Int<330, "vrsqrtefp", int_ppc_altivec_vrsqrtefp>;

def VSUBCUW : VX1_Int<1408, "vsubcuw", int_ppc_altivec_vsubcuw>;

def VSUBFP  : VXForm_1<74, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubfp $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (fsub VRRC:$vA, VRRC:$vB))]>;
def VSUBUBM : VXForm_1<1024, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsububm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VSUBUHM : VXForm_1<1088, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuhm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v8i16 VRRC:$vA), VRRC:$vB))]>;
def VSUBUWM : VXForm_1<1152, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuwm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VSUBSBS : VX1_Int<1792, "vsubsbs" , int_ppc_altivec_vsubsbs>;
def VSUBSHS : VX1_Int<1856, "vsubshs" , int_ppc_altivec_vsubshs>;
def VSUBSWS : VX1_Int<1920, "vsubsws" , int_ppc_altivec_vsubsws>;
def VSUBUBS : VX1_Int<1536, "vsububs" , int_ppc_altivec_vsububs>;
def VSUBUHS : VX1_Int<1600, "vsubuhs" , int_ppc_altivec_vsubuhs>;
def VSUBUWS : VX1_Int<1664, "vsubuws" , int_ppc_altivec_vsubuws>;
def VSUMSWS : VX1_Int<1928, "vsumsws" , int_ppc_altivec_vsumsws>;
def VSUM2SWS: VX1_Int<1672, "vsum2sws", int_ppc_altivec_vsum2sws>;
def VSUM4SBS: VX1_Int<1800, "vsum4sbs", int_ppc_altivec_vsum4sbs>;
def VSUM4SHS: VX1_Int<1608, "vsum4shs", int_ppc_altivec_vsum4shs>;
def VSUM4UBS: VX1_Int<1544, "vsum4ubs", int_ppc_altivec_vsum4ubs>;

def VNOR : VXForm_1<1284, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vnor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (vnot (or (v4i32 VRRC:$vA), VRRC:$vB)))]>;
def VOR  : VXForm_1<1156, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (or (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VXOR : VXForm_1<1220, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vxor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (xor (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VRLB : VX1_Int<   4, "vrlb", int_ppc_altivec_vrlb>;
def VRLH : VX1_Int<  68, "vrlh", int_ppc_altivec_vrlh>;
def VRLW : VX1_Int< 132, "vrlw", int_ppc_altivec_vrlw>;

def VSL  : VX1_Int< 452, "vsl" , int_ppc_altivec_vsl>;
def VSLO : VX1_Int<1036, "vslo", int_ppc_altivec_vslo>;
def VSLB : VX1_Int< 260, "vslb", int_ppc_altivec_vslb>;
def VSLH : VX1_Int< 324, "vslh", int_ppc_altivec_vslh>;
def VSLW : VX1_Int< 388, "vslw", int_ppc_altivec_vslw>;

def VSPLTB : VXForm_1<524, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltb $vD, $vB, $UIMM", VecPerm,
                      [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vB), (undef),
                                       VSPLTB_shuffle_mask:$UIMM))]>;
def VSPLTH : VXForm_1<588, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vsplth $vD, $vB, $UIMM", VecPerm,
                      [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vB), (undef),
                                       VSPLTH_shuffle_mask:$UIMM))]>;
def VSPLTW : VXForm_1<652, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltw $vD, $vB, $UIMM", VecPerm,
                      [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vB), (undef),
                                       VSPLTW_shuffle_mask:$UIMM))]>;
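// The $UIMM element index for vsplt* is produced by the VSPLT*_get_imm xforms
// attached to the shuffle-mask fragments above, which extract the splatted
// element number from the shuffle mask.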

def VSR   : VX1_Int< 708, "vsr"  , int_ppc_altivec_vsr>;
def VSRO  : VX1_Int<1100, "vsro" , int_ppc_altivec_vsro>;
def VSRAB : VX1_Int< 772, "vsrab", int_ppc_altivec_vsrab>;
def VSRAH : VX1_Int< 836, "vsrah", int_ppc_altivec_vsrah>;
def VSRAW : VX1_Int< 900, "vsraw", int_ppc_altivec_vsraw>;
def VSRB  : VX1_Int< 516, "vsrb" , int_ppc_altivec_vsrb>;
def VSRH  : VX1_Int< 580, "vsrh" , int_ppc_altivec_vsrh>;
def VSRW  : VX1_Int< 644, "vsrw" , int_ppc_altivec_vsrw>;

def VSPLTISB : VXForm_3<780, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisb $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisb:$SIMM))]>;
def VSPLTISH : VXForm_3<844, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltish $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltish:$SIMM))]>;
def VSPLTISW : VXForm_3<908, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisw $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisw:$SIMM))]>;

// Vector Pack.
def VPKPX   : VX1_Int<782, "vpkpx", int_ppc_altivec_vpkpx>;
def VPKSHSS : VX1_Int<398, "vpkshss", int_ppc_altivec_vpkshss>;
def VPKSHUS : VX1_Int<270, "vpkshus", int_ppc_altivec_vpkshus>;
def VPKSWSS : VX1_Int<462, "vpkswss", int_ppc_altivec_vpkswss>;
def VPKSWUS : VX1_Int<334, "vpkswus", int_ppc_altivec_vpkswus>;
def VPKUHUM : VXForm_1<14, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkuhum $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
                                        VRRC:$vB, VPKUHUM_shuffle_mask))]>;
def VPKUHUS : VX1_Int<142, "vpkuhus", int_ppc_altivec_vpkuhus>;
def VPKUWUM : VXForm_1<78, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkuwum $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD, (vector_shuffle (v16i8 VRRC:$vA),
                                        VRRC:$vB, VPKUWUM_shuffle_mask))]>;
def VPKUWUS : VX1_Int<206, "vpkuwus", int_ppc_altivec_vpkuwus>;

// Vector Unpack.
def VUPKHPX : VX2_Int<846, "vupkhpx", int_ppc_altivec_vupkhpx>;
def VUPKHSB : VX2_Int<526, "vupkhsb", int_ppc_altivec_vupkhsb>;
def VUPKHSH : VX2_Int<590, "vupkhsh", int_ppc_altivec_vupkhsh>;
def VUPKLPX : VX2_Int<974, "vupklpx", int_ppc_altivec_vupklpx>;
def VUPKLSB : VX2_Int<654, "vupklsb", int_ppc_altivec_vupklsb>;
def VUPKLSH : VX2_Int<718, "vupklsh", int_ppc_altivec_vupklsh>;

// Altivec Comparisons.

class VCMP<bits<10> xo, string asmstr, ValueType Ty>
  : VXRForm_1<xo, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), asmstr, VecFPCompare,
              [(set VRRC:$vD, (Ty (PPCvcmp VRRC:$vA, VRRC:$vB, xo)))]>;
class VCMPo<bits<10> xo, string asmstr, ValueType Ty>
  : VXRForm_1<xo, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), asmstr, VecFPCompare,
              [(set VRRC:$vD, (Ty (PPCvcmp_o VRRC:$vA, VRRC:$vB, xo)))]> {
  let Defs = [CR6];
  let RC = 1;
}

// f32 element comparisons.
def VCMPBFP   : VCMP <966, "vcmpbfp $vD, $vA, $vB"  , v4f32>;
def VCMPBFPo  : VCMPo<966, "vcmpbfp. $vD, $vA, $vB" , v4f32>;
def VCMPEQFP  : VCMP <198, "vcmpeqfp $vD, $vA, $vB" , v4f32>;
def VCMPEQFPo : VCMPo<198, "vcmpeqfp. $vD, $vA, $vB", v4f32>;
def VCMPGEFP  : VCMP <454, "vcmpgefp $vD, $vA, $vB" , v4f32>;
def VCMPGEFPo : VCMPo<454, "vcmpgefp. $vD, $vA, $vB", v4f32>;
def VCMPGTFP  : VCMP <710, "vcmpgtfp $vD, $vA, $vB" , v4f32>;
def VCMPGTFPo : VCMPo<710, "vcmpgtfp. $vD, $vA, $vB", v4f32>;

// i8 element comparisons.
def VCMPEQUB  : VCMP <  6, "vcmpequb $vD, $vA, $vB" , v16i8>;
def VCMPEQUBo : VCMPo<  6, "vcmpequb. $vD, $vA, $vB", v16i8>;
def VCMPGTSB  : VCMP <774, "vcmpgtsb $vD, $vA, $vB" , v16i8>;
def VCMPGTSBo : VCMPo<774, "vcmpgtsb. $vD, $vA, $vB", v16i8>;
def VCMPGTUB  : VCMP <518, "vcmpgtub $vD, $vA, $vB" , v16i8>;
def VCMPGTUBo : VCMPo<518, "vcmpgtub. $vD, $vA, $vB", v16i8>;

// i16 element comparisons.
def VCMPEQUH  : VCMP < 70, "vcmpequh $vD, $vA, $vB" , v8i16>;
def VCMPEQUHo : VCMPo< 70, "vcmpequh. $vD, $vA, $vB", v8i16>;
def VCMPGTSH  : VCMP <838, "vcmpgtsh $vD, $vA, $vB" , v8i16>;
def VCMPGTSHo : VCMPo<838, "vcmpgtsh. $vD, $vA, $vB", v8i16>;
def VCMPGTUH  : VCMP <582, "vcmpgtuh $vD, $vA, $vB" , v8i16>;
def VCMPGTUHo : VCMPo<582, "vcmpgtuh. $vD, $vA, $vB", v8i16>;

// i32 element comparisons.
def VCMPEQUW  : VCMP <134, "vcmpequw $vD, $vA, $vB" , v4i32>;
def VCMPEQUWo : VCMPo<134, "vcmpequw. $vD, $vA, $vB", v4i32>;
def VCMPGTSW  : VCMP <902, "vcmpgtsw $vD, $vA, $vB" , v4i32>;
def VCMPGTSWo : VCMPo<902, "vcmpgtsw. $vD, $vA, $vB", v4i32>;
def VCMPGTUW  : VCMP <646, "vcmpgtuw $vD, $vA, $vB" , v4i32>;
def VCMPGTUWo : VCMPo<646, "vcmpgtuw. $vD, $vA, $vB", v4i32>;
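
// The record (".") forms above also set CR6, so "all"/"any" style vector
// predicates can be implemented by branching on CR6 after the compare.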

def V_SET0 : VXForm_setzero<1220, (ops VRRC:$vD),
                       "vxor $vD, $vD, $vD", VecFP,
                       [(set VRRC:$vD, (v4f32 immAllZerosV))]>;
}

//===----------------------------------------------------------------------===//
// Additional Altivec Patterns
//
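
// DS* intrinsics.  The leading immediate operand selects the "all"/"transient"
// variants: dssall is dss with A=1, and dstt/dststt are dst/dstst with T=1.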
def : Pat<(int_ppc_altivec_dss imm:$STRM), (DSS 0, imm:$STRM, 0, 0)>;
def : Pat<(int_ppc_altivec_dssall), (DSS 1, 0, 0, 0)>;
def : Pat<(int_ppc_altivec_dst GPRC:$rA, GPRC:$rB, imm:$STRM),
          (DST 0, imm:$STRM, GPRC:$rA, GPRC:$rB)>;
def : Pat<(int_ppc_altivec_dstt GPRC:$rA, GPRC:$rB, imm:$STRM),
          (DST 1, imm:$STRM, GPRC:$rA, GPRC:$rB)>;
def : Pat<(int_ppc_altivec_dstst GPRC:$rA, GPRC:$rB, imm:$STRM),
          (DSTST 0, imm:$STRM, GPRC:$rA, GPRC:$rB)>;
def : Pat<(int_ppc_altivec_dststt GPRC:$rA, GPRC:$rB, imm:$STRM),
          (DSTST 1, imm:$STRM, GPRC:$rA, GPRC:$rB)>;

def : Pat<(v16i8 (undef)), (v16i8 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v8i16 (undef)), (v8i16 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v4i32 (undef)), (v4i32 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v16i8 immAllZerosV), (v16i8 (V_SET0))>;
def : Pat<(v8i16 immAllZerosV), (v8i16 (V_SET0))>;
def : Pat<(v4i32 immAllZerosV), (v4i32 (V_SET0))>;

// Loads.
def : Pat<(v16i8 (load xoaddr:$src)), (v16i8 (LVX xoaddr:$src))>;
def : Pat<(v8i16 (load xoaddr:$src)), (v8i16 (LVX xoaddr:$src))>;
def : Pat<(v4i32 (load xoaddr:$src)), (v4i32 (LVX xoaddr:$src))>;
def : Pat<(v4f32 (load xoaddr:$src)), (v4f32 (LVX xoaddr:$src))>;

// Stores.
def : Pat<(store (v16i8 VRRC:$rS), xoaddr:$dst),
          (STVX (v16i8 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v8i16 VRRC:$rS), xoaddr:$dst),
          (STVX (v8i16 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v4i32 VRRC:$rS), xoaddr:$dst),
          (STVX (v4i32 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v4f32 VRRC:$rS), xoaddr:$dst),
          (STVX (v4f32 VRRC:$rS), xoaddr:$dst)>;
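// Note that lvx/stvx ignore the low four bits of the effective address, so the
// load/store patterns above assume naturally (16-byte) aligned vector memory
// accesses.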

// Bit conversions.
def : Pat<(v16i8 (bitconvert (v8i16 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 VRRC:$src))), (v16i8 VRRC:$src)>;

def : Pat<(v8i16 (bitconvert (v16i8 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 VRRC:$src))), (v8i16 VRRC:$src)>;

def : Pat<(v4i32 (bitconvert (v16i8 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v4f32 VRRC:$src))), (v4i32 VRRC:$src)>;

def : Pat<(v4f32 (bitconvert (v16i8 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32 VRRC:$src))), (v4f32 VRRC:$src)>;
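// All vector types share the VRRC register class, so a bitconvert between them
// is a no-op and simply reuses the source register.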

// Match vsldoi(x,x), vpkuwum(x,x), vpkuhum(x,x).
def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VSLDOI_unary_shuffle_mask:$in),
        (VSLDOI VRRC:$vA, VRRC:$vA, VSLDOI_unary_shuffle_mask:$in)>;
def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef,VPKUWUM_unary_shuffle_mask:$in),
        (VPKUWUM VRRC:$vA, VRRC:$vA)>;
def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef,VPKUHUM_unary_shuffle_mask:$in),
        (VPKUHUM VRRC:$vA, VRRC:$vA)>;
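
// Match vmrg*(x,x).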
def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGLB_unary_shuffle_mask:$in),
        (VMRGLB VRRC:$vA, VRRC:$vA)>;
def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGLH_unary_shuffle_mask:$in),
        (VMRGLH VRRC:$vA, VRRC:$vA)>;
def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGLW_unary_shuffle_mask:$in),
        (VMRGLW VRRC:$vA, VRRC:$vA)>;
def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGHB_unary_shuffle_mask:$in),
        (VMRGHB VRRC:$vA, VRRC:$vA)>;
def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGHH_unary_shuffle_mask:$in),
        (VMRGHH VRRC:$vA, VRRC:$vA)>;
def:Pat<(vector_shuffle (v16i8 VRRC:$vA), undef, VMRGHW_unary_shuffle_mask:$in),
        (VMRGHW VRRC:$vA, VRRC:$vA)>;

// Immediate vector formation with vsplti*.
def : Pat<(v16i8 vecspltisb:$invec), (v16i8 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v16i8 vecspltish:$invec), (v16i8 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v16i8 vecspltisw:$invec), (v16i8 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v8i16 vecspltisb:$invec), (v8i16 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v8i16 vecspltish:$invec), (v8i16 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v8i16 vecspltisw:$invec), (v8i16 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v4i32 vecspltisb:$invec), (v4i32 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v4i32 vecspltish:$invec), (v4i32 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v4i32 vecspltisw:$invec), (v4i32 (VSPLTISW vecspltisw:$invec))>;
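// Each splat-immediate instruction produces the same 128-bit value regardless
// of how the build_vector is typed, so only the matched vector type differs
// between the patterns above.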

// Logical Operations
def : Pat<(v16i8 (vnot VRRC:$vA)), (v16i8 (VNOR VRRC:$vA, VRRC:$vA))>;
def : Pat<(v8i16 (vnot VRRC:$vA)), (v8i16 (VNOR VRRC:$vA, VRRC:$vA))>;
def : Pat<(v4i32 (vnot VRRC:$vA)), (v4i32 (VNOR VRRC:$vA, VRRC:$vA))>;

def : Pat<(v16i8 (and VRRC:$A, VRRC:$B)), (v16i8 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, VRRC:$B)), (v8i16 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (or VRRC:$A, VRRC:$B)), (v16i8 (VOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (or VRRC:$A, VRRC:$B)), (v8i16 (VOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (xor VRRC:$A, VRRC:$B)), (v16i8 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (xor VRRC:$A, VRRC:$B)), (v8i16 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (vnot (or VRRC:$A, VRRC:$B))),(v16i8 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (vnot (or VRRC:$A, VRRC:$B))),(v8i16 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (and VRRC:$A, (vnot VRRC:$B))),
          (v16i8 (VANDC VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, (vnot VRRC:$B))),
          (v8i16 (VANDC VRRC:$A, VRRC:$B))>;

def : Pat<(fmul VRRC:$vA, VRRC:$vB),
          (VMADDFP VRRC:$vA, VRRC:$vB, (V_SET0))>;
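// AltiVec has no standalone packed fmul; a bare fmul is selected as vmaddfp
// with V_SET0 (+0.0 in every lane) supplying the addend.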

// Fused multiply add and multiply sub for packed float.  These are represented
// separately from the real instructions above, for operations that must have
// the additional precision, such as Newton-Raphson (used by divide, sqrt).
def : Pat<(PPCvmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(PPCvnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;

def : Pat<(int_ppc_altivec_vmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(int_ppc_altivec_vnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;

def : Pat<(PPCvperm (v16i8 VRRC:$vA), VRRC:$vB, VRRC:$vC),
          (v16i8 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;