//===- PPCInstrAltivec.td - The PowerPC Altivec Extension --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the Altivec extension to the PowerPC instruction set.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Altivec transformation functions and pattern fragments.
//===----------------------------------------------------------------------===//

// VSPLT_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
def VSPLT_get_imm : SDNodeXForm<build_vector, [{
  return getI32Imm(PPC::getVSPLTImmediate(N));
}]>;

def VSPLT_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isSplatShuffleMask(N);
}], VSPLT_get_imm>;
// VSPLTISB_get_imm xform function: convert build_vector to VSPLTISB imm.
def VSPLTISB_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 1, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisb : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 1);
}], VSPLTISB_get_imm>;

// VSPLTISH_get_imm xform function: convert build_vector to VSPLTISH imm.
def VSPLTISH_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 2, &Val);
  return getI32Imm(Val);
}]>;
def vecspltish : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 2);
}], VSPLTISH_get_imm>;

// VSPLTISW_get_imm xform function: convert build_vector to VSPLTISW imm.
def VSPLTISW_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 4, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisw : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 4);
}], VSPLTISW_get_imm>;
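
// The three PatLeaf/XForm pairs above cooperate: each PatLeaf checks that a
// build_vector is a splat of a value that fits in a sign-extended 5-bit
// immediate for the given element size (1, 2, or 4 bytes), and the matching
// XForm extracts that value for use as the vspltis[bhw] SIMM operand.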
class isVDOT {   // vector dot instruction.
  list<Register> Defs = [CR6];
}
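// Instructions marked with isVDOT are the record ("dot") forms of the vector
// compares; besides the vector result they write a summary of the comparison
// into condition register field CR6, hence the implicit def above.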
//===----------------------------------------------------------------------===//
// Instruction Definitions.
//===----------------------------------------------------------------------===//

def IMPLICIT_DEF_VRRC : Pseudo<(ops VRRC:$rD), "; $rD = IMPLICIT_DEF_VRRC",
                               [(set VRRC:$rD, (v4f32 (undef)))]>;
let isLoad = 1, PPC970_Unit = 2 in {  // Loads.
def LVEBX: XForm_1<31,   7, (ops VRRC:$vD, memrr:$src),
                   "lvebx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (v16i8 (PPClve_x xoaddr:$src)))]>;
def LVEHX: XForm_1<31,  39, (ops VRRC:$vD, memrr:$src),
                   "lvehx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (v8i16 (PPClve_x xoaddr:$src)))]>;
def LVEWX: XForm_1<31,  71, (ops VRRC:$vD, memrr:$src),
                   "lvewx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (v4f32 (PPClve_x xoaddr:$src)))]>;
def LVX  : XForm_1<31, 103, (ops VRRC:$vD, memrr:$src),
                   "lvx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (v4f32 (load xoaddr:$src)))]>;

def LVSL : XForm_1<31,   6, (ops VRRC:$vD, GPRC:$base, GPRC:$rA),
                   "lvsl $vD, $base, $rA", LdStGeneral,
                   []>;
def LVSR : XForm_1<31,  38, (ops VRRC:$vD, GPRC:$base, GPRC:$rA),
                   "lvsr $vD, $base, $rA", LdStGeneral,
                   []>;
}
let isStore = 1, noResults = 1, PPC970_Unit = 2 in {  // Stores.
def STVEBX: XForm_8<31, 135, (ops VRRC:$rS, GPRC:$rA, GPRC:$rB),
                    "stvebx $rS, $rA, $rB", LdStGeneral,
                    []>;
def STVEHX: XForm_8<31, 167, (ops VRRC:$rS, GPRC:$rA, GPRC:$rB),
                    "stvehx $rS, $rA, $rB", LdStGeneral,
                    []>;
def STVEWX: XForm_8<31, 199, (ops VRRC:$rS, GPRC:$rA, GPRC:$rB),
                    "stvewx $rS, $rA, $rB", LdStGeneral,
                    []>;
def STVX  : XForm_8<31, 231, (ops VRRC:$rS, memrr:$dst),
                    "stvx $rS, $dst", LdStGeneral,
                    [(store (v4f32 VRRC:$rS), xoaddr:$dst)]>;
}
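// The stve[bhw]x forms store a single byte/halfword/word element of the
// source register; which element is stored is selected by the low-order bits
// of the effective address.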
let PPC970_Unit = 5 in {  // VALU Operations.
// VA-Form instructions.  3-input AltiVec ops.
def VMADDFP : VAForm_1<46, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vmaddfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fadd (fmul VRRC:$vA, VRRC:$vC),
                                             VRRC:$vB))]>,
                       Requires<[FPContractions]>;
def VNMSUBFP: VAForm_1<47, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vnmsubfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fneg (fsub (fmul VRRC:$vA, VRRC:$vC),
                                                   VRRC:$vB)))]>,
                       Requires<[FPContractions]>;

def VPERM   : VAForm_1<43, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vperm $vD, $vA, $vB, $vC", VecPerm,
                       [(set VRRC:$vD,
                             (PPCvperm (v4f32 VRRC:$vA), VRRC:$vB, VRRC:$vC))]>;
def VSLDOI  : VAForm_2<44, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, u5imm:$SH),
                       "vsldoi $vD, $vA, $vB, $SH", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsldoi VRRC:$vA, VRRC:$vB,
                                                     imm:$SH))]>;
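// vsldoi concatenates vA and vB and extracts the 16-byte window starting SH
// bytes into the pair, i.e. a left shift of the double-width vector by a
// constant byte count.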
// VX-Form instructions.  AltiVec arithmetic ops.
def VADDCUW : VXForm_1<384, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddcuw $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddcuw VRRC:$vA, VRRC:$vB))]>;
def VADDFP  : VXForm_1<10, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddfp $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD, (fadd VRRC:$vA, VRRC:$vB))]>;

def VADDUBM : VXForm_1<0, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddubm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VADDUHM : VXForm_1<64, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduhm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v8i16 VRRC:$vA), VRRC:$vB))]>;
def VADDUWM : VXForm_1<128, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduwm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VADDSBS : VXForm_1<768, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddsbs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddsbs VRRC:$vA, VRRC:$vB))]>;
def VADDSHS : VXForm_1<832, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddshs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddshs VRRC:$vA, VRRC:$vB))]>;
def VADDSWS : VXForm_1<896, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddsws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddsws VRRC:$vA, VRRC:$vB))]>;

def VADDUBS : VXForm_1<512, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddubs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vaddubs VRRC:$vA, VRRC:$vB))]>;
def VADDUHS : VXForm_1<576, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduhs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vadduhs VRRC:$vA, VRRC:$vB))]>;
def VADDUWS : VXForm_1<640, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vadduws VRRC:$vA, VRRC:$vB))]>;
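// The vadds*s/vaddu*s forms (and the matching vsub*s below) saturate to the
// signed/unsigned range of the element type instead of wrapping, which is why
// they are exposed only through the target intrinsics rather than plain
// add/sub nodes.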
def VAND  : VXForm_1<1028, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vand $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD, (and (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VANDC : VXForm_1<1092, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vandc $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD, (and (v4i32 VRRC:$vA), (vnot VRRC:$vB)))]>;

def VCFSX : VXForm_1<842, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                     "vcfsx $vD, $vB, $UIMM", VecFP,
                     [(set VRRC:$vD,
                           (int_ppc_altivec_vcfsx VRRC:$vB, imm:$UIMM))]>;
def VCFUX : VXForm_1<778, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                     "vcfux $vD, $vB, $UIMM", VecFP,
                     [(set VRRC:$vD,
                           (int_ppc_altivec_vcfux VRRC:$vB, imm:$UIMM))]>;
def VCTSXS : VXForm_1<970, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctsxs $vD, $vB, $UIMM", VecFP,
                      [(set VRRC:$vD,
                            (int_ppc_altivec_vctsxs VRRC:$vB, imm:$UIMM))]>;
def VCTUXS : VXForm_1<906, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctuxs $vD, $vB, $UIMM", VecFP,
                      [(set VRRC:$vD,
                            (int_ppc_altivec_vctuxs VRRC:$vB, imm:$UIMM))]>;
def VEXPTEFP : VXForm_2<394, (ops VRRC:$vD, VRRC:$vB),
                        "vexptefp $vD, $vB", VecFP,
                        [(set VRRC:$vD, (int_ppc_altivec_vexptefp VRRC:$vB))]>;
def VLOGEFP  : VXForm_2<458, (ops VRRC:$vD, VRRC:$vB),
                        "vlogefp $vD, $vB", VecFP,
                        [(set VRRC:$vD, (int_ppc_altivec_vlogefp VRRC:$vB))]>;
def VMAXFP : VXForm_1<1034, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vmaxfp $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD,
                            (int_ppc_altivec_vmaxfp VRRC:$vA, VRRC:$vB))]>;
def VMINFP : VXForm_1<1098, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vminfp $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD,
                            (int_ppc_altivec_vminfp VRRC:$vA, VRRC:$vB))]>;
def VREFP  : VXForm_2<266, (ops VRRC:$vD, VRRC:$vB),
                      "vrefp $vD, $vB", VecFP,
                      [(set VRRC:$vD, (int_ppc_altivec_vrefp VRRC:$vB))]>;
def VRFIM  : VXForm_2<714, (ops VRRC:$vD, VRRC:$vB),
                      "vrfim $vD, $vB", VecFP,
                      [(set VRRC:$vD, (int_ppc_altivec_vrfim VRRC:$vB))]>;
def VRFIN  : VXForm_2<522, (ops VRRC:$vD, VRRC:$vB),
                      "vrfin $vD, $vB", VecFP,
                      [(set VRRC:$vD, (int_ppc_altivec_vrfin VRRC:$vB))]>;
def VRFIP  : VXForm_2<650, (ops VRRC:$vD, VRRC:$vB),
                      "vrfip $vD, $vB", VecFP,
                      [(set VRRC:$vD, (int_ppc_altivec_vrfip VRRC:$vB))]>;
def VRFIZ  : VXForm_2<586, (ops VRRC:$vD, VRRC:$vB),
                      "vrfiz $vD, $vB", VecFP,
                      [(set VRRC:$vD, (int_ppc_altivec_vrfiz VRRC:$vB))]>;
def VRSQRTEFP : VXForm_2<330, (ops VRRC:$vD, VRRC:$vB),
                         "vrsqrtefp $vD, $vB", VecFP,
                         [(set VRRC:$vD, (int_ppc_altivec_vrsqrtefp VRRC:$vB))]>;
def VSUBCUW : VXForm_1<1408, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubcuw $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubcuw VRRC:$vA, VRRC:$vB))]>;
def VSUBFP  : VXForm_1<74, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubfp $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD, (fsub VRRC:$vA, VRRC:$vB))]>;
def VSUBUBM : VXForm_1<1024, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsububm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VSUBUHM : VXForm_1<1088, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuhm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v8i16 VRRC:$vA), VRRC:$vB))]>;
def VSUBUWM : VXForm_1<1152, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuwm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VSUBSBS : VXForm_1<1792, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubsbs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubsbs VRRC:$vA, VRRC:$vB))]>;
def VSUBSHS : VXForm_1<1856, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubshs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubshs VRRC:$vA, VRRC:$vB))]>;
def VSUBSWS : VXForm_1<1920, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubsws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubsws VRRC:$vA, VRRC:$vB))]>;

def VSUBUBS : VXForm_1<1536, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsububs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsububs VRRC:$vA, VRRC:$vB))]>;
def VSUBUHS : VXForm_1<1600, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuhs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubuhs VRRC:$vA, VRRC:$vB))]>;
def VSUBUWS : VXForm_1<1664, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                             (int_ppc_altivec_vsubuws VRRC:$vA, VRRC:$vB))]>;
def VNOR : VXForm_1<1284, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vnor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (vnot (or (v4i32 VRRC:$vA), VRRC:$vB)))]>;
def VOR  : VXForm_1<1156, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (or (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VXOR : VXForm_1<1220, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vxor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (xor (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VSPLTB : VXForm_1<524, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltb $vD, $vB, $UIMM", VecPerm,
                      []>;
def VSPLTH : VXForm_1<588, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vsplth $vD, $vB, $UIMM", VecPerm,
                      []>;
def VSPLTW : VXForm_1<652, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltw $vD, $vB, $UIMM", VecPerm,
                      [(set VRRC:$vD, (vector_shuffle (v4f32 VRRC:$vB), (undef),
                                                      VSPLT_shuffle_mask:$UIMM))]>;
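// vspltw replicates the word element selected by UIMM into every word of the
// result; splat shuffle masks are converted to that element index by the
// VSPLT_get_imm xform above.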
def VSPLTISB : VXForm_1<780, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisb $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisb:$SIMM))]>;
def VSPLTISH : VXForm_1<844, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltish $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltish:$SIMM))]>;
def VSPLTISW : VXForm_1<908, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisw $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisw:$SIMM))]>;
// Altivec Comparisons.

// f32 element comparisons.
def VCMPBFP   : VXRForm_1<966, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpbfp $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                                (int_ppc_altivec_vcmpbfp VRRC:$vA, VRRC:$vB))]>;
def VCMPBFPo  : VXRForm_1<966, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpbfp. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4f32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 966)))]>, isVDOT;
def VCMPEQFP  : VXRForm_1<198, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpeqfp $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                                (int_ppc_altivec_vcmpeqfp VRRC:$vA, VRRC:$vB))]>;
def VCMPEQFPo : VXRForm_1<198, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpeqfp. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4f32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 198)))]>, isVDOT;
def VCMPGEFP  : VXRForm_1<454, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgefp $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                                (int_ppc_altivec_vcmpgefp VRRC:$vA, VRRC:$vB))]>;
def VCMPGEFPo : VXRForm_1<454, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgefp. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4f32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 454)))]>, isVDOT;
def VCMPGTFP  : VXRForm_1<710, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtfp $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                                (int_ppc_altivec_vcmpgtfp VRRC:$vA, VRRC:$vB))]>;
def VCMPGTFPo : VXRForm_1<710, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtfp. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4f32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 710)))]>, isVDOT;
// i8 element comparisons.
def VCMPEQUB  : VXRForm_1<6, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequb $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                                (int_ppc_altivec_vcmpequb VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUBo : VXRForm_1<6, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequb. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v16i8
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 6)))]>, isVDOT;
def VCMPGTSB  : VXRForm_1<774, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsb $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                                (int_ppc_altivec_vcmpgtsb VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSBo : VXRForm_1<774, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsb. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v16i8
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 774)))]>, isVDOT;
def VCMPGTUB  : VXRForm_1<518, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtub $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                                (int_ppc_altivec_vcmpgtub VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUBo : VXRForm_1<518, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtub. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v16i8
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 518)))]>, isVDOT;
// i16 element comparisons.
def VCMPEQUH  : VXRForm_1<70, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequh $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                                (int_ppc_altivec_vcmpequh VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUHo : VXRForm_1<70, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequh. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v8i16
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 70)))]>, isVDOT;
def VCMPGTSH  : VXRForm_1<838, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsh $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                                (int_ppc_altivec_vcmpgtsh VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSHo : VXRForm_1<838, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsh. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v8i16
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 838)))]>, isVDOT;
def VCMPGTUH  : VXRForm_1<582, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuh $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                                (int_ppc_altivec_vcmpgtuh VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUHo : VXRForm_1<582, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuh. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v8i16
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 582)))]>, isVDOT;
// i32 element comparisons.
def VCMPEQUW  : VXRForm_1<134, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequw $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                                (int_ppc_altivec_vcmpequw VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUWo : VXRForm_1<134, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequw. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4i32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 134)))]>, isVDOT;
def VCMPGTSW  : VXRForm_1<902, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsw $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                                (int_ppc_altivec_vcmpgtsw VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSWo : VXRForm_1<902, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsw. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4i32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 902)))]>, isVDOT;
def VCMPGTUW  : VXRForm_1<646, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuw $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                                (int_ppc_altivec_vcmpgtuw VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUWo : VXRForm_1<646, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuw. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4i32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 646)))]>, isVDOT;
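
// vxor of a register with itself always yields zero, so V_SET0 materializes
// the all-zeros vector without depending on any prior register contents.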
def V_SET0 : VXForm_setzero<1220, (ops VRRC:$vD),
                            "vxor $vD, $vD, $vD", VecFP,
                            [(set VRRC:$vD, (v4f32 immAllZerosV))]>;
}
//===----------------------------------------------------------------------===//
// Additional Altivec Patterns
//===----------------------------------------------------------------------===//

def : Pat<(v16i8 (undef)), (v16i8 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v8i16 (undef)), (v8i16 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v4i32 (undef)), (v4i32 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v16i8 immAllZerosV), (v16i8 (V_SET0))>;
def : Pat<(v8i16 immAllZerosV), (v8i16 (V_SET0))>;
def : Pat<(v4i32 immAllZerosV), (v4i32 (V_SET0))>;
def : Pat<(v16i8 (load xoaddr:$src)), (v16i8 (LVX xoaddr:$src))>;
def : Pat<(v8i16 (load xoaddr:$src)), (v8i16 (LVX xoaddr:$src))>;
def : Pat<(v4i32 (load xoaddr:$src)), (v4i32 (LVX xoaddr:$src))>;

def : Pat<(store (v16i8 VRRC:$rS), xoaddr:$dst),
          (STVX (v16i8 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v8i16 VRRC:$rS), xoaddr:$dst),
          (STVX (v8i16 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v4i32 VRRC:$rS), xoaddr:$dst),
          (STVX (v4i32 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(v16i8 (bitconvert (v8i16 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 VRRC:$src))), (v16i8 VRRC:$src)>;

def : Pat<(v8i16 (bitconvert (v16i8 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 VRRC:$src))), (v8i16 VRRC:$src)>;

def : Pat<(v4i32 (bitconvert (v16i8 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v4f32 VRRC:$src))), (v4i32 VRRC:$src)>;

def : Pat<(v4f32 (bitconvert (v16i8 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32 VRRC:$src))), (v4f32 VRRC:$src)>;
// Immediate vector formation with vsplti*.
def : Pat<(v16i8 vecspltisb:$invec), (v16i8 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v16i8 vecspltish:$invec), (v16i8 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v16i8 vecspltisw:$invec), (v16i8 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v8i16 vecspltisb:$invec), (v8i16 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v8i16 vecspltish:$invec), (v8i16 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v8i16 vecspltisw:$invec), (v8i16 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v4i32 vecspltisb:$invec), (v4i32 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v4i32 vecspltish:$invec), (v4i32 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v4i32 vecspltisw:$invec), (v4i32 (VSPLTISW vecspltisw:$invec))>;
// Logical Operations
def : Pat<(v16i8 (and VRRC:$A, VRRC:$B)), (v16i8 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, VRRC:$B)), (v8i16 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (or VRRC:$A, VRRC:$B)), (v16i8 (VOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (or VRRC:$A, VRRC:$B)), (v8i16 (VOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (xor VRRC:$A, VRRC:$B)), (v16i8 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (xor VRRC:$A, VRRC:$B)), (v8i16 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (vnot (or VRRC:$A, VRRC:$B))),(v16i8 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (vnot (or VRRC:$A, VRRC:$B))),(v8i16 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (and VRRC:$A, (vnot VRRC:$B))),
          (v16i8 (VANDC VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, (vnot VRRC:$B))),
          (v8i16 (VANDC VRRC:$A, VRRC:$B))>;
def : Pat<(fmul VRRC:$vA, VRRC:$vB),
          (VMADDFP VRRC:$vA, VRRC:$vB, (V_SET0))>;
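// AltiVec has no stand-alone vector FP multiply, so a plain fmul is emitted
// as vmaddfp with the all-zeros vector (V_SET0) as the addend.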
// Fused multiply-add and multiply-subtract for packed float.  These are
// represented separately from the real instructions above, for operations
// that must have the additional precision, such as Newton-Raphson (used by
// divide and sqrt).
def : Pat<(PPCvmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(PPCvnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;

def : Pat<(int_ppc_altivec_vmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(int_ppc_altivec_vnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(vector_shuffle (v4i32 VRRC:$vB), (undef), VSPLT_shuffle_mask:$UIMM),
          (v4i32 (VSPLTW VSPLT_shuffle_mask:$UIMM, VRRC:$vB))>;

def : Pat<(PPCvperm (v4i32 VRRC:$vA), VRRC:$vB, VRRC:$vC),
          (v4i32 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;

def : Pat<(v4i32 (PPClve_x xoaddr:$src)),
          (v4i32 (LVEWX xoaddr:$src))>;