//===- PPCInstrAltivec.td - The PowerPC Altivec Extension -*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under
// the University of Illinois Open Source License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the Altivec extension to the PowerPC instruction set.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Altivec transformation functions and pattern fragments.
//
// VSPLT_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
def VSPLT_get_imm : SDNodeXForm<build_vector, [{
  return getI32Imm(PPC::getVSPLTImmediate(N));
}]>;

def VSPLT_shuffle_mask : PatLeaf<(build_vector), [{
  return PPC::isSplatShuffleMask(N);
}], VSPLT_get_imm>;
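// VSPLT_shuffle_mask only matches shuffle masks in which every element selects
// the same input element; VSPLT_get_imm then turns that mask into the element
// index encoded in the vsplt* UIMM field.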

// VSPLTISB_get_imm xform function: convert build_vector to VSPLTISB imm.
def VSPLTISB_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 1, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisb : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 1);
}], VSPLTISB_get_imm>;

// VSPLTISH_get_imm xform function: convert build_vector to VSPLTISH imm.
def VSPLTISH_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 2, &Val);
  return getI32Imm(Val);
}]>;
def vecspltish : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 2);
}], VSPLTISH_get_imm>;

// VSPLTISW_get_imm xform function: convert build_vector to VSPLTISW imm.
def VSPLTISW_get_imm : SDNodeXForm<build_vector, [{
  char Val;
  PPC::isVecSplatImm(N, 4, &Val);
  return getI32Imm(Val);
}]>;
def vecspltisw : PatLeaf<(build_vector), [{
  return PPC::isVecSplatImm(N, 4);
}], VSPLTISW_get_imm>;
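// The vecspltis[bhw] fragments match a build_vector whose elements are all the
// same small constant (splat of element size 1, 2, or 4 bytes); the paired
// *_get_imm xforms extract that constant as the SIMM field for vspltis[bhw].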

class isVDOT {   // vector dot instruction.
  list<Register> Defs = [CR6];
  bit RC = 1;
}
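
// The "." (record) compare variants below mix in isVDOT: in addition to
// writing $vD, they update CR6 with a summary of the compare result
// (all elements true / all elements false).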

//===----------------------------------------------------------------------===//
// Instruction Definitions.
//===----------------------------------------------------------------------===//

def IMPLICIT_DEF_VRRC : Pseudo<(ops VRRC:$rD), "; $rD = IMPLICIT_DEF_VRRC",
                               [(set VRRC:$rD, (v4f32 (undef)))]>;

let isLoad = 1, PPC970_Unit = 2 in {  // Loads.
def LVEBX: XForm_1<31,   7, (ops VRRC:$vD, memrr:$src),
                   "lvebx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvebx xoaddr:$src))]>;
def LVEHX: XForm_1<31,  39, (ops VRRC:$vD, memrr:$src),
                   "lvehx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvehx xoaddr:$src))]>;
def LVEWX: XForm_1<31,  71, (ops VRRC:$vD, memrr:$src),
                   "lvewx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvewx xoaddr:$src))]>;
def LVX  : XForm_1<31, 103, (ops VRRC:$vD, memrr:$src),
                   "lvx $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvx xoaddr:$src))]>;
def LVXL : XForm_1<31, 359, (ops VRRC:$vD, memrr:$src),
                   "lvxl $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvxl xoaddr:$src))]>;
}

def LVSL : XForm_1<31,   6, (ops VRRC:$vD, memrr:$src),
                   "lvsl $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvsl xoaddr:$src))]>,
                   PPC970_Unit_LSU;
def LVSR : XForm_1<31,  38, (ops VRRC:$vD, memrr:$src),
                   "lvsr $vD, $src", LdStGeneral,
                   [(set VRRC:$vD, (int_ppc_altivec_lvsr xoaddr:$src))]>,
                   PPC970_Unit_LSU;
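// lvsl/lvsr do not load data; they build the permute control vector (from the
// low address bits) that vperm uses to realign misaligned vector data.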

let isStore = 1, noResults = 1, PPC970_Unit = 2 in {   // Stores.
def STVEBX: XForm_8<31, 135, (ops VRRC:$rS, memrr:$dst),
                    "stvebx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvebx VRRC:$rS, xoaddr:$dst)]>;
def STVEHX: XForm_8<31, 167, (ops VRRC:$rS, memrr:$dst),
                    "stvehx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvehx VRRC:$rS, xoaddr:$dst)]>;
def STVEWX: XForm_8<31, 199, (ops VRRC:$rS, memrr:$dst),
                    "stvewx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvewx VRRC:$rS, xoaddr:$dst)]>;
def STVX  : XForm_8<31, 231, (ops VRRC:$rS, memrr:$dst),
                    "stvx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvx VRRC:$rS, xoaddr:$dst)]>;
def STVXL : XForm_8<31, 487, (ops VRRC:$rS, memrr:$dst),
                    "stvxl $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvxl VRRC:$rS, xoaddr:$dst)]>;
}

let PPC970_Unit = 5 in {  // VALU Operations.
// VA-Form instructions.  3-input AltiVec ops.
def VMADDFP : VAForm_1<46, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vmaddfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fadd (fmul VRRC:$vA, VRRC:$vC),
                                             VRRC:$vB))]>,
                       Requires<[FPContractions]>;
def VNMSUBFP: VAForm_1<47, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
                       "vnmsubfp $vD, $vA, $vC, $vB", VecFP,
                       [(set VRRC:$vD, (fneg (fsub (fmul VRRC:$vA, VRRC:$vC),
                                                   VRRC:$vB)))]>,
                       Requires<[FPContractions]>;
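// The two patterns above fold a separate fmul and fadd/fsub into one fused
// operation, so they are only used when FP contraction is allowed
// (the FPContractions predicate).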
def VMHADDSHS  : VAForm_1a<32, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, VRRC:$vC),
                           "vmhaddshs $vD, $vA, $vB, $vC", VecFP,
                           [(set VRRC:$vD,
                            (int_ppc_altivec_vmhaddshs VRRC:$vA, VRRC:$vB, VRRC:$vC))]>;
def VMHRADDSHS : VAForm_1a<33, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, VRRC:$vC),
                           "vmhraddshs $vD, $vA, $vB, $vC", VecFP,
                           [(set VRRC:$vD,
                            (int_ppc_altivec_vmhraddshs VRRC:$vA, VRRC:$vB, VRRC:$vC))]>;
def VPERM      : VAForm_1a<43, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, VRRC:$vC),
                           "vperm $vD, $vA, $vB, $vC", VecPerm,
                           [(set VRRC:$vD,
                            (PPCvperm (v4f32 VRRC:$vA), VRRC:$vB, VRRC:$vC))]>;
def VSLDOI : VAForm_2<44, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, u5imm:$SH),
                      "vsldoi $vD, $vA, $vB, $SH", VecFP,
                      [(set VRRC:$vD,
                       (int_ppc_altivec_vsldoi VRRC:$vA, VRRC:$vB, imm:$SH))]>;
def VSEL   : VAForm_1a<42, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, VRRC:$vC),
                       "vsel $vD, $vA, $vB, $vC", VecFP,
                       [(set VRRC:$vD,
                        (int_ppc_altivec_vsel VRRC:$vA, VRRC:$vB, VRRC:$vC))]>;
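// vperm is the general byte permute: each of the 16 result bytes is selected
// from the 32 bytes of $vA:$vB according to the control vector in $vC.  The
// shuffle patterns at the end of this file ultimately map onto it.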

// VX-Form instructions.  AltiVec arithmetic ops.
def VADDCUW : VXForm_1<384, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddcuw $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vaddcuw VRRC:$vA, VRRC:$vB))]>;
def VADDFP : VXForm_1<10, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vaddfp $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD, (fadd VRRC:$vA, VRRC:$vB))]>;

def VADDUBM : VXForm_1<0, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddubm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VADDUHM : VXForm_1<64, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduhm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v8i16 VRRC:$vA), VRRC:$vB))]>;
def VADDUWM : VXForm_1<128, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduwm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (add (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VADDSBS : VXForm_1<768, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddsbs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vaddsbs VRRC:$vA, VRRC:$vB))]>;
def VADDSHS : VXForm_1<832, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddshs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vaddshs VRRC:$vA, VRRC:$vB))]>;
def VADDSWS : VXForm_1<896, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddsws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vaddsws VRRC:$vA, VRRC:$vB))]>;

def VADDUBS : VXForm_1<512, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vaddubs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vaddubs VRRC:$vA, VRRC:$vB))]>;
def VADDUHS : VXForm_1<576, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduhs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vadduhs VRRC:$vA, VRRC:$vB))]>;
def VADDUWS : VXForm_1<640, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vadduws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vadduws VRRC:$vA, VRRC:$vB))]>;
def VAND  : VXForm_1<1028, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vand $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD, (and (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VANDC : VXForm_1<1092, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vandc $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD, (and (v4i32 VRRC:$vA), (vnot VRRC:$vB)))]>;

def VCFSX  : VXForm_1<842, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vcfsx $vD, $vB, $UIMM", VecFP,
                      [(set VRRC:$vD,
                        (int_ppc_altivec_vcfsx VRRC:$vB, imm:$UIMM))]>;
def VCFUX  : VXForm_1<778, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vcfux $vD, $vB, $UIMM", VecFP,
                      [(set VRRC:$vD,
                        (int_ppc_altivec_vcfux VRRC:$vB, imm:$UIMM))]>;
def VCTSXS : VXForm_1<970, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctsxs $vD, $vB, $UIMM", VecFP,
                      []>;
def VCTUXS : VXForm_1<906, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vctuxs $vD, $vB, $UIMM", VecFP,
                      []>;
def VEXPTEFP : VXForm_2<394, (ops VRRC:$vD, VRRC:$vB),
                        "vexptefp $vD, $vB", VecFP,
                        [(set VRRC:$vD, (int_ppc_altivec_vexptefp VRRC:$vB))]>;
def VLOGEFP  : VXForm_2<458, (ops VRRC:$vD, VRRC:$vB),
                        "vlogefp $vD, $vB", VecFP,
                        [(set VRRC:$vD, (int_ppc_altivec_vlogefp VRRC:$vB))]>;
def VMAXFP : VXForm_1<1034, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vmaxfp $vD, $vA, $vB", VecFP,
                      []>;
def VMINFP : VXForm_1<1098, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vminfp $vD, $vA, $vB", VecFP,
                      []>;
def VMRGHH : VXForm_1<76, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vmrghh $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD,
                        (int_ppc_altivec_vmrghh VRRC:$vA, VRRC:$vB))]>;
def VMRGHW : VXForm_1<140, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vmrghw $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD,
                        (int_ppc_altivec_vmrghw VRRC:$vA, VRRC:$vB))]>;
def VMRGLH : VXForm_1<332, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vmrglh $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD,
                        (int_ppc_altivec_vmrglh VRRC:$vA, VRRC:$vB))]>;
def VMRGLW : VXForm_1<396, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                      "vmrglw $vD, $vA, $vB", VecFP,
                      [(set VRRC:$vD,
                        (int_ppc_altivec_vmrglw VRRC:$vA, VRRC:$vB))]>;

def VMULESB : VXForm_1<776, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vmulesb $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vmulesb VRRC:$vA, VRRC:$vB))]>;
def VMULESH : VXForm_1<840, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vmulesh $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vmulesh VRRC:$vA, VRRC:$vB))]>;
def VMULEUB : VXForm_1<520, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vmuleub $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vmuleub VRRC:$vA, VRRC:$vB))]>;
def VMULEUH : VXForm_1<584, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vmuleuh $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vmuleuh VRRC:$vA, VRRC:$vB))]>;
def VMULOSB : VXForm_1<264, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vmulosb $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vmulosb VRRC:$vA, VRRC:$vB))]>;

def VREFP     : VXForm_2<266, (ops VRRC:$vD, VRRC:$vB),
                         "vrefp $vD, $vB", VecFP,
                         [(set VRRC:$vD, (int_ppc_altivec_vrefp VRRC:$vB))]>;
def VRFIM     : VXForm_2<714, (ops VRRC:$vD, VRRC:$vB),
                         "vrfim $vD, $vB", VecFP,
                         [(set VRRC:$vD, (int_ppc_altivec_vrfim VRRC:$vB))]>;
def VRFIN     : VXForm_2<522, (ops VRRC:$vD, VRRC:$vB),
                         "vrfin $vD, $vB", VecFP,
                         [(set VRRC:$vD, (int_ppc_altivec_vrfin VRRC:$vB))]>;
def VRFIP     : VXForm_2<650, (ops VRRC:$vD, VRRC:$vB),
                         "vrfip $vD, $vB", VecFP,
                         [(set VRRC:$vD, (int_ppc_altivec_vrfip VRRC:$vB))]>;
def VRFIZ     : VXForm_2<586, (ops VRRC:$vD, VRRC:$vB),
                         "vrfiz $vD, $vB", VecFP,
                         [(set VRRC:$vD, (int_ppc_altivec_vrfiz VRRC:$vB))]>;
def VRSQRTEFP : VXForm_2<330, (ops VRRC:$vD, VRRC:$vB),
                         "vrsqrtefp $vD, $vB", VecFP,
                         [(set VRRC:$vD, (int_ppc_altivec_vrsqrtefp VRRC:$vB))]>;
def VSUBCUW : VXForm_1<1408, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubcuw $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vsubcuw VRRC:$vA, VRRC:$vB))]>;
def VSUBFP  : VXForm_1<74, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubfp $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD, (fsub VRRC:$vA, VRRC:$vB))]>;

def VSUBUBM : VXForm_1<1024, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsububm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v16i8 VRRC:$vA), VRRC:$vB))]>;
def VSUBUHM : VXForm_1<1088, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuhm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v8i16 VRRC:$vA), VRRC:$vB))]>;
def VSUBUWM : VXForm_1<1152, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuwm $vD, $vA, $vB", VecGeneral,
                       [(set VRRC:$vD, (sub (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VSUBSBS : VXForm_1<1792, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubsbs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vsubsbs VRRC:$vA, VRRC:$vB))]>;
def VSUBSHS : VXForm_1<1856, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubshs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vsubshs VRRC:$vA, VRRC:$vB))]>;
def VSUBSWS : VXForm_1<1920, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubsws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vsubsws VRRC:$vA, VRRC:$vB))]>;

def VSUBUBS : VXForm_1<1536, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsububs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vsububs VRRC:$vA, VRRC:$vB))]>;
def VSUBUHS : VXForm_1<1600, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuhs $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vsubuhs VRRC:$vA, VRRC:$vB))]>;
def VSUBUWS : VXForm_1<1664, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vsubuws $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vsubuws VRRC:$vA, VRRC:$vB))]>;

def VSUMSWS  : VXForm_1<1928, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                        "vsumsws $vD, $vA, $vB", VecFP,
                        [(set VRRC:$vD,
                          (int_ppc_altivec_vsumsws VRRC:$vA, VRRC:$vB))]>;
def VSUM2SWS : VXForm_1<1672, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                        "vsum2sws $vD, $vA, $vB", VecFP,
                        [(set VRRC:$vD,
                          (int_ppc_altivec_vsum2sws VRRC:$vA, VRRC:$vB))]>;
def VSUM4SBS : VXForm_1<1800, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                        "vsum4sbs $vD, $vA, $vB", VecFP,
                        [(set VRRC:$vD,
                          (int_ppc_altivec_vsum4sbs VRRC:$vA, VRRC:$vB))]>;
def VSUM4SHS : VXForm_1<1608, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                        "vsum4shs $vD, $vA, $vB", VecFP,
                        [(set VRRC:$vD,
                          (int_ppc_altivec_vsum4shs VRRC:$vA, VRRC:$vB))]>;
def VSUM4UBS : VXForm_1<1544, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                        "vsum4ubs $vD, $vA, $vB", VecFP,
                        [(set VRRC:$vD,
                          (int_ppc_altivec_vsum4ubs VRRC:$vA, VRRC:$vB))]>;

def VNOR : VXForm_1<1284, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vnor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (vnot (or (v4i32 VRRC:$vA), VRRC:$vB)))]>;
def VOR  : VXForm_1<1156, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (or (v4i32 VRRC:$vA), VRRC:$vB))]>;
def VXOR : VXForm_1<1220, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vxor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (xor (v4i32 VRRC:$vA), VRRC:$vB))]>;

def VRLB : VXForm_1<4, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vrlb $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                      (int_ppc_altivec_vrlb VRRC:$vA, VRRC:$vB))]>;
def VRLH : VXForm_1<68, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vrlh $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                      (int_ppc_altivec_vrlh VRRC:$vA, VRRC:$vB))]>;
def VRLW : VXForm_1<132, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vrlw $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                      (int_ppc_altivec_vrlw VRRC:$vA, VRRC:$vB))]>;

def VSLO : VXForm_1<1036, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vslo $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                      (int_ppc_altivec_vslo VRRC:$vA, VRRC:$vB))]>;
def VSLB : VXForm_1<260, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vslb $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                      (int_ppc_altivec_vslb VRRC:$vA, VRRC:$vB))]>;
def VSLH : VXForm_1<324, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vslh $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                      (int_ppc_altivec_vslh VRRC:$vA, VRRC:$vB))]>;
def VSLW : VXForm_1<388, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                    "vslw $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD,
                      (int_ppc_altivec_vslw VRRC:$vA, VRRC:$vB))]>;

def VSPLTB : VXForm_1<524, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltb $vD, $vB, $UIMM", VecPerm,
                      []>;
def VSPLTH : VXForm_1<588, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vsplth $vD, $vB, $UIMM", VecPerm,
                      []>;
def VSPLTW : VXForm_1<652, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
                      "vspltw $vD, $vB, $UIMM", VecPerm,
                      [(set VRRC:$vD, (vector_shuffle (v4f32 VRRC:$vB), (undef),
                                       VSPLT_shuffle_mask:$UIMM))]>;

def VSR   : VXForm_1<708, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsr $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vsr VRRC:$vA, VRRC:$vB))]>;
def VSRO  : VXForm_1<1100, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsro $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vsro VRRC:$vA, VRRC:$vB))]>;
def VSRAB : VXForm_1<772, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsrab $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vsrab VRRC:$vA, VRRC:$vB))]>;
def VSRAH : VXForm_1<836, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsrah $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vsrah VRRC:$vA, VRRC:$vB))]>;
def VSRAW : VXForm_1<900, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsraw $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vsraw VRRC:$vA, VRRC:$vB))]>;
def VSRB  : VXForm_1<516, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsrb $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vsrb VRRC:$vA, VRRC:$vB))]>;
def VSRH  : VXForm_1<580, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsrh $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vsrh VRRC:$vA, VRRC:$vB))]>;
def VSRW  : VXForm_1<644, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                     "vsrw $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD,
                       (int_ppc_altivec_vsrw VRRC:$vA, VRRC:$vB))]>;

def VSPLTISB : VXForm_3<780, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisb $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisb:$SIMM))]>;
def VSPLTISH : VXForm_3<844, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltish $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltish:$SIMM))]>;
def VSPLTISW : VXForm_3<908, (ops VRRC:$vD, s5imm:$SIMM),
                        "vspltisw $vD, $SIMM", VecPerm,
                        [(set VRRC:$vD, (v4f32 vecspltisw:$SIMM))]>;
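// vspltis[bhw] materialize a vector whose elements are all copies of the
// sign-extended 5-bit SIMM field; the vecspltis* fragments above, and the
// "Immediate vector formation" patterns near the end of this file, select them
// for constant splat vectors.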

def VPKPX   : VXForm_1<782, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkpx $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vpkpx VRRC:$vA, VRRC:$vB))]>;
def VPKSHSS : VXForm_1<398, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkshss $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vpkshss VRRC:$vA, VRRC:$vB))]>;
def VPKSHUS : VXForm_1<270, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkshus $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vpkshus VRRC:$vA, VRRC:$vB))]>;
def VPKSWSS : VXForm_1<462, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkswss $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vpkswss VRRC:$vA, VRRC:$vB))]>;
def VPKSWUS : VXForm_1<334, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkswus $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vpkswus VRRC:$vA, VRRC:$vB))]>;
def VPKUHUM : VXForm_1<14, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkuhum $vD, $vA, $vB", VecFP,
                       []>;
def VPKUHUS : VXForm_1<142, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkuhus $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vpkuhus VRRC:$vA, VRRC:$vB))]>;
def VPKUWUM : VXForm_1<78, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkuwum $vD, $vA, $vB", VecFP,
                       []>;
def VPKUWUS : VXForm_1<206, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                       "vpkuwus $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD,
                         (int_ppc_altivec_vpkuwus VRRC:$vA, VRRC:$vB))]>;

def VUPKHPX : VXForm_2<846, (ops VRRC:$vD, VRRC:$vB),
                       "vupkhpx $vD, $vB", VecFP,
                       [(set VRRC:$vD, (int_ppc_altivec_vupkhpx VRRC:$vB))]>;
def VUPKHSB : VXForm_2<526, (ops VRRC:$vD, VRRC:$vB),
                       "vupkhsb $vD, $vB", VecFP,
                       [(set VRRC:$vD, (int_ppc_altivec_vupkhsb VRRC:$vB))]>;
def VUPKHSH : VXForm_2<590, (ops VRRC:$vD, VRRC:$vB),
                       "vupkhsh $vD, $vB", VecFP,
                       [(set VRRC:$vD, (int_ppc_altivec_vupkhsh VRRC:$vB))]>;
def VUPKLPX : VXForm_2<974, (ops VRRC:$vD, VRRC:$vB),
                       "vupklpx $vD, $vB", VecFP,
                       [(set VRRC:$vD, (int_ppc_altivec_vupklpx VRRC:$vB))]>;
def VUPKLSB : VXForm_2<654, (ops VRRC:$vD, VRRC:$vB),
                       "vupklsb $vD, $vB", VecFP,
                       [(set VRRC:$vD, (int_ppc_altivec_vupklsb VRRC:$vB))]>;
def VUPKLSH : VXForm_2<718, (ops VRRC:$vD, VRRC:$vB),
                       "vupklsh $vD, $vB", VecFP,
                       [(set VRRC:$vD, (int_ppc_altivec_vupklsh VRRC:$vB))]>;

// Altivec Comparisons.

// f32 element comparisons.
def VCMPBFP   : VXRForm_1<966, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpbfp $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpbfp VRRC:$vA, VRRC:$vB))]>;
def VCMPBFPo  : VXRForm_1<966, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpbfp. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4f32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 966)))]>, isVDOT;
def VCMPEQFP  : VXRForm_1<198, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpeqfp $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpeqfp VRRC:$vA, VRRC:$vB))]>;
def VCMPEQFPo : VXRForm_1<198, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpeqfp. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4f32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 198)))]>, isVDOT;
def VCMPGEFP  : VXRForm_1<454, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgefp $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpgefp VRRC:$vA, VRRC:$vB))]>;
def VCMPGEFPo : VXRForm_1<454, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgefp. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4f32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 454)))]>, isVDOT;
def VCMPGTFP  : VXRForm_1<710, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtfp $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpgtfp VRRC:$vA, VRRC:$vB))]>;
def VCMPGTFPo : VXRForm_1<710, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtfp. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4f32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 710)))]>, isVDOT;
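// Each compare has a plain form (selected from the intrinsic) and a "." record
// form (selected from PPCvcmp_o, whose last operand is the VXR opcode).  The
// record forms additionally set CR6 via isVDOT.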

// i8 element comparisons.
def VCMPEQUB  : VXRForm_1<6, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequb $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpequb VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUBo : VXRForm_1<6, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequb. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v16i8
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 6)))]>, isVDOT;
def VCMPGTSB  : VXRForm_1<774, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsb $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpgtsb VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSBo : VXRForm_1<774, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsb. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v16i8
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 774)))]>, isVDOT;
def VCMPGTUB  : VXRForm_1<518, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtub $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpgtub VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUBo : VXRForm_1<518, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtub. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v16i8
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 518)))]>, isVDOT;

// i16 element comparisons.
def VCMPEQUH  : VXRForm_1<70, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequh $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpequh VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUHo : VXRForm_1<70, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequh. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v8i16
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 70)))]>, isVDOT;
def VCMPGTSH  : VXRForm_1<838, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsh $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpgtsh VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSHo : VXRForm_1<838, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsh. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v8i16
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 838)))]>, isVDOT;
def VCMPGTUH  : VXRForm_1<582, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuh $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpgtuh VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUHo : VXRForm_1<582, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuh. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v8i16
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 582)))]>, isVDOT;

// i32 element comparisons.
def VCMPEQUW  : VXRForm_1<134, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequw $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpequw VRRC:$vA, VRRC:$vB))]>;
def VCMPEQUWo : VXRForm_1<134, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpequw. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4i32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 134)))]>, isVDOT;
def VCMPGTSW  : VXRForm_1<902, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsw $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpgtsw VRRC:$vA, VRRC:$vB))]>;
def VCMPGTSWo : VXRForm_1<902, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtsw. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4i32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 902)))]>, isVDOT;
def VCMPGTUW  : VXRForm_1<646, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuw $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD,
                            (int_ppc_altivec_vcmpgtuw VRRC:$vA, VRRC:$vB))]>;
def VCMPGTUWo : VXRForm_1<646, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
                          "vcmpgtuw. $vD, $vA, $vB", VecFPCompare,
                          [(set VRRC:$vD, (v4i32
                                (PPCvcmp_o VRRC:$vA, VRRC:$vB, 646)))]>, isVDOT;

def V_SET0 : VXForm_setzero<1220, (ops VRRC:$vD),
                            "vxor $vD, $vD, $vD", VecFP,
                            [(set VRRC:$vD, (v4f32 immAllZerosV))]>;
}
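// V_SET0 materializes an all-zero vector by xoring a register with itself,
// so zero vectors need no constant pool load.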

//===----------------------------------------------------------------------===//
// Additional Altivec Patterns
//===----------------------------------------------------------------------===//

// Undef and zero vectors.
def : Pat<(v16i8 (undef)), (v16i8 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v8i16 (undef)), (v8i16 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v4i32 (undef)), (v4i32 (IMPLICIT_DEF_VRRC))>;
def : Pat<(v16i8 immAllZerosV), (v16i8 (V_SET0))>;
def : Pat<(v8i16 immAllZerosV), (v8i16 (V_SET0))>;
def : Pat<(v4i32 immAllZerosV), (v4i32 (V_SET0))>;

// Loads.
def : Pat<(v16i8 (load xoaddr:$src)), (v16i8 (LVX xoaddr:$src))>;
def : Pat<(v8i16 (load xoaddr:$src)), (v8i16 (LVX xoaddr:$src))>;
def : Pat<(v4i32 (load xoaddr:$src)), (v4i32 (LVX xoaddr:$src))>;
def : Pat<(v4f32 (load xoaddr:$src)), (v4f32 (LVX xoaddr:$src))>;

// Stores.
def : Pat<(store (v16i8 VRRC:$rS), xoaddr:$dst),
          (STVX (v16i8 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v8i16 VRRC:$rS), xoaddr:$dst),
          (STVX (v8i16 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v4i32 VRRC:$rS), xoaddr:$dst),
          (STVX (v4i32 VRRC:$rS), xoaddr:$dst)>;
def : Pat<(store (v4f32 VRRC:$rS), xoaddr:$dst),
          (STVX (v4f32 VRRC:$rS), xoaddr:$dst)>;

// Bit conversions.
def : Pat<(v16i8 (bitconvert (v8i16 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 VRRC:$src))), (v16i8 VRRC:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 VRRC:$src))), (v16i8 VRRC:$src)>;

def : Pat<(v8i16 (bitconvert (v16i8 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 VRRC:$src))), (v8i16 VRRC:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 VRRC:$src))), (v8i16 VRRC:$src)>;

def : Pat<(v4i32 (bitconvert (v16i8 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 VRRC:$src))), (v4i32 VRRC:$src)>;
def : Pat<(v4i32 (bitconvert (v4f32 VRRC:$src))), (v4i32 VRRC:$src)>;

def : Pat<(v4f32 (bitconvert (v16i8 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 VRRC:$src))), (v4f32 VRRC:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32 VRRC:$src))), (v4f32 VRRC:$src)>;

// Immediate vector formation with vsplti*.
def : Pat<(v16i8 vecspltisb:$invec), (v16i8 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v16i8 vecspltish:$invec), (v16i8 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v16i8 vecspltisw:$invec), (v16i8 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v8i16 vecspltisb:$invec), (v8i16 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v8i16 vecspltish:$invec), (v8i16 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v8i16 vecspltisw:$invec), (v8i16 (VSPLTISW vecspltisw:$invec))>;

def : Pat<(v4i32 vecspltisb:$invec), (v4i32 (VSPLTISB vecspltisb:$invec))>;
def : Pat<(v4i32 vecspltish:$invec), (v4i32 (VSPLTISH vecspltish:$invec))>;
def : Pat<(v4i32 vecspltisw:$invec), (v4i32 (VSPLTISW vecspltisw:$invec))>;

// Logical Operations.
def : Pat<(v16i8 (and VRRC:$A, VRRC:$B)), (v16i8 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, VRRC:$B)), (v8i16 (VAND VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (or  VRRC:$A, VRRC:$B)), (v16i8 (VOR  VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (or  VRRC:$A, VRRC:$B)), (v8i16 (VOR  VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (xor VRRC:$A, VRRC:$B)), (v16i8 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (xor VRRC:$A, VRRC:$B)), (v8i16 (VXOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (vnot (or VRRC:$A, VRRC:$B))), (v16i8 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (vnot (or VRRC:$A, VRRC:$B))), (v8i16 (VNOR VRRC:$A, VRRC:$B))>;
def : Pat<(v16i8 (and VRRC:$A, (vnot VRRC:$B))),
          (v16i8 (VANDC VRRC:$A, VRRC:$B))>;
def : Pat<(v8i16 (and VRRC:$A, (vnot VRRC:$B))),
          (v8i16 (VANDC VRRC:$A, VRRC:$B))>;

def : Pat<(fmul VRRC:$vA, VRRC:$vB),
          (VMADDFP VRRC:$vA, VRRC:$vB, (V_SET0))>;
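// AltiVec has no plain v4f32 multiply, so a lone fmul is emitted as vmaddfp
// with an all-zero addend produced by V_SET0.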

// Fused multiply-add and multiply-subtract for packed float.  These are
// represented separately from the real instructions above for operations that
// must have the additional precision, such as Newton-Raphson (used by divide
// and sqrt).
def : Pat<(PPCvmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(PPCvnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;

def : Pat<(int_ppc_altivec_vmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(int_ppc_altivec_vnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
          (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(int_ppc_altivec_vperm VRRC:$A, VRRC:$B, VRRC:$C),
          (VPERM VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(vector_shuffle (v4i32 VRRC:$vB), (undef), VSPLT_shuffle_mask:$UIMM),
          (v4i32 (VSPLTW VSPLT_shuffle_mask:$UIMM, VRRC:$vB))>;

def : Pat<(PPCvperm (v4i32 VRRC:$vA), VRRC:$vB, VRRC:$vC),
          (v4i32 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;