1 //===- ARMInstrNEON.td - NEON support for ARM -----------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the ARM NEON instruction set.
12 //===----------------------------------------------------------------------===//
15 //===----------------------------------------------------------------------===//
16 // NEON-specific Operands.
17 //===----------------------------------------------------------------------===//
// NEON modified-immediate operands. All are printed with the NEON
// modified-immediate printer; each splat size gets its own asm-parser
// match class so the parser can range-check the immediate per element size.
def nModImm : Operand<i32> {
  let PrintMethod = "printNEONModImmOperand";
}

def nImmSplatI8AsmOperand : AsmOperandClass { let Name = "NEONi8splat"; }
def nImmSplatI8 : Operand<i32> {
  let PrintMethod = "printNEONModImmOperand";
  let ParserMatchClass = nImmSplatI8AsmOperand;
}
def nImmSplatI16AsmOperand : AsmOperandClass { let Name = "NEONi16splat"; }
def nImmSplatI16 : Operand<i32> {
  let PrintMethod = "printNEONModImmOperand";
  let ParserMatchClass = nImmSplatI16AsmOperand;
}
def nImmSplatI32AsmOperand : AsmOperandClass { let Name = "NEONi32splat"; }
def nImmSplatI32 : Operand<i32> {
  let PrintMethod = "printNEONModImmOperand";
  let ParserMatchClass = nImmSplatI32AsmOperand;
}
def nImmVMOVI32AsmOperand : AsmOperandClass { let Name = "NEONi32vmov"; }
def nImmVMOVI32 : Operand<i32> {
  let PrintMethod = "printNEONModImmOperand";
  let ParserMatchClass = nImmVMOVI32AsmOperand;
}
// Floating-point immediate for VMOV.f32; printed as an FP literal.
def nImmVMOVF32 : Operand<i32> {
  let PrintMethod = "printFPImmOperand";
  let ParserMatchClass = FPImmOperand;
}
def nImmSplatI64AsmOperand : AsmOperandClass { let Name = "NEONi64splat"; }
def nImmSplatI64 : Operand<i32> {
  let PrintMethod = "printNEONModImmOperand";
  let ParserMatchClass = nImmSplatI64AsmOperand;
}
// Vector lane-index operands: an immediate selecting one lane of a 64-bit
// D register — 8 lanes of i8, 4 lanes of i16, or 2 lanes of i32. The
// ImmLeaf predicate enforces the lane-count bound during ISel.
def VectorIndex8Operand  : AsmOperandClass { let Name = "VectorIndex8"; }
def VectorIndex16Operand : AsmOperandClass { let Name = "VectorIndex16"; }
def VectorIndex32Operand : AsmOperandClass { let Name = "VectorIndex32"; }
def VectorIndex8 : Operand<i32>, ImmLeaf<i32, [{
  return ((uint64_t)Imm) < 8;
}]> {
  let ParserMatchClass = VectorIndex8Operand;
  let PrintMethod = "printVectorIndex";
  let MIOperandInfo = (ops i32imm);
}
def VectorIndex16 : Operand<i32>, ImmLeaf<i32, [{
  return ((uint64_t)Imm) < 4;
}]> {
  let ParserMatchClass = VectorIndex16Operand;
  let PrintMethod = "printVectorIndex";
  let MIOperandInfo = (ops i32imm);
}
def VectorIndex32 : Operand<i32>, ImmLeaf<i32, [{
  return ((uint64_t)Imm) < 2;
}]> {
  let ParserMatchClass = VectorIndex32Operand;
  let PrintMethod = "printVectorIndex";
  let MIOperandInfo = (ops i32imm);
}
// Vector-list register operands for the NEON structured load/store
// instructions. All variants share the parseVectorList parser method and
// differ only in list length/printing.

// Register list of one D register.
def VecListOneDAsmOperand : AsmOperandClass {
  let Name = "VecListOneD";
  let ParserMethod = "parseVectorList";
}
def VecListOneD : RegisterOperand<DPR, "printVectorListOne"> {
  let ParserMatchClass = VecListOneDAsmOperand;
}
// Register list of two sequential D registers.
def VecListTwoDAsmOperand : AsmOperandClass {
  let Name = "VecListTwoD";
  let ParserMethod = "parseVectorList";
}
def VecListTwoD : RegisterOperand<DPR, "printVectorListTwo"> {
  let ParserMatchClass = VecListTwoDAsmOperand;
}
// Register list of three sequential D registers.
def VecListThreeDAsmOperand : AsmOperandClass {
  let Name = "VecListThreeD";
  let ParserMethod = "parseVectorList";
}
def VecListThreeD : RegisterOperand<DPR, "printVectorListThree"> {
  let ParserMatchClass = VecListThreeDAsmOperand;
}
// Register list of four sequential D registers.
def VecListFourDAsmOperand : AsmOperandClass {
  let Name = "VecListFourD";
  let ParserMethod = "parseVectorList";
}
def VecListFourD : RegisterOperand<DPR, "printVectorListFour"> {
  let ParserMatchClass = VecListFourDAsmOperand;
}
// Register list of two D registers spaced by 2 (two sequential Q registers).
def VecListTwoQAsmOperand : AsmOperandClass {
  let Name = "VecListTwoQ";
  let ParserMethod = "parseVectorList";
}
def VecListTwoQ : RegisterOperand<DPR, "printVectorListTwo"> {
  let ParserMatchClass = VecListTwoQAsmOperand;
}
//===----------------------------------------------------------------------===//
// NEON-specific DAG Nodes.
//===----------------------------------------------------------------------===//

// Two-operand vector compares use SDTARMVCMP (integer result, matching
// source operands); compare-against-zero variants take a single source
// operand and use SDTARMVCMPZ.
def SDTARMVCMP    : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<1, 2>]>;
def SDTARMVCMPZ   : SDTypeProfile<1, 1, []>;

def NEONvceq      : SDNode<"ARMISD::VCEQ", SDTARMVCMP>;
def NEONvceqz     : SDNode<"ARMISD::VCEQZ", SDTARMVCMPZ>;
def NEONvcge      : SDNode<"ARMISD::VCGE", SDTARMVCMP>;
def NEONvcgez     : SDNode<"ARMISD::VCGEZ", SDTARMVCMPZ>;
def NEONvclez     : SDNode<"ARMISD::VCLEZ", SDTARMVCMPZ>;
def NEONvcgeu     : SDNode<"ARMISD::VCGEU", SDTARMVCMP>;
def NEONvcgt      : SDNode<"ARMISD::VCGT", SDTARMVCMP>;
def NEONvcgtz     : SDNode<"ARMISD::VCGTZ", SDTARMVCMPZ>;
def NEONvcltz     : SDNode<"ARMISD::VCLTZ", SDTARMVCMPZ>;
def NEONvcgtu     : SDNode<"ARMISD::VCGTU", SDTARMVCMP>;
def NEONvtst      : SDNode<"ARMISD::VTST", SDTARMVCMP>;
// Types for vector shift by immediates. The "SHX" version is for long and
// narrow operations where the source and destination vectors have different
// types. The "SHINS" version is for shift and insert operations.
def SDTARMVSH     : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
                                         SDTCisVT<2, i32>]>;
def SDTARMVSHX    : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                         SDTCisVT<2, i32>]>;
def SDTARMVSHINS  : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
                                         SDTCisSameAs<0, 2>, SDTCisVT<3, i32>]>;

def NEONvshl      : SDNode<"ARMISD::VSHL", SDTARMVSH>;
def NEONvshrs     : SDNode<"ARMISD::VSHRs", SDTARMVSH>;
def NEONvshru     : SDNode<"ARMISD::VSHRu", SDTARMVSH>;
def NEONvshlls    : SDNode<"ARMISD::VSHLLs", SDTARMVSHX>;
def NEONvshllu    : SDNode<"ARMISD::VSHLLu", SDTARMVSHX>;
def NEONvshlli    : SDNode<"ARMISD::VSHLLi", SDTARMVSHX>;
def NEONvshrn     : SDNode<"ARMISD::VSHRN", SDTARMVSHX>;

def NEONvrshrs    : SDNode<"ARMISD::VRSHRs", SDTARMVSH>;
def NEONvrshru    : SDNode<"ARMISD::VRSHRu", SDTARMVSH>;
def NEONvrshrn    : SDNode<"ARMISD::VRSHRN", SDTARMVSHX>;

def NEONvqshls    : SDNode<"ARMISD::VQSHLs", SDTARMVSH>;
def NEONvqshlu    : SDNode<"ARMISD::VQSHLu", SDTARMVSH>;
def NEONvqshlsu   : SDNode<"ARMISD::VQSHLsu", SDTARMVSH>;
def NEONvqshrns   : SDNode<"ARMISD::VQSHRNs", SDTARMVSHX>;
def NEONvqshrnu   : SDNode<"ARMISD::VQSHRNu", SDTARMVSHX>;
def NEONvqshrnsu  : SDNode<"ARMISD::VQSHRNsu", SDTARMVSHX>;

def NEONvqrshrns  : SDNode<"ARMISD::VQRSHRNs", SDTARMVSHX>;
def NEONvqrshrnu  : SDNode<"ARMISD::VQRSHRNu", SDTARMVSHX>;
def NEONvqrshrnsu : SDNode<"ARMISD::VQRSHRNsu", SDTARMVSHX>;

def NEONvsli      : SDNode<"ARMISD::VSLI", SDTARMVSHINS>;
def NEONvsri      : SDNode<"ARMISD::VSRI", SDTARMVSHINS>;
// Get-lane: i32 result extracted from an integer vector at an i32 lane index.
def SDTARMVGETLN  : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                         SDTCisVT<2, i32>]>;
def NEONvgetlaneu : SDNode<"ARMISD::VGETLANEu", SDTARMVGETLN>;
def NEONvgetlanes : SDNode<"ARMISD::VGETLANEs", SDTARMVGETLN>;

// Vector built from an encoded modified-immediate (i32 payload).
def SDTARMVMOVIMM : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
def NEONvmovImm   : SDNode<"ARMISD::VMOVIMM", SDTARMVMOVIMM>;
def NEONvmvnImm   : SDNode<"ARMISD::VMVNIMM", SDTARMVMOVIMM>;
def NEONvmovFPImm : SDNode<"ARMISD::VMOVFPIMM", SDTARMVMOVIMM>;

// VORR/VBIC with a modified-immediate second operand.
def SDTARMVORRIMM : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
                                         SDTCisVT<2, i32>]>;
def NEONvorrImm   : SDNode<"ARMISD::VORRIMM", SDTARMVORRIMM>;
def NEONvbicImm   : SDNode<"ARMISD::VBICIMM", SDTARMVORRIMM>;

// Bitwise select: all three source vectors match the result type.
def NEONvbsl      : SDNode<"ARMISD::VBSL",
                           SDTypeProfile<1, 3, [SDTCisVec<0>,
                                                SDTCisSameAs<0, 1>,
                                                SDTCisSameAs<0, 2>,
                                                SDTCisSameAs<0, 3>]>>;

def NEONvdup      : SDNode<"ARMISD::VDUP", SDTypeProfile<1, 1, [SDTCisVec<0>]>>;
// VDUPLANE can produce a quad-register result from a double-register source,
// so the result is not constrained to match the source.
def NEONvduplane  : SDNode<"ARMISD::VDUPLANE",
                           SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
                                                SDTCisVT<2, i32>]>>;

// Extract a vector from a pair at an i32 byte offset.
def SDTARMVEXT    : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
                                         SDTCisSameAs<0, 2>, SDTCisVT<3, i32>]>;
def NEONvext      : SDNode<"ARMISD::VEXT", SDTARMVEXT>;

// Single-operand shuffles (element reversal within 64/32/16-bit groups).
def SDTARMVSHUF   : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0, 1>]>;
def NEONvrev64    : SDNode<"ARMISD::VREV64", SDTARMVSHUF>;
def NEONvrev32    : SDNode<"ARMISD::VREV32", SDTARMVSHUF>;
def NEONvrev16    : SDNode<"ARMISD::VREV16", SDTARMVSHUF>;

// Two-result shuffles: zip/unzip/transpose produce two vectors from two.
def SDTARMVSHUF2  : SDTypeProfile<2, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
                                         SDTCisSameAs<0, 2>,
                                         SDTCisSameAs<0, 3>]>;
def NEONzip       : SDNode<"ARMISD::VZIP", SDTARMVSHUF2>;
def NEONuzp       : SDNode<"ARMISD::VUZP", SDTARMVSHUF2>;
def NEONtrn       : SDNode<"ARMISD::VTRN", SDTARMVSHUF2>;

// Long multiplies: result element type differs from the matching sources.
def SDTARMVMULL   : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                         SDTCisSameAs<1, 2>]>;
def NEONvmulls    : SDNode<"ARMISD::VMULLs", SDTARMVMULL>;
def NEONvmullu    : SDNode<"ARMISD::VMULLu", SDTARMVMULL>;

// Scalar f32 max/min.
def SDTARMFMAX    : SDTypeProfile<1, 2, [SDTCisVT<0, f32>, SDTCisSameAs<0, 1>,
                                         SDTCisSameAs<0, 2>]>;
def NEONfmax      : SDNode<"ARMISD::FMAX", SDTARMFMAX>;
def NEONfmin      : SDNode<"ARMISD::FMIN", SDTARMFMAX>;
// All-zeros vector: a VMOVIMM whose decoded modified-immediate is a 32-bit
// element of value 0.
def NEONimmAllZerosV: PatLeaf<(NEONvmovImm (i32 timm)), [{
  ConstantSDNode *ConstVal = cast<ConstantSDNode>(N->getOperand(0));
  unsigned EltBits = 0;
  uint64_t EltVal = ARM_AM::decodeNEONModImm(ConstVal->getZExtValue(), EltBits);
  return (EltBits == 32 && EltVal == 0);
}]>;

// All-ones vector: a VMOVIMM whose decoded modified-immediate is an 8-bit
// element of value 0xff.
def NEONimmAllOnesV: PatLeaf<(NEONvmovImm (i32 timm)), [{
  ConstantSDNode *ConstVal = cast<ConstantSDNode>(N->getOperand(0));
  unsigned EltBits = 0;
  uint64_t EltVal = ARM_AM::decodeNEONModImm(ConstVal->getZExtValue(), EltBits);
  return (EltBits == 8 && EltVal == 0xff);
}]>;
//===----------------------------------------------------------------------===//
// NEON load / store instructions
//===----------------------------------------------------------------------===//

// Use VLDM to load a Q register as a D register pair.
// This is a pseudo instruction that is expanded to VLDMD after reg alloc.
def VLDMQIA
  : PseudoVFPLdStM<(outs QPR:$dst), (ins GPR:$Rn),
                   IIC_fpLoad_m, "",
                   [(set QPR:$dst, (v2f64 (load GPR:$Rn)))]>;

// Use VSTM to store a Q register as a D register pair.
// This is a pseudo instruction that is expanded to VSTMD after reg alloc.
def VSTMQIA
  : PseudoVFPLdStM<(outs), (ins QPR:$src, GPR:$Rn),
                   IIC_fpStore_m, "",
                   [(store (v2f64 QPR:$src), GPR:$Rn)]>;

// Classes for VLD* pseudo-instructions with multi-register operands.
// These are expanded to real instructions after register allocation.
class VLDQPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QPR:$dst), (ins addrmode6:$addr), itin, "">;
class VLDQWBPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QPR:$dst, GPR:$wb),
                (ins addrmode6:$addr, am6offset:$offset), itin,
                "$addr.addr = $wb">;
class VLDQWBfixedPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QPR:$dst, GPR:$wb),
                (ins addrmode6:$addr), itin,
                "$addr.addr = $wb">;
class VLDQWBregisterPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QPR:$dst, GPR:$wb),
                (ins addrmode6:$addr, rGPR:$offset), itin,
                "$addr.addr = $wb">;
class VLDQQPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QQPR:$dst), (ins addrmode6:$addr), itin, "">;
class VLDQQWBPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QQPR:$dst, GPR:$wb),
                (ins addrmode6:$addr, am6offset:$offset), itin,
                "$addr.addr = $wb">;
class VLDQQQQPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QQQQPR:$dst), (ins addrmode6:$addr, QQQQPR:$src),itin,
                "$src = $dst">;
class VLDQQQQWBPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QQQQPR:$dst, GPR:$wb),
                (ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src), itin,
                "$addr.addr = $wb, $src = $dst">;
// NOTE(review): this chunk was extracted with each line's original file
// number fused into the text and with some lines (closing braces and some
// encoding `let` lines) dropped; the code text below is preserved exactly
// as found. Reconstruct against the upstream ARMInstrNEON.td before use.
// VLD1 multi-element loads: the _fixed writeback forms take no index
// register (Rm is hard-wired to 0b1101), the _register forms take rGPR $Rm.
290 let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
292 // VLD1 : Vector Load (multiple single elements)
293 class VLD1D<bits<4> op7_4, string Dt>
294 : NLdSt<0,0b10,0b0111,op7_4, (outs VecListOneD:$Vd),
295 (ins addrmode6:$Rn), IIC_VLD1,
296 "vld1", Dt, "$Vd, $Rn", "", []> {
299 let DecoderMethod = "DecodeVLDInstruction";
301 class VLD1Q<bits<4> op7_4, string Dt>
302 : NLdSt<0,0b10,0b1010,op7_4, (outs VecListTwoD:$Vd),
303 (ins addrmode6:$Rn), IIC_VLD1x2,
304 "vld1", Dt, "$Vd, $Rn", "", []> {
306 let Inst{5-4} = Rn{5-4};
307 let DecoderMethod = "DecodeVLDInstruction";
310 def VLD1d8 : VLD1D<{0,0,0,?}, "8">;
311 def VLD1d16 : VLD1D<{0,1,0,?}, "16">;
312 def VLD1d32 : VLD1D<{1,0,0,?}, "32">;
313 def VLD1d64 : VLD1D<{1,1,0,?}, "64">;
315 def VLD1q8 : VLD1Q<{0,0,?,?}, "8">;
316 def VLD1q16 : VLD1Q<{0,1,?,?}, "16">;
317 def VLD1q32 : VLD1Q<{1,0,?,?}, "32">;
318 def VLD1q64 : VLD1Q<{1,1,?,?}, "64">;
320 def VLD1q8Pseudo : VLDQPseudo<IIC_VLD1x2>;
321 def VLD1q16Pseudo : VLDQPseudo<IIC_VLD1x2>;
322 def VLD1q32Pseudo : VLDQPseudo<IIC_VLD1x2>;
323 def VLD1q64Pseudo : VLDQPseudo<IIC_VLD1x2>;
325 // ...with address register writeback:
326 multiclass VLD1DWB<bits<4> op7_4, string Dt> {
327 def _fixed : NLdSt<0,0b10, 0b0111,op7_4, (outs VecListOneD:$Vd, GPR:$wb),
328 (ins addrmode6:$Rn), IIC_VLD1u,
329 "vld1", Dt, "$Vd, $Rn!",
330 "$Rn.addr = $wb", []> {
331 let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
333 let DecoderMethod = "DecodeVLDInstruction";
334 let AsmMatchConverter = "cvtVLDwbFixed";
336 def _register : NLdSt<0,0b10,0b0111,op7_4, (outs VecListOneD:$Vd, GPR:$wb),
337 (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1u,
338 "vld1", Dt, "$Vd, $Rn, $Rm",
339 "$Rn.addr = $wb", []> {
341 let DecoderMethod = "DecodeVLDInstruction";
342 let AsmMatchConverter = "cvtVLDwbRegister";
345 multiclass VLD1QWB<bits<4> op7_4, string Dt> {
346 def _fixed : NLdSt<0,0b10,0b1010,op7_4, (outs VecListTwoD:$Vd, GPR:$wb),
347 (ins addrmode6:$Rn), IIC_VLD1x2u,
348 "vld1", Dt, "$Vd, $Rn!",
349 "$Rn.addr = $wb", []> {
350 let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
351 let Inst{5-4} = Rn{5-4};
352 let DecoderMethod = "DecodeVLDInstruction";
353 let AsmMatchConverter = "cvtVLDwbFixed";
355 def _register : NLdSt<0,0b10,0b1010,op7_4, (outs VecListTwoD:$Vd, GPR:$wb),
356 (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
357 "vld1", Dt, "$Vd, $Rn, $Rm",
358 "$Rn.addr = $wb", []> {
359 let Inst{5-4} = Rn{5-4};
360 let DecoderMethod = "DecodeVLDInstruction";
361 let AsmMatchConverter = "cvtVLDwbRegister";
365 defm VLD1d8wb : VLD1DWB<{0,0,0,?}, "8">;
366 defm VLD1d16wb : VLD1DWB<{0,1,0,?}, "16">;
367 defm VLD1d32wb : VLD1DWB<{1,0,0,?}, "32">;
368 defm VLD1d64wb : VLD1DWB<{1,1,0,?}, "64">;
369 defm VLD1q8wb : VLD1QWB<{0,0,?,?}, "8">;
370 defm VLD1q16wb : VLD1QWB<{0,1,?,?}, "16">;
371 defm VLD1q32wb : VLD1QWB<{1,0,?,?}, "32">;
372 defm VLD1q64wb : VLD1QWB<{1,1,?,?}, "64">;
374 def VLD1q8PseudoWB_fixed : VLDQWBfixedPseudo<IIC_VLD1x2u>;
375 def VLD1q16PseudoWB_fixed : VLDQWBfixedPseudo<IIC_VLD1x2u>;
376 def VLD1q32PseudoWB_fixed : VLDQWBfixedPseudo<IIC_VLD1x2u>;
377 def VLD1q64PseudoWB_fixed : VLDQWBfixedPseudo<IIC_VLD1x2u>;
378 def VLD1q8PseudoWB_register : VLDQWBregisterPseudo<IIC_VLD1x2u>;
379 def VLD1q16PseudoWB_register : VLDQWBregisterPseudo<IIC_VLD1x2u>;
380 def VLD1q32PseudoWB_register : VLDQWBregisterPseudo<IIC_VLD1x2u>;
381 def VLD1q64PseudoWB_register : VLDQWBregisterPseudo<IIC_VLD1x2u>;
383 // ...with 3 registers
384 class VLD1D3<bits<4> op7_4, string Dt>
385 : NLdSt<0,0b10,0b0110,op7_4, (outs VecListThreeD:$Vd),
386 (ins addrmode6:$Rn), IIC_VLD1x3, "vld1", Dt,
387 "$Vd, $Rn", "", []> {
390 let DecoderMethod = "DecodeVLDInstruction";
392 multiclass VLD1D3WB<bits<4> op7_4, string Dt> {
393 def _fixed : NLdSt<0,0b10,0b0110, op7_4, (outs VecListThreeD:$Vd, GPR:$wb),
394 (ins addrmode6:$Rn), IIC_VLD1x2u,
395 "vld1", Dt, "$Vd, $Rn!",
396 "$Rn.addr = $wb", []> {
397 let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
399 let DecoderMethod = "DecodeVLDInstruction";
400 let AsmMatchConverter = "cvtVLDwbFixed";
402 def _register : NLdSt<0,0b10,0b0110,op7_4, (outs VecListThreeD:$Vd, GPR:$wb),
403 (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
404 "vld1", Dt, "$Vd, $Rn, $Rm",
405 "$Rn.addr = $wb", []> {
407 let DecoderMethod = "DecodeVLDInstruction";
408 let AsmMatchConverter = "cvtVLDwbRegister";
412 def VLD1d8T : VLD1D3<{0,0,0,?}, "8">;
413 def VLD1d16T : VLD1D3<{0,1,0,?}, "16">;
414 def VLD1d32T : VLD1D3<{1,0,0,?}, "32">;
415 def VLD1d64T : VLD1D3<{1,1,0,?}, "64">;
417 defm VLD1d8Twb : VLD1D3WB<{0,0,0,?}, "8">;
418 defm VLD1d16Twb : VLD1D3WB<{0,1,0,?}, "16">;
419 defm VLD1d32Twb : VLD1D3WB<{1,0,0,?}, "32">;
420 defm VLD1d64Twb : VLD1D3WB<{1,1,0,?}, "64">;
422 def VLD1d64TPseudo : VLDQQPseudo<IIC_VLD1x3>;
424 // ...with 4 registers
425 class VLD1D4<bits<4> op7_4, string Dt>
426 : NLdSt<0, 0b10, 0b0010, op7_4, (outs VecListFourD:$Vd),
427 (ins addrmode6:$Rn), IIC_VLD1x4, "vld1", Dt,
428 "$Vd, $Rn", "", []> {
430 let Inst{5-4} = Rn{5-4};
431 let DecoderMethod = "DecodeVLDInstruction";
433 multiclass VLD1D4WB<bits<4> op7_4, string Dt> {
434 def _fixed : NLdSt<0,0b10,0b0010, op7_4, (outs VecListFourD:$Vd, GPR:$wb),
435 (ins addrmode6:$Rn), IIC_VLD1x2u,
436 "vld1", Dt, "$Vd, $Rn!",
437 "$Rn.addr = $wb", []> {
438 let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
439 let Inst{5-4} = Rn{5-4};
440 let DecoderMethod = "DecodeVLDInstruction";
441 let AsmMatchConverter = "cvtVLDwbFixed";
443 def _register : NLdSt<0,0b10,0b0010,op7_4, (outs VecListFourD:$Vd, GPR:$wb),
444 (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
445 "vld1", Dt, "$Vd, $Rn, $Rm",
446 "$Rn.addr = $wb", []> {
447 let Inst{5-4} = Rn{5-4};
448 let DecoderMethod = "DecodeVLDInstruction";
449 let AsmMatchConverter = "cvtVLDwbRegister";
453 def VLD1d8Q : VLD1D4<{0,0,?,?}, "8">;
454 def VLD1d16Q : VLD1D4<{0,1,?,?}, "16">;
455 def VLD1d32Q : VLD1D4<{1,0,?,?}, "32">;
456 def VLD1d64Q : VLD1D4<{1,1,?,?}, "64">;
458 defm VLD1d8Qwb : VLD1D4WB<{0,0,?,?}, "8">;
459 defm VLD1d16Qwb : VLD1D4WB<{0,1,?,?}, "16">;
460 defm VLD1d32Qwb : VLD1D4WB<{1,0,?,?}, "32">;
461 defm VLD1d64Qwb : VLD1D4WB<{1,1,?,?}, "64">;
463 def VLD1d64QPseudo : VLDQQPseudo<IIC_VLD1x4>;
// NOTE(review): extraction residue — original line numbers are fused into
// each line and some closing-brace lines were dropped; code text preserved
// byte-for-byte below. VLD2 variants are parameterized by the destination
// vector-list operand: VecListTwoD (adjacent D regs), VecListFourD, and
// VecListTwoQ for the double-spaced "b" forms.
465 // VLD2 : Vector Load (multiple 2-element structures)
466 class VLD2D<bits<4> op11_8, bits<4> op7_4, string Dt, RegisterOperand VdTy>
467 : NLdSt<0, 0b10, op11_8, op7_4, (outs VdTy:$Vd),
468 (ins addrmode6:$Rn), IIC_VLD2,
469 "vld2", Dt, "$Vd, $Rn", "", []> {
471 let Inst{5-4} = Rn{5-4};
472 let DecoderMethod = "DecodeVLDInstruction";
474 class VLD2Q<bits<4> op7_4, string Dt, RegisterOperand VdTy>
475 : NLdSt<0, 0b10, 0b0011, op7_4,
477 (ins addrmode6:$Rn), IIC_VLD2x2,
478 "vld2", Dt, "$Vd, $Rn", "", []> {
480 let Inst{5-4} = Rn{5-4};
481 let DecoderMethod = "DecodeVLDInstruction";
484 def VLD2d8 : VLD2D<0b1000, {0,0,?,?}, "8", VecListTwoD>;
485 def VLD2d16 : VLD2D<0b1000, {0,1,?,?}, "16", VecListTwoD>;
486 def VLD2d32 : VLD2D<0b1000, {1,0,?,?}, "32", VecListTwoD>;
488 def VLD2q8 : VLD2Q<{0,0,?,?}, "8", VecListFourD>;
489 def VLD2q16 : VLD2Q<{0,1,?,?}, "16", VecListFourD>;
490 def VLD2q32 : VLD2Q<{1,0,?,?}, "32", VecListFourD>;
492 def VLD2d8Pseudo : VLDQPseudo<IIC_VLD2>;
493 def VLD2d16Pseudo : VLDQPseudo<IIC_VLD2>;
494 def VLD2d32Pseudo : VLDQPseudo<IIC_VLD2>;
496 def VLD2q8Pseudo : VLDQQPseudo<IIC_VLD2x2>;
497 def VLD2q16Pseudo : VLDQQPseudo<IIC_VLD2x2>;
498 def VLD2q32Pseudo : VLDQQPseudo<IIC_VLD2x2>;
500 // ...with address register writeback:
501 class VLD2DWB<bits<4> op11_8, bits<4> op7_4, string Dt, RegisterOperand VdTy>
502 : NLdSt<0, 0b10, op11_8, op7_4, (outs VdTy:$Vd, GPR:$wb),
503 (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD2u,
504 "vld2", Dt, "$Vd, $Rn$Rm",
505 "$Rn.addr = $wb", []> {
506 let Inst{5-4} = Rn{5-4};
507 let DecoderMethod = "DecodeVLDInstruction";
509 class VLD2QWB<bits<4> op7_4, string Dt, RegisterOperand VdTy>
510 : NLdSt<0, 0b10, 0b0011, op7_4,
511 (outs VdTy:$Vd, GPR:$wb),
512 (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD2x2u,
513 "vld2", Dt, "$Vd, $Rn$Rm",
514 "$Rn.addr = $wb", []> {
515 let Inst{5-4} = Rn{5-4};
516 let DecoderMethod = "DecodeVLDInstruction";
519 def VLD2d8_UPD : VLD2DWB<0b1000, {0,0,?,?}, "8", VecListTwoD>;
520 def VLD2d16_UPD : VLD2DWB<0b1000, {0,1,?,?}, "16", VecListTwoD>;
521 def VLD2d32_UPD : VLD2DWB<0b1000, {1,0,?,?}, "32", VecListTwoD>;
523 def VLD2q8_UPD : VLD2QWB<{0,0,?,?}, "8", VecListFourD>;
524 def VLD2q16_UPD : VLD2QWB<{0,1,?,?}, "16", VecListFourD>;
525 def VLD2q32_UPD : VLD2QWB<{1,0,?,?}, "32", VecListFourD>;
527 def VLD2d8Pseudo_UPD : VLDQWBPseudo<IIC_VLD2u>;
528 def VLD2d16Pseudo_UPD : VLDQWBPseudo<IIC_VLD2u>;
529 def VLD2d32Pseudo_UPD : VLDQWBPseudo<IIC_VLD2u>;
531 def VLD2q8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD2x2u>;
532 def VLD2q16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD2x2u>;
533 def VLD2q32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD2x2u>;
535 // ...with double-spaced registers
536 def VLD2b8 : VLD2D<0b1001, {0,0,?,?}, "8", VecListTwoQ>;
537 def VLD2b16 : VLD2D<0b1001, {0,1,?,?}, "16", VecListTwoQ>;
538 def VLD2b32 : VLD2D<0b1001, {1,0,?,?}, "32", VecListTwoQ>;
539 def VLD2b8_UPD : VLD2DWB<0b1001, {0,0,?,?}, "8", VecListTwoQ>;
540 def VLD2b16_UPD : VLD2DWB<0b1001, {0,1,?,?}, "16", VecListTwoQ>;
541 def VLD2b32_UPD : VLD2DWB<0b1001, {1,0,?,?}, "32", VecListTwoQ>;
// NOTE(review): extraction residue — original line numbers are fused into
// each line and some closing-brace / encoding lines were dropped; code text
// preserved byte-for-byte below. op11_8 = 0b0100/0b0000 selects the
// sequential-register VLD3/VLD4 forms; 0b0101/0b0001 the double-spaced ones.
543 // VLD3 : Vector Load (multiple 3-element structures)
544 class VLD3D<bits<4> op11_8, bits<4> op7_4, string Dt>
545 : NLdSt<0, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3),
546 (ins addrmode6:$Rn), IIC_VLD3,
547 "vld3", Dt, "\\{$Vd, $dst2, $dst3\\}, $Rn", "", []> {
550 let DecoderMethod = "DecodeVLDInstruction";
553 def VLD3d8 : VLD3D<0b0100, {0,0,0,?}, "8">;
554 def VLD3d16 : VLD3D<0b0100, {0,1,0,?}, "16">;
555 def VLD3d32 : VLD3D<0b0100, {1,0,0,?}, "32">;
557 def VLD3d8Pseudo : VLDQQPseudo<IIC_VLD3>;
558 def VLD3d16Pseudo : VLDQQPseudo<IIC_VLD3>;
559 def VLD3d32Pseudo : VLDQQPseudo<IIC_VLD3>;
561 // ...with address register writeback:
562 class VLD3DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
563 : NLdSt<0, 0b10, op11_8, op7_4,
564 (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, GPR:$wb),
565 (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD3u,
566 "vld3", Dt, "\\{$Vd, $dst2, $dst3\\}, $Rn$Rm",
567 "$Rn.addr = $wb", []> {
569 let DecoderMethod = "DecodeVLDInstruction";
572 def VLD3d8_UPD : VLD3DWB<0b0100, {0,0,0,?}, "8">;
573 def VLD3d16_UPD : VLD3DWB<0b0100, {0,1,0,?}, "16">;
574 def VLD3d32_UPD : VLD3DWB<0b0100, {1,0,0,?}, "32">;
576 def VLD3d8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3u>;
577 def VLD3d16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3u>;
578 def VLD3d32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3u>;
580 // ...with double-spaced registers:
581 def VLD3q8 : VLD3D<0b0101, {0,0,0,?}, "8">;
582 def VLD3q16 : VLD3D<0b0101, {0,1,0,?}, "16">;
583 def VLD3q32 : VLD3D<0b0101, {1,0,0,?}, "32">;
584 def VLD3q8_UPD : VLD3DWB<0b0101, {0,0,0,?}, "8">;
585 def VLD3q16_UPD : VLD3DWB<0b0101, {0,1,0,?}, "16">;
586 def VLD3q32_UPD : VLD3DWB<0b0101, {1,0,0,?}, "32">;
588 def VLD3q8Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
589 def VLD3q16Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
590 def VLD3q32Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
592 // ...alternate versions to be allocated odd register numbers:
593 def VLD3q8oddPseudo : VLDQQQQPseudo<IIC_VLD3>;
594 def VLD3q16oddPseudo : VLDQQQQPseudo<IIC_VLD3>;
595 def VLD3q32oddPseudo : VLDQQQQPseudo<IIC_VLD3>;
597 def VLD3q8oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
598 def VLD3q16oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
599 def VLD3q32oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
601 // VLD4 : Vector Load (multiple 4-element structures)
602 class VLD4D<bits<4> op11_8, bits<4> op7_4, string Dt>
603 : NLdSt<0, 0b10, op11_8, op7_4,
604 (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4),
605 (ins addrmode6:$Rn), IIC_VLD4,
606 "vld4", Dt, "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn", "", []> {
608 let Inst{5-4} = Rn{5-4};
609 let DecoderMethod = "DecodeVLDInstruction";
612 def VLD4d8 : VLD4D<0b0000, {0,0,?,?}, "8">;
613 def VLD4d16 : VLD4D<0b0000, {0,1,?,?}, "16">;
614 def VLD4d32 : VLD4D<0b0000, {1,0,?,?}, "32">;
616 def VLD4d8Pseudo : VLDQQPseudo<IIC_VLD4>;
617 def VLD4d16Pseudo : VLDQQPseudo<IIC_VLD4>;
618 def VLD4d32Pseudo : VLDQQPseudo<IIC_VLD4>;
620 // ...with address register writeback:
621 class VLD4DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
622 : NLdSt<0, 0b10, op11_8, op7_4,
623 (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
624 (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD4u,
625 "vld4", Dt, "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn$Rm",
626 "$Rn.addr = $wb", []> {
627 let Inst{5-4} = Rn{5-4};
628 let DecoderMethod = "DecodeVLDInstruction";
631 def VLD4d8_UPD : VLD4DWB<0b0000, {0,0,?,?}, "8">;
632 def VLD4d16_UPD : VLD4DWB<0b0000, {0,1,?,?}, "16">;
633 def VLD4d32_UPD : VLD4DWB<0b0000, {1,0,?,?}, "32">;
635 def VLD4d8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4u>;
636 def VLD4d16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4u>;
637 def VLD4d32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4u>;
639 // ...with double-spaced registers:
640 def VLD4q8 : VLD4D<0b0001, {0,0,?,?}, "8">;
641 def VLD4q16 : VLD4D<0b0001, {0,1,?,?}, "16">;
642 def VLD4q32 : VLD4D<0b0001, {1,0,?,?}, "32">;
643 def VLD4q8_UPD : VLD4DWB<0b0001, {0,0,?,?}, "8">;
644 def VLD4q16_UPD : VLD4DWB<0b0001, {0,1,?,?}, "16">;
645 def VLD4q32_UPD : VLD4DWB<0b0001, {1,0,?,?}, "32">;
647 def VLD4q8Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
648 def VLD4q16Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
649 def VLD4q32Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
651 // ...alternate versions to be allocated odd register numbers:
652 def VLD4q8oddPseudo : VLDQQQQPseudo<IIC_VLD4>;
653 def VLD4q16oddPseudo : VLDQQQQPseudo<IIC_VLD4>;
654 def VLD4q32oddPseudo : VLDQQQQPseudo<IIC_VLD4>;
656 def VLD4q8oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
657 def VLD4q16oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
658 def VLD4q32oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
660 } // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
// Classes for VLD*LN pseudo-instructions with multi-register operands.
// These are expanded to real instructions after register allocation.
// The $src = $dst constraint ties the input vector to the output so the
// untouched lanes are preserved; WB variants also tie $addr.addr to $wb.
class VLDQLNPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QPR:$dst),
                (ins addrmode6:$addr, QPR:$src, nohash_imm:$lane),
                itin, "$src = $dst">;
class VLDQLNWBPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QPR:$dst, GPR:$wb),
                (ins addrmode6:$addr, am6offset:$offset, QPR:$src,
                 nohash_imm:$lane), itin, "$addr.addr = $wb, $src = $dst">;
class VLDQQLNPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QQPR:$dst),
                (ins addrmode6:$addr, QQPR:$src, nohash_imm:$lane),
                itin, "$src = $dst">;
class VLDQQLNWBPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QQPR:$dst, GPR:$wb),
                (ins addrmode6:$addr, am6offset:$offset, QQPR:$src,
                 nohash_imm:$lane), itin, "$addr.addr = $wb, $src = $dst">;
class VLDQQQQLNPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QQQQPR:$dst),
                (ins addrmode6:$addr, QQQQPR:$src, nohash_imm:$lane),
                itin, "$src = $dst">;
class VLDQQQQLNWBPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs QQQQPR:$dst, GPR:$wb),
                (ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src,
                 nohash_imm:$lane), itin, "$addr.addr = $wb, $src = $dst">;
// NOTE(review): extraction residue — original line numbers are fused into
// each line and some lines (pattern tails, constraint strings, closing
// braces) were dropped; code text preserved byte-for-byte below.
// VLD1LN loads a single element into one lane, leaving other lanes intact;
// the 32-bit form uses addrmode6oneL32 (alignment-restricted variant).
689 // VLD1LN : Vector Load (single element to one lane)
690 class VLD1LN<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
692 : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd),
693 (ins addrmode6:$Rn, DPR:$src, nohash_imm:$lane),
694 IIC_VLD1ln, "vld1", Dt, "\\{$Vd[$lane]\\}, $Rn",
696 [(set DPR:$Vd, (vector_insert (Ty DPR:$src),
697 (i32 (LoadOp addrmode6:$Rn)),
700 let DecoderMethod = "DecodeVLD1LN";
702 class VLD1LN32<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
704 : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd),
705 (ins addrmode6oneL32:$Rn, DPR:$src, nohash_imm:$lane),
706 IIC_VLD1ln, "vld1", Dt, "\\{$Vd[$lane]\\}, $Rn",
708 [(set DPR:$Vd, (vector_insert (Ty DPR:$src),
709 (i32 (LoadOp addrmode6oneL32:$Rn)),
712 let DecoderMethod = "DecodeVLD1LN";
714 class VLD1QLNPseudo<ValueType Ty, PatFrag LoadOp> : VLDQLNPseudo<IIC_VLD1ln> {
715 let Pattern = [(set QPR:$dst, (vector_insert (Ty QPR:$src),
716 (i32 (LoadOp addrmode6:$addr)),
720 def VLD1LNd8 : VLD1LN<0b0000, {?,?,?,0}, "8", v8i8, extloadi8> {
721 let Inst{7-5} = lane{2-0};
723 def VLD1LNd16 : VLD1LN<0b0100, {?,?,0,?}, "16", v4i16, extloadi16> {
724 let Inst{7-6} = lane{1-0};
727 def VLD1LNd32 : VLD1LN32<0b1000, {?,0,?,?}, "32", v2i32, load> {
728 let Inst{7} = lane{0};
733 def VLD1LNq8Pseudo : VLD1QLNPseudo<v16i8, extloadi8>;
734 def VLD1LNq16Pseudo : VLD1QLNPseudo<v8i16, extloadi16>;
735 def VLD1LNq32Pseudo : VLD1QLNPseudo<v4i32, load>;
737 def : Pat<(vector_insert (v2f32 DPR:$src),
738 (f32 (load addrmode6:$addr)), imm:$lane),
739 (VLD1LNd32 addrmode6:$addr, DPR:$src, imm:$lane)>;
740 def : Pat<(vector_insert (v4f32 QPR:$src),
741 (f32 (load addrmode6:$addr)), imm:$lane),
742 (VLD1LNq32Pseudo addrmode6:$addr, QPR:$src, imm:$lane)>;
744 let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
746 // ...with address register writeback:
747 class VLD1LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
748 : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd, GPR:$wb),
749 (ins addrmode6:$Rn, am6offset:$Rm,
750 DPR:$src, nohash_imm:$lane), IIC_VLD1lnu, "vld1", Dt,
751 "\\{$Vd[$lane]\\}, $Rn$Rm",
752 "$src = $Vd, $Rn.addr = $wb", []> {
753 let DecoderMethod = "DecodeVLD1LN";
756 def VLD1LNd8_UPD : VLD1LNWB<0b0000, {?,?,?,0}, "8"> {
757 let Inst{7-5} = lane{2-0};
759 def VLD1LNd16_UPD : VLD1LNWB<0b0100, {?,?,0,?}, "16"> {
760 let Inst{7-6} = lane{1-0};
763 def VLD1LNd32_UPD : VLD1LNWB<0b1000, {?,0,?,?}, "32"> {
764 let Inst{7} = lane{0};
769 def VLD1LNq8Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD1lnu>;
770 def VLD1LNq16Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD1lnu>;
771 def VLD1LNq32Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD1lnu>;
// NOTE(review): extraction residue — original line numbers are fused into
// each line and some closing-brace lines were dropped; code text preserved
// byte-for-byte below. The lane number is encoded in Inst{7-5}/{7-6}/{7}
// depending on element size; "q" variants use double-spaced registers.
773 // VLD2LN : Vector Load (single 2-element structure to one lane)
774 class VLD2LN<bits<4> op11_8, bits<4> op7_4, string Dt>
775 : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2),
776 (ins addrmode6:$Rn, DPR:$src1, DPR:$src2, nohash_imm:$lane),
777 IIC_VLD2ln, "vld2", Dt, "\\{$Vd[$lane], $dst2[$lane]\\}, $Rn",
778 "$src1 = $Vd, $src2 = $dst2", []> {
781 let DecoderMethod = "DecodeVLD2LN";
784 def VLD2LNd8 : VLD2LN<0b0001, {?,?,?,?}, "8"> {
785 let Inst{7-5} = lane{2-0};
787 def VLD2LNd16 : VLD2LN<0b0101, {?,?,0,?}, "16"> {
788 let Inst{7-6} = lane{1-0};
790 def VLD2LNd32 : VLD2LN<0b1001, {?,0,0,?}, "32"> {
791 let Inst{7} = lane{0};
794 def VLD2LNd8Pseudo : VLDQLNPseudo<IIC_VLD2ln>;
795 def VLD2LNd16Pseudo : VLDQLNPseudo<IIC_VLD2ln>;
796 def VLD2LNd32Pseudo : VLDQLNPseudo<IIC_VLD2ln>;
798 // ...with double-spaced registers:
799 def VLD2LNq16 : VLD2LN<0b0101, {?,?,1,?}, "16"> {
800 let Inst{7-6} = lane{1-0};
802 def VLD2LNq32 : VLD2LN<0b1001, {?,1,0,?}, "32"> {
803 let Inst{7} = lane{0};
806 def VLD2LNq16Pseudo : VLDQQLNPseudo<IIC_VLD2ln>;
807 def VLD2LNq32Pseudo : VLDQQLNPseudo<IIC_VLD2ln>;
809 // ...with address register writeback:
810 class VLD2LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
811 : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
812 (ins addrmode6:$Rn, am6offset:$Rm,
813 DPR:$src1, DPR:$src2, nohash_imm:$lane), IIC_VLD2lnu, "vld2", Dt,
814 "\\{$Vd[$lane], $dst2[$lane]\\}, $Rn$Rm",
815 "$src1 = $Vd, $src2 = $dst2, $Rn.addr = $wb", []> {
817 let DecoderMethod = "DecodeVLD2LN";
820 def VLD2LNd8_UPD : VLD2LNWB<0b0001, {?,?,?,?}, "8"> {
821 let Inst{7-5} = lane{2-0};
823 def VLD2LNd16_UPD : VLD2LNWB<0b0101, {?,?,0,?}, "16"> {
824 let Inst{7-6} = lane{1-0};
826 def VLD2LNd32_UPD : VLD2LNWB<0b1001, {?,0,0,?}, "32"> {
827 let Inst{7} = lane{0};
830 def VLD2LNd8Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD2lnu>;
831 def VLD2LNd16Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD2lnu>;
832 def VLD2LNd32Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD2lnu>;
834 def VLD2LNq16_UPD : VLD2LNWB<0b0101, {?,?,1,?}, "16"> {
835 let Inst{7-6} = lane{1-0};
837 def VLD2LNq32_UPD : VLD2LNWB<0b1001, {?,1,0,?}, "32"> {
838 let Inst{7} = lane{0};
841 def VLD2LNq16Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD2lnu>;
842 def VLD2LNq32Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD2lnu>;
// VLD3LN : Vector Load (single 3-element structure to one lane)
class VLD3LN<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3),
            (ins addrmode6:$Rn, DPR:$src1, DPR:$src2, DPR:$src3,
             nohash_imm:$lane), IIC_VLD3ln, "vld3", Dt,
            "\\{$Vd[$lane], $dst2[$lane], $dst3[$lane]\\}, $Rn",
            "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3", []> {
  // NOTE(review): restored — non-writeback forms fix Rm; verify vs. history.
  let Rm = 0b1111;
  let DecoderMethod = "DecodeVLD3LN";
}

def VLD3LNd8  : VLD3LN<0b0010, {?,?,?,0}, "8"> {
  let Inst{7-5} = lane{2-0};
}
def VLD3LNd16 : VLD3LN<0b0110, {?,?,0,0}, "16"> {
  let Inst{7-6} = lane{1-0};
}
def VLD3LNd32 : VLD3LN<0b1010, {?,0,0,0}, "32"> {
  let Inst{7} = lane{0};
}

def VLD3LNd8Pseudo  : VLDQQLNPseudo<IIC_VLD3ln>;
def VLD3LNd16Pseudo : VLDQQLNPseudo<IIC_VLD3ln>;
def VLD3LNd32Pseudo : VLDQQLNPseudo<IIC_VLD3ln>;

// ...with double-spaced registers:
def VLD3LNq16 : VLD3LN<0b0110, {?,?,1,0}, "16"> {
  let Inst{7-6} = lane{1-0};
}
def VLD3LNq32 : VLD3LN<0b1010, {?,1,0,0}, "32"> {
  let Inst{7} = lane{0};
}

def VLD3LNq16Pseudo : VLDQQQQLNPseudo<IIC_VLD3ln>;
def VLD3LNq32Pseudo : VLDQQQQLNPseudo<IIC_VLD3ln>;
// ...with address register writeback:
class VLD3LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdStLn<1, 0b10, op11_8, op7_4,
            (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, GPR:$wb),
            (ins addrmode6:$Rn, am6offset:$Rm,
             DPR:$src1, DPR:$src2, DPR:$src3, nohash_imm:$lane),
            IIC_VLD3lnu, "vld3", Dt,
            "\\{$Vd[$lane], $dst2[$lane], $dst3[$lane]\\}, $Rn$Rm",
            "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3, $Rn.addr = $wb",
            []> {
  let DecoderMethod = "DecodeVLD3LN";
}

def VLD3LNd8_UPD  : VLD3LNWB<0b0010, {?,?,?,0}, "8"> {
  let Inst{7-5} = lane{2-0};
}
def VLD3LNd16_UPD : VLD3LNWB<0b0110, {?,?,0,0}, "16"> {
  let Inst{7-6} = lane{1-0};
}
def VLD3LNd32_UPD : VLD3LNWB<0b1010, {?,0,0,0}, "32"> {
  let Inst{7} = lane{0};
}

def VLD3LNd8Pseudo_UPD  : VLDQQLNWBPseudo<IIC_VLD3lnu>;
def VLD3LNd16Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD3lnu>;
def VLD3LNd32Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD3lnu>;

def VLD3LNq16_UPD : VLD3LNWB<0b0110, {?,?,1,0}, "16"> {
  let Inst{7-6} = lane{1-0};
}
def VLD3LNq32_UPD : VLD3LNWB<0b1010, {?,1,0,0}, "32"> {
  let Inst{7} = lane{0};
}

def VLD3LNq16Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD3lnu>;
def VLD3LNq32Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD3lnu>;
// VLD4LN : Vector Load (single 4-element structure to one lane)
class VLD4LN<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdStLn<1, 0b10, op11_8, op7_4,
            (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4),
            (ins addrmode6:$Rn, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4,
             nohash_imm:$lane), IIC_VLD4ln, "vld4", Dt,
            "\\{$Vd[$lane], $dst2[$lane], $dst3[$lane], $dst4[$lane]\\}, $Rn",
            "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3, $src4 = $dst4", []> {
  // NOTE(review): next two lines restored — verify vs. history.
  let Rm = 0b1111;
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVLD4LN";
}

def VLD4LNd8  : VLD4LN<0b0011, {?,?,?,?}, "8"> {
  let Inst{7-5} = lane{2-0};
}
def VLD4LNd16 : VLD4LN<0b0111, {?,?,0,?}, "16"> {
  let Inst{7-6} = lane{1-0};
}
def VLD4LNd32 : VLD4LN<0b1011, {?,0,?,?}, "32"> {
  let Inst{7} = lane{0};
  let Inst{5} = Rn{5};
}

def VLD4LNd8Pseudo  : VLDQQLNPseudo<IIC_VLD4ln>;
def VLD4LNd16Pseudo : VLDQQLNPseudo<IIC_VLD4ln>;
def VLD4LNd32Pseudo : VLDQQLNPseudo<IIC_VLD4ln>;

// ...with double-spaced registers:
def VLD4LNq16 : VLD4LN<0b0111, {?,?,1,?}, "16"> {
  let Inst{7-6} = lane{1-0};
}
def VLD4LNq32 : VLD4LN<0b1011, {?,1,?,?}, "32"> {
  let Inst{7} = lane{0};
  let Inst{5} = Rn{5};
}

def VLD4LNq16Pseudo : VLDQQQQLNPseudo<IIC_VLD4ln>;
def VLD4LNq32Pseudo : VLDQQQQLNPseudo<IIC_VLD4ln>;
// ...with address register writeback:
class VLD4LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdStLn<1, 0b10, op11_8, op7_4,
            (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
            (ins addrmode6:$Rn, am6offset:$Rm,
             DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4, nohash_imm:$lane),
            IIC_VLD4lnu, "vld4", Dt,
            "\\{$Vd[$lane], $dst2[$lane], $dst3[$lane], $dst4[$lane]\\}, $Rn$Rm",
            "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3, $src4 = $dst4, $Rn.addr = $wb",
            []> {
  // NOTE(review): restored — matches the other writeback lane classes.
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVLD4LN";
}

def VLD4LNd8_UPD  : VLD4LNWB<0b0011, {?,?,?,?}, "8"> {
  let Inst{7-5} = lane{2-0};
}
def VLD4LNd16_UPD : VLD4LNWB<0b0111, {?,?,0,?}, "16"> {
  let Inst{7-6} = lane{1-0};
}
def VLD4LNd32_UPD : VLD4LNWB<0b1011, {?,0,?,?}, "32"> {
  let Inst{7} = lane{0};
  let Inst{5} = Rn{5};
}

def VLD4LNd8Pseudo_UPD  : VLDQQLNWBPseudo<IIC_VLD4lnu>;
def VLD4LNd16Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD4lnu>;
def VLD4LNd32Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD4lnu>;

def VLD4LNq16_UPD : VLD4LNWB<0b0111, {?,?,1,?}, "16"> {
  let Inst{7-6} = lane{1-0};
}
def VLD4LNq32_UPD : VLD4LNWB<0b1011, {?,1,?,?}, "32"> {
  let Inst{7} = lane{0};
  let Inst{5} = Rn{5};
}

def VLD4LNq16Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD4lnu>;
def VLD4LNq32Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD4lnu>;

} // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
// VLD1DUP : Vector Load (single element to all lanes)
class VLD1DUP<bits<4> op7_4, string Dt, ValueType Ty, PatFrag LoadOp>
  : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd), (ins addrmode6dup:$Rn),
          IIC_VLD1dup, "vld1", Dt, "\\{$Vd[]\\}, $Rn", "",
          [(set DPR:$Vd, (Ty (NEONvdup (i32 (LoadOp addrmode6dup:$Rn)))))]> {
  // NOTE(review): restored — verify vs. history.
  let Rm = 0b1111;
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVLD1DupInstruction";
}

class VLD1QDUPPseudo<ValueType Ty, PatFrag LoadOp> : VLDQPseudo<IIC_VLD1dup> {
  let Pattern = [(set QPR:$dst,
                      (Ty (NEONvdup (i32 (LoadOp addrmode6dup:$addr)))))];
}

def VLD1DUPd8  : VLD1DUP<{0,0,0,?}, "8", v8i8, extloadi8>;
def VLD1DUPd16 : VLD1DUP<{0,1,0,?}, "16", v4i16, extloadi16>;
def VLD1DUPd32 : VLD1DUP<{1,0,0,?}, "32", v2i32, load>;

def VLD1DUPq8Pseudo  : VLD1QDUPPseudo<v16i8, extloadi8>;
def VLD1DUPq16Pseudo : VLD1QDUPPseudo<v8i16, extloadi16>;
def VLD1DUPq32Pseudo : VLD1QDUPPseudo<v4i32, load>;

def : Pat<(v2f32 (NEONvdup (f32 (load addrmode6dup:$addr)))),
          (VLD1DUPd32 addrmode6:$addr)>;
def : Pat<(v4f32 (NEONvdup (f32 (load addrmode6dup:$addr)))),
          (VLD1DUPq32Pseudo addrmode6:$addr)>;
let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {

class VLD1QDUP<bits<4> op7_4, string Dt>
  : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd, DPR:$dst2),
          (ins addrmode6dup:$Rn), IIC_VLD1dup,
          "vld1", Dt, "\\{$Vd[], $dst2[]\\}, $Rn", "", []> {
  // NOTE(review): restored — verify vs. history.
  let Rm = 0b1111;
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVLD1DupInstruction";
}

def VLD1DUPq8  : VLD1QDUP<{0,0,1,0}, "8">;
def VLD1DUPq16 : VLD1QDUP<{0,1,1,?}, "16">;
def VLD1DUPq32 : VLD1QDUP<{1,0,1,?}, "32">;

// ...with address register writeback:
class VLD1DUPWB<bits<4> op7_4, string Dt>
  : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd, GPR:$wb),
          (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD1dupu,
          "vld1", Dt, "\\{$Vd[]\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVLD1DupInstruction";
}
class VLD1QDUPWB<bits<4> op7_4, string Dt>
  : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
          (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD1dupu,
          "vld1", Dt, "\\{$Vd[], $dst2[]\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVLD1DupInstruction";
}

def VLD1DUPd8_UPD  : VLD1DUPWB<{0,0,0,0}, "8">;
def VLD1DUPd16_UPD : VLD1DUPWB<{0,1,0,?}, "16">;
def VLD1DUPd32_UPD : VLD1DUPWB<{1,0,0,?}, "32">;

def VLD1DUPq8_UPD  : VLD1QDUPWB<{0,0,1,0}, "8">;
def VLD1DUPq16_UPD : VLD1QDUPWB<{0,1,1,?}, "16">;
def VLD1DUPq32_UPD : VLD1QDUPWB<{1,0,1,?}, "32">;

def VLD1DUPq8Pseudo_UPD  : VLDQWBPseudo<IIC_VLD1dupu>;
def VLD1DUPq16Pseudo_UPD : VLDQWBPseudo<IIC_VLD1dupu>;
def VLD1DUPq32Pseudo_UPD : VLDQWBPseudo<IIC_VLD1dupu>;
// VLD2DUP : Vector Load (single 2-element structure to all lanes)
class VLD2DUP<bits<4> op7_4, string Dt>
  : NLdSt<1, 0b10, 0b1101, op7_4, (outs DPR:$Vd, DPR:$dst2),
          (ins addrmode6dup:$Rn), IIC_VLD2dup,
          "vld2", Dt, "\\{$Vd[], $dst2[]\\}, $Rn", "", []> {
  // NOTE(review): restored — verify vs. history.
  let Rm = 0b1111;
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVLD2DupInstruction";
}

def VLD2DUPd8  : VLD2DUP<{0,0,0,?}, "8">;
def VLD2DUPd16 : VLD2DUP<{0,1,0,?}, "16">;
def VLD2DUPd32 : VLD2DUP<{1,0,0,?}, "32">;

def VLD2DUPd8Pseudo  : VLDQPseudo<IIC_VLD2dup>;
def VLD2DUPd16Pseudo : VLDQPseudo<IIC_VLD2dup>;
def VLD2DUPd32Pseudo : VLDQPseudo<IIC_VLD2dup>;

// ...with double-spaced registers (not used for codegen):
def VLD2DUPd8x2  : VLD2DUP<{0,0,1,?}, "8">;
def VLD2DUPd16x2 : VLD2DUP<{0,1,1,?}, "16">;
def VLD2DUPd32x2 : VLD2DUP<{1,0,1,?}, "32">;

// ...with address register writeback:
class VLD2DUPWB<bits<4> op7_4, string Dt>
  : NLdSt<1, 0b10, 0b1101, op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
          (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD2dupu,
          "vld2", Dt, "\\{$Vd[], $dst2[]\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVLD2DupInstruction";
}

def VLD2DUPd8_UPD  : VLD2DUPWB<{0,0,0,0}, "8">;
def VLD2DUPd16_UPD : VLD2DUPWB<{0,1,0,?}, "16">;
def VLD2DUPd32_UPD : VLD2DUPWB<{1,0,0,?}, "32">;

def VLD2DUPd8x2_UPD  : VLD2DUPWB<{0,0,1,0}, "8">;
def VLD2DUPd16x2_UPD : VLD2DUPWB<{0,1,1,?}, "16">;
def VLD2DUPd32x2_UPD : VLD2DUPWB<{1,0,1,?}, "32">;

def VLD2DUPd8Pseudo_UPD  : VLDQWBPseudo<IIC_VLD2dupu>;
def VLD2DUPd16Pseudo_UPD : VLDQWBPseudo<IIC_VLD2dupu>;
def VLD2DUPd32Pseudo_UPD : VLDQWBPseudo<IIC_VLD2dupu>;
// VLD3DUP : Vector Load (single 3-element structure to all lanes)
class VLD3DUP<bits<4> op7_4, string Dt>
  : NLdSt<1, 0b10, 0b1110, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3),
          (ins addrmode6dup:$Rn), IIC_VLD3dup,
          "vld3", Dt, "\\{$Vd[], $dst2[], $dst3[]\\}, $Rn", "", []> {
  // NOTE(review): next two lines restored — verify vs. history.
  let Rm = 0b1111;
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVLD3DupInstruction";
}

def VLD3DUPd8  : VLD3DUP<{0,0,0,?}, "8">;
def VLD3DUPd16 : VLD3DUP<{0,1,0,?}, "16">;
def VLD3DUPd32 : VLD3DUP<{1,0,0,?}, "32">;

def VLD3DUPd8Pseudo  : VLDQQPseudo<IIC_VLD3dup>;
def VLD3DUPd16Pseudo : VLDQQPseudo<IIC_VLD3dup>;
def VLD3DUPd32Pseudo : VLDQQPseudo<IIC_VLD3dup>;

// ...with double-spaced registers (not used for codegen):
def VLD3DUPd8x2  : VLD3DUP<{0,0,1,?}, "8">;
def VLD3DUPd16x2 : VLD3DUP<{0,1,1,?}, "16">;
def VLD3DUPd32x2 : VLD3DUP<{1,0,1,?}, "32">;

// ...with address register writeback:
class VLD3DUPWB<bits<4> op7_4, string Dt>
  : NLdSt<1, 0b10, 0b1110, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, GPR:$wb),
          (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD3dupu,
          "vld3", Dt, "\\{$Vd[], $dst2[], $dst3[]\\}, $Rn$Rm",
          "$Rn.addr = $wb", []> {
  // NOTE(review): restored — matches VLD2DUPWB/VLD4DUPWB.
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVLD3DupInstruction";
}

def VLD3DUPd8_UPD  : VLD3DUPWB<{0,0,0,0}, "8">;
def VLD3DUPd16_UPD : VLD3DUPWB<{0,1,0,?}, "16">;
def VLD3DUPd32_UPD : VLD3DUPWB<{1,0,0,?}, "32">;

def VLD3DUPd8x2_UPD  : VLD3DUPWB<{0,0,1,0}, "8">;
def VLD3DUPd16x2_UPD : VLD3DUPWB<{0,1,1,?}, "16">;
def VLD3DUPd32x2_UPD : VLD3DUPWB<{1,0,1,?}, "32">;

def VLD3DUPd8Pseudo_UPD  : VLDQQWBPseudo<IIC_VLD3dupu>;
def VLD3DUPd16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3dupu>;
def VLD3DUPd32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3dupu>;
// VLD4DUP : Vector Load (single 4-element structure to all lanes)
class VLD4DUP<bits<4> op7_4, string Dt>
  : NLdSt<1, 0b10, 0b1111, op7_4,
          (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4),
          (ins addrmode6dup:$Rn), IIC_VLD4dup,
          "vld4", Dt, "\\{$Vd[], $dst2[], $dst3[], $dst4[]\\}, $Rn", "", []> {
  // NOTE(review): restored — verify vs. history.
  let Rm = 0b1111;
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVLD4DupInstruction";
}

def VLD4DUPd8  : VLD4DUP<{0,0,0,?}, "8">;
def VLD4DUPd16 : VLD4DUP<{0,1,0,?}, "16">;
def VLD4DUPd32 : VLD4DUP<{1,?,0,?}, "32"> { let Inst{6} = Rn{5}; }

def VLD4DUPd8Pseudo  : VLDQQPseudo<IIC_VLD4dup>;
def VLD4DUPd16Pseudo : VLDQQPseudo<IIC_VLD4dup>;
def VLD4DUPd32Pseudo : VLDQQPseudo<IIC_VLD4dup>;

// ...with double-spaced registers (not used for codegen):
def VLD4DUPd8x2  : VLD4DUP<{0,0,1,?}, "8">;
def VLD4DUPd16x2 : VLD4DUP<{0,1,1,?}, "16">;
def VLD4DUPd32x2 : VLD4DUP<{1,?,1,?}, "32"> { let Inst{6} = Rn{5}; }

// ...with address register writeback:
class VLD4DUPWB<bits<4> op7_4, string Dt>
  : NLdSt<1, 0b10, 0b1111, op7_4,
          (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
          (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD4dupu,
          "vld4", Dt, "\\{$Vd[], $dst2[], $dst3[], $dst4[]\\}, $Rn$Rm",
          "$Rn.addr = $wb", []> {
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVLD4DupInstruction";
}

def VLD4DUPd8_UPD  : VLD4DUPWB<{0,0,0,0}, "8">;
def VLD4DUPd16_UPD : VLD4DUPWB<{0,1,0,?}, "16">;
def VLD4DUPd32_UPD : VLD4DUPWB<{1,?,0,?}, "32"> { let Inst{6} = Rn{5}; }

def VLD4DUPd8x2_UPD  : VLD4DUPWB<{0,0,1,0}, "8">;
def VLD4DUPd16x2_UPD : VLD4DUPWB<{0,1,1,?}, "16">;
def VLD4DUPd32x2_UPD : VLD4DUPWB<{1,?,1,?}, "32"> { let Inst{6} = Rn{5}; }

def VLD4DUPd8Pseudo_UPD  : VLDQQWBPseudo<IIC_VLD4dupu>;
def VLD4DUPd16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4dupu>;
def VLD4DUPd32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4dupu>;

} // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {

// Classes for VST* pseudo-instructions with multi-register operands.
// These are expanded to real instructions after register allocation.
class VSTQPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs), (ins addrmode6:$addr, QPR:$src), itin, "">;
class VSTQWBPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs GPR:$wb),
                (ins addrmode6:$addr, am6offset:$offset, QPR:$src), itin,
                "$addr.addr = $wb">;
class VSTQWBfixedPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs GPR:$wb),
                (ins addrmode6:$addr, QPR:$src), itin,
                "$addr.addr = $wb">;
class VSTQWBregisterPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs GPR:$wb),
                (ins addrmode6:$addr, rGPR:$offset, QPR:$src), itin,
                "$addr.addr = $wb">;
class VSTQQPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs), (ins addrmode6:$addr, QQPR:$src), itin, "">;
class VSTQQWBPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs GPR:$wb),
                (ins addrmode6:$addr, am6offset:$offset, QQPR:$src), itin,
                "$addr.addr = $wb">;
class VSTQQQQPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs), (ins addrmode6:$addr, QQQQPR:$src), itin, "">;
class VSTQQQQWBPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs GPR:$wb),
                (ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src), itin,
                "$addr.addr = $wb">;
// VST1 : Vector Store (multiple single elements)
class VST1D<bits<4> op7_4, string Dt>
  : NLdSt<0,0b00,0b0111,op7_4, (outs), (ins addrmode6:$Rn, VecListOneD:$Vd),
          IIC_VST1, "vst1", Dt, "$Vd, $Rn", "", []> {
  // NOTE(review): restored — verify vs. history.
  let Rm = 0b1111;
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVSTInstruction";
}
class VST1Q<bits<4> op7_4, string Dt>
  : NLdSt<0,0b00,0b1010,op7_4, (outs), (ins addrmode6:$Rn, VecListTwoD:$Vd),
          IIC_VST1x2, "vst1", Dt, "$Vd, $Rn", "", []> {
  // NOTE(review): restored — verify vs. history.
  let Rm = 0b1111;
  let Inst{5-4} = Rn{5-4};
  let DecoderMethod = "DecodeVSTInstruction";
}

def VST1d8  : VST1D<{0,0,0,?}, "8">;
def VST1d16 : VST1D<{0,1,0,?}, "16">;
def VST1d32 : VST1D<{1,0,0,?}, "32">;
def VST1d64 : VST1D<{1,1,0,?}, "64">;

def VST1q8  : VST1Q<{0,0,?,?}, "8">;
def VST1q16 : VST1Q<{0,1,?,?}, "16">;
def VST1q32 : VST1Q<{1,0,?,?}, "32">;
def VST1q64 : VST1Q<{1,1,?,?}, "64">;

def VST1q8Pseudo  : VSTQPseudo<IIC_VST1x2>;
def VST1q16Pseudo : VSTQPseudo<IIC_VST1x2>;
def VST1q32Pseudo : VSTQPseudo<IIC_VST1x2>;
def VST1q64Pseudo : VSTQPseudo<IIC_VST1x2>;
// ...with address register writeback:
multiclass VST1DWB<bits<4> op7_4, string Dt> {
  def _fixed : NLdSt<0,0b00,0b0111,op7_4, (outs GPR:$wb),
                     (ins addrmode6:$Rn, VecListOneD:$Vd), IIC_VLD1u,
                     "vst1", Dt, "$Vd, $Rn!",
                     "$Rn.addr = $wb", []> {
    let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
    let Inst{4} = Rn{4};
    let DecoderMethod = "DecodeVSTInstruction";
    let AsmMatchConverter = "cvtVSTwbFixed";
  }
  def _register : NLdSt<0,0b00,0b0111,op7_4, (outs GPR:$wb),
                        (ins addrmode6:$Rn, rGPR:$Rm, VecListOneD:$Vd),
                        IIC_VLD1u,
                        "vst1", Dt, "$Vd, $Rn, $Rm",
                        "$Rn.addr = $wb", []> {
    let Inst{4} = Rn{4};
    let DecoderMethod = "DecodeVSTInstruction";
    let AsmMatchConverter = "cvtVSTwbRegister";
  }
}
multiclass VST1QWB<bits<4> op7_4, string Dt> {
  def _fixed : NLdSt<0,0b00,0b1010,op7_4, (outs GPR:$wb),
                     (ins addrmode6:$Rn, VecListTwoD:$Vd), IIC_VLD1x2u,
                     "vst1", Dt, "$Vd, $Rn!",
                     "$Rn.addr = $wb", []> {
    let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
    let Inst{5-4} = Rn{5-4};
    let DecoderMethod = "DecodeVSTInstruction";
    let AsmMatchConverter = "cvtVSTwbFixed";
  }
  def _register : NLdSt<0,0b00,0b1010,op7_4, (outs GPR:$wb),
                        (ins addrmode6:$Rn, rGPR:$Rm, VecListTwoD:$Vd),
                        IIC_VLD1x2u,
                        "vst1", Dt, "$Vd, $Rn, $Rm",
                        "$Rn.addr = $wb", []> {
    let Inst{5-4} = Rn{5-4};
    let DecoderMethod = "DecodeVSTInstruction";
    let AsmMatchConverter = "cvtVSTwbRegister";
  }
}

defm VST1d8wb  : VST1DWB<{0,0,0,?}, "8">;
defm VST1d16wb : VST1DWB<{0,1,0,?}, "16">;
defm VST1d32wb : VST1DWB<{1,0,0,?}, "32">;
defm VST1d64wb : VST1DWB<{1,1,0,?}, "64">;

defm VST1q8wb  : VST1QWB<{0,0,?,?}, "8">;
defm VST1q16wb : VST1QWB<{0,1,?,?}, "16">;
defm VST1q32wb : VST1QWB<{1,0,?,?}, "32">;
defm VST1q64wb : VST1QWB<{1,1,?,?}, "64">;

def VST1q8PseudoWB_fixed  : VSTQWBfixedPseudo<IIC_VST1x2u>;
def VST1q16PseudoWB_fixed : VSTQWBfixedPseudo<IIC_VST1x2u>;
def VST1q32PseudoWB_fixed : VSTQWBfixedPseudo<IIC_VST1x2u>;
def VST1q64PseudoWB_fixed : VSTQWBfixedPseudo<IIC_VST1x2u>;
def VST1q8PseudoWB_register  : VSTQWBregisterPseudo<IIC_VST1x2u>;
def VST1q16PseudoWB_register : VSTQWBregisterPseudo<IIC_VST1x2u>;
def VST1q32PseudoWB_register : VSTQWBregisterPseudo<IIC_VST1x2u>;
def VST1q64PseudoWB_register : VSTQWBregisterPseudo<IIC_VST1x2u>;
// ...with 3 registers
class VST1D3<bits<4> op7_4, string Dt>
  : NLdSt<0, 0b00, 0b0110, op7_4, (outs),
          (ins addrmode6:$Rn, VecListThreeD:$Vd),
          IIC_VST1x3, "vst1", Dt, "$Vd, $Rn", "", []> {
  // NOTE(review): restored — verify vs. history.
  let Rm = 0b1111;
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVSTInstruction";
}
multiclass VST1D3WB<bits<4> op7_4, string Dt> {
  def _fixed : NLdSt<0,0b00,0b0110,op7_4, (outs GPR:$wb),
                     (ins addrmode6:$Rn, VecListThreeD:$Vd), IIC_VLD1x3u,
                     "vst1", Dt, "$Vd, $Rn!",
                     "$Rn.addr = $wb", []> {
    let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
    let Inst{5-4} = Rn{5-4};
    let DecoderMethod = "DecodeVSTInstruction";
    let AsmMatchConverter = "cvtVSTwbFixed";
  }
  def _register : NLdSt<0,0b00,0b0110,op7_4, (outs GPR:$wb),
                        (ins addrmode6:$Rn, rGPR:$Rm, VecListThreeD:$Vd),
                        IIC_VLD1x3u,
                        "vst1", Dt, "$Vd, $Rn, $Rm",
                        "$Rn.addr = $wb", []> {
    let Inst{5-4} = Rn{5-4};
    let DecoderMethod = "DecodeVSTInstruction";
    let AsmMatchConverter = "cvtVSTwbRegister";
  }
}

def VST1d8T  : VST1D3<{0,0,0,?}, "8">;
def VST1d16T : VST1D3<{0,1,0,?}, "16">;
def VST1d32T : VST1D3<{1,0,0,?}, "32">;
def VST1d64T : VST1D3<{1,1,0,?}, "64">;

defm VST1d8Twb  : VST1D3WB<{0,0,0,?}, "8">;
defm VST1d16Twb : VST1D3WB<{0,1,0,?}, "16">;
defm VST1d32Twb : VST1D3WB<{1,0,0,?}, "32">;
defm VST1d64Twb : VST1D3WB<{1,1,0,?}, "64">;

def VST1d64TPseudo            : VSTQQPseudo<IIC_VST1x3>;
def VST1d64TPseudoWB_fixed    : VSTQQWBPseudo<IIC_VST1x3u>;
def VST1d64TPseudoWB_register : VSTQQWBPseudo<IIC_VST1x3u>;
// ...with 4 registers
class VST1D4<bits<4> op7_4, string Dt>
  : NLdSt<0, 0b00, 0b0010, op7_4, (outs),
          (ins addrmode6:$Rn, VecListFourD:$Vd),
          IIC_VST1x4, "vst1", Dt, "$Vd, $Rn", "",
          []> {
  // NOTE(review): restored — verify vs. history.
  let Rm = 0b1111;
  let Inst{5-4} = Rn{5-4};
  let DecoderMethod = "DecodeVSTInstruction";
}
multiclass VST1D4WB<bits<4> op7_4, string Dt> {
  def _fixed : NLdSt<0,0b00,0b0010,op7_4, (outs GPR:$wb),
                     (ins addrmode6:$Rn, VecListFourD:$Vd), IIC_VLD1x4u,
                     "vst1", Dt, "$Vd, $Rn!",
                     "$Rn.addr = $wb", []> {
    let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
    let Inst{5-4} = Rn{5-4};
    let DecoderMethod = "DecodeVSTInstruction";
    let AsmMatchConverter = "cvtVSTwbFixed";
  }
  def _register : NLdSt<0,0b00,0b0010,op7_4, (outs GPR:$wb),
                        (ins addrmode6:$Rn, rGPR:$Rm, VecListFourD:$Vd),
                        IIC_VLD1x4u,
                        "vst1", Dt, "$Vd, $Rn, $Rm",
                        "$Rn.addr = $wb", []> {
    let Inst{5-4} = Rn{5-4};
    let DecoderMethod = "DecodeVSTInstruction";
    let AsmMatchConverter = "cvtVSTwbRegister";
  }
}

def VST1d8Q  : VST1D4<{0,0,?,?}, "8">;
def VST1d16Q : VST1D4<{0,1,?,?}, "16">;
def VST1d32Q : VST1D4<{1,0,?,?}, "32">;
def VST1d64Q : VST1D4<{1,1,?,?}, "64">;

defm VST1d8Qwb  : VST1D4WB<{0,0,?,?}, "8">;
defm VST1d16Qwb : VST1D4WB<{0,1,?,?}, "16">;
defm VST1d32Qwb : VST1D4WB<{1,0,?,?}, "32">;
defm VST1d64Qwb : VST1D4WB<{1,1,?,?}, "64">;

def VST1d64QPseudo            : VSTQQPseudo<IIC_VST1x4>;
def VST1d64QPseudoWB_fixed    : VSTQQWBPseudo<IIC_VST1x4u>;
def VST1d64QPseudoWB_register : VSTQQWBPseudo<IIC_VST1x4u>;
// VST2 : Vector Store (multiple 2-element structures)
class VST2D<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdSt<0, 0b00, op11_8, op7_4, (outs),
          (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2),
          IIC_VST2, "vst2", Dt, "\\{$Vd, $src2\\}, $Rn", "", []> {
  // NOTE(review): restored — verify vs. history.
  let Rm = 0b1111;
  let Inst{5-4} = Rn{5-4};
  let DecoderMethod = "DecodeVSTInstruction";
}
class VST2Q<bits<4> op7_4, string Dt>
  : NLdSt<0, 0b00, 0b0011, op7_4, (outs),
          (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4),
          IIC_VST2x2, "vst2", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn",
          "", []> {
  // NOTE(review): restored — verify vs. history.
  let Rm = 0b1111;
  let Inst{5-4} = Rn{5-4};
  let DecoderMethod = "DecodeVSTInstruction";
}

def VST2d8  : VST2D<0b1000, {0,0,?,?}, "8">;
def VST2d16 : VST2D<0b1000, {0,1,?,?}, "16">;
def VST2d32 : VST2D<0b1000, {1,0,?,?}, "32">;

def VST2q8  : VST2Q<{0,0,?,?}, "8">;
def VST2q16 : VST2Q<{0,1,?,?}, "16">;
def VST2q32 : VST2Q<{1,0,?,?}, "32">;

def VST2d8Pseudo  : VSTQPseudo<IIC_VST2>;
def VST2d16Pseudo : VSTQPseudo<IIC_VST2>;
def VST2d32Pseudo : VSTQPseudo<IIC_VST2>;

def VST2q8Pseudo  : VSTQQPseudo<IIC_VST2x2>;
def VST2q16Pseudo : VSTQQPseudo<IIC_VST2x2>;
def VST2q32Pseudo : VSTQQPseudo<IIC_VST2x2>;

// ...with address register writeback:
class VST2DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
          (ins addrmode6:$Rn, am6offset:$Rm, DPR:$Vd, DPR:$src2),
          IIC_VST2u, "vst2", Dt, "\\{$Vd, $src2\\}, $Rn$Rm",
          "$Rn.addr = $wb", []> {
  let Inst{5-4} = Rn{5-4};
  let DecoderMethod = "DecodeVSTInstruction";
}
class VST2QWB<bits<4> op7_4, string Dt>
  : NLdSt<0, 0b00, 0b0011, op7_4, (outs GPR:$wb),
          (ins addrmode6:$Rn, am6offset:$Rm,
           DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4), IIC_VST2x2u,
          "vst2", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn$Rm",
          "$Rn.addr = $wb", []> {
  let Inst{5-4} = Rn{5-4};
  let DecoderMethod = "DecodeVSTInstruction";
}

def VST2d8_UPD  : VST2DWB<0b1000, {0,0,?,?}, "8">;
def VST2d16_UPD : VST2DWB<0b1000, {0,1,?,?}, "16">;
def VST2d32_UPD : VST2DWB<0b1000, {1,0,?,?}, "32">;

def VST2q8_UPD  : VST2QWB<{0,0,?,?}, "8">;
def VST2q16_UPD : VST2QWB<{0,1,?,?}, "16">;
def VST2q32_UPD : VST2QWB<{1,0,?,?}, "32">;

def VST2d8Pseudo_UPD  : VSTQWBPseudo<IIC_VST2u>;
def VST2d16Pseudo_UPD : VSTQWBPseudo<IIC_VST2u>;
def VST2d32Pseudo_UPD : VSTQWBPseudo<IIC_VST2u>;

def VST2q8Pseudo_UPD  : VSTQQWBPseudo<IIC_VST2x2u>;
def VST2q16Pseudo_UPD : VSTQQWBPseudo<IIC_VST2x2u>;
def VST2q32Pseudo_UPD : VSTQQWBPseudo<IIC_VST2x2u>;

// ...with double-spaced registers
def VST2b8  : VST2D<0b1001, {0,0,?,?}, "8">;
def VST2b16 : VST2D<0b1001, {0,1,?,?}, "16">;
def VST2b32 : VST2D<0b1001, {1,0,?,?}, "32">;
def VST2b8_UPD  : VST2DWB<0b1001, {0,0,?,?}, "8">;
def VST2b16_UPD : VST2DWB<0b1001, {0,1,?,?}, "16">;
def VST2b32_UPD : VST2DWB<0b1001, {1,0,?,?}, "32">;
// VST3 : Vector Store (multiple 3-element structures)
class VST3D<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdSt<0, 0b00, op11_8, op7_4, (outs),
          (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3), IIC_VST3,
          "vst3", Dt, "\\{$Vd, $src2, $src3\\}, $Rn", "", []> {
  // NOTE(review): restored — verify vs. history.
  let Rm = 0b1111;
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVSTInstruction";
}

def VST3d8  : VST3D<0b0100, {0,0,0,?}, "8">;
def VST3d16 : VST3D<0b0100, {0,1,0,?}, "16">;
def VST3d32 : VST3D<0b0100, {1,0,0,?}, "32">;

def VST3d8Pseudo  : VSTQQPseudo<IIC_VST3>;
def VST3d16Pseudo : VSTQQPseudo<IIC_VST3>;
def VST3d32Pseudo : VSTQQPseudo<IIC_VST3>;

// ...with address register writeback:
class VST3DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
          (ins addrmode6:$Rn, am6offset:$Rm,
           DPR:$Vd, DPR:$src2, DPR:$src3), IIC_VST3u,
          "vst3", Dt, "\\{$Vd, $src2, $src3\\}, $Rn$Rm",
          "$Rn.addr = $wb", []> {
  let Inst{4} = Rn{4};
  let DecoderMethod = "DecodeVSTInstruction";
}

def VST3d8_UPD  : VST3DWB<0b0100, {0,0,0,?}, "8">;
def VST3d16_UPD : VST3DWB<0b0100, {0,1,0,?}, "16">;
def VST3d32_UPD : VST3DWB<0b0100, {1,0,0,?}, "32">;

def VST3d8Pseudo_UPD  : VSTQQWBPseudo<IIC_VST3u>;
def VST3d16Pseudo_UPD : VSTQQWBPseudo<IIC_VST3u>;
def VST3d32Pseudo_UPD : VSTQQWBPseudo<IIC_VST3u>;

// ...with double-spaced registers:
def VST3q8  : VST3D<0b0101, {0,0,0,?}, "8">;
def VST3q16 : VST3D<0b0101, {0,1,0,?}, "16">;
def VST3q32 : VST3D<0b0101, {1,0,0,?}, "32">;
def VST3q8_UPD  : VST3DWB<0b0101, {0,0,0,?}, "8">;
def VST3q16_UPD : VST3DWB<0b0101, {0,1,0,?}, "16">;
def VST3q32_UPD : VST3DWB<0b0101, {1,0,0,?}, "32">;

def VST3q8Pseudo_UPD  : VSTQQQQWBPseudo<IIC_VST3u>;
def VST3q16Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
def VST3q32Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;

// ...alternate versions to be allocated odd register numbers:
def VST3q8oddPseudo  : VSTQQQQPseudo<IIC_VST3>;
def VST3q16oddPseudo : VSTQQQQPseudo<IIC_VST3>;
def VST3q32oddPseudo : VSTQQQQPseudo<IIC_VST3>;

def VST3q8oddPseudo_UPD  : VSTQQQQWBPseudo<IIC_VST3u>;
def VST3q16oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
def VST3q32oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
// VST4 : Vector Store (multiple 4-element structures)
class VST4D<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdSt<0, 0b00, op11_8, op7_4, (outs),
          (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4),
          IIC_VST4, "vst4", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn",
          "", []> {
  // NOTE(review): restored — verify vs. history.
  let Rm = 0b1111;
  let Inst{5-4} = Rn{5-4};
  let DecoderMethod = "DecodeVSTInstruction";
}

def VST4d8  : VST4D<0b0000, {0,0,?,?}, "8">;
def VST4d16 : VST4D<0b0000, {0,1,?,?}, "16">;
def VST4d32 : VST4D<0b0000, {1,0,?,?}, "32">;

def VST4d8Pseudo  : VSTQQPseudo<IIC_VST4>;
def VST4d16Pseudo : VSTQQPseudo<IIC_VST4>;
def VST4d32Pseudo : VSTQQPseudo<IIC_VST4>;

// ...with address register writeback:
class VST4DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
  : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
          (ins addrmode6:$Rn, am6offset:$Rm,
           DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4), IIC_VST4u,
          "vst4", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn$Rm",
          "$Rn.addr = $wb", []> {
  let Inst{5-4} = Rn{5-4};
  let DecoderMethod = "DecodeVSTInstruction";
}

def VST4d8_UPD  : VST4DWB<0b0000, {0,0,?,?}, "8">;
def VST4d16_UPD : VST4DWB<0b0000, {0,1,?,?}, "16">;
def VST4d32_UPD : VST4DWB<0b0000, {1,0,?,?}, "32">;

def VST4d8Pseudo_UPD  : VSTQQWBPseudo<IIC_VST4u>;
def VST4d16Pseudo_UPD : VSTQQWBPseudo<IIC_VST4u>;
def VST4d32Pseudo_UPD : VSTQQWBPseudo<IIC_VST4u>;

// ...with double-spaced registers:
def VST4q8  : VST4D<0b0001, {0,0,?,?}, "8">;
def VST4q16 : VST4D<0b0001, {0,1,?,?}, "16">;
def VST4q32 : VST4D<0b0001, {1,0,?,?}, "32">;
def VST4q8_UPD  : VST4DWB<0b0001, {0,0,?,?}, "8">;
def VST4q16_UPD : VST4DWB<0b0001, {0,1,?,?}, "16">;
def VST4q32_UPD : VST4DWB<0b0001, {1,0,?,?}, "32">;

def VST4q8Pseudo_UPD  : VSTQQQQWBPseudo<IIC_VST4u>;
def VST4q16Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
def VST4q32Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;

// ...alternate versions to be allocated odd register numbers:
def VST4q8oddPseudo  : VSTQQQQPseudo<IIC_VST4>;
def VST4q16oddPseudo : VSTQQQQPseudo<IIC_VST4>;
def VST4q32oddPseudo : VSTQQQQPseudo<IIC_VST4>;

def VST4q8oddPseudo_UPD  : VSTQQQQWBPseudo<IIC_VST4u>;
def VST4q16oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
def VST4q32oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;

} // mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1
// Classes for VST*LN pseudo-instructions with multi-register operands.
// These are expanded to real instructions after register allocation.
class VSTQLNPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs), (ins addrmode6:$addr, QPR:$src, nohash_imm:$lane),
                itin, "">;
class VSTQLNWBPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs GPR:$wb),
                (ins addrmode6:$addr, am6offset:$offset, QPR:$src,
                 nohash_imm:$lane), itin, "$addr.addr = $wb">;
class VSTQQLNPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs), (ins addrmode6:$addr, QQPR:$src, nohash_imm:$lane),
                itin, "">;
class VSTQQLNWBPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs GPR:$wb),
                (ins addrmode6:$addr, am6offset:$offset, QQPR:$src,
                 nohash_imm:$lane), itin, "$addr.addr = $wb">;
class VSTQQQQLNPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs), (ins addrmode6:$addr, QQQQPR:$src, nohash_imm:$lane),
                itin, "">;
class VSTQQQQLNWBPseudo<InstrItinClass itin>
  : PseudoNLdSt<(outs GPR:$wb),
                (ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src,
                 nohash_imm:$lane), itin, "$addr.addr = $wb">;
// VST1LN : Vector Store (single element from one lane)
class VST1LN<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
             PatFrag StoreOp, SDNode ExtractOp>
  : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
            (ins addrmode6:$Rn, DPR:$Vd, nohash_imm:$lane),
            IIC_VST1ln, "vst1", Dt, "\\{$Vd[$lane]\\}, $Rn", "",
            [(StoreOp (ExtractOp (Ty DPR:$Vd), imm:$lane), addrmode6:$Rn)]> {
  // NOTE(review): restored — verify vs. history.
  let Rm = 0b1111;
  let DecoderMethod = "DecodeVST1LN";
}
class VST1LN32<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
               PatFrag StoreOp, SDNode ExtractOp>
  : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
            (ins addrmode6oneL32:$Rn, DPR:$Vd, nohash_imm:$lane),
            IIC_VST1ln, "vst1", Dt, "\\{$Vd[$lane]\\}, $Rn", "",
            [(StoreOp (ExtractOp (Ty DPR:$Vd), imm:$lane), addrmode6oneL32:$Rn)]>{
  // NOTE(review): restored — verify vs. history.
  let Rm = 0b1111;
  let DecoderMethod = "DecodeVST1LN";
}
class VST1QLNPseudo<ValueType Ty, PatFrag StoreOp, SDNode ExtractOp>
  : VSTQLNPseudo<IIC_VST1ln> {
  let Pattern = [(StoreOp (ExtractOp (Ty QPR:$src), imm:$lane),
                          addrmode6:$addr)];
}

def VST1LNd8 : VST1LN<0b0000, {?,?,?,0}, "8", v8i8, truncstorei8,
                      NEONvgetlaneu> {
  let Inst{7-5} = lane{2-0};
}
def VST1LNd16 : VST1LN<0b0100, {?,?,0,?}, "16", v4i16, truncstorei16,
                       NEONvgetlaneu> {
  let Inst{7-6} = lane{1-0};
  let Inst{4}   = Rn{5};
}

def VST1LNd32 : VST1LN32<0b1000, {?,0,?,?}, "32", v2i32, store, extractelt> {
  let Inst{7}   = lane{0};
  let Inst{5-4} = Rn{5-4};
}

def VST1LNq8Pseudo  : VST1QLNPseudo<v16i8, truncstorei8, NEONvgetlaneu>;
def VST1LNq16Pseudo : VST1QLNPseudo<v8i16, truncstorei16, NEONvgetlaneu>;
def VST1LNq32Pseudo : VST1QLNPseudo<v4i32, store, extractelt>;

def : Pat<(store (extractelt (v2f32 DPR:$src), imm:$lane), addrmode6:$addr),
          (VST1LNd32 addrmode6:$addr, DPR:$src, imm:$lane)>;
def : Pat<(store (extractelt (v4f32 QPR:$src), imm:$lane), addrmode6:$addr),
          (VST1LNq32Pseudo addrmode6:$addr, QPR:$src, imm:$lane)>;
1689 // ...with address register writeback:
// Post-indexed form: the updated base address is produced in GPR:$wb, and the
// am6offset operand $Rm supplies the post-increment (register or implied).
1690 class VST1LNWB<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
1691 PatFrag StoreOp, SDNode ExtractOp>
1692 : NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
1693 (ins addrmode6:$Rn, am6offset:$Rm,
1694 DPR:$Vd, nohash_imm:$lane), IIC_VST1lnu, "vst1", Dt,
1695 "\\{$Vd[$lane]\\}, $Rn$Rm",
1697 [(set GPR:$wb, (StoreOp (ExtractOp (Ty DPR:$Vd), imm:$lane),
1698 addrmode6:$Rn, am6offset:$Rm))]> {
1699 let DecoderMethod = "DecodeVST1LN";
// Q-register writeback pseudo (expanded post-RA; base class declared
// elsewhere in this file).
1701 class VST1QLNWBPseudo<ValueType Ty, PatFrag StoreOp, SDNode ExtractOp>
1702 : VSTQLNWBPseudo<IIC_VST1lnu> {
1703 let Pattern = [(set GPR:$wb, (StoreOp (ExtractOp (Ty QPR:$src), imm:$lane),
1704 addrmode6:$addr, am6offset:$offset))];
// Writeback instantiations; lane-bit encodings mirror the non-writeback defs
// above (3 lane bits for i8, 2 for i16, 1 for i32).
1707 def VST1LNd8_UPD : VST1LNWB<0b0000, {?,?,?,0}, "8", v8i8, post_truncsti8,
1709 let Inst{7-5} = lane{2-0};
1711 def VST1LNd16_UPD : VST1LNWB<0b0100, {?,?,0,?}, "16", v4i16, post_truncsti16,
1713 let Inst{7-6} = lane{1-0};
1714 let Inst{4} = Rn{5};
1716 def VST1LNd32_UPD : VST1LNWB<0b1000, {?,0,?,?}, "32", v2i32, post_store,
1718 let Inst{7} = lane{0};
1719 let Inst{5-4} = Rn{5-4};
1722 def VST1LNq8Pseudo_UPD : VST1QLNWBPseudo<v16i8, post_truncsti8, NEONvgetlaneu>;
1723 def VST1LNq16Pseudo_UPD : VST1QLNWBPseudo<v8i16, post_truncsti16,NEONvgetlaneu>;
1724 def VST1LNq32Pseudo_UPD : VST1QLNWBPseudo<v4i32, post_store, extractelt>;
1726 let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
1728 // VST2LN : Vector Store (single 2-element structure from one lane)
// Stores one lane from each of two D registers ($Vd, $src2). No selection
// pattern here; these are matched via intrinsics/pseudos elsewhere.
1729 class VST2LN<bits<4> op11_8, bits<4> op7_4, string Dt>
1730 : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
1731 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, nohash_imm:$lane),
1732 IIC_VST2ln, "vst2", Dt, "\\{$Vd[$lane], $src2[$lane]\\}, $Rn",
1735 let Inst{4} = Rn{4};
1736 let DecoderMethod = "DecodeVST2LN";
1739 def VST2LNd8 : VST2LN<0b0001, {?,?,?,?}, "8"> {
1740 let Inst{7-5} = lane{2-0};
1742 def VST2LNd16 : VST2LN<0b0101, {?,?,0,?}, "16"> {
1743 let Inst{7-6} = lane{1-0};
1745 def VST2LNd32 : VST2LN<0b1001, {?,0,0,?}, "32"> {
1746 let Inst{7} = lane{0};
// Pseudos carrying the register pair as a single Q operand until post-RA.
1749 def VST2LNd8Pseudo : VSTQLNPseudo<IIC_VST2ln>;
1750 def VST2LNd16Pseudo : VSTQLNPseudo<IIC_VST2ln>;
1751 def VST2LNd32Pseudo : VSTQLNPseudo<IIC_VST2ln>;
1753 // ...with double-spaced registers:
// Only 16- and 32-bit elements have a double-spaced (q) form; the op7_4 bit
// pattern differs from the d-form in the spacing bit ({?,?,1,?} / {?,1,0,?}).
1754 def VST2LNq16 : VST2LN<0b0101, {?,?,1,?}, "16"> {
1755 let Inst{7-6} = lane{1-0};
1756 let Inst{4} = Rn{4};
1758 def VST2LNq32 : VST2LN<0b1001, {?,1,0,?}, "32"> {
1759 let Inst{7} = lane{0};
1760 let Inst{4} = Rn{4};
1763 def VST2LNq16Pseudo : VSTQQLNPseudo<IIC_VST2ln>;
1764 def VST2LNq32Pseudo : VSTQQLNPseudo<IIC_VST2ln>;
1766 // ...with address register writeback:
// Post-indexed VST2 lane store: base update comes back in GPR:$wb via the
// "$addr.addr = $wb" constraint; $offset is the am6offset post-increment.
1767 class VST2LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
1768 : NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
1769 (ins addrmode6:$addr, am6offset:$offset,
1770 DPR:$src1, DPR:$src2, nohash_imm:$lane), IIC_VST2lnu, "vst2", Dt,
1771 "\\{$src1[$lane], $src2[$lane]\\}, $addr$offset",
1772 "$addr.addr = $wb", []> {
1773 let Inst{4} = Rn{4};
1774 let DecoderMethod = "DecodeVST2LN";
1777 def VST2LNd8_UPD : VST2LNWB<0b0001, {?,?,?,?}, "8"> {
1778 let Inst{7-5} = lane{2-0};
1780 def VST2LNd16_UPD : VST2LNWB<0b0101, {?,?,0,?}, "16"> {
1781 let Inst{7-6} = lane{1-0};
1783 def VST2LNd32_UPD : VST2LNWB<0b1001, {?,0,0,?}, "32"> {
1784 let Inst{7} = lane{0};
1787 def VST2LNd8Pseudo_UPD : VSTQLNWBPseudo<IIC_VST2lnu>;
1788 def VST2LNd16Pseudo_UPD : VSTQLNWBPseudo<IIC_VST2lnu>;
1789 def VST2LNd32Pseudo_UPD : VSTQLNWBPseudo<IIC_VST2lnu>;
// Double-spaced writeback variants (16/32-bit elements only).
1791 def VST2LNq16_UPD : VST2LNWB<0b0101, {?,?,1,?}, "16"> {
1792 let Inst{7-6} = lane{1-0};
1794 def VST2LNq32_UPD : VST2LNWB<0b1001, {?,1,0,?}, "32"> {
1795 let Inst{7} = lane{0};
1798 def VST2LNq16Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST2lnu>;
1799 def VST2LNq32Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST2lnu>;
1801 // VST3LN : Vector Store (single 3-element structure from one lane)
// Stores one lane from each of three D registers; no selection pattern,
// matched through pseudos/intrinsics elsewhere.
1802 class VST3LN<bits<4> op11_8, bits<4> op7_4, string Dt>
1803 : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
1804 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3,
1805 nohash_imm:$lane), IIC_VST3ln, "vst3", Dt,
1806 "\\{$Vd[$lane], $src2[$lane], $src3[$lane]\\}, $Rn", "", []> {
1808 let DecoderMethod = "DecodeVST3LN";
1811 def VST3LNd8 : VST3LN<0b0010, {?,?,?,0}, "8"> {
1812 let Inst{7-5} = lane{2-0};
1814 def VST3LNd16 : VST3LN<0b0110, {?,?,0,0}, "16"> {
1815 let Inst{7-6} = lane{1-0};
1817 def VST3LNd32 : VST3LN<0b1010, {?,0,0,0}, "32"> {
1818 let Inst{7} = lane{0};
// Three D regs don't fit one Q, so the d-form pseudos use a QQ operand.
1821 def VST3LNd8Pseudo : VSTQQLNPseudo<IIC_VST3ln>;
1822 def VST3LNd16Pseudo : VSTQQLNPseudo<IIC_VST3ln>;
1823 def VST3LNd32Pseudo : VSTQQLNPseudo<IIC_VST3ln>;
1825 // ...with double-spaced registers:
1826 def VST3LNq16 : VST3LN<0b0110, {?,?,1,0}, "16"> {
1827 let Inst{7-6} = lane{1-0};
1829 def VST3LNq32 : VST3LN<0b1010, {?,1,0,0}, "32"> {
1830 let Inst{7} = lane{0};
1833 def VST3LNq16Pseudo : VSTQQQQLNPseudo<IIC_VST3ln>;
1834 def VST3LNq32Pseudo : VSTQQQQLNPseudo<IIC_VST3ln>;
1836 // ...with address register writeback:
// Post-indexed VST3 lane store; updated base returned in GPR:$wb, tied to
// the addrmode6 base via "$Rn.addr = $wb".
1837 class VST3LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
1838 : NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
1839 (ins addrmode6:$Rn, am6offset:$Rm,
1840 DPR:$Vd, DPR:$src2, DPR:$src3, nohash_imm:$lane),
1841 IIC_VST3lnu, "vst3", Dt,
1842 "\\{$Vd[$lane], $src2[$lane], $src3[$lane]\\}, $Rn$Rm",
1843 "$Rn.addr = $wb", []> {
1844 let DecoderMethod = "DecodeVST3LN";
1847 def VST3LNd8_UPD : VST3LNWB<0b0010, {?,?,?,0}, "8"> {
1848 let Inst{7-5} = lane{2-0};
1850 def VST3LNd16_UPD : VST3LNWB<0b0110, {?,?,0,0}, "16"> {
1851 let Inst{7-6} = lane{1-0};
1853 def VST3LNd32_UPD : VST3LNWB<0b1010, {?,0,0,0}, "32"> {
1854 let Inst{7} = lane{0};
1857 def VST3LNd8Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST3lnu>;
1858 def VST3LNd16Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST3lnu>;
1859 def VST3LNd32Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST3lnu>;
// Double-spaced writeback variants (16/32-bit elements only).
1861 def VST3LNq16_UPD : VST3LNWB<0b0110, {?,?,1,0}, "16"> {
1862 let Inst{7-6} = lane{1-0};
1864 def VST3LNq32_UPD : VST3LNWB<0b1010, {?,1,0,0}, "32"> {
1865 let Inst{7} = lane{0};
1868 def VST3LNq16Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST3lnu>;
1869 def VST3LNq32Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST3lnu>;
1871 // VST4LN : Vector Store (single 4-element structure from one lane)
// Stores one lane from each of four D registers; matched via pseudos and
// intrinsics elsewhere (empty pattern list).
1872 class VST4LN<bits<4> op11_8, bits<4> op7_4, string Dt>
1873 : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
1874 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4,
1875 nohash_imm:$lane), IIC_VST4ln, "vst4", Dt,
1876 "\\{$Vd[$lane], $src2[$lane], $src3[$lane], $src4[$lane]\\}, $Rn",
1879 let Inst{4} = Rn{4};
1880 let DecoderMethod = "DecodeVST4LN";
1883 def VST4LNd8 : VST4LN<0b0011, {?,?,?,?}, "8"> {
1884 let Inst{7-5} = lane{2-0};
1886 def VST4LNd16 : VST4LN<0b0111, {?,?,0,?}, "16"> {
1887 let Inst{7-6} = lane{1-0};
1889 def VST4LNd32 : VST4LN<0b1011, {?,0,?,?}, "32"> {
1890 let Inst{7} = lane{0};
1891 let Inst{5} = Rn{5};
// Four D regs = two Q regs, hence QQ-operand pseudos for the d-form.
1894 def VST4LNd8Pseudo : VSTQQLNPseudo<IIC_VST4ln>;
1895 def VST4LNd16Pseudo : VSTQQLNPseudo<IIC_VST4ln>;
1896 def VST4LNd32Pseudo : VSTQQLNPseudo<IIC_VST4ln>;
1898 // ...with double-spaced registers:
1899 def VST4LNq16 : VST4LN<0b0111, {?,?,1,?}, "16"> {
1900 let Inst{7-6} = lane{1-0};
1902 def VST4LNq32 : VST4LN<0b1011, {?,1,?,?}, "32"> {
1903 let Inst{7} = lane{0};
1904 let Inst{5} = Rn{5};
1907 def VST4LNq16Pseudo : VSTQQQQLNPseudo<IIC_VST4ln>;
1908 def VST4LNq32Pseudo : VSTQQQQLNPseudo<IIC_VST4ln>;
1910 // ...with address register writeback:
// Post-indexed VST4 lane store; updated base returned in GPR:$wb, tied to
// the addrmode6 base via "$Rn.addr = $wb".
1911 class VST4LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
1912 : NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
1913 (ins addrmode6:$Rn, am6offset:$Rm,
1914 DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4, nohash_imm:$lane),
1915 IIC_VST4lnu, "vst4", Dt,
1916 "\\{$Vd[$lane], $src2[$lane], $src3[$lane], $src4[$lane]\\}, $Rn$Rm",
1917 "$Rn.addr = $wb", []> {
1918 let Inst{4} = Rn{4};
1919 let DecoderMethod = "DecodeVST4LN";
1922 def VST4LNd8_UPD : VST4LNWB<0b0011, {?,?,?,?}, "8"> {
1923 let Inst{7-5} = lane{2-0};
1925 def VST4LNd16_UPD : VST4LNWB<0b0111, {?,?,0,?}, "16"> {
1926 let Inst{7-6} = lane{1-0};
1928 def VST4LNd32_UPD : VST4LNWB<0b1011, {?,0,?,?}, "32"> {
1929 let Inst{7} = lane{0};
1930 let Inst{5} = Rn{5};
1933 def VST4LNd8Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST4lnu>;
1934 def VST4LNd16Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST4lnu>;
1935 def VST4LNd32Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST4lnu>;
// Double-spaced writeback variants (16/32-bit elements only).
1937 def VST4LNq16_UPD : VST4LNWB<0b0111, {?,?,1,?}, "16"> {
1938 let Inst{7-6} = lane{1-0};
1940 def VST4LNq32_UPD : VST4LNWB<0b1011, {?,1,?,?}, "32"> {
1941 let Inst{7} = lane{0};
1942 let Inst{5} = Rn{5};
1945 def VST4LNq16Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST4lnu>;
1946 def VST4LNq32Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST4lnu>;
1948 } // mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1
1951 //===----------------------------------------------------------------------===//
1952 // NEON pattern fragments
1953 //===----------------------------------------------------------------------===//
1955 // Extract D sub-registers of Q registers.
// These SDNodeXForms map a vector lane index to the dsub_N subreg index that
// contains it: a Q reg holds 8 i8 / 4 i16 / 2 i32 / 1 f64 lanes per D half,
// hence the divisors 8, 4, 2, 1 respectively.
1956 def DSubReg_i8_reg : SDNodeXForm<imm, [{
1957 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
1958 return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue()/8, MVT::i32);
1960 def DSubReg_i16_reg : SDNodeXForm<imm, [{
1961 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
1962 return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue()/4, MVT::i32);
1964 def DSubReg_i32_reg : SDNodeXForm<imm, [{
1965 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
1966 return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue()/2, MVT::i32);
1968 def DSubReg_f64_reg : SDNodeXForm<imm, [{
1969 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
1970 return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue(), MVT::i32);
1973 // Extract S sub-registers of Q/D registers.
1974 def SSubReg_f32_reg : SDNodeXForm<imm, [{
1975 assert(ARM::ssub_3 == ARM::ssub_0+3 && "Unexpected subreg numbering");
1976 return CurDAG->getTargetConstant(ARM::ssub_0 + N->getZExtValue(), MVT::i32);
1979 // Translate lane numbers from Q registers to D subregs.
// The mask keeps only the lane's offset within one D register (mod 8/4/2),
// complementing the DSubReg_* transforms above which select the D register.
1980 def SubReg_i8_lane : SDNodeXForm<imm, [{
1981 return CurDAG->getTargetConstant(N->getZExtValue() & 7, MVT::i32);
1983 def SubReg_i16_lane : SDNodeXForm<imm, [{
1984 return CurDAG->getTargetConstant(N->getZExtValue() & 3, MVT::i32);
1986 def SubReg_i32_lane : SDNodeXForm<imm, [{
1987 return CurDAG->getTargetConstant(N->getZExtValue() & 1, MVT::i32);
1990 //===----------------------------------------------------------------------===//
1991 // Instruction Classes
1992 //===----------------------------------------------------------------------===//
1994 // Basic 2-register operations: double- and quad-register.
// One source, one destination, same register bank for both; the pattern maps
// a unary SDNode onto the instruction.
1995 class N2VD<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
1996 bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
1997 string Dt, ValueType ResTy, ValueType OpTy, SDNode OpNode>
1998 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$Vd),
1999 (ins DPR:$Vm), IIC_VUNAD, OpcodeStr, Dt,"$Vd, $Vm", "",
2000 [(set DPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vm))))]>;
// Q-register version; differs from N2VD only in register class, itinerary
// (IIC_VUNAQ) and the Q-form bit (the 1 before op4).
2001 class N2VQ<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2002 bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
2003 string Dt, ValueType ResTy, ValueType OpTy, SDNode OpNode>
2004 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$Vd),
2005 (ins QPR:$Vm), IIC_VUNAQ, OpcodeStr, Dt,"$Vd, $Vm", "",
2006 [(set QPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vm))))]>;
2008 // Basic 2-register intrinsics, both double- and quad-register.
// Same shape as N2VD/N2VQ but matched from an Intrinsic instead of an SDNode,
// and with a caller-supplied itinerary.
2009 class N2VDInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2010 bits<2> op17_16, bits<5> op11_7, bit op4,
2011 InstrItinClass itin, string OpcodeStr, string Dt,
2012 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2013 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$Vd),
2014 (ins DPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
2015 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm))))]>;
2016 class N2VQInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2017 bits<2> op17_16, bits<5> op11_7, bit op4,
2018 InstrItinClass itin, string OpcodeStr, string Dt,
2019 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2020 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$Vd),
2021 (ins QPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
2022 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm))))]>;
2024 // Narrow 2-register operations.
// Narrowing: Q-register source (TyQ), D-register result (TyD).
2025 class N2VN<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2026 bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
2027 InstrItinClass itin, string OpcodeStr, string Dt,
2028 ValueType TyD, ValueType TyQ, SDNode OpNode>
2029 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs DPR:$Vd),
2030 (ins QPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
2031 [(set DPR:$Vd, (TyD (OpNode (TyQ QPR:$Vm))))]>;
2033 // Narrow 2-register intrinsics.
// As N2VN but matched from an Intrinsic.
2034 class N2VNInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2035 bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
2036 InstrItinClass itin, string OpcodeStr, string Dt,
2037 ValueType TyD, ValueType TyQ, Intrinsic IntOp>
2038 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs DPR:$Vd),
2039 (ins QPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
2040 [(set DPR:$Vd, (TyD (IntOp (TyQ QPR:$Vm))))]>;
2042 // Long 2-register operations (currently only used for VMOVL).
// Lengthening: D-register source (TyD), Q-register result (TyQ) — the
// inverse of the narrow classes above.
2043 class N2VL<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2044 bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
2045 InstrItinClass itin, string OpcodeStr, string Dt,
2046 ValueType TyQ, ValueType TyD, SDNode OpNode>
2047 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs QPR:$Vd),
2048 (ins DPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
2049 [(set QPR:$Vd, (TyQ (OpNode (TyD DPR:$Vm))))]>;
2051 // Long 2-register intrinsics.
2052 class N2VLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2053 bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
2054 InstrItinClass itin, string OpcodeStr, string Dt,
2055 ValueType TyQ, ValueType TyD, Intrinsic IntOp>
2056 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs QPR:$Vd),
2057 (ins DPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
2058 [(set QPR:$Vd, (TyQ (IntOp (TyD DPR:$Vm))))]>;
2060 // 2-register shuffles (VTRN/VZIP/VUZP), both double- and quad-register.
// Both operands are read AND written (two outs, constrained to the two ins
// via "$src1 = $Vd, $src2 = $Vm"); no selection pattern.
2061 class N2VDShuffle<bits<2> op19_18, bits<5> op11_7, string OpcodeStr, string Dt>
2062 : N2V<0b11, 0b11, op19_18, 0b10, op11_7, 0, 0, (outs DPR:$Vd, DPR:$Vm),
2063 (ins DPR:$src1, DPR:$src2), IIC_VPERMD,
2064 OpcodeStr, Dt, "$Vd, $Vm",
2065 "$src1 = $Vd, $src2 = $Vm", []>;
2066 class N2VQShuffle<bits<2> op19_18, bits<5> op11_7,
2067 InstrItinClass itin, string OpcodeStr, string Dt>
2068 : N2V<0b11, 0b11, op19_18, 0b10, op11_7, 1, 0, (outs QPR:$Vd, QPR:$Vm),
2069 (ins QPR:$src1, QPR:$src2), itin, OpcodeStr, Dt, "$Vd, $Vm",
2070 "$src1 = $Vd, $src2 = $Vm", []>;
2072 // Basic 3-register operations: double- and quad-register.
// Two D sources, one D destination; Commutable controls isCommutable for the
// scheduler/combiner.
2073 class N3VD<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2074 InstrItinClass itin, string OpcodeStr, string Dt,
2075 ValueType ResTy, ValueType OpTy, SDNode OpNode, bit Commutable>
2076 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2077 (outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2078 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2079 [(set DPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]> {
2080 let isCommutable = Commutable;
2082 // Same as N3VD but no data type.
// Uses N3VX (no Dt suffix in the asm string).
2083 class N3VDX<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2084 InstrItinClass itin, string OpcodeStr,
2085 ValueType ResTy, ValueType OpTy,
2086 SDNode OpNode, bit Commutable>
2087 : N3VX<op24, op23, op21_20, op11_8, 0, op4,
2088 (outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2089 OpcodeStr, "$Vd, $Vn, $Vm", "",
2090 [(set DPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]>{
2091 let isCommutable = Commutable;
// Scalar ("by lane") forms: $Vm is a lane of a D register duplicated across
// the vector (NEONvduplane). 32-bit lanes come from DPR_VFP2, 16-bit lanes
// from DPR_8 — the encodable subsets for the scalar operand.
2094 class N3VDSL<bits<2> op21_20, bits<4> op11_8,
2095 InstrItinClass itin, string OpcodeStr, string Dt,
2096 ValueType Ty, SDNode ShOp>
2097 : N3VLane32<0, 1, op21_20, op11_8, 1, 0,
2098 (outs DPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2099 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2101 (Ty (ShOp (Ty DPR:$Vn),
2102 (Ty (NEONvduplane (Ty DPR_VFP2:$Vm),imm:$lane)))))]> {
2103 let isCommutable = 0;
2105 class N3VDSL16<bits<2> op21_20, bits<4> op11_8,
2106 string OpcodeStr, string Dt, ValueType Ty, SDNode ShOp>
2107 : N3VLane16<0, 1, op21_20, op11_8, 1, 0,
2108 (outs DPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2109 NVMulSLFrm, IIC_VMULi16D, OpcodeStr, Dt,"$Vd, $Vn, $Vm$lane","",
2111 (Ty (ShOp (Ty DPR:$Vn),
2112 (Ty (NEONvduplane (Ty DPR_8:$Vm), imm:$lane)))))]> {
2113 let isCommutable = 0;
// Q-register counterparts of N3VD/N3VDX/N3VDSL/N3VDSL16 above; same operand
// shape with QPR sources/destination and the Q-form bit set.
2116 class N3VQ<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2117 InstrItinClass itin, string OpcodeStr, string Dt,
2118 ValueType ResTy, ValueType OpTy, SDNode OpNode, bit Commutable>
2119 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2120 (outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
2121 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2122 [(set QPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]> {
2123 let isCommutable = Commutable;
2125 class N3VQX<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2126 InstrItinClass itin, string OpcodeStr,
2127 ValueType ResTy, ValueType OpTy, SDNode OpNode, bit Commutable>
2128 : N3VX<op24, op23, op21_20, op11_8, 1, op4,
2129 (outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
2130 OpcodeStr, "$Vd, $Vn, $Vm", "",
2131 [(set QPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]>{
2132 let isCommutable = Commutable;
// Scalar forms: note ResTy result from OpTy scalar — the duplicated lane is
// widened to the Q-register result type.
2134 class N3VQSL<bits<2> op21_20, bits<4> op11_8,
2135 InstrItinClass itin, string OpcodeStr, string Dt,
2136 ValueType ResTy, ValueType OpTy, SDNode ShOp>
2137 : N3VLane32<1, 1, op21_20, op11_8, 1, 0,
2138 (outs QPR:$Vd), (ins QPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2139 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2140 [(set (ResTy QPR:$Vd),
2141 (ResTy (ShOp (ResTy QPR:$Vn),
2142 (ResTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
2144 let isCommutable = 0;
2146 class N3VQSL16<bits<2> op21_20, bits<4> op11_8, string OpcodeStr, string Dt,
2147 ValueType ResTy, ValueType OpTy, SDNode ShOp>
2148 : N3VLane16<1, 1, op21_20, op11_8, 1, 0,
2149 (outs QPR:$Vd), (ins QPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2150 NVMulSLFrm, IIC_VMULi16Q, OpcodeStr, Dt,"$Vd, $Vn, $Vm$lane", "",
2151 [(set (ResTy QPR:$Vd),
2152 (ResTy (ShOp (ResTy QPR:$Vn),
2153 (ResTy (NEONvduplane (OpTy DPR_8:$Vm),
2155 let isCommutable = 0;
2158 // Basic 3-register intrinsics, both double- and quad-register.
// Intrinsic-matched variant of N3VD, with a caller-supplied Format f.
2159 class N3VDInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2160 Format f, InstrItinClass itin, string OpcodeStr, string Dt,
2161 ValueType ResTy, ValueType OpTy, Intrinsic IntOp, bit Commutable>
2162 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2163 (outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), f, itin,
2164 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2165 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]> {
2166 let isCommutable = Commutable;
// Scalar (by-lane) intrinsic forms, 32- and 16-bit lane variants.
2168 class N3VDIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2169 string OpcodeStr, string Dt, ValueType Ty, Intrinsic IntOp>
2170 : N3VLane32<0, 1, op21_20, op11_8, 1, 0,
2171 (outs DPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2172 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2174 (Ty (IntOp (Ty DPR:$Vn),
2175 (Ty (NEONvduplane (Ty DPR_VFP2:$Vm),
2177 let isCommutable = 0;
2179 class N3VDIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2180 string OpcodeStr, string Dt, ValueType Ty, Intrinsic IntOp>
2181 : N3VLane16<0, 1, op21_20, op11_8, 1, 0,
2182 (outs DPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2183 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2185 (Ty (IntOp (Ty DPR:$Vn),
2186 (Ty (NEONvduplane (Ty DPR_8:$Vm), imm:$lane)))))]> {
2187 let isCommutable = 0;
// "Sh" (shift) variant: operand order in both the asm string and the pattern
// is $Vm, $Vn — swapped relative to N3VDInt — and never commutable.
2189 class N3VDIntSh<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2190 Format f, InstrItinClass itin, string OpcodeStr, string Dt,
2191 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2192 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2193 (outs DPR:$Vd), (ins DPR:$Vm, DPR:$Vn), f, itin,
2194 OpcodeStr, Dt, "$Vd, $Vm, $Vn", "",
2195 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm), (OpTy DPR:$Vn))))]> {
2196 let isCommutable = 0;
// Q-register counterparts of the N3VDInt family above: plain, by-lane (32-
// and 16-bit scalar), and swapped-operand "Sh" variants.
2199 class N3VQInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2200 Format f, InstrItinClass itin, string OpcodeStr, string Dt,
2201 ValueType ResTy, ValueType OpTy, Intrinsic IntOp, bit Commutable>
2202 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2203 (outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), f, itin,
2204 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2205 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]> {
2206 let isCommutable = Commutable;
2208 class N3VQIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2209 string OpcodeStr, string Dt,
2210 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2211 : N3VLane32<1, 1, op21_20, op11_8, 1, 0,
2212 (outs QPR:$Vd), (ins QPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2213 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2214 [(set (ResTy QPR:$Vd),
2215 (ResTy (IntOp (ResTy QPR:$Vn),
2216 (ResTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
2218 let isCommutable = 0;
2220 class N3VQIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2221 string OpcodeStr, string Dt,
2222 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2223 : N3VLane16<1, 1, op21_20, op11_8, 1, 0,
2224 (outs QPR:$Vd), (ins QPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2225 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2226 [(set (ResTy QPR:$Vd),
2227 (ResTy (IntOp (ResTy QPR:$Vn),
2228 (ResTy (NEONvduplane (OpTy DPR_8:$Vm),
2230 let isCommutable = 0;
// Swapped-operand ($Vm, $Vn) variant, as in N3VDIntSh.
2232 class N3VQIntSh<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2233 Format f, InstrItinClass itin, string OpcodeStr, string Dt,
2234 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2235 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2236 (outs QPR:$Vd), (ins QPR:$Vm, QPR:$Vn), f, itin,
2237 OpcodeStr, Dt, "$Vd, $Vm, $Vn", "",
2238 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm), (OpTy QPR:$Vn))))]> {
2239 let isCommutable = 0;
2242 // Multiply-Add/Sub operations: double- and quad-register.
// Accumulating pattern: Vd = OpNode(src1, MulOp(Vn, Vm)) with the
// accumulator tied to the destination ("$src1 = $Vd").
2243 class N3VDMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2244 InstrItinClass itin, string OpcodeStr, string Dt,
2245 ValueType Ty, SDPatternOperator MulOp, SDPatternOperator OpNode>
2246 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2247 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2248 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2249 [(set DPR:$Vd, (Ty (OpNode DPR:$src1,
2250 (Ty (MulOp DPR:$Vn, DPR:$Vm)))))]>;
// By-lane multiply-accumulate: the multiplier is a duplicated D-reg lane
// (DPR_VFP2 + VectorIndex32 here; DPR_8 + VectorIndex16 in the 16-bit form).
2252 class N3VDMulOpSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2253 string OpcodeStr, string Dt,
2254 ValueType Ty, SDPatternOperator MulOp, SDPatternOperator ShOp>
2255 : N3VLane32<0, 1, op21_20, op11_8, 1, 0,
2257 (ins DPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2259 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2261 (Ty (ShOp (Ty DPR:$src1),
2263 (Ty (NEONvduplane (Ty DPR_VFP2:$Vm),
2265 class N3VDMulOpSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2266 string OpcodeStr, string Dt,
2267 ValueType Ty, SDNode MulOp, SDNode ShOp>
2268 : N3VLane16<0, 1, op21_20, op11_8, 1, 0,
2270 (ins DPR:$src1, DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2272 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2274 (Ty (ShOp (Ty DPR:$src1),
2276 (Ty (NEONvduplane (Ty DPR_8:$Vm),
// Q-register counterparts of the three classes above.
2279 class N3VQMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2280 InstrItinClass itin, string OpcodeStr, string Dt, ValueType Ty,
2281 SDPatternOperator MulOp, SDPatternOperator OpNode>
2282 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2283 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
2284 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2285 [(set QPR:$Vd, (Ty (OpNode QPR:$src1,
2286 (Ty (MulOp QPR:$Vn, QPR:$Vm)))))]>;
2287 class N3VQMulOpSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2288 string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
2289 SDPatternOperator MulOp, SDPatternOperator ShOp>
2290 : N3VLane32<1, 1, op21_20, op11_8, 1, 0,
2292 (ins QPR:$src1, QPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2294 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2295 [(set (ResTy QPR:$Vd),
2296 (ResTy (ShOp (ResTy QPR:$src1),
2297 (ResTy (MulOp QPR:$Vn,
2298 (ResTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
2300 class N3VQMulOpSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2301 string OpcodeStr, string Dt,
2302 ValueType ResTy, ValueType OpTy,
2303 SDNode MulOp, SDNode ShOp>
2304 : N3VLane16<1, 1, op21_20, op11_8, 1, 0,
2306 (ins QPR:$src1, QPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2308 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2309 [(set (ResTy QPR:$Vd),
2310 (ResTy (ShOp (ResTy QPR:$src1),
2311 (ResTy (MulOp QPR:$Vn,
2312 (ResTy (NEONvduplane (OpTy DPR_8:$Vm),
2315 // Neon Intrinsic-Op instructions (VABA): double- and quad-register.
// Like the MulOp classes but the inner operation is an Intrinsic:
// Vd = OpNode(src1, IntOp(Vn, Vm)), accumulator tied via "$src1 = $Vd".
2316 class N3VDIntOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2317 InstrItinClass itin, string OpcodeStr, string Dt,
2318 ValueType Ty, Intrinsic IntOp, SDNode OpNode>
2319 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2320 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2321 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2322 [(set DPR:$Vd, (Ty (OpNode DPR:$src1,
2323 (Ty (IntOp (Ty DPR:$Vn), (Ty DPR:$Vm))))))]>;
2324 class N3VQIntOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2325 InstrItinClass itin, string OpcodeStr, string Dt,
2326 ValueType Ty, Intrinsic IntOp, SDNode OpNode>
2327 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2328 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
2329 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2330 [(set QPR:$Vd, (Ty (OpNode QPR:$src1,
2331 (Ty (IntOp (Ty QPR:$Vn), (Ty QPR:$Vm))))))]>;
2333 // Neon 3-argument intrinsics, both double- and quad-register.
2334 // The destination register is also used as the first source operand register.
// Here the intrinsic itself takes all three operands:
// Vd = IntOp(src1, Vn, Vm), with src1 tied to the destination.
2335 class N3VDInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2336 InstrItinClass itin, string OpcodeStr, string Dt,
2337 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2338 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2339 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2340 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2341 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$src1),
2342 (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]>;
2343 class N3VQInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2344 InstrItinClass itin, string OpcodeStr, string Dt,
2345 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2346 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2347 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
2348 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2349 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$src1),
2350 (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]>;
2352 // Long Multiply-Add/Sub operations.
// Long form: D-register multiply operands (TyD), Q-register accumulator and
// result (TyQ); accumulator tied via "$src1 = $Vd".
2353 class N3VLMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2354 InstrItinClass itin, string OpcodeStr, string Dt,
2355 ValueType TyQ, ValueType TyD, SDNode MulOp, SDNode OpNode>
2356 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2357 (outs QPR:$Vd), (ins QPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2358 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2359 [(set QPR:$Vd, (OpNode (TyQ QPR:$src1),
2360 (TyQ (MulOp (TyD DPR:$Vn),
2361 (TyD DPR:$Vm)))))]>;
// By-lane long multiply-accumulate, 32- and 16-bit scalar variants.
2362 class N3VLMulOpSL<bit op24, bits<2> op21_20, bits<4> op11_8,
2363 InstrItinClass itin, string OpcodeStr, string Dt,
2364 ValueType TyQ, ValueType TyD, SDNode MulOp, SDNode OpNode>
2365 : N3VLane32<op24, 1, op21_20, op11_8, 1, 0, (outs QPR:$Vd),
2366 (ins QPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2368 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2370 (OpNode (TyQ QPR:$src1),
2371 (TyQ (MulOp (TyD DPR:$Vn),
2372 (TyD (NEONvduplane (TyD DPR_VFP2:$Vm),
2374 class N3VLMulOpSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
2375 InstrItinClass itin, string OpcodeStr, string Dt,
2376 ValueType TyQ, ValueType TyD, SDNode MulOp, SDNode OpNode>
2377 : N3VLane16<op24, 1, op21_20, op11_8, 1, 0, (outs QPR:$Vd),
2378 (ins QPR:$src1, DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2380 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2382 (OpNode (TyQ QPR:$src1),
2383 (TyQ (MulOp (TyD DPR:$Vn),
2384 (TyD (NEONvduplane (TyD DPR_8:$Vm),
2387 // Long Intrinsic-Op vector operations with explicit extend (VABAL).
// Vd = OpNode(src1, ExtOp(IntOp(Vn, Vm))): the D-sized intrinsic result is
// widened by ExtOp before accumulating into the Q register.
2388 class N3VLIntExtOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2389 InstrItinClass itin, string OpcodeStr, string Dt,
2390 ValueType TyQ, ValueType TyD, Intrinsic IntOp, SDNode ExtOp,
2392 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2393 (outs QPR:$Vd), (ins QPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2394 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2395 [(set QPR:$Vd, (OpNode (TyQ QPR:$src1),
2396 (TyQ (ExtOp (TyD (IntOp (TyD DPR:$Vn),
2397 (TyD DPR:$Vm)))))))]>;
2399 // Neon Long 3-argument intrinsic. The destination register is
2400 // a quad-register and is also used as the first source operand register.
2401 class N3VLInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2402 InstrItinClass itin, string OpcodeStr, string Dt,
2403 ValueType TyQ, ValueType TyD, Intrinsic IntOp>
2404 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2405 (outs QPR:$Vd), (ins QPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2406 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2408 (TyQ (IntOp (TyQ QPR:$src1), (TyD DPR:$Vn), (TyD DPR:$Vm))))]>;
// By-lane long 3-argument intrinsic, 32- and 16-bit scalar variants.
2409 class N3VLInt3SL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2410 string OpcodeStr, string Dt,
2411 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2412 : N3VLane32<op24, 1, op21_20, op11_8, 1, 0,
2414 (ins QPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2416 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2417 [(set (ResTy QPR:$Vd),
2418 (ResTy (IntOp (ResTy QPR:$src1),
2420 (OpTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
2422 class N3VLInt3SL16<bit op24, bits<2> op21_20, bits<4> op11_8,
2423 InstrItinClass itin, string OpcodeStr, string Dt,
2424 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2425 : N3VLane16<op24, 1, op21_20, op11_8, 1, 0,
2427 (ins QPR:$src1, DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2429 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2430 [(set (ResTy QPR:$Vd),
2431 (ResTy (IntOp (ResTy QPR:$src1),
2433 (OpTy (NEONvduplane (OpTy DPR_8:$Vm),
2436 // Narrowing 3-register intrinsics.
// Two Q-register sources (TyQ) narrow to one D-register result (TyD).
2437 class N3VNInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2438 string OpcodeStr, string Dt, ValueType TyD, ValueType TyQ,
2439 Intrinsic IntOp, bit Commutable>
2440 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2441 (outs DPR:$Vd), (ins QPR:$Vn, QPR:$Vm), N3RegFrm, IIC_VBINi4D,
2442 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2443 [(set DPR:$Vd, (TyD (IntOp (TyQ QPR:$Vn), (TyQ QPR:$Vm))))]> {
2444 let isCommutable = Commutable;
2447 // Long 3-register operations.
// Two D-register sources (TyD) widen to one Q-register result (TyQ).
2448 class N3VL<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2449 InstrItinClass itin, string OpcodeStr, string Dt,
2450 ValueType TyQ, ValueType TyD, SDNode OpNode, bit Commutable>
2451 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2452 (outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2453 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2454 [(set QPR:$Vd, (TyQ (OpNode (TyD DPR:$Vn), (TyD DPR:$Vm))))]> {
2455 let isCommutable = Commutable;
// By-lane long operations, 32- and 16-bit scalar variants.
2457 class N3VLSL<bit op24, bits<2> op21_20, bits<4> op11_8,
2458 InstrItinClass itin, string OpcodeStr, string Dt,
2459 ValueType TyQ, ValueType TyD, SDNode OpNode>
2460 : N3VLane32<op24, 1, op21_20, op11_8, 1, 0,
2461 (outs QPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2462 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2464 (TyQ (OpNode (TyD DPR:$Vn),
2465 (TyD (NEONvduplane (TyD DPR_VFP2:$Vm),imm:$lane)))))]>;
2466 class N3VLSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
2467 InstrItinClass itin, string OpcodeStr, string Dt,
2468 ValueType TyQ, ValueType TyD, SDNode OpNode>
2469 : N3VLane16<op24, 1, op21_20, op11_8, 1, 0,
2470 (outs QPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2471 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2473 (TyQ (OpNode (TyD DPR:$Vn),
2474 (TyD (NEONvduplane (TyD DPR_8:$Vm), imm:$lane)))))]>;
2476 // Long 3-register operations with explicitly extended operands.
2477 class N3VLExt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2478 InstrItinClass itin, string OpcodeStr, string Dt,
2479 ValueType TyQ, ValueType TyD, SDNode OpNode, SDNode ExtOp,
2481 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2482 (outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2483 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2484 [(set QPR:$Vd, (OpNode (TyQ (ExtOp (TyD DPR:$Vn))),
2485 (TyQ (ExtOp (TyD DPR:$Vm)))))]> {
2486 let isCommutable = Commutable;
2489 // Long 3-register intrinsics with explicit extend (VABDL).
2490 class N3VLIntExt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2491 InstrItinClass itin, string OpcodeStr, string Dt,
2492 ValueType TyQ, ValueType TyD, Intrinsic IntOp, SDNode ExtOp,
2494 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2495 (outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2496 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2497 [(set QPR:$Vd, (TyQ (ExtOp (TyD (IntOp (TyD DPR:$Vn),
2498 (TyD DPR:$Vm))))))]> {
2499 let isCommutable = Commutable;
2502 // Long 3-register intrinsics.
2503 class N3VLInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2504 InstrItinClass itin, string OpcodeStr, string Dt,
2505 ValueType TyQ, ValueType TyD, Intrinsic IntOp, bit Commutable>
2506 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2507 (outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2508 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2509 [(set QPR:$Vd, (TyQ (IntOp (TyD DPR:$Vn), (TyD DPR:$Vm))))]> {
2510 let isCommutable = Commutable;
2512 class N3VLIntSL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2513 string OpcodeStr, string Dt,
2514 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2515 : N3VLane32<op24, 1, op21_20, op11_8, 1, 0,
2516 (outs QPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2517 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2518 [(set (ResTy QPR:$Vd),
2519 (ResTy (IntOp (OpTy DPR:$Vn),
2520 (OpTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
2522 class N3VLIntSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
2523 InstrItinClass itin, string OpcodeStr, string Dt,
2524 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2525 : N3VLane16<op24, 1, op21_20, op11_8, 1, 0,
2526 (outs QPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2527 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2528 [(set (ResTy QPR:$Vd),
2529 (ResTy (IntOp (OpTy DPR:$Vn),
2530 (OpTy (NEONvduplane (OpTy DPR_8:$Vm),
2533 // Wide 3-register operations.
2534 class N3VW<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2535 string OpcodeStr, string Dt, ValueType TyQ, ValueType TyD,
2536 SDNode OpNode, SDNode ExtOp, bit Commutable>
2537 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2538 (outs QPR:$Vd), (ins QPR:$Vn, DPR:$Vm), N3RegFrm, IIC_VSUBiD,
2539 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2540 [(set QPR:$Vd, (OpNode (TyQ QPR:$Vn),
2541 (TyQ (ExtOp (TyD DPR:$Vm)))))]> {
2542 let isCommutable = Commutable;
2545 // Pairwise long 2-register intrinsics, both double- and quad-register.
// N2VDPLInt: double-register form. One D-register source, one D-register
// result; the intrinsic IntOp maps OpTy source elements to ResTy result
// elements (pairwise-long: result elements are wider, per the comment above).
2546 class N2VDPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2547 bits<2> op17_16, bits<5> op11_7, bit op4,
2548 string OpcodeStr, string Dt,
2549 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2550 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$Vd),
2551 (ins DPR:$Vm), IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm", "",
2552 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm))))]>;
// N2VQPLInt: quad-register form. Identical pattern shape, but the hard-coded
// N2V encoding operand is 1 instead of 0 and QPR register classes are used.
2553 class N2VQPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2554 bits<2> op17_16, bits<5> op11_7, bit op4,
2555 string OpcodeStr, string Dt,
2556 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2557 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$Vd),
2558 (ins QPR:$Vm), IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm", "",
2559 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm))))]>;
2561 // Pairwise long 2-register accumulate intrinsics,
2562 // both double- and quad-register.
2563 // The destination register is also used as the first source operand register.
// N2VDPLInt2: double-register accumulating form. The "$src1 = $Vd" constraint
// ties the accumulator input to the destination register; IntOp receives the
// previous destination contents ($src1) plus the new source ($Vm).
2564 class N2VDPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2565 bits<2> op17_16, bits<5> op11_7, bit op4,
2566 string OpcodeStr, string Dt,
2567 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2568 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4,
2569 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vm), IIC_VPALiD,
2570 OpcodeStr, Dt, "$Vd, $Vm", "$src1 = $Vd",
2571 [(set DPR:$Vd, (ResTy (IntOp (ResTy DPR:$src1), (OpTy DPR:$Vm))))]>;
// N2VQPLInt2: quad-register accumulating form; same pattern with QPR
// register classes and the quad-register itinerary (IIC_VPALiQ).
2572 class N2VQPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2573 bits<2> op17_16, bits<5> op11_7, bit op4,
2574 string OpcodeStr, string Dt,
2575 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2576 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4,
2577 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vm), IIC_VPALiQ,
2578 OpcodeStr, Dt, "$Vd, $Vm", "$src1 = $Vd",
2579 [(set QPR:$Vd, (ResTy (IntOp (ResTy QPR:$src1), (OpTy QPR:$Vm))))]>;
2581 // Shift by immediate,
2582 // both double- and quad-register.
// N2VDSh: double-register shift-by-immediate. The shift amount is the $SIMM
// immediate operand (operand class ImmTy, matched as i32 imm in the pattern);
// Format f and itinerary itin are supplied by the instantiation.
2583 class N2VDSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2584 Format f, InstrItinClass itin, Operand ImmTy,
2585 string OpcodeStr, string Dt, ValueType Ty, SDNode OpNode>
2586 : N2VImm<op24, op23, op11_8, op7, 0, op4,
2587 (outs DPR:$Vd), (ins DPR:$Vm, ImmTy:$SIMM), f, itin,
2588 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2589 [(set DPR:$Vd, (Ty (OpNode (Ty DPR:$Vm), (i32 imm:$SIMM))))]>;
// N2VQSh: quad-register form of the same shift-by-immediate pattern
// (hard-coded N2VImm encoding operand 1, QPR register classes).
2590 class N2VQSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2591 Format f, InstrItinClass itin, Operand ImmTy,
2592 string OpcodeStr, string Dt, ValueType Ty, SDNode OpNode>
2593 : N2VImm<op24, op23, op11_8, op7, 1, op4,
2594 (outs QPR:$Vd), (ins QPR:$Vm, ImmTy:$SIMM), f, itin,
2595 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2596 [(set QPR:$Vd, (Ty (OpNode (Ty QPR:$Vm), (i32 imm:$SIMM))))]>;
2598 // Long shift by immediate.
// N2VLSh: lengthening shift — reads a D register (OpTy) and writes a Q
// register (ResTy), so each result element is wider than its source element.
// Shift amount is the i32imm operand $SIMM.
2599 class N2VLSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
2600 string OpcodeStr, string Dt,
2601 ValueType ResTy, ValueType OpTy, SDNode OpNode>
2602 : N2VImm<op24, op23, op11_8, op7, op6, op4,
2603 (outs QPR:$Vd), (ins DPR:$Vm, i32imm:$SIMM), N2RegVShLFrm,
2604 IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2605 [(set QPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vm),
2606 (i32 imm:$SIMM))))]>;
2608 // Narrow shift by immediate.
// N2VNSh: narrowing shift — reads a Q register (OpTy) and writes a D
// register (ResTy). The immediate operand class ImmTy and the itinerary are
// supplied by the instantiation; the format is the shift-right form
// (N2RegVShRFrm).
2609 class N2VNSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
2610 InstrItinClass itin, string OpcodeStr, string Dt,
2611 ValueType ResTy, ValueType OpTy, Operand ImmTy, SDNode OpNode>
2612 : N2VImm<op24, op23, op11_8, op7, op6, op4,
2613 (outs DPR:$Vd), (ins QPR:$Vm, ImmTy:$SIMM), N2RegVShRFrm, itin,
2614 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2615 [(set DPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vm),
2616 (i32 imm:$SIMM))))]>;
2618 // Shift right by immediate and accumulate,
2619 // both double- and quad-register.
// N2VDShAdd: double-register form. The pattern is add($src1, ShOp($Vm, imm)):
// $Vm is shifted by the immediate and the result is accumulated into the
// destination, which is tied to $src1 via the "$src1 = $Vd" constraint.
2620 class N2VDShAdd<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2621 Operand ImmTy, string OpcodeStr, string Dt,
2622 ValueType Ty, SDNode ShOp>
2623 : N2VImm<op24, op23, op11_8, op7, 0, op4, (outs DPR:$Vd),
2624 (ins DPR:$src1, DPR:$Vm, ImmTy:$SIMM), N2RegVShRFrm, IIC_VPALiD,
2625 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
2626 [(set DPR:$Vd, (Ty (add DPR:$src1,
2627 (Ty (ShOp DPR:$Vm, (i32 imm:$SIMM))))))]>;
// N2VQShAdd: quad-register form of the same shift-and-accumulate pattern.
2628 class N2VQShAdd<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2629 Operand ImmTy, string OpcodeStr, string Dt,
2630 ValueType Ty, SDNode ShOp>
2631 : N2VImm<op24, op23, op11_8, op7, 1, op4, (outs QPR:$Vd),
2632 (ins QPR:$src1, QPR:$Vm, ImmTy:$SIMM), N2RegVShRFrm, IIC_VPALiD,
2633 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
2634 [(set QPR:$Vd, (Ty (add QPR:$src1,
2635 (Ty (ShOp QPR:$Vm, (i32 imm:$SIMM))))))]>;
2637 // Shift by immediate and insert,
2638 // both double- and quad-register.
// N2VDShIns: double-register form. Unlike the accumulate classes above, ShOp
// here is a 3-operand node taking ($src1, $Vm, imm) — the result depends on
// the prior destination contents, matching insert-style semantics. The
// destination is tied to $src1 via "$src1 = $Vd".
2639 class N2VDShIns<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2640 Operand ImmTy, Format f, string OpcodeStr, string Dt,
2641 ValueType Ty,SDNode ShOp>
2642 : N2VImm<op24, op23, op11_8, op7, 0, op4, (outs DPR:$Vd),
2643 (ins DPR:$src1, DPR:$Vm, ImmTy:$SIMM), f, IIC_VSHLiD,
2644 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
2645 [(set DPR:$Vd, (Ty (ShOp DPR:$src1, DPR:$Vm, (i32 imm:$SIMM))))]>;
// N2VQShIns: quad-register form of the same shift-and-insert pattern,
// using the quad-register itinerary (IIC_VSHLiQ).
2646 class N2VQShIns<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2647 Operand ImmTy, Format f, string OpcodeStr, string Dt,
2648 ValueType Ty,SDNode ShOp>
2649 : N2VImm<op24, op23, op11_8, op7, 1, op4, (outs QPR:$Vd),
2650 (ins QPR:$src1, QPR:$Vm, ImmTy:$SIMM), f, IIC_VSHLiQ,
2651 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
2652 [(set QPR:$Vd, (Ty (ShOp QPR:$src1, QPR:$Vm, (i32 imm:$SIMM))))]>;
2654 // Convert, with fractional bits immediate,
2655 // both double- and quad-register.
2656 class N2VCvtD<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2657 string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
2659 : N2VImm<op24, op23, op11_8, op7, 0, op4,
2660 (outs DPR:$Vd), (ins DPR:$Vm, neon_vcvt_imm32:$SIMM), NVCVTFrm,
2661 IIC_VUNAD, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2662 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm), (i32 imm:$SIMM))))]>;
2663 class N2VCvtQ<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2664 string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
2666 : N2VImm<op24, op23, op11_8, op7, 1, op4,
2667 (outs QPR:$Vd), (ins QPR:$Vm, neon_vcvt_imm32:$SIMM), NVCVTFrm,
2668 IIC_VUNAQ, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2669 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm), (i32 imm:$SIMM))))]>;
2671 //===----------------------------------------------------------------------===//
2673 //===----------------------------------------------------------------------===//
2675 // Abbreviations used in multiclass suffixes:
2676 // Q = quarter int (8 bit) elements
2677 // H = half int (16 bit) elements
2678 // S = single int (32 bit) elements
2679 // D = double int (64 bit) elements
2681 // Neon 2-register vector operations and intrinsics.
2683 // Neon 2-register comparisons.
2684 // source operand element sizes of 8, 16 and 32 bits:
2685 multiclass N2V_QHS_cmp<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
2686 bits<5> op11_7, bit op4, string opc, string Dt,
2687 string asm, SDNode OpNode> {
2688 // 64-bit vector types.
2689 def v8i8 : N2V<op24_23, op21_20, 0b00, op17_16, op11_7, 0, op4,
2690 (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
2691 opc, !strconcat(Dt, "8"), asm, "",
2692 [(set DPR:$Vd, (v8i8 (OpNode (v8i8 DPR:$Vm))))]>;
2693 def v4i16 : N2V<op24_23, op21_20, 0b01, op17_16, op11_7, 0, op4,
2694 (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
2695 opc, !strconcat(Dt, "16"), asm, "",
2696 [(set DPR:$Vd, (v4i16 (OpNode (v4i16 DPR:$Vm))))]>;
2697 def v2i32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 0, op4,
2698 (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
2699 opc, !strconcat(Dt, "32"), asm, "",
2700 [(set DPR:$Vd, (v2i32 (OpNode (v2i32 DPR:$Vm))))]>;
2701 def v2f32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 0, op4,
2702 (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
2703 opc, "f32", asm, "",
2704 [(set DPR:$Vd, (v2i32 (OpNode (v2f32 DPR:$Vm))))]> {
2705 let Inst{10} = 1; // overwrite F = 1
2708 // 128-bit vector types.
2709 def v16i8 : N2V<op24_23, op21_20, 0b00, op17_16, op11_7, 1, op4,
2710 (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
2711 opc, !strconcat(Dt, "8"), asm, "",
2712 [(set QPR:$Vd, (v16i8 (OpNode (v16i8 QPR:$Vm))))]>;
2713 def v8i16 : N2V<op24_23, op21_20, 0b01, op17_16, op11_7, 1, op4,
2714 (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
2715 opc, !strconcat(Dt, "16"), asm, "",
2716 [(set QPR:$Vd, (v8i16 (OpNode (v8i16 QPR:$Vm))))]>;
2717 def v4i32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 1, op4,
2718 (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
2719 opc, !strconcat(Dt, "32"), asm, "",
2720 [(set QPR:$Vd, (v4i32 (OpNode (v4i32 QPR:$Vm))))]>;
2721 def v4f32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 1, op4,
2722 (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
2723 opc, "f32", asm, "",
2724 [(set QPR:$Vd, (v4i32 (OpNode (v4f32 QPR:$Vm))))]> {
2725 let Inst{10} = 1; // overwrite F = 1
2730 // Neon 2-register vector intrinsics,
2731 // element sizes of 8, 16 and 32 bits:
2732 multiclass N2VInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
2733 bits<5> op11_7, bit op4,
2734 InstrItinClass itinD, InstrItinClass itinQ,
2735 string OpcodeStr, string Dt, Intrinsic IntOp> {
2736 // 64-bit vector types.
2737 def v8i8 : N2VDInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
2738 itinD, OpcodeStr, !strconcat(Dt, "8"), v8i8, v8i8, IntOp>;
2739 def v4i16 : N2VDInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
2740 itinD, OpcodeStr, !strconcat(Dt, "16"),v4i16,v4i16,IntOp>;
2741 def v2i32 : N2VDInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
2742 itinD, OpcodeStr, !strconcat(Dt, "32"),v2i32,v2i32,IntOp>;
2744 // 128-bit vector types.
2745 def v16i8 : N2VQInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
2746 itinQ, OpcodeStr, !strconcat(Dt, "8"), v16i8,v16i8,IntOp>;
2747 def v8i16 : N2VQInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
2748 itinQ, OpcodeStr, !strconcat(Dt, "16"),v8i16,v8i16,IntOp>;
2749 def v4i32 : N2VQInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
2750 itinQ, OpcodeStr, !strconcat(Dt, "32"),v4i32,v4i32,IntOp>;
2754 // Neon Narrowing 2-register vector operations,
2755 // source operand element sizes of 16, 32 and 64 bits:
2756 multiclass N2VN_HSD<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
2757 bits<5> op11_7, bit op6, bit op4,
2758 InstrItinClass itin, string OpcodeStr, string Dt,
2760 def v8i8 : N2VN<op24_23, op21_20, 0b00, op17_16, op11_7, op6, op4,
2761 itin, OpcodeStr, !strconcat(Dt, "16"),
2762 v8i8, v8i16, OpNode>;
2763 def v4i16 : N2VN<op24_23, op21_20, 0b01, op17_16, op11_7, op6, op4,
2764 itin, OpcodeStr, !strconcat(Dt, "32"),
2765 v4i16, v4i32, OpNode>;
2766 def v2i32 : N2VN<op24_23, op21_20, 0b10, op17_16, op11_7, op6, op4,
2767 itin, OpcodeStr, !strconcat(Dt, "64"),
2768 v2i32, v2i64, OpNode>;
2771 // Neon Narrowing 2-register vector intrinsics,
2772 // source operand element sizes of 16, 32 and 64 bits:
2773 multiclass N2VNInt_HSD<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
2774 bits<5> op11_7, bit op6, bit op4,
2775 InstrItinClass itin, string OpcodeStr, string Dt,
2777 def v8i8 : N2VNInt<op24_23, op21_20, 0b00, op17_16, op11_7, op6, op4,
2778 itin, OpcodeStr, !strconcat(Dt, "16"),
2779 v8i8, v8i16, IntOp>;
2780 def v4i16 : N2VNInt<op24_23, op21_20, 0b01, op17_16, op11_7, op6, op4,
2781 itin, OpcodeStr, !strconcat(Dt, "32"),
2782 v4i16, v4i32, IntOp>;
2783 def v2i32 : N2VNInt<op24_23, op21_20, 0b10, op17_16, op11_7, op6, op4,
2784 itin, OpcodeStr, !strconcat(Dt, "64"),
2785 v2i32, v2i64, IntOp>;
2789 // Neon Lengthening 2-register vector intrinsic (currently specific to VMOVL).
2790 // source operand element sizes of 16, 32 and 64 bits:
2791 multiclass N2VL_QHS<bits<2> op24_23, bits<5> op11_7, bit op6, bit op4,
2792 string OpcodeStr, string Dt, SDNode OpNode> {
2793 def v8i16 : N2VL<op24_23, 0b00, 0b10, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
2794 OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, OpNode>;
2795 def v4i32 : N2VL<op24_23, 0b01, 0b00, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
2796 OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, OpNode>;
2797 def v2i64 : N2VL<op24_23, 0b10, 0b00, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
2798 OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, OpNode>;
2802 // Neon 3-register vector operations.
2804 // First with only element sizes of 8, 16 and 32 bits:
// Instantiates one def per element size: the 0b00/0b01/0b10 size-field value
// pairs with the "8"/"16"/"32" suffix appended to Dt, for both the
// double-register (N3VD) and quad-register (N3VQ) forms. Separate itineraries
// are taken for 8/16-bit vs 32-bit element defs; Commutable defaults to 0.
2805 multiclass N3V_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
2806 InstrItinClass itinD16, InstrItinClass itinD32,
2807 InstrItinClass itinQ16, InstrItinClass itinQ32,
2808 string OpcodeStr, string Dt,
2809 SDNode OpNode, bit Commutable = 0> {
2810 // 64-bit vector types.
2811 def v8i8 : N3VD<op24, op23, 0b00, op11_8, op4, itinD16,
2812 OpcodeStr, !strconcat(Dt, "8"),
2813 v8i8, v8i8, OpNode, Commutable>;
2814 def v4i16 : N3VD<op24, op23, 0b01, op11_8, op4, itinD16,
2815 OpcodeStr, !strconcat(Dt, "16"),
2816 v4i16, v4i16, OpNode, Commutable>;
2817 def v2i32 : N3VD<op24, op23, 0b10, op11_8, op4, itinD32,
2818 OpcodeStr, !strconcat(Dt, "32"),
2819 v2i32, v2i32, OpNode, Commutable>;
2821 // 128-bit vector types.
2822 def v16i8 : N3VQ<op24, op23, 0b00, op11_8, op4, itinQ16,
2823 OpcodeStr, !strconcat(Dt, "8"),
2824 v16i8, v16i8, OpNode, Commutable>;
2825 def v8i16 : N3VQ<op24, op23, 0b01, op11_8, op4, itinQ16,
2826 OpcodeStr, !strconcat(Dt, "16"),
2827 v8i16, v8i16, OpNode, Commutable>;
2828 def v4i32 : N3VQ<op24, op23, 0b10, op11_8, op4, itinQ32,
2829 OpcodeStr, !strconcat(Dt, "32"),
2830 v4i32, v4i32, OpNode, Commutable>;
2833 multiclass N3VSL_HS<bits<4> op11_8, string OpcodeStr, string Dt, SDNode ShOp> {
2834 def v4i16 : N3VDSL16<0b01, op11_8, OpcodeStr, !strconcat(Dt, "16"),
2836 def v2i32 : N3VDSL<0b10, op11_8, IIC_VMULi32D, OpcodeStr, !strconcat(Dt,"32"),
2838 def v8i16 : N3VQSL16<0b01, op11_8, OpcodeStr, !strconcat(Dt, "16"),
2839 v8i16, v4i16, ShOp>;
2840 def v4i32 : N3VQSL<0b10, op11_8, IIC_VMULi32Q, OpcodeStr, !strconcat(Dt,"32"),
2841 v4i32, v2i32, ShOp>;
2844 // ....then also with element size 64 bits:
// Extends N3V_QHS with the 64-bit element defs (size field 0b11, Dt suffix
// "64"): v1i64 in the double-register form and v2i64 in the quad-register
// form. Note only one D and one Q itinerary are taken here; each is reused
// for both the 16-bit and 32-bit positions of the inherited multiclass.
2845 multiclass N3V_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
2846 InstrItinClass itinD, InstrItinClass itinQ,
2847 string OpcodeStr, string Dt,
2848 SDNode OpNode, bit Commutable = 0>
2849 : N3V_QHS<op24, op23, op11_8, op4, itinD, itinD, itinQ, itinQ,
2850 OpcodeStr, Dt, OpNode, Commutable> {
2851 def v1i64 : N3VD<op24, op23, 0b11, op11_8, op4, itinD,
2852 OpcodeStr, !strconcat(Dt, "64"),
2853 v1i64, v1i64, OpNode, Commutable>;
2854 def v2i64 : N3VQ<op24, op23, 0b11, op11_8, op4, itinQ,
2855 OpcodeStr, !strconcat(Dt, "64"),
2856 v2i64, v2i64, OpNode, Commutable>;
2860 // Neon 3-register vector intrinsics.
2862 // First with only element sizes of 16 and 32 bits:
2863 multiclass N3VInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2864 InstrItinClass itinD16, InstrItinClass itinD32,
2865 InstrItinClass itinQ16, InstrItinClass itinQ32,
2866 string OpcodeStr, string Dt,
2867 Intrinsic IntOp, bit Commutable = 0> {
2868 // 64-bit vector types.
2869 def v4i16 : N3VDInt<op24, op23, 0b01, op11_8, op4, f, itinD16,
2870 OpcodeStr, !strconcat(Dt, "16"),
2871 v4i16, v4i16, IntOp, Commutable>;
2872 def v2i32 : N3VDInt<op24, op23, 0b10, op11_8, op4, f, itinD32,
2873 OpcodeStr, !strconcat(Dt, "32"),
2874 v2i32, v2i32, IntOp, Commutable>;
2876 // 128-bit vector types.
2877 def v8i16 : N3VQInt<op24, op23, 0b01, op11_8, op4, f, itinQ16,
2878 OpcodeStr, !strconcat(Dt, "16"),
2879 v8i16, v8i16, IntOp, Commutable>;
2880 def v4i32 : N3VQInt<op24, op23, 0b10, op11_8, op4, f, itinQ32,
2881 OpcodeStr, !strconcat(Dt, "32"),
2882 v4i32, v4i32, IntOp, Commutable>;
2884 multiclass N3VInt_HSSh<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2885 InstrItinClass itinD16, InstrItinClass itinD32,
2886 InstrItinClass itinQ16, InstrItinClass itinQ32,
2887 string OpcodeStr, string Dt,
2889 // 64-bit vector types.
2890 def v4i16 : N3VDIntSh<op24, op23, 0b01, op11_8, op4, f, itinD16,
2891 OpcodeStr, !strconcat(Dt, "16"),
2892 v4i16, v4i16, IntOp>;
2893 def v2i32 : N3VDIntSh<op24, op23, 0b10, op11_8, op4, f, itinD32,
2894 OpcodeStr, !strconcat(Dt, "32"),
2895 v2i32, v2i32, IntOp>;
2897 // 128-bit vector types.
2898 def v8i16 : N3VQIntSh<op24, op23, 0b01, op11_8, op4, f, itinQ16,
2899 OpcodeStr, !strconcat(Dt, "16"),
2900 v8i16, v8i16, IntOp>;
2901 def v4i32 : N3VQIntSh<op24, op23, 0b10, op11_8, op4, f, itinQ32,
2902 OpcodeStr, !strconcat(Dt, "32"),
2903 v4i32, v4i32, IntOp>;
2906 multiclass N3VIntSL_HS<bits<4> op11_8,
2907 InstrItinClass itinD16, InstrItinClass itinD32,
2908 InstrItinClass itinQ16, InstrItinClass itinQ32,
2909 string OpcodeStr, string Dt, Intrinsic IntOp> {
2910 def v4i16 : N3VDIntSL16<0b01, op11_8, itinD16,
2911 OpcodeStr, !strconcat(Dt, "16"), v4i16, IntOp>;
2912 def v2i32 : N3VDIntSL<0b10, op11_8, itinD32,
2913 OpcodeStr, !strconcat(Dt, "32"), v2i32, IntOp>;
2914 def v8i16 : N3VQIntSL16<0b01, op11_8, itinQ16,
2915 OpcodeStr, !strconcat(Dt, "16"), v8i16, v4i16, IntOp>;
2916 def v4i32 : N3VQIntSL<0b10, op11_8, itinQ32,
2917 OpcodeStr, !strconcat(Dt, "32"), v4i32, v2i32, IntOp>;
2920 // ....then also with element size of 8 bits:
2921 multiclass N3VInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2922 InstrItinClass itinD16, InstrItinClass itinD32,
2923 InstrItinClass itinQ16, InstrItinClass itinQ32,
2924 string OpcodeStr, string Dt,
2925 Intrinsic IntOp, bit Commutable = 0>
2926 : N3VInt_HS<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
2927 OpcodeStr, Dt, IntOp, Commutable> {
2928 def v8i8 : N3VDInt<op24, op23, 0b00, op11_8, op4, f, itinD16,
2929 OpcodeStr, !strconcat(Dt, "8"),
2930 v8i8, v8i8, IntOp, Commutable>;
2931 def v16i8 : N3VQInt<op24, op23, 0b00, op11_8, op4, f, itinQ16,
2932 OpcodeStr, !strconcat(Dt, "8"),
2933 v16i8, v16i8, IntOp, Commutable>;
2935 multiclass N3VInt_QHSSh<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2936 InstrItinClass itinD16, InstrItinClass itinD32,
2937 InstrItinClass itinQ16, InstrItinClass itinQ32,
2938 string OpcodeStr, string Dt,
2940 : N3VInt_HSSh<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
2941 OpcodeStr, Dt, IntOp> {
2942 def v8i8 : N3VDIntSh<op24, op23, 0b00, op11_8, op4, f, itinD16,
2943 OpcodeStr, !strconcat(Dt, "8"),
2945 def v16i8 : N3VQIntSh<op24, op23, 0b00, op11_8, op4, f, itinQ16,
2946 OpcodeStr, !strconcat(Dt, "8"),
2947 v16i8, v16i8, IntOp>;
2951 // ....then also with element size of 64 bits:
2952 multiclass N3VInt_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2953 InstrItinClass itinD16, InstrItinClass itinD32,
2954 InstrItinClass itinQ16, InstrItinClass itinQ32,
2955 string OpcodeStr, string Dt,
2956 Intrinsic IntOp, bit Commutable = 0>
2957 : N3VInt_QHS<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
2958 OpcodeStr, Dt, IntOp, Commutable> {
2959 def v1i64 : N3VDInt<op24, op23, 0b11, op11_8, op4, f, itinD32,
2960 OpcodeStr, !strconcat(Dt, "64"),
2961 v1i64, v1i64, IntOp, Commutable>;
2962 def v2i64 : N3VQInt<op24, op23, 0b11, op11_8, op4, f, itinQ32,
2963 OpcodeStr, !strconcat(Dt, "64"),
2964 v2i64, v2i64, IntOp, Commutable>;
2966 multiclass N3VInt_QHSDSh<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2967 InstrItinClass itinD16, InstrItinClass itinD32,
2968 InstrItinClass itinQ16, InstrItinClass itinQ32,
2969 string OpcodeStr, string Dt,
2971 : N3VInt_QHSSh<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
2972 OpcodeStr, Dt, IntOp> {
2973 def v1i64 : N3VDIntSh<op24, op23, 0b11, op11_8, op4, f, itinD32,
2974 OpcodeStr, !strconcat(Dt, "64"),
2975 v1i64, v1i64, IntOp>;
2976 def v2i64 : N3VQIntSh<op24, op23, 0b11, op11_8, op4, f, itinQ32,
2977 OpcodeStr, !strconcat(Dt, "64"),
2978 v2i64, v2i64, IntOp>;
2981 // Neon Narrowing 3-register vector intrinsics,
2982 // source operand element sizes of 16, 32 and 64 bits:
// Each def pairs a wide source type with a half-width result type
// (v8i16->v8i8, v4i32->v4i16, v2i64->v2i32); the Dt suffix names the
// SOURCE element size ("16"/"32"/"64"), not the result size.
2983 multiclass N3VNInt_HSD<bit op24, bit op23, bits<4> op11_8, bit op4,
2984 string OpcodeStr, string Dt,
2985 Intrinsic IntOp, bit Commutable = 0> {
2986 def v8i8 : N3VNInt<op24, op23, 0b00, op11_8, op4,
2987 OpcodeStr, !strconcat(Dt, "16"),
2988 v8i8, v8i16, IntOp, Commutable>;
2989 def v4i16 : N3VNInt<op24, op23, 0b01, op11_8, op4,
2990 OpcodeStr, !strconcat(Dt, "32"),
2991 v4i16, v4i32, IntOp, Commutable>;
2992 def v2i32 : N3VNInt<op24, op23, 0b10, op11_8, op4,
2993 OpcodeStr, !strconcat(Dt, "64"),
2994 v2i32, v2i64, IntOp, Commutable>;
2998 // Neon Long 3-register vector operations.
// Lengthening variants: each def pairs a narrow D-register source type with
// a double-width Q-register result type (v8i8->v8i16, v4i16->v4i32,
// v2i32->v2i64). The Dt suffix names the source element size.
3000 multiclass N3VL_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3001 InstrItinClass itin16, InstrItinClass itin32,
3002 string OpcodeStr, string Dt,
3003 SDNode OpNode, bit Commutable = 0> {
3004 def v8i16 : N3VL<op24, op23, 0b00, op11_8, op4, itin16,
3005 OpcodeStr, !strconcat(Dt, "8"),
3006 v8i16, v8i8, OpNode, Commutable>;
3007 def v4i32 : N3VL<op24, op23, 0b01, op11_8, op4, itin16,
3008 OpcodeStr, !strconcat(Dt, "16"),
3009 v4i32, v4i16, OpNode, Commutable>;
3010 def v2i64 : N3VL<op24, op23, 0b10, op11_8, op4, itin32,
3011 OpcodeStr, !strconcat(Dt, "32"),
3012 v2i64, v2i32, OpNode, Commutable>;
3015 multiclass N3VLSL_HS<bit op24, bits<4> op11_8,
3016 InstrItinClass itin, string OpcodeStr, string Dt,
3018 def v4i16 : N3VLSL16<op24, 0b01, op11_8, itin, OpcodeStr,
3019 !strconcat(Dt, "16"), v4i32, v4i16, OpNode>;
3020 def v2i32 : N3VLSL<op24, 0b10, op11_8, itin, OpcodeStr,
3021 !strconcat(Dt, "32"), v2i64, v2i32, OpNode>;
3024 multiclass N3VLExt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3025 InstrItinClass itin16, InstrItinClass itin32,
3026 string OpcodeStr, string Dt,
3027 SDNode OpNode, SDNode ExtOp, bit Commutable = 0> {
3028 def v8i16 : N3VLExt<op24, op23, 0b00, op11_8, op4, itin16,
3029 OpcodeStr, !strconcat(Dt, "8"),
3030 v8i16, v8i8, OpNode, ExtOp, Commutable>;
3031 def v4i32 : N3VLExt<op24, op23, 0b01, op11_8, op4, itin16,
3032 OpcodeStr, !strconcat(Dt, "16"),
3033 v4i32, v4i16, OpNode, ExtOp, Commutable>;
3034 def v2i64 : N3VLExt<op24, op23, 0b10, op11_8, op4, itin32,
3035 OpcodeStr, !strconcat(Dt, "32"),
3036 v2i64, v2i32, OpNode, ExtOp, Commutable>;
3039 // Neon Long 3-register vector intrinsics.
3041 // First with only element sizes of 16 and 32 bits:
3042 multiclass N3VLInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
3043 InstrItinClass itin16, InstrItinClass itin32,
3044 string OpcodeStr, string Dt,
3045 Intrinsic IntOp, bit Commutable = 0> {
3046 def v4i32 : N3VLInt<op24, op23, 0b01, op11_8, op4, itin16,
3047 OpcodeStr, !strconcat(Dt, "16"),
3048 v4i32, v4i16, IntOp, Commutable>;
3049 def v2i64 : N3VLInt<op24, op23, 0b10, op11_8, op4, itin32,
3050 OpcodeStr, !strconcat(Dt, "32"),
3051 v2i64, v2i32, IntOp, Commutable>;
3054 multiclass N3VLIntSL_HS<bit op24, bits<4> op11_8,
3055 InstrItinClass itin, string OpcodeStr, string Dt,
3057 def v4i16 : N3VLIntSL16<op24, 0b01, op11_8, itin,
3058 OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, IntOp>;
3059 def v2i32 : N3VLIntSL<op24, 0b10, op11_8, itin,
3060 OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, IntOp>;
3063 // ....then also with element size of 8 bits:
3064 multiclass N3VLInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3065 InstrItinClass itin16, InstrItinClass itin32,
3066 string OpcodeStr, string Dt,
3067 Intrinsic IntOp, bit Commutable = 0>
3068 : N3VLInt_HS<op24, op23, op11_8, op4, itin16, itin32, OpcodeStr, Dt,
3069 IntOp, Commutable> {
3070 def v8i16 : N3VLInt<op24, op23, 0b00, op11_8, op4, itin16,
3071 OpcodeStr, !strconcat(Dt, "8"),
3072 v8i16, v8i8, IntOp, Commutable>;
3075 // ....with explicit extend (VABDL).
// Long 3-register intrinsics where the intrinsic result is widened by an
// explicit extend node (ExtOp) — see the N3VLIntExt class, which applies
// ExtOp to the IntOp result. One def per source element size (8/16/32).
3076 multiclass N3VLIntExt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3077 InstrItinClass itin, string OpcodeStr, string Dt,
3078 Intrinsic IntOp, SDNode ExtOp, bit Commutable = 0> {
3079 def v8i16 : N3VLIntExt<op24, op23, 0b00, op11_8, op4, itin,
3080 OpcodeStr, !strconcat(Dt, "8"),
3081 v8i16, v8i8, IntOp, ExtOp, Commutable>;
3082 def v4i32 : N3VLIntExt<op24, op23, 0b01, op11_8, op4, itin,
3083 OpcodeStr, !strconcat(Dt, "16"),
3084 v4i32, v4i16, IntOp, ExtOp, Commutable>;
3085 def v2i64 : N3VLIntExt<op24, op23, 0b10, op11_8, op4, itin,
3086 OpcodeStr, !strconcat(Dt, "32"),
3087 v2i64, v2i32, IntOp, ExtOp, Commutable>;
3091 // Neon Wide 3-register vector intrinsics,
3092 // source operand element sizes of 8, 16 and 32 bits:
// Wide form (see the N3VW class above): first operand is already a wide Q
// register, second is a narrow D register that gets extended with ExtOp
// before OpNode combines them. One def per narrow-element size.
3093 multiclass N3VW_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3094 string OpcodeStr, string Dt,
3095 SDNode OpNode, SDNode ExtOp, bit Commutable = 0> {
3096 def v8i16 : N3VW<op24, op23, 0b00, op11_8, op4,
3097 OpcodeStr, !strconcat(Dt, "8"),
3098 v8i16, v8i8, OpNode, ExtOp, Commutable>;
3099 def v4i32 : N3VW<op24, op23, 0b01, op11_8, op4,
3100 OpcodeStr, !strconcat(Dt, "16"),
3101 v4i32, v4i16, OpNode, ExtOp, Commutable>;
3102 def v2i64 : N3VW<op24, op23, 0b10, op11_8, op4,
3103 OpcodeStr, !strconcat(Dt, "32"),
3104 v2i64, v2i32, OpNode, ExtOp, Commutable>;
3108 // Neon Multiply-Op vector operations,
3109 // element sizes of 8, 16 and 32 bits:
3110 multiclass N3VMulOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3111 InstrItinClass itinD16, InstrItinClass itinD32,
3112 InstrItinClass itinQ16, InstrItinClass itinQ32,
3113 string OpcodeStr, string Dt, SDNode OpNode> {
3114 // 64-bit vector types.
3115 def v8i8 : N3VDMulOp<op24, op23, 0b00, op11_8, op4, itinD16,
3116 OpcodeStr, !strconcat(Dt, "8"), v8i8, mul, OpNode>;
3117 def v4i16 : N3VDMulOp<op24, op23, 0b01, op11_8, op4, itinD16,
3118 OpcodeStr, !strconcat(Dt, "16"), v4i16, mul, OpNode>;
3119 def v2i32 : N3VDMulOp<op24, op23, 0b10, op11_8, op4, itinD32,
3120 OpcodeStr, !strconcat(Dt, "32"), v2i32, mul, OpNode>;
3122 // 128-bit vector types.
3123 def v16i8 : N3VQMulOp<op24, op23, 0b00, op11_8, op4, itinQ16,
3124 OpcodeStr, !strconcat(Dt, "8"), v16i8, mul, OpNode>;
3125 def v8i16 : N3VQMulOp<op24, op23, 0b01, op11_8, op4, itinQ16,
3126 OpcodeStr, !strconcat(Dt, "16"), v8i16, mul, OpNode>;
3127 def v4i32 : N3VQMulOp<op24, op23, 0b10, op11_8, op4, itinQ32,
3128 OpcodeStr, !strconcat(Dt, "32"), v4i32, mul, OpNode>;
3131 multiclass N3VMulOpSL_HS<bits<4> op11_8,
3132 InstrItinClass itinD16, InstrItinClass itinD32,
3133 InstrItinClass itinQ16, InstrItinClass itinQ32,
3134 string OpcodeStr, string Dt, SDNode ShOp> {
3135 def v4i16 : N3VDMulOpSL16<0b01, op11_8, itinD16,
3136 OpcodeStr, !strconcat(Dt, "16"), v4i16, mul, ShOp>;
3137 def v2i32 : N3VDMulOpSL<0b10, op11_8, itinD32,
3138 OpcodeStr, !strconcat(Dt, "32"), v2i32, mul, ShOp>;
3139 def v8i16 : N3VQMulOpSL16<0b01, op11_8, itinQ16,
3140 OpcodeStr, !strconcat(Dt, "16"), v8i16, v4i16,
3142 def v4i32 : N3VQMulOpSL<0b10, op11_8, itinQ32,
3143 OpcodeStr, !strconcat(Dt, "32"), v4i32, v2i32,
3147 // Neon Intrinsic-Op vector operations,
3148 // element sizes of 8, 16 and 32 bits:
3149 multiclass N3VIntOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3150 InstrItinClass itinD, InstrItinClass itinQ,
3151 string OpcodeStr, string Dt, Intrinsic IntOp,
3153 // 64-bit vector types.
3154 def v8i8 : N3VDIntOp<op24, op23, 0b00, op11_8, op4, itinD,
3155 OpcodeStr, !strconcat(Dt, "8"), v8i8, IntOp, OpNode>;
3156 def v4i16 : N3VDIntOp<op24, op23, 0b01, op11_8, op4, itinD,
3157 OpcodeStr, !strconcat(Dt, "16"), v4i16, IntOp, OpNode>;
3158 def v2i32 : N3VDIntOp<op24, op23, 0b10, op11_8, op4, itinD,
3159 OpcodeStr, !strconcat(Dt, "32"), v2i32, IntOp, OpNode>;
3161 // 128-bit vector types.
3162 def v16i8 : N3VQIntOp<op24, op23, 0b00, op11_8, op4, itinQ,
3163 OpcodeStr, !strconcat(Dt, "8"), v16i8, IntOp, OpNode>;
3164 def v8i16 : N3VQIntOp<op24, op23, 0b01, op11_8, op4, itinQ,
3165 OpcodeStr, !strconcat(Dt, "16"), v8i16, IntOp, OpNode>;
3166 def v4i32 : N3VQIntOp<op24, op23, 0b10, op11_8, op4, itinQ,
3167 OpcodeStr, !strconcat(Dt, "32"), v4i32, IntOp, OpNode>;
// Neon 3-argument intrinsics,
//   element sizes of 8, 16 and 32 bits:
multiclass N3VInt3_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                       InstrItinClass itinD, InstrItinClass itinQ,
                       string OpcodeStr, string Dt, Intrinsic IntOp> {
  // 64-bit vector types.
  def v8i8  : N3VDInt3<op24, op23, 0b00, op11_8, op4, itinD,
                       OpcodeStr, !strconcat(Dt, "8"), v8i8, v8i8, IntOp>;
  def v4i16 : N3VDInt3<op24, op23, 0b01, op11_8, op4, itinD,
                       OpcodeStr, !strconcat(Dt, "16"), v4i16, v4i16, IntOp>;
  def v2i32 : N3VDInt3<op24, op23, 0b10, op11_8, op4, itinD,
                       OpcodeStr, !strconcat(Dt, "32"), v2i32, v2i32, IntOp>;

  // 128-bit vector types.
  def v16i8 : N3VQInt3<op24, op23, 0b00, op11_8, op4, itinQ,
                       OpcodeStr, !strconcat(Dt, "8"), v16i8, v16i8, IntOp>;
  def v8i16 : N3VQInt3<op24, op23, 0b01, op11_8, op4, itinQ,
                       OpcodeStr, !strconcat(Dt, "16"), v8i16, v8i16, IntOp>;
  def v4i32 : N3VQInt3<op24, op23, 0b10, op11_8, op4, itinQ,
                       OpcodeStr, !strconcat(Dt, "32"), v4i32, v4i32, IntOp>;
}
// Neon Long Multiply-Op vector operations,
//   element sizes of 8, 16 and 32 bits:
multiclass N3VLMulOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                         InstrItinClass itin16, InstrItinClass itin32,
                         string OpcodeStr, string Dt, SDNode MulOp,
                         SDNode OpNode> {
  def v8i16 : N3VLMulOp<op24, op23, 0b00, op11_8, op4, itin16, OpcodeStr,
                        !strconcat(Dt, "8"), v8i16, v8i8, MulOp, OpNode>;
  def v4i32 : N3VLMulOp<op24, op23, 0b01, op11_8, op4, itin16, OpcodeStr,
                        !strconcat(Dt, "16"), v4i32, v4i16, MulOp, OpNode>;
  def v2i64 : N3VLMulOp<op24, op23, 0b10, op11_8, op4, itin32, OpcodeStr,
                        !strconcat(Dt, "32"), v2i64, v2i32, MulOp, OpNode>;
}
// Neon Long Multiply-Op vector operations with scalar (by-lane) operand,
// for element sizes of 16 and 32 bits:
multiclass N3VLMulOpSL_HS<bit op24, bits<4> op11_8, string OpcodeStr,
                          string Dt, SDNode MulOp, SDNode OpNode> {
  def v4i16 : N3VLMulOpSL16<op24, 0b01, op11_8, IIC_VMACi16D, OpcodeStr,
                            !strconcat(Dt, "16"), v4i32, v4i16, MulOp, OpNode>;
  def v2i32 : N3VLMulOpSL<op24, 0b10, op11_8, IIC_VMACi32D, OpcodeStr,
                          !strconcat(Dt, "32"), v2i64, v2i32, MulOp, OpNode>;
}
// Neon Long 3-argument intrinsics.

// First with only element sizes of 16 and 32 bits:
multiclass N3VLInt3_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
                       InstrItinClass itin16, InstrItinClass itin32,
                       string OpcodeStr, string Dt, Intrinsic IntOp> {
  def v4i32 : N3VLInt3<op24, op23, 0b01, op11_8, op4, itin16,
                       OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, IntOp>;
  def v2i64 : N3VLInt3<op24, op23, 0b10, op11_8, op4, itin32,
                       OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, IntOp>;
}
// Neon Long 3-argument intrinsics with scalar (by-lane) operand,
// element sizes of 16 and 32 bits:
multiclass N3VLInt3SL_HS<bit op24, bits<4> op11_8,
                         string OpcodeStr, string Dt, Intrinsic IntOp> {
  def v4i16 : N3VLInt3SL16<op24, 0b01, op11_8, IIC_VMACi16D,
                           OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, IntOp>;
  def v2i32 : N3VLInt3SL<op24, 0b10, op11_8, IIC_VMACi32D,
                         OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, IntOp>;
}
// ....then also with element size of 8 bits:
multiclass N3VLInt3_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                        InstrItinClass itin16, InstrItinClass itin32,
                        string OpcodeStr, string Dt, Intrinsic IntOp>
  : N3VLInt3_HS<op24, op23, op11_8, op4, itin16, itin32, OpcodeStr, Dt, IntOp> {
  def v8i16 : N3VLInt3<op24, op23, 0b00, op11_8, op4, itin16,
                       OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, IntOp>;
}
// ....with explicit extend (VABAL).
multiclass N3VLIntExtOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                            InstrItinClass itin, string OpcodeStr, string Dt,
                            Intrinsic IntOp, SDNode ExtOp, SDNode OpNode> {
  def v8i16 : N3VLIntExtOp<op24, op23, 0b00, op11_8, op4, itin,
                           OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8,
                           IntOp, ExtOp, OpNode>;
  def v4i32 : N3VLIntExtOp<op24, op23, 0b01, op11_8, op4, itin,
                           OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16,
                           IntOp, ExtOp, OpNode>;
  def v2i64 : N3VLIntExtOp<op24, op23, 0b10, op11_8, op4, itin,
                           OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32,
                           IntOp, ExtOp, OpNode>;
}
// Neon Pairwise long 2-register intrinsics,
//   element sizes of 8, 16 and 32 bits:
multiclass N2VPLInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
                        bits<5> op11_7, bit op4,
                        string OpcodeStr, string Dt, Intrinsic IntOp> {
  // 64-bit vector types.
  def v8i8  : N2VDPLInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
                        OpcodeStr, !strconcat(Dt, "8"), v4i16, v8i8, IntOp>;
  def v4i16 : N2VDPLInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
                        OpcodeStr, !strconcat(Dt, "16"), v2i32, v4i16, IntOp>;
  def v2i32 : N2VDPLInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
                        OpcodeStr, !strconcat(Dt, "32"), v1i64, v2i32, IntOp>;

  // 128-bit vector types.
  def v16i8 : N2VQPLInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
                        OpcodeStr, !strconcat(Dt, "8"), v8i16, v16i8, IntOp>;
  def v8i16 : N2VQPLInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
                        OpcodeStr, !strconcat(Dt, "16"), v4i32, v8i16, IntOp>;
  def v4i32 : N2VQPLInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
                        OpcodeStr, !strconcat(Dt, "32"), v2i64, v4i32, IntOp>;
}
// Neon Pairwise long 2-register accumulate intrinsics,
//   element sizes of 8, 16 and 32 bits:
multiclass N2VPLInt2_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
                         bits<5> op11_7, bit op4,
                         string OpcodeStr, string Dt, Intrinsic IntOp> {
  // 64-bit vector types.
  def v8i8  : N2VDPLInt2<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
                         OpcodeStr, !strconcat(Dt, "8"), v4i16, v8i8, IntOp>;
  def v4i16 : N2VDPLInt2<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
                         OpcodeStr, !strconcat(Dt, "16"), v2i32, v4i16, IntOp>;
  def v2i32 : N2VDPLInt2<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
                         OpcodeStr, !strconcat(Dt, "32"), v1i64, v2i32, IntOp>;

  // 128-bit vector types.
  def v16i8 : N2VQPLInt2<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
                         OpcodeStr, !strconcat(Dt, "8"), v8i16, v16i8, IntOp>;
  def v8i16 : N2VQPLInt2<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
                         OpcodeStr, !strconcat(Dt, "16"), v4i32, v8i16, IntOp>;
  def v4i32 : N2VQPLInt2<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
                         OpcodeStr, !strconcat(Dt, "32"), v2i64, v4i32, IntOp>;
}
// Neon 2-register vector shift by immediate,
//   with f of either N2RegVShLFrm or N2RegVShRFrm
//   element sizes of 8, 16, 32 and 64 bits:
multiclass N2VShL_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                       InstrItinClass itin, string OpcodeStr, string Dt,
                       SDNode OpNode> {
  // 64-bit vector types.
  def v8i8  : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
                     OpcodeStr, !strconcat(Dt, "8"), v8i8, OpNode> {
    let Inst{21-19} = 0b001; // imm6 = 001xxx
  }
  def v4i16 : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
                     OpcodeStr, !strconcat(Dt, "16"), v4i16, OpNode> {
    let Inst{21-20} = 0b01;  // imm6 = 01xxxx
  }
  def v2i32 : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
                     OpcodeStr, !strconcat(Dt, "32"), v2i32, OpNode> {
    let Inst{21} = 0b1;      // imm6 = 1xxxxx
  }
  def v1i64 : N2VDSh<op24, op23, op11_8, 1, op4, N2RegVShLFrm, itin, i32imm,
                     OpcodeStr, !strconcat(Dt, "64"), v1i64, OpNode>;
                             // imm6 = xxxxxx

  // 128-bit vector types.
  def v16i8 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
                     OpcodeStr, !strconcat(Dt, "8"), v16i8, OpNode> {
    let Inst{21-19} = 0b001; // imm6 = 001xxx
  }
  def v8i16 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
                     OpcodeStr, !strconcat(Dt, "16"), v8i16, OpNode> {
    let Inst{21-20} = 0b01;  // imm6 = 01xxxx
  }
  def v4i32 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
                     OpcodeStr, !strconcat(Dt, "32"), v4i32, OpNode> {
    let Inst{21} = 0b1;      // imm6 = 1xxxxx
  }
  def v2i64 : N2VQSh<op24, op23, op11_8, 1, op4, N2RegVShLFrm, itin, i32imm,
                     OpcodeStr, !strconcat(Dt, "64"), v2i64, OpNode>;
                             // imm6 = xxxxxx
}
// Same as N2VShL_QHSD but for right shifts (N2RegVShRFrm, shr_imm operands):
multiclass N2VShR_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                       InstrItinClass itin, string OpcodeStr, string Dt,
                       SDNode OpNode> {
  // 64-bit vector types.
  def v8i8  : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm8,
                     OpcodeStr, !strconcat(Dt, "8"), v8i8, OpNode> {
    let Inst{21-19} = 0b001; // imm6 = 001xxx
  }
  def v4i16 : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm16,
                     OpcodeStr, !strconcat(Dt, "16"), v4i16, OpNode> {
    let Inst{21-20} = 0b01;  // imm6 = 01xxxx
  }
  def v2i32 : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm32,
                     OpcodeStr, !strconcat(Dt, "32"), v2i32, OpNode> {
    let Inst{21} = 0b1;      // imm6 = 1xxxxx
  }
  def v1i64 : N2VDSh<op24, op23, op11_8, 1, op4, N2RegVShRFrm, itin, shr_imm64,
                     OpcodeStr, !strconcat(Dt, "64"), v1i64, OpNode>;
                             // imm6 = xxxxxx

  // 128-bit vector types.
  def v16i8 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm8,
                     OpcodeStr, !strconcat(Dt, "8"), v16i8, OpNode> {
    let Inst{21-19} = 0b001; // imm6 = 001xxx
  }
  def v8i16 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm16,
                     OpcodeStr, !strconcat(Dt, "16"), v8i16, OpNode> {
    let Inst{21-20} = 0b01;  // imm6 = 01xxxx
  }
  def v4i32 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm32,
                     OpcodeStr, !strconcat(Dt, "32"), v4i32, OpNode> {
    let Inst{21} = 0b1;      // imm6 = 1xxxxx
  }
  def v2i64 : N2VQSh<op24, op23, op11_8, 1, op4, N2RegVShRFrm, itin, shr_imm64,
                     OpcodeStr, !strconcat(Dt, "64"), v2i64, OpNode>;
                             // imm6 = xxxxxx
}
// Neon Shift-Accumulate vector operations,
//   element sizes of 8, 16, 32 and 64 bits:
multiclass N2VShAdd_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                         string OpcodeStr, string Dt, SDNode ShOp> {
  // 64-bit vector types.
  def v8i8  : N2VDShAdd<op24, op23, op11_8, 0, op4, shr_imm8,
                        OpcodeStr, !strconcat(Dt, "8"), v8i8, ShOp> {
    let Inst{21-19} = 0b001; // imm6 = 001xxx
  }
  def v4i16 : N2VDShAdd<op24, op23, op11_8, 0, op4, shr_imm16,
                        OpcodeStr, !strconcat(Dt, "16"), v4i16, ShOp> {
    let Inst{21-20} = 0b01;  // imm6 = 01xxxx
  }
  def v2i32 : N2VDShAdd<op24, op23, op11_8, 0, op4, shr_imm32,
                        OpcodeStr, !strconcat(Dt, "32"), v2i32, ShOp> {
    let Inst{21} = 0b1;      // imm6 = 1xxxxx
  }
  def v1i64 : N2VDShAdd<op24, op23, op11_8, 1, op4, shr_imm64,
                        OpcodeStr, !strconcat(Dt, "64"), v1i64, ShOp>;
                             // imm6 = xxxxxx

  // 128-bit vector types.
  def v16i8 : N2VQShAdd<op24, op23, op11_8, 0, op4, shr_imm8,
                        OpcodeStr, !strconcat(Dt, "8"), v16i8, ShOp> {
    let Inst{21-19} = 0b001; // imm6 = 001xxx
  }
  def v8i16 : N2VQShAdd<op24, op23, op11_8, 0, op4, shr_imm16,
                        OpcodeStr, !strconcat(Dt, "16"), v8i16, ShOp> {
    let Inst{21-20} = 0b01;  // imm6 = 01xxxx
  }
  def v4i32 : N2VQShAdd<op24, op23, op11_8, 0, op4, shr_imm32,
                        OpcodeStr, !strconcat(Dt, "32"), v4i32, ShOp> {
    let Inst{21} = 0b1;      // imm6 = 1xxxxx
  }
  def v2i64 : N2VQShAdd<op24, op23, op11_8, 1, op4, shr_imm64,
                        OpcodeStr, !strconcat(Dt, "64"), v2i64, ShOp>;
                             // imm6 = xxxxxx
}
// Neon Shift-Insert vector operations,
//   with f of either N2RegVShLFrm or N2RegVShRFrm
//   element sizes of 8, 16, 32 and 64 bits:
multiclass N2VShInsL_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                          string OpcodeStr> {
  // 64-bit vector types.
  def v8i8  : N2VDShIns<op24, op23, op11_8, 0, op4, i32imm,
                        N2RegVShLFrm, OpcodeStr, "8", v8i8, NEONvsli> {
    let Inst{21-19} = 0b001; // imm6 = 001xxx
  }
  def v4i16 : N2VDShIns<op24, op23, op11_8, 0, op4, i32imm,
                        N2RegVShLFrm, OpcodeStr, "16", v4i16, NEONvsli> {
    let Inst{21-20} = 0b01;  // imm6 = 01xxxx
  }
  def v2i32 : N2VDShIns<op24, op23, op11_8, 0, op4, i32imm,
                        N2RegVShLFrm, OpcodeStr, "32", v2i32, NEONvsli> {
    let Inst{21} = 0b1;      // imm6 = 1xxxxx
  }
  def v1i64 : N2VDShIns<op24, op23, op11_8, 1, op4, i32imm,
                        N2RegVShLFrm, OpcodeStr, "64", v1i64, NEONvsli>;
                             // imm6 = xxxxxx

  // 128-bit vector types.
  def v16i8 : N2VQShIns<op24, op23, op11_8, 0, op4, i32imm,
                        N2RegVShLFrm, OpcodeStr, "8", v16i8, NEONvsli> {
    let Inst{21-19} = 0b001; // imm6 = 001xxx
  }
  def v8i16 : N2VQShIns<op24, op23, op11_8, 0, op4, i32imm,
                        N2RegVShLFrm, OpcodeStr, "16", v8i16, NEONvsli> {
    let Inst{21-20} = 0b01;  // imm6 = 01xxxx
  }
  def v4i32 : N2VQShIns<op24, op23, op11_8, 0, op4, i32imm,
                        N2RegVShLFrm, OpcodeStr, "32", v4i32, NEONvsli> {
    let Inst{21} = 0b1;      // imm6 = 1xxxxx
  }
  def v2i64 : N2VQShIns<op24, op23, op11_8, 1, op4, i32imm,
                        N2RegVShLFrm, OpcodeStr, "64", v2i64, NEONvsli>;
                             // imm6 = xxxxxx
}
// Same as N2VShInsL_QHSD but for right shift-insert (VSRI / NEONvsri):
multiclass N2VShInsR_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                          string OpcodeStr> {
  // 64-bit vector types.
  def v8i8  : N2VDShIns<op24, op23, op11_8, 0, op4, shr_imm8,
                        N2RegVShRFrm, OpcodeStr, "8", v8i8, NEONvsri> {
    let Inst{21-19} = 0b001; // imm6 = 001xxx
  }
  def v4i16 : N2VDShIns<op24, op23, op11_8, 0, op4, shr_imm16,
                        N2RegVShRFrm, OpcodeStr, "16", v4i16, NEONvsri> {
    let Inst{21-20} = 0b01;  // imm6 = 01xxxx
  }
  def v2i32 : N2VDShIns<op24, op23, op11_8, 0, op4, shr_imm32,
                        N2RegVShRFrm, OpcodeStr, "32", v2i32, NEONvsri> {
    let Inst{21} = 0b1;      // imm6 = 1xxxxx
  }
  def v1i64 : N2VDShIns<op24, op23, op11_8, 1, op4, shr_imm64,
                        N2RegVShRFrm, OpcodeStr, "64", v1i64, NEONvsri>;
                             // imm6 = xxxxxx

  // 128-bit vector types.
  def v16i8 : N2VQShIns<op24, op23, op11_8, 0, op4, shr_imm8,
                        N2RegVShRFrm, OpcodeStr, "8", v16i8, NEONvsri> {
    let Inst{21-19} = 0b001; // imm6 = 001xxx
  }
  def v8i16 : N2VQShIns<op24, op23, op11_8, 0, op4, shr_imm16,
                        N2RegVShRFrm, OpcodeStr, "16", v8i16, NEONvsri> {
    let Inst{21-20} = 0b01;  // imm6 = 01xxxx
  }
  def v4i32 : N2VQShIns<op24, op23, op11_8, 0, op4, shr_imm32,
                        N2RegVShRFrm, OpcodeStr, "32", v4i32, NEONvsri> {
    let Inst{21} = 0b1;      // imm6 = 1xxxxx
  }
  def v2i64 : N2VQShIns<op24, op23, op11_8, 1, op4, shr_imm64,
                        N2RegVShRFrm, OpcodeStr, "64", v2i64, NEONvsri>;
                             // imm6 = xxxxxx
}
// Neon Shift Long operations,
//   element sizes of 8, 16, 32 bits:
multiclass N2VLSh_QHS<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6,
                      bit op4, string OpcodeStr, string Dt, SDNode OpNode> {
  def v8i16 : N2VLSh<op24, op23, op11_8, op7, op6, op4,
                     OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, OpNode> {
    let Inst{21-19} = 0b001; // imm6 = 001xxx
  }
  def v4i32 : N2VLSh<op24, op23, op11_8, op7, op6, op4,
                     OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, OpNode> {
    let Inst{21-20} = 0b01;  // imm6 = 01xxxx
  }
  def v2i64 : N2VLSh<op24, op23, op11_8, op7, op6, op4,
                     OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, OpNode> {
    let Inst{21} = 0b1;      // imm6 = 1xxxxx
  }
}
// Neon Shift Narrow operations,
//   element sizes of 16, 32, 64 bits:
multiclass N2VNSh_HSD<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6,
                      bit op4, InstrItinClass itin, string OpcodeStr, string Dt,
                      SDNode OpNode> {
  def v8i8 : N2VNSh<op24, op23, op11_8, op7, op6, op4, itin,
                    OpcodeStr, !strconcat(Dt, "16"),
                    v8i8, v8i16, shr_imm8, OpNode> {
    let Inst{21-19} = 0b001; // imm6 = 001xxx
  }
  def v4i16 : N2VNSh<op24, op23, op11_8, op7, op6, op4, itin,
                     OpcodeStr, !strconcat(Dt, "32"),
                     v4i16, v4i32, shr_imm16, OpNode> {
    let Inst{21-20} = 0b01;  // imm6 = 01xxxx
  }
  def v2i32 : N2VNSh<op24, op23, op11_8, op7, op6, op4, itin,
                     OpcodeStr, !strconcat(Dt, "64"),
                     v2i32, v2i64, shr_imm32, OpNode> {
    let Inst{21} = 0b1;      // imm6 = 1xxxxx
  }
}
//===----------------------------------------------------------------------===//
// Instruction Definitions.
//===----------------------------------------------------------------------===//

// Vector Add Operations.

// VADD : Vector Add (integer and floating-point)
defm VADD     : N3V_QHSD<0, 0, 0b1000, 0, IIC_VBINiD, IIC_VBINiQ, "vadd", "i",
                         add, 1>;
def  VADDfd   : N3VD<0, 0, 0b00, 0b1101, 0, IIC_VBIND, "vadd", "f32",
                     v2f32, v2f32, fadd, 1>;
def  VADDfq   : N3VQ<0, 0, 0b00, 0b1101, 0, IIC_VBINQ, "vadd", "f32",
                     v4f32, v4f32, fadd, 1>;
// VADDL : Vector Add Long (Q = D + D)
defm VADDLs   : N3VLExt_QHS<0,1,0b0000,0, IIC_VSHLiD, IIC_VSHLiD,
                            "vaddl", "s", add, sext, 1>;
defm VADDLu   : N3VLExt_QHS<1,1,0b0000,0, IIC_VSHLiD, IIC_VSHLiD,
                            "vaddl", "u", add, zext, 1>;
// VADDW : Vector Add Wide (Q = Q + D)
defm VADDWs   : N3VW_QHS<0,1,0b0001,0, "vaddw", "s", add, sext, 0>;
defm VADDWu   : N3VW_QHS<1,1,0b0001,0, "vaddw", "u", add, zext, 0>;
// VHADD : Vector Halving Add
defm VHADDs   : N3VInt_QHS<0, 0, 0b0000, 0, N3RegFrm,
                           IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
                           "vhadd", "s", int_arm_neon_vhadds, 1>;
defm VHADDu   : N3VInt_QHS<1, 0, 0b0000, 0, N3RegFrm,
                           IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
                           "vhadd", "u", int_arm_neon_vhaddu, 1>;
// VRHADD : Vector Rounding Halving Add
defm VRHADDs  : N3VInt_QHS<0, 0, 0b0001, 0, N3RegFrm,
                           IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
                           "vrhadd", "s", int_arm_neon_vrhadds, 1>;
defm VRHADDu  : N3VInt_QHS<1, 0, 0b0001, 0, N3RegFrm,
                           IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
                           "vrhadd", "u", int_arm_neon_vrhaddu, 1>;
// VQADD : Vector Saturating Add
defm VQADDs   : N3VInt_QHSD<0, 0, 0b0000, 1, N3RegFrm,
                            IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
                            "vqadd", "s", int_arm_neon_vqadds, 1>;
defm VQADDu   : N3VInt_QHSD<1, 0, 0b0000, 1, N3RegFrm,
                            IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
                            "vqadd", "u", int_arm_neon_vqaddu, 1>;
// VADDHN : Vector Add and Narrow Returning High Half (D = Q + Q)
defm VADDHN   : N3VNInt_HSD<0,1,0b0100,0, "vaddhn", "i",
                            int_arm_neon_vaddhn, 1>;
// VRADDHN : Vector Rounding Add and Narrow Returning High Half (D = Q + Q)
defm VRADDHN  : N3VNInt_HSD<1,1,0b0100,0, "vraddhn", "i",
                            int_arm_neon_vraddhn, 1>;
// Vector Multiply Operations.

// VMUL : Vector Multiply (integer, polynomial and floating-point)
defm VMUL     : N3V_QHS<0, 0, 0b1001, 1, IIC_VMULi16D, IIC_VMULi32D,
                        IIC_VMULi16Q, IIC_VMULi32Q, "vmul", "i", mul, 1>;
def  VMULpd   : N3VDInt<1, 0, 0b00, 0b1001, 1, N3RegFrm, IIC_VMULi16D, "vmul",
                        "p8", v8i8, v8i8, int_arm_neon_vmulp, 1>;
def  VMULpq   : N3VQInt<1, 0, 0b00, 0b1001, 1, N3RegFrm, IIC_VMULi16Q, "vmul",
                        "p8", v16i8, v16i8, int_arm_neon_vmulp, 1>;
def  VMULfd   : N3VD<1, 0, 0b00, 0b1101, 1, IIC_VFMULD, "vmul", "f32",
                     v2f32, v2f32, fmul, 1>;
def  VMULfq   : N3VQ<1, 0, 0b00, 0b1101, 1, IIC_VFMULQ, "vmul", "f32",
                     v4f32, v4f32, fmul, 1>;
defm VMULsl   : N3VSL_HS<0b1000, "vmul", "i", mul>;
def  VMULslfd : N3VDSL<0b10, 0b1001, IIC_VBIND, "vmul", "f32", v2f32, fmul>;
def  VMULslfq : N3VQSL<0b10, 0b1001, IIC_VBINQ, "vmul", "f32", v4f32,
                       v2f32, fmul>;

// Fold a by-lane multiply of a Q register into the by-scalar instruction by
// extracting the D subregister that holds the lane.
def : Pat<(v8i16 (mul (v8i16 QPR:$src1),
                      (v8i16 (NEONvduplane (v8i16 QPR:$src2), imm:$lane)))),
          (v8i16 (VMULslv8i16 (v8i16 QPR:$src1),
                              (v4i16 (EXTRACT_SUBREG QPR:$src2,
                                      (DSubReg_i16_reg imm:$lane))),
                              (SubReg_i16_lane imm:$lane)))>;
def : Pat<(v4i32 (mul (v4i32 QPR:$src1),
                      (v4i32 (NEONvduplane (v4i32 QPR:$src2), imm:$lane)))),
          (v4i32 (VMULslv4i32 (v4i32 QPR:$src1),
                              (v2i32 (EXTRACT_SUBREG QPR:$src2,
                                      (DSubReg_i32_reg imm:$lane))),
                              (SubReg_i32_lane imm:$lane)))>;
def : Pat<(v4f32 (fmul (v4f32 QPR:$src1),
                       (v4f32 (NEONvduplane (v4f32 QPR:$src2), imm:$lane)))),
          (v4f32 (VMULslfq (v4f32 QPR:$src1),
                           (v2f32 (EXTRACT_SUBREG QPR:$src2,
                                   (DSubReg_i32_reg imm:$lane))),
                           (SubReg_i32_lane imm:$lane)))>;

// VQDMULH : Vector Saturating Doubling Multiply Returning High Half
defm VQDMULH  : N3VInt_HS<0, 0, 0b1011, 0, N3RegFrm, IIC_VMULi16D, IIC_VMULi32D,
                          IIC_VMULi16Q, IIC_VMULi32Q,
                          "vqdmulh", "s", int_arm_neon_vqdmulh, 1>;
defm VQDMULHsl: N3VIntSL_HS<0b1100, IIC_VMULi16D, IIC_VMULi32D,
                            IIC_VMULi16Q, IIC_VMULi32Q,
                            "vqdmulh", "s", int_arm_neon_vqdmulh>;
def : Pat<(v8i16 (int_arm_neon_vqdmulh (v8i16 QPR:$src1),
                                       (v8i16 (NEONvduplane (v8i16 QPR:$src2),
                                                            imm:$lane)))),
          (v8i16 (VQDMULHslv8i16 (v8i16 QPR:$src1),
                                 (v4i16 (EXTRACT_SUBREG QPR:$src2,
                                         (DSubReg_i16_reg imm:$lane))),
                                 (SubReg_i16_lane imm:$lane)))>;
def : Pat<(v4i32 (int_arm_neon_vqdmulh (v4i32 QPR:$src1),
                                       (v4i32 (NEONvduplane (v4i32 QPR:$src2),
                                                            imm:$lane)))),
          (v4i32 (VQDMULHslv4i32 (v4i32 QPR:$src1),
                                 (v2i32 (EXTRACT_SUBREG QPR:$src2,
                                         (DSubReg_i32_reg imm:$lane))),
                                 (SubReg_i32_lane imm:$lane)))>;

// VQRDMULH : Vector Rounding Saturating Doubling Multiply Returning High Half
defm VQRDMULH   : N3VInt_HS<1, 0, 0b1011, 0, N3RegFrm,
                            IIC_VMULi16D,IIC_VMULi32D,IIC_VMULi16Q,IIC_VMULi32Q,
                            "vqrdmulh", "s", int_arm_neon_vqrdmulh, 1>;
defm VQRDMULHsl : N3VIntSL_HS<0b1101, IIC_VMULi16D, IIC_VMULi32D,
                              IIC_VMULi16Q, IIC_VMULi32Q,
                              "vqrdmulh", "s", int_arm_neon_vqrdmulh>;
def : Pat<(v8i16 (int_arm_neon_vqrdmulh (v8i16 QPR:$src1),
                                        (v8i16 (NEONvduplane (v8i16 QPR:$src2),
                                                             imm:$lane)))),
          (v8i16 (VQRDMULHslv8i16 (v8i16 QPR:$src1),
                                  (v4i16 (EXTRACT_SUBREG QPR:$src2,
                                          (DSubReg_i16_reg imm:$lane))),
                                  (SubReg_i16_lane imm:$lane)))>;
def : Pat<(v4i32 (int_arm_neon_vqrdmulh (v4i32 QPR:$src1),
                                        (v4i32 (NEONvduplane (v4i32 QPR:$src2),
                                                             imm:$lane)))),
          (v4i32 (VQRDMULHslv4i32 (v4i32 QPR:$src1),
                                  (v2i32 (EXTRACT_SUBREG QPR:$src2,
                                          (DSubReg_i32_reg imm:$lane))),
                                  (SubReg_i32_lane imm:$lane)))>;

// VMULL : Vector Multiply Long (integer and polynomial) (Q = D * D)
defm VMULLs   : N3VL_QHS<0,1,0b1100,0, IIC_VMULi16D, IIC_VMULi32D,
                         "vmull", "s", NEONvmulls, 1>;
defm VMULLu   : N3VL_QHS<1,1,0b1100,0, IIC_VMULi16D, IIC_VMULi32D,
                         "vmull", "u", NEONvmullu, 1>;
def  VMULLp   : N3VLInt<0, 1, 0b00, 0b1110, 0, IIC_VMULi16D, "vmull", "p8",
                        v8i16, v8i8, int_arm_neon_vmullp, 1>;
defm VMULLsls : N3VLSL_HS<0, 0b1010, IIC_VMULi16D, "vmull", "s", NEONvmulls>;
defm VMULLslu : N3VLSL_HS<1, 0b1010, IIC_VMULi16D, "vmull", "u", NEONvmullu>;

// VQDMULL : Vector Saturating Doubling Multiply Long (Q = D * D)
defm VQDMULL  : N3VLInt_HS<0,1,0b1101,0, IIC_VMULi16D, IIC_VMULi32D,
                           "vqdmull", "s", int_arm_neon_vqdmull, 1>;
defm VQDMULLsl: N3VLIntSL_HS<0, 0b1011, IIC_VMULi16D,
                             "vqdmull", "s", int_arm_neon_vqdmull>;
// Vector Multiply-Accumulate and Multiply-Subtract Operations.

// VMLA : Vector Multiply Accumulate (integer and floating-point)
defm VMLA     : N3VMulOp_QHS<0, 0, 0b1001, 0, IIC_VMACi16D, IIC_VMACi32D,
                             IIC_VMACi16Q, IIC_VMACi32Q, "vmla", "i", add>;
def  VMLAfd   : N3VDMulOp<0, 0, 0b00, 0b1101, 1, IIC_VMACD, "vmla", "f32",
                          v2f32, fmul_su, fadd_mlx>,
                Requires<[HasNEON, UseFPVMLx]>;
def  VMLAfq   : N3VQMulOp<0, 0, 0b00, 0b1101, 1, IIC_VMACQ, "vmla", "f32",
                          v4f32, fmul_su, fadd_mlx>,
                Requires<[HasNEON, UseFPVMLx]>;
defm VMLAsl   : N3VMulOpSL_HS<0b0000, IIC_VMACi16D, IIC_VMACi32D,
                              IIC_VMACi16Q, IIC_VMACi32Q, "vmla", "i", add>;
def  VMLAslfd : N3VDMulOpSL<0b10, 0b0001, IIC_VMACD, "vmla", "f32",
                            v2f32, fmul_su, fadd_mlx>,
                Requires<[HasNEON, UseFPVMLx]>;
def  VMLAslfq : N3VQMulOpSL<0b10, 0b0001, IIC_VMACQ, "vmla", "f32",
                            v4f32, v2f32, fmul_su, fadd_mlx>,
                Requires<[HasNEON, UseFPVMLx]>;

def : Pat<(v8i16 (add (v8i16 QPR:$src1),
                  (mul (v8i16 QPR:$src2),
                       (v8i16 (NEONvduplane (v8i16 QPR:$src3), imm:$lane))))),
          (v8i16 (VMLAslv8i16 (v8i16 QPR:$src1), (v8i16 QPR:$src2),
                              (v4i16 (EXTRACT_SUBREG QPR:$src3,
                                      (DSubReg_i16_reg imm:$lane))),
                              (SubReg_i16_lane imm:$lane)))>;

def : Pat<(v4i32 (add (v4i32 QPR:$src1),
                  (mul (v4i32 QPR:$src2),
                       (v4i32 (NEONvduplane (v4i32 QPR:$src3), imm:$lane))))),
          (v4i32 (VMLAslv4i32 (v4i32 QPR:$src1), (v4i32 QPR:$src2),
                              (v2i32 (EXTRACT_SUBREG QPR:$src3,
                                      (DSubReg_i32_reg imm:$lane))),
                              (SubReg_i32_lane imm:$lane)))>;

def : Pat<(v4f32 (fadd_mlx (v4f32 QPR:$src1),
                  (fmul_su (v4f32 QPR:$src2),
                           (v4f32 (NEONvduplane (v4f32 QPR:$src3), imm:$lane))))),
          (v4f32 (VMLAslfq (v4f32 QPR:$src1),
                           (v4f32 QPR:$src2),
                           (v2f32 (EXTRACT_SUBREG QPR:$src3,
                                   (DSubReg_i32_reg imm:$lane))),
                           (SubReg_i32_lane imm:$lane)))>,
          Requires<[HasNEON, UseFPVMLx]>;

// VMLAL : Vector Multiply Accumulate Long (Q += D * D)
defm VMLALs   : N3VLMulOp_QHS<0,1,0b1000,0, IIC_VMACi16D, IIC_VMACi32D,
                              "vmlal", "s", NEONvmulls, add>;
defm VMLALu   : N3VLMulOp_QHS<1,1,0b1000,0, IIC_VMACi16D, IIC_VMACi32D,
                              "vmlal", "u", NEONvmullu, add>;

defm VMLALsls : N3VLMulOpSL_HS<0, 0b0010, "vmlal", "s", NEONvmulls, add>;
defm VMLALslu : N3VLMulOpSL_HS<1, 0b0010, "vmlal", "u", NEONvmullu, add>;

// VQDMLAL : Vector Saturating Doubling Multiply Accumulate Long (Q += D * D)
defm VQDMLAL  : N3VLInt3_HS<0, 1, 0b1001, 0, IIC_VMACi16D, IIC_VMACi32D,
                            "vqdmlal", "s", int_arm_neon_vqdmlal>;
defm VQDMLALsl: N3VLInt3SL_HS<0, 0b0011, "vqdmlal", "s", int_arm_neon_vqdmlal>;

// VMLS : Vector Multiply Subtract (integer and floating-point)
defm VMLS     : N3VMulOp_QHS<1, 0, 0b1001, 0, IIC_VMACi16D, IIC_VMACi32D,
                             IIC_VMACi16Q, IIC_VMACi32Q, "vmls", "i", sub>;
def  VMLSfd   : N3VDMulOp<0, 0, 0b10, 0b1101, 1, IIC_VMACD, "vmls", "f32",
                          v2f32, fmul_su, fsub_mlx>,
                Requires<[HasNEON, UseFPVMLx]>;
def  VMLSfq   : N3VQMulOp<0, 0, 0b10, 0b1101, 1, IIC_VMACQ, "vmls", "f32",
                          v4f32, fmul_su, fsub_mlx>,
                Requires<[HasNEON, UseFPVMLx]>;
defm VMLSsl   : N3VMulOpSL_HS<0b0100, IIC_VMACi16D, IIC_VMACi32D,
                              IIC_VMACi16Q, IIC_VMACi32Q, "vmls", "i", sub>;
def  VMLSslfd : N3VDMulOpSL<0b10, 0b0101, IIC_VMACD, "vmls", "f32",
                            v2f32, fmul_su, fsub_mlx>,
                Requires<[HasNEON, UseFPVMLx]>;
def  VMLSslfq : N3VQMulOpSL<0b10, 0b0101, IIC_VMACQ, "vmls", "f32",
                            v4f32, v2f32, fmul_su, fsub_mlx>,
                Requires<[HasNEON, UseFPVMLx]>;

def : Pat<(v8i16 (sub (v8i16 QPR:$src1),
                  (mul (v8i16 QPR:$src2),
                       (v8i16 (NEONvduplane (v8i16 QPR:$src3), imm:$lane))))),
          (v8i16 (VMLSslv8i16 (v8i16 QPR:$src1), (v8i16 QPR:$src2),
                              (v4i16 (EXTRACT_SUBREG QPR:$src3,
                                      (DSubReg_i16_reg imm:$lane))),
                              (SubReg_i16_lane imm:$lane)))>;

def : Pat<(v4i32 (sub (v4i32 QPR:$src1),
                  (mul (v4i32 QPR:$src2),
                       (v4i32 (NEONvduplane (v4i32 QPR:$src3), imm:$lane))))),
          (v4i32 (VMLSslv4i32 (v4i32 QPR:$src1), (v4i32 QPR:$src2),
                              (v2i32 (EXTRACT_SUBREG QPR:$src3,
                                      (DSubReg_i32_reg imm:$lane))),
                              (SubReg_i32_lane imm:$lane)))>;

def : Pat<(v4f32 (fsub_mlx (v4f32 QPR:$src1),
                  (fmul_su (v4f32 QPR:$src2),
                           (v4f32 (NEONvduplane (v4f32 QPR:$src3), imm:$lane))))),
          (v4f32 (VMLSslfq (v4f32 QPR:$src1), (v4f32 QPR:$src2),
                           (v2f32 (EXTRACT_SUBREG QPR:$src3,
                                   (DSubReg_i32_reg imm:$lane))),
                           (SubReg_i32_lane imm:$lane)))>,
          Requires<[HasNEON, UseFPVMLx]>;

// VMLSL : Vector Multiply Subtract Long (Q -= D * D)
defm VMLSLs   : N3VLMulOp_QHS<0,1,0b1010,0, IIC_VMACi16D, IIC_VMACi32D,
                              "vmlsl", "s", NEONvmulls, sub>;
defm VMLSLu   : N3VLMulOp_QHS<1,1,0b1010,0, IIC_VMACi16D, IIC_VMACi32D,
                              "vmlsl", "u", NEONvmullu, sub>;

defm VMLSLsls : N3VLMulOpSL_HS<0, 0b0110, "vmlsl", "s", NEONvmulls, sub>;
defm VMLSLslu : N3VLMulOpSL_HS<1, 0b0110, "vmlsl", "u", NEONvmullu, sub>;

// VQDMLSL : Vector Saturating Doubling Multiply Subtract Long (Q -= D * D)
defm VQDMLSL  : N3VLInt3_HS<0, 1, 0b1011, 0, IIC_VMACi16D, IIC_VMACi32D,
                            "vqdmlsl", "s", int_arm_neon_vqdmlsl>;
defm VQDMLSLsl: N3VLInt3SL_HS<0, 0b111, "vqdmlsl", "s", int_arm_neon_vqdmlsl>;
// Vector Subtract Operations.

// VSUB : Vector Subtract (integer and floating-point)
defm VSUB     : N3V_QHSD<1, 0, 0b1000, 0, IIC_VSUBiD, IIC_VSUBiQ,
                         "vsub", "i", sub, 0>;
def  VSUBfd   : N3VD<0, 0, 0b10, 0b1101, 0, IIC_VBIND, "vsub", "f32",
                     v2f32, v2f32, fsub, 0>;
def  VSUBfq   : N3VQ<0, 0, 0b10, 0b1101, 0, IIC_VBINQ, "vsub", "f32",
                     v4f32, v4f32, fsub, 0>;
// VSUBL : Vector Subtract Long (Q = D - D)
defm VSUBLs   : N3VLExt_QHS<0,1,0b0010,0, IIC_VSHLiD, IIC_VSHLiD,
                            "vsubl", "s", sub, sext, 0>;
defm VSUBLu   : N3VLExt_QHS<1,1,0b0010,0, IIC_VSHLiD, IIC_VSHLiD,
                            "vsubl", "u", sub, zext, 0>;
// VSUBW : Vector Subtract Wide (Q = Q - D)
defm VSUBWs   : N3VW_QHS<0,1,0b0011,0, "vsubw", "s", sub, sext, 0>;
defm VSUBWu   : N3VW_QHS<1,1,0b0011,0, "vsubw", "u", sub, zext, 0>;
// VHSUB : Vector Halving Subtract
defm VHSUBs   : N3VInt_QHS<0, 0, 0b0010, 0, N3RegFrm,
                           IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
                           "vhsub", "s", int_arm_neon_vhsubs, 0>;
defm VHSUBu   : N3VInt_QHS<1, 0, 0b0010, 0, N3RegFrm,
                           IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
                           "vhsub", "u", int_arm_neon_vhsubu, 0>;
// VQSUB : Vector Saturing Subtract
defm VQSUBs   : N3VInt_QHSD<0, 0, 0b0010, 1, N3RegFrm,
                            IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
                            "vqsub", "s", int_arm_neon_vqsubs, 0>;
defm VQSUBu   : N3VInt_QHSD<1, 0, 0b0010, 1, N3RegFrm,
                            IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
                            "vqsub", "u", int_arm_neon_vqsubu, 0>;
// VSUBHN : Vector Subtract and Narrow Returning High Half (D = Q - Q)
defm VSUBHN   : N3VNInt_HSD<0,1,0b0110,0, "vsubhn", "i",
                            int_arm_neon_vsubhn, 0>;
// VRSUBHN : Vector Rounding Subtract and Narrow Returning High Half (D=Q-Q)
defm VRSUBHN  : N3VNInt_HSD<1,1,0b0110,0, "vrsubhn", "i",
                            int_arm_neon_vrsubhn, 0>;
// Vector Comparisons.

// VCEQ : Vector Compare Equal
defm VCEQ     : N3V_QHS<1, 0, 0b1000, 1, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
                        IIC_VSUBi4Q, "vceq", "i", NEONvceq, 1>;
def  VCEQfd   : N3VD<0,0,0b00,0b1110,0, IIC_VBIND, "vceq", "f32", v2i32, v2f32,
                     NEONvceq, 1>;
def  VCEQfq   : N3VQ<0,0,0b00,0b1110,0, IIC_VBINQ, "vceq", "f32", v4i32, v4f32,
                     NEONvceq, 1>;

defm VCEQz    : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00010, 0, "vceq", "i",
                            "$Vd, $Vm, #0", NEONvceqz>;

// VCGE : Vector Compare Greater Than or Equal
defm VCGEs    : N3V_QHS<0, 0, 0b0011, 1, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
                        IIC_VSUBi4Q, "vcge", "s", NEONvcge, 0>;
defm VCGEu    : N3V_QHS<1, 0, 0b0011, 1, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
                        IIC_VSUBi4Q, "vcge", "u", NEONvcgeu, 0>;
def  VCGEfd   : N3VD<1,0,0b00,0b1110,0, IIC_VBIND, "vcge", "f32", v2i32, v2f32,
                     NEONvcge, 0>;
def  VCGEfq   : N3VQ<1,0,0b00,0b1110,0, IIC_VBINQ, "vcge", "f32", v4i32, v4f32,
                     NEONvcge, 0>;

defm VCGEz    : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00001, 0, "vcge", "s",
                            "$Vd, $Vm, #0", NEONvcgez>;
defm VCLEz    : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00011, 0, "vcle", "s",
                            "$Vd, $Vm, #0", NEONvclez>;

// VCGT : Vector Compare Greater Than
defm VCGTs    : N3V_QHS<0, 0, 0b0011, 0, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
                        IIC_VSUBi4Q, "vcgt", "s", NEONvcgt, 0>;
defm VCGTu    : N3V_QHS<1, 0, 0b0011, 0, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
                        IIC_VSUBi4Q, "vcgt", "u", NEONvcgtu, 0>;
def  VCGTfd   : N3VD<1,0,0b10,0b1110,0, IIC_VBIND, "vcgt", "f32", v2i32, v2f32,
                     NEONvcgt, 0>;
def  VCGTfq   : N3VQ<1,0,0b10,0b1110,0, IIC_VBINQ, "vcgt", "f32", v4i32, v4f32,
                     NEONvcgt, 0>;

defm VCGTz    : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00000, 0, "vcgt", "s",
                            "$Vd, $Vm, #0", NEONvcgtz>;
defm VCLTz    : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00100, 0, "vclt", "s",
                            "$Vd, $Vm, #0", NEONvcltz>;

// VACGE : Vector Absolute Compare Greater Than or Equal (aka VCAGE)
def  VACGEd   : N3VDInt<1, 0, 0b00, 0b1110, 1, N3RegFrm, IIC_VBIND, "vacge",
                        "f32", v2i32, v2f32, int_arm_neon_vacged, 0>;
def  VACGEq   : N3VQInt<1, 0, 0b00, 0b1110, 1, N3RegFrm, IIC_VBINQ, "vacge",
                        "f32", v4i32, v4f32, int_arm_neon_vacgeq, 0>;
// VACGT : Vector Absolute Compare Greater Than (aka VCAGT)
def  VACGTd   : N3VDInt<1, 0, 0b10, 0b1110, 1, N3RegFrm, IIC_VBIND, "vacgt",
                        "f32", v2i32, v2f32, int_arm_neon_vacgtd, 0>;
def  VACGTq   : N3VQInt<1, 0, 0b10, 0b1110, 1, N3RegFrm, IIC_VBINQ, "vacgt",
                        "f32", v4i32, v4f32, int_arm_neon_vacgtq, 0>;
// VTST : Vector Test Bits
defm VTST     : N3V_QHS<0, 0, 0b1000, 1, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
                        IIC_VBINi4Q, "vtst", "", NEONvtst, 1>;
// Vector Bitwise Operations.

// Bitwise-NOT expressed as XOR with an all-ones vector.
def vnotd : PatFrag<(ops node:$in),
                    (xor node:$in, (bitconvert (v8i8  NEONimmAllOnesV)))>;
def vnotq : PatFrag<(ops node:$in),
                    (xor node:$in, (bitconvert (v16i8 NEONimmAllOnesV)))>;

// VAND : Vector Bitwise AND
def  VANDd    : N3VDX<0, 0, 0b00, 0b0001, 1, IIC_VBINiD, "vand",
                      v2i32, v2i32, and, 1>;
def  VANDq    : N3VQX<0, 0, 0b00, 0b0001, 1, IIC_VBINiQ, "vand",
                      v4i32, v4i32, and, 1>;

// VEOR : Vector Bitwise Exclusive OR
def  VEORd    : N3VDX<1, 0, 0b00, 0b0001, 1, IIC_VBINiD, "veor",
                      v2i32, v2i32, xor, 1>;
def  VEORq    : N3VQX<1, 0, 0b00, 0b0001, 1, IIC_VBINiQ, "veor",
                      v4i32, v4i32, xor, 1>;

// VORR : Vector Bitwise OR
def  VORRd    : N3VDX<0, 0, 0b10, 0b0001, 1, IIC_VBINiD, "vorr",
                      v2i32, v2i32, or, 1>;
def  VORRq    : N3VQX<0, 0, 0b10, 0b0001, 1, IIC_VBINiQ, "vorr",
                      v4i32, v4i32, or, 1>;
3924 def VORRiv4i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 0, 0, 1,
3925 (outs DPR:$Vd), (ins nImmSplatI16:$SIMM, DPR:$src),
3927 "vorr", "i16", "$Vd, $SIMM", "$src = $Vd",
3929 (v4i16 (NEONvorrImm DPR:$src, timm:$SIMM)))]> {
3930 let Inst{9} = SIMM{9};
3933 def VORRiv2i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 0, 0, 1,
3934 (outs DPR:$Vd), (ins nImmSplatI32:$SIMM, DPR:$src),
3936 "vorr", "i32", "$Vd, $SIMM", "$src = $Vd",
3938 (v2i32 (NEONvorrImm DPR:$src, timm:$SIMM)))]> {
3939 let Inst{10-9} = SIMM{10-9};
3942 def VORRiv8i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 1, 0, 1,
3943 (outs QPR:$Vd), (ins nImmSplatI16:$SIMM, QPR:$src),
3945 "vorr", "i16", "$Vd, $SIMM", "$src = $Vd",
3947 (v8i16 (NEONvorrImm QPR:$src, timm:$SIMM)))]> {
3948 let Inst{9} = SIMM{9};
3951 def VORRiv4i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 1, 0, 1,
3952 (outs QPR:$Vd), (ins nImmSplatI32:$SIMM, QPR:$src),
3954 "vorr", "i32", "$Vd, $SIMM", "$src = $Vd",
3956 (v4i32 (NEONvorrImm QPR:$src, timm:$SIMM)))]> {
3957 let Inst{10-9} = SIMM{10-9};
3961 // VBIC : Vector Bitwise Bit Clear (AND NOT)
// Selected from an explicit (and Vn, (not Vm)) DAG using the vnotd/vnotq
// fragments; not commutable, since the NOT applies only to $Vm.
3962 def VBICd : N3VX<0, 0, 0b01, 0b0001, 0, 1, (outs DPR:$Vd),
3963 (ins DPR:$Vn, DPR:$Vm), N3RegFrm, IIC_VBINiD,
3964 "vbic", "$Vd, $Vn, $Vm", "",
3965 [(set DPR:$Vd, (v2i32 (and DPR:$Vn,
3966 (vnotd DPR:$Vm))))]>;
3967 def VBICq : N3VX<0, 0, 0b01, 0b0001, 1, 1, (outs QPR:$Vd),
3968 (ins QPR:$Vn, QPR:$Vm), N3RegFrm, IIC_VBINiQ,
3969 "vbic", "$Vd, $Vn, $Vm", "",
3970 [(set QPR:$Vd, (v4i32 (and QPR:$Vn,
3971 (vnotq QPR:$Vm))))]>;
3973 def VBICiv4i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 0, 1, 1,
3974 (outs DPR:$Vd), (ins nImmSplatI16:$SIMM, DPR:$src),
3976 "vbic", "i16", "$Vd, $SIMM", "$src = $Vd",
3978 (v4i16 (NEONvbicImm DPR:$src, timm:$SIMM)))]> {
3979 let Inst{9} = SIMM{9};
3982 def VBICiv2i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 0, 1, 1,
3983 (outs DPR:$Vd), (ins nImmSplatI32:$SIMM, DPR:$src),
3985 "vbic", "i32", "$Vd, $SIMM", "$src = $Vd",
3987 (v2i32 (NEONvbicImm DPR:$src, timm:$SIMM)))]> {
3988 let Inst{10-9} = SIMM{10-9};
3991 def VBICiv8i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 1, 1, 1,
3992 (outs QPR:$Vd), (ins nImmSplatI16:$SIMM, QPR:$src),
3994 "vbic", "i16", "$Vd, $SIMM", "$src = $Vd",
3996 (v8i16 (NEONvbicImm QPR:$src, timm:$SIMM)))]> {
3997 let Inst{9} = SIMM{9};
4000 def VBICiv4i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 1, 1, 1,
4001 (outs QPR:$Vd), (ins nImmSplatI32:$SIMM, QPR:$src),
4003 "vbic", "i32", "$Vd, $SIMM", "$src = $Vd",
4005 (v4i32 (NEONvbicImm QPR:$src, timm:$SIMM)))]> {
4006 let Inst{10-9} = SIMM{10-9};
4009 // VORN : Vector Bitwise OR NOT
// Same structure as VBIC above but with OR instead of AND (op21_20 = 0b11);
// the NOT applies only to $Vm, so these are not commutable.
4010 def VORNd : N3VX<0, 0, 0b11, 0b0001, 0, 1, (outs DPR:$Vd),
4011 (ins DPR:$Vn, DPR:$Vm), N3RegFrm, IIC_VBINiD,
4012 "vorn", "$Vd, $Vn, $Vm", "",
4013 [(set DPR:$Vd, (v2i32 (or DPR:$Vn,
4014 (vnotd DPR:$Vm))))]>;
4015 def VORNq : N3VX<0, 0, 0b11, 0b0001, 1, 1, (outs QPR:$Vd),
4016 (ins QPR:$Vn, QPR:$Vm), N3RegFrm, IIC_VBINiQ,
4017 "vorn", "$Vd, $Vn, $Vm", "",
4018 [(set QPR:$Vd, (v4i32 (or QPR:$Vn,
4019 (vnotq QPR:$Vm))))]>;
4021 // VMVN : Vector Bitwise NOT (Immediate)
4023 let isReMaterializable = 1 in {
4025 def VMVNv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 1, 1, (outs DPR:$Vd),
4026 (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
4027 "vmvn", "i16", "$Vd, $SIMM", "",
4028 [(set DPR:$Vd, (v4i16 (NEONvmvnImm timm:$SIMM)))]> {
4029 let Inst{9} = SIMM{9};
4032 def VMVNv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 1, 1, (outs QPR:$Vd),
4033 (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
4034 "vmvn", "i16", "$Vd, $SIMM", "",
4035 [(set QPR:$Vd, (v8i16 (NEONvmvnImm timm:$SIMM)))]> {
4036 let Inst{9} = SIMM{9};
4039 def VMVNv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 1, 1, (outs DPR:$Vd),
4040 (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
4041 "vmvn", "i32", "$Vd, $SIMM", "",
4042 [(set DPR:$Vd, (v2i32 (NEONvmvnImm timm:$SIMM)))]> {
4043 let Inst{11-8} = SIMM{11-8};
4046 def VMVNv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 1, 1, (outs QPR:$Vd),
4047 (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
4048 "vmvn", "i32", "$Vd, $SIMM", "",
4049 [(set QPR:$Vd, (v4i32 (NEONvmvnImm timm:$SIMM)))]> {
4050 let Inst{11-8} = SIMM{11-8};
4054 // VMVN : Vector Bitwise NOT
// Register form; matched from the vnotd/vnotq XOR-with-all-ones fragments.
4055 def VMVNd : N2VX<0b11, 0b11, 0b00, 0b00, 0b01011, 0, 0,
4056 (outs DPR:$Vd), (ins DPR:$Vm), IIC_VSUBiD,
4057 "vmvn", "$Vd, $Vm", "",
4058 [(set DPR:$Vd, (v2i32 (vnotd DPR:$Vm)))]>;
4059 def VMVNq : N2VX<0b11, 0b11, 0b00, 0b00, 0b01011, 1, 0,
4060 (outs QPR:$Vd), (ins QPR:$Vm), IIC_VSUBiD,
4061 "vmvn", "$Vd, $Vm", "",
4062 [(set QPR:$Vd, (v4i32 (vnotq QPR:$Vm)))]>;
// NOTE(review): these two anonymous patterns look redundant with the
// patterns embedded in the defs above — confirm before removing.
4063 def : Pat<(v2i32 (vnotd DPR:$src)), (VMVNd DPR:$src)>;
4064 def : Pat<(v4i32 (vnotq QPR:$src)), (VMVNq QPR:$src)>;
4066 // VBSL : Vector Bitwise Select
4067 def VBSLd : N3VX<1, 0, 0b01, 0b0001, 0, 1, (outs DPR:$Vd),
4068 (ins DPR:$src1, DPR:$Vn, DPR:$Vm),
4069 N3RegFrm, IIC_VCNTiD,
4070 "vbsl", "$Vd, $Vn, $Vm", "$src1 = $Vd",
4072 (v2i32 (NEONvbsl DPR:$src1, DPR:$Vn, DPR:$Vm)))]>;
// Fold the expanded select form (Vn & Vd) | (Vm & ~Vd) into a single VBSLd,
// with $Vd supplying the mask operand.
4074 def : Pat<(v2i32 (or (and DPR:$Vn, DPR:$Vd),
4075 (and DPR:$Vm, (vnotd DPR:$Vd)))),
4076 (VBSLd DPR:$Vd, DPR:$Vn, DPR:$Vm)>;
4078 def VBSLq : N3VX<1, 0, 0b01, 0b0001, 1, 1, (outs QPR:$Vd),
4079 (ins QPR:$src1, QPR:$Vn, QPR:$Vm),
4080 N3RegFrm, IIC_VCNTiQ,
4081 "vbsl", "$Vd, $Vn, $Vm", "$src1 = $Vd",
4083 (v4i32 (NEONvbsl QPR:$src1, QPR:$Vn, QPR:$Vm)))]>;
// Q-register analogue of the VBSLd fold: (Vn & Vd) | (Vm & ~Vd) -> VBSLq.
4085 def : Pat<(v4i32 (or (and QPR:$Vn, QPR:$Vd),
4086 (and QPR:$Vm, (vnotq QPR:$Vd)))),
4087 (VBSLq QPR:$Vd, QPR:$Vn, QPR:$Vm)>;
4089 // VBIF : Vector Bitwise Insert if False
4090 // like VBSL but with: "vbif $dst, $src3, $src1", "$src2 = $dst",
4091 // FIXME: This instruction's encoding MAY NOT BE correct.
4092 def VBIFd : N3VX<1, 0, 0b11, 0b0001, 0, 1,
4093 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm),
4094 N3RegFrm, IIC_VBINiD,
4095 "vbif", "$Vd, $Vn, $Vm", "$src1 = $Vd",
4097 def VBIFq : N3VX<1, 0, 0b11, 0b0001, 1, 1,
4098 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm),
4099 N3RegFrm, IIC_VBINiQ,
4100 "vbif", "$Vd, $Vn, $Vm", "$src1 = $Vd",
4103 // VBIT : Vector Bitwise Insert if True
4104 // like VBSL but with: "vbit $dst, $src2, $src1", "$src3 = $dst",
4105 // FIXME: This instruction's encoding MAY NOT BE correct.
4106 def VBITd : N3VX<1, 0, 0b10, 0b0001, 0, 1,
4107 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm),
4108 N3RegFrm, IIC_VBINiD,
4109 "vbit", "$Vd, $Vn, $Vm", "$src1 = $Vd",
4111 def VBITq : N3VX<1, 0, 0b10, 0b0001, 1, 1,
4112 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm),
4113 N3RegFrm, IIC_VBINiQ,
4114 "vbit", "$Vd, $Vn, $Vm", "$src1 = $Vd",
4117 // VBIT/VBIF are not yet implemented. The TwoAddress pass will not go looking
4118 // for equivalent operations with different register constraints; it just
4121 // Vector Absolute Differences.
4123 // VABD : Vector Absolute Difference
// Signed/unsigned integer forms over 8/16/32-bit elements (QHS), plus f32
// forms; all selected via intrinsics and marked commutable.
4124 defm VABDs : N3VInt_QHS<0, 0, 0b0111, 0, N3RegFrm,
4125 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4126 "vabd", "s", int_arm_neon_vabds, 1>;
4127 defm VABDu : N3VInt_QHS<1, 0, 0b0111, 0, N3RegFrm,
4128 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4129 "vabd", "u", int_arm_neon_vabdu, 1>;
// NOTE(review): the f32 forms reuse int_arm_neon_vabds (the intrinsic is
// presumably overloaded for float types here) — confirm against the
// intrinsic definitions.
4130 def VABDfd : N3VDInt<1, 0, 0b10, 0b1101, 0, N3RegFrm, IIC_VBIND,
4131 "vabd", "f32", v2f32, v2f32, int_arm_neon_vabds, 1>;
4132 def VABDfq : N3VQInt<1, 0, 0b10, 0b1101, 0, N3RegFrm, IIC_VBINQ,
4133 "vabd", "f32", v4f32, v4f32, int_arm_neon_vabds, 1>;
4135 // VABDL : Vector Absolute Difference Long (Q = | D - D |)
// Widening form: the D-sized intrinsic result is zero-extended (zext) into
// the Q destination.
4136 defm VABDLs : N3VLIntExt_QHS<0,1,0b0111,0, IIC_VSUBi4Q,
4137 "vabdl", "s", int_arm_neon_vabds, zext, 1>;
4138 defm VABDLu : N3VLIntExt_QHS<1,1,0b0111,0, IIC_VSUBi4Q,
4139 "vabdl", "u", int_arm_neon_vabdu, zext, 1>;
4141 // VABA : Vector Absolute Difference and Accumulate
// Composed as abd followed by add in the selection pattern.
4142 defm VABAs : N3VIntOp_QHS<0,0,0b0111,1, IIC_VABAD, IIC_VABAQ,
4143 "vaba", "s", int_arm_neon_vabds, add>;
4144 defm VABAu : N3VIntOp_QHS<1,0,0b0111,1, IIC_VABAD, IIC_VABAQ,
4145 "vaba", "u", int_arm_neon_vabdu, add>;
4147 // VABAL : Vector Absolute Difference and Accumulate Long (Q += | D - D |)
// Widening accumulate: abd result is zero-extended, then added into the
// Q-register accumulator.
4148 defm VABALs : N3VLIntExtOp_QHS<0,1,0b0101,0, IIC_VABAD,
4149 "vabal", "s", int_arm_neon_vabds, zext, add>;
4150 defm VABALu : N3VLIntExtOp_QHS<1,1,0b0101,0, IIC_VABAD,
4151 "vabal", "u", int_arm_neon_vabdu, zext, add>;
4153 // Vector Maximum and Minimum.
4155 // VMAX : Vector Maximum
// Signed/unsigned integer element-wise maximum over 8/16/32-bit elements;
// commutable, selected via the vmax intrinsics.
4156 defm VMAXs : N3VInt_QHS<0, 0, 0b0110, 0, N3RegFrm,
4157 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4158 "vmax", "s", int_arm_neon_vmaxs, 1>;
4159 defm VMAXu : N3VInt_QHS<1, 0, 0b0110, 0, N3RegFrm,
4160 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4161 "vmax", "u", int_arm_neon_vmaxu, 1>;
4162 def VMAXfd : N3VDInt<0, 0, 0b00, 0b1111, 0, N3RegFrm, IIC_VBIND,
4164 v2f32, v2f32, int_arm_neon_vmaxs, 1>;
4165 def VMAXfq : N3VQInt<0, 0, 0b00, 0b1111, 0, N3RegFrm, IIC_VBINQ,
4167 v4f32, v4f32, int_arm_neon_vmaxs, 1>;
4169 // VMIN : Vector Minimum
// Mirrors the VMAX defms above; distinguished only by op4 = 1 and the
// vmin intrinsics.
4170 defm VMINs : N3VInt_QHS<0, 0, 0b0110, 1, N3RegFrm,
4171 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4172 "vmin", "s", int_arm_neon_vmins, 1>;
4173 defm VMINu : N3VInt_QHS<1, 0, 0b0110, 1, N3RegFrm,
4174 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4175 "vmin", "u", int_arm_neon_vminu, 1>;
4176 def VMINfd : N3VDInt<0, 0, 0b10, 0b1111, 0, N3RegFrm, IIC_VBIND,
4178 v2f32, v2f32, int_arm_neon_vmins, 1>;
4179 def VMINfq : N3VQInt<0, 0, 0b10, 0b1111, 0, N3RegFrm, IIC_VBINQ,
4181 v4f32, v4f32, int_arm_neon_vmins, 1>;
4183 // Vector Pairwise Operations.
4185 // VPADD : Vector Pairwise Add
4186 def VPADDi8 : N3VDInt<0, 0, 0b00, 0b1011, 1, N3RegFrm, IIC_VSHLiD,
4188 v8i8, v8i8, int_arm_neon_vpadd, 0>;
4189 def VPADDi16 : N3VDInt<0, 0, 0b01, 0b1011, 1, N3RegFrm, IIC_VSHLiD,
4191 v4i16, v4i16, int_arm_neon_vpadd, 0>;
4192 def VPADDi32 : N3VDInt<0, 0, 0b10, 0b1011, 1, N3RegFrm, IIC_VSHLiD,
4194 v2i32, v2i32, int_arm_neon_vpadd, 0>;
// VPADD (f32): pairwise add of adjacent elements; D-register only, not
// commutable (operand order determines result element order).
4195 def VPADDf : N3VDInt<1, 0, 0b00, 0b1101, 0, N3RegFrm,
4196 IIC_VPBIND, "vpadd", "f32",
4197 v2f32, v2f32, int_arm_neon_vpadd, 0>;
4199 // VPADDL : Vector Pairwise Add Long
// Widening pairwise add over 8/16/32-bit elements, signed and unsigned.
4200 defm VPADDLs : N2VPLInt_QHS<0b11, 0b11, 0b00, 0b00100, 0, "vpaddl", "s",
4201 int_arm_neon_vpaddls>;
4202 defm VPADDLu : N2VPLInt_QHS<0b11, 0b11, 0b00, 0b00101, 0, "vpaddl", "u",
4203 int_arm_neon_vpaddlu>;
4205 // VPADAL : Vector Pairwise Add and Accumulate Long
// Like VPADDL but accumulating into the destination.
4206 defm VPADALs : N2VPLInt2_QHS<0b11, 0b11, 0b00, 0b01100, 0, "vpadal", "s",
4207 int_arm_neon_vpadals>;
4208 defm VPADALu : N2VPLInt2_QHS<0b11, 0b11, 0b00, 0b01101, 0, "vpadal", "u",
4209 int_arm_neon_vpadalu>;
4211 // VPMAX : Vector Pairwise Maximum
// D-register only, one def per element type (s8/s16/s32/u8/u16/u32/f32);
// not commutable. VPMIN below differs only in op4 = 1 for the integer
// forms and the op21_20 field for the f32 form, plus the vpmin intrinsics.
4212 def VPMAXs8 : N3VDInt<0, 0, 0b00, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4213 "s8", v8i8, v8i8, int_arm_neon_vpmaxs, 0>;
4214 def VPMAXs16 : N3VDInt<0, 0, 0b01, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4215 "s16", v4i16, v4i16, int_arm_neon_vpmaxs, 0>;
4216 def VPMAXs32 : N3VDInt<0, 0, 0b10, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4217 "s32", v2i32, v2i32, int_arm_neon_vpmaxs, 0>;
4218 def VPMAXu8 : N3VDInt<1, 0, 0b00, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4219 "u8", v8i8, v8i8, int_arm_neon_vpmaxu, 0>;
4220 def VPMAXu16 : N3VDInt<1, 0, 0b01, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4221 "u16", v4i16, v4i16, int_arm_neon_vpmaxu, 0>;
4222 def VPMAXu32 : N3VDInt<1, 0, 0b10, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4223 "u32", v2i32, v2i32, int_arm_neon_vpmaxu, 0>;
4224 def VPMAXf : N3VDInt<1, 0, 0b00, 0b1111, 0, N3RegFrm, IIC_VPBIND, "vpmax",
4225 "f32", v2f32, v2f32, int_arm_neon_vpmaxs, 0>;
4227 // VPMIN : Vector Pairwise Minimum
4228 def VPMINs8 : N3VDInt<0, 0, 0b00, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4229 "s8", v8i8, v8i8, int_arm_neon_vpmins, 0>;
4230 def VPMINs16 : N3VDInt<0, 0, 0b01, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4231 "s16", v4i16, v4i16, int_arm_neon_vpmins, 0>;
4232 def VPMINs32 : N3VDInt<0, 0, 0b10, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4233 "s32", v2i32, v2i32, int_arm_neon_vpmins, 0>;
4234 def VPMINu8 : N3VDInt<1, 0, 0b00, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4235 "u8", v8i8, v8i8, int_arm_neon_vpminu, 0>;
4236 def VPMINu16 : N3VDInt<1, 0, 0b01, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4237 "u16", v4i16, v4i16, int_arm_neon_vpminu, 0>;
4238 def VPMINu32 : N3VDInt<1, 0, 0b10, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4239 "u32", v2i32, v2i32, int_arm_neon_vpminu, 0>;
4240 def VPMINf : N3VDInt<1, 0, 0b10, 0b1111, 0, N3RegFrm, IIC_VPBIND, "vpmin",
4241 "f32", v2f32, v2f32, int_arm_neon_vpmins, 0>;
4243 // Vector Reciprocal and Reciprocal Square Root Estimate and Step.
4245 // VRECPE : Vector Reciprocal Estimate
// Unary estimate; u32 and f32 element forms, D and Q variants, all via the
// same int_arm_neon_vrecpe intrinsic (overloaded by vector type).
4246 def VRECPEd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0,
4247 IIC_VUNAD, "vrecpe", "u32",
4248 v2i32, v2i32, int_arm_neon_vrecpe>;
4249 def VRECPEq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0,
4250 IIC_VUNAQ, "vrecpe", "u32",
4251 v4i32, v4i32, int_arm_neon_vrecpe>;
4252 def VRECPEfd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01010, 0,
4253 IIC_VUNAD, "vrecpe", "f32",
4254 v2f32, v2f32, int_arm_neon_vrecpe>;
4255 def VRECPEfq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01010, 0,
4256 IIC_VUNAQ, "vrecpe", "f32",
4257 v4f32, v4f32, int_arm_neon_vrecpe>;
4259 // VRECPS : Vector Reciprocal Step
// Binary Newton-Raphson step used to refine a VRECPE estimate; f32 only,
// commutable.
4260 def VRECPSfd : N3VDInt<0, 0, 0b00, 0b1111, 1, N3RegFrm,
4261 IIC_VRECSD, "vrecps", "f32",
4262 v2f32, v2f32, int_arm_neon_vrecps, 1>;
4263 def VRECPSfq : N3VQInt<0, 0, 0b00, 0b1111, 1, N3RegFrm,
4264 IIC_VRECSQ, "vrecps", "f32",
4265 v4f32, v4f32, int_arm_neon_vrecps, 1>;
4267 // VRSQRTE : Vector Reciprocal Square Root Estimate
// Mirrors the VRECPE block above (op11_7 0b01001/0b01011 instead of
// 0b01000/0b01010); u32 and f32 forms, D and Q variants.
4268 def VRSQRTEd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01001, 0,
4269 IIC_VUNAD, "vrsqrte", "u32",
4270 v2i32, v2i32, int_arm_neon_vrsqrte>;
4271 def VRSQRTEq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01001, 0,
4272 IIC_VUNAQ, "vrsqrte", "u32",
4273 v4i32, v4i32, int_arm_neon_vrsqrte>;
4274 def VRSQRTEfd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01011, 0,
4275 IIC_VUNAD, "vrsqrte", "f32",
4276 v2f32, v2f32, int_arm_neon_vrsqrte>;
4277 def VRSQRTEfq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01011, 0,
4278 IIC_VUNAQ, "vrsqrte", "f32",
4279 v4f32, v4f32, int_arm_neon_vrsqrte>;
4281 // VRSQRTS : Vector Reciprocal Square Root Step
// Refinement step paired with VRSQRTE; f32 only, commutable.
4282 def VRSQRTSfd : N3VDInt<0, 0, 0b10, 0b1111, 1, N3RegFrm,
4283 IIC_VRECSD, "vrsqrts", "f32",
4284 v2f32, v2f32, int_arm_neon_vrsqrts, 1>;
4285 def VRSQRTSfq : N3VQInt<0, 0, 0b10, 0b1111, 1, N3RegFrm,
4286 IIC_VRECSQ, "vrsqrts", "f32",
4287 v4f32, v4f32, int_arm_neon_vrsqrts, 1>;
4291 // VSHL : Vector Shift
// Register-shift forms over 8/16/32/64-bit elements (QHSD); the sign of
// the shift count in the register selects direction per the intrinsic.
4292 defm VSHLs : N3VInt_QHSDSh<0, 0, 0b0100, 0, N3RegVShFrm,
4293 IIC_VSHLiD, IIC_VSHLiD, IIC_VSHLiQ, IIC_VSHLiQ,
4294 "vshl", "s", int_arm_neon_vshifts>;
4295 defm VSHLu : N3VInt_QHSDSh<1, 0, 0b0100, 0, N3RegVShFrm,
4296 IIC_VSHLiD, IIC_VSHLiD, IIC_VSHLiQ, IIC_VSHLiQ,
4297 "vshl", "u", int_arm_neon_vshiftu>;
4299 // VSHL : Vector Shift Left (Immediate)
4300 defm VSHLi : N2VShL_QHSD<0, 1, 0b0101, 1, IIC_VSHLiD, "vshl", "i", NEONvshl>;
4302 // VSHR : Vector Shift Right (Immediate)
// Separate signed (arithmetic) and unsigned (logical) right-shift nodes.
4303 defm VSHRs : N2VShR_QHSD<0, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "s",NEONvshrs>;
4304 defm VSHRu : N2VShR_QHSD<1, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "u",NEONvshru>;
4306 // VSHLL : Vector Shift Left Long
// Widening immediate shift: D source, Q destination.
4307 defm VSHLLs : N2VLSh_QHS<0, 1, 0b1010, 0, 0, 1, "vshll", "s", NEONvshlls>;
4308 defm VSHLLu : N2VLSh_QHS<1, 1, 0b1010, 0, 0, 1, "vshll", "u", NEONvshllu>;
4310 // VSHLL : Vector Shift Left Long (with maximum shift count)
4311 class N2VLShMax<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
4312 bit op6, bit op4, string OpcodeStr, string Dt, ValueType ResTy,
4313 ValueType OpTy, SDNode OpNode>
4314 : N2VLSh<op24, op23, op11_8, op7, op6, op4, OpcodeStr, Dt,
4315 ResTy, OpTy, OpNode> {
4316 let Inst{21-16} = op21_16;
4317 let DecoderMethod = "DecodeVSHLMaxInstruction";
// VSHLL #<size> encodings (shift count equal to the element width), one
// per source element type, built on the N2VLShMax class defined above.
4319 def VSHLLi8 : N2VLShMax<1, 1, 0b110010, 0b0011, 0, 0, 0, "vshll", "i8",
4320 v8i16, v8i8, NEONvshlli>;
4321 def VSHLLi16 : N2VLShMax<1, 1, 0b110110, 0b0011, 0, 0, 0, "vshll", "i16",
4322 v4i32, v4i16, NEONvshlli>;
4323 def VSHLLi32 : N2VLShMax<1, 1, 0b111010, 0b0011, 0, 0, 0, "vshll", "i32",
4324 v2i64, v2i32, NEONvshlli>;
4326 // VSHRN : Vector Shift Right and Narrow
4327 defm VSHRN : N2VNSh_HSD<0,1,0b1000,0,0,1, IIC_VSHLiD, "vshrn", "i",
4330 // VRSHL : Vector Rounding Shift
// Register-shift forms with rounding; same structure as VSHLs/VSHLu but
// op11_8 = 0b0101 and the rounding-shift intrinsics.
4331 defm VRSHLs : N3VInt_QHSDSh<0, 0, 0b0101, 0, N3RegVShFrm,
4332 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4333 "vrshl", "s", int_arm_neon_vrshifts>;
4334 defm VRSHLu : N3VInt_QHSDSh<1, 0, 0b0101, 0, N3RegVShFrm,
4335 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4336 "vrshl", "u", int_arm_neon_vrshiftu>;
4337 // VRSHR : Vector Rounding Shift Right
4338 defm VRSHRs : N2VShR_QHSD<0,1,0b0010,1, IIC_VSHLi4D, "vrshr", "s",NEONvrshrs>;
4339 defm VRSHRu : N2VShR_QHSD<1,1,0b0010,1, IIC_VSHLi4D, "vrshr", "u",NEONvrshru>;
4341 // VRSHRN : Vector Rounding Shift Right and Narrow
4342 defm VRSHRN : N2VNSh_HSD<0, 1, 0b1000, 0, 1, 1, IIC_VSHLi4D, "vrshrn", "i",
4345 // VQSHL : Vector Saturating Shift
// Register-shift forms with saturation, over QHSD element sizes.
4346 defm VQSHLs : N3VInt_QHSDSh<0, 0, 0b0100, 1, N3RegVShFrm,
4347 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4348 "vqshl", "s", int_arm_neon_vqshifts>;
4349 defm VQSHLu : N3VInt_QHSDSh<1, 0, 0b0100, 1, N3RegVShFrm,
4350 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4351 "vqshl", "u", int_arm_neon_vqshiftu>;
4352 // VQSHL : Vector Saturating Shift Left (Immediate)
4353 defm VQSHLsi : N2VShL_QHSD<0,1,0b0111,1, IIC_VSHLi4D, "vqshl", "s",NEONvqshls>;
4354 defm VQSHLui : N2VShL_QHSD<1,1,0b0111,1, IIC_VSHLi4D, "vqshl", "u",NEONvqshlu>;
4356 // VQSHLU : Vector Saturating Shift Left (Immediate, Unsigned)
// Signed input saturated to an unsigned result (hence the "s" data-type
// suffix on an unsigned-result instruction).
4357 defm VQSHLsu : N2VShL_QHSD<1,1,0b0110,1, IIC_VSHLi4D,"vqshlu","s",NEONvqshlsu>;
4359 // VQSHRN : Vector Saturating Shift Right and Narrow
4360 defm VQSHRNs : N2VNSh_HSD<0, 1, 0b1001, 0, 0, 1, IIC_VSHLi4D, "vqshrn", "s",
4362 defm VQSHRNu : N2VNSh_HSD<1, 1, 0b1001, 0, 0, 1, IIC_VSHLi4D, "vqshrn", "u",
4365 // VQSHRUN : Vector Saturating Shift Right and Narrow (Unsigned)
4366 defm VQSHRUN : N2VNSh_HSD<1, 1, 0b1000, 0, 0, 1, IIC_VSHLi4D, "vqshrun", "s",
4369 // VQRSHL : Vector Saturating Rounding Shift
// Combines the rounding of VRSHL with the saturation of VQSHL.
4370 defm VQRSHLs : N3VInt_QHSDSh<0, 0, 0b0101, 1, N3RegVShFrm,
4371 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4372 "vqrshl", "s", int_arm_neon_vqrshifts>;
4373 defm VQRSHLu : N3VInt_QHSDSh<1, 0, 0b0101, 1, N3RegVShFrm,
4374 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4375 "vqrshl", "u", int_arm_neon_vqrshiftu>;
4377 // VQRSHRN : Vector Saturating Rounding Shift Right and Narrow
4378 defm VQRSHRNs : N2VNSh_HSD<0, 1, 0b1001, 0, 1, 1, IIC_VSHLi4D, "vqrshrn", "s",
4380 defm VQRSHRNu : N2VNSh_HSD<1, 1, 0b1001, 0, 1, 1, IIC_VSHLi4D, "vqrshrn", "u",
4383 // VQRSHRUN : Vector Saturating Rounding Shift Right and Narrow (Unsigned)
4384 defm VQRSHRUN : N2VNSh_HSD<1, 1, 0b1000, 0, 1, 1, IIC_VSHLi4D, "vqrshrun", "s",
4387 // VSRA : Vector Shift Right and Accumulate
// Immediate right shift whose result is added into the destination.
4388 defm VSRAs : N2VShAdd_QHSD<0, 1, 0b0001, 1, "vsra", "s", NEONvshrs>;
4389 defm VSRAu : N2VShAdd_QHSD<1, 1, 0b0001, 1, "vsra", "u", NEONvshru>;
4390 // VRSRA : Vector Rounding Shift Right and Accumulate
4391 defm VRSRAs : N2VShAdd_QHSD<0, 1, 0b0011, 1, "vrsra", "s", NEONvrshrs>;
4392 defm VRSRAu : N2VShAdd_QHSD<1, 1, 0b0011, 1, "vrsra", "u", NEONvrshru>;
4394 // VSLI : Vector Shift Left and Insert
4395 defm VSLI : N2VShInsL_QHSD<1, 1, 0b0101, 1, "vsli">;
4397 // VSRI : Vector Shift Right and Insert
4398 defm VSRI : N2VShInsR_QHSD<1, 1, 0b0100, 1, "vsri">;
4400 // Vector Absolute and Saturating Absolute.
4402 // VABS : Vector Absolute Value
4403 defm VABS : N2VInt_QHS<0b11, 0b11, 0b01, 0b00110, 0,
4404 IIC_VUNAiD, IIC_VUNAiQ, "vabs", "s",
// VABS (floating-point): f32 absolute value, D and Q forms, selected via
// the int_arm_neon_vabs intrinsic.
4406 def VABSfd : N2VDInt<0b11, 0b11, 0b10, 0b01, 0b01110, 0,
4407 IIC_VUNAD, "vabs", "f32",
4408 v2f32, v2f32, int_arm_neon_vabs>;
4409 def VABSfq : N2VQInt<0b11, 0b11, 0b10, 0b01, 0b01110, 0,
4410 IIC_VUNAQ, "vabs", "f32",
4411 v4f32, v4f32, int_arm_neon_vabs>;
4413 // VQABS : Vector Saturating Absolute Value
// Signed integer absolute value with saturation, over QHS element sizes.
4414 defm VQABS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01110, 0,
4415 IIC_VQUNAiD, IIC_VQUNAiQ, "vqabs", "s",
4416 int_arm_neon_vqabs>;
// vnegd/vnegq: selection fragments expressing negation as 0 - x so plain
// integer sub DAGs can match the VNEG instructions.
4420 def vnegd : PatFrag<(ops node:$in),
4421 (sub (bitconvert (v2i32 NEONimmAllZerosV)), node:$in)>;
4422 def vnegq : PatFrag<(ops node:$in),
4423 (sub (bitconvert (v4i32 NEONimmAllZerosV)), node:$in)>;
// Shared templates for the integer VNEG D- and Q-register forms; only the
// size field and value type vary per instantiation below.
4425 class VNEGD<bits<2> size, string OpcodeStr, string Dt, ValueType Ty>
4426 : N2V<0b11, 0b11, size, 0b01, 0b00111, 0, 0, (outs DPR:$Vd), (ins DPR:$Vm),
4427 IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm", "",
4428 [(set DPR:$Vd, (Ty (vnegd DPR:$Vm)))]>;
4429 class VNEGQ<bits<2> size, string OpcodeStr, string Dt, ValueType Ty>
4430 : N2V<0b11, 0b11, size, 0b01, 0b00111, 1, 0, (outs QPR:$Vd), (ins QPR:$Vm),
4431 IIC_VSHLiQ, OpcodeStr, Dt, "$Vd, $Vm", "",
4432 [(set QPR:$Vd, (Ty (vnegq QPR:$Vm)))]>;
4434 // VNEG : Vector Negate (integer)
4435 def VNEGs8d : VNEGD<0b00, "vneg", "s8", v8i8>;
4436 def VNEGs16d : VNEGD<0b01, "vneg", "s16", v4i16>;
4437 def VNEGs32d : VNEGD<0b10, "vneg", "s32", v2i32>;
4438 def VNEGs8q : VNEGQ<0b00, "vneg", "s8", v16i8>;
4439 def VNEGs16q : VNEGQ<0b01, "vneg", "s16", v8i16>;
4440 def VNEGs32q : VNEGQ<0b10, "vneg", "s32", v4i32>;
4442 // VNEG : Vector Negate (floating-point)
// Matched directly from the fneg DAG node (no fragment needed).
4443 def VNEGfd : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 0, 0,
4444 (outs DPR:$Vd), (ins DPR:$Vm), IIC_VUNAD,
4445 "vneg", "f32", "$Vd, $Vm", "",
4446 [(set DPR:$Vd, (v2f32 (fneg DPR:$Vm)))]>;
4447 def VNEGf32q : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 1, 0,
4448 (outs QPR:$Vd), (ins QPR:$Vm), IIC_VUNAQ,
4449 "vneg", "f32", "$Vd, $Vm", "",
4450 [(set QPR:$Vd, (v4f32 (fneg QPR:$Vm)))]>;
// Extra patterns mapping vnegd/vnegq at each element type onto the
// corresponding VNEG instruction.
4452 def : Pat<(v8i8 (vnegd DPR:$src)), (VNEGs8d DPR:$src)>;
4453 def : Pat<(v4i16 (vnegd DPR:$src)), (VNEGs16d DPR:$src)>;
4454 def : Pat<(v2i32 (vnegd DPR:$src)), (VNEGs32d DPR:$src)>;
4455 def : Pat<(v16i8 (vnegq QPR:$src)), (VNEGs8q QPR:$src)>;
4456 def : Pat<(v8i16 (vnegq QPR:$src)), (VNEGs16q QPR:$src)>;
4457 def : Pat<(v4i32 (vnegq QPR:$src)), (VNEGs32q QPR:$src)>;
4459 // VQNEG : Vector Saturating Negate
4460 defm VQNEG : N2VInt_QHS<0b11, 0b11, 0b00, 0b01111, 0,
4461 IIC_VQUNAiD, IIC_VQUNAiQ, "vqneg", "s",
4462 int_arm_neon_vqneg>;
4464 // Vector Bit Counting Operations.
4466 // VCLS : Vector Count Leading Sign Bits
4467 defm VCLS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01000, 0,
4468 IIC_VCNTiD, IIC_VCNTiQ, "vcls", "s",
4470 // VCLZ : Vector Count Leading Zeros
4471 defm VCLZ : N2VInt_QHS<0b11, 0b11, 0b00, 0b01001, 0,
4472 IIC_VCNTiD, IIC_VCNTiQ, "vclz", "i",
4474 // VCNT : Vector Count One Bits
// Population count on 8-bit elements only.
4475 def VCNTd : N2VDInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0,
4476 IIC_VCNTiD, "vcnt", "8",
4477 v8i8, v8i8, int_arm_neon_vcnt>;
4478 def VCNTq : N2VQInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0,
4479 IIC_VCNTiQ, "vcnt", "8",
4480 v16i8, v16i8, int_arm_neon_vcnt>;
// VSWP: register swap, assembler/disassembler only (empty pattern list —
// not produced by instruction selection).
4483 def VSWPd : N2VX<0b11, 0b11, 0b00, 0b10, 0b00000, 0, 0,
4484 (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
4485 "vswp", "$Vd, $Vm", "", []>;
4486 def VSWPq : N2VX<0b11, 0b11, 0b00, 0b10, 0b00000, 1, 0,
4487 (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
4488 "vswp", "$Vd, $Vm", "", []>;
4490 // Vector Move Operations.
4492 // VMOV : Vector Move (Register)
// Assembler aliases only: register-to-register vmov is encoded as a VORR
// with both source operands the same register.
4493 def : InstAlias<"vmov${p} $Vd, $Vm",
4494 (VORRd DPR:$Vd, DPR:$Vm, DPR:$Vm, pred:$p)>;
4495 def : InstAlias<"vmov${p} $Vd, $Vm",
4496 (VORRq QPR:$Vd, QPR:$Vm, QPR:$Vm, pred:$p)>;
// Accept any non-f64 data-type suffix on the vmov mnemonic as well.
4497 defm : VFPDTAnyNoF64InstAlias<"vmov${p}", "$Vd, $Vm",
4498 (VORRd DPR:$Vd, DPR:$Vm, DPR:$Vm, pred:$p)>;
4499 defm : VFPDTAnyNoF64InstAlias<"vmov${p}", "$Vd, $Vm",
4500 (VORRq QPR:$Vd, QPR:$Vm, QPR:$Vm, pred:$p)>;
4502 // VMOV : Vector Move (Immediate)
4504 let isReMaterializable = 1 in {
// VMOV (immediate), i8 splat forms; inside the enclosing
// isReMaterializable region, so these can be rematerialized cheaply.
4505 def VMOVv8i8 : N1ModImm<1, 0b000, 0b1110, 0, 0, 0, 1, (outs DPR:$Vd),
4506 (ins nImmSplatI8:$SIMM), IIC_VMOVImm,
4507 "vmov", "i8", "$Vd, $SIMM", "",
4508 [(set DPR:$Vd, (v8i8 (NEONvmovImm timm:$SIMM)))]>;
4509 def VMOVv16i8 : N1ModImm<1, 0b000, 0b1110, 0, 1, 0, 1, (outs QPR:$Vd),
4510 (ins nImmSplatI8:$SIMM), IIC_VMOVImm,
4511 "vmov", "i8", "$Vd, $SIMM", "",
4512 [(set QPR:$Vd, (v16i8 (NEONvmovImm timm:$SIMM)))]>;
4514 def VMOVv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 0, 1, (outs DPR:$Vd),
4515 (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
4516 "vmov", "i16", "$Vd, $SIMM", "",
4517 [(set DPR:$Vd, (v4i16 (NEONvmovImm timm:$SIMM)))]> {
4518 let Inst{9} = SIMM{9};
4521 def VMOVv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 0, 1, (outs QPR:$Vd),
4522 (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
4523 "vmov", "i16", "$Vd, $SIMM", "",
4524 [(set QPR:$Vd, (v8i16 (NEONvmovImm timm:$SIMM)))]> {
4525 let Inst{9} = SIMM{9};
4528 def VMOVv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 0, 1, (outs DPR:$Vd),
4529 (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
4530 "vmov", "i32", "$Vd, $SIMM", "",
4531 [(set DPR:$Vd, (v2i32 (NEONvmovImm timm:$SIMM)))]> {
4532 let Inst{11-8} = SIMM{11-8};
4535 def VMOVv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 0, 1, (outs QPR:$Vd),
4536 (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
4537 "vmov", "i32", "$Vd, $SIMM", "",
4538 [(set QPR:$Vd, (v4i32 (NEONvmovImm timm:$SIMM)))]> {
4539 let Inst{11-8} = SIMM{11-8};
// VMOV (immediate), i64 and f32 forms. The i64 forms take a byte-mask
// splat immediate; the f32 forms take a VFP-style encoded FP immediate.
4542 def VMOVv1i64 : N1ModImm<1, 0b000, 0b1110, 0, 0, 1, 1, (outs DPR:$Vd),
4543 (ins nImmSplatI64:$SIMM), IIC_VMOVImm,
4544 "vmov", "i64", "$Vd, $SIMM", "",
4545 [(set DPR:$Vd, (v1i64 (NEONvmovImm timm:$SIMM)))]>;
4546 def VMOVv2i64 : N1ModImm<1, 0b000, 0b1110, 0, 1, 1, 1, (outs QPR:$Vd),
4547 (ins nImmSplatI64:$SIMM), IIC_VMOVImm,
4548 "vmov", "i64", "$Vd, $SIMM", "",
4549 [(set QPR:$Vd, (v2i64 (NEONvmovImm timm:$SIMM)))]>;
4551 def VMOVv2f32 : N1ModImm<1, 0b000, 0b1111, 0, 0, 0, 1, (outs DPR:$Vd),
4552 (ins nImmVMOVF32:$SIMM), IIC_VMOVImm,
4553 "vmov", "f32", "$Vd, $SIMM", "",
4554 [(set DPR:$Vd, (v2f32 (NEONvmovFPImm timm:$SIMM)))]>;
4555 def VMOVv4f32 : N1ModImm<1, 0b000, 0b1111, 0, 1, 0, 1, (outs QPR:$Vd),
4556 (ins nImmVMOVF32:$SIMM), IIC_VMOVImm,
4557 "vmov", "f32", "$Vd, $SIMM", "",
4558 [(set QPR:$Vd, (v4f32 (NEONvmovFPImm timm:$SIMM)))]>;
4559 } // isReMaterializable
4561 // VMOV : Vector Get Lane (move scalar to ARM core register)
4563 def VGETLNs8 : NVGetLane<{1,1,1,0,0,1,?,1}, 0b1011, {?,?},
4564 (outs GPR:$R), (ins DPR:$V, VectorIndex8:$lane),
4565 IIC_VMOVSI, "vmov", "s8", "$R, $V$lane",
4566 [(set GPR:$R, (NEONvgetlanes (v8i8 DPR:$V),
4568 let Inst{21} = lane{2};
4569 let Inst{6-5} = lane{1-0};
4571 def VGETLNs16 : NVGetLane<{1,1,1,0,0,0,?,1}, 0b1011, {?,1},
4572 (outs GPR:$R), (ins DPR:$V, VectorIndex16:$lane),
4573 IIC_VMOVSI, "vmov", "s16", "$R, $V$lane",
4574 [(set GPR:$R, (NEONvgetlanes (v4i16 DPR:$V),
4576 let Inst{21} = lane{1};
4577 let Inst{6} = lane{0};
4579 def VGETLNu8 : NVGetLane<{1,1,1,0,1,1,?,1}, 0b1011, {?,?},
4580 (outs GPR:$R), (ins DPR:$V, VectorIndex8:$lane),
4581 IIC_VMOVSI, "vmov", "u8", "$R, $V$lane",
4582 [(set GPR:$R, (NEONvgetlaneu (v8i8 DPR:$V),
4584 let Inst{21} = lane{2};
4585 let Inst{6-5} = lane{1-0};
4587 def VGETLNu16 : NVGetLane<{1,1,1,0,1,0,?,1}, 0b1011, {?,1},
4588 (outs GPR:$R), (ins DPR:$V, VectorIndex16:$lane),
4589 IIC_VMOVSI, "vmov", "u16", "$R, $V$lane",
4590 [(set GPR:$R, (NEONvgetlaneu (v4i16 DPR:$V),
4592 let Inst{21} = lane{1};
4593 let Inst{6} = lane{0};
4595 def VGETLNi32 : NVGetLane<{1,1,1,0,0,0,?,1}, 0b1011, 0b00,
4596 (outs GPR:$R), (ins DPR:$V, VectorIndex32:$lane),
4597 IIC_VMOVSI, "vmov", "32", "$R, $V$lane",
4598 [(set GPR:$R, (extractelt (v2i32 DPR:$V),
4600 let Inst{21} = lane{0};
4602 // def VGETLNf32: see FMRDH and FMRDL in ARMInstrVFP.td
// Q-register lane extracts are lowered by extracting the containing
// D subregister (DSubReg_*_reg) and reindexing the lane within it
// (SubReg_*_lane), then using the D-form VGETLN instruction.
4603 def : Pat<(NEONvgetlanes (v16i8 QPR:$src), imm:$lane),
4604 (VGETLNs8 (v8i8 (EXTRACT_SUBREG QPR:$src,
4605 (DSubReg_i8_reg imm:$lane))),
4606 (SubReg_i8_lane imm:$lane))>;
4607 def : Pat<(NEONvgetlanes (v8i16 QPR:$src), imm:$lane),
4608 (VGETLNs16 (v4i16 (EXTRACT_SUBREG QPR:$src,
4609 (DSubReg_i16_reg imm:$lane))),
4610 (SubReg_i16_lane imm:$lane))>;
4611 def : Pat<(NEONvgetlaneu (v16i8 QPR:$src), imm:$lane),
4612 (VGETLNu8 (v8i8 (EXTRACT_SUBREG QPR:$src,
4613 (DSubReg_i8_reg imm:$lane))),
4614 (SubReg_i8_lane imm:$lane))>;
4615 def : Pat<(NEONvgetlaneu (v8i16 QPR:$src), imm:$lane),
4616 (VGETLNu16 (v4i16 (EXTRACT_SUBREG QPR:$src,
4617 (DSubReg_i16_reg imm:$lane))),
4618 (SubReg_i16_lane imm:$lane))>;
4619 def : Pat<(extractelt (v4i32 QPR:$src), imm:$lane),
4620 (VGETLNi32 (v2i32 (EXTRACT_SUBREG QPR:$src,
4621 (DSubReg_i32_reg imm:$lane))),
4622 (SubReg_i32_lane imm:$lane))>;
// f32 lane extracts need no instruction: copy to a VFP-constrained class
// so the S subregister of the lane can be taken directly.
4623 def : Pat<(extractelt (v2f32 DPR:$src1), imm:$src2),
4624 (EXTRACT_SUBREG (v2f32 (COPY_TO_REGCLASS (v2f32 DPR:$src1),DPR_VFP2)),
4625 (SSubReg_f32_reg imm:$src2))>;
4626 def : Pat<(extractelt (v4f32 QPR:$src1), imm:$src2),
4627 (EXTRACT_SUBREG (v4f32 (COPY_TO_REGCLASS (v4f32 QPR:$src1),QPR_VFP2)),
4628 (SSubReg_f32_reg imm:$src2))>;
4629 //def : Pat<(extractelt (v2i64 QPR:$src1), imm:$src2),
4630 // (EXTRACT_SUBREG QPR:$src1, (DSubReg_f64_reg imm:$src2))>;
4631 def : Pat<(extractelt (v2f64 QPR:$src1), imm:$src2),
4632 (EXTRACT_SUBREG QPR:$src1, (DSubReg_f64_reg imm:$src2))>;
4635 // VMOV : Vector Set Lane (move ARM core register to scalar)
4637 let Constraints = "$src1 = $V" in {
4638 def VSETLNi8 : NVSetLane<{1,1,1,0,0,1,?,0}, 0b1011, {?,?}, (outs DPR:$V),
4639 (ins DPR:$src1, GPR:$R, VectorIndex8:$lane),
4640 IIC_VMOVISL, "vmov", "8", "$V$lane, $R",
4641 [(set DPR:$V, (vector_insert (v8i8 DPR:$src1),
4642 GPR:$R, imm:$lane))]> {
4643 let Inst{21} = lane{2};
4644 let Inst{6-5} = lane{1-0};
4646 def VSETLNi16 : NVSetLane<{1,1,1,0,0,0,?,0}, 0b1011, {?,1}, (outs DPR:$V),
4647 (ins DPR:$src1, GPR:$R, VectorIndex16:$lane),
4648 IIC_VMOVISL, "vmov", "16", "$V$lane, $R",
4649 [(set DPR:$V, (vector_insert (v4i16 DPR:$src1),
4650 GPR:$R, imm:$lane))]> {
4651 let Inst{21} = lane{1};
4652 let Inst{6} = lane{0};
4654 def VSETLNi32 : NVSetLane<{1,1,1,0,0,0,?,0}, 0b1011, 0b00, (outs DPR:$V),
4655 (ins DPR:$src1, GPR:$R, VectorIndex32:$lane),
4656 IIC_VMOVISL, "vmov", "32", "$V$lane, $R",
4657 [(set DPR:$V, (insertelt (v2i32 DPR:$src1),
4658 GPR:$R, imm:$lane))]> {
4659 let Inst{21} = lane{0};
// Q-register lane inserts: extract the containing D subregister, do the
// insert with the D-form VSETLN instruction, then insert the D register
// back into the Q register.
4662 def : Pat<(vector_insert (v16i8 QPR:$src1), GPR:$src2, imm:$lane),
4663 (v16i8 (INSERT_SUBREG QPR:$src1,
4664 (v8i8 (VSETLNi8 (v8i8 (EXTRACT_SUBREG QPR:$src1,
4665 (DSubReg_i8_reg imm:$lane))),
4666 GPR:$src2, (SubReg_i8_lane imm:$lane))),
4667 (DSubReg_i8_reg imm:$lane)))>;
4668 def : Pat<(vector_insert (v8i16 QPR:$src1), GPR:$src2, imm:$lane),
4669 (v8i16 (INSERT_SUBREG QPR:$src1,
4670 (v4i16 (VSETLNi16 (v4i16 (EXTRACT_SUBREG QPR:$src1,
4671 (DSubReg_i16_reg imm:$lane))),
4672 GPR:$src2, (SubReg_i16_lane imm:$lane))),
4673 (DSubReg_i16_reg imm:$lane)))>;
4674 def : Pat<(insertelt (v4i32 QPR:$src1), GPR:$src2, imm:$lane),
4675 (v4i32 (INSERT_SUBREG QPR:$src1,
4676 (v2i32 (VSETLNi32 (v2i32 (EXTRACT_SUBREG QPR:$src1,
4677 (DSubReg_i32_reg imm:$lane))),
4678 GPR:$src2, (SubReg_i32_lane imm:$lane))),
4679 (DSubReg_i32_reg imm:$lane)))>;
// f32 lane inserts need no instruction: constrain to a VFP-capable class
// and write the S subregister directly.
4681 def : Pat<(v2f32 (insertelt DPR:$src1, SPR:$src2, imm:$src3)),
4682 (INSERT_SUBREG (v2f32 (COPY_TO_REGCLASS DPR:$src1, DPR_VFP2)),
4683 SPR:$src2, (SSubReg_f32_reg imm:$src3))>;
4684 def : Pat<(v4f32 (insertelt QPR:$src1, SPR:$src2, imm:$src3)),
4685 (INSERT_SUBREG (v4f32 (COPY_TO_REGCLASS QPR:$src1, QPR_VFP2)),
4686 SPR:$src2, (SSubReg_f32_reg imm:$src3))>;
4688 //def : Pat<(v2i64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
4689 // (INSERT_SUBREG QPR:$src1, DPR:$src2, (DSubReg_f64_reg imm:$src3))>;
4690 def : Pat<(v2f64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
4691 (INSERT_SUBREG QPR:$src1, DPR:$src2, (DSubReg_f64_reg imm:$src3))>;
// scalar_to_vector: FP scalars are placed in lane 0 via a subregister
// insert into an undefined register; integer scalars use VSETLN at lane 0.
4693 def : Pat<(v2f32 (scalar_to_vector SPR:$src)),
4694 (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), SPR:$src, ssub_0)>;
4695 def : Pat<(v2f64 (scalar_to_vector (f64 DPR:$src))),
4696 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), DPR:$src, dsub_0)>;
4697 def : Pat<(v4f32 (scalar_to_vector SPR:$src)),
4698 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), SPR:$src, ssub_0)>;
4700 def : Pat<(v8i8 (scalar_to_vector GPR:$src)),
4701 (VSETLNi8 (v8i8 (IMPLICIT_DEF)), GPR:$src, (i32 0))>;
4702 def : Pat<(v4i16 (scalar_to_vector GPR:$src)),
4703 (VSETLNi16 (v4i16 (IMPLICIT_DEF)), GPR:$src, (i32 0))>;
4704 def : Pat<(v2i32 (scalar_to_vector GPR:$src)),
4705 (VSETLNi32 (v2i32 (IMPLICIT_DEF)), GPR:$src, (i32 0))>;
4707 def : Pat<(v16i8 (scalar_to_vector GPR:$src)),
4708 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4709 (VSETLNi8 (v8i8 (IMPLICIT_DEF)), GPR:$src, (i32 0)),
4711 def : Pat<(v8i16 (scalar_to_vector GPR:$src)),
4712 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
4713 (VSETLNi16 (v4i16 (IMPLICIT_DEF)), GPR:$src, (i32 0)),
4715 def : Pat<(v4i32 (scalar_to_vector GPR:$src)),
4716 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
4717 (VSETLNi32 (v2i32 (IMPLICIT_DEF)), GPR:$src, (i32 0)),
4720 // VDUP : Vector Duplicate (from ARM core register to all elements)
// VDUPD/VDUPQ: splat a GPR value into every element of a D/Q register.
// opcod1/opcod3 carry the size-dependent encoding bits; Dt is the assembler
// data-type suffix ("8"/"16"/"32").
4722 class VDUPD<bits<8> opcod1, bits<2> opcod3, string Dt, ValueType Ty>
4723 : NVDup<opcod1, 0b1011, opcod3, (outs DPR:$V), (ins GPR:$R),
4724 IIC_VMOVIS, "vdup", Dt, "$V, $R",
4725 [(set DPR:$V, (Ty (NEONvdup (i32 GPR:$R))))]>;
4726 class VDUPQ<bits<8> opcod1, bits<2> opcod3, string Dt, ValueType Ty>
4727 : NVDup<opcod1, 0b1011, opcod3, (outs QPR:$V), (ins GPR:$R),
4728 IIC_VMOVIS, "vdup", Dt, "$V, $R",
4729 [(set QPR:$V, (Ty (NEONvdup (i32 GPR:$R))))]>;
4731 def VDUP8d : VDUPD<0b11101100, 0b00, "8", v8i8>;
4732 def VDUP16d : VDUPD<0b11101000, 0b01, "16", v4i16>;
4733 def VDUP32d : VDUPD<0b11101000, 0b00, "32", v2i32>;
4734 def VDUP8q : VDUPQ<0b11101110, 0b00, "8", v16i8>;
4735 def VDUP16q : VDUPQ<0b11101010, 0b01, "16", v8i16>;
4736 def VDUP32q : VDUPQ<0b11101010, 0b00, "32", v4i32>;
// Splat of a f32 that is bit-identical to a GPR value reuses the i32 VDUP.
4738 def : Pat<(v2f32 (NEONvdup (f32 (bitconvert GPR:$R)))), (VDUP32d GPR:$R)>;
4739 def : Pat<(v4f32 (NEONvdup (f32 (bitconvert GPR:$R)))), (VDUP32q GPR:$R)>;
4741 // VDUP : Vector Duplicate Lane (from scalar to all elements)
// VDUPLND/VDUPLNQ: replicate one lane of a D register into all elements of a
// D/Q register. op19_16 is partially wildcarded ({?,...}) because the
// element size also steers where the lane index lands in Inst{19-16}.
4743 class VDUPLND<bits<4> op19_16, string OpcodeStr, string Dt,
4744 ValueType Ty, Operand IdxTy>
4745 : NVDupLane<op19_16, 0, (outs DPR:$Vd), (ins DPR:$Vm, IdxTy:$lane),
4746 IIC_VMOVD, OpcodeStr, Dt, "$Vd, $Vm$lane",
4747 [(set DPR:$Vd, (Ty (NEONvduplane (Ty DPR:$Vm), imm:$lane)))]>;
4749 class VDUPLNQ<bits<4> op19_16, string OpcodeStr, string Dt,
4750 ValueType ResTy, ValueType OpTy, Operand IdxTy>
4751 : NVDupLane<op19_16, 1, (outs QPR:$Vd), (ins DPR:$Vm, IdxTy:$lane),
4752 IIC_VMOVQ, OpcodeStr, Dt, "$Vd, $Vm$lane",
4753 [(set QPR:$Vd, (ResTy (NEONvduplane (OpTy DPR:$Vm),
4754 VectorIndex32:$lane)))]>;
4756 // Inst{19-16} is partially specified depending on the element size.
// NOTE(review): the dump skips lines between each def below (embedded
// numbering gaps, e.g. 4759/4761) — presumably the "bits<N> lane;"
// declarations and closing braces were dropped by extraction; verify
// against the original file.
4758 def VDUPLN8d : VDUPLND<{?,?,?,1}, "vdup", "8", v8i8, VectorIndex8> {
4760 let Inst{19-17} = lane{2-0};
4762 def VDUPLN16d : VDUPLND<{?,?,1,0}, "vdup", "16", v4i16, VectorIndex16> {
4764 let Inst{19-18} = lane{1-0};
4766 def VDUPLN32d : VDUPLND<{?,1,0,0}, "vdup", "32", v2i32, VectorIndex32> {
4768 let Inst{19} = lane{0};
4770 def VDUPLN8q : VDUPLNQ<{?,?,?,1}, "vdup", "8", v16i8, v8i8, VectorIndex8> {
4772 let Inst{19-17} = lane{2-0};
4774 def VDUPLN16q : VDUPLNQ<{?,?,1,0}, "vdup", "16", v8i16, v4i16, VectorIndex16> {
4776 let Inst{19-18} = lane{1-0};
4778 def VDUPLN32q : VDUPLNQ<{?,1,0,0}, "vdup", "32", v4i32, v2i32, VectorIndex32> {
4780 let Inst{19} = lane{0};
// f32 duplicate-lane reuses the 32-bit integer VDUPLN encodings.
4783 def : Pat<(v2f32 (NEONvduplane (v2f32 DPR:$Vm), imm:$lane)),
4784 (VDUPLN32d DPR:$Vm, imm:$lane)>;
4786 def : Pat<(v4f32 (NEONvduplane (v2f32 DPR:$Vm), imm:$lane)),
4787 (VDUPLN32q DPR:$Vm, imm:$lane)>;
// Duplicating a lane of a Q register: the instruction only reads a D source,
// so first extract the D subregister containing the lane, then rescale the
// lane index to that D register.
4789 def : Pat<(v16i8 (NEONvduplane (v16i8 QPR:$src), imm:$lane)),
4790 (v16i8 (VDUPLN8q (v8i8 (EXTRACT_SUBREG QPR:$src,
4791 (DSubReg_i8_reg imm:$lane))),
4792 (SubReg_i8_lane imm:$lane)))>;
4793 def : Pat<(v8i16 (NEONvduplane (v8i16 QPR:$src), imm:$lane)),
4794 (v8i16 (VDUPLN16q (v4i16 (EXTRACT_SUBREG QPR:$src,
4795 (DSubReg_i16_reg imm:$lane))),
4796 (SubReg_i16_lane imm:$lane)))>;
4797 def : Pat<(v4i32 (NEONvduplane (v4i32 QPR:$src), imm:$lane)),
4798 (v4i32 (VDUPLN32q (v2i32 (EXTRACT_SUBREG QPR:$src,
4799 (DSubReg_i32_reg imm:$lane))),
4800 (SubReg_i32_lane imm:$lane)))>;
4801 def : Pat<(v4f32 (NEONvduplane (v4f32 QPR:$src), imm:$lane)),
4802 (v4f32 (VDUPLN32q (v2f32 (EXTRACT_SUBREG QPR:$src,
4803 (DSubReg_i32_reg imm:$lane))),
4804 (SubReg_i32_lane imm:$lane)))>;
// Pseudos: splat a f32 held in an S register; expanded after register
// allocation (no encoding/asm string of their own).
4806 def VDUPfdf : PseudoNeonI<(outs DPR:$dst), (ins SPR:$src), IIC_VMOVD, "",
4807 [(set DPR:$dst, (v2f32 (NEONvdup (f32 SPR:$src))))]>;
4808 def VDUPfqf : PseudoNeonI<(outs QPR:$dst), (ins SPR:$src), IIC_VMOVD, "",
4809 [(set QPR:$dst, (v4f32 (NEONvdup (f32 SPR:$src))))]>;
4811 // VMOVN : Vector Narrowing Move
// Plain narrowing (truncate each element to half width).
4812 defm VMOVN : N2VN_HSD<0b11,0b11,0b10,0b00100,0,0, IIC_VMOVN,
4813 "vmovn", "i", trunc>;
4814 // VQMOVN : Vector Saturating Narrowing Move
// Saturating variants: signed, unsigned, and signed-to-unsigned (vqmovun).
4815 defm VQMOVNs : N2VNInt_HSD<0b11,0b11,0b10,0b00101,0,0, IIC_VQUNAiD,
4816 "vqmovn", "s", int_arm_neon_vqmovns>;
4817 defm VQMOVNu : N2VNInt_HSD<0b11,0b11,0b10,0b00101,1,0, IIC_VQUNAiD,
4818 "vqmovn", "u", int_arm_neon_vqmovnu>;
4819 defm VQMOVNsu : N2VNInt_HSD<0b11,0b11,0b10,0b00100,1,0, IIC_VQUNAiD,
4820 "vqmovun", "s", int_arm_neon_vqmovnsu>;
4821 // VMOVL : Vector Lengthening Move
// Widening moves: each element sign- or zero-extended to double width.
4822 defm VMOVLs : N2VL_QHS<0b01,0b10100,0,1, "vmovl", "s", sext>;
4823 defm VMOVLu : N2VL_QHS<0b11,0b10100,0,1, "vmovl", "u", zext>;
4825 // Vector Conversions.
4827 // VCVT : Vector Convert Between Floating-Point and Integers
// D-register (64-bit) variants: f32<->s32/u32, two elements at a time.
4828 def VCVTf2sd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt", "s32.f32",
4829 v2i32, v2f32, fp_to_sint>;
4830 def VCVTf2ud : N2VD<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt", "u32.f32",
4831 v2i32, v2f32, fp_to_uint>;
4832 def VCVTs2fd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt", "f32.s32",
4833 v2f32, v2i32, sint_to_fp>;
4834 def VCVTu2fd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt", "f32.u32",
4835 v2f32, v2i32, uint_to_fp>;
// Q-register (128-bit) variants: same conversions, four elements.
4837 def VCVTf2sq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt", "s32.f32",
4838 v4i32, v4f32, fp_to_sint>;
4839 def VCVTf2uq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt", "u32.f32",
4840 v4i32, v4f32, fp_to_uint>;
4841 def VCVTs2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt", "f32.s32",
4842 v4f32, v4i32, sint_to_fp>;
4843 def VCVTu2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt", "f32.u32",
4844 v4f32, v4i32, uint_to_fp>;
4846 // VCVT : Vector Convert Between Floating-Point and Fixed-Point.
// Fixed-point forms carry an immediate fraction-bit count, hence the
// dedicated decoder methods.
// NOTE(review): embedded numbering gaps after each region (e.g. 4856/4857,
// 4867/4868) suggest the closing braces of these "let DecoderMethod" regions
// were dropped by the extraction; verify against the original file.
4847 let DecoderMethod = "DecodeVCVTD" in {
4848 def VCVTf2xsd : N2VCvtD<0, 1, 0b1111, 0, 1, "vcvt", "s32.f32",
4849 v2i32, v2f32, int_arm_neon_vcvtfp2fxs>;
4850 def VCVTf2xud : N2VCvtD<1, 1, 0b1111, 0, 1, "vcvt", "u32.f32",
4851 v2i32, v2f32, int_arm_neon_vcvtfp2fxu>;
4852 def VCVTxs2fd : N2VCvtD<0, 1, 0b1110, 0, 1, "vcvt", "f32.s32",
4853 v2f32, v2i32, int_arm_neon_vcvtfxs2fp>;
4854 def VCVTxu2fd : N2VCvtD<1, 1, 0b1110, 0, 1, "vcvt", "f32.u32",
4855 v2f32, v2i32, int_arm_neon_vcvtfxu2fp>;
4858 let DecoderMethod = "DecodeVCVTQ" in {
4859 def VCVTf2xsq : N2VCvtQ<0, 1, 0b1111, 0, 1, "vcvt", "s32.f32",
4860 v4i32, v4f32, int_arm_neon_vcvtfp2fxs>;
4861 def VCVTf2xuq : N2VCvtQ<1, 1, 0b1111, 0, 1, "vcvt", "u32.f32",
4862 v4i32, v4f32, int_arm_neon_vcvtfp2fxu>;
4863 def VCVTxs2fq : N2VCvtQ<0, 1, 0b1110, 0, 1, "vcvt", "f32.s32",
4864 v4f32, v4i32, int_arm_neon_vcvtfxs2fp>;
4865 def VCVTxu2fq : N2VCvtQ<1, 1, 0b1110, 0, 1, "vcvt", "f32.u32",
4866 v4f32, v4i32, int_arm_neon_vcvtfxu2fp>;
4869 // VCVT : Vector Convert Between Half-Precision and Single-Precision.
// Half-precision conversions require the FP16 extension in addition to NEON.
4870 def VCVTf2h : N2VNInt<0b11, 0b11, 0b01, 0b10, 0b01100, 0, 0,
4871 IIC_VUNAQ, "vcvt", "f16.f32",
4872 v4i16, v4f32, int_arm_neon_vcvtfp2hf>,
4873 Requires<[HasNEON, HasFP16]>;
4874 def VCVTh2f : N2VLInt<0b11, 0b11, 0b01, 0b10, 0b01110, 0, 0,
4875 IIC_VUNAQ, "vcvt", "f32.f16",
4876 v4f32, v4i16, int_arm_neon_vcvthf2fp>,
4877 Requires<[HasNEON, HasFP16]>;
4881 // VREV64 : Vector Reverse elements within 64-bit doublewords
// VREV64D/Q: op19_18 selects the element size (00=8, 01=16, 10=32); the
// pattern lowers the NEONvrev64 node for the matching vector type.
4883 class VREV64D<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4884 : N2V<0b11, 0b11, op19_18, 0b00, 0b00000, 0, 0, (outs DPR:$Vd),
4885 (ins DPR:$Vm), IIC_VMOVD,
4886 OpcodeStr, Dt, "$Vd, $Vm", "",
4887 [(set DPR:$Vd, (Ty (NEONvrev64 (Ty DPR:$Vm))))]>;
4888 class VREV64Q<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4889 : N2V<0b11, 0b11, op19_18, 0b00, 0b00000, 1, 0, (outs QPR:$Vd),
4890 (ins QPR:$Vm), IIC_VMOVQ,
4891 OpcodeStr, Dt, "$Vd, $Vm", "",
4892 [(set QPR:$Vd, (Ty (NEONvrev64 (Ty QPR:$Vm))))]>;
4894 def VREV64d8 : VREV64D<0b00, "vrev64", "8", v8i8>;
4895 def VREV64d16 : VREV64D<0b01, "vrev64", "16", v4i16>;
4896 def VREV64d32 : VREV64D<0b10, "vrev64", "32", v2i32>;
// f32 vectors reuse the 32-bit integer encodings.
4897 def : Pat<(v2f32 (NEONvrev64 (v2f32 DPR:$Vm))), (VREV64d32 DPR:$Vm)>;
4899 def VREV64q8 : VREV64Q<0b00, "vrev64", "8", v16i8>;
4900 def VREV64q16 : VREV64Q<0b01, "vrev64", "16", v8i16>;
4901 def VREV64q32 : VREV64Q<0b10, "vrev64", "32", v4i32>;
4902 def : Pat<(v4f32 (NEONvrev64 (v4f32 QPR:$Vm))), (VREV64q32 QPR:$Vm)>;
4904 // VREV32 : Vector Reverse elements within 32-bit words
4906 class VREV32D<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4907 : N2V<0b11, 0b11, op19_18, 0b00, 0b00001, 0, 0, (outs DPR:$Vd),
4908 (ins DPR:$Vm), IIC_VMOVD,
4909 OpcodeStr, Dt, "$Vd, $Vm", "",
4910 [(set DPR:$Vd, (Ty (NEONvrev32 (Ty DPR:$Vm))))]>;
4911 class VREV32Q<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4912 : N2V<0b11, 0b11, op19_18, 0b00, 0b00001, 1, 0, (outs QPR:$Vd),
4913 (ins QPR:$Vm), IIC_VMOVQ,
4914 OpcodeStr, Dt, "$Vd, $Vm", "",
4915 [(set QPR:$Vd, (Ty (NEONvrev32 (Ty QPR:$Vm))))]>;
// Only 8- and 16-bit element sizes make sense within a 32-bit word.
4917 def VREV32d8 : VREV32D<0b00, "vrev32", "8", v8i8>;
4918 def VREV32d16 : VREV32D<0b01, "vrev32", "16", v4i16>;
4920 def VREV32q8 : VREV32Q<0b00, "vrev32", "8", v16i8>;
4921 def VREV32q16 : VREV32Q<0b01, "vrev32", "16", v8i16>;
4923 // VREV16 : Vector Reverse elements within 16-bit halfwords
4925 class VREV16D<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4926 : N2V<0b11, 0b11, op19_18, 0b00, 0b00010, 0, 0, (outs DPR:$Vd),
4927 (ins DPR:$Vm), IIC_VMOVD,
4928 OpcodeStr, Dt, "$Vd, $Vm", "",
4929 [(set DPR:$Vd, (Ty (NEONvrev16 (Ty DPR:$Vm))))]>;
4930 class VREV16Q<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4931 : N2V<0b11, 0b11, op19_18, 0b00, 0b00010, 1, 0, (outs QPR:$Vd),
4932 (ins QPR:$Vm), IIC_VMOVQ,
4933 OpcodeStr, Dt, "$Vd, $Vm", "",
4934 [(set QPR:$Vd, (Ty (NEONvrev16 (Ty QPR:$Vm))))]>;
// Only 8-bit elements fit in a 16-bit halfword.
4936 def VREV16d8 : VREV16D<0b00, "vrev16", "8", v8i8>;
4937 def VREV16q8 : VREV16Q<0b00, "vrev16", "8", v16i8>;
4939 // Other Vector Shuffles.
4941 // Aligned extractions: really just dropping registers
// Extracting a D-sized subvector at a D-aligned start index from a Q vector
// needs no instruction: it is just an EXTRACT_SUBREG of dsub_0/dsub_1, with
// LaneCVT mapping the start lane to the subregister index.
4943 class AlignedVEXTq<ValueType DestTy, ValueType SrcTy, SDNodeXForm LaneCVT>
4944 : Pat<(DestTy (vector_extract_subvec (SrcTy QPR:$src), (i32 imm:$start))),
4945 (EXTRACT_SUBREG (SrcTy QPR:$src), (LaneCVT imm:$start))>;
4947 def : AlignedVEXTq<v8i8, v16i8, DSubReg_i8_reg>;
4949 def : AlignedVEXTq<v4i16, v8i16, DSubReg_i16_reg>;
4951 def : AlignedVEXTq<v2i32, v4i32, DSubReg_i32_reg>;
4953 def : AlignedVEXTq<v1i64, v2i64, DSubReg_f64_reg>;
4955 def : AlignedVEXTq<v2f32, v4f32, DSubReg_i32_reg>;
4958 // VEXT : Vector Extract
// VEXTd/VEXTq: concatenate $Vn:$Vm and extract a D/Q-sized window starting
// at byte offset derived from $index. The per-size defs below narrow
// Inst{11-8} so the index is scaled for the element width (unused low bits
// forced to 0 for the 32-bit form).
// NOTE(review): embedded numbering gaps (e.g. 4966, 4968-4969) suggest the
// "bits<4> index;" declarations and closing braces were dropped from this
// extract; verify against the original file.
4960 class VEXTd<string OpcodeStr, string Dt, ValueType Ty>
4961 : N3V<0,1,0b11,{?,?,?,?},0,0, (outs DPR:$Vd),
4962 (ins DPR:$Vn, DPR:$Vm, i32imm:$index), NVExtFrm,
4963 IIC_VEXTD, OpcodeStr, Dt, "$Vd, $Vn, $Vm, $index", "",
4964 [(set DPR:$Vd, (Ty (NEONvext (Ty DPR:$Vn),
4965 (Ty DPR:$Vm), imm:$index)))]> {
4967 let Inst{11-8} = index{3-0};
4970 class VEXTq<string OpcodeStr, string Dt, ValueType Ty>
4971 : N3V<0,1,0b11,{?,?,?,?},1,0, (outs QPR:$Vd),
4972 (ins QPR:$Vn, QPR:$Vm, i32imm:$index), NVExtFrm,
4973 IIC_VEXTQ, OpcodeStr, Dt, "$Vd, $Vn, $Vm, $index", "",
4974 [(set QPR:$Vd, (Ty (NEONvext (Ty QPR:$Vn),
4975 (Ty QPR:$Vm), imm:$index)))]> {
4977 let Inst{11-8} = index{3-0};
4980 def VEXTd8 : VEXTd<"vext", "8", v8i8> {
4981 let Inst{11-8} = index{3-0};
4983 def VEXTd16 : VEXTd<"vext", "16", v4i16> {
4984 let Inst{11-9} = index{2-0};
4987 def VEXTd32 : VEXTd<"vext", "32", v2i32> {
4988 let Inst{11-10} = index{1-0};
4989 let Inst{9-8} = 0b00;
// f32 VEXT reuses the 32-bit integer encoding.
4991 def : Pat<(v2f32 (NEONvext (v2f32 DPR:$Vn),
4994 (VEXTd32 DPR:$Vn, DPR:$Vm, imm:$index)>;
4996 def VEXTq8 : VEXTq<"vext", "8", v16i8> {
4997 let Inst{11-8} = index{3-0};
4999 def VEXTq16 : VEXTq<"vext", "16", v8i16> {
5000 let Inst{11-9} = index{2-0};
5003 def VEXTq32 : VEXTq<"vext", "32", v4i32> {
5004 let Inst{11-10} = index{1-0};
5005 let Inst{9-8} = 0b00;
5007 def : Pat<(v4f32 (NEONvext (v4f32 QPR:$Vn),
5010 (VEXTq32 QPR:$Vn, QPR:$Vm, imm:$index)>;
5012 // VTRN : Vector Transpose
// All three permute families use the same N2VDShuffle/N2VQShuffle shells;
// the first template argument encodes the element size (00=8, 01=16, 10=32)
// and the second selects the operation.
5014 def VTRNd8 : N2VDShuffle<0b00, 0b00001, "vtrn", "8">;
5015 def VTRNd16 : N2VDShuffle<0b01, 0b00001, "vtrn", "16">;
5016 def VTRNd32 : N2VDShuffle<0b10, 0b00001, "vtrn", "32">;
5018 def VTRNq8 : N2VQShuffle<0b00, 0b00001, IIC_VPERMQ, "vtrn", "8">;
5019 def VTRNq16 : N2VQShuffle<0b01, 0b00001, IIC_VPERMQ, "vtrn", "16">;
5020 def VTRNq32 : N2VQShuffle<0b10, 0b00001, IIC_VPERMQ, "vtrn", "32">;
5022 // VUZP : Vector Unzip (Deinterleave)
5024 def VUZPd8 : N2VDShuffle<0b00, 0b00010, "vuzp", "8">;
5025 def VUZPd16 : N2VDShuffle<0b01, 0b00010, "vuzp", "16">;
5026 def VUZPd32 : N2VDShuffle<0b10, 0b00010, "vuzp", "32">;
5028 def VUZPq8 : N2VQShuffle<0b00, 0b00010, IIC_VPERMQ3, "vuzp", "8">;
5029 def VUZPq16 : N2VQShuffle<0b01, 0b00010, IIC_VPERMQ3, "vuzp", "16">;
5030 def VUZPq32 : N2VQShuffle<0b10, 0b00010, IIC_VPERMQ3, "vuzp", "32">;
5032 // VZIP : Vector Zip (Interleave)
5034 def VZIPd8 : N2VDShuffle<0b00, 0b00011, "vzip", "8">;
5035 def VZIPd16 : N2VDShuffle<0b01, 0b00011, "vzip", "16">;
5036 def VZIPd32 : N2VDShuffle<0b10, 0b00011, "vzip", "32">;
5038 def VZIPq8 : N2VQShuffle<0b00, 0b00011, IIC_VPERMQ3, "vzip", "8">;
5039 def VZIPq16 : N2VQShuffle<0b01, 0b00011, IIC_VPERMQ3, "vzip", "16">;
5040 def VZIPq32 : N2VQShuffle<0b10, 0b00011, IIC_VPERMQ3, "vzip", "32">;
5042 // Vector Table Lookup and Table Extension.
5044 // VTBL : Vector Table Lookup
// Each instruction indexes a 1-4 register table ($Vn[, $tbl2..$tbl4]) by the
// bytes of $Vm. hasExtraSrcRegAllocReq marks the multi-register-table forms,
// whose table registers must be allocated consecutively. The Pseudo forms
// take Q/QQ table operands and are expanded later.
// NOTE(review): the embedded numbering gaps (e.g. 5046, 5052, 5056, 5060,
// 5067, 5069, 5071, 5075, 5082, ...) indicate the "def VTBLn"/"def VTBXn"
// and pseudo-name header lines were dropped from this extract; the bodies
// below are kept verbatim — verify against the original file.
5045 let DecoderMethod = "DecodeTBLInstruction" in {
5047 : N3V<1,1,0b11,0b1000,0,0, (outs DPR:$Vd),
5048 (ins VecListOneD:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTB1,
5049 "vtbl", "8", "$Vd, $Vn, $Vm", "",
5050 [(set DPR:$Vd, (v8i8 (int_arm_neon_vtbl1 VecListOneD:$Vn, DPR:$Vm)))]>;
5051 let hasExtraSrcRegAllocReq = 1 in {
5053 : N3V<1,1,0b11,0b1001,0,0, (outs DPR:$Vd),
5054 (ins DPR:$Vn, DPR:$tbl2, DPR:$Vm), NVTBLFrm, IIC_VTB2,
5055 "vtbl", "8", "$Vd, \\{$Vn, $tbl2\\}, $Vm", "", []>;
5057 : N3V<1,1,0b11,0b1010,0,0, (outs DPR:$Vd),
5058 (ins DPR:$Vn, DPR:$tbl2, DPR:$tbl3, DPR:$Vm), NVTBLFrm, IIC_VTB3,
5059 "vtbl", "8", "$Vd, \\{$Vn, $tbl2, $tbl3\\}, $Vm", "", []>;
5061 : N3V<1,1,0b11,0b1011,0,0, (outs DPR:$Vd),
5062 (ins DPR:$Vn, DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$Vm),
5064 "vtbl", "8", "$Vd, \\{$Vn, $tbl2, $tbl3, $tbl4\\}, $Vm", "", []>;
5065 } // hasExtraSrcRegAllocReq = 1
5068 : PseudoNeonI<(outs DPR:$dst), (ins QPR:$tbl, DPR:$src), IIC_VTB2, "", []>;
5070 : PseudoNeonI<(outs DPR:$dst), (ins QQPR:$tbl, DPR:$src), IIC_VTB3, "", []>;
5072 : PseudoNeonI<(outs DPR:$dst), (ins QQPR:$tbl, DPR:$src), IIC_VTB4, "", []>;
5074 // VTBX : Vector Table Extension
// Same as VTBL, but out-of-range index bytes leave the corresponding byte of
// $orig unchanged; hence the "$orig = $Vd" tie to the destination.
5076 : N3V<1,1,0b11,0b1000,1,0, (outs DPR:$Vd),
5077 (ins DPR:$orig, VecListOneD:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTBX1,
5078 "vtbx", "8", "$Vd, $Vn, $Vm", "$orig = $Vd",
5079 [(set DPR:$Vd, (v8i8 (int_arm_neon_vtbx1
5080 DPR:$orig, VecListOneD:$Vn, DPR:$Vm)))]>;
5081 let hasExtraSrcRegAllocReq = 1 in {
5083 : N3V<1,1,0b11,0b1001,1,0, (outs DPR:$Vd),
5084 (ins DPR:$orig, DPR:$Vn, DPR:$tbl2, DPR:$Vm), NVTBLFrm, IIC_VTBX2,
5085 "vtbx", "8", "$Vd, \\{$Vn, $tbl2\\}, $Vm", "$orig = $Vd", []>;
5087 : N3V<1,1,0b11,0b1010,1,0, (outs DPR:$Vd),
5088 (ins DPR:$orig, DPR:$Vn, DPR:$tbl2, DPR:$tbl3, DPR:$Vm),
5089 NVTBLFrm, IIC_VTBX3,
5090 "vtbx", "8", "$Vd, \\{$Vn, $tbl2, $tbl3\\}, $Vm",
5093 : N3V<1,1,0b11,0b1011,1,0, (outs DPR:$Vd), (ins DPR:$orig, DPR:$Vn,
5094 DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$Vm), NVTBLFrm, IIC_VTBX4,
5095 "vtbx", "8", "$Vd, \\{$Vn, $tbl2, $tbl3, $tbl4\\}, $Vm",
5097 } // hasExtraSrcRegAllocReq = 1
5100 : PseudoNeonI<(outs DPR:$dst), (ins DPR:$orig, QPR:$tbl, DPR:$src),
5101 IIC_VTBX2, "$orig = $dst", []>;
5103 : PseudoNeonI<(outs DPR:$dst), (ins DPR:$orig, QQPR:$tbl, DPR:$src),
5104 IIC_VTBX3, "$orig = $dst", []>;
5106 : PseudoNeonI<(outs DPR:$dst), (ins DPR:$orig, QQPR:$tbl, DPR:$src),
5107 IIC_VTBX4, "$orig = $dst", []>;
5108 } // DecoderMethod = "DecodeTBLInstruction"
5110 //===----------------------------------------------------------------------===//
5111 // NEON instructions for single-precision FP math
5112 //===----------------------------------------------------------------------===//
// These NEONFPPat patterns implement scalar f32 arithmetic with NEON vector
// instructions (used when UseNEONForFP is in effect): each scalar operand is
// placed in lane 0 of an undefined v2f32, the D-register NEON op runs, and
// lane 0 of the result is extracted. COPY_TO_REGCLASS to DPR_VFP2 keeps the
// registers in the S-subregister-addressable range — TODO confirm against
// the register-class definitions.
// NOTE(review): embedded numbering gaps (5116, 5118, 5124, ...) indicate the
// EXTRACT_SUBREG/INSERT_SUBREG lines of these classes were dropped from this
// extract; verify against the original file.
5114 class N2VSPat<SDNode OpNode, NeonI Inst>
5115 : NEONFPPat<(f32 (OpNode SPR:$a)),
5117 (v2f32 (COPY_TO_REGCLASS (Inst
5119 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5120 SPR:$a, ssub_0)), DPR_VFP2)), ssub_0)>;
5122 class N3VSPat<SDNode OpNode, NeonI Inst>
5123 : NEONFPPat<(f32 (OpNode SPR:$a, SPR:$b)),
5125 (v2f32 (COPY_TO_REGCLASS (Inst
5127 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5130 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5131 SPR:$b, ssub_0)), DPR_VFP2)), ssub_0)>;
5133 class N3VSMulOpPat<SDNode MulNode, SDNode OpNode, NeonI Inst>
5134 : NEONFPPat<(f32 (OpNode SPR:$acc, (f32 (MulNode SPR:$a, SPR:$b)))),
5136 (v2f32 (COPY_TO_REGCLASS (Inst
5138 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5141 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5144 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5145 SPR:$b, ssub_0)), DPR_VFP2)), ssub_0)>;
5147 def : N3VSPat<fadd, VADDfd>;
5148 def : N3VSPat<fsub, VSUBfd>;
5149 def : N3VSPat<fmul, VMULfd>;
// Fused multiply-accumulate forms additionally gated on UseFPVMLx.
5150 def : N3VSMulOpPat<fmul, fadd, VMLAfd>,
5151 Requires<[HasNEON, UseNEONForFP, UseFPVMLx]>;
5152 def : N3VSMulOpPat<fmul, fsub, VMLSfd>,
5153 Requires<[HasNEON, UseNEONForFP, UseFPVMLx]>;
5154 def : N2VSPat<fabs, VABSfd>;
5155 def : N2VSPat<fneg, VNEGfd>;
5156 def : N3VSPat<NEONfmax, VMAXfd>;
5157 def : N3VSPat<NEONfmin, VMINfd>;
5158 def : N2VSPat<arm_ftosi, VCVTf2sd>;
5159 def : N2VSPat<arm_ftoui, VCVTf2ud>;
5160 def : N2VSPat<arm_sitof, VCVTs2fd>;
5161 def : N2VSPat<arm_uitof, VCVTu2fd>;
5163 //===----------------------------------------------------------------------===//
5164 // Non-Instruction Patterns
5165 //===----------------------------------------------------------------------===//
// bitconvert between any two 64-bit vector/f64 types is a no-op at the
// register level (the value stays in the same D register), so every pair is
// matched to the source operand unchanged. First the D-register (64-bit)
// cross-product:
5168 def : Pat<(v1i64 (bitconvert (v2i32 DPR:$src))), (v1i64 DPR:$src)>;
5169 def : Pat<(v1i64 (bitconvert (v4i16 DPR:$src))), (v1i64 DPR:$src)>;
5170 def : Pat<(v1i64 (bitconvert (v8i8 DPR:$src))), (v1i64 DPR:$src)>;
5171 def : Pat<(v1i64 (bitconvert (f64 DPR:$src))), (v1i64 DPR:$src)>;
5172 def : Pat<(v1i64 (bitconvert (v2f32 DPR:$src))), (v1i64 DPR:$src)>;
5173 def : Pat<(v2i32 (bitconvert (v1i64 DPR:$src))), (v2i32 DPR:$src)>;
5174 def : Pat<(v2i32 (bitconvert (v4i16 DPR:$src))), (v2i32 DPR:$src)>;
5175 def : Pat<(v2i32 (bitconvert (v8i8 DPR:$src))), (v2i32 DPR:$src)>;
5176 def : Pat<(v2i32 (bitconvert (f64 DPR:$src))), (v2i32 DPR:$src)>;
5177 def : Pat<(v2i32 (bitconvert (v2f32 DPR:$src))), (v2i32 DPR:$src)>;
5178 def : Pat<(v4i16 (bitconvert (v1i64 DPR:$src))), (v4i16 DPR:$src)>;
5179 def : Pat<(v4i16 (bitconvert (v2i32 DPR:$src))), (v4i16 DPR:$src)>;
5180 def : Pat<(v4i16 (bitconvert (v8i8 DPR:$src))), (v4i16 DPR:$src)>;
5181 def : Pat<(v4i16 (bitconvert (f64 DPR:$src))), (v4i16 DPR:$src)>;
5182 def : Pat<(v4i16 (bitconvert (v2f32 DPR:$src))), (v4i16 DPR:$src)>;
5183 def : Pat<(v8i8 (bitconvert (v1i64 DPR:$src))), (v8i8 DPR:$src)>;
5184 def : Pat<(v8i8 (bitconvert (v2i32 DPR:$src))), (v8i8 DPR:$src)>;
5185 def : Pat<(v8i8 (bitconvert (v4i16 DPR:$src))), (v8i8 DPR:$src)>;
5186 def : Pat<(v8i8 (bitconvert (f64 DPR:$src))), (v8i8 DPR:$src)>;
5187 def : Pat<(v8i8 (bitconvert (v2f32 DPR:$src))), (v8i8 DPR:$src)>;
5188 def : Pat<(f64 (bitconvert (v1i64 DPR:$src))), (f64 DPR:$src)>;
5189 def : Pat<(f64 (bitconvert (v2i32 DPR:$src))), (f64 DPR:$src)>;
5190 def : Pat<(f64 (bitconvert (v4i16 DPR:$src))), (f64 DPR:$src)>;
5191 def : Pat<(f64 (bitconvert (v8i8 DPR:$src))), (f64 DPR:$src)>;
5192 def : Pat<(f64 (bitconvert (v2f32 DPR:$src))), (f64 DPR:$src)>;
5193 def : Pat<(v2f32 (bitconvert (f64 DPR:$src))), (v2f32 DPR:$src)>;
5194 def : Pat<(v2f32 (bitconvert (v1i64 DPR:$src))), (v2f32 DPR:$src)>;
5195 def : Pat<(v2f32 (bitconvert (v2i32 DPR:$src))), (v2f32 DPR:$src)>;
5196 def : Pat<(v2f32 (bitconvert (v4i16 DPR:$src))), (v2f32 DPR:$src)>;
5197 def : Pat<(v2f32 (bitconvert (v8i8 DPR:$src))), (v2f32 DPR:$src)>;
// Same for the Q-register (128-bit) cross-product.
5199 def : Pat<(v2i64 (bitconvert (v4i32 QPR:$src))), (v2i64 QPR:$src)>;
5200 def : Pat<(v2i64 (bitconvert (v8i16 QPR:$src))), (v2i64 QPR:$src)>;
5201 def : Pat<(v2i64 (bitconvert (v16i8 QPR:$src))), (v2i64 QPR:$src)>;
5202 def : Pat<(v2i64 (bitconvert (v2f64 QPR:$src))), (v2i64 QPR:$src)>;
5203 def : Pat<(v2i64 (bitconvert (v4f32 QPR:$src))), (v2i64 QPR:$src)>;
5204 def : Pat<(v4i32 (bitconvert (v2i64 QPR:$src))), (v4i32 QPR:$src)>;
5205 def : Pat<(v4i32 (bitconvert (v8i16 QPR:$src))), (v4i32 QPR:$src)>;
5206 def : Pat<(v4i32 (bitconvert (v16i8 QPR:$src))), (v4i32 QPR:$src)>;
5207 def : Pat<(v4i32 (bitconvert (v2f64 QPR:$src))), (v4i32 QPR:$src)>;
5208 def : Pat<(v4i32 (bitconvert (v4f32 QPR:$src))), (v4i32 QPR:$src)>;
5209 def : Pat<(v8i16 (bitconvert (v2i64 QPR:$src))), (v8i16 QPR:$src)>;
5210 def : Pat<(v8i16 (bitconvert (v4i32 QPR:$src))), (v8i16 QPR:$src)>;
5211 def : Pat<(v8i16 (bitconvert (v16i8 QPR:$src))), (v8i16 QPR:$src)>;
5212 def : Pat<(v8i16 (bitconvert (v2f64 QPR:$src))), (v8i16 QPR:$src)>;
5213 def : Pat<(v8i16 (bitconvert (v4f32 QPR:$src))), (v8i16 QPR:$src)>;
5214 def : Pat<(v16i8 (bitconvert (v2i64 QPR:$src))), (v16i8 QPR:$src)>;
5215 def : Pat<(v16i8 (bitconvert (v4i32 QPR:$src))), (v16i8 QPR:$src)>;
5216 def : Pat<(v16i8 (bitconvert (v8i16 QPR:$src))), (v16i8 QPR:$src)>;
5217 def : Pat<(v16i8 (bitconvert (v2f64 QPR:$src))), (v16i8 QPR:$src)>;
5218 def : Pat<(v16i8 (bitconvert (v4f32 QPR:$src))), (v16i8 QPR:$src)>;
5219 def : Pat<(v4f32 (bitconvert (v2i64 QPR:$src))), (v4f32 QPR:$src)>;
5220 def : Pat<(v4f32 (bitconvert (v4i32 QPR:$src))), (v4f32 QPR:$src)>;
5221 def : Pat<(v4f32 (bitconvert (v8i16 QPR:$src))), (v4f32 QPR:$src)>;
5222 def : Pat<(v4f32 (bitconvert (v16i8 QPR:$src))), (v4f32 QPR:$src)>;
5223 def : Pat<(v4f32 (bitconvert (v2f64 QPR:$src))), (v4f32 QPR:$src)>;
5224 def : Pat<(v2f64 (bitconvert (v2i64 QPR:$src))), (v2f64 QPR:$src)>;
5225 def : Pat<(v2f64 (bitconvert (v4i32 QPR:$src))), (v2f64 QPR:$src)>;
5226 def : Pat<(v2f64 (bitconvert (v8i16 QPR:$src))), (v2f64 QPR:$src)>;
5227 def : Pat<(v2f64 (bitconvert (v16i8 QPR:$src))), (v2f64 QPR:$src)>;
5228 def : Pat<(v2f64 (bitconvert (v4f32 QPR:$src))), (v2f64 QPR:$src)>;
5231 //===----------------------------------------------------------------------===//
5232 // Assembler aliases
5235 // VAND/VEOR/VORR accept but do not require a type suffix.
// Each defm expands to aliases for every data-type suffix, mapping to the
// single untyped D- or Q-register instruction.
5236 defm : VFPDTAnyInstAlias<"vand${p}", "$Vd, $Vn, $Vm",
5237 (VANDd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
5238 defm : VFPDTAnyInstAlias<"vand${p}", "$Vd, $Vn, $Vm",
5239 (VANDq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
5240 defm : VFPDTAnyInstAlias<"veor${p}", "$Vd, $Vn, $Vm",
5241 (VEORd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
5242 defm : VFPDTAnyInstAlias<"veor${p}", "$Vd, $Vn, $Vm",
5243 (VEORq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
5244 defm : VFPDTAnyInstAlias<"vorr${p}", "$Vd, $Vn, $Vm",
5245 (VORRd DPR:$Vd, DPR:$Vn, DPR:$Vm, pred:$p)>;
5246 defm : VFPDTAnyInstAlias<"vorr${p}", "$Vd, $Vn, $Vm",
5247 (VORRq QPR:$Vd, QPR:$Vn, QPR:$Vm, pred:$p)>;
5249 // VLD1 requires a size suffix, but also accepts type specific variants.
// For each list length (1-4 D registers) there are three addressing forms:
// plain, writeback with fixed stride ("!"), and writeback with a register
// stride. The writeback instructions take a zero_reg tied-operand
// placeholder for the updated base.
5250 // Load one D register.
5251 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5252 (VLD1d8 VecListOneD:$Vd, addrmode6:$Rn, pred:$p)>;
5253 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5254 (VLD1d16 VecListOneD:$Vd, addrmode6:$Rn, pred:$p)>;
5255 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5256 (VLD1d32 VecListOneD:$Vd, addrmode6:$Rn, pred:$p)>;
5257 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5258 (VLD1d64 VecListOneD:$Vd, addrmode6:$Rn, pred:$p)>;
5259 // with writeback, fixed stride
5260 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5261 (VLD1d8wb_fixed VecListOneD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
5262 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5263 (VLD1d16wb_fixed VecListOneD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
5264 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5265 (VLD1d32wb_fixed VecListOneD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
5266 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5267 (VLD1d64wb_fixed VecListOneD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
5268 // with writeback, register stride
5269 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5270 (VLD1d8wb_register VecListOneD:$Vd, zero_reg, addrmode6:$Rn,
5271 rGPR:$Rm, pred:$p)>;
5272 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5273 (VLD1d16wb_register VecListOneD:$Vd, zero_reg, addrmode6:$Rn,
5274 rGPR:$Rm, pred:$p)>;
5275 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5276 (VLD1d32wb_register VecListOneD:$Vd, zero_reg, addrmode6:$Rn,
5277 rGPR:$Rm, pred:$p)>;
5278 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5279 (VLD1d64wb_register VecListOneD:$Vd, zero_reg, addrmode6:$Rn,
5280 rGPR:$Rm, pred:$p)>;
5282 // Load two D registers.
5283 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5284 (VLD1q8 VecListTwoD:$Vd, addrmode6:$Rn, pred:$p)>;
5285 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5286 (VLD1q16 VecListTwoD:$Vd, addrmode6:$Rn, pred:$p)>;
5287 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5288 (VLD1q32 VecListTwoD:$Vd, addrmode6:$Rn, pred:$p)>;
5289 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5290 (VLD1q64 VecListTwoD:$Vd, addrmode6:$Rn, pred:$p)>;
5291 // with writeback, fixed stride
5292 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5293 (VLD1q8wb_fixed VecListTwoD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
5294 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5295 (VLD1q16wb_fixed VecListTwoD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
5296 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5297 (VLD1q32wb_fixed VecListTwoD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
5298 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5299 (VLD1q64wb_fixed VecListTwoD:$Vd, zero_reg, addrmode6:$Rn, pred:$p)>;
5300 // with writeback, register stride
5301 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5302 (VLD1q8wb_register VecListTwoD:$Vd, zero_reg, addrmode6:$Rn,
5303 rGPR:$Rm, pred:$p)>;
5304 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5305 (VLD1q16wb_register VecListTwoD:$Vd, zero_reg, addrmode6:$Rn,
5306 rGPR:$Rm, pred:$p)>;
5307 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5308 (VLD1q32wb_register VecListTwoD:$Vd, zero_reg, addrmode6:$Rn,
5309 rGPR:$Rm, pred:$p)>;
5310 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5311 (VLD1q64wb_register VecListTwoD:$Vd, zero_reg, addrmode6:$Rn,
5312 rGPR:$Rm, pred:$p)>;
5314 // Load three D registers.
5315 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5316 (VLD1d8T VecListThreeD:$Vd, addrmode6:$Rn, pred:$p)>;
5317 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5318 (VLD1d16T VecListThreeD:$Vd, addrmode6:$Rn, pred:$p)>;
5319 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5320 (VLD1d32T VecListThreeD:$Vd, addrmode6:$Rn, pred:$p)>;
5321 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5322 (VLD1d64T VecListThreeD:$Vd, addrmode6:$Rn, pred:$p)>;
5323 // with writeback, fixed stride
5324 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5325 (VLD1d8Twb_fixed VecListThreeD:$Vd, zero_reg,
5326 addrmode6:$Rn, pred:$p)>;
5327 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5328 (VLD1d16Twb_fixed VecListThreeD:$Vd, zero_reg,
5329 addrmode6:$Rn, pred:$p)>;
5330 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5331 (VLD1d32Twb_fixed VecListThreeD:$Vd, zero_reg,
5332 addrmode6:$Rn, pred:$p)>;
5333 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5334 (VLD1d64Twb_fixed VecListThreeD:$Vd, zero_reg,
5335 addrmode6:$Rn, pred:$p)>;
5336 // with writeback, register stride
5337 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5338 (VLD1d8Twb_register VecListThreeD:$Vd, zero_reg,
5339 addrmode6:$Rn, rGPR:$Rm, pred:$p)>;
5340 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5341 (VLD1d16Twb_register VecListThreeD:$Vd, zero_reg,
5342 addrmode6:$Rn, rGPR:$Rm, pred:$p)>;
5343 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5344 (VLD1d32Twb_register VecListThreeD:$Vd, zero_reg,
5345 addrmode6:$Rn, rGPR:$Rm, pred:$p)>;
5346 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5347 (VLD1d64Twb_register VecListThreeD:$Vd, zero_reg,
5348 addrmode6:$Rn, rGPR:$Rm, pred:$p)>;
5351 // Load four D registers.
5352 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5353 (VLD1d8Q VecListFourD:$Vd, addrmode6:$Rn, pred:$p)>;
5354 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5355 (VLD1d16Q VecListFourD:$Vd, addrmode6:$Rn, pred:$p)>;
5356 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5357 (VLD1d32Q VecListFourD:$Vd, addrmode6:$Rn, pred:$p)>;
5358 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn",
5359 (VLD1d64Q VecListFourD:$Vd, addrmode6:$Rn, pred:$p)>;
5360 // with writeback, fixed stride
5361 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5362 (VLD1d8Qwb_fixed VecListFourD:$Vd, zero_reg,
5363 addrmode6:$Rn, pred:$p)>;
5364 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5365 (VLD1d16Qwb_fixed VecListFourD:$Vd, zero_reg,
5366 addrmode6:$Rn, pred:$p)>;
5367 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5368 (VLD1d32Qwb_fixed VecListFourD:$Vd, zero_reg,
5369 addrmode6:$Rn, pred:$p)>;
5370 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn!",
5371 (VLD1d64Qwb_fixed VecListFourD:$Vd, zero_reg,
5372 addrmode6:$Rn, pred:$p)>;
5373 // with writeback, register stride
5374 defm : VFPDT8ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5375 (VLD1d8Qwb_register VecListFourD:$Vd, zero_reg,
5376 addrmode6:$Rn, rGPR:$Rm, pred:$p)>;
5377 defm : VFPDT16ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5378 (VLD1d16Qwb_register VecListFourD:$Vd, zero_reg,
5379 addrmode6:$Rn, rGPR:$Rm, pred:$p)>;
5380 defm : VFPDT32ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5381 (VLD1d32Qwb_register VecListFourD:$Vd, zero_reg,
5382 addrmode6:$Rn, rGPR:$Rm, pred:$p)>;
5383 defm : VFPDT64ReqInstAlias<"vld1${p}", "$Vd, $Rn, $Rm",
5384 (VLD1d64Qwb_register VecListFourD:$Vd, zero_reg,
5385 addrmode6:$Rn, rGPR:$Rm, pred:$p)>;
5387 // VST1 requires a size suffix, but also accepts type specific variants.
5388 // Store one D register.
5389 defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5390 (VST1d8 addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
5391 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5392 (VST1d16 addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
5393 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5394 (VST1d32 addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
5395 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5396 (VST1d64 addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
5397 // with writeback, fixed stride
5398 defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5399 (VST1d8wb_fixed zero_reg, addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
5400 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5401 (VST1d16wb_fixed zero_reg, addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
5402 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5403 (VST1d32wb_fixed zero_reg, addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
5404 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5405 (VST1d64wb_fixed zero_reg, addrmode6:$Rn, VecListOneD:$Vd, pred:$p)>;
5406 // with writeback, register stride
5407 defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5408 (VST1d8wb_register zero_reg, addrmode6:$Rn, rGPR:$Rm,
5409 VecListOneD:$Vd, pred:$p)>;
5410 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5411 (VST1d16wb_register zero_reg, addrmode6:$Rn, rGPR:$Rm,
5412 VecListOneD:$Vd, pred:$p)>;
5413 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5414 (VST1d32wb_register zero_reg, addrmode6:$Rn, rGPR:$Rm,
5415 VecListOneD:$Vd, pred:$p)>;
5416 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5417 (VST1d64wb_register zero_reg, addrmode6:$Rn, rGPR:$Rm,
5418 VecListOneD:$Vd, pred:$p)>;
5420 // Store two D registers.
// Same pattern as the one-register section above, but the register list is
// a pair of D registers (VecListTwoD), matching the VST1q* instructions.
defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5422 (VST1q8 addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
5423 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5424 (VST1q16 addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
5425 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5426 (VST1q32 addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
5427 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5428 (VST1q64 addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
5429 // with writeback, fixed stride
defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5431 (VST1q8wb_fixed zero_reg, addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
5432 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5433 (VST1q16wb_fixed zero_reg, addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
5434 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5435 (VST1q32wb_fixed zero_reg, addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
5436 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5437 (VST1q64wb_fixed zero_reg, addrmode6:$Rn, VecListTwoD:$Vd, pred:$p)>;
5438 // with writeback, register stride
defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5440 (VST1q8wb_register zero_reg, addrmode6:$Rn,
5441 rGPR:$Rm, VecListTwoD:$Vd, pred:$p)>;
5442 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5443 (VST1q16wb_register zero_reg, addrmode6:$Rn,
5444 rGPR:$Rm, VecListTwoD:$Vd, pred:$p)>;
5445 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5446 (VST1q32wb_register zero_reg, addrmode6:$Rn,
5447 rGPR:$Rm, VecListTwoD:$Vd, pred:$p)>;
5448 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5449 (VST1q64wb_register zero_reg, addrmode6:$Rn,
5450 rGPR:$Rm, VecListTwoD:$Vd, pred:$p)>;
5452 // Store three D registers.
// (Comment previously said "Load", but every alias below maps to a VST1
// store instruction.)
defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5454 (VST1d8T addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
5455 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5456 (VST1d16T addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
5457 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5458 (VST1d32T addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
5459 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5460 (VST1d64T addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
// with writeback, fixed stride
defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5462 (VST1d8Twb_fixed zero_reg, addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
5463 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5464 (VST1d16Twb_fixed zero_reg, addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
5465 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5466 (VST1d32Twb_fixed zero_reg, addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
5467 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5468 (VST1d64Twb_fixed zero_reg, addrmode6:$Rn, VecListThreeD:$Vd, pred:$p)>;
// with writeback, register stride
defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5470 (VST1d8Twb_register zero_reg, addrmode6:$Rn, rGPR:$Rm,
5471 VecListThreeD:$Vd, pred:$p)>;
5472 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5473 (VST1d16Twb_register zero_reg, addrmode6:$Rn, rGPR:$Rm,
5474 VecListThreeD:$Vd, pred:$p)>;
5475 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5476 (VST1d32Twb_register zero_reg, addrmode6:$Rn, rGPR:$Rm,
5477 VecListThreeD:$Vd, pred:$p)>;
5478 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5479 (VST1d64Twb_register zero_reg, addrmode6:$Rn, rGPR:$Rm,
5480 VecListThreeD:$Vd, pred:$p)>;
5482 // Store four D registers.
// (Comment previously said "Load", but every alias below maps to a VST1
// store instruction.)
defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5484 (VST1d8Q addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
5485 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5486 (VST1d16Q addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
5487 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5488 (VST1d32Q addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
5489 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn",
5490 (VST1d64Q addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
// with writeback, fixed stride
defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5492 (VST1d8Qwb_fixed zero_reg, addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
5493 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5494 (VST1d16Qwb_fixed zero_reg, addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
5495 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5496 (VST1d32Qwb_fixed zero_reg, addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
5497 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn!",
5498 (VST1d64Qwb_fixed zero_reg, addrmode6:$Rn, VecListFourD:$Vd, pred:$p)>;
// with writeback, register stride
defm : VFPDT8ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5500 (VST1d8Qwb_register zero_reg, addrmode6:$Rn, rGPR:$Rm,
5501 VecListFourD:$Vd, pred:$p)>;
5502 defm : VFPDT16ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5503 (VST1d16Qwb_register zero_reg, addrmode6:$Rn, rGPR:$Rm,
5504 VecListFourD:$Vd, pred:$p)>;
5505 defm : VFPDT32ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5506 (VST1d32Qwb_register zero_reg, addrmode6:$Rn, rGPR:$Rm,
5507 VecListFourD:$Vd, pred:$p)>;
5508 defm : VFPDT64ReqInstAlias<"vst1${p}", "$Vd, $Rn, $Rm",
5509 (VST1d64Qwb_register zero_reg, addrmode6:$Rn, rGPR:$Rm,
5510 VecListFourD:$Vd, pred:$p)>;
5513 // VTRN instructions data type suffix aliases for more-specific types.
// VTRN only encodes the element size, so these aliases accept the
// signed/unsigned/typed spellings of each size for both D and Q forms.
// Note: no 64-bit form -- VTRN is defined only for 8/16/32-bit elements.
defm : VFPDT8ReqInstAlias <"vtrn${p}", "$Dd, $Dm",
5515 (VTRNd8 DPR:$Dd, DPR:$Dm, pred:$p)>;
5516 defm : VFPDT16ReqInstAlias<"vtrn${p}", "$Dd, $Dm",
5517 (VTRNd16 DPR:$Dd, DPR:$Dm, pred:$p)>;
5518 defm : VFPDT32ReqInstAlias<"vtrn${p}", "$Dd, $Dm",
5519 (VTRNd32 DPR:$Dd, DPR:$Dm, pred:$p)>;
// Q-register (128-bit) forms.
defm : VFPDT8ReqInstAlias <"vtrn${p}", "$Qd, $Qm",
5522 (VTRNq8 QPR:$Qd, QPR:$Qm, pred:$p)>;
5523 defm : VFPDT16ReqInstAlias<"vtrn${p}", "$Qd, $Qm",
5524 (VTRNq16 QPR:$Qd, QPR:$Qm, pred:$p)>;
5525 defm : VFPDT32ReqInstAlias<"vtrn${p}", "$Qd, $Qm",
5526 (VTRNq32 QPR:$Qd, QPR:$Qm, pred:$p)>;