1 //===- ARMInstrNEON.td - NEON support for ARM -----------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the ARM NEON instruction set.
12 //===----------------------------------------------------------------------===//
15 //===----------------------------------------------------------------------===//
16 // NEON-specific Operands.
17 //===----------------------------------------------------------------------===//
// Immediate operands for NEON "modified immediate" encodings (VMOV/VORR/VBIC
// style cmode+imm8 immediates). Each size-specific splat operand pairs an
// AsmOperandClass (asm-parser matching) with an Operand that prints via
// printNEONModImmOperand.
// NOTE(review): this extraction has dropped blank lines and lone '}' lines
// (embedded original line numbers jump, e.g. 19 -> 22), so record bodies
// below appear unclosed; restore braces against the upstream file.
18 def nModImm : Operand<i32> {
19 let PrintMethod = "printNEONModImmOperand";
22 def nImmSplatI8AsmOperand : AsmOperandClass { let Name = "NEONi8splat"; }
23 def nImmSplatI8 : Operand<i32> {
24 let PrintMethod = "printNEONModImmOperand";
25 let ParserMatchClass = nImmSplatI8AsmOperand;
27 def nImmSplatI16AsmOperand : AsmOperandClass { let Name = "NEONi16splat"; }
28 def nImmSplatI16 : Operand<i32> {
29 let PrintMethod = "printNEONModImmOperand";
30 let ParserMatchClass = nImmSplatI16AsmOperand;
32 def nImmSplatI32AsmOperand : AsmOperandClass { let Name = "NEONi32splat"; }
33 def nImmSplatI32 : Operand<i32> {
34 let PrintMethod = "printNEONModImmOperand";
35 let ParserMatchClass = nImmSplatI32AsmOperand;
// VMOV-specific 32-bit immediate (distinct asm-match class from the splat form).
37 def nImmVMOVI32AsmOperand : AsmOperandClass { let Name = "NEONi32vmov"; }
38 def nImmVMOVI32 : Operand<i32> {
39 let PrintMethod = "printNEONModImmOperand";
40 let ParserMatchClass = nImmVMOVI32AsmOperand;
42 def nImmSplatI64AsmOperand : AsmOperandClass { let Name = "NEONi64splat"; }
43 def nImmSplatI64 : Operand<i32> {
44 let PrintMethod = "printNEONModImmOperand";
45 let ParserMatchClass = nImmSplatI64AsmOperand;
// Vector lane-index operands. The ImmLeaf predicates bound the lane number by
// element size within a 64-bit D register: 8 lanes of i8, 4 of i16, 2 of i32.
// NOTE(review): closing "}]> {" / "}" lines were lost in extraction here
// (original numbering jumps 52 -> 54 etc.).
48 def VectorIndex8Operand : AsmOperandClass { let Name = "VectorIndex8"; }
49 def VectorIndex16Operand : AsmOperandClass { let Name = "VectorIndex16"; }
50 def VectorIndex32Operand : AsmOperandClass { let Name = "VectorIndex32"; }
51 def VectorIndex8 : Operand<i32>, ImmLeaf<i32, [{
52 return ((uint64_t)Imm) < 8;
54 let ParserMatchClass = VectorIndex8Operand;
55 let PrintMethod = "printVectorIndex";
56 let MIOperandInfo = (ops i32imm);
58 def VectorIndex16 : Operand<i32>, ImmLeaf<i32, [{
59 return ((uint64_t)Imm) < 4;
61 let ParserMatchClass = VectorIndex16Operand;
62 let PrintMethod = "printVectorIndex";
63 let MIOperandInfo = (ops i32imm);
65 def VectorIndex32 : Operand<i32>, ImmLeaf<i32, [{
66 return ((uint64_t)Imm) < 2;
68 let ParserMatchClass = VectorIndex32Operand;
69 let PrintMethod = "printVectorIndex";
70 let MIOperandInfo = (ops i32imm);
// Vector-list operands for VLD/VST: lists of 1-4 D registers (and the
// double-spaced "TwoQ" form). All share the parseVectorList parser method;
// each prints with its own printVectorList* method.
// NOTE(review): closing '}' lines were dropped by the extraction throughout.
73 def VecListOneDAsmOperand : AsmOperandClass {
74 let Name = "VecListOneD";
75 let ParserMethod = "parseVectorList";
77 def VecListOneD : RegisterOperand<DPR, "printVectorListOne"> {
78 let ParserMatchClass = VecListOneDAsmOperand;
80 // Register list of two sequential D registers.
81 def VecListTwoDAsmOperand : AsmOperandClass {
82 let Name = "VecListTwoD";
83 let ParserMethod = "parseVectorList";
85 def VecListTwoD : RegisterOperand<DPR, "printVectorListTwo"> {
86 let ParserMatchClass = VecListTwoDAsmOperand;
88 // Register list of three sequential D registers.
89 def VecListThreeDAsmOperand : AsmOperandClass {
90 let Name = "VecListThreeD";
91 let ParserMethod = "parseVectorList";
93 def VecListThreeD : RegisterOperand<DPR, "printVectorListThree"> {
94 let ParserMatchClass = VecListThreeDAsmOperand;
96 // Register list of four sequential D registers.
97 def VecListFourDAsmOperand : AsmOperandClass {
98 let Name = "VecListFourD";
99 let ParserMethod = "parseVectorList";
101 def VecListFourD : RegisterOperand<DPR, "printVectorListFour"> {
102 let ParserMatchClass = VecListFourDAsmOperand;
104 // Register list of two D registers spaced by 2 (two sequential Q registers).
105 def VecListTwoQAsmOperand : AsmOperandClass {
106 let Name = "VecListTwoQ";
107 let ParserMethod = "parseVectorList";
109 def VecListTwoQ : RegisterOperand<DPR, "printVectorListTwo"> {
110 let ParserMatchClass = VecListTwoQAsmOperand;
113 //===----------------------------------------------------------------------===//
114 // NEON-specific DAG Nodes.
115 //===----------------------------------------------------------------------===//
// Vector-compare node profiles: SDTARMVCMP is the two-operand form (integer
// result, both inputs same type); SDTARMVCMPZ is the compare-against-zero
// form with a single input and no constraints.
117 def SDTARMVCMP : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<1, 2>]>;
118 def SDTARMVCMPZ : SDTypeProfile<1, 1, []>;
// Comparison nodes; the "u" suffixed nodes are the unsigned variants and the
// "z" suffixed nodes compare against zero.
120 def NEONvceq : SDNode<"ARMISD::VCEQ", SDTARMVCMP>;
121 def NEONvceqz : SDNode<"ARMISD::VCEQZ", SDTARMVCMPZ>;
122 def NEONvcge : SDNode<"ARMISD::VCGE", SDTARMVCMP>;
123 def NEONvcgez : SDNode<"ARMISD::VCGEZ", SDTARMVCMPZ>;
124 def NEONvclez : SDNode<"ARMISD::VCLEZ", SDTARMVCMPZ>;
125 def NEONvcgeu : SDNode<"ARMISD::VCGEU", SDTARMVCMP>;
126 def NEONvcgt : SDNode<"ARMISD::VCGT", SDTARMVCMP>;
127 def NEONvcgtz : SDNode<"ARMISD::VCGTZ", SDTARMVCMPZ>;
128 def NEONvcltz : SDNode<"ARMISD::VCLTZ", SDTARMVCMPZ>;
129 def NEONvcgtu : SDNode<"ARMISD::VCGTU", SDTARMVCMP>;
130 def NEONvtst : SDNode<"ARMISD::VTST", SDTARMVCMP>;
132 // Types for vector shift by immediates. The "SHX" version is for long and
133 // narrow operations where the source and destination vectors have different
134 // types. The "SHINS" version is for shift and insert operations.
// NOTE(review): the trailing constraint lines of SDTARMVSH/SDTARMVSHX were
// lost in extraction (numbering jumps 135 -> 137 -> 139).
135 def SDTARMVSH : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
137 def SDTARMVSHX : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
139 def SDTARMVSHINS : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
140 SDTCisSameAs<0, 2>, SDTCisVT<3, i32>]>;
// Plain, long ("LL"), and narrowing ("RN") shifts; "s"/"u" = signed/unsigned.
142 def NEONvshl : SDNode<"ARMISD::VSHL", SDTARMVSH>;
143 def NEONvshrs : SDNode<"ARMISD::VSHRs", SDTARMVSH>;
144 def NEONvshru : SDNode<"ARMISD::VSHRu", SDTARMVSH>;
145 def NEONvshlls : SDNode<"ARMISD::VSHLLs", SDTARMVSHX>;
146 def NEONvshllu : SDNode<"ARMISD::VSHLLu", SDTARMVSHX>;
147 def NEONvshlli : SDNode<"ARMISD::VSHLLi", SDTARMVSHX>;
148 def NEONvshrn : SDNode<"ARMISD::VSHRN", SDTARMVSHX>;
// Rounding shifts.
150 def NEONvrshrs : SDNode<"ARMISD::VRSHRs", SDTARMVSH>;
151 def NEONvrshru : SDNode<"ARMISD::VRSHRu", SDTARMVSH>;
152 def NEONvrshrn : SDNode<"ARMISD::VRSHRN", SDTARMVSHX>;
// Saturating shifts ("q" prefix); "su" = signed input, unsigned saturation.
154 def NEONvqshls : SDNode<"ARMISD::VQSHLs", SDTARMVSH>;
155 def NEONvqshlu : SDNode<"ARMISD::VQSHLu", SDTARMVSH>;
156 def NEONvqshlsu : SDNode<"ARMISD::VQSHLsu", SDTARMVSH>;
157 def NEONvqshrns : SDNode<"ARMISD::VQSHRNs", SDTARMVSHX>;
158 def NEONvqshrnu : SDNode<"ARMISD::VQSHRNu", SDTARMVSHX>;
159 def NEONvqshrnsu : SDNode<"ARMISD::VQSHRNsu", SDTARMVSHX>;
// Saturating rounding narrowing shifts.
161 def NEONvqrshrns : SDNode<"ARMISD::VQRSHRNs", SDTARMVSHX>;
162 def NEONvqrshrnu : SDNode<"ARMISD::VQRSHRNu", SDTARMVSHX>;
163 def NEONvqrshrnsu : SDNode<"ARMISD::VQRSHRNsu", SDTARMVSHX>;
// Shift-and-insert (VSLI/VSRI) use the 3-input SHINS profile.
165 def NEONvsli : SDNode<"ARMISD::VSLI", SDTARMVSHINS>;
166 def NEONvsri : SDNode<"ARMISD::VSRI", SDTARMVSHINS>;
// Lane extraction: i32 result from a vector + lane index ("u"/"s" select
// zero- vs sign-extension of sub-i32 elements).
168 def SDTARMVGETLN : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisInt<1>,
170 def NEONvgetlaneu : SDNode<"ARMISD::VGETLANEu", SDTARMVGETLN>;
171 def NEONvgetlanes : SDNode<"ARMISD::VGETLANEs", SDTARMVGETLN>;
// Modified-immediate vector materialization (VMOV/VMVN imm) and the
// immediate forms of VORR/VBIC.
173 def SDTARMVMOVIMM : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
174 def NEONvmovImm : SDNode<"ARMISD::VMOVIMM", SDTARMVMOVIMM>;
175 def NEONvmvnImm : SDNode<"ARMISD::VMVNIMM", SDTARMVMOVIMM>;
177 def SDTARMVORRIMM : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
179 def NEONvorrImm : SDNode<"ARMISD::VORRIMM", SDTARMVORRIMM>;
180 def NEONvbicImm : SDNode<"ARMISD::VBICIMM", SDTARMVORRIMM>;
// Bitwise select: result and all three inputs are vectors of the same type.
// NOTE(review): constraint lines between 183 and 186 were lost in extraction.
182 def NEONvbsl : SDNode<"ARMISD::VBSL",
183 SDTypeProfile<1, 3, [SDTCisVec<0>,
186 SDTCisSameAs<0, 3>]>>;
188 def NEONvdup : SDNode<"ARMISD::VDUP", SDTypeProfile<1, 1, [SDTCisVec<0>]>>;
190 // VDUPLANE can produce a quad-register result from a double-register source,
191 // so the result is not constrained to match the source.
192 def NEONvduplane : SDNode<"ARMISD::VDUPLANE",
193 SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVec<1>,
// VEXT: extract a vector from a pair of concatenated vectors at an i32 offset.
196 def SDTARMVEXT : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
197 SDTCisSameAs<0, 2>, SDTCisVT<3, i32>]>;
198 def NEONvext : SDNode<"ARMISD::VEXT", SDTARMVEXT>;
// One-input shuffles (element reversal within 64/32/16-bit groups).
200 def SDTARMVSHUF : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0, 1>]>;
201 def NEONvrev64 : SDNode<"ARMISD::VREV64", SDTARMVSHUF>;
202 def NEONvrev32 : SDNode<"ARMISD::VREV32", SDTARMVSHUF>;
203 def NEONvrev16 : SDNode<"ARMISD::VREV16", SDTARMVSHUF>;
// Two-result shuffles: VZIP/VUZP/VTRN produce two vectors from two inputs.
205 def SDTARMVSHUF2 : SDTypeProfile<2, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
207 SDTCisSameAs<0, 3>]>;
208 def NEONzip : SDNode<"ARMISD::VZIP", SDTARMVSHUF2>;
209 def NEONuzp : SDNode<"ARMISD::VUZP", SDTARMVSHUF2>;
210 def NEONtrn : SDNode<"ARMISD::VTRN", SDTARMVSHUF2>;
// Widening multiplies (VMULL): integer result wider than the two same-typed
// integer inputs.
212 def SDTARMVMULL : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
213 SDTCisSameAs<1, 2>]>;
214 def NEONvmulls : SDNode<"ARMISD::VMULLs", SDTARMVMULL>;
215 def NEONvmullu : SDNode<"ARMISD::VMULLu", SDTARMVMULL>;
// Scalar f32 max/min lowered through NEON.
217 def SDTARMFMAX : SDTypeProfile<1, 2, [SDTCisVT<0, f32>, SDTCisSameAs<0, 1>,
218 SDTCisSameAs<0, 2>]>;
219 def NEONfmax : SDNode<"ARMISD::FMAX", SDTARMFMAX>;
220 def NEONfmin : SDNode<"ARMISD::FMIN", SDTARMFMAX>;
// Pattern leaves matching a VMOVIMM whose decoded modified-immediate is
// all-zeros (32-bit elements == 0) or all-ones (8-bit elements == 0xff).
// The C++ predicate decodes the cmode+imm8 payload via decodeNEONModImm.
// NOTE(review): the closing "}]>;" of each PatLeaf was lost in extraction.
222 def NEONimmAllZerosV: PatLeaf<(NEONvmovImm (i32 timm)), [{
223 ConstantSDNode *ConstVal = cast<ConstantSDNode>(N->getOperand(0));
224 unsigned EltBits = 0;
225 uint64_t EltVal = ARM_AM::decodeNEONModImm(ConstVal->getZExtValue(), EltBits);
226 return (EltBits == 32 && EltVal == 0);
229 def NEONimmAllOnesV: PatLeaf<(NEONvmovImm (i32 timm)), [{
230 ConstantSDNode *ConstVal = cast<ConstantSDNode>(N->getOperand(0));
231 unsigned EltBits = 0;
232 uint64_t EltVal = ARM_AM::decodeNEONModImm(ConstVal->getZExtValue(), EltBits);
233 return (EltBits == 8 && EltVal == 0xff);
236 //===----------------------------------------------------------------------===//
237 // NEON load / store instructions
238 //===----------------------------------------------------------------------===//
240 // Use VLDM to load a Q register as a D register pair.
241 // This is a pseudo instruction that is expanded to VLDMD after reg alloc.
// NOTE(review): the "def ..." header lines for these two pseudos (original
// lines 242 and 249) were lost in extraction; only the bodies remain.
243 : PseudoVFPLdStM<(outs QPR:$dst), (ins GPR:$Rn),
245 [(set QPR:$dst, (v2f64 (load GPR:$Rn)))]>;
247 // Use VSTM to store a Q register as a D register pair.
248 // This is a pseudo instruction that is expanded to VSTMD after reg alloc.
250 : PseudoVFPLdStM<(outs), (ins QPR:$src, GPR:$Rn),
252 [(store (v2f64 QPR:$src), GPR:$Rn)]>;
254 // Classes for VLD* pseudo-instructions with multi-register operands.
255 // These are expanded to real instructions after register allocation.
// Naming: Q/QQ/QQQQ = 1/2/4 Q-register result; WB variants also produce the
// updated base address in GPR:$wb (constraint "$addr.addr = $wb").
256 class VLDQPseudo<InstrItinClass itin>
257 : PseudoNLdSt<(outs QPR:$dst), (ins addrmode6:$addr), itin, "">;
258 class VLDQWBPseudo<InstrItinClass itin>
259 : PseudoNLdSt<(outs QPR:$dst, GPR:$wb),
260 (ins addrmode6:$addr, am6offset:$offset), itin,
262 class VLDQWBfixedPseudo<InstrItinClass itin>
263 : PseudoNLdSt<(outs QPR:$dst, GPR:$wb),
264 (ins addrmode6:$addr), itin,
266 class VLDQWBregisterPseudo<InstrItinClass itin>
267 : PseudoNLdSt<(outs QPR:$dst, GPR:$wb),
268 (ins addrmode6:$addr, rGPR:$offset), itin,
270 class VLDQQPseudo<InstrItinClass itin>
271 : PseudoNLdSt<(outs QQPR:$dst), (ins addrmode6:$addr), itin, "">;
272 class VLDQQWBPseudo<InstrItinClass itin>
273 : PseudoNLdSt<(outs QQPR:$dst, GPR:$wb),
274 (ins addrmode6:$addr, am6offset:$offset), itin,
276 class VLDQQQQPseudo<InstrItinClass itin>
277 : PseudoNLdSt<(outs QQQQPR:$dst), (ins addrmode6:$addr, QQQQPR:$src),itin,
279 class VLDQQQQWBPseudo<InstrItinClass itin>
280 : PseudoNLdSt<(outs QQQQPR:$dst, GPR:$wb),
281 (ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src), itin,
282 "$addr.addr = $wb, $src = $dst">;
// All VLD* definitions below are loads with no modeled side effects; the
// extra-def flag tells regalloc the multi-register defs need special handling.
284 let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
286 // VLD1 : Vector Load (multiple single elements)
// VLD1D loads one D register; VLD1Q loads a pair of D registers. op7_4
// carries the element-size bits; Dt is the data-type suffix string ("8"..."64").
287 class VLD1D<bits<4> op7_4, string Dt>
288 : NLdSt<0,0b10,0b0111,op7_4, (outs VecListOneD:$Vd),
289 (ins addrmode6:$Rn), IIC_VLD1,
290 "vld1", Dt, "$Vd, $Rn", "", []> {
293 let DecoderMethod = "DecodeVLDInstruction";
295 class VLD1Q<bits<4> op7_4, string Dt>
296 : NLdSt<0,0b10,0b1010,op7_4, (outs VecListTwoD:$Vd),
297 (ins addrmode6:$Rn), IIC_VLD1x2,
298 "vld1", Dt, "$Vd, $Rn", "", []> {
300 let Inst{5-4} = Rn{5-4};
301 let DecoderMethod = "DecodeVLDInstruction";
304 def VLD1d8 : VLD1D<{0,0,0,?}, "8">;
305 def VLD1d16 : VLD1D<{0,1,0,?}, "16">;
306 def VLD1d32 : VLD1D<{1,0,0,?}, "32">;
307 def VLD1d64 : VLD1D<{1,1,0,?}, "64">;
309 def VLD1q8 : VLD1Q<{0,0,?,?}, "8">;
310 def VLD1q16 : VLD1Q<{0,1,?,?}, "16">;
311 def VLD1q32 : VLD1Q<{1,0,?,?}, "32">;
312 def VLD1q64 : VLD1Q<{1,1,?,?}, "64">;
// Q-register pseudos expanded to the D-pair instructions after regalloc.
314 def VLD1q8Pseudo : VLDQPseudo<IIC_VLD1x2>;
315 def VLD1q16Pseudo : VLDQPseudo<IIC_VLD1x2>;
316 def VLD1q32Pseudo : VLDQPseudo<IIC_VLD1x2>;
317 def VLD1q64Pseudo : VLDQPseudo<IIC_VLD1x2>;
319 // ...with address register writeback:
// Each writeback multiclass emits a "_fixed" form (post-increment by the
// access size; Rm hard-wired to 0b1101) and a "_register" form (post-increment
// by rGPR:$Rm). Both write the updated base into GPR:$wb.
320 multiclass VLD1DWB<bits<4> op7_4, string Dt> {
321 def _fixed : NLdSt<0,0b10, 0b0111,op7_4, (outs VecListOneD:$Vd, GPR:$wb),
322 (ins addrmode6:$Rn), IIC_VLD1u,
323 "vld1", Dt, "$Vd, $Rn!",
324 "$Rn.addr = $wb", []> {
325 let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
327 let DecoderMethod = "DecodeVLDInstruction";
328 let AsmMatchConverter = "cvtVLDwbFixed";
330 def _register : NLdSt<0,0b10,0b0111,op7_4, (outs VecListOneD:$Vd, GPR:$wb),
331 (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1u,
332 "vld1", Dt, "$Vd, $Rn, $Rm",
333 "$Rn.addr = $wb", []> {
335 let DecoderMethod = "DecodeVLDInstruction";
336 let AsmMatchConverter = "cvtVLDwbRegister";
339 multiclass VLD1QWB<bits<4> op7_4, string Dt> {
340 def _fixed : NLdSt<0,0b10,0b1010,op7_4, (outs VecListTwoD:$Vd, GPR:$wb),
341 (ins addrmode6:$Rn), IIC_VLD1x2u,
342 "vld1", Dt, "$Vd, $Rn!",
343 "$Rn.addr = $wb", []> {
344 let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
345 let Inst{5-4} = Rn{5-4};
346 let DecoderMethod = "DecodeVLDInstruction";
347 let AsmMatchConverter = "cvtVLDwbFixed";
349 def _register : NLdSt<0,0b10,0b1010,op7_4, (outs VecListTwoD:$Vd, GPR:$wb),
350 (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
351 "vld1", Dt, "$Vd, $Rn, $Rm",
352 "$Rn.addr = $wb", []> {
353 let Inst{5-4} = Rn{5-4};
354 let DecoderMethod = "DecodeVLDInstruction";
355 let AsmMatchConverter = "cvtVLDwbRegister";
359 defm VLD1d8wb : VLD1DWB<{0,0,0,?}, "8">;
360 defm VLD1d16wb : VLD1DWB<{0,1,0,?}, "16">;
361 defm VLD1d32wb : VLD1DWB<{1,0,0,?}, "32">;
362 defm VLD1d64wb : VLD1DWB<{1,1,0,?}, "64">;
363 defm VLD1q8wb : VLD1QWB<{0,0,?,?}, "8">;
364 defm VLD1q16wb : VLD1QWB<{0,1,?,?}, "16">;
365 defm VLD1q32wb : VLD1QWB<{1,0,?,?}, "32">;
366 defm VLD1q64wb : VLD1QWB<{1,1,?,?}, "64">;
368 def VLD1q8PseudoWB_fixed : VLDQWBfixedPseudo<IIC_VLD1x2u>;
369 def VLD1q16PseudoWB_fixed : VLDQWBfixedPseudo<IIC_VLD1x2u>;
370 def VLD1q32PseudoWB_fixed : VLDQWBfixedPseudo<IIC_VLD1x2u>;
371 def VLD1q64PseudoWB_fixed : VLDQWBfixedPseudo<IIC_VLD1x2u>;
372 def VLD1q8PseudoWB_register : VLDQWBregisterPseudo<IIC_VLD1x2u>;
373 def VLD1q16PseudoWB_register : VLDQWBregisterPseudo<IIC_VLD1x2u>;
374 def VLD1q32PseudoWB_register : VLDQWBregisterPseudo<IIC_VLD1x2u>;
375 def VLD1q64PseudoWB_register : VLDQWBregisterPseudo<IIC_VLD1x2u>;
377 // ...with 3 registers
378 class VLD1D3<bits<4> op7_4, string Dt>
379 : NLdSt<0,0b10,0b0110,op7_4, (outs VecListThreeD:$Vd),
380 (ins addrmode6:$Rn), IIC_VLD1x3, "vld1", Dt,
381 "$Vd, $Rn", "", []> {
384 let DecoderMethod = "DecodeVLDInstruction";
// NOTE(review): the 3-register writeback forms reuse the IIC_VLD1x2u
// itinerary rather than an x3 one — possibly intentional upstream; confirm.
386 multiclass VLD1D3WB<bits<4> op7_4, string Dt> {
387 def _fixed : NLdSt<0,0b10,0b0110, op7_4, (outs VecListThreeD:$Vd, GPR:$wb),
388 (ins addrmode6:$Rn), IIC_VLD1x2u,
389 "vld1", Dt, "$Vd, $Rn!",
390 "$Rn.addr = $wb", []> {
391 let Rm = 0b1101; // NLdSt will assign to the right encoding bits.
392 let Inst{5-4} = Rn{5-4};
393 let DecoderMethod = "DecodeVLDInstruction";
394 let AsmMatchConverter = "cvtVLDwbFixed";
396 def _register : NLdSt<0,0b10,0b0110,op7_4, (outs VecListThreeD:$Vd, GPR:$wb),
397 (ins addrmode6:$Rn, rGPR:$Rm), IIC_VLD1x2u,
398 "vld1", Dt, "$Vd, $Rn, $Rm",
399 "$Rn.addr = $wb", []> {
400 let Inst{5-4} = Rn{5-4};
401 let DecoderMethod = "DecodeVLDInstruction";
402 let AsmMatchConverter = "cvtVLDwbRegister";
406 def VLD1d8T : VLD1D3<{0,0,0,?}, "8">;
407 def VLD1d16T : VLD1D3<{0,1,0,?}, "16">;
408 def VLD1d32T : VLD1D3<{1,0,0,?}, "32">;
409 def VLD1d64T : VLD1D3<{1,1,0,?}, "64">;
411 defm VLD1d8Twb : VLD1D3WB<{0,0,0,?}, "8">;
412 defm VLD1d16Twb : VLD1D3WB<{0,1,0,?}, "16">;
413 defm VLD1d32Twb : VLD1D3WB<{1,0,0,?}, "32">;
414 defm VLD1d64Twb : VLD1D3WB<{1,1,0,?}, "64">;
416 def VLD1d64TPseudo : VLDQQPseudo<IIC_VLD1x3>;
418 // ...with 4 registers
419 class VLD1D4<bits<4> op7_4, string Dt>
420 : NLdSt<0, 0b10, 0b0010, op7_4, (outs VecListFourD:$Vd),
421 (ins addrmode6:$Rn), IIC_VLD1x4, "vld1", Dt,
422 "$Vd, $Rn", "", []> {
424 let Inst{5-4} = Rn{5-4};
425 let DecoderMethod = "DecodeVLDInstruction";
// 4-register writeback uses the older single-class am6offset style rather
// than the _fixed/_register multiclass used by the 1/2/3-register forms.
427 class VLD1D4WB<bits<4> op7_4, string Dt>
428 : NLdSt<0, 0b10, 0b0010, op7_4, (outs VecListFourD:$Vd, GPR:$wb),
429 (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD1x4u, "vld1", Dt,
430 "$Vd, $Rn$Rm", "$Rn.addr = $wb",
432 let Inst{5-4} = Rn{5-4};
433 let DecoderMethod = "DecodeVLDInstruction";
436 def VLD1d8Q : VLD1D4<{0,0,?,?}, "8">;
437 def VLD1d16Q : VLD1D4<{0,1,?,?}, "16">;
438 def VLD1d32Q : VLD1D4<{1,0,?,?}, "32">;
439 def VLD1d64Q : VLD1D4<{1,1,?,?}, "64">;
441 def VLD1d8Q_UPD : VLD1D4WB<{0,0,?,?}, "8">;
442 def VLD1d16Q_UPD : VLD1D4WB<{0,1,?,?}, "16">;
443 def VLD1d32Q_UPD : VLD1D4WB<{1,0,?,?}, "32">;
444 def VLD1d64Q_UPD : VLD1D4WB<{1,1,?,?}, "64">;
446 def VLD1d64QPseudo : VLDQQPseudo<IIC_VLD1x4>;
447 def VLD1d64QPseudo_UPD : VLDQQWBPseudo<IIC_VLD1x4u>;
449 // VLD2 : Vector Load (multiple 2-element structures)
// VdTy selects the destination register-list operand (TwoD, FourD, or the
// double-spaced TwoQ form); op11_8 distinguishes single- vs double-spacing.
450 class VLD2D<bits<4> op11_8, bits<4> op7_4, string Dt, RegisterOperand VdTy>
451 : NLdSt<0, 0b10, op11_8, op7_4, (outs VdTy:$Vd),
452 (ins addrmode6:$Rn), IIC_VLD2,
453 "vld2", Dt, "$Vd, $Rn", "", []> {
455 let Inst{5-4} = Rn{5-4};
456 let DecoderMethod = "DecodeVLDInstruction";
458 class VLD2Q<bits<4> op7_4, string Dt, RegisterOperand VdTy>
459 : NLdSt<0, 0b10, 0b0011, op7_4,
461 (ins addrmode6:$Rn), IIC_VLD2x2,
462 "vld2", Dt, "$Vd, $Rn", "", []> {
464 let Inst{5-4} = Rn{5-4};
465 let DecoderMethod = "DecodeVLDInstruction";
468 def VLD2d8 : VLD2D<0b1000, {0,0,?,?}, "8", VecListTwoD>;
469 def VLD2d16 : VLD2D<0b1000, {0,1,?,?}, "16", VecListTwoD>;
470 def VLD2d32 : VLD2D<0b1000, {1,0,?,?}, "32", VecListTwoD>;
472 def VLD2q8 : VLD2Q<{0,0,?,?}, "8", VecListFourD>;
473 def VLD2q16 : VLD2Q<{0,1,?,?}, "16", VecListFourD>;
474 def VLD2q32 : VLD2Q<{1,0,?,?}, "32", VecListFourD>;
476 def VLD2d8Pseudo : VLDQPseudo<IIC_VLD2>;
477 def VLD2d16Pseudo : VLDQPseudo<IIC_VLD2>;
478 def VLD2d32Pseudo : VLDQPseudo<IIC_VLD2>;
480 def VLD2q8Pseudo : VLDQQPseudo<IIC_VLD2x2>;
481 def VLD2q16Pseudo : VLDQQPseudo<IIC_VLD2x2>;
482 def VLD2q32Pseudo : VLDQQPseudo<IIC_VLD2x2>;
484 // ...with address register writeback:
485 class VLD2DWB<bits<4> op11_8, bits<4> op7_4, string Dt, RegisterOperand VdTy>
486 : NLdSt<0, 0b10, op11_8, op7_4, (outs VdTy:$Vd, GPR:$wb),
487 (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD2u,
488 "vld2", Dt, "$Vd, $Rn$Rm",
489 "$Rn.addr = $wb", []> {
490 let Inst{5-4} = Rn{5-4};
491 let DecoderMethod = "DecodeVLDInstruction";
493 class VLD2QWB<bits<4> op7_4, string Dt, RegisterOperand VdTy>
494 : NLdSt<0, 0b10, 0b0011, op7_4,
495 (outs VdTy:$Vd, GPR:$wb),
496 (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD2x2u,
497 "vld2", Dt, "$Vd, $Rn$Rm",
498 "$Rn.addr = $wb", []> {
499 let Inst{5-4} = Rn{5-4};
500 let DecoderMethod = "DecodeVLDInstruction";
503 def VLD2d8_UPD : VLD2DWB<0b1000, {0,0,?,?}, "8", VecListTwoD>;
504 def VLD2d16_UPD : VLD2DWB<0b1000, {0,1,?,?}, "16", VecListTwoD>;
505 def VLD2d32_UPD : VLD2DWB<0b1000, {1,0,?,?}, "32", VecListTwoD>;
507 def VLD2q8_UPD : VLD2QWB<{0,0,?,?}, "8", VecListFourD>;
508 def VLD2q16_UPD : VLD2QWB<{0,1,?,?}, "16", VecListFourD>;
509 def VLD2q32_UPD : VLD2QWB<{1,0,?,?}, "32", VecListFourD>;
511 def VLD2d8Pseudo_UPD : VLDQWBPseudo<IIC_VLD2u>;
512 def VLD2d16Pseudo_UPD : VLDQWBPseudo<IIC_VLD2u>;
513 def VLD2d32Pseudo_UPD : VLDQWBPseudo<IIC_VLD2u>;
515 def VLD2q8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD2x2u>;
516 def VLD2q16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD2x2u>;
517 def VLD2q32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD2x2u>;
519 // ...with double-spaced registers
520 def VLD2b8 : VLD2D<0b1001, {0,0,?,?}, "8", VecListTwoQ>;
521 def VLD2b16 : VLD2D<0b1001, {0,1,?,?}, "16", VecListTwoQ>;
522 def VLD2b32 : VLD2D<0b1001, {1,0,?,?}, "32", VecListTwoQ>;
523 def VLD2b8_UPD : VLD2DWB<0b1001, {0,0,?,?}, "8", VecListTwoQ>;
524 def VLD2b16_UPD : VLD2DWB<0b1001, {0,1,?,?}, "16", VecListTwoQ>;
525 def VLD2b32_UPD : VLD2DWB<0b1001, {1,0,?,?}, "32", VecListTwoQ>;
527 // VLD3 : Vector Load (multiple 3-element structures)
// Three explicit DPR defs rather than a vector-list operand; the asm string
// prints the register list braces itself ("\\{...\\}").
528 class VLD3D<bits<4> op11_8, bits<4> op7_4, string Dt>
529 : NLdSt<0, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3),
530 (ins addrmode6:$Rn), IIC_VLD3,
531 "vld3", Dt, "\\{$Vd, $dst2, $dst3\\}, $Rn", "", []> {
534 let DecoderMethod = "DecodeVLDInstruction";
537 def VLD3d8 : VLD3D<0b0100, {0,0,0,?}, "8">;
538 def VLD3d16 : VLD3D<0b0100, {0,1,0,?}, "16">;
539 def VLD3d32 : VLD3D<0b0100, {1,0,0,?}, "32">;
541 def VLD3d8Pseudo : VLDQQPseudo<IIC_VLD3>;
542 def VLD3d16Pseudo : VLDQQPseudo<IIC_VLD3>;
543 def VLD3d32Pseudo : VLDQQPseudo<IIC_VLD3>;
545 // ...with address register writeback:
546 class VLD3DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
547 : NLdSt<0, 0b10, op11_8, op7_4,
548 (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, GPR:$wb),
549 (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD3u,
550 "vld3", Dt, "\\{$Vd, $dst2, $dst3\\}, $Rn$Rm",
551 "$Rn.addr = $wb", []> {
553 let DecoderMethod = "DecodeVLDInstruction";
556 def VLD3d8_UPD : VLD3DWB<0b0100, {0,0,0,?}, "8">;
557 def VLD3d16_UPD : VLD3DWB<0b0100, {0,1,0,?}, "16">;
558 def VLD3d32_UPD : VLD3DWB<0b0100, {1,0,0,?}, "32">;
560 def VLD3d8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3u>;
561 def VLD3d16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3u>;
562 def VLD3d32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3u>;
564 // ...with double-spaced registers:
565 def VLD3q8 : VLD3D<0b0101, {0,0,0,?}, "8">;
566 def VLD3q16 : VLD3D<0b0101, {0,1,0,?}, "16">;
567 def VLD3q32 : VLD3D<0b0101, {1,0,0,?}, "32">;
568 def VLD3q8_UPD : VLD3DWB<0b0101, {0,0,0,?}, "8">;
569 def VLD3q16_UPD : VLD3DWB<0b0101, {0,1,0,?}, "16">;
570 def VLD3q32_UPD : VLD3DWB<0b0101, {1,0,0,?}, "32">;
572 def VLD3q8Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
573 def VLD3q16Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
574 def VLD3q32Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
576 // ...alternate versions to be allocated odd register numbers:
577 def VLD3q8oddPseudo : VLDQQQQPseudo<IIC_VLD3>;
578 def VLD3q16oddPseudo : VLDQQQQPseudo<IIC_VLD3>;
579 def VLD3q32oddPseudo : VLDQQQQPseudo<IIC_VLD3>;
581 def VLD3q8oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
582 def VLD3q16oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
583 def VLD3q32oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD3u>;
585 // VLD4 : Vector Load (multiple 4-element structures)
// Parallel in structure to VLD3D above, with a fourth destination register.
586 class VLD4D<bits<4> op11_8, bits<4> op7_4, string Dt>
587 : NLdSt<0, 0b10, op11_8, op7_4,
588 (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4),
589 (ins addrmode6:$Rn), IIC_VLD4,
590 "vld4", Dt, "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn", "", []> {
592 let Inst{5-4} = Rn{5-4};
593 let DecoderMethod = "DecodeVLDInstruction";
596 def VLD4d8 : VLD4D<0b0000, {0,0,?,?}, "8">;
597 def VLD4d16 : VLD4D<0b0000, {0,1,?,?}, "16">;
598 def VLD4d32 : VLD4D<0b0000, {1,0,?,?}, "32">;
600 def VLD4d8Pseudo : VLDQQPseudo<IIC_VLD4>;
601 def VLD4d16Pseudo : VLDQQPseudo<IIC_VLD4>;
602 def VLD4d32Pseudo : VLDQQPseudo<IIC_VLD4>;
604 // ...with address register writeback:
605 class VLD4DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
606 : NLdSt<0, 0b10, op11_8, op7_4,
607 (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
608 (ins addrmode6:$Rn, am6offset:$Rm), IIC_VLD4u,
609 "vld4", Dt, "\\{$Vd, $dst2, $dst3, $dst4\\}, $Rn$Rm",
610 "$Rn.addr = $wb", []> {
611 let Inst{5-4} = Rn{5-4};
612 let DecoderMethod = "DecodeVLDInstruction";
615 def VLD4d8_UPD : VLD4DWB<0b0000, {0,0,?,?}, "8">;
616 def VLD4d16_UPD : VLD4DWB<0b0000, {0,1,?,?}, "16">;
617 def VLD4d32_UPD : VLD4DWB<0b0000, {1,0,?,?}, "32">;
619 def VLD4d8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4u>;
620 def VLD4d16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4u>;
621 def VLD4d32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4u>;
623 // ...with double-spaced registers:
624 def VLD4q8 : VLD4D<0b0001, {0,0,?,?}, "8">;
625 def VLD4q16 : VLD4D<0b0001, {0,1,?,?}, "16">;
626 def VLD4q32 : VLD4D<0b0001, {1,0,?,?}, "32">;
627 def VLD4q8_UPD : VLD4DWB<0b0001, {0,0,?,?}, "8">;
628 def VLD4q16_UPD : VLD4DWB<0b0001, {0,1,?,?}, "16">;
629 def VLD4q32_UPD : VLD4DWB<0b0001, {1,0,?,?}, "32">;
631 def VLD4q8Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
632 def VLD4q16Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
633 def VLD4q32Pseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
635 // ...alternate versions to be allocated odd register numbers:
636 def VLD4q8oddPseudo : VLDQQQQPseudo<IIC_VLD4>;
637 def VLD4q16oddPseudo : VLDQQQQPseudo<IIC_VLD4>;
638 def VLD4q32oddPseudo : VLDQQQQPseudo<IIC_VLD4>;
640 def VLD4q8oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
641 def VLD4q16oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
642 def VLD4q32oddPseudo_UPD : VLDQQQQWBPseudo<IIC_VLD4u>;
644 } // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
646 // Classes for VLD*LN pseudo-instructions with multi-register operands.
647 // These are expanded to real instructions after register allocation.
// Lane-load pseudos: $src is tied to $dst ("$src = $dst") because only one
// lane of the destination is written; WB variants also update the base addr.
648 class VLDQLNPseudo<InstrItinClass itin>
649 : PseudoNLdSt<(outs QPR:$dst),
650 (ins addrmode6:$addr, QPR:$src, nohash_imm:$lane),
651 itin, "$src = $dst">;
652 class VLDQLNWBPseudo<InstrItinClass itin>
653 : PseudoNLdSt<(outs QPR:$dst, GPR:$wb),
654 (ins addrmode6:$addr, am6offset:$offset, QPR:$src,
655 nohash_imm:$lane), itin, "$addr.addr = $wb, $src = $dst">;
656 class VLDQQLNPseudo<InstrItinClass itin>
657 : PseudoNLdSt<(outs QQPR:$dst),
658 (ins addrmode6:$addr, QQPR:$src, nohash_imm:$lane),
659 itin, "$src = $dst">;
660 class VLDQQLNWBPseudo<InstrItinClass itin>
661 : PseudoNLdSt<(outs QQPR:$dst, GPR:$wb),
662 (ins addrmode6:$addr, am6offset:$offset, QQPR:$src,
663 nohash_imm:$lane), itin, "$addr.addr = $wb, $src = $dst">;
664 class VLDQQQQLNPseudo<InstrItinClass itin>
665 : PseudoNLdSt<(outs QQQQPR:$dst),
666 (ins addrmode6:$addr, QQQQPR:$src, nohash_imm:$lane),
667 itin, "$src = $dst">;
668 class VLDQQQQLNWBPseudo<InstrItinClass itin>
669 : PseudoNLdSt<(outs QQQQPR:$dst, GPR:$wb),
670 (ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src,
671 nohash_imm:$lane), itin, "$addr.addr = $wb, $src = $dst">;
673 // VLD1LN : Vector Load (single element to one lane)
// Loads one element into lane $lane of a D register, preserving other lanes
// via the tied $src input. The LN32 variant uses addrmode6oneL32 for the
// 32-bit element form. Lane bits are assigned per-element-size in the defs.
// NOTE(review): template-parameter and pattern continuation lines were lost
// in extraction (numbering jumps 674 -> 676, 678 -> 680, 681 -> 684).
674 class VLD1LN<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
676 : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd),
677 (ins addrmode6:$Rn, DPR:$src, nohash_imm:$lane),
678 IIC_VLD1ln, "vld1", Dt, "\\{$Vd[$lane]\\}, $Rn",
680 [(set DPR:$Vd, (vector_insert (Ty DPR:$src),
681 (i32 (LoadOp addrmode6:$Rn)),
684 let DecoderMethod = "DecodeVLD1LN";
686 class VLD1LN32<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
688 : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd),
689 (ins addrmode6oneL32:$Rn, DPR:$src, nohash_imm:$lane),
690 IIC_VLD1ln, "vld1", Dt, "\\{$Vd[$lane]\\}, $Rn",
692 [(set DPR:$Vd, (vector_insert (Ty DPR:$src),
693 (i32 (LoadOp addrmode6oneL32:$Rn)),
696 let DecoderMethod = "DecodeVLD1LN";
698 class VLD1QLNPseudo<ValueType Ty, PatFrag LoadOp> : VLDQLNPseudo<IIC_VLD1ln> {
699 let Pattern = [(set QPR:$dst, (vector_insert (Ty QPR:$src),
700 (i32 (LoadOp addrmode6:$addr)),
704 def VLD1LNd8 : VLD1LN<0b0000, {?,?,?,0}, "8", v8i8, extloadi8> {
705 let Inst{7-5} = lane{2-0};
707 def VLD1LNd16 : VLD1LN<0b0100, {?,?,0,?}, "16", v4i16, extloadi16> {
708 let Inst{7-6} = lane{1-0};
711 def VLD1LNd32 : VLD1LN32<0b1000, {?,0,?,?}, "32", v2i32, load> {
712 let Inst{7} = lane{0};
717 def VLD1LNq8Pseudo : VLD1QLNPseudo<v16i8, extloadi8>;
718 def VLD1LNq16Pseudo : VLD1QLNPseudo<v8i16, extloadi16>;
719 def VLD1LNq32Pseudo : VLD1QLNPseudo<v4i32, load>;
// Float lane loads are selected onto the i32 lane-load instructions.
721 def : Pat<(vector_insert (v2f32 DPR:$src),
722 (f32 (load addrmode6:$addr)), imm:$lane),
723 (VLD1LNd32 addrmode6:$addr, DPR:$src, imm:$lane)>;
724 def : Pat<(vector_insert (v4f32 QPR:$src),
725 (f32 (load addrmode6:$addr)), imm:$lane),
726 (VLD1LNq32Pseudo addrmode6:$addr, QPR:$src, imm:$lane)>;
// Re-open the load attributes for the remaining lane-load definitions.
728 let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
730 // ...with address register writeback:
731 class VLD1LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
732 : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd, GPR:$wb),
733 (ins addrmode6:$Rn, am6offset:$Rm,
734 DPR:$src, nohash_imm:$lane), IIC_VLD1lnu, "vld1", Dt,
735 "\\{$Vd[$lane]\\}, $Rn$Rm",
736 "$src = $Vd, $Rn.addr = $wb", []> {
737 let DecoderMethod = "DecodeVLD1LN";
740 def VLD1LNd8_UPD : VLD1LNWB<0b0000, {?,?,?,0}, "8"> {
741 let Inst{7-5} = lane{2-0};
743 def VLD1LNd16_UPD : VLD1LNWB<0b0100, {?,?,0,?}, "16"> {
744 let Inst{7-6} = lane{1-0};
747 def VLD1LNd32_UPD : VLD1LNWB<0b1000, {?,0,?,?}, "32"> {
748 let Inst{7} = lane{0};
753 def VLD1LNq8Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD1lnu>;
754 def VLD1LNq16Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD1lnu>;
755 def VLD1LNq32Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD1lnu>;
757 // VLD2LN : Vector Load (single 2-element structure to one lane)
// Two tied source/dest D registers; "q" defs use the double-spaced encodings
// (op7_4 spacing bit set), which only exist for 16- and 32-bit elements.
758 class VLD2LN<bits<4> op11_8, bits<4> op7_4, string Dt>
759 : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2),
760 (ins addrmode6:$Rn, DPR:$src1, DPR:$src2, nohash_imm:$lane),
761 IIC_VLD2ln, "vld2", Dt, "\\{$Vd[$lane], $dst2[$lane]\\}, $Rn",
762 "$src1 = $Vd, $src2 = $dst2", []> {
765 let DecoderMethod = "DecodeVLD2LN";
768 def VLD2LNd8 : VLD2LN<0b0001, {?,?,?,?}, "8"> {
769 let Inst{7-5} = lane{2-0};
771 def VLD2LNd16 : VLD2LN<0b0101, {?,?,0,?}, "16"> {
772 let Inst{7-6} = lane{1-0};
774 def VLD2LNd32 : VLD2LN<0b1001, {?,0,0,?}, "32"> {
775 let Inst{7} = lane{0};
778 def VLD2LNd8Pseudo : VLDQLNPseudo<IIC_VLD2ln>;
779 def VLD2LNd16Pseudo : VLDQLNPseudo<IIC_VLD2ln>;
780 def VLD2LNd32Pseudo : VLDQLNPseudo<IIC_VLD2ln>;
782 // ...with double-spaced registers:
783 def VLD2LNq16 : VLD2LN<0b0101, {?,?,1,?}, "16"> {
784 let Inst{7-6} = lane{1-0};
786 def VLD2LNq32 : VLD2LN<0b1001, {?,1,0,?}, "32"> {
787 let Inst{7} = lane{0};
790 def VLD2LNq16Pseudo : VLDQQLNPseudo<IIC_VLD2ln>;
791 def VLD2LNq32Pseudo : VLDQQLNPseudo<IIC_VLD2ln>;
793 // ...with address register writeback:
794 class VLD2LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
795 : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
796 (ins addrmode6:$Rn, am6offset:$Rm,
797 DPR:$src1, DPR:$src2, nohash_imm:$lane), IIC_VLD2lnu, "vld2", Dt,
798 "\\{$Vd[$lane], $dst2[$lane]\\}, $Rn$Rm",
799 "$src1 = $Vd, $src2 = $dst2, $Rn.addr = $wb", []> {
801 let DecoderMethod = "DecodeVLD2LN";
804 def VLD2LNd8_UPD : VLD2LNWB<0b0001, {?,?,?,?}, "8"> {
805 let Inst{7-5} = lane{2-0};
807 def VLD2LNd16_UPD : VLD2LNWB<0b0101, {?,?,0,?}, "16"> {
808 let Inst{7-6} = lane{1-0};
810 def VLD2LNd32_UPD : VLD2LNWB<0b1001, {?,0,0,?}, "32"> {
811 let Inst{7} = lane{0};
814 def VLD2LNd8Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD2lnu>;
815 def VLD2LNd16Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD2lnu>;
816 def VLD2LNd32Pseudo_UPD : VLDQLNWBPseudo<IIC_VLD2lnu>;
818 def VLD2LNq16_UPD : VLD2LNWB<0b0101, {?,?,1,?}, "16"> {
819 let Inst{7-6} = lane{1-0};
821 def VLD2LNq32_UPD : VLD2LNWB<0b1001, {?,1,0,?}, "32"> {
822 let Inst{7} = lane{0};
825 def VLD2LNq16Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD2lnu>;
826 def VLD2LNq32Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD2lnu>;
828 // VLD3LN : Vector Load (single 3-element structure to one lane)
// Mirrors VLD2LN with a third tied register; double-spaced "q" forms again
// exist only for 16- and 32-bit elements.
829 class VLD3LN<bits<4> op11_8, bits<4> op7_4, string Dt>
830 : NLdStLn<1, 0b10, op11_8, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3),
831 (ins addrmode6:$Rn, DPR:$src1, DPR:$src2, DPR:$src3,
832 nohash_imm:$lane), IIC_VLD3ln, "vld3", Dt,
833 "\\{$Vd[$lane], $dst2[$lane], $dst3[$lane]\\}, $Rn",
834 "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3", []> {
836 let DecoderMethod = "DecodeVLD3LN";
839 def VLD3LNd8 : VLD3LN<0b0010, {?,?,?,0}, "8"> {
840 let Inst{7-5} = lane{2-0};
842 def VLD3LNd16 : VLD3LN<0b0110, {?,?,0,0}, "16"> {
843 let Inst{7-6} = lane{1-0};
845 def VLD3LNd32 : VLD3LN<0b1010, {?,0,0,0}, "32"> {
846 let Inst{7} = lane{0};
849 def VLD3LNd8Pseudo : VLDQQLNPseudo<IIC_VLD3ln>;
850 def VLD3LNd16Pseudo : VLDQQLNPseudo<IIC_VLD3ln>;
851 def VLD3LNd32Pseudo : VLDQQLNPseudo<IIC_VLD3ln>;
853 // ...with double-spaced registers:
854 def VLD3LNq16 : VLD3LN<0b0110, {?,?,1,0}, "16"> {
855 let Inst{7-6} = lane{1-0};
857 def VLD3LNq32 : VLD3LN<0b1010, {?,1,0,0}, "32"> {
858 let Inst{7} = lane{0};
861 def VLD3LNq16Pseudo : VLDQQQQLNPseudo<IIC_VLD3ln>;
862 def VLD3LNq32Pseudo : VLDQQQQLNPseudo<IIC_VLD3ln>;
864 // ...with address register writeback:
865 class VLD3LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
866 : NLdStLn<1, 0b10, op11_8, op7_4,
867 (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, GPR:$wb),
868 (ins addrmode6:$Rn, am6offset:$Rm,
869 DPR:$src1, DPR:$src2, DPR:$src3, nohash_imm:$lane),
870 IIC_VLD3lnu, "vld3", Dt,
871 "\\{$Vd[$lane], $dst2[$lane], $dst3[$lane]\\}, $Rn$Rm",
872 "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3, $Rn.addr = $wb",
874 let DecoderMethod = "DecodeVLD3LN";
877 def VLD3LNd8_UPD : VLD3LNWB<0b0010, {?,?,?,0}, "8"> {
878 let Inst{7-5} = lane{2-0};
880 def VLD3LNd16_UPD : VLD3LNWB<0b0110, {?,?,0,0}, "16"> {
881 let Inst{7-6} = lane{1-0};
883 def VLD3LNd32_UPD : VLD3LNWB<0b1010, {?,0,0,0}, "32"> {
884 let Inst{7} = lane{0};
887 def VLD3LNd8Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD3lnu>;
888 def VLD3LNd16Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD3lnu>;
889 def VLD3LNd32Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD3lnu>;
891 def VLD3LNq16_UPD : VLD3LNWB<0b0110, {?,?,1,0}, "16"> {
892 let Inst{7-6} = lane{1-0};
894 def VLD3LNq32_UPD : VLD3LNWB<0b1010, {?,1,0,0}, "32"> {
895 let Inst{7} = lane{0};
898 def VLD3LNq16Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD3lnu>;
899 def VLD3LNq32Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD3lnu>;
901 // VLD4LN : Vector Load (single 4-element structure to one lane)
// VLD4LN: four tied D-register destinations; same lane-encoding scheme as
// the VLD2LN/VLD3LN classes above.
902 class VLD4LN<bits<4> op11_8, bits<4> op7_4, string Dt>
903 : NLdStLn<1, 0b10, op11_8, op7_4,
904 (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4),
905 (ins addrmode6:$Rn, DPR:$src1, DPR:$src2, DPR:$src3, DPR:$src4,
906 nohash_imm:$lane), IIC_VLD4ln, "vld4", Dt,
907 "\\{$Vd[$lane], $dst2[$lane], $dst3[$lane], $dst4[$lane]\\}, $Rn",
908 "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3, $src4 = $dst4", []> {
911 let DecoderMethod = "DecodeVLD4LN";
914 def VLD4LNd8 : VLD4LN<0b0011, {?,?,?,?}, "8"> {
915 let Inst{7-5} = lane{2-0};
917 def VLD4LNd16 : VLD4LN<0b0111, {?,?,0,?}, "16"> {
918 let Inst{7-6} = lane{1-0};
920 def VLD4LNd32 : VLD4LN<0b1011, {?,0,?,?}, "32"> {
921 let Inst{7} = lane{0};
925 def VLD4LNd8Pseudo : VLDQQLNPseudo<IIC_VLD4ln>;
926 def VLD4LNd16Pseudo : VLDQQLNPseudo<IIC_VLD4ln>;
927 def VLD4LNd32Pseudo : VLDQQLNPseudo<IIC_VLD4ln>;
929 // ...with double-spaced registers:
930 def VLD4LNq16 : VLD4LN<0b0111, {?,?,1,?}, "16"> {
931 let Inst{7-6} = lane{1-0};
933 def VLD4LNq32 : VLD4LN<0b1011, {?,1,?,?}, "32"> {
934 let Inst{7} = lane{0};
938 def VLD4LNq16Pseudo : VLDQQQQLNPseudo<IIC_VLD4ln>;
939 def VLD4LNq32Pseudo : VLDQQQQLNPseudo<IIC_VLD4ln>;
941 // ...with address register writeback:
942 class VLD4LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
943 : NLdStLn<1, 0b10, op11_8, op7_4,
944 (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
945 (ins addrmode6:$Rn, am6offset:$Rm,
946 DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4, nohash_imm:$lane),
947 IIC_VLD4lnu, "vld4", Dt,
948 "\\{$Vd[$lane], $dst2[$lane], $dst3[$lane], $dst4[$lane]\\}, $Rn$Rm",
949 "$src1 = $Vd, $src2 = $dst2, $src3 = $dst3, $src4 = $dst4, $Rn.addr = $wb",
// NOTE(review): stray space before ';' below — harmless, kept byte-identical.
952 let DecoderMethod = "DecodeVLD4LN" ;
955 def VLD4LNd8_UPD : VLD4LNWB<0b0011, {?,?,?,?}, "8"> {
956 let Inst{7-5} = lane{2-0};
958 def VLD4LNd16_UPD : VLD4LNWB<0b0111, {?,?,0,?}, "16"> {
959 let Inst{7-6} = lane{1-0};
961 def VLD4LNd32_UPD : VLD4LNWB<0b1011, {?,0,?,?}, "32"> {
962 let Inst{7} = lane{0};
966 def VLD4LNd8Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD4lnu>;
967 def VLD4LNd16Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD4lnu>;
968 def VLD4LNd32Pseudo_UPD : VLDQQLNWBPseudo<IIC_VLD4lnu>;
970 def VLD4LNq16_UPD : VLD4LNWB<0b0111, {?,?,1,?}, "16"> {
971 let Inst{7-6} = lane{1-0};
973 def VLD4LNq32_UPD : VLD4LNWB<0b1011, {?,1,?,?}, "32"> {
974 let Inst{7} = lane{0};
978 def VLD4LNq16Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD4lnu>;
979 def VLD4LNq32Pseudo_UPD : VLDQQQQLNWBPseudo<IIC_VLD4lnu>;
981 } // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
983 // VLD1DUP : Vector Load (single element to all lanes)
// VLD1DUP carries a selection pattern: it matches a scalar load followed by
// NEONvdup (splat), so it is usable for ISel, unlike most classes here.
984 class VLD1DUP<bits<4> op7_4, string Dt, ValueType Ty, PatFrag LoadOp>
985 : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd), (ins addrmode6dup:$Rn),
986 IIC_VLD1dup, "vld1", Dt, "\\{$Vd[]\\}, $Rn", "",
987 [(set DPR:$Vd, (Ty (NEONvdup (i32 (LoadOp addrmode6dup:$Rn)))))]> {
990 let DecoderMethod = "DecodeVLD1DupInstruction";
// Q-register pseudo with the same load-and-splat pattern, expanded after
// register allocation.
992 class VLD1QDUPPseudo<ValueType Ty, PatFrag LoadOp> : VLDQPseudo<IIC_VLD1dup> {
993 let Pattern = [(set QPR:$dst,
994 (Ty (NEONvdup (i32 (LoadOp addrmode6dup:$addr)))))];
997 def VLD1DUPd8 : VLD1DUP<{0,0,0,?}, "8", v8i8, extloadi8>;
998 def VLD1DUPd16 : VLD1DUP<{0,1,0,?}, "16", v4i16, extloadi16>;
999 def VLD1DUPd32 : VLD1DUP<{1,0,0,?}, "32", v2i32, load>;
1001 def VLD1DUPq8Pseudo : VLD1QDUPPseudo<v16i8, extloadi8>;
1002 def VLD1DUPq16Pseudo : VLD1QDUPPseudo<v8i16, extloadi16>;
1003 def VLD1DUPq32Pseudo : VLD1QDUPPseudo<v4i32, load>;
// Float splats reuse the 32-bit integer forms.
1005 def : Pat<(v2f32 (NEONvdup (f32 (load addrmode6dup:$addr)))),
1006 (VLD1DUPd32 addrmode6:$addr)>;
1007 def : Pat<(v4f32 (NEONvdup (f32 (load addrmode6dup:$addr)))),
1008 (VLD1DUPq32Pseudo addrmode6:$addr)>;
1010 let mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1 in {
// Two-register (Q) dup form — no pattern; selected via the pseudos above.
1012 class VLD1QDUP<bits<4> op7_4, string Dt>
1013 : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd, DPR:$dst2),
1014 (ins addrmode6dup:$Rn), IIC_VLD1dup,
1015 "vld1", Dt, "\\{$Vd[], $dst2[]\\}, $Rn", "", []> {
// Inst{4} copies the alignment bit from the address operand.
1017 let Inst{4} = Rn{4};
1018 let DecoderMethod = "DecodeVLD1DupInstruction";
1021 def VLD1DUPq8 : VLD1QDUP<{0,0,1,0}, "8">;
1022 def VLD1DUPq16 : VLD1QDUP<{0,1,1,?}, "16">;
1023 def VLD1DUPq32 : VLD1QDUP<{1,0,1,?}, "32">;
1025 // ...with address register writeback:
1026 class VLD1DUPWB<bits<4> op7_4, string Dt>
1027 : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd, GPR:$wb),
1028 (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD1dupu,
1029 "vld1", Dt, "\\{$Vd[]\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
1030 let Inst{4} = Rn{4};
1031 let DecoderMethod = "DecodeVLD1DupInstruction";
1033 class VLD1QDUPWB<bits<4> op7_4, string Dt>
1034 : NLdSt<1, 0b10, 0b1100, op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
1035 (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD1dupu,
1036 "vld1", Dt, "\\{$Vd[], $dst2[]\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
1037 let Inst{4} = Rn{4};
1038 let DecoderMethod = "DecodeVLD1DupInstruction";
1041 def VLD1DUPd8_UPD : VLD1DUPWB<{0,0,0,0}, "8">;
1042 def VLD1DUPd16_UPD : VLD1DUPWB<{0,1,0,?}, "16">;
1043 def VLD1DUPd32_UPD : VLD1DUPWB<{1,0,0,?}, "32">;
1045 def VLD1DUPq8_UPD : VLD1QDUPWB<{0,0,1,0}, "8">;
1046 def VLD1DUPq16_UPD : VLD1QDUPWB<{0,1,1,?}, "16">;
1047 def VLD1DUPq32_UPD : VLD1QDUPWB<{1,0,1,?}, "32">;
1049 def VLD1DUPq8Pseudo_UPD : VLDQWBPseudo<IIC_VLD1dupu>;
1050 def VLD1DUPq16Pseudo_UPD : VLDQWBPseudo<IIC_VLD1dupu>;
1051 def VLD1DUPq32Pseudo_UPD : VLDQWBPseudo<IIC_VLD1dupu>;
1053 // VLD2DUP : Vector Load (single 2-element structure to all lanes)
1054 class VLD2DUP<bits<4> op7_4, string Dt>
1055 : NLdSt<1, 0b10, 0b1101, op7_4, (outs DPR:$Vd, DPR:$dst2),
1056 (ins addrmode6dup:$Rn), IIC_VLD2dup,
1057 "vld2", Dt, "\\{$Vd[], $dst2[]\\}, $Rn", "", []> {
1059 let Inst{4} = Rn{4};
1060 let DecoderMethod = "DecodeVLD2DupInstruction";
1063 def VLD2DUPd8 : VLD2DUP<{0,0,0,?}, "8">;
1064 def VLD2DUPd16 : VLD2DUP<{0,1,0,?}, "16">;
1065 def VLD2DUPd32 : VLD2DUP<{1,0,0,?}, "32">;
1067 def VLD2DUPd8Pseudo : VLDQPseudo<IIC_VLD2dup>;
1068 def VLD2DUPd16Pseudo : VLDQPseudo<IIC_VLD2dup>;
1069 def VLD2DUPd32Pseudo : VLDQPseudo<IIC_VLD2dup>;
1071 // ...with double-spaced registers (not used for codegen):
// The x2 suffix marks the double-spaced (register-stride 2) encodings,
// distinguished by the third op7_4 bit.
1072 def VLD2DUPd8x2 : VLD2DUP<{0,0,1,?}, "8">;
1073 def VLD2DUPd16x2 : VLD2DUP<{0,1,1,?}, "16">;
1074 def VLD2DUPd32x2 : VLD2DUP<{1,0,1,?}, "32">;
1076 // ...with address register writeback:
1077 class VLD2DUPWB<bits<4> op7_4, string Dt>
1078 : NLdSt<1, 0b10, 0b1101, op7_4, (outs DPR:$Vd, DPR:$dst2, GPR:$wb),
1079 (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD2dupu,
1080 "vld2", Dt, "\\{$Vd[], $dst2[]\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
1081 let Inst{4} = Rn{4};
1082 let DecoderMethod = "DecodeVLD2DupInstruction";
1085 def VLD2DUPd8_UPD : VLD2DUPWB<{0,0,0,0}, "8">;
1086 def VLD2DUPd16_UPD : VLD2DUPWB<{0,1,0,?}, "16">;
1087 def VLD2DUPd32_UPD : VLD2DUPWB<{1,0,0,?}, "32">;
1089 def VLD2DUPd8x2_UPD : VLD2DUPWB<{0,0,1,0}, "8">;
1090 def VLD2DUPd16x2_UPD : VLD2DUPWB<{0,1,1,?}, "16">;
1091 def VLD2DUPd32x2_UPD : VLD2DUPWB<{1,0,1,?}, "32">;
1093 def VLD2DUPd8Pseudo_UPD : VLDQWBPseudo<IIC_VLD2dupu>;
1094 def VLD2DUPd16Pseudo_UPD : VLDQWBPseudo<IIC_VLD2dupu>;
1095 def VLD2DUPd32Pseudo_UPD : VLDQWBPseudo<IIC_VLD2dupu>;
1097 // VLD3DUP : Vector Load (single 3-element structure to all lanes)
// Same shape as VLD2DUP but with three destination D registers and
// opcode field 0b1110.
1098 class VLD3DUP<bits<4> op7_4, string Dt>
1099 : NLdSt<1, 0b10, 0b1110, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3),
1100 (ins addrmode6dup:$Rn), IIC_VLD3dup,
1101 "vld3", Dt, "\\{$Vd[], $dst2[], $dst3[]\\}, $Rn", "", []> {
1104 let DecoderMethod = "DecodeVLD3DupInstruction";
1107 def VLD3DUPd8 : VLD3DUP<{0,0,0,?}, "8">;
1108 def VLD3DUPd16 : VLD3DUP<{0,1,0,?}, "16">;
1109 def VLD3DUPd32 : VLD3DUP<{1,0,0,?}, "32">;
1111 def VLD3DUPd8Pseudo : VLDQQPseudo<IIC_VLD3dup>;
1112 def VLD3DUPd16Pseudo : VLDQQPseudo<IIC_VLD3dup>;
1113 def VLD3DUPd32Pseudo : VLDQQPseudo<IIC_VLD3dup>;
1115 // ...with double-spaced registers (not used for codegen):
1116 def VLD3DUPd8x2 : VLD3DUP<{0,0,1,?}, "8">;
1117 def VLD3DUPd16x2 : VLD3DUP<{0,1,1,?}, "16">;
1118 def VLD3DUPd32x2 : VLD3DUP<{1,0,1,?}, "32">;
1120 // ...with address register writeback:
1121 class VLD3DUPWB<bits<4> op7_4, string Dt>
1122 : NLdSt<1, 0b10, 0b1110, op7_4, (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, GPR:$wb),
1123 (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD3dupu,
1124 "vld3", Dt, "\\{$Vd[], $dst2[], $dst3[]\\}, $Rn$Rm",
1125 "$Rn.addr = $wb", []> {
1127 let DecoderMethod = "DecodeVLD3DupInstruction";
1130 def VLD3DUPd8_UPD : VLD3DUPWB<{0,0,0,0}, "8">;
1131 def VLD3DUPd16_UPD : VLD3DUPWB<{0,1,0,?}, "16">;
1132 def VLD3DUPd32_UPD : VLD3DUPWB<{1,0,0,?}, "32">;
1134 def VLD3DUPd8x2_UPD : VLD3DUPWB<{0,0,1,0}, "8">;
1135 def VLD3DUPd16x2_UPD : VLD3DUPWB<{0,1,1,?}, "16">;
1136 def VLD3DUPd32x2_UPD : VLD3DUPWB<{1,0,1,?}, "32">;
1138 def VLD3DUPd8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3dupu>;
1139 def VLD3DUPd16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3dupu>;
1140 def VLD3DUPd32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD3dupu>;
1142 // VLD4DUP : Vector Load (single 4-element structure to all lanes)
1143 class VLD4DUP<bits<4> op7_4, string Dt>
1144 : NLdSt<1, 0b10, 0b1111, op7_4,
1145 (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4),
1146 (ins addrmode6dup:$Rn), IIC_VLD4dup,
1147 "vld4", Dt, "\\{$Vd[], $dst2[], $dst3[], $dst4[]\\}, $Rn", "", []> {
1149 let Inst{4} = Rn{4};
1150 let DecoderMethod = "DecodeVLD4DupInstruction";
1153 def VLD4DUPd8 : VLD4DUP<{0,0,0,?}, "8">;
1154 def VLD4DUPd16 : VLD4DUP<{0,1,0,?}, "16">;
// The 32-bit form additionally routes Rn{5} into Inst{6}.
1155 def VLD4DUPd32 : VLD4DUP<{1,?,0,?}, "32"> { let Inst{6} = Rn{5}; }
1157 def VLD4DUPd8Pseudo : VLDQQPseudo<IIC_VLD4dup>;
1158 def VLD4DUPd16Pseudo : VLDQQPseudo<IIC_VLD4dup>;
1159 def VLD4DUPd32Pseudo : VLDQQPseudo<IIC_VLD4dup>;
1161 // ...with double-spaced registers (not used for codegen):
1162 def VLD4DUPd8x2 : VLD4DUP<{0,0,1,?}, "8">;
1163 def VLD4DUPd16x2 : VLD4DUP<{0,1,1,?}, "16">;
1164 def VLD4DUPd32x2 : VLD4DUP<{1,?,1,?}, "32"> { let Inst{6} = Rn{5}; }
1166 // ...with address register writeback:
1167 class VLD4DUPWB<bits<4> op7_4, string Dt>
1168 : NLdSt<1, 0b10, 0b1111, op7_4,
1169 (outs DPR:$Vd, DPR:$dst2, DPR:$dst3, DPR:$dst4, GPR:$wb),
1170 (ins addrmode6dup:$Rn, am6offset:$Rm), IIC_VLD4dupu,
1171 "vld4", Dt, "\\{$Vd[], $dst2[], $dst3[], $dst4[]\\}, $Rn$Rm",
1172 "$Rn.addr = $wb", []> {
1173 let Inst{4} = Rn{4};
1174 let DecoderMethod = "DecodeVLD4DupInstruction";
1177 def VLD4DUPd8_UPD : VLD4DUPWB<{0,0,0,0}, "8">;
1178 def VLD4DUPd16_UPD : VLD4DUPWB<{0,1,0,?}, "16">;
1179 def VLD4DUPd32_UPD : VLD4DUPWB<{1,?,0,?}, "32"> { let Inst{6} = Rn{5}; }
1181 def VLD4DUPd8x2_UPD : VLD4DUPWB<{0,0,1,0}, "8">;
1182 def VLD4DUPd16x2_UPD : VLD4DUPWB<{0,1,1,?}, "16">;
1183 def VLD4DUPd32x2_UPD : VLD4DUPWB<{1,?,1,?}, "32"> { let Inst{6} = Rn{5}; }
1185 def VLD4DUPd8Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4dupu>;
1186 def VLD4DUPd16Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4dupu>;
1187 def VLD4DUPd32Pseudo_UPD : VLDQQWBPseudo<IIC_VLD4dupu>;
1189 } // mayLoad = 1, neverHasSideEffects = 1, hasExtraDefRegAllocReq = 1
1191 let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
1193 // Classes for VST* pseudo-instructions with multi-register operands.
1194 // These are expanded to real instructions after register allocation.
// Each pair below covers one source-register width (QPR / QQPR / QQQQPR),
// with and without base-register writeback.
1195 class VSTQPseudo<InstrItinClass itin>
1196 : PseudoNLdSt<(outs), (ins addrmode6:$addr, QPR:$src), itin, "">;
1197 class VSTQWBPseudo<InstrItinClass itin>
1198 : PseudoNLdSt<(outs GPR:$wb),
1199 (ins addrmode6:$addr, am6offset:$offset, QPR:$src), itin,
1200 "$addr.addr = $wb">;
1201 class VSTQQPseudo<InstrItinClass itin>
1202 : PseudoNLdSt<(outs), (ins addrmode6:$addr, QQPR:$src), itin, "">;
1203 class VSTQQWBPseudo<InstrItinClass itin>
1204 : PseudoNLdSt<(outs GPR:$wb),
1205 (ins addrmode6:$addr, am6offset:$offset, QQPR:$src), itin,
1206 "$addr.addr = $wb">;
1207 class VSTQQQQPseudo<InstrItinClass itin>
1208 : PseudoNLdSt<(outs), (ins addrmode6:$addr, QQQQPR:$src), itin, "">;
1209 class VSTQQQQWBPseudo<InstrItinClass itin>
1210 : PseudoNLdSt<(outs GPR:$wb),
1211 (ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src), itin,
1212 "$addr.addr = $wb">;
1214 // VST1 : Vector Store (multiple single elements)
// One-register form; Inst{4} (and, for wider lists, Inst{5-4}) carry the
// alignment bits copied from the address operand Rn.
1215 class VST1D<bits<4> op7_4, string Dt>
1216 : NLdSt<0,0b00,0b0111,op7_4, (outs), (ins addrmode6:$Rn, VecListOneD:$Vd),
1217 IIC_VST1, "vst1", Dt, "$Vd, $Rn", "", []> {
1219 let Inst{4} = Rn{4};
1220 let DecoderMethod = "DecodeVSTInstruction";
1222 class VST1Q<bits<4> op7_4, string Dt>
1223 : NLdSt<0,0b00,0b1010,op7_4, (outs),
1224 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2), IIC_VST1x2,
1225 "vst1", Dt, "\\{$Vd, $src2\\}, $Rn", "", []> {
1227 let Inst{5-4} = Rn{5-4};
1228 let DecoderMethod = "DecodeVSTInstruction";
1231 def VST1d8 : VST1D<{0,0,0,?}, "8">;
1232 def VST1d16 : VST1D<{0,1,0,?}, "16">;
1233 def VST1d32 : VST1D<{1,0,0,?}, "32">;
1234 def VST1d64 : VST1D<{1,1,0,?}, "64">;
1236 def VST1q8 : VST1Q<{0,0,?,?}, "8">;
1237 def VST1q16 : VST1Q<{0,1,?,?}, "16">;
1238 def VST1q32 : VST1Q<{1,0,?,?}, "32">;
1239 def VST1q64 : VST1Q<{1,1,?,?}, "64">;
1241 def VST1q8Pseudo : VSTQPseudo<IIC_VST1x2>;
1242 def VST1q16Pseudo : VSTQPseudo<IIC_VST1x2>;
1243 def VST1q32Pseudo : VSTQPseudo<IIC_VST1x2>;
1244 def VST1q64Pseudo : VSTQPseudo<IIC_VST1x2>;
1246 // ...with address register writeback:
1247 class VST1DWB<bits<4> op7_4, string Dt>
1248 : NLdSt<0, 0b00, 0b0111, op7_4, (outs GPR:$wb),
1249 (ins addrmode6:$Rn, am6offset:$Rm, DPR:$Vd), IIC_VST1u,
1250 "vst1", Dt, "\\{$Vd\\}, $Rn$Rm", "$Rn.addr = $wb", []> {
1251 let Inst{4} = Rn{4};
1252 let DecoderMethod = "DecodeVSTInstruction";
1254 class VST1QWB<bits<4> op7_4, string Dt>
1255 : NLdSt<0, 0b00, 0b1010, op7_4, (outs GPR:$wb),
1256 (ins addrmode6:$Rn, am6offset:$Rm, DPR:$Vd, DPR:$src2),
1257 IIC_VST1x2u, "vst1", Dt, "\\{$Vd, $src2\\}, $Rn$Rm",
1258 "$Rn.addr = $wb", []> {
1259 let Inst{5-4} = Rn{5-4};
1260 let DecoderMethod = "DecodeVSTInstruction";
1263 def VST1d8_UPD : VST1DWB<{0,0,0,?}, "8">;
1264 def VST1d16_UPD : VST1DWB<{0,1,0,?}, "16">;
1265 def VST1d32_UPD : VST1DWB<{1,0,0,?}, "32">;
1266 def VST1d64_UPD : VST1DWB<{1,1,0,?}, "64">;
1268 def VST1q8_UPD : VST1QWB<{0,0,?,?}, "8">;
1269 def VST1q16_UPD : VST1QWB<{0,1,?,?}, "16">;
1270 def VST1q32_UPD : VST1QWB<{1,0,?,?}, "32">;
1271 def VST1q64_UPD : VST1QWB<{1,1,?,?}, "64">;
1273 def VST1q8Pseudo_UPD : VSTQWBPseudo<IIC_VST1x2u>;
1274 def VST1q16Pseudo_UPD : VSTQWBPseudo<IIC_VST1x2u>;
1275 def VST1q32Pseudo_UPD : VSTQWBPseudo<IIC_VST1x2u>;
1276 def VST1q64Pseudo_UPD : VSTQWBPseudo<IIC_VST1x2u>;
1278 // ...with 3 registers
1279 class VST1D3<bits<4> op7_4, string Dt>
1280 : NLdSt<0, 0b00, 0b0110, op7_4, (outs),
1281 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3),
1282 IIC_VST1x3, "vst1", Dt, "\\{$Vd, $src2, $src3\\}, $Rn", "", []> {
1284 let Inst{4} = Rn{4};
1285 let DecoderMethod = "DecodeVSTInstruction";
1287 class VST1D3WB<bits<4> op7_4, string Dt>
1288 : NLdSt<0, 0b00, 0b0110, op7_4, (outs GPR:$wb),
1289 (ins addrmode6:$Rn, am6offset:$Rm,
1290 DPR:$Vd, DPR:$src2, DPR:$src3),
1291 IIC_VST1x3u, "vst1", Dt, "\\{$Vd, $src2, $src3\\}, $Rn$Rm",
1292 "$Rn.addr = $wb", []> {
1293 let Inst{4} = Rn{4};
1294 let DecoderMethod = "DecodeVSTInstruction";
// T suffix = "three registers"; only the 64-bit form gets pseudos here.
1297 def VST1d8T : VST1D3<{0,0,0,?}, "8">;
1298 def VST1d16T : VST1D3<{0,1,0,?}, "16">;
1299 def VST1d32T : VST1D3<{1,0,0,?}, "32">;
1300 def VST1d64T : VST1D3<{1,1,0,?}, "64">;
1302 def VST1d8T_UPD : VST1D3WB<{0,0,0,?}, "8">;
1303 def VST1d16T_UPD : VST1D3WB<{0,1,0,?}, "16">;
1304 def VST1d32T_UPD : VST1D3WB<{1,0,0,?}, "32">;
1305 def VST1d64T_UPD : VST1D3WB<{1,1,0,?}, "64">;
1307 def VST1d64TPseudo : VSTQQPseudo<IIC_VST1x3>;
1308 def VST1d64TPseudo_UPD : VSTQQWBPseudo<IIC_VST1x3u>;
1310 // ...with 4 registers
1311 class VST1D4<bits<4> op7_4, string Dt>
1312 : NLdSt<0, 0b00, 0b0010, op7_4, (outs),
1313 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4),
1314 IIC_VST1x4, "vst1", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn", "",
1317 let Inst{5-4} = Rn{5-4};
1318 let DecoderMethod = "DecodeVSTInstruction";
1320 class VST1D4WB<bits<4> op7_4, string Dt>
1321 : NLdSt<0, 0b00, 0b0010, op7_4, (outs GPR:$wb),
1322 (ins addrmode6:$Rn, am6offset:$Rm,
1323 DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4), IIC_VST1x4u,
1324 "vst1", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn$Rm",
1325 "$Rn.addr = $wb", []> {
1326 let Inst{5-4} = Rn{5-4};
1327 let DecoderMethod = "DecodeVSTInstruction";
1330 def VST1d8Q : VST1D4<{0,0,?,?}, "8">;
1331 def VST1d16Q : VST1D4<{0,1,?,?}, "16">;
1332 def VST1d32Q : VST1D4<{1,0,?,?}, "32">;
1333 def VST1d64Q : VST1D4<{1,1,?,?}, "64">;
1335 def VST1d8Q_UPD : VST1D4WB<{0,0,?,?}, "8">;
1336 def VST1d16Q_UPD : VST1D4WB<{0,1,?,?}, "16">;
1337 def VST1d32Q_UPD : VST1D4WB<{1,0,?,?}, "32">;
1338 def VST1d64Q_UPD : VST1D4WB<{1,1,?,?}, "64">;
1340 def VST1d64QPseudo : VSTQQPseudo<IIC_VST1x4>;
1341 def VST1d64QPseudo_UPD : VSTQQWBPseudo<IIC_VST1x4u>;
1343 // VST2 : Vector Store (multiple 2-element structures)
// op11_8 selects single-spaced (0b1000) vs. double-spaced (0b1001) forms —
// see the VST2b* defs at the end of this section.
1344 class VST2D<bits<4> op11_8, bits<4> op7_4, string Dt>
1345 : NLdSt<0, 0b00, op11_8, op7_4, (outs),
1346 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2),
1347 IIC_VST2, "vst2", Dt, "\\{$Vd, $src2\\}, $Rn", "", []> {
1349 let Inst{5-4} = Rn{5-4};
1350 let DecoderMethod = "DecodeVSTInstruction";
1352 class VST2Q<bits<4> op7_4, string Dt>
1353 : NLdSt<0, 0b00, 0b0011, op7_4, (outs),
1354 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4),
1355 IIC_VST2x2, "vst2", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn",
1358 let Inst{5-4} = Rn{5-4};
1359 let DecoderMethod = "DecodeVSTInstruction";
1362 def VST2d8 : VST2D<0b1000, {0,0,?,?}, "8">;
1363 def VST2d16 : VST2D<0b1000, {0,1,?,?}, "16">;
1364 def VST2d32 : VST2D<0b1000, {1,0,?,?}, "32">;
1366 def VST2q8 : VST2Q<{0,0,?,?}, "8">;
1367 def VST2q16 : VST2Q<{0,1,?,?}, "16">;
1368 def VST2q32 : VST2Q<{1,0,?,?}, "32">;
1370 def VST2d8Pseudo : VSTQPseudo<IIC_VST2>;
1371 def VST2d16Pseudo : VSTQPseudo<IIC_VST2>;
1372 def VST2d32Pseudo : VSTQPseudo<IIC_VST2>;
1374 def VST2q8Pseudo : VSTQQPseudo<IIC_VST2x2>;
1375 def VST2q16Pseudo : VSTQQPseudo<IIC_VST2x2>;
1376 def VST2q32Pseudo : VSTQQPseudo<IIC_VST2x2>;
1378 // ...with address register writeback:
1379 class VST2DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
1380 : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
1381 (ins addrmode6:$Rn, am6offset:$Rm, DPR:$Vd, DPR:$src2),
1382 IIC_VST2u, "vst2", Dt, "\\{$Vd, $src2\\}, $Rn$Rm",
1383 "$Rn.addr = $wb", []> {
1384 let Inst{5-4} = Rn{5-4};
1385 let DecoderMethod = "DecodeVSTInstruction";
1387 class VST2QWB<bits<4> op7_4, string Dt>
1388 : NLdSt<0, 0b00, 0b0011, op7_4, (outs GPR:$wb),
1389 (ins addrmode6:$Rn, am6offset:$Rm,
1390 DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4), IIC_VST2x2u,
1391 "vst2", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn$Rm",
1392 "$Rn.addr = $wb", []> {
1393 let Inst{5-4} = Rn{5-4};
1394 let DecoderMethod = "DecodeVSTInstruction";
1397 def VST2d8_UPD : VST2DWB<0b1000, {0,0,?,?}, "8">;
1398 def VST2d16_UPD : VST2DWB<0b1000, {0,1,?,?}, "16">;
1399 def VST2d32_UPD : VST2DWB<0b1000, {1,0,?,?}, "32">;
1401 def VST2q8_UPD : VST2QWB<{0,0,?,?}, "8">;
1402 def VST2q16_UPD : VST2QWB<{0,1,?,?}, "16">;
1403 def VST2q32_UPD : VST2QWB<{1,0,?,?}, "32">;
1405 def VST2d8Pseudo_UPD : VSTQWBPseudo<IIC_VST2u>;
1406 def VST2d16Pseudo_UPD : VSTQWBPseudo<IIC_VST2u>;
1407 def VST2d32Pseudo_UPD : VSTQWBPseudo<IIC_VST2u>;
1409 def VST2q8Pseudo_UPD : VSTQQWBPseudo<IIC_VST2x2u>;
1410 def VST2q16Pseudo_UPD : VSTQQWBPseudo<IIC_VST2x2u>;
1411 def VST2q32Pseudo_UPD : VSTQQWBPseudo<IIC_VST2x2u>;
1413 // ...with double-spaced registers
1414 def VST2b8 : VST2D<0b1001, {0,0,?,?}, "8">;
1415 def VST2b16 : VST2D<0b1001, {0,1,?,?}, "16">;
1416 def VST2b32 : VST2D<0b1001, {1,0,?,?}, "32">;
1417 def VST2b8_UPD : VST2DWB<0b1001, {0,0,?,?}, "8">;
1418 def VST2b16_UPD : VST2DWB<0b1001, {0,1,?,?}, "16">;
1419 def VST2b32_UPD : VST2DWB<0b1001, {1,0,?,?}, "32">;
1421 // VST3 : Vector Store (multiple 3-element structures)
// op11_8 0b0100 = single-spaced, 0b0101 = double-spaced (the VST3q* defs).
1422 class VST3D<bits<4> op11_8, bits<4> op7_4, string Dt>
1423 : NLdSt<0, 0b00, op11_8, op7_4, (outs),
1424 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3), IIC_VST3,
1425 "vst3", Dt, "\\{$Vd, $src2, $src3\\}, $Rn", "", []> {
1427 let Inst{4} = Rn{4};
1428 let DecoderMethod = "DecodeVSTInstruction";
1431 def VST3d8 : VST3D<0b0100, {0,0,0,?}, "8">;
1432 def VST3d16 : VST3D<0b0100, {0,1,0,?}, "16">;
1433 def VST3d32 : VST3D<0b0100, {1,0,0,?}, "32">;
1435 def VST3d8Pseudo : VSTQQPseudo<IIC_VST3>;
1436 def VST3d16Pseudo : VSTQQPseudo<IIC_VST3>;
1437 def VST3d32Pseudo : VSTQQPseudo<IIC_VST3>;
1439 // ...with address register writeback:
1440 class VST3DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
1441 : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
1442 (ins addrmode6:$Rn, am6offset:$Rm,
1443 DPR:$Vd, DPR:$src2, DPR:$src3), IIC_VST3u,
1444 "vst3", Dt, "\\{$Vd, $src2, $src3\\}, $Rn$Rm",
1445 "$Rn.addr = $wb", []> {
1446 let Inst{4} = Rn{4};
1447 let DecoderMethod = "DecodeVSTInstruction";
1450 def VST3d8_UPD : VST3DWB<0b0100, {0,0,0,?}, "8">;
1451 def VST3d16_UPD : VST3DWB<0b0100, {0,1,0,?}, "16">;
1452 def VST3d32_UPD : VST3DWB<0b0100, {1,0,0,?}, "32">;
1454 def VST3d8Pseudo_UPD : VSTQQWBPseudo<IIC_VST3u>;
1455 def VST3d16Pseudo_UPD : VSTQQWBPseudo<IIC_VST3u>;
1456 def VST3d32Pseudo_UPD : VSTQQWBPseudo<IIC_VST3u>;
1458 // ...with double-spaced registers:
1459 def VST3q8 : VST3D<0b0101, {0,0,0,?}, "8">;
1460 def VST3q16 : VST3D<0b0101, {0,1,0,?}, "16">;
1461 def VST3q32 : VST3D<0b0101, {1,0,0,?}, "32">;
1462 def VST3q8_UPD : VST3DWB<0b0101, {0,0,0,?}, "8">;
1463 def VST3q16_UPD : VST3DWB<0b0101, {0,1,0,?}, "16">;
1464 def VST3q32_UPD : VST3DWB<0b0101, {1,0,0,?}, "32">;
1466 def VST3q8Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
1467 def VST3q16Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
1468 def VST3q32Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
1470 // ...alternate versions to be allocated odd register numbers:
1471 def VST3q8oddPseudo : VSTQQQQPseudo<IIC_VST3>;
1472 def VST3q16oddPseudo : VSTQQQQPseudo<IIC_VST3>;
1473 def VST3q32oddPseudo : VSTQQQQPseudo<IIC_VST3>;
1475 def VST3q8oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
1476 def VST3q16oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
1477 def VST3q32oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST3u>;
1479 // VST4 : Vector Store (multiple 4-element structures)
// op11_8 0b0000 = single-spaced, 0b0001 = double-spaced (the VST4q* defs).
1480 class VST4D<bits<4> op11_8, bits<4> op7_4, string Dt>
1481 : NLdSt<0, 0b00, op11_8, op7_4, (outs),
1482 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4),
1483 IIC_VST4, "vst4", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn",
1486 let Inst{5-4} = Rn{5-4};
1487 let DecoderMethod = "DecodeVSTInstruction";
1490 def VST4d8 : VST4D<0b0000, {0,0,?,?}, "8">;
1491 def VST4d16 : VST4D<0b0000, {0,1,?,?}, "16">;
1492 def VST4d32 : VST4D<0b0000, {1,0,?,?}, "32">;
1494 def VST4d8Pseudo : VSTQQPseudo<IIC_VST4>;
1495 def VST4d16Pseudo : VSTQQPseudo<IIC_VST4>;
1496 def VST4d32Pseudo : VSTQQPseudo<IIC_VST4>;
1498 // ...with address register writeback:
1499 class VST4DWB<bits<4> op11_8, bits<4> op7_4, string Dt>
1500 : NLdSt<0, 0b00, op11_8, op7_4, (outs GPR:$wb),
1501 (ins addrmode6:$Rn, am6offset:$Rm,
1502 DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4), IIC_VST4u,
1503 "vst4", Dt, "\\{$Vd, $src2, $src3, $src4\\}, $Rn$Rm",
1504 "$Rn.addr = $wb", []> {
1505 let Inst{5-4} = Rn{5-4};
1506 let DecoderMethod = "DecodeVSTInstruction";
1509 def VST4d8_UPD : VST4DWB<0b0000, {0,0,?,?}, "8">;
1510 def VST4d16_UPD : VST4DWB<0b0000, {0,1,?,?}, "16">;
1511 def VST4d32_UPD : VST4DWB<0b0000, {1,0,?,?}, "32">;
1513 def VST4d8Pseudo_UPD : VSTQQWBPseudo<IIC_VST4u>;
1514 def VST4d16Pseudo_UPD : VSTQQWBPseudo<IIC_VST4u>;
1515 def VST4d32Pseudo_UPD : VSTQQWBPseudo<IIC_VST4u>;
1517 // ...with double-spaced registers:
1518 def VST4q8 : VST4D<0b0001, {0,0,?,?}, "8">;
1519 def VST4q16 : VST4D<0b0001, {0,1,?,?}, "16">;
1520 def VST4q32 : VST4D<0b0001, {1,0,?,?}, "32">;
1521 def VST4q8_UPD : VST4DWB<0b0001, {0,0,?,?}, "8">;
1522 def VST4q16_UPD : VST4DWB<0b0001, {0,1,?,?}, "16">;
1523 def VST4q32_UPD : VST4DWB<0b0001, {1,0,?,?}, "32">;
1525 def VST4q8Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
1526 def VST4q16Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
1527 def VST4q32Pseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
1529 // ...alternate versions to be allocated odd register numbers:
1530 def VST4q8oddPseudo : VSTQQQQPseudo<IIC_VST4>;
1531 def VST4q16oddPseudo : VSTQQQQPseudo<IIC_VST4>;
1532 def VST4q32oddPseudo : VSTQQQQPseudo<IIC_VST4>;
1534 def VST4q8oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
1535 def VST4q16oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
1536 def VST4q32oddPseudo_UPD : VSTQQQQWBPseudo<IIC_VST4u>;
1538 } // mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1
1540 // Classes for VST*LN pseudo-instructions with multi-register operands.
1541 // These are expanded to real instructions after register allocation.
// Lane variants of the VST pseudos above: same QPR/QQPR/QQQQPR source
// widths plus a nohash_imm:$lane operand.
1542 class VSTQLNPseudo<InstrItinClass itin>
1543 : PseudoNLdSt<(outs), (ins addrmode6:$addr, QPR:$src, nohash_imm:$lane),
1545 class VSTQLNWBPseudo<InstrItinClass itin>
1546 : PseudoNLdSt<(outs GPR:$wb),
1547 (ins addrmode6:$addr, am6offset:$offset, QPR:$src,
1548 nohash_imm:$lane), itin, "$addr.addr = $wb">;
1549 class VSTQQLNPseudo<InstrItinClass itin>
1550 : PseudoNLdSt<(outs), (ins addrmode6:$addr, QQPR:$src, nohash_imm:$lane),
1552 class VSTQQLNWBPseudo<InstrItinClass itin>
1553 : PseudoNLdSt<(outs GPR:$wb),
1554 (ins addrmode6:$addr, am6offset:$offset, QQPR:$src,
1555 nohash_imm:$lane), itin, "$addr.addr = $wb">;
1556 class VSTQQQQLNPseudo<InstrItinClass itin>
1557 : PseudoNLdSt<(outs), (ins addrmode6:$addr, QQQQPR:$src, nohash_imm:$lane),
1559 class VSTQQQQLNWBPseudo<InstrItinClass itin>
1560 : PseudoNLdSt<(outs GPR:$wb),
1561 (ins addrmode6:$addr, am6offset:$offset, QQQQPR:$src,
1562 nohash_imm:$lane), itin, "$addr.addr = $wb">;
1564 // VST1LN : Vector Store (single element from one lane)
// Unlike the multi-element stores, VST1LN has selection patterns:
// StoreOp(ExtractOp(vector, lane)) maps directly to the instruction.
1565 class VST1LN<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
1566 PatFrag StoreOp, SDNode ExtractOp>
1567 : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
1568 (ins addrmode6:$Rn, DPR:$Vd, nohash_imm:$lane),
1569 IIC_VST1ln, "vst1", Dt, "\\{$Vd[$lane]\\}, $Rn", "",
1570 [(StoreOp (ExtractOp (Ty DPR:$Vd), imm:$lane), addrmode6:$Rn)]> {
1572 let DecoderMethod = "DecodeVST1LN";
// 32-bit variant uses the addrmode6oneL32 addressing mode instead.
1574 class VST1LN32<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
1575 PatFrag StoreOp, SDNode ExtractOp>
1576 : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
1577 (ins addrmode6oneL32:$Rn, DPR:$Vd, nohash_imm:$lane),
1578 IIC_VST1ln, "vst1", Dt, "\\{$Vd[$lane]\\}, $Rn", "",
1579 [(StoreOp (ExtractOp (Ty DPR:$Vd), imm:$lane), addrmode6oneL32:$Rn)]>{
1581 let DecoderMethod = "DecodeVST1LN";
1583 class VST1QLNPseudo<ValueType Ty, PatFrag StoreOp, SDNode ExtractOp>
1584 : VSTQLNPseudo<IIC_VST1ln> {
1585 let Pattern = [(StoreOp (ExtractOp (Ty QPR:$src), imm:$lane),
1589 def VST1LNd8 : VST1LN<0b0000, {?,?,?,0}, "8", v8i8, truncstorei8,
1591 let Inst{7-5} = lane{2-0};
1593 def VST1LNd16 : VST1LN<0b0100, {?,?,0,?}, "16", v4i16, truncstorei16,
1595 let Inst{7-6} = lane{1-0};
1596 let Inst{4} = Rn{5};
1599 def VST1LNd32 : VST1LN32<0b1000, {?,0,?,?}, "32", v2i32, store, extractelt> {
1600 let Inst{7} = lane{0};
1601 let Inst{5-4} = Rn{5-4};
1604 def VST1LNq8Pseudo : VST1QLNPseudo<v16i8, truncstorei8, NEONvgetlaneu>;
1605 def VST1LNq16Pseudo : VST1QLNPseudo<v8i16, truncstorei16, NEONvgetlaneu>;
1606 def VST1LNq32Pseudo : VST1QLNPseudo<v4i32, store, extractelt>;
// Float lane stores reuse the 32-bit integer forms.
1608 def : Pat<(store (extractelt (v2f32 DPR:$src), imm:$lane), addrmode6:$addr),
1609 (VST1LNd32 addrmode6:$addr, DPR:$src, imm:$lane)>;
1610 def : Pat<(store (extractelt (v4f32 QPR:$src), imm:$lane), addrmode6:$addr),
1611 (VST1LNq32Pseudo addrmode6:$addr, QPR:$src, imm:$lane)>;
1613 // ...with address register writeback:
// Post-indexed lane store: pattern produces the updated base in GPR:$wb
// via the post_* store fragments.
1614 class VST1LNWB<bits<4> op11_8, bits<4> op7_4, string Dt, ValueType Ty,
1615 PatFrag StoreOp, SDNode ExtractOp>
1616 : NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
1617 (ins addrmode6:$Rn, am6offset:$Rm,
1618 DPR:$Vd, nohash_imm:$lane), IIC_VST1lnu, "vst1", Dt,
1619 "\\{$Vd[$lane]\\}, $Rn$Rm",
1621 [(set GPR:$wb, (StoreOp (ExtractOp (Ty DPR:$Vd), imm:$lane),
1622 addrmode6:$Rn, am6offset:$Rm))]> {
1623 let DecoderMethod = "DecodeVST1LN";
1625 class VST1QLNWBPseudo<ValueType Ty, PatFrag StoreOp, SDNode ExtractOp>
1626 : VSTQLNWBPseudo<IIC_VST1lnu> {
1627 let Pattern = [(set GPR:$wb, (StoreOp (ExtractOp (Ty QPR:$src), imm:$lane),
1628 addrmode6:$addr, am6offset:$offset))];
1631 def VST1LNd8_UPD : VST1LNWB<0b0000, {?,?,?,0}, "8", v8i8, post_truncsti8,
1633 let Inst{7-5} = lane{2-0};
1635 def VST1LNd16_UPD : VST1LNWB<0b0100, {?,?,0,?}, "16", v4i16, post_truncsti16,
1637 let Inst{7-6} = lane{1-0};
1638 let Inst{4} = Rn{5};
1640 def VST1LNd32_UPD : VST1LNWB<0b1000, {?,0,?,?}, "32", v2i32, post_store,
1642 let Inst{7} = lane{0};
1643 let Inst{5-4} = Rn{5-4};
1646 def VST1LNq8Pseudo_UPD : VST1QLNWBPseudo<v16i8, post_truncsti8, NEONvgetlaneu>;
1647 def VST1LNq16Pseudo_UPD : VST1QLNWBPseudo<v8i16, post_truncsti16,NEONvgetlaneu>;
1648 def VST1LNq32Pseudo_UPD : VST1QLNWBPseudo<v4i32, post_store, extractelt>;
1650 let mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1 in {
1652 // VST2LN : Vector Store (single 2-element structure from one lane)
1653 class VST2LN<bits<4> op11_8, bits<4> op7_4, string Dt>
1654 : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
1655 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, nohash_imm:$lane),
1656 IIC_VST2ln, "vst2", Dt, "\\{$Vd[$lane], $src2[$lane]\\}, $Rn",
1659 let Inst{4} = Rn{4};
1660 let DecoderMethod = "DecodeVST2LN";
// Lane-number encoding per element size, as in the VLD*LN sections.
1663 def VST2LNd8 : VST2LN<0b0001, {?,?,?,?}, "8"> {
1664 let Inst{7-5} = lane{2-0};
1666 def VST2LNd16 : VST2LN<0b0101, {?,?,0,?}, "16"> {
1667 let Inst{7-6} = lane{1-0};
1669 def VST2LNd32 : VST2LN<0b1001, {?,0,0,?}, "32"> {
1670 let Inst{7} = lane{0};
1673 def VST2LNd8Pseudo : VSTQLNPseudo<IIC_VST2ln>;
1674 def VST2LNd16Pseudo : VSTQLNPseudo<IIC_VST2ln>;
1675 def VST2LNd32Pseudo : VSTQLNPseudo<IIC_VST2ln>;
1677 // ...with double-spaced registers:
1678 def VST2LNq16 : VST2LN<0b0101, {?,?,1,?}, "16"> {
1679 let Inst{7-6} = lane{1-0};
1680 let Inst{4} = Rn{4};
1682 def VST2LNq32 : VST2LN<0b1001, {?,1,0,?}, "32"> {
1683 let Inst{7} = lane{0};
1684 let Inst{4} = Rn{4};
1687 def VST2LNq16Pseudo : VSTQQLNPseudo<IIC_VST2ln>;
1688 def VST2LNq32Pseudo : VSTQQLNPseudo<IIC_VST2ln>;
1690 // ...with address register writeback:
// NOTE(review): this writeback class names its operands $addr/$offset/$src1
// rather than the $Rn/$Rm/$Vd convention used elsewhere in this section,
// yet still sets Inst{4} from Rn — worth confirming against NLdStLn.
1691 class VST2LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
1692 : NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
1693 (ins addrmode6:$addr, am6offset:$offset,
1694 DPR:$src1, DPR:$src2, nohash_imm:$lane), IIC_VST2lnu, "vst2", Dt,
1695 "\\{$src1[$lane], $src2[$lane]\\}, $addr$offset",
1696 "$addr.addr = $wb", []> {
1697 let Inst{4} = Rn{4};
1698 let DecoderMethod = "DecodeVST2LN";
1701 def VST2LNd8_UPD : VST2LNWB<0b0001, {?,?,?,?}, "8"> {
1702 let Inst{7-5} = lane{2-0};
1704 def VST2LNd16_UPD : VST2LNWB<0b0101, {?,?,0,?}, "16"> {
1705 let Inst{7-6} = lane{1-0};
1707 def VST2LNd32_UPD : VST2LNWB<0b1001, {?,0,0,?}, "32"> {
1708 let Inst{7} = lane{0};
1711 def VST2LNd8Pseudo_UPD : VSTQLNWBPseudo<IIC_VST2lnu>;
1712 def VST2LNd16Pseudo_UPD : VSTQLNWBPseudo<IIC_VST2lnu>;
1713 def VST2LNd32Pseudo_UPD : VSTQLNWBPseudo<IIC_VST2lnu>;
1715 def VST2LNq16_UPD : VST2LNWB<0b0101, {?,?,1,?}, "16"> {
1716 let Inst{7-6} = lane{1-0};
1718 def VST2LNq32_UPD : VST2LNWB<0b1001, {?,1,0,?}, "32"> {
1719 let Inst{7} = lane{0};
1722 def VST2LNq16Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST2lnu>;
1723 def VST2LNq32Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST2lnu>;
1725 // VST3LN : Vector Store (single 3-element structure from one lane)
// Stores lane $lane of three D registers as one interleaved 3-element
// structure at $Rn.
1726 class VST3LN<bits<4> op11_8, bits<4> op7_4, string Dt>
1727 : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
1728 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3,
1729 nohash_imm:$lane), IIC_VST3ln, "vst3", Dt,
1730 "\\{$Vd[$lane], $src2[$lane], $src3[$lane]\\}, $Rn", "", []> {
1732 let DecoderMethod = "DecodeVST3LN";
// Lane field position/width varies with element size (see VST2LN above).
1735 def VST3LNd8 : VST3LN<0b0010, {?,?,?,0}, "8"> {
1736 let Inst{7-5} = lane{2-0};
1738 def VST3LNd16 : VST3LN<0b0110, {?,?,0,0}, "16"> {
1739 let Inst{7-6} = lane{1-0};
1741 def VST3LNd32 : VST3LN<0b1010, {?,0,0,0}, "32"> {
1742 let Inst{7} = lane{0};
// Pseudos for Q-register operands; expanded after register allocation.
1745 def VST3LNd8Pseudo : VSTQQLNPseudo<IIC_VST3ln>;
1746 def VST3LNd16Pseudo : VSTQQLNPseudo<IIC_VST3ln>;
1747 def VST3LNd32Pseudo : VSTQQLNPseudo<IIC_VST3ln>;
1749 // ...with double-spaced registers:
1750 def VST3LNq16 : VST3LN<0b0110, {?,?,1,0}, "16"> {
1751 let Inst{7-6} = lane{1-0};
1753 def VST3LNq32 : VST3LN<0b1010, {?,1,0,0}, "32"> {
1754 let Inst{7} = lane{0};
1757 def VST3LNq16Pseudo : VSTQQQQLNPseudo<IIC_VST3ln>;
1758 def VST3LNq32Pseudo : VSTQQQQLNPseudo<IIC_VST3ln>;
1760 // ...with address register writeback:
// Writeback form: updated base address returned in $wb, tied to $Rn.
1761 class VST3LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
1762 : NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
1763 (ins addrmode6:$Rn, am6offset:$Rm,
1764 DPR:$Vd, DPR:$src2, DPR:$src3, nohash_imm:$lane),
1765 IIC_VST3lnu, "vst3", Dt,
1766 "\\{$Vd[$lane], $src2[$lane], $src3[$lane]\\}, $Rn$Rm",
1767 "$Rn.addr = $wb", []> {
1768 let DecoderMethod = "DecodeVST3LN";
1771 def VST3LNd8_UPD : VST3LNWB<0b0010, {?,?,?,0}, "8"> {
1772 let Inst{7-5} = lane{2-0};
1774 def VST3LNd16_UPD : VST3LNWB<0b0110, {?,?,0,0}, "16"> {
1775 let Inst{7-6} = lane{1-0};
1777 def VST3LNd32_UPD : VST3LNWB<0b1010, {?,0,0,0}, "32"> {
1778 let Inst{7} = lane{0};
1781 def VST3LNd8Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST3lnu>;
1782 def VST3LNd16Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST3lnu>;
1783 def VST3LNd32Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST3lnu>;
1785 def VST3LNq16_UPD : VST3LNWB<0b0110, {?,?,1,0}, "16"> {
1786 let Inst{7-6} = lane{1-0};
1788 def VST3LNq32_UPD : VST3LNWB<0b1010, {?,1,0,0}, "32"> {
1789 let Inst{7} = lane{0};
1792 def VST3LNq16Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST3lnu>;
1793 def VST3LNq32Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST3lnu>;
1795 // VST4LN : Vector Store (single 4-element structure from one lane)
// Stores lane $lane of four D registers as one interleaved 4-element
// structure at $Rn.
1796 class VST4LN<bits<4> op11_8, bits<4> op7_4, string Dt>
1797 : NLdStLn<1, 0b00, op11_8, op7_4, (outs),
1798 (ins addrmode6:$Rn, DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4,
1799 nohash_imm:$lane), IIC_VST4ln, "vst4", Dt,
1800 "\\{$Vd[$lane], $src2[$lane], $src3[$lane], $src4[$lane]\\}, $Rn",
// Alignment bit from the address operand.
1803 let Inst{4} = Rn{4};
1804 let DecoderMethod = "DecodeVST4LN";
1807 def VST4LNd8 : VST4LN<0b0011, {?,?,?,?}, "8"> {
1808 let Inst{7-5} = lane{2-0};
1810 def VST4LNd16 : VST4LN<0b0111, {?,?,0,?}, "16"> {
1811 let Inst{7-6} = lane{1-0};
1813 def VST4LNd32 : VST4LN<0b1011, {?,0,?,?}, "32"> {
1814 let Inst{7} = lane{0};
// 32-bit form has an extra alignment bit in Inst{5}.
1815 let Inst{5} = Rn{5};
// Pseudos for Q-register operands; expanded after register allocation.
1818 def VST4LNd8Pseudo : VSTQQLNPseudo<IIC_VST4ln>;
1819 def VST4LNd16Pseudo : VSTQQLNPseudo<IIC_VST4ln>;
1820 def VST4LNd32Pseudo : VSTQQLNPseudo<IIC_VST4ln>;
1822 // ...with double-spaced registers:
1823 def VST4LNq16 : VST4LN<0b0111, {?,?,1,?}, "16"> {
1824 let Inst{7-6} = lane{1-0};
1826 def VST4LNq32 : VST4LN<0b1011, {?,1,?,?}, "32"> {
1827 let Inst{7} = lane{0};
1828 let Inst{5} = Rn{5};
1831 def VST4LNq16Pseudo : VSTQQQQLNPseudo<IIC_VST4ln>;
1832 def VST4LNq32Pseudo : VSTQQQQLNPseudo<IIC_VST4ln>;
1834 // ...with address register writeback:
// Writeback form: updated base address returned in $wb, tied to $Rn.
1835 class VST4LNWB<bits<4> op11_8, bits<4> op7_4, string Dt>
1836 : NLdStLn<1, 0b00, op11_8, op7_4, (outs GPR:$wb),
1837 (ins addrmode6:$Rn, am6offset:$Rm,
1838 DPR:$Vd, DPR:$src2, DPR:$src3, DPR:$src4, nohash_imm:$lane),
1839 IIC_VST4lnu, "vst4", Dt,
1840 "\\{$Vd[$lane], $src2[$lane], $src3[$lane], $src4[$lane]\\}, $Rn$Rm",
1841 "$Rn.addr = $wb", []> {
1842 let Inst{4} = Rn{4};
1843 let DecoderMethod = "DecodeVST4LN";
1846 def VST4LNd8_UPD : VST4LNWB<0b0011, {?,?,?,?}, "8"> {
1847 let Inst{7-5} = lane{2-0};
1849 def VST4LNd16_UPD : VST4LNWB<0b0111, {?,?,0,?}, "16"> {
1850 let Inst{7-6} = lane{1-0};
1852 def VST4LNd32_UPD : VST4LNWB<0b1011, {?,0,?,?}, "32"> {
1853 let Inst{7} = lane{0};
1854 let Inst{5} = Rn{5};
1857 def VST4LNd8Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST4lnu>;
1858 def VST4LNd16Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST4lnu>;
1859 def VST4LNd32Pseudo_UPD : VSTQQLNWBPseudo<IIC_VST4lnu>;
1861 def VST4LNq16_UPD : VST4LNWB<0b0111, {?,?,1,?}, "16"> {
1862 let Inst{7-6} = lane{1-0};
1864 def VST4LNq32_UPD : VST4LNWB<0b1011, {?,1,?,?}, "32"> {
1865 let Inst{7} = lane{0};
1866 let Inst{5} = Rn{5};
1869 def VST4LNq16Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST4lnu>;
1870 def VST4LNq32Pseudo_UPD : VSTQQQQLNWBPseudo<IIC_VST4lnu>;
1872 } // mayStore = 1, neverHasSideEffects = 1, hasExtraSrcRegAllocReq = 1
1875 //===----------------------------------------------------------------------===//
1876 // NEON pattern fragments
1877 //===----------------------------------------------------------------------===//
1879 // Extract D sub-registers of Q registers.
// Each SDNodeXForm maps an element index into the subregister index of the
// D register containing that element. The divisor is the number of elements
// of the given size that fit in one 64-bit D register.
1880 def DSubReg_i8_reg : SDNodeXForm<imm, [{
1881 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
// 8 x i8 per D register.
1882 return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue()/8, MVT::i32);
1884 def DSubReg_i16_reg : SDNodeXForm<imm, [{
1885 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
// 4 x i16 per D register.
1886 return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue()/4, MVT::i32);
1888 def DSubReg_i32_reg : SDNodeXForm<imm, [{
1889 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
// 2 x i32 per D register.
1890 return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue()/2, MVT::i32);
1892 def DSubReg_f64_reg : SDNodeXForm<imm, [{
1893 assert(ARM::dsub_7 == ARM::dsub_0+7 && "Unexpected subreg numbering");
// 1 x f64 per D register: index maps directly to the subreg.
1894 return CurDAG->getTargetConstant(ARM::dsub_0 + N->getZExtValue(), MVT::i32);
1897 // Extract S sub-registers of Q/D registers.
1898 def SSubReg_f32_reg : SDNodeXForm<imm, [{
1899 assert(ARM::ssub_3 == ARM::ssub_0+3 && "Unexpected subreg numbering");
1900 return CurDAG->getTargetConstant(ARM::ssub_0 + N->getZExtValue(), MVT::i32);
1903 // Translate lane numbers from Q registers to D subregs.
// Masking keeps only the lane offset within one D register (the complement
// of the DSubReg_* division above).
1904 def SubReg_i8_lane : SDNodeXForm<imm, [{
1905 return CurDAG->getTargetConstant(N->getZExtValue() & 7, MVT::i32);
1907 def SubReg_i16_lane : SDNodeXForm<imm, [{
1908 return CurDAG->getTargetConstant(N->getZExtValue() & 3, MVT::i32);
1910 def SubReg_i32_lane : SDNodeXForm<imm, [{
1911 return CurDAG->getTargetConstant(N->getZExtValue() & 1, MVT::i32);
1914 //===----------------------------------------------------------------------===//
1915 // Instruction Classes
1916 //===----------------------------------------------------------------------===//
1918 // Basic 2-register operations: double- and quad-register.
// N2VD/N2VQ: one source vector, one destination vector, matched by an
// SDNode pattern (OpNode).
1919 class N2VD<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
1920 bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
1921 string Dt, ValueType ResTy, ValueType OpTy, SDNode OpNode>
1922 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$Vd),
1923 (ins DPR:$Vm), IIC_VUNAD, OpcodeStr, Dt,"$Vd, $Vm", "",
1924 [(set DPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vm))))]>;
1925 class N2VQ<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
1926 bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
1927 string Dt, ValueType ResTy, ValueType OpTy, SDNode OpNode>
1928 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$Vd),
1929 (ins QPR:$Vm), IIC_VUNAQ, OpcodeStr, Dt,"$Vd, $Vm", "",
1930 [(set QPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vm))))]>;
1932 // Basic 2-register intrinsics, both double- and quad-register.
// Same shape as N2VD/N2VQ but matched from an Intrinsic rather than an
// SDNode.
1933 class N2VDInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
1934 bits<2> op17_16, bits<5> op11_7, bit op4,
1935 InstrItinClass itin, string OpcodeStr, string Dt,
1936 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
1937 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$Vd),
1938 (ins DPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
1939 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm))))]>;
1940 class N2VQInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
1941 bits<2> op17_16, bits<5> op11_7, bit op4,
1942 InstrItinClass itin, string OpcodeStr, string Dt,
1943 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
1944 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$Vd),
1945 (ins QPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
1946 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm))))]>;
1948 // Narrow 2-register operations.
// Q-register source, D-register (half-width) result.
1949 class N2VN<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
1950 bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
1951 InstrItinClass itin, string OpcodeStr, string Dt,
1952 ValueType TyD, ValueType TyQ, SDNode OpNode>
1953 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs DPR:$Vd),
1954 (ins QPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
1955 [(set DPR:$Vd, (TyD (OpNode (TyQ QPR:$Vm))))]>;
1957 // Narrow 2-register intrinsics.
1958 class N2VNInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
1959 bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
1960 InstrItinClass itin, string OpcodeStr, string Dt,
1961 ValueType TyD, ValueType TyQ, Intrinsic IntOp>
1962 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs DPR:$Vd),
1963 (ins QPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
1964 [(set DPR:$Vd, (TyD (IntOp (TyQ QPR:$Vm))))]>;
1966 // Long 2-register operations (currently only used for VMOVL).
// D-register source, Q-register (double-width) result.
1967 class N2VL<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
1968 bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
1969 InstrItinClass itin, string OpcodeStr, string Dt,
1970 ValueType TyQ, ValueType TyD, SDNode OpNode>
1971 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs QPR:$Vd),
1972 (ins DPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
1973 [(set QPR:$Vd, (TyQ (OpNode (TyD DPR:$Vm))))]>;
1975 // Long 2-register intrinsics.
1976 class N2VLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
1977 bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
1978 InstrItinClass itin, string OpcodeStr, string Dt,
1979 ValueType TyQ, ValueType TyD, Intrinsic IntOp>
1980 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs QPR:$Vd),
1981 (ins DPR:$Vm), itin, OpcodeStr, Dt, "$Vd, $Vm", "",
1982 [(set QPR:$Vd, (TyQ (IntOp (TyD DPR:$Vm))))]>;
1984 // 2-register shuffles (VTRN/VZIP/VUZP), both double- and quad-register.
// These update both registers in place: each output is tied to the
// corresponding input via the "$src1 = $Vd, $src2 = $Vm" constraint.
1985 class N2VDShuffle<bits<2> op19_18, bits<5> op11_7, string OpcodeStr, string Dt>
1986 : N2V<0b11, 0b11, op19_18, 0b10, op11_7, 0, 0, (outs DPR:$Vd, DPR:$Vm),
1987 (ins DPR:$src1, DPR:$src2), IIC_VPERMD,
1988 OpcodeStr, Dt, "$Vd, $Vm",
1989 "$src1 = $Vd, $src2 = $Vm", []>;
1990 class N2VQShuffle<bits<2> op19_18, bits<5> op11_7,
1991 InstrItinClass itin, string OpcodeStr, string Dt>
1992 : N2V<0b11, 0b11, op19_18, 0b10, op11_7, 1, 0, (outs QPR:$Vd, QPR:$Vm),
1993 (ins QPR:$src1, QPR:$src2), itin, OpcodeStr, Dt, "$Vd, $Vm",
1994 "$src1 = $Vd, $src2 = $Vm", []>;
1996 // Basic 3-register operations: double- and quad-register.
// Two source vectors, one destination; Commutable lets the matcher swap
// the operands.
1997 class N3VD<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
1998 InstrItinClass itin, string OpcodeStr, string Dt,
1999 ValueType ResTy, ValueType OpTy, SDNode OpNode, bit Commutable>
2000 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2001 (outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2002 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2003 [(set DPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]> {
2004 let isCommutable = Commutable;
2006 // Same as N3VD but no data type.
2007 class N3VDX<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2008 InstrItinClass itin, string OpcodeStr,
2009 ValueType ResTy, ValueType OpTy,
2010 SDNode OpNode, bit Commutable>
2011 : N3VX<op24, op23, op21_20, op11_8, 0, op4,
2012 (outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2013 OpcodeStr, "$Vd, $Vn, $Vm", "",
2014 [(set DPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]>{
2015 let isCommutable = Commutable;
// Scalar ("by lane") variants: the second operand is one lane of a D
// register, duplicated across the vector via NEONvduplane. The register
// class of $Vm (DPR_VFP2 / DPR_8) restricts which D registers can encode
// the scalar.
2018 class N3VDSL<bits<2> op21_20, bits<4> op11_8,
2019 InstrItinClass itin, string OpcodeStr, string Dt,
2020 ValueType Ty, SDNode ShOp>
2021 : N3VLane32<0, 1, op21_20, op11_8, 1, 0,
2022 (outs DPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2023 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2025 (Ty (ShOp (Ty DPR:$Vn),
2026 (Ty (NEONvduplane (Ty DPR_VFP2:$Vm),imm:$lane)))))]> {
// Lane operand makes the operation order-sensitive.
2027 let isCommutable = 0;
2029 class N3VDSL16<bits<2> op21_20, bits<4> op11_8,
2030 string OpcodeStr, string Dt, ValueType Ty, SDNode ShOp>
2031 : N3VLane16<0, 1, op21_20, op11_8, 1, 0,
2032 (outs DPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2033 NVMulSLFrm, IIC_VMULi16D, OpcodeStr, Dt,"$Vd, $Vn, $Vm$lane","",
2035 (Ty (ShOp (Ty DPR:$Vn),
2036 (Ty (NEONvduplane (Ty DPR_8:$Vm), imm:$lane)))))]> {
2037 let isCommutable = 0;
// Quad-register counterparts of the above.
2040 class N3VQ<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2041 InstrItinClass itin, string OpcodeStr, string Dt,
2042 ValueType ResTy, ValueType OpTy, SDNode OpNode, bit Commutable>
2043 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2044 (outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
2045 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2046 [(set QPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]> {
2047 let isCommutable = Commutable;
2049 class N3VQX<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2050 InstrItinClass itin, string OpcodeStr,
2051 ValueType ResTy, ValueType OpTy, SDNode OpNode, bit Commutable>
2052 : N3VX<op24, op23, op21_20, op11_8, 1, op4,
2053 (outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
2054 OpcodeStr, "$Vd, $Vn, $Vm", "",
2055 [(set QPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]>{
2056 let isCommutable = Commutable;
2058 class N3VQSL<bits<2> op21_20, bits<4> op11_8,
2059 InstrItinClass itin, string OpcodeStr, string Dt,
2060 ValueType ResTy, ValueType OpTy, SDNode ShOp>
2061 : N3VLane32<1, 1, op21_20, op11_8, 1, 0,
2062 (outs QPR:$Vd), (ins QPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2063 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2064 [(set (ResTy QPR:$Vd),
2065 (ResTy (ShOp (ResTy QPR:$Vn),
2066 (ResTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
2068 let isCommutable = 0;
2070 class N3VQSL16<bits<2> op21_20, bits<4> op11_8, string OpcodeStr, string Dt,
2071 ValueType ResTy, ValueType OpTy, SDNode ShOp>
2072 : N3VLane16<1, 1, op21_20, op11_8, 1, 0,
2073 (outs QPR:$Vd), (ins QPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2074 NVMulSLFrm, IIC_VMULi16Q, OpcodeStr, Dt,"$Vd, $Vn, $Vm$lane", "",
2075 [(set (ResTy QPR:$Vd),
2076 (ResTy (ShOp (ResTy QPR:$Vn),
2077 (ResTy (NEONvduplane (OpTy DPR_8:$Vm),
2079 let isCommutable = 0;
2082 // Basic 3-register intrinsics, both double- and quad-register.
// As the N3VD/N3VQ classes above, but matched from an Intrinsic (IntOp)
// instead of a generic SDNode.
2083 class N3VDInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2084 Format f, InstrItinClass itin, string OpcodeStr, string Dt,
2085 ValueType ResTy, ValueType OpTy, Intrinsic IntOp, bit Commutable>
2086 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2087 (outs DPR:$Vd), (ins DPR:$Vn, DPR:$Vm), f, itin,
2088 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2089 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]> {
2090 let isCommutable = Commutable;
// Scalar ("by lane") intrinsic variants.
2092 class N3VDIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2093 string OpcodeStr, string Dt, ValueType Ty, Intrinsic IntOp>
2094 : N3VLane32<0, 1, op21_20, op11_8, 1, 0,
2095 (outs DPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2096 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2098 (Ty (IntOp (Ty DPR:$Vn),
2099 (Ty (NEONvduplane (Ty DPR_VFP2:$Vm),
2101 let isCommutable = 0;
2103 class N3VDIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2104 string OpcodeStr, string Dt, ValueType Ty, Intrinsic IntOp>
2105 : N3VLane16<0, 1, op21_20, op11_8, 1, 0,
2106 (outs DPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2107 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2109 (Ty (IntOp (Ty DPR:$Vn),
2110 (Ty (NEONvduplane (Ty DPR_8:$Vm), imm:$lane)))))]> {
2111 let isCommutable = 0;
// "Sh" variant: the assembly/pattern operand order is swapped
// ($Vm before $Vn), used where the instruction's operand roles are
// reversed (e.g. shift instructions).
2113 class N3VDIntSh<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2114 Format f, InstrItinClass itin, string OpcodeStr, string Dt,
2115 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2116 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2117 (outs DPR:$Vd), (ins DPR:$Vm, DPR:$Vn), f, itin,
2118 OpcodeStr, Dt, "$Vd, $Vm, $Vn", "",
2119 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm), (OpTy DPR:$Vn))))]> {
2120 let isCommutable = 0;
// Quad-register counterparts.
2123 class N3VQInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2124 Format f, InstrItinClass itin, string OpcodeStr, string Dt,
2125 ValueType ResTy, ValueType OpTy, Intrinsic IntOp, bit Commutable>
2126 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2127 (outs QPR:$Vd), (ins QPR:$Vn, QPR:$Vm), f, itin,
2128 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2129 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]> {
2130 let isCommutable = Commutable;
2132 class N3VQIntSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2133 string OpcodeStr, string Dt,
2134 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2135 : N3VLane32<1, 1, op21_20, op11_8, 1, 0,
2136 (outs QPR:$Vd), (ins QPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2137 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2138 [(set (ResTy QPR:$Vd),
2139 (ResTy (IntOp (ResTy QPR:$Vn),
2140 (ResTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
2142 let isCommutable = 0;
2144 class N3VQIntSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2145 string OpcodeStr, string Dt,
2146 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2147 : N3VLane16<1, 1, op21_20, op11_8, 1, 0,
2148 (outs QPR:$Vd), (ins QPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2149 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2150 [(set (ResTy QPR:$Vd),
2151 (ResTy (IntOp (ResTy QPR:$Vn),
2152 (ResTy (NEONvduplane (OpTy DPR_8:$Vm),
2154 let isCommutable = 0;
2156 class N3VQIntSh<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2157 Format f, InstrItinClass itin, string OpcodeStr, string Dt,
2158 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2159 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2160 (outs QPR:$Vd), (ins QPR:$Vm, QPR:$Vn), f, itin,
2161 OpcodeStr, Dt, "$Vd, $Vm, $Vn", "",
2162 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm), (OpTy QPR:$Vn))))]> {
2163 let isCommutable = 0;
2166 // Multiply-Add/Sub operations: double- and quad-register.
// Fused accumulate pattern: Vd = OpNode(src1, MulOp(Vn, Vm)), with the
// accumulator tied to the destination ("$src1 = $Vd").
2167 class N3VDMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2168 InstrItinClass itin, string OpcodeStr, string Dt,
2169 ValueType Ty, SDPatternOperator MulOp, SDPatternOperator OpNode>
2170 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2171 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2172 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2173 [(set DPR:$Vd, (Ty (OpNode DPR:$src1,
2174 (Ty (MulOp DPR:$Vn, DPR:$Vm)))))]>;
// Scalar ("by lane") multiply-accumulate variants.
2176 class N3VDMulOpSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2177 string OpcodeStr, string Dt,
2178 ValueType Ty, SDPatternOperator MulOp, SDPatternOperator ShOp>
2179 : N3VLane32<0, 1, op21_20, op11_8, 1, 0,
2181 (ins DPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2183 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2185 (Ty (ShOp (Ty DPR:$src1),
2187 (Ty (NEONvduplane (Ty DPR_VFP2:$Vm),
2189 class N3VDMulOpSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2190 string OpcodeStr, string Dt,
2191 ValueType Ty, SDNode MulOp, SDNode ShOp>
2192 : N3VLane16<0, 1, op21_20, op11_8, 1, 0,
2194 (ins DPR:$src1, DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2196 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2198 (Ty (ShOp (Ty DPR:$src1),
2200 (Ty (NEONvduplane (Ty DPR_8:$Vm),
// Quad-register multiply-accumulate counterparts.
2203 class N3VQMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2204 InstrItinClass itin, string OpcodeStr, string Dt, ValueType Ty,
2205 SDPatternOperator MulOp, SDPatternOperator OpNode>
2206 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2207 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
2208 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2209 [(set QPR:$Vd, (Ty (OpNode QPR:$src1,
2210 (Ty (MulOp QPR:$Vn, QPR:$Vm)))))]>;
2211 class N3VQMulOpSL<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2212 string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
2213 SDPatternOperator MulOp, SDPatternOperator ShOp>
2214 : N3VLane32<1, 1, op21_20, op11_8, 1, 0,
2216 (ins QPR:$src1, QPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2218 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2219 [(set (ResTy QPR:$Vd),
2220 (ResTy (ShOp (ResTy QPR:$src1),
2221 (ResTy (MulOp QPR:$Vn,
2222 (ResTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
2224 class N3VQMulOpSL16<bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2225 string OpcodeStr, string Dt,
2226 ValueType ResTy, ValueType OpTy,
2227 SDNode MulOp, SDNode ShOp>
2228 : N3VLane16<1, 1, op21_20, op11_8, 1, 0,
2230 (ins QPR:$src1, QPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2232 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2233 [(set (ResTy QPR:$Vd),
2234 (ResTy (ShOp (ResTy QPR:$src1),
2235 (ResTy (MulOp QPR:$Vn,
2236 (ResTy (NEONvduplane (OpTy DPR_8:$Vm),
2239 // Neon Intrinsic-Op instructions (VABA): double- and quad-register.
// Vd = OpNode(src1, IntOp(Vn, Vm)) -- accumulate the result of an
// intrinsic; accumulator tied to the destination.
2240 class N3VDIntOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2241 InstrItinClass itin, string OpcodeStr, string Dt,
2242 ValueType Ty, Intrinsic IntOp, SDNode OpNode>
2243 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2244 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2245 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2246 [(set DPR:$Vd, (Ty (OpNode DPR:$src1,
2247 (Ty (IntOp (Ty DPR:$Vn), (Ty DPR:$Vm))))))]>;
2248 class N3VQIntOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2249 InstrItinClass itin, string OpcodeStr, string Dt,
2250 ValueType Ty, Intrinsic IntOp, SDNode OpNode>
2251 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2252 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
2253 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2254 [(set QPR:$Vd, (Ty (OpNode QPR:$src1,
2255 (Ty (IntOp (Ty QPR:$Vn), (Ty QPR:$Vm))))))]>;
2257 // Neon 3-argument intrinsics, both double- and quad-register.
2258 // The destination register is also used as the first source operand register.
2259 class N3VDInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2260 InstrItinClass itin, string OpcodeStr, string Dt,
2261 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2262 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2263 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2264 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2265 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$src1),
2266 (OpTy DPR:$Vn), (OpTy DPR:$Vm))))]>;
2267 class N3VQInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2268 InstrItinClass itin, string OpcodeStr, string Dt,
2269 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2270 : N3V<op24, op23, op21_20, op11_8, 1, op4,
2271 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm), N3RegFrm, itin,
2272 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2273 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$src1),
2274 (OpTy QPR:$Vn), (OpTy QPR:$Vm))))]>;
2276 // Long Multiply-Add/Sub operations.
// Q-register accumulator, D-register multiplicands: the multiply widens
// from TyD to TyQ before the accumulate.
2277 class N3VLMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2278 InstrItinClass itin, string OpcodeStr, string Dt,
2279 ValueType TyQ, ValueType TyD, SDNode MulOp, SDNode OpNode>
2280 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2281 (outs QPR:$Vd), (ins QPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2282 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2283 [(set QPR:$Vd, (OpNode (TyQ QPR:$src1),
2284 (TyQ (MulOp (TyD DPR:$Vn),
2285 (TyD DPR:$Vm)))))]>;
// Scalar ("by lane") long multiply-accumulate variants.
2286 class N3VLMulOpSL<bit op24, bits<2> op21_20, bits<4> op11_8,
2287 InstrItinClass itin, string OpcodeStr, string Dt,
2288 ValueType TyQ, ValueType TyD, SDNode MulOp, SDNode OpNode>
2289 : N3VLane32<op24, 1, op21_20, op11_8, 1, 0, (outs QPR:$Vd),
2290 (ins QPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2292 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2294 (OpNode (TyQ QPR:$src1),
2295 (TyQ (MulOp (TyD DPR:$Vn),
2296 (TyD (NEONvduplane (TyD DPR_VFP2:$Vm),
2298 class N3VLMulOpSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
2299 InstrItinClass itin, string OpcodeStr, string Dt,
2300 ValueType TyQ, ValueType TyD, SDNode MulOp, SDNode OpNode>
2301 : N3VLane16<op24, 1, op21_20, op11_8, 1, 0, (outs QPR:$Vd),
2302 (ins QPR:$src1, DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2304 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2306 (OpNode (TyQ QPR:$src1),
2307 (TyQ (MulOp (TyD DPR:$Vn),
2308 (TyD (NEONvduplane (TyD DPR_8:$Vm),
2311 // Long Intrinsic-Op vector operations with explicit extend (VABAL).
// Vd = OpNode(src1, ExtOp(IntOp(Vn, Vm))): the narrow intrinsic result is
// explicitly extended to TyQ before accumulation.
2312 class N3VLIntExtOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2313 InstrItinClass itin, string OpcodeStr, string Dt,
2314 ValueType TyQ, ValueType TyD, Intrinsic IntOp, SDNode ExtOp,
2316 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2317 (outs QPR:$Vd), (ins QPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2318 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2319 [(set QPR:$Vd, (OpNode (TyQ QPR:$src1),
2320 (TyQ (ExtOp (TyD (IntOp (TyD DPR:$Vn),
2321 (TyD DPR:$Vm)))))))]>;
2323 // Neon Long 3-argument intrinsic. The destination register is
2324 // a quad-register and is also used as the first source operand register.
2325 class N3VLInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2326 InstrItinClass itin, string OpcodeStr, string Dt,
2327 ValueType TyQ, ValueType TyD, Intrinsic IntOp>
2328 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2329 (outs QPR:$Vd), (ins QPR:$src1, DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2330 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "$src1 = $Vd",
2332 (TyQ (IntOp (TyQ QPR:$src1), (TyD DPR:$Vn), (TyD DPR:$Vm))))]>;
2333 class N3VLInt3SL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2334 string OpcodeStr, string Dt,
2335 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2336 : N3VLane32<op24, 1, op21_20, op11_8, 1, 0,
2338 (ins QPR:$src1, DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2340 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2341 [(set (ResTy QPR:$Vd),
2342 (ResTy (IntOp (ResTy QPR:$src1),
2344 (OpTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
2346 class N3VLInt3SL16<bit op24, bits<2> op21_20, bits<4> op11_8,
2347 InstrItinClass itin, string OpcodeStr, string Dt,
2348 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2349 : N3VLane16<op24, 1, op21_20, op11_8, 1, 0,
2351 (ins QPR:$src1, DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2353 OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "$src1 = $Vd",
2354 [(set (ResTy QPR:$Vd),
2355 (ResTy (IntOp (ResTy QPR:$src1),
2357 (OpTy (NEONvduplane (OpTy DPR_8:$Vm),
2360 // Narrowing 3-register intrinsics.
2361 class N3VNInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2362 string OpcodeStr, string Dt, ValueType TyD, ValueType TyQ,
2363 Intrinsic IntOp, bit Commutable>
2364 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2365 (outs DPR:$Vd), (ins QPR:$Vn, QPR:$Vm), N3RegFrm, IIC_VBINi4D,
2366 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2367 [(set DPR:$Vd, (TyD (IntOp (TyQ QPR:$Vn), (TyQ QPR:$Vm))))]> {
2368 let isCommutable = Commutable;
2371 // Long 3-register operations.
2372 class N3VL<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2373 InstrItinClass itin, string OpcodeStr, string Dt,
2374 ValueType TyQ, ValueType TyD, SDNode OpNode, bit Commutable>
2375 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2376 (outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2377 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2378 [(set QPR:$Vd, (TyQ (OpNode (TyD DPR:$Vn), (TyD DPR:$Vm))))]> {
2379 let isCommutable = Commutable;
2381 class N3VLSL<bit op24, bits<2> op21_20, bits<4> op11_8,
2382 InstrItinClass itin, string OpcodeStr, string Dt,
2383 ValueType TyQ, ValueType TyD, SDNode OpNode>
2384 : N3VLane32<op24, 1, op21_20, op11_8, 1, 0,
2385 (outs QPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2386 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2388 (TyQ (OpNode (TyD DPR:$Vn),
2389 (TyD (NEONvduplane (TyD DPR_VFP2:$Vm),imm:$lane)))))]>;
2390 class N3VLSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
2391 InstrItinClass itin, string OpcodeStr, string Dt,
2392 ValueType TyQ, ValueType TyD, SDNode OpNode>
2393 : N3VLane16<op24, 1, op21_20, op11_8, 1, 0,
2394 (outs QPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2395 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2397 (TyQ (OpNode (TyD DPR:$Vn),
2398 (TyD (NEONvduplane (TyD DPR_8:$Vm), imm:$lane)))))]>;
2400 // Long 3-register operations with explicitly extended operands.
2401 class N3VLExt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2402 InstrItinClass itin, string OpcodeStr, string Dt,
2403 ValueType TyQ, ValueType TyD, SDNode OpNode, SDNode ExtOp,
2405 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2406 (outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2407 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2408 [(set QPR:$Vd, (OpNode (TyQ (ExtOp (TyD DPR:$Vn))),
2409 (TyQ (ExtOp (TyD DPR:$Vm)))))]> {
2410 let isCommutable = Commutable;
2413 // Long 3-register intrinsics with explicit extend (VABDL).
2414 class N3VLIntExt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2415 InstrItinClass itin, string OpcodeStr, string Dt,
2416 ValueType TyQ, ValueType TyD, Intrinsic IntOp, SDNode ExtOp,
2418 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2419 (outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2420 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2421 [(set QPR:$Vd, (TyQ (ExtOp (TyD (IntOp (TyD DPR:$Vn),
2422 (TyD DPR:$Vm))))))]> {
2423 let isCommutable = Commutable;
2426 // Long 3-register intrinsics.
2427 class N3VLInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2428 InstrItinClass itin, string OpcodeStr, string Dt,
2429 ValueType TyQ, ValueType TyD, Intrinsic IntOp, bit Commutable>
2430 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2431 (outs QPR:$Vd), (ins DPR:$Vn, DPR:$Vm), N3RegFrm, itin,
2432 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2433 [(set QPR:$Vd, (TyQ (IntOp (TyD DPR:$Vn), (TyD DPR:$Vm))))]> {
2434 let isCommutable = Commutable;
// Long 3-register intrinsics with a scalar (by-lane) second operand: the
// scalar is splatted with NEONvduplane before the widening intrinsic.
// 32-bit elements: $Vm is restricted to DPR_VFP2 (D0-D15) so the lane index
// fits the encoding.
2436 class N3VLIntSL<bit op24, bits<2> op21_20, bits<4> op11_8, InstrItinClass itin,
2437 string OpcodeStr, string Dt,
2438 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2439 : N3VLane32<op24, 1, op21_20, op11_8, 1, 0,
2440 (outs QPR:$Vd), (ins DPR:$Vn, DPR_VFP2:$Vm, VectorIndex32:$lane),
2441 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2442 [(set (ResTy QPR:$Vd),
2443 (ResTy (IntOp (OpTy DPR:$Vn),
2444 (OpTy (NEONvduplane (OpTy DPR_VFP2:$Vm),
// 16-bit elements: $Vm is restricted to DPR_8 (D0-D7) for the same reason.
2446 class N3VLIntSL16<bit op24, bits<2> op21_20, bits<4> op11_8,
2447 InstrItinClass itin, string OpcodeStr, string Dt,
2448 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2449 : N3VLane16<op24, 1, op21_20, op11_8, 1, 0,
2450 (outs QPR:$Vd), (ins DPR:$Vn, DPR_8:$Vm, VectorIndex16:$lane),
2451 NVMulSLFrm, itin, OpcodeStr, Dt, "$Vd, $Vn, $Vm$lane", "",
2452 [(set (ResTy QPR:$Vd),
2453 (ResTy (IntOp (OpTy DPR:$Vn),
2454 (OpTy (NEONvduplane (OpTy DPR_8:$Vm),
2457 // Wide 3-register operations.
// Vd(Q) = OpNode(Vn(Q), ExtOp(Vm(D))): only the second source is widened.
2458 class N3VW<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
2459 string OpcodeStr, string Dt, ValueType TyQ, ValueType TyD,
2460 SDNode OpNode, SDNode ExtOp, bit Commutable>
2461 : N3V<op24, op23, op21_20, op11_8, 0, op4,
2462 (outs QPR:$Vd), (ins QPR:$Vn, DPR:$Vm), N3RegFrm, IIC_VSUBiD,
2463 OpcodeStr, Dt, "$Vd, $Vn, $Vm", "",
2464 [(set QPR:$Vd, (OpNode (TyQ QPR:$Vn),
2465 (TyQ (ExtOp (TyD DPR:$Vm)))))]> {
2466 let isCommutable = Commutable;
2469 // Pairwise long 2-register intrinsics, both double- and quad-register.
// Single-source form: Vd = IntOp(Vm), with ResTy having half the lane count
// of OpTy (e.g. VPADDL-style pairwise widening).
2470 class N2VDPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2471 bits<2> op17_16, bits<5> op11_7, bit op4,
2472 string OpcodeStr, string Dt,
2473 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2474 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$Vd),
2475 (ins DPR:$Vm), IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm", "",
2476 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm))))]>;
// Quad-register variant (bit 6 of the encoding set to 1).
2477 class N2VQPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2478 bits<2> op17_16, bits<5> op11_7, bit op4,
2479 string OpcodeStr, string Dt,
2480 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2481 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$Vd),
2482 (ins QPR:$Vm), IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm", "",
2483 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm))))]>;
2485 // Pairwise long 2-register accumulate intrinsics,
2486 // both double- and quad-register.
2487 // The destination register is also used as the first source operand register.
// The "$src1 = $Vd" constraint ties the accumulator input to the destination
// (VPADAL-style: Vd = IntOp(Vd, Vm)).
2488 class N2VDPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2489 bits<2> op17_16, bits<5> op11_7, bit op4,
2490 string OpcodeStr, string Dt,
2491 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2492 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4,
2493 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vm), IIC_VPALiD,
2494 OpcodeStr, Dt, "$Vd, $Vm", "$src1 = $Vd",
2495 [(set DPR:$Vd, (ResTy (IntOp (ResTy DPR:$src1), (OpTy DPR:$Vm))))]>;
// Quad-register variant.
2496 class N2VQPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
2497 bits<2> op17_16, bits<5> op11_7, bit op4,
2498 string OpcodeStr, string Dt,
2499 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
2500 : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4,
2501 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vm), IIC_VPALiQ,
2502 OpcodeStr, Dt, "$Vd, $Vm", "$src1 = $Vd",
2503 [(set QPR:$Vd, (ResTy (IntOp (ResTy QPR:$src1), (OpTy QPR:$Vm))))]>;
2505 // Shift by immediate,
2506 // both double- and quad-register.
// Vd = OpNode(Vm, #imm); the immediate operand class (ImmTy) selects the
// legal immediate range and printing for the element size.
2507 class N2VDSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2508 Format f, InstrItinClass itin, Operand ImmTy,
2509 string OpcodeStr, string Dt, ValueType Ty, SDNode OpNode>
2510 : N2VImm<op24, op23, op11_8, op7, 0, op4,
2511 (outs DPR:$Vd), (ins DPR:$Vm, ImmTy:$SIMM), f, itin,
2512 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2513 [(set DPR:$Vd, (Ty (OpNode (Ty DPR:$Vm), (i32 imm:$SIMM))))]>;
// Quad-register variant.
2514 class N2VQSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2515 Format f, InstrItinClass itin, Operand ImmTy,
2516 string OpcodeStr, string Dt, ValueType Ty, SDNode OpNode>
2517 : N2VImm<op24, op23, op11_8, op7, 1, op4,
2518 (outs QPR:$Vd), (ins QPR:$Vm, ImmTy:$SIMM), f, itin,
2519 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2520 [(set QPR:$Vd, (Ty (OpNode (Ty QPR:$Vm), (i32 imm:$SIMM))))]>;
2522 // Long shift by immediate.
// Widening shift: D-register source, Q-register result (VSHLL-style).
2523 class N2VLSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
2524 string OpcodeStr, string Dt,
2525 ValueType ResTy, ValueType OpTy, SDNode OpNode>
2526 : N2VImm<op24, op23, op11_8, op7, op6, op4,
2527 (outs QPR:$Vd), (ins DPR:$Vm, i32imm:$SIMM), N2RegVShLFrm,
2528 IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2529 [(set QPR:$Vd, (ResTy (OpNode (OpTy DPR:$Vm),
2530 (i32 imm:$SIMM))))]>;
2532 // Narrow shift by immediate.
// Narrowing shift: Q-register source, D-register result (VSHRN-style).
2533 class N2VNSh<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6, bit op4,
2534 InstrItinClass itin, string OpcodeStr, string Dt,
2535 ValueType ResTy, ValueType OpTy, Operand ImmTy, SDNode OpNode>
2536 : N2VImm<op24, op23, op11_8, op7, op6, op4,
2537 (outs DPR:$Vd), (ins QPR:$Vm, ImmTy:$SIMM), N2RegVShRFrm, itin,
2538 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2539 [(set DPR:$Vd, (ResTy (OpNode (OpTy QPR:$Vm),
2540 (i32 imm:$SIMM))))]>;
2542 // Shift right by immediate and accumulate,
2543 // both double- and quad-register.
// VSRA-style: Vd = src1 + ShOp(Vm, #imm), with the accumulator tied to the
// destination via "$src1 = $Vd".
2544 class N2VDShAdd<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2545 Operand ImmTy, string OpcodeStr, string Dt,
2546 ValueType Ty, SDNode ShOp>
2547 : N2VImm<op24, op23, op11_8, op7, 0, op4, (outs DPR:$Vd),
2548 (ins DPR:$src1, DPR:$Vm, ImmTy:$SIMM), N2RegVShRFrm, IIC_VPALiD,
2549 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
2550 [(set DPR:$Vd, (Ty (add DPR:$src1,
2551 (Ty (ShOp DPR:$Vm, (i32 imm:$SIMM))))))]>;
// Quad-register variant.
2552 class N2VQShAdd<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2553 Operand ImmTy, string OpcodeStr, string Dt,
2554 ValueType Ty, SDNode ShOp>
2555 : N2VImm<op24, op23, op11_8, op7, 1, op4, (outs QPR:$Vd),
2556 (ins QPR:$src1, QPR:$Vm, ImmTy:$SIMM), N2RegVShRFrm, IIC_VPALiD,
2557 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
2558 [(set QPR:$Vd, (Ty (add QPR:$src1,
2559 (Ty (ShOp QPR:$Vm, (i32 imm:$SIMM))))))]>;
2561 // Shift by immediate and insert,
2562 // both double- and quad-register.
// VSLI/VSRI-style: ShOp takes the old destination value ($src1), the source
// and the immediate, so shifted-out lanes of $src1 are preserved.
2563 class N2VDShIns<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2564 Operand ImmTy, Format f, string OpcodeStr, string Dt,
2565 ValueType Ty,SDNode ShOp>
2566 : N2VImm<op24, op23, op11_8, op7, 0, op4, (outs DPR:$Vd),
2567 (ins DPR:$src1, DPR:$Vm, ImmTy:$SIMM), f, IIC_VSHLiD,
2568 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
2569 [(set DPR:$Vd, (Ty (ShOp DPR:$src1, DPR:$Vm, (i32 imm:$SIMM))))]>;
// Quad-register variant.
2570 class N2VQShIns<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2571 Operand ImmTy, Format f, string OpcodeStr, string Dt,
2572 ValueType Ty,SDNode ShOp>
2573 : N2VImm<op24, op23, op11_8, op7, 1, op4, (outs QPR:$Vd),
2574 (ins QPR:$src1, QPR:$Vm, ImmTy:$SIMM), f, IIC_VSHLiQ,
2575 OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "$src1 = $Vd",
2576 [(set QPR:$Vd, (Ty (ShOp QPR:$src1, QPR:$Vm, (i32 imm:$SIMM))))]>;
2578 // Convert, with fractional bits immediate,
2579 // both double- and quad-register.
// Fixed-point <-> floating-point conversion (VCVT) where $SIMM is the number
// of fractional bits, constrained by the neon_vcvt_imm32 operand class.
2580 class N2VCvtD<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2581 string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
2583 : N2VImm<op24, op23, op11_8, op7, 0, op4,
2584 (outs DPR:$Vd), (ins DPR:$Vm, neon_vcvt_imm32:$SIMM), NVCVTFrm,
2585 IIC_VUNAD, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2586 [(set DPR:$Vd, (ResTy (IntOp (OpTy DPR:$Vm), (i32 imm:$SIMM))))]>;
// Quad-register variant.
2587 class N2VCvtQ<bit op24, bit op23, bits<4> op11_8, bit op7, bit op4,
2588 string OpcodeStr, string Dt, ValueType ResTy, ValueType OpTy,
2590 : N2VImm<op24, op23, op11_8, op7, 1, op4,
2591 (outs QPR:$Vd), (ins QPR:$Vm, neon_vcvt_imm32:$SIMM), NVCVTFrm,
2592 IIC_VUNAQ, OpcodeStr, Dt, "$Vd, $Vm, $SIMM", "",
2593 [(set QPR:$Vd, (ResTy (IntOp (OpTy QPR:$Vm), (i32 imm:$SIMM))))]>;
2595 //===----------------------------------------------------------------------===//
2597 //===----------------------------------------------------------------------===//
2599 // Abbreviations used in multiclass suffixes:
2600 // Q = quarter int (8 bit) elements
2601 // H = half int (16 bit) elements
2602 // S = single int (32 bit) elements
2603 // D = double int (64 bit) elements
2605 // Neon 2-register vector operations and intrinsics.
2607 // Neon 2-register comparisons.
2608 // source operand element sizes of 8, 16 and 32 bits:
// Instantiates one D and one Q comparison per element size, plus f32
// variants that compare floats but produce integer (all-ones/all-zeros)
// result vectors; the f32 defs force the F bit (Inst{10}) to 1.
2609 multiclass N2V_QHS_cmp<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
2610 bits<5> op11_7, bit op4, string opc, string Dt,
2611 string asm, SDNode OpNode> {
2612 // 64-bit vector types.
2613 def v8i8 : N2V<op24_23, op21_20, 0b00, op17_16, op11_7, 0, op4,
2614 (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
2615 opc, !strconcat(Dt, "8"), asm, "",
2616 [(set DPR:$Vd, (v8i8 (OpNode (v8i8 DPR:$Vm))))]>;
2617 def v4i16 : N2V<op24_23, op21_20, 0b01, op17_16, op11_7, 0, op4,
2618 (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
2619 opc, !strconcat(Dt, "16"), asm, "",
2620 [(set DPR:$Vd, (v4i16 (OpNode (v4i16 DPR:$Vm))))]>;
2621 def v2i32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 0, op4,
2622 (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
2623 opc, !strconcat(Dt, "32"), asm, "",
2624 [(set DPR:$Vd, (v2i32 (OpNode (v2i32 DPR:$Vm))))]>;
2625 def v2f32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 0, op4,
2626 (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
2627 opc, "f32", asm, "",
2628 [(set DPR:$Vd, (v2i32 (OpNode (v2f32 DPR:$Vm))))]> {
2629 let Inst{10} = 1; // overwrite F = 1
2632 // 128-bit vector types.
2633 def v16i8 : N2V<op24_23, op21_20, 0b00, op17_16, op11_7, 1, op4,
2634 (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
2635 opc, !strconcat(Dt, "8"), asm, "",
2636 [(set QPR:$Vd, (v16i8 (OpNode (v16i8 QPR:$Vm))))]>;
2637 def v8i16 : N2V<op24_23, op21_20, 0b01, op17_16, op11_7, 1, op4,
2638 (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
2639 opc, !strconcat(Dt, "16"), asm, "",
2640 [(set QPR:$Vd, (v8i16 (OpNode (v8i16 QPR:$Vm))))]>;
2641 def v4i32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 1, op4,
2642 (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
2643 opc, !strconcat(Dt, "32"), asm, "",
2644 [(set QPR:$Vd, (v4i32 (OpNode (v4i32 QPR:$Vm))))]>;
2645 def v4f32 : N2V<op24_23, op21_20, 0b10, op17_16, op11_7, 1, op4,
2646 (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
2647 opc, "f32", asm, "",
2648 [(set QPR:$Vd, (v4i32 (OpNode (v4f32 QPR:$Vm))))]> {
2649 let Inst{10} = 1; // overwrite F = 1
2654 // Neon 2-register vector intrinsics,
2655 // element sizes of 8, 16 and 32 bits:
// One N2VDInt (64-bit) and one N2VQInt (128-bit) def per element size;
// the Dt suffix ("8"/"16"/"32") selects the data-type in the mnemonic.
2656 multiclass N2VInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
2657 bits<5> op11_7, bit op4,
2658 InstrItinClass itinD, InstrItinClass itinQ,
2659 string OpcodeStr, string Dt, Intrinsic IntOp> {
2660 // 64-bit vector types.
2661 def v8i8 : N2VDInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
2662 itinD, OpcodeStr, !strconcat(Dt, "8"), v8i8, v8i8, IntOp>;
2663 def v4i16 : N2VDInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
2664 itinD, OpcodeStr, !strconcat(Dt, "16"),v4i16,v4i16,IntOp>;
2665 def v2i32 : N2VDInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
2666 itinD, OpcodeStr, !strconcat(Dt, "32"),v2i32,v2i32,IntOp>;
2668 // 128-bit vector types.
2669 def v16i8 : N2VQInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
2670 itinQ, OpcodeStr, !strconcat(Dt, "8"), v16i8,v16i8,IntOp>;
2671 def v8i16 : N2VQInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
2672 itinQ, OpcodeStr, !strconcat(Dt, "16"),v8i16,v8i16,IntOp>;
2673 def v4i32 : N2VQInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
2674 itinQ, OpcodeStr, !strconcat(Dt, "32"),v4i32,v4i32,IntOp>;
2678 // Neon Narrowing 2-register vector operations,
2679 // source operand element sizes of 16, 32 and 64 bits:
// Each def halves the element size (Q source -> D result); the Dt suffix
// names the *source* element size.
2680 multiclass N2VN_HSD<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
2681 bits<5> op11_7, bit op6, bit op4,
2682 InstrItinClass itin, string OpcodeStr, string Dt,
2684 def v8i8 : N2VN<op24_23, op21_20, 0b00, op17_16, op11_7, op6, op4,
2685 itin, OpcodeStr, !strconcat(Dt, "16"),
2686 v8i8, v8i16, OpNode>;
2687 def v4i16 : N2VN<op24_23, op21_20, 0b01, op17_16, op11_7, op6, op4,
2688 itin, OpcodeStr, !strconcat(Dt, "32"),
2689 v4i16, v4i32, OpNode>;
2690 def v2i32 : N2VN<op24_23, op21_20, 0b10, op17_16, op11_7, op6, op4,
2691 itin, OpcodeStr, !strconcat(Dt, "64"),
2692 v2i32, v2i64, OpNode>;
2695 // Neon Narrowing 2-register vector intrinsics,
2696 // source operand element sizes of 16, 32 and 64 bits:
// Intrinsic counterpart of N2VN_HSD above; same type pairs, IntOp instead
// of an SDNode.
2697 multiclass N2VNInt_HSD<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
2698 bits<5> op11_7, bit op6, bit op4,
2699 InstrItinClass itin, string OpcodeStr, string Dt,
2701 def v8i8 : N2VNInt<op24_23, op21_20, 0b00, op17_16, op11_7, op6, op4,
2702 itin, OpcodeStr, !strconcat(Dt, "16"),
2703 v8i8, v8i16, IntOp>;
2704 def v4i16 : N2VNInt<op24_23, op21_20, 0b01, op17_16, op11_7, op6, op4,
2705 itin, OpcodeStr, !strconcat(Dt, "32"),
2706 v4i16, v4i32, IntOp>;
2707 def v2i32 : N2VNInt<op24_23, op21_20, 0b10, op17_16, op11_7, op6, op4,
2708 itin, OpcodeStr, !strconcat(Dt, "64"),
2709 v2i32, v2i64, IntOp>;
2713 // Neon Lengthening 2-register vector intrinsic (currently specific to VMOVL).
2714 // source operand element sizes of 8, 16 and 32 bits:
// (The defs below take v8i8/v4i16/v2i32 sources with Dt suffixes "8"/"16"/
// "32" and double the element size: 8->16, 16->32, 32->64.)
2715 multiclass N2VL_QHS<bits<2> op24_23, bits<5> op11_7, bit op6, bit op4,
2716 string OpcodeStr, string Dt, SDNode OpNode> {
2717 def v8i16 : N2VL<op24_23, 0b00, 0b10, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
2718 OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, OpNode>;
2719 def v4i32 : N2VL<op24_23, 0b01, 0b00, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
2720 OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, OpNode>;
2721 def v2i64 : N2VL<op24_23, 0b10, 0b00, 0b00, op11_7, op6, op4, IIC_VQUNAiD,
2722 OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, OpNode>;
2726 // Neon 3-register vector operations.
2728 // First with only element sizes of 8, 16 and 32 bits:
// One N3VD (64-bit) and one N3VQ (128-bit) def per element size; 8/16-bit
// forms share the 16-bit itinerary, 32-bit forms use the 32-bit one.
2729 multiclass N3V_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
2730 InstrItinClass itinD16, InstrItinClass itinD32,
2731 InstrItinClass itinQ16, InstrItinClass itinQ32,
2732 string OpcodeStr, string Dt,
2733 SDNode OpNode, bit Commutable = 0> {
2734 // 64-bit vector types.
2735 def v8i8 : N3VD<op24, op23, 0b00, op11_8, op4, itinD16,
2736 OpcodeStr, !strconcat(Dt, "8"),
2737 v8i8, v8i8, OpNode, Commutable>;
2738 def v4i16 : N3VD<op24, op23, 0b01, op11_8, op4, itinD16,
2739 OpcodeStr, !strconcat(Dt, "16"),
2740 v4i16, v4i16, OpNode, Commutable>;
2741 def v2i32 : N3VD<op24, op23, 0b10, op11_8, op4, itinD32,
2742 OpcodeStr, !strconcat(Dt, "32"),
2743 v2i32, v2i32, OpNode, Commutable>;
2745 // 128-bit vector types.
2746 def v16i8 : N3VQ<op24, op23, 0b00, op11_8, op4, itinQ16,
2747 OpcodeStr, !strconcat(Dt, "8"),
2748 v16i8, v16i8, OpNode, Commutable>;
2749 def v8i16 : N3VQ<op24, op23, 0b01, op11_8, op4, itinQ16,
2750 OpcodeStr, !strconcat(Dt, "16"),
2751 v8i16, v8i16, OpNode, Commutable>;
2752 def v4i32 : N3VQ<op24, op23, 0b10, op11_8, op4, itinQ32,
2753 OpcodeStr, !strconcat(Dt, "32"),
2754 v4i32, v4i32, OpNode, Commutable>;
// Scalar (by-lane) 3-register operations for 16- and 32-bit elements,
// D- and Q-register forms.
2757 multiclass N3VSL_HS<bits<4> op11_8, string OpcodeStr, string Dt, SDNode ShOp> {
2758 def v4i16 : N3VDSL16<0b01, op11_8, OpcodeStr, !strconcat(Dt, "16"),
2760 def v2i32 : N3VDSL<0b10, op11_8, IIC_VMULi32D, OpcodeStr, !strconcat(Dt,"32"),
2762 def v8i16 : N3VQSL16<0b01, op11_8, OpcodeStr, !strconcat(Dt, "16"),
2763 v8i16, v4i16, ShOp>;
2764 def v4i32 : N3VQSL<0b10, op11_8, IIC_VMULi32Q, OpcodeStr, !strconcat(Dt,"32"),
2765 v4i32, v2i32, ShOp>;
2768 // ....then also with element size 64 bits:
// Inherits the 8/16/32-bit defs from N3V_QHS and adds the v1i64/v2i64 ones.
2769 multiclass N3V_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
2770 InstrItinClass itinD, InstrItinClass itinQ,
2771 string OpcodeStr, string Dt,
2772 SDNode OpNode, bit Commutable = 0>
2773 : N3V_QHS<op24, op23, op11_8, op4, itinD, itinD, itinQ, itinQ,
2774 OpcodeStr, Dt, OpNode, Commutable> {
2775 def v1i64 : N3VD<op24, op23, 0b11, op11_8, op4, itinD,
2776 OpcodeStr, !strconcat(Dt, "64"),
2777 v1i64, v1i64, OpNode, Commutable>;
2778 def v2i64 : N3VQ<op24, op23, 0b11, op11_8, op4, itinQ,
2779 OpcodeStr, !strconcat(Dt, "64"),
2780 v2i64, v2i64, OpNode, Commutable>;
2784 // Neon 3-register vector intrinsics.
2786 // First with only element sizes of 16 and 32 bits:
// D and Q defs per element size; Format f distinguishes the encoding frame
// used by the callers (e.g. shift vs. regular 3-register forms).
2787 multiclass N3VInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2788 InstrItinClass itinD16, InstrItinClass itinD32,
2789 InstrItinClass itinQ16, InstrItinClass itinQ32,
2790 string OpcodeStr, string Dt,
2791 Intrinsic IntOp, bit Commutable = 0> {
2792 // 64-bit vector types.
2793 def v4i16 : N3VDInt<op24, op23, 0b01, op11_8, op4, f, itinD16,
2794 OpcodeStr, !strconcat(Dt, "16"),
2795 v4i16, v4i16, IntOp, Commutable>;
2796 def v2i32 : N3VDInt<op24, op23, 0b10, op11_8, op4, f, itinD32,
2797 OpcodeStr, !strconcat(Dt, "32"),
2798 v2i32, v2i32, IntOp, Commutable>;
2800 // 128-bit vector types.
2801 def v8i16 : N3VQInt<op24, op23, 0b01, op11_8, op4, f, itinQ16,
2802 OpcodeStr, !strconcat(Dt, "16"),
2803 v8i16, v8i16, IntOp, Commutable>;
2804 def v4i32 : N3VQInt<op24, op23, 0b10, op11_8, op4, f, itinQ32,
2805 OpcodeStr, !strconcat(Dt, "32"),
2806 v4i32, v4i32, IntOp, Commutable>;
// Same as N3VInt_HS but built from the *Sh (shift-style, operands-swapped)
// base classes; no Commutable parameter.
2808 multiclass N3VInt_HSSh<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2809 InstrItinClass itinD16, InstrItinClass itinD32,
2810 InstrItinClass itinQ16, InstrItinClass itinQ32,
2811 string OpcodeStr, string Dt,
2813 // 64-bit vector types.
2814 def v4i16 : N3VDIntSh<op24, op23, 0b01, op11_8, op4, f, itinD16,
2815 OpcodeStr, !strconcat(Dt, "16"),
2816 v4i16, v4i16, IntOp>;
2817 def v2i32 : N3VDIntSh<op24, op23, 0b10, op11_8, op4, f, itinD32,
2818 OpcodeStr, !strconcat(Dt, "32"),
2819 v2i32, v2i32, IntOp>;
2821 // 128-bit vector types.
2822 def v8i16 : N3VQIntSh<op24, op23, 0b01, op11_8, op4, f, itinQ16,
2823 OpcodeStr, !strconcat(Dt, "16"),
2824 v8i16, v8i16, IntOp>;
2825 def v4i32 : N3VQIntSh<op24, op23, 0b10, op11_8, op4, f, itinQ32,
2826 OpcodeStr, !strconcat(Dt, "32"),
2827 v4i32, v4i32, IntOp>;
// Scalar (by-lane) 3-register intrinsics for 16- and 32-bit elements,
// D- and Q-register forms.
2830 multiclass N3VIntSL_HS<bits<4> op11_8,
2831 InstrItinClass itinD16, InstrItinClass itinD32,
2832 InstrItinClass itinQ16, InstrItinClass itinQ32,
2833 string OpcodeStr, string Dt, Intrinsic IntOp> {
2834 def v4i16 : N3VDIntSL16<0b01, op11_8, itinD16,
2835 OpcodeStr, !strconcat(Dt, "16"), v4i16, IntOp>;
2836 def v2i32 : N3VDIntSL<0b10, op11_8, itinD32,
2837 OpcodeStr, !strconcat(Dt, "32"), v2i32, IntOp>;
2838 def v8i16 : N3VQIntSL16<0b01, op11_8, itinQ16,
2839 OpcodeStr, !strconcat(Dt, "16"), v8i16, v4i16, IntOp>;
2840 def v4i32 : N3VQIntSL<0b10, op11_8, itinQ32,
2841 OpcodeStr, !strconcat(Dt, "32"), v4i32, v2i32, IntOp>;
2844 // ....then also with element size of 8 bits:
// Inherits the 16/32-bit defs from N3VInt_HS and adds the v8i8/v16i8 ones.
2845 multiclass N3VInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2846 InstrItinClass itinD16, InstrItinClass itinD32,
2847 InstrItinClass itinQ16, InstrItinClass itinQ32,
2848 string OpcodeStr, string Dt,
2849 Intrinsic IntOp, bit Commutable = 0>
2850 : N3VInt_HS<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
2851 OpcodeStr, Dt, IntOp, Commutable> {
2852 def v8i8 : N3VDInt<op24, op23, 0b00, op11_8, op4, f, itinD16,
2853 OpcodeStr, !strconcat(Dt, "8"),
2854 v8i8, v8i8, IntOp, Commutable>;
2855 def v16i8 : N3VQInt<op24, op23, 0b00, op11_8, op4, f, itinQ16,
2856 OpcodeStr, !strconcat(Dt, "8"),
2857 v16i8, v16i8, IntOp, Commutable>;
// Shift-style counterpart: inherits N3VInt_HSSh and adds the 8-bit defs.
2859 multiclass N3VInt_QHSSh<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2860 InstrItinClass itinD16, InstrItinClass itinD32,
2861 InstrItinClass itinQ16, InstrItinClass itinQ32,
2862 string OpcodeStr, string Dt,
2864 : N3VInt_HSSh<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
2865 OpcodeStr, Dt, IntOp> {
2866 def v8i8 : N3VDIntSh<op24, op23, 0b00, op11_8, op4, f, itinD16,
2867 OpcodeStr, !strconcat(Dt, "8"),
2869 def v16i8 : N3VQIntSh<op24, op23, 0b00, op11_8, op4, f, itinQ16,
2870 OpcodeStr, !strconcat(Dt, "8"),
2871 v16i8, v16i8, IntOp>;
2875 // ....then also with element size of 64 bits:
// Inherits the 8/16/32-bit defs from N3VInt_QHS and adds v1i64/v2i64.
2876 multiclass N3VInt_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2877 InstrItinClass itinD16, InstrItinClass itinD32,
2878 InstrItinClass itinQ16, InstrItinClass itinQ32,
2879 string OpcodeStr, string Dt,
2880 Intrinsic IntOp, bit Commutable = 0>
2881 : N3VInt_QHS<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
2882 OpcodeStr, Dt, IntOp, Commutable> {
2883 def v1i64 : N3VDInt<op24, op23, 0b11, op11_8, op4, f, itinD32,
2884 OpcodeStr, !strconcat(Dt, "64"),
2885 v1i64, v1i64, IntOp, Commutable>;
2886 def v2i64 : N3VQInt<op24, op23, 0b11, op11_8, op4, f, itinQ32,
2887 OpcodeStr, !strconcat(Dt, "64"),
2888 v2i64, v2i64, IntOp, Commutable>;
// Shift-style counterpart: inherits N3VInt_QHSSh and adds the 64-bit defs.
2890 multiclass N3VInt_QHSDSh<bit op24, bit op23, bits<4> op11_8, bit op4, Format f,
2891 InstrItinClass itinD16, InstrItinClass itinD32,
2892 InstrItinClass itinQ16, InstrItinClass itinQ32,
2893 string OpcodeStr, string Dt,
2895 : N3VInt_QHSSh<op24, op23, op11_8, op4, f, itinD16, itinD32, itinQ16, itinQ32,
2896 OpcodeStr, Dt, IntOp> {
2897 def v1i64 : N3VDIntSh<op24, op23, 0b11, op11_8, op4, f, itinD32,
2898 OpcodeStr, !strconcat(Dt, "64"),
2899 v1i64, v1i64, IntOp>;
2900 def v2i64 : N3VQIntSh<op24, op23, 0b11, op11_8, op4, f, itinQ32,
2901 OpcodeStr, !strconcat(Dt, "64"),
2902 v2i64, v2i64, IntOp>;
2905 // Neon Narrowing 3-register vector intrinsics,
2906 // source operand element sizes of 16, 32 and 64 bits:
// Q sources, D results; the Dt suffix names the source element size.
2907 multiclass N3VNInt_HSD<bit op24, bit op23, bits<4> op11_8, bit op4,
2908 string OpcodeStr, string Dt,
2909 Intrinsic IntOp, bit Commutable = 0> {
2910 def v8i8 : N3VNInt<op24, op23, 0b00, op11_8, op4,
2911 OpcodeStr, !strconcat(Dt, "16"),
2912 v8i8, v8i16, IntOp, Commutable>;
2913 def v4i16 : N3VNInt<op24, op23, 0b01, op11_8, op4,
2914 OpcodeStr, !strconcat(Dt, "32"),
2915 v4i16, v4i32, IntOp, Commutable>;
2916 def v2i32 : N3VNInt<op24, op23, 0b10, op11_8, op4,
2917 OpcodeStr, !strconcat(Dt, "64"),
2918 v2i32, v2i64, IntOp, Commutable>;
2922 // Neon Long 3-register vector operations.
// D sources, Q results; element size doubles (8->16, 16->32, 32->64).
2924 multiclass N3VL_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
2925 InstrItinClass itin16, InstrItinClass itin32,
2926 string OpcodeStr, string Dt,
2927 SDNode OpNode, bit Commutable = 0> {
2928 def v8i16 : N3VL<op24, op23, 0b00, op11_8, op4, itin16,
2929 OpcodeStr, !strconcat(Dt, "8"),
2930 v8i16, v8i8, OpNode, Commutable>;
2931 def v4i32 : N3VL<op24, op23, 0b01, op11_8, op4, itin16,
2932 OpcodeStr, !strconcat(Dt, "16"),
2933 v4i32, v4i16, OpNode, Commutable>;
2934 def v2i64 : N3VL<op24, op23, 0b10, op11_8, op4, itin32,
2935 OpcodeStr, !strconcat(Dt, "32"),
2936 v2i64, v2i32, OpNode, Commutable>;
// Long scalar (by-lane) operations for 16- and 32-bit source elements.
2939 multiclass N3VLSL_HS<bit op24, bits<4> op11_8,
2940 InstrItinClass itin, string OpcodeStr, string Dt,
2942 def v4i16 : N3VLSL16<op24, 0b01, op11_8, itin, OpcodeStr,
2943 !strconcat(Dt, "16"), v4i32, v4i16, OpNode>;
2944 def v2i32 : N3VLSL<op24, 0b10, op11_8, itin, OpcodeStr,
2945 !strconcat(Dt, "32"), v2i64, v2i32, OpNode>;
// Long operations with explicit extend: both D sources are widened with
// ExtOp before OpNode is applied (e.g. VADDL/VSUBL).
2948 multiclass N3VLExt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
2949 InstrItinClass itin16, InstrItinClass itin32,
2950 string OpcodeStr, string Dt,
2951 SDNode OpNode, SDNode ExtOp, bit Commutable = 0> {
2952 def v8i16 : N3VLExt<op24, op23, 0b00, op11_8, op4, itin16,
2953 OpcodeStr, !strconcat(Dt, "8"),
2954 v8i16, v8i8, OpNode, ExtOp, Commutable>;
2955 def v4i32 : N3VLExt<op24, op23, 0b01, op11_8, op4, itin16,
2956 OpcodeStr, !strconcat(Dt, "16"),
2957 v4i32, v4i16, OpNode, ExtOp, Commutable>;
2958 def v2i64 : N3VLExt<op24, op23, 0b10, op11_8, op4, itin32,
2959 OpcodeStr, !strconcat(Dt, "32"),
2960 v2i64, v2i32, OpNode, ExtOp, Commutable>;
2963 // Neon Long 3-register vector intrinsics.
2965 // First with only element sizes of 16 and 32 bits:
2966 multiclass N3VLInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
2967 InstrItinClass itin16, InstrItinClass itin32,
2968 string OpcodeStr, string Dt,
2969 Intrinsic IntOp, bit Commutable = 0> {
2970 def v4i32 : N3VLInt<op24, op23, 0b01, op11_8, op4, itin16,
2971 OpcodeStr, !strconcat(Dt, "16"),
2972 v4i32, v4i16, IntOp, Commutable>;
2973 def v2i64 : N3VLInt<op24, op23, 0b10, op11_8, op4, itin32,
2974 OpcodeStr, !strconcat(Dt, "32"),
2975 v2i64, v2i32, IntOp, Commutable>;
// By-lane (scalar) forms of the long intrinsics, 16- and 32-bit elements.
2978 multiclass N3VLIntSL_HS<bit op24, bits<4> op11_8,
2979 InstrItinClass itin, string OpcodeStr, string Dt,
2981 def v4i16 : N3VLIntSL16<op24, 0b01, op11_8, itin,
2982 OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, IntOp>;
2983 def v2i32 : N3VLIntSL<op24, 0b10, op11_8, itin,
2984 OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, IntOp>;
2987 // ....then also with element size of 8 bits:
// Inherits the 16/32-bit defs from N3VLInt_HS and adds the v8i16 (8-bit
// source) def.
2988 multiclass N3VLInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
2989 InstrItinClass itin16, InstrItinClass itin32,
2990 string OpcodeStr, string Dt,
2991 Intrinsic IntOp, bit Commutable = 0>
2992 : N3VLInt_HS<op24, op23, op11_8, op4, itin16, itin32, OpcodeStr, Dt,
2993 IntOp, Commutable> {
2994 def v8i16 : N3VLInt<op24, op23, 0b00, op11_8, op4, itin16,
2995 OpcodeStr, !strconcat(Dt, "8"),
2996 v8i16, v8i8, IntOp, Commutable>;
2999 // ....with explicit extend (VABDL).
// Uses N3VLIntExt: D-width intrinsic result widened to Q with ExtOp.
3000 multiclass N3VLIntExt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3001 InstrItinClass itin, string OpcodeStr, string Dt,
3002 Intrinsic IntOp, SDNode ExtOp, bit Commutable = 0> {
3003 def v8i16 : N3VLIntExt<op24, op23, 0b00, op11_8, op4, itin,
3004 OpcodeStr, !strconcat(Dt, "8"),
3005 v8i16, v8i8, IntOp, ExtOp, Commutable>;
3006 def v4i32 : N3VLIntExt<op24, op23, 0b01, op11_8, op4, itin,
3007 OpcodeStr, !strconcat(Dt, "16"),
3008 v4i32, v4i16, IntOp, ExtOp, Commutable>;
3009 def v2i64 : N3VLIntExt<op24, op23, 0b10, op11_8, op4, itin,
3010 OpcodeStr, !strconcat(Dt, "32"),
3011 v2i64, v2i32, IntOp, ExtOp, Commutable>;
3015 // Neon Wide 3-register vector operations (SDNode-based, with explicit
3015 // extend of the narrow operand),
3016 // source operand element sizes of 8, 16 and 32 bits:
3017 multiclass N3VW_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3018 string OpcodeStr, string Dt,
3019 SDNode OpNode, SDNode ExtOp, bit Commutable = 0> {
3020 def v8i16 : N3VW<op24, op23, 0b00, op11_8, op4,
3021 OpcodeStr, !strconcat(Dt, "8"),
3022 v8i16, v8i8, OpNode, ExtOp, Commutable>;
3023 def v4i32 : N3VW<op24, op23, 0b01, op11_8, op4,
3024 OpcodeStr, !strconcat(Dt, "16"),
3025 v4i32, v4i16, OpNode, ExtOp, Commutable>;
3026 def v2i64 : N3VW<op24, op23, 0b10, op11_8, op4,
3027 OpcodeStr, !strconcat(Dt, "32"),
3028 v2i64, v2i32, OpNode, ExtOp, Commutable>;
3032 // Neon Multiply-Op vector operations,
3033 // element sizes of 8, 16 and 32 bits:
// Fused multiply-then-OpNode patterns (e.g. VMLA/VMLS): the inner operation
// is always integer 'mul'; OpNode combines it with the accumulator.
3034 multiclass N3VMulOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3035 InstrItinClass itinD16, InstrItinClass itinD32,
3036 InstrItinClass itinQ16, InstrItinClass itinQ32,
3037 string OpcodeStr, string Dt, SDNode OpNode> {
3038 // 64-bit vector types.
3039 def v8i8 : N3VDMulOp<op24, op23, 0b00, op11_8, op4, itinD16,
3040 OpcodeStr, !strconcat(Dt, "8"), v8i8, mul, OpNode>;
3041 def v4i16 : N3VDMulOp<op24, op23, 0b01, op11_8, op4, itinD16,
3042 OpcodeStr, !strconcat(Dt, "16"), v4i16, mul, OpNode>;
3043 def v2i32 : N3VDMulOp<op24, op23, 0b10, op11_8, op4, itinD32,
3044 OpcodeStr, !strconcat(Dt, "32"), v2i32, mul, OpNode>;
3046 // 128-bit vector types.
3047 def v16i8 : N3VQMulOp<op24, op23, 0b00, op11_8, op4, itinQ16,
3048 OpcodeStr, !strconcat(Dt, "8"), v16i8, mul, OpNode>;
3049 def v8i16 : N3VQMulOp<op24, op23, 0b01, op11_8, op4, itinQ16,
3050 OpcodeStr, !strconcat(Dt, "16"), v8i16, mul, OpNode>;
3051 def v4i32 : N3VQMulOp<op24, op23, 0b10, op11_8, op4, itinQ32,
3052 OpcodeStr, !strconcat(Dt, "32"), v4i32, mul, OpNode>;
// By-lane (scalar) multiply-op forms for 16- and 32-bit elements.
3055 multiclass N3VMulOpSL_HS<bits<4> op11_8,
3056 InstrItinClass itinD16, InstrItinClass itinD32,
3057 InstrItinClass itinQ16, InstrItinClass itinQ32,
3058 string OpcodeStr, string Dt, SDNode ShOp> {
3059 def v4i16 : N3VDMulOpSL16<0b01, op11_8, itinD16,
3060 OpcodeStr, !strconcat(Dt, "16"), v4i16, mul, ShOp>;
3061 def v2i32 : N3VDMulOpSL<0b10, op11_8, itinD32,
3062 OpcodeStr, !strconcat(Dt, "32"), v2i32, mul, ShOp>;
3063 def v8i16 : N3VQMulOpSL16<0b01, op11_8, itinQ16,
3064 OpcodeStr, !strconcat(Dt, "16"), v8i16, v4i16,
3066 def v4i32 : N3VQMulOpSL<0b10, op11_8, itinQ32,
3067 OpcodeStr, !strconcat(Dt, "32"), v4i32, v2i32,
3071 // Neon Intrinsic-Op vector operations,
3072 // element sizes of 8, 16 and 32 bits:
// Like N3VMulOp_QHS but the inner operation is an intrinsic (IntOp)
// instead of 'mul'.
3073 multiclass N3VIntOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3074 InstrItinClass itinD, InstrItinClass itinQ,
3075 string OpcodeStr, string Dt, Intrinsic IntOp,
3077 // 64-bit vector types.
3078 def v8i8 : N3VDIntOp<op24, op23, 0b00, op11_8, op4, itinD,
3079 OpcodeStr, !strconcat(Dt, "8"), v8i8, IntOp, OpNode>;
3080 def v4i16 : N3VDIntOp<op24, op23, 0b01, op11_8, op4, itinD,
3081 OpcodeStr, !strconcat(Dt, "16"), v4i16, IntOp, OpNode>;
3082 def v2i32 : N3VDIntOp<op24, op23, 0b10, op11_8, op4, itinD,
3083 OpcodeStr, !strconcat(Dt, "32"), v2i32, IntOp, OpNode>;
3085 // 128-bit vector types.
3086 def v16i8 : N3VQIntOp<op24, op23, 0b00, op11_8, op4, itinQ,
3087 OpcodeStr, !strconcat(Dt, "8"), v16i8, IntOp, OpNode>;
3088 def v8i16 : N3VQIntOp<op24, op23, 0b01, op11_8, op4, itinQ,
3089 OpcodeStr, !strconcat(Dt, "16"), v8i16, IntOp, OpNode>;
3090 def v4i32 : N3VQIntOp<op24, op23, 0b10, op11_8, op4, itinQ,
3091 OpcodeStr, !strconcat(Dt, "32"), v4i32, IntOp, OpNode>;
3094 // Neon 3-argument intrinsics,
3095 // element sizes of 8, 16 and 32 bits:
// The intrinsic takes the accumulator plus two sources (e.g. VABA-style).
3096 multiclass N3VInt3_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3097 InstrItinClass itinD, InstrItinClass itinQ,
3098 string OpcodeStr, string Dt, Intrinsic IntOp> {
3099 // 64-bit vector types.
3100 def v8i8 : N3VDInt3<op24, op23, 0b00, op11_8, op4, itinD,
3101 OpcodeStr, !strconcat(Dt, "8"), v8i8, v8i8, IntOp>;
3102 def v4i16 : N3VDInt3<op24, op23, 0b01, op11_8, op4, itinD,
3103 OpcodeStr, !strconcat(Dt, "16"), v4i16, v4i16, IntOp>;
3104 def v2i32 : N3VDInt3<op24, op23, 0b10, op11_8, op4, itinD,
3105 OpcodeStr, !strconcat(Dt, "32"), v2i32, v2i32, IntOp>;
3107 // 128-bit vector types.
3108 def v16i8 : N3VQInt3<op24, op23, 0b00, op11_8, op4, itinQ,
3109 OpcodeStr, !strconcat(Dt, "8"), v16i8, v16i8, IntOp>;
3110 def v8i16 : N3VQInt3<op24, op23, 0b01, op11_8, op4, itinQ,
3111 OpcodeStr, !strconcat(Dt, "16"), v8i16, v8i16, IntOp>;
3112 def v4i32 : N3VQInt3<op24, op23, 0b10, op11_8, op4, itinQ,
3113 OpcodeStr, !strconcat(Dt, "32"), v4i32, v4i32, IntOp>;
3117 // Neon Long Multiply-Op vector operations,
3118 // element sizes of 8, 16 and 32 bits:
// Widening multiply combined with OpNode (e.g. VMLAL/VMLSL): D sources,
// Q accumulator/result.
3119 multiclass N3VLMulOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3120 InstrItinClass itin16, InstrItinClass itin32,
3121 string OpcodeStr, string Dt, SDNode MulOp,
3123 def v8i16 : N3VLMulOp<op24, op23, 0b00, op11_8, op4, itin16, OpcodeStr,
3124 !strconcat(Dt, "8"), v8i16, v8i8, MulOp, OpNode>;
3125 def v4i32 : N3VLMulOp<op24, op23, 0b01, op11_8, op4, itin16, OpcodeStr,
3126 !strconcat(Dt, "16"), v4i32, v4i16, MulOp, OpNode>;
3127 def v2i64 : N3VLMulOp<op24, op23, 0b10, op11_8, op4, itin32, OpcodeStr,
3128 !strconcat(Dt, "32"), v2i64, v2i32, MulOp, OpNode>;
// By-lane (scalar) forms, 16- and 32-bit source elements only.
3131 multiclass N3VLMulOpSL_HS<bit op24, bits<4> op11_8, string OpcodeStr,
3132 string Dt, SDNode MulOp, SDNode OpNode> {
3133 def v4i16 : N3VLMulOpSL16<op24, 0b01, op11_8, IIC_VMACi16D, OpcodeStr,
3134 !strconcat(Dt,"16"), v4i32, v4i16, MulOp, OpNode>;
3135 def v2i32 : N3VLMulOpSL<op24, 0b10, op11_8, IIC_VMACi32D, OpcodeStr,
3136 !strconcat(Dt, "32"), v2i64, v2i32, MulOp, OpNode>;
3140 // Neon Long 3-argument intrinsics.
3142 // First with only element sizes of 16 and 32 bits:
3143 multiclass N3VLInt3_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
3144 InstrItinClass itin16, InstrItinClass itin32,
3145 string OpcodeStr, string Dt, Intrinsic IntOp> {
3146 def v4i32 : N3VLInt3<op24, op23, 0b01, op11_8, op4, itin16,
3147 OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, IntOp>;
3148 def v2i64 : N3VLInt3<op24, op23, 0b10, op11_8, op4, itin32,
3149 OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, IntOp>;
// By-lane (scalar) forms of the long 3-argument intrinsics.
3152 multiclass N3VLInt3SL_HS<bit op24, bits<4> op11_8,
3153 string OpcodeStr, string Dt, Intrinsic IntOp> {
3154 def v4i16 : N3VLInt3SL16<op24, 0b01, op11_8, IIC_VMACi16D,
3155 OpcodeStr, !strconcat(Dt,"16"), v4i32, v4i16, IntOp>;
3156 def v2i32 : N3VLInt3SL<op24, 0b10, op11_8, IIC_VMACi32D,
3157 OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, IntOp>;
3160 // ....then also with element size of 8 bits:
// Inherits the 16/32-bit defs from N3VLInt3_HS and adds the 8-bit one.
3161 multiclass N3VLInt3_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3162 InstrItinClass itin16, InstrItinClass itin32,
3163 string OpcodeStr, string Dt, Intrinsic IntOp>
3164 : N3VLInt3_HS<op24, op23, op11_8, op4, itin16, itin32, OpcodeStr, Dt, IntOp> {
3165 def v8i16 : N3VLInt3<op24, op23, 0b00, op11_8, op4, itin16,
3166 OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, IntOp>;
3169 // ....with explicit extend (VABAL).
// Accumulating variant: intrinsic at D width, extended with ExtOp, then
// combined with the Q accumulator via OpNode.
3170 multiclass N3VLIntExtOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
3171 InstrItinClass itin, string OpcodeStr, string Dt,
3172 Intrinsic IntOp, SDNode ExtOp, SDNode OpNode> {
3173 def v8i16 : N3VLIntExtOp<op24, op23, 0b00, op11_8, op4, itin,
3174 OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8,
3175 IntOp, ExtOp, OpNode>;
3176 def v4i32 : N3VLIntExtOp<op24, op23, 0b01, op11_8, op4, itin,
3177 OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16,
3178 IntOp, ExtOp, OpNode>;
3179 def v2i64 : N3VLIntExtOp<op24, op23, 0b10, op11_8, op4, itin,
3180 OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32,
3181 IntOp, ExtOp, OpNode>;
3185 // Neon Pairwise long 2-register intrinsics,
3186 // element sizes of 8, 16 and 32 bits:
// Pairwise-long: adjacent element pairs produce one result element of double
// width, so each result type has half the lanes of its input (e.g. v8i8 ->
// v4i16). The 0b00/0b01/0b10 argument is the element-size field.
3187 multiclass N2VPLInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
3188 bits<5> op11_7, bit op4,
3189 string OpcodeStr, string Dt, Intrinsic IntOp> {
3190 // 64-bit vector types.
3191 def v8i8 : N2VDPLInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
3192 OpcodeStr, !strconcat(Dt, "8"), v4i16, v8i8, IntOp>;
3193 def v4i16 : N2VDPLInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
3194 OpcodeStr, !strconcat(Dt, "16"), v2i32, v4i16, IntOp>;
3195 def v2i32 : N2VDPLInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
3196 OpcodeStr, !strconcat(Dt, "32"), v1i64, v2i32, IntOp>;
3198 // 128-bit vector types.
3199 def v16i8 : N2VQPLInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
3200 OpcodeStr, !strconcat(Dt, "8"), v8i16, v16i8, IntOp>;
3201 def v8i16 : N2VQPLInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
3202 OpcodeStr, !strconcat(Dt, "16"), v4i32, v8i16, IntOp>;
3203 def v4i32 : N2VQPLInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
3204 OpcodeStr, !strconcat(Dt, "32"), v2i64, v4i32, IntOp>;
3208 // Neon Pairwise long 2-register accumulate intrinsics,
3209 // element sizes of 8, 16 and 32 bits:
// Same structure as N2VPLInt_QHS, but the underlying N2VDPLInt2/N2VQPLInt2
// classes also accumulate into the destination (e.g. VPADAL vs. VPADDL).
3210 multiclass N2VPLInt2_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
3211 bits<5> op11_7, bit op4,
3212 string OpcodeStr, string Dt, Intrinsic IntOp> {
3213 // 64-bit vector types.
3214 def v8i8 : N2VDPLInt2<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
3215 OpcodeStr, !strconcat(Dt, "8"), v4i16, v8i8, IntOp>;
3216 def v4i16 : N2VDPLInt2<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
3217 OpcodeStr, !strconcat(Dt, "16"), v2i32, v4i16, IntOp>;
3218 def v2i32 : N2VDPLInt2<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
3219 OpcodeStr, !strconcat(Dt, "32"), v1i64, v2i32, IntOp>;
3221 // 128-bit vector types.
3222 def v16i8 : N2VQPLInt2<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
3223 OpcodeStr, !strconcat(Dt, "8"), v8i16, v16i8, IntOp>;
3224 def v8i16 : N2VQPLInt2<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
3225 OpcodeStr, !strconcat(Dt, "16"), v4i32, v8i16, IntOp>;
3226 def v4i32 : N2VQPLInt2<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
3227 OpcodeStr, !strconcat(Dt, "32"), v2i64, v4i32, IntOp>;
3231 // Neon 2-register vector shift by immediate,
3232 // with f of either N2RegVShLFrm or N2RegVShRFrm
3233 // element sizes of 8, 16, 32 and 64 bits:
// In the shared shift-immediate encoding, the element size is distinguished
// by the position of the leading 1 in imm6 (bits 21-16); the `let Inst{...}`
// overrides below pin those bits per element size. The 64-bit variant is
// instead selected by the separate L bit (the `1` passed as the 4th arg).
3234 multiclass N2VShL_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
3235 InstrItinClass itin, string OpcodeStr, string Dt,
3237 // 64-bit vector types.
3238 def v8i8 : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
3239 OpcodeStr, !strconcat(Dt, "8"), v8i8, OpNode> {
3240 let Inst{21-19} = 0b001; // imm6 = 001xxx
3242 def v4i16 : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
3243 OpcodeStr, !strconcat(Dt, "16"), v4i16, OpNode> {
3244 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3246 def v2i32 : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
3247 OpcodeStr, !strconcat(Dt, "32"), v2i32, OpNode> {
3248 let Inst{21} = 0b1; // imm6 = 1xxxxx
3250 def v1i64 : N2VDSh<op24, op23, op11_8, 1, op4, N2RegVShLFrm, itin, i32imm,
3251 OpcodeStr, !strconcat(Dt, "64"), v1i64, OpNode>;
3254 // 128-bit vector types.
3255 def v16i8 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
3256 OpcodeStr, !strconcat(Dt, "8"), v16i8, OpNode> {
3257 let Inst{21-19} = 0b001; // imm6 = 001xxx
3259 def v8i16 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
3260 OpcodeStr, !strconcat(Dt, "16"), v8i16, OpNode> {
3261 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3263 def v4i32 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShLFrm, itin, i32imm,
3264 OpcodeStr, !strconcat(Dt, "32"), v4i32, OpNode> {
3265 let Inst{21} = 0b1; // imm6 = 1xxxxx
3267 def v2i64 : N2VQSh<op24, op23, op11_8, 1, op4, N2RegVShLFrm, itin, i32imm,
3268 OpcodeStr, !strconcat(Dt, "64"), v2i64, OpNode>;
// Right-shift counterpart: identical structure, but uses N2RegVShRFrm and
// the shr_imm* operand classes, which restrict the immediate to the legal
// right-shift range (1..element-width) instead of a plain i32imm.
3271 multiclass N2VShR_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
3272 InstrItinClass itin, string OpcodeStr, string Dt,
3274 // 64-bit vector types.
3275 def v8i8 : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm8,
3276 OpcodeStr, !strconcat(Dt, "8"), v8i8, OpNode> {
3277 let Inst{21-19} = 0b001; // imm6 = 001xxx
3279 def v4i16 : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm16,
3280 OpcodeStr, !strconcat(Dt, "16"), v4i16, OpNode> {
3281 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3283 def v2i32 : N2VDSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm32,
3284 OpcodeStr, !strconcat(Dt, "32"), v2i32, OpNode> {
3285 let Inst{21} = 0b1; // imm6 = 1xxxxx
3287 def v1i64 : N2VDSh<op24, op23, op11_8, 1, op4, N2RegVShRFrm, itin, shr_imm64,
3288 OpcodeStr, !strconcat(Dt, "64"), v1i64, OpNode>;
3291 // 128-bit vector types.
3292 def v16i8 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm8,
3293 OpcodeStr, !strconcat(Dt, "8"), v16i8, OpNode> {
3294 let Inst{21-19} = 0b001; // imm6 = 001xxx
3296 def v8i16 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm16,
3297 OpcodeStr, !strconcat(Dt, "16"), v8i16, OpNode> {
3298 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3300 def v4i32 : N2VQSh<op24, op23, op11_8, 0, op4, N2RegVShRFrm, itin, shr_imm32,
3301 OpcodeStr, !strconcat(Dt, "32"), v4i32, OpNode> {
3302 let Inst{21} = 0b1; // imm6 = 1xxxxx
3304 def v2i64 : N2VQSh<op24, op23, op11_8, 1, op4, N2RegVShRFrm, itin, shr_imm64,
3305 OpcodeStr, !strconcat(Dt, "64"), v2i64, OpNode>;
3309 // Neon Shift-Accumulate vector operations,
3310 // element sizes of 8, 16, 32 and 64 bits:
// Shift-right-and-accumulate (VSRA/VRSRA family). Uses the shr_imm* operand
// classes (right-shift immediate range) and the same imm6 element-size
// bit-pinning scheme as the plain shift multiclasses above.
3311 multiclass N2VShAdd_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
3312 string OpcodeStr, string Dt, SDNode ShOp> {
3313 // 64-bit vector types.
3314 def v8i8 : N2VDShAdd<op24, op23, op11_8, 0, op4, shr_imm8,
3315 OpcodeStr, !strconcat(Dt, "8"), v8i8, ShOp> {
3316 let Inst{21-19} = 0b001; // imm6 = 001xxx
3318 def v4i16 : N2VDShAdd<op24, op23, op11_8, 0, op4, shr_imm16,
3319 OpcodeStr, !strconcat(Dt, "16"), v4i16, ShOp> {
3320 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3322 def v2i32 : N2VDShAdd<op24, op23, op11_8, 0, op4, shr_imm32,
3323 OpcodeStr, !strconcat(Dt, "32"), v2i32, ShOp> {
3324 let Inst{21} = 0b1; // imm6 = 1xxxxx
3326 def v1i64 : N2VDShAdd<op24, op23, op11_8, 1, op4, shr_imm64,
3327 OpcodeStr, !strconcat(Dt, "64"), v1i64, ShOp>;
3330 // 128-bit vector types.
3331 def v16i8 : N2VQShAdd<op24, op23, op11_8, 0, op4, shr_imm8,
3332 OpcodeStr, !strconcat(Dt, "8"), v16i8, ShOp> {
3333 let Inst{21-19} = 0b001; // imm6 = 001xxx
3335 def v8i16 : N2VQShAdd<op24, op23, op11_8, 0, op4, shr_imm16,
3336 OpcodeStr, !strconcat(Dt, "16"), v8i16, ShOp> {
3337 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3339 def v4i32 : N2VQShAdd<op24, op23, op11_8, 0, op4, shr_imm32,
3340 OpcodeStr, !strconcat(Dt, "32"), v4i32, ShOp> {
3341 let Inst{21} = 0b1; // imm6 = 1xxxxx
3343 def v2i64 : N2VQShAdd<op24, op23, op11_8, 1, op4, shr_imm64,
3344 OpcodeStr, !strconcat(Dt, "64"), v2i64, ShOp>;
3348 // Neon Shift-Insert vector operations,
3349 // with f of either N2RegVShLFrm or N2RegVShRFrm
3350 // element sizes of 8, 16, 32 and 64 bits:
// Left variant (VSLI): uses the NEONvsli node, a plain i32imm shift amount,
// and the left-shift instruction format. Data-type suffixes are passed as
// bare strings ("8", "16", ...) since shift-insert is type-agnostic.
3351 multiclass N2VShInsL_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
3353 // 64-bit vector types.
3354 def v8i8 : N2VDShIns<op24, op23, op11_8, 0, op4, i32imm,
3355 N2RegVShLFrm, OpcodeStr, "8", v8i8, NEONvsli> {
3356 let Inst{21-19} = 0b001; // imm6 = 001xxx
3358 def v4i16 : N2VDShIns<op24, op23, op11_8, 0, op4, i32imm,
3359 N2RegVShLFrm, OpcodeStr, "16", v4i16, NEONvsli> {
3360 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3362 def v2i32 : N2VDShIns<op24, op23, op11_8, 0, op4, i32imm,
3363 N2RegVShLFrm, OpcodeStr, "32", v2i32, NEONvsli> {
3364 let Inst{21} = 0b1; // imm6 = 1xxxxx
3366 def v1i64 : N2VDShIns<op24, op23, op11_8, 1, op4, i32imm,
3367 N2RegVShLFrm, OpcodeStr, "64", v1i64, NEONvsli>;
3370 // 128-bit vector types.
3371 def v16i8 : N2VQShIns<op24, op23, op11_8, 0, op4, i32imm,
3372 N2RegVShLFrm, OpcodeStr, "8", v16i8, NEONvsli> {
3373 let Inst{21-19} = 0b001; // imm6 = 001xxx
3375 def v8i16 : N2VQShIns<op24, op23, op11_8, 0, op4, i32imm,
3376 N2RegVShLFrm, OpcodeStr, "16", v8i16, NEONvsli> {
3377 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3379 def v4i32 : N2VQShIns<op24, op23, op11_8, 0, op4, i32imm,
3380 N2RegVShLFrm, OpcodeStr, "32", v4i32, NEONvsli> {
3381 let Inst{21} = 0b1; // imm6 = 1xxxxx
3383 def v2i64 : N2VQShIns<op24, op23, op11_8, 1, op4, i32imm,
3384 N2RegVShLFrm, OpcodeStr, "64", v2i64, NEONvsli>;
// Right variant (VSRI): identical structure, but with NEONvsri, the
// right-shift format, and range-restricted shr_imm* shift operands.
3387 multiclass N2VShInsR_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
3389 // 64-bit vector types.
3390 def v8i8 : N2VDShIns<op24, op23, op11_8, 0, op4, shr_imm8,
3391 N2RegVShRFrm, OpcodeStr, "8", v8i8, NEONvsri> {
3392 let Inst{21-19} = 0b001; // imm6 = 001xxx
3394 def v4i16 : N2VDShIns<op24, op23, op11_8, 0, op4, shr_imm16,
3395 N2RegVShRFrm, OpcodeStr, "16", v4i16, NEONvsri> {
3396 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3398 def v2i32 : N2VDShIns<op24, op23, op11_8, 0, op4, shr_imm32,
3399 N2RegVShRFrm, OpcodeStr, "32", v2i32, NEONvsri> {
3400 let Inst{21} = 0b1; // imm6 = 1xxxxx
3402 def v1i64 : N2VDShIns<op24, op23, op11_8, 1, op4, shr_imm64,
3403 N2RegVShRFrm, OpcodeStr, "64", v1i64, NEONvsri>;
3406 // 128-bit vector types.
3407 def v16i8 : N2VQShIns<op24, op23, op11_8, 0, op4, shr_imm8,
3408 N2RegVShRFrm, OpcodeStr, "8", v16i8, NEONvsri> {
3409 let Inst{21-19} = 0b001; // imm6 = 001xxx
3411 def v8i16 : N2VQShIns<op24, op23, op11_8, 0, op4, shr_imm16,
3412 N2RegVShRFrm, OpcodeStr, "16", v8i16, NEONvsri> {
3413 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3415 def v4i32 : N2VQShIns<op24, op23, op11_8, 0, op4, shr_imm32,
3416 N2RegVShRFrm, OpcodeStr, "32", v4i32, NEONvsri> {
3417 let Inst{21} = 0b1; // imm6 = 1xxxxx
3419 def v2i64 : N2VQShIns<op24, op23, op11_8, 1, op4, shr_imm64,
3420 N2RegVShRFrm, OpcodeStr, "64", v2i64, NEONvsri>;
3424 // Neon Shift Long operations,
3425 // element sizes of 8, 16, 32 bits:
// Widening shift (result elements are double the source width, e.g.
// v8i8 -> v8i16); the Dt suffix names the *source* element size.
3426 multiclass N2VLSh_QHS<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6,
3427 bit op4, string OpcodeStr, string Dt, SDNode OpNode> {
3428 def v8i16 : N2VLSh<op24, op23, op11_8, op7, op6, op4,
3429 OpcodeStr, !strconcat(Dt, "8"), v8i16, v8i8, OpNode> {
3430 let Inst{21-19} = 0b001; // imm6 = 001xxx
3432 def v4i32 : N2VLSh<op24, op23, op11_8, op7, op6, op4,
3433 OpcodeStr, !strconcat(Dt, "16"), v4i32, v4i16, OpNode> {
3434 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3436 def v2i64 : N2VLSh<op24, op23, op11_8, op7, op6, op4,
3437 OpcodeStr, !strconcat(Dt, "32"), v2i64, v2i32, OpNode> {
3438 let Inst{21} = 0b1; // imm6 = 1xxxxx
3442 // Neon Shift Narrow operations,
3443 // element sizes of 16, 32, 64 bits:
// Narrowing shift (result elements are half the source width, e.g.
// v8i16 -> v8i8); here the Dt suffix names the *source* element size while
// the shr_imm* operand is ranged by the narrower result width.
3444 multiclass N2VNSh_HSD<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6,
3445 bit op4, InstrItinClass itin, string OpcodeStr, string Dt,
3447 def v8i8 : N2VNSh<op24, op23, op11_8, op7, op6, op4, itin,
3448 OpcodeStr, !strconcat(Dt, "16"),
3449 v8i8, v8i16, shr_imm8, OpNode> {
3450 let Inst{21-19} = 0b001; // imm6 = 001xxx
3452 def v4i16 : N2VNSh<op24, op23, op11_8, op7, op6, op4, itin,
3453 OpcodeStr, !strconcat(Dt, "32"),
3454 v4i16, v4i32, shr_imm16, OpNode> {
3455 let Inst{21-20} = 0b01; // imm6 = 01xxxx
3457 def v2i32 : N2VNSh<op24, op23, op11_8, op7, op6, op4, itin,
3458 OpcodeStr, !strconcat(Dt, "64"),
3459 v2i32, v2i64, shr_imm32, OpNode> {
3460 let Inst{21} = 0b1; // imm6 = 1xxxxx
3464 //===----------------------------------------------------------------------===//
3465 // Instruction Definitions.
3466 //===----------------------------------------------------------------------===//
3468 // Vector Add Operations.
3470 // VADD : Vector Add (integer and floating-point)
3471 defm VADD : N3V_QHSD<0, 0, 0b1000, 0, IIC_VBINiD, IIC_VBINiQ, "vadd", "i",
3473 def VADDfd : N3VD<0, 0, 0b00, 0b1101, 0, IIC_VBIND, "vadd", "f32",
3474 v2f32, v2f32, fadd, 1>;
3475 def VADDfq : N3VQ<0, 0, 0b00, 0b1101, 0, IIC_VBINQ, "vadd", "f32",
3476 v4f32, v4f32, fadd, 1>;
3477 // VADDL : Vector Add Long (Q = D + D)
// The s/u pairs below differ only in the leading opcode bit and the
// sign/zero extension node (sext vs. zext).
3478 defm VADDLs : N3VLExt_QHS<0,1,0b0000,0, IIC_VSHLiD, IIC_VSHLiD,
3479 "vaddl", "s", add, sext, 1>;
3480 defm VADDLu : N3VLExt_QHS<1,1,0b0000,0, IIC_VSHLiD, IIC_VSHLiD,
3481 "vaddl", "u", add, zext, 1>;
3482 // VADDW : Vector Add Wide (Q = Q + D)
3483 defm VADDWs : N3VW_QHS<0,1,0b0001,0, "vaddw", "s", add, sext, 0>;
3484 defm VADDWu : N3VW_QHS<1,1,0b0001,0, "vaddw", "u", add, zext, 0>;
3485 // VHADD : Vector Halving Add
3486 defm VHADDs : N3VInt_QHS<0, 0, 0b0000, 0, N3RegFrm,
3487 IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
3488 "vhadd", "s", int_arm_neon_vhadds, 1>;
3489 defm VHADDu : N3VInt_QHS<1, 0, 0b0000, 0, N3RegFrm,
3490 IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
3491 "vhadd", "u", int_arm_neon_vhaddu, 1>;
3492 // VRHADD : Vector Rounding Halving Add
3493 defm VRHADDs : N3VInt_QHS<0, 0, 0b0001, 0, N3RegFrm,
3494 IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
3495 "vrhadd", "s", int_arm_neon_vrhadds, 1>;
3496 defm VRHADDu : N3VInt_QHS<1, 0, 0b0001, 0, N3RegFrm,
3497 IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
3498 "vrhadd", "u", int_arm_neon_vrhaddu, 1>;
3499 // VQADD : Vector Saturating Add
3500 defm VQADDs : N3VInt_QHSD<0, 0, 0b0000, 1, N3RegFrm,
3501 IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
3502 "vqadd", "s", int_arm_neon_vqadds, 1>;
3503 defm VQADDu : N3VInt_QHSD<1, 0, 0b0000, 1, N3RegFrm,
3504 IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q, IIC_VBINi4Q,
3505 "vqadd", "u", int_arm_neon_vqaddu, 1>;
3506 // VADDHN : Vector Add and Narrow Returning High Half (D = Q + Q)
3507 defm VADDHN : N3VNInt_HSD<0,1,0b0100,0, "vaddhn", "i",
3508 int_arm_neon_vaddhn, 1>;
3509 // VRADDHN : Vector Rounding Add and Narrow Returning High Half (D = Q + Q)
3510 defm VRADDHN : N3VNInt_HSD<1,1,0b0100,0, "vraddhn", "i",
3511 int_arm_neon_vraddhn, 1>;
3513 // Vector Multiply Operations.
3515 // VMUL : Vector Multiply (integer, polynomial and floating-point)
3516 defm VMUL : N3V_QHS<0, 0, 0b1001, 1, IIC_VMULi16D, IIC_VMULi32D,
3517 IIC_VMULi16Q, IIC_VMULi32Q, "vmul", "i", mul, 1>;
3518 def VMULpd : N3VDInt<1, 0, 0b00, 0b1001, 1, N3RegFrm, IIC_VMULi16D, "vmul",
3519 "p8", v8i8, v8i8, int_arm_neon_vmulp, 1>;
3520 def VMULpq : N3VQInt<1, 0, 0b00, 0b1001, 1, N3RegFrm, IIC_VMULi16Q, "vmul",
3521 "p8", v16i8, v16i8, int_arm_neon_vmulp, 1>;
3522 def VMULfd : N3VD<1, 0, 0b00, 0b1101, 1, IIC_VFMULD, "vmul", "f32",
3523 v2f32, v2f32, fmul, 1>;
3524 def VMULfq : N3VQ<1, 0, 0b00, 0b1101, 1, IIC_VFMULQ, "vmul", "f32",
3525 v4f32, v4f32, fmul, 1>;
3526 defm VMULsl : N3VSL_HS<0b1000, "vmul", "i", mul>;
3527 def VMULslfd : N3VDSL<0b10, 0b1001, IIC_VBIND, "vmul", "f32", v2f32, fmul>;
3528 def VMULslfq : N3VQSL<0b10, 0b1001, IIC_VBINQ, "vmul", "f32", v4f32,
// The patterns below select the D-register lane-indexed instruction for a
// Q-register multiply-by-duplicated-lane: the scalar operand is taken from a
// D subregister (EXTRACT_SUBREG + DSubReg_*_reg) with the lane index remapped
// into that subregister (SubReg_*_lane).
3531 def : Pat<(v8i16 (mul (v8i16 QPR:$src1),
3532 (v8i16 (NEONvduplane (v8i16 QPR:$src2), imm:$lane)))),
3533 (v8i16 (VMULslv8i16 (v8i16 QPR:$src1),
3534 (v4i16 (EXTRACT_SUBREG QPR:$src2,
3535 (DSubReg_i16_reg imm:$lane))),
3536 (SubReg_i16_lane imm:$lane)))>;
3537 def : Pat<(v4i32 (mul (v4i32 QPR:$src1),
3538 (v4i32 (NEONvduplane (v4i32 QPR:$src2), imm:$lane)))),
3539 (v4i32 (VMULslv4i32 (v4i32 QPR:$src1),
3540 (v2i32 (EXTRACT_SUBREG QPR:$src2,
3541 (DSubReg_i32_reg imm:$lane))),
3542 (SubReg_i32_lane imm:$lane)))>;
3543 def : Pat<(v4f32 (fmul (v4f32 QPR:$src1),
3544 (v4f32 (NEONvduplane (v4f32 QPR:$src2), imm:$lane)))),
3545 (v4f32 (VMULslfq (v4f32 QPR:$src1),
3546 (v2f32 (EXTRACT_SUBREG QPR:$src2,
3547 (DSubReg_i32_reg imm:$lane))),
3548 (SubReg_i32_lane imm:$lane)))>;
3550 // VQDMULH : Vector Saturating Doubling Multiply Returning High Half
3551 defm VQDMULH : N3VInt_HS<0, 0, 0b1011, 0, N3RegFrm, IIC_VMULi16D, IIC_VMULi32D,
3552 IIC_VMULi16Q, IIC_VMULi32Q,
3553 "vqdmulh", "s", int_arm_neon_vqdmulh, 1>;
3554 defm VQDMULHsl: N3VIntSL_HS<0b1100, IIC_VMULi16D, IIC_VMULi32D,
3555 IIC_VMULi16Q, IIC_VMULi32Q,
3556 "vqdmulh", "s", int_arm_neon_vqdmulh>;
3557 def : Pat<(v8i16 (int_arm_neon_vqdmulh (v8i16 QPR:$src1),
3558 (v8i16 (NEONvduplane (v8i16 QPR:$src2),
3560 (v8i16 (VQDMULHslv8i16 (v8i16 QPR:$src1),
3561 (v4i16 (EXTRACT_SUBREG QPR:$src2,
3562 (DSubReg_i16_reg imm:$lane))),
3563 (SubReg_i16_lane imm:$lane)))>;
3564 def : Pat<(v4i32 (int_arm_neon_vqdmulh (v4i32 QPR:$src1),
3565 (v4i32 (NEONvduplane (v4i32 QPR:$src2),
3567 (v4i32 (VQDMULHslv4i32 (v4i32 QPR:$src1),
3568 (v2i32 (EXTRACT_SUBREG QPR:$src2,
3569 (DSubReg_i32_reg imm:$lane))),
3570 (SubReg_i32_lane imm:$lane)))>;
3572 // VQRDMULH : Vector Rounding Saturating Doubling Multiply Returning High Half
3573 defm VQRDMULH : N3VInt_HS<1, 0, 0b1011, 0, N3RegFrm,
3574 IIC_VMULi16D,IIC_VMULi32D,IIC_VMULi16Q,IIC_VMULi32Q,
3575 "vqrdmulh", "s", int_arm_neon_vqrdmulh, 1>;
3576 defm VQRDMULHsl : N3VIntSL_HS<0b1101, IIC_VMULi16D, IIC_VMULi32D,
3577 IIC_VMULi16Q, IIC_VMULi32Q,
3578 "vqrdmulh", "s", int_arm_neon_vqrdmulh>;
3579 def : Pat<(v8i16 (int_arm_neon_vqrdmulh (v8i16 QPR:$src1),
3580 (v8i16 (NEONvduplane (v8i16 QPR:$src2),
3582 (v8i16 (VQRDMULHslv8i16 (v8i16 QPR:$src1),
3583 (v4i16 (EXTRACT_SUBREG QPR:$src2,
3584 (DSubReg_i16_reg imm:$lane))),
3585 (SubReg_i16_lane imm:$lane)))>;
3586 def : Pat<(v4i32 (int_arm_neon_vqrdmulh (v4i32 QPR:$src1),
3587 (v4i32 (NEONvduplane (v4i32 QPR:$src2),
3589 (v4i32 (VQRDMULHslv4i32 (v4i32 QPR:$src1),
3590 (v2i32 (EXTRACT_SUBREG QPR:$src2,
3591 (DSubReg_i32_reg imm:$lane))),
3592 (SubReg_i32_lane imm:$lane)))>;
3594 // VMULL : Vector Multiply Long (integer and polynomial) (Q = D * D)
3595 defm VMULLs : N3VL_QHS<0,1,0b1100,0, IIC_VMULi16D, IIC_VMULi32D,
3596 "vmull", "s", NEONvmulls, 1>;
3597 defm VMULLu : N3VL_QHS<1,1,0b1100,0, IIC_VMULi16D, IIC_VMULi32D,
3598 "vmull", "u", NEONvmullu, 1>;
3599 def VMULLp : N3VLInt<0, 1, 0b00, 0b1110, 0, IIC_VMULi16D, "vmull", "p8",
3600 v8i16, v8i8, int_arm_neon_vmullp, 1>;
3601 defm VMULLsls : N3VLSL_HS<0, 0b1010, IIC_VMULi16D, "vmull", "s", NEONvmulls>;
3602 defm VMULLslu : N3VLSL_HS<1, 0b1010, IIC_VMULi16D, "vmull", "u", NEONvmullu>;
3604 // VQDMULL : Vector Saturating Doubling Multiply Long (Q = D * D)
3605 defm VQDMULL : N3VLInt_HS<0,1,0b1101,0, IIC_VMULi16D, IIC_VMULi32D,
3606 "vqdmull", "s", int_arm_neon_vqdmull, 1>;
3607 defm VQDMULLsl: N3VLIntSL_HS<0, 0b1011, IIC_VMULi16D,
3608 "vqdmull", "s", int_arm_neon_vqdmull>;
3610 // Vector Multiply-Accumulate and Multiply-Subtract Operations.
3612 // VMLA : Vector Multiply Accumulate (integer and floating-point)
3613 defm VMLA : N3VMulOp_QHS<0, 0, 0b1001, 0, IIC_VMACi16D, IIC_VMACi32D,
3614 IIC_VMACi16Q, IIC_VMACi32Q, "vmla", "i", add>;
// fmul_su/fadd_mlx and the UseFPVMLx predicate gate the fused f32 forms so
// they are only selected when separate VMUL+VADD would not be preferable.
3615 def VMLAfd : N3VDMulOp<0, 0, 0b00, 0b1101, 1, IIC_VMACD, "vmla", "f32",
3616 v2f32, fmul_su, fadd_mlx>,
3617 Requires<[HasNEON, UseFPVMLx]>;
3618 def VMLAfq : N3VQMulOp<0, 0, 0b00, 0b1101, 1, IIC_VMACQ, "vmla", "f32",
3619 v4f32, fmul_su, fadd_mlx>,
3620 Requires<[HasNEON, UseFPVMLx]>;
3621 defm VMLAsl : N3VMulOpSL_HS<0b0000, IIC_VMACi16D, IIC_VMACi32D,
3622 IIC_VMACi16Q, IIC_VMACi32Q, "vmla", "i", add>;
3623 def VMLAslfd : N3VDMulOpSL<0b10, 0b0001, IIC_VMACD, "vmla", "f32",
3624 v2f32, fmul_su, fadd_mlx>,
3625 Requires<[HasNEON, UseFPVMLx]>;
3626 def VMLAslfq : N3VQMulOpSL<0b10, 0b0001, IIC_VMACQ, "vmla", "f32",
3627 v4f32, v2f32, fmul_su, fadd_mlx>,
3628 Requires<[HasNEON, UseFPVMLx]>;
// Map Q-register accumulate-by-duplicated-lane onto the lane-indexed
// instructions, extracting the scalar from the appropriate D subregister.
3630 def : Pat<(v8i16 (add (v8i16 QPR:$src1),
3631 (mul (v8i16 QPR:$src2),
3632 (v8i16 (NEONvduplane (v8i16 QPR:$src3), imm:$lane))))),
3633 (v8i16 (VMLAslv8i16 (v8i16 QPR:$src1), (v8i16 QPR:$src2),
3634 (v4i16 (EXTRACT_SUBREG QPR:$src3,
3635 (DSubReg_i16_reg imm:$lane))),
3636 (SubReg_i16_lane imm:$lane)))>;
3638 def : Pat<(v4i32 (add (v4i32 QPR:$src1),
3639 (mul (v4i32 QPR:$src2),
3640 (v4i32 (NEONvduplane (v4i32 QPR:$src3), imm:$lane))))),
3641 (v4i32 (VMLAslv4i32 (v4i32 QPR:$src1), (v4i32 QPR:$src2),
3642 (v2i32 (EXTRACT_SUBREG QPR:$src3,
3643 (DSubReg_i32_reg imm:$lane))),
3644 (SubReg_i32_lane imm:$lane)))>;
3646 def : Pat<(v4f32 (fadd_mlx (v4f32 QPR:$src1),
3647 (fmul_su (v4f32 QPR:$src2),
3648 (v4f32 (NEONvduplane (v4f32 QPR:$src3), imm:$lane))))),
3649 (v4f32 (VMLAslfq (v4f32 QPR:$src1),
3651 (v2f32 (EXTRACT_SUBREG QPR:$src3,
3652 (DSubReg_i32_reg imm:$lane))),
3653 (SubReg_i32_lane imm:$lane)))>,
3654 Requires<[HasNEON, UseFPVMLx]>;
3656 // VMLAL : Vector Multiply Accumulate Long (Q += D * D)
3657 defm VMLALs : N3VLMulOp_QHS<0,1,0b1000,0, IIC_VMACi16D, IIC_VMACi32D,
3658 "vmlal", "s", NEONvmulls, add>;
3659 defm VMLALu : N3VLMulOp_QHS<1,1,0b1000,0, IIC_VMACi16D, IIC_VMACi32D,
3660 "vmlal", "u", NEONvmullu, add>;
3662 defm VMLALsls : N3VLMulOpSL_HS<0, 0b0010, "vmlal", "s", NEONvmulls, add>;
3663 defm VMLALslu : N3VLMulOpSL_HS<1, 0b0010, "vmlal", "u", NEONvmullu, add>;
3665 // VQDMLAL : Vector Saturating Doubling Multiply Accumulate Long (Q += D * D)
3666 defm VQDMLAL : N3VLInt3_HS<0, 1, 0b1001, 0, IIC_VMACi16D, IIC_VMACi32D,
3667 "vqdmlal", "s", int_arm_neon_vqdmlal>;
3668 defm VQDMLALsl: N3VLInt3SL_HS<0, 0b0011, "vqdmlal", "s", int_arm_neon_vqdmlal>;
3670 // VMLS : Vector Multiply Subtract (integer and floating-point)
// Mirrors the VMLA group above with sub/fsub_mlx in place of add/fadd_mlx.
3671 defm VMLS : N3VMulOp_QHS<1, 0, 0b1001, 0, IIC_VMACi16D, IIC_VMACi32D,
3672 IIC_VMACi16Q, IIC_VMACi32Q, "vmls", "i", sub>;
3673 def VMLSfd : N3VDMulOp<0, 0, 0b10, 0b1101, 1, IIC_VMACD, "vmls", "f32",
3674 v2f32, fmul_su, fsub_mlx>,
3675 Requires<[HasNEON, UseFPVMLx]>;
3676 def VMLSfq : N3VQMulOp<0, 0, 0b10, 0b1101, 1, IIC_VMACQ, "vmls", "f32",
3677 v4f32, fmul_su, fsub_mlx>,
3678 Requires<[HasNEON, UseFPVMLx]>;
3679 defm VMLSsl : N3VMulOpSL_HS<0b0100, IIC_VMACi16D, IIC_VMACi32D,
3680 IIC_VMACi16Q, IIC_VMACi32Q, "vmls", "i", sub>;
3681 def VMLSslfd : N3VDMulOpSL<0b10, 0b0101, IIC_VMACD, "vmls", "f32",
3682 v2f32, fmul_su, fsub_mlx>,
3683 Requires<[HasNEON, UseFPVMLx]>;
3684 def VMLSslfq : N3VQMulOpSL<0b10, 0b0101, IIC_VMACQ, "vmls", "f32",
3685 v4f32, v2f32, fmul_su, fsub_mlx>,
3686 Requires<[HasNEON, UseFPVMLx]>;
// Q-register multiply-subtract-by-duplicated-lane -> lane-indexed VMLSsl*.
3688 def : Pat<(v8i16 (sub (v8i16 QPR:$src1),
3689 (mul (v8i16 QPR:$src2),
3690 (v8i16 (NEONvduplane (v8i16 QPR:$src3), imm:$lane))))),
3691 (v8i16 (VMLSslv8i16 (v8i16 QPR:$src1), (v8i16 QPR:$src2),
3692 (v4i16 (EXTRACT_SUBREG QPR:$src3,
3693 (DSubReg_i16_reg imm:$lane))),
3694 (SubReg_i16_lane imm:$lane)))>;
3696 def : Pat<(v4i32 (sub (v4i32 QPR:$src1),
3697 (mul (v4i32 QPR:$src2),
3698 (v4i32 (NEONvduplane (v4i32 QPR:$src3), imm:$lane))))),
3699 (v4i32 (VMLSslv4i32 (v4i32 QPR:$src1), (v4i32 QPR:$src2),
3700 (v2i32 (EXTRACT_SUBREG QPR:$src3,
3701 (DSubReg_i32_reg imm:$lane))),
3702 (SubReg_i32_lane imm:$lane)))>;
3704 def : Pat<(v4f32 (fsub_mlx (v4f32 QPR:$src1),
3705 (fmul_su (v4f32 QPR:$src2),
3706 (v4f32 (NEONvduplane (v4f32 QPR:$src3), imm:$lane))))),
3707 (v4f32 (VMLSslfq (v4f32 QPR:$src1), (v4f32 QPR:$src2),
3708 (v2f32 (EXTRACT_SUBREG QPR:$src3,
3709 (DSubReg_i32_reg imm:$lane))),
3710 (SubReg_i32_lane imm:$lane)))>,
3711 Requires<[HasNEON, UseFPVMLx]>;
3713 // VMLSL : Vector Multiply Subtract Long (Q -= D * D)
3714 defm VMLSLs : N3VLMulOp_QHS<0,1,0b1010,0, IIC_VMACi16D, IIC_VMACi32D,
3715 "vmlsl", "s", NEONvmulls, sub>;
3716 defm VMLSLu : N3VLMulOp_QHS<1,1,0b1010,0, IIC_VMACi16D, IIC_VMACi32D,
3717 "vmlsl", "u", NEONvmullu, sub>;
3719 defm VMLSLsls : N3VLMulOpSL_HS<0, 0b0110, "vmlsl", "s", NEONvmulls, sub>;
3720 defm VMLSLslu : N3VLMulOpSL_HS<1, 0b0110, "vmlsl", "u", NEONvmullu, sub>;
3722 // VQDMLSL : Vector Saturating Doubling Multiply Subtract Long (Q -= D * D)
3723 defm VQDMLSL : N3VLInt3_HS<0, 1, 0b1011, 0, IIC_VMACi16D, IIC_VMACi32D,
3724 "vqdmlsl", "s", int_arm_neon_vqdmlsl>;
// VQDMLSL lane form: op11_8 is a bits<4> parameter, so spell the opcode with
// all four bits (0b0111) for consistency with VQDMLALsl (0b0011) and the
// other lane-indexed opcodes; TableGen zero-extends 0b111 to the same value,
// so the instruction encoding is unchanged.
3725 defm VQDMLSLsl: N3VLInt3SL_HS<0, 0b0111, "vqdmlsl", "s", int_arm_neon_vqdmlsl>;
3727 // Vector Subtract Operations.
3729 // VSUB : Vector Subtract (integer and floating-point)
// Final `0` argument (vs. `1` in the VADD group): subtraction is not
// commutable, so the operands must not be swapped during selection.
3730 defm VSUB : N3V_QHSD<1, 0, 0b1000, 0, IIC_VSUBiD, IIC_VSUBiQ,
3731 "vsub", "i", sub, 0>;
3732 def VSUBfd : N3VD<0, 0, 0b10, 0b1101, 0, IIC_VBIND, "vsub", "f32",
3733 v2f32, v2f32, fsub, 0>;
3734 def VSUBfq : N3VQ<0, 0, 0b10, 0b1101, 0, IIC_VBINQ, "vsub", "f32",
3735 v4f32, v4f32, fsub, 0>;
3736 // VSUBL : Vector Subtract Long (Q = D - D)
3737 defm VSUBLs : N3VLExt_QHS<0,1,0b0010,0, IIC_VSHLiD, IIC_VSHLiD,
3738 "vsubl", "s", sub, sext, 0>;
3739 defm VSUBLu : N3VLExt_QHS<1,1,0b0010,0, IIC_VSHLiD, IIC_VSHLiD,
3740 "vsubl", "u", sub, zext, 0>;
3741 // VSUBW : Vector Subtract Wide (Q = Q - D)
3742 defm VSUBWs : N3VW_QHS<0,1,0b0011,0, "vsubw", "s", sub, sext, 0>;
3743 defm VSUBWu : N3VW_QHS<1,1,0b0011,0, "vsubw", "u", sub, zext, 0>;
3744 // VHSUB : Vector Halving Subtract
3745 defm VHSUBs : N3VInt_QHS<0, 0, 0b0010, 0, N3RegFrm,
3746 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
3747 "vhsub", "s", int_arm_neon_vhsubs, 0>;
3748 defm VHSUBu : N3VInt_QHS<1, 0, 0b0010, 0, N3RegFrm,
3749 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
3750 "vhsub", "u", int_arm_neon_vhsubu, 0>;
3751 // VQSUB : Vector Saturating Subtract
3752 defm VQSUBs : N3VInt_QHSD<0, 0, 0b0010, 1, N3RegFrm,
3753 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
3754 "vqsub", "s", int_arm_neon_vqsubs, 0>;
3755 defm VQSUBu : N3VInt_QHSD<1, 0, 0b0010, 1, N3RegFrm,
3756 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
3757 "vqsub", "u", int_arm_neon_vqsubu, 0>;
3758 // VSUBHN : Vector Subtract and Narrow Returning High Half (D = Q - Q)
3759 defm VSUBHN : N3VNInt_HSD<0,1,0b0110,0, "vsubhn", "i",
3760 int_arm_neon_vsubhn, 0>;
3761 // VRSUBHN : Vector Rounding Subtract and Narrow Returning High Half (D=Q-Q)
3762 defm VRSUBHN : N3VNInt_HSD<1,1,0b0110,0, "vrsubhn", "i",
3763 int_arm_neon_vrsubhn, 0>;
3765 // Vector Comparisons.
3767 // VCEQ : Vector Compare Equal
// Float compares produce integer result vectors (v2i32/v4i32 of all-ones or
// all-zeros lanes), hence the mixed result/operand types below.
3768 defm VCEQ : N3V_QHS<1, 0, 0b1000, 1, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
3769 IIC_VSUBi4Q, "vceq", "i", NEONvceq, 1>;
3770 def VCEQfd : N3VD<0,0,0b00,0b1110,0, IIC_VBIND, "vceq", "f32", v2i32, v2f32,
3772 def VCEQfq : N3VQ<0,0,0b00,0b1110,0, IIC_VBINQ, "vceq", "f32", v4i32, v4f32,
// The *z variants compare against an immediate #0 operand.
3775 defm VCEQz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00010, 0, "vceq", "i",
3776 "$Vd, $Vm, #0", NEONvceqz>;
3778 // VCGE : Vector Compare Greater Than or Equal
3779 defm VCGEs : N3V_QHS<0, 0, 0b0011, 1, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
3780 IIC_VSUBi4Q, "vcge", "s", NEONvcge, 0>;
3781 defm VCGEu : N3V_QHS<1, 0, 0b0011, 1, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
3782 IIC_VSUBi4Q, "vcge", "u", NEONvcgeu, 0>;
3783 def VCGEfd : N3VD<1,0,0b00,0b1110,0, IIC_VBIND, "vcge", "f32", v2i32, v2f32,
3785 def VCGEfq : N3VQ<1,0,0b00,0b1110,0, IIC_VBINQ, "vcge", "f32", v4i32, v4f32,
3788 defm VCGEz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00001, 0, "vcge", "s",
3789 "$Vd, $Vm, #0", NEONvcgez>;
3790 defm VCLEz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00011, 0, "vcle", "s",
3791 "$Vd, $Vm, #0", NEONvclez>;
3793 // VCGT : Vector Compare Greater Than
3794 defm VCGTs : N3V_QHS<0, 0, 0b0011, 0, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
3795 IIC_VSUBi4Q, "vcgt", "s", NEONvcgt, 0>;
3796 defm VCGTu : N3V_QHS<1, 0, 0b0011, 0, IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q,
3797 IIC_VSUBi4Q, "vcgt", "u", NEONvcgtu, 0>;
3798 def VCGTfd : N3VD<1,0,0b10,0b1110,0, IIC_VBIND, "vcgt", "f32", v2i32, v2f32,
3800 def VCGTfq : N3VQ<1,0,0b10,0b1110,0, IIC_VBINQ, "vcgt", "f32", v4i32, v4f32,
3803 defm VCGTz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00000, 0, "vcgt", "s",
3804 "$Vd, $Vm, #0", NEONvcgtz>;
3805 defm VCLTz : N2V_QHS_cmp<0b11, 0b11, 0b01, 0b00100, 0, "vclt", "s",
3806 "$Vd, $Vm, #0", NEONvcltz>;
3808 // VACGE : Vector Absolute Compare Greater Than or Equal (aka VCAGE)
3809 def VACGEd : N3VDInt<1, 0, 0b00, 0b1110, 1, N3RegFrm, IIC_VBIND, "vacge",
3810 "f32", v2i32, v2f32, int_arm_neon_vacged, 0>;
3811 def VACGEq : N3VQInt<1, 0, 0b00, 0b1110, 1, N3RegFrm, IIC_VBINQ, "vacge",
3812 "f32", v4i32, v4f32, int_arm_neon_vacgeq, 0>;
3813 // VACGT : Vector Absolute Compare Greater Than (aka VCAGT)
3814 def VACGTd : N3VDInt<1, 0, 0b10, 0b1110, 1, N3RegFrm, IIC_VBIND, "vacgt",
3815 "f32", v2i32, v2f32, int_arm_neon_vacgtd, 0>;
3816 def VACGTq : N3VQInt<1, 0, 0b10, 0b1110, 1, N3RegFrm, IIC_VBINQ, "vacgt",
3817 "f32", v4i32, v4f32, int_arm_neon_vacgtq, 0>;
3818 // VTST : Vector Test Bits
3819 defm VTST : N3V_QHS<0, 0, 0b1000, 1, IIC_VBINi4D, IIC_VBINi4D, IIC_VBINi4Q,
3820 IIC_VBINi4Q, "vtst", "", NEONvtst, 1>;
3822 // Vector Bitwise Operations.
3824 def vnotd : PatFrag<(ops node:$in),
3825 (xor node:$in, (bitconvert (v8i8 NEONimmAllOnesV)))>;
3826 def vnotq : PatFrag<(ops node:$in),
3827 (xor node:$in, (bitconvert (v16i8 NEONimmAllOnesV)))>;
3830 // VAND : Vector Bitwise AND
3831 def VANDd : N3VDX<0, 0, 0b00, 0b0001, 1, IIC_VBINiD, "vand",
3832 v2i32, v2i32, and, 1>;
3833 def VANDq : N3VQX<0, 0, 0b00, 0b0001, 1, IIC_VBINiQ, "vand",
3834 v4i32, v4i32, and, 1>;
3836 // VEOR : Vector Bitwise Exclusive OR
3837 def VEORd : N3VDX<1, 0, 0b00, 0b0001, 1, IIC_VBINiD, "veor",
3838 v2i32, v2i32, xor, 1>;
3839 def VEORq : N3VQX<1, 0, 0b00, 0b0001, 1, IIC_VBINiQ, "veor",
3840 v4i32, v4i32, xor, 1>;
3842 // VORR : Vector Bitwise OR
3843 def VORRd : N3VDX<0, 0, 0b10, 0b0001, 1, IIC_VBINiD, "vorr",
3844 v2i32, v2i32, or, 1>;
3845 def VORRq : N3VQX<0, 0, 0b10, 0b0001, 1, IIC_VBINiQ, "vorr",
3846 v4i32, v4i32, or, 1>;
3848 def VORRiv4i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 0, 0, 1,
3849 (outs DPR:$Vd), (ins nImmSplatI16:$SIMM, DPR:$src),
3851 "vorr", "i16", "$Vd, $SIMM", "$src = $Vd",
3853 (v4i16 (NEONvorrImm DPR:$src, timm:$SIMM)))]> {
3854 let Inst{9} = SIMM{9};
3857 def VORRiv2i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 0, 0, 1,
3858 (outs DPR:$Vd), (ins nImmSplatI32:$SIMM, DPR:$src),
3860 "vorr", "i32", "$Vd, $SIMM", "$src = $Vd",
3862 (v2i32 (NEONvorrImm DPR:$src, timm:$SIMM)))]> {
3863 let Inst{10-9} = SIMM{10-9};
3866 def VORRiv8i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 1, 0, 1,
3867 (outs QPR:$Vd), (ins nImmSplatI16:$SIMM, QPR:$src),
3869 "vorr", "i16", "$Vd, $SIMM", "$src = $Vd",
3871 (v8i16 (NEONvorrImm QPR:$src, timm:$SIMM)))]> {
3872 let Inst{9} = SIMM{9};
3875 def VORRiv4i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 1, 0, 1,
3876 (outs QPR:$Vd), (ins nImmSplatI32:$SIMM, QPR:$src),
3878 "vorr", "i32", "$Vd, $SIMM", "$src = $Vd",
3880 (v4i32 (NEONvorrImm QPR:$src, timm:$SIMM)))]> {
3881 let Inst{10-9} = SIMM{10-9};
3885 // VBIC : Vector Bitwise Bit Clear (AND NOT)
3886 def VBICd : N3VX<0, 0, 0b01, 0b0001, 0, 1, (outs DPR:$Vd),
3887 (ins DPR:$Vn, DPR:$Vm), N3RegFrm, IIC_VBINiD,
3888 "vbic", "$Vd, $Vn, $Vm", "",
3889 [(set DPR:$Vd, (v2i32 (and DPR:$Vn,
3890 (vnotd DPR:$Vm))))]>;
3891 def VBICq : N3VX<0, 0, 0b01, 0b0001, 1, 1, (outs QPR:$Vd),
3892 (ins QPR:$Vn, QPR:$Vm), N3RegFrm, IIC_VBINiQ,
3893 "vbic", "$Vd, $Vn, $Vm", "",
3894 [(set QPR:$Vd, (v4i32 (and QPR:$Vn,
3895 (vnotq QPR:$Vm))))]>;
3897 def VBICiv4i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 0, 1, 1,
3898 (outs DPR:$Vd), (ins nImmSplatI16:$SIMM, DPR:$src),
3900 "vbic", "i16", "$Vd, $SIMM", "$src = $Vd",
3902 (v4i16 (NEONvbicImm DPR:$src, timm:$SIMM)))]> {
3903 let Inst{9} = SIMM{9};
3906 def VBICiv2i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 0, 1, 1,
3907 (outs DPR:$Vd), (ins nImmSplatI32:$SIMM, DPR:$src),
3909 "vbic", "i32", "$Vd, $SIMM", "$src = $Vd",
3911 (v2i32 (NEONvbicImm DPR:$src, timm:$SIMM)))]> {
3912 let Inst{10-9} = SIMM{10-9};
3915 def VBICiv8i16 : N1ModImm<1, 0b000, {1,0,?,1}, 0, 1, 1, 1,
3916 (outs QPR:$Vd), (ins nImmSplatI16:$SIMM, QPR:$src),
3918 "vbic", "i16", "$Vd, $SIMM", "$src = $Vd",
3920 (v8i16 (NEONvbicImm QPR:$src, timm:$SIMM)))]> {
3921 let Inst{9} = SIMM{9};
3924 def VBICiv4i32 : N1ModImm<1, 0b000, {0,?,?,1}, 0, 1, 1, 1,
3925 (outs QPR:$Vd), (ins nImmSplatI32:$SIMM, QPR:$src),
3927 "vbic", "i32", "$Vd, $SIMM", "$src = $Vd",
3929 (v4i32 (NEONvbicImm QPR:$src, timm:$SIMM)))]> {
3930 let Inst{10-9} = SIMM{10-9};
3933 // VORN : Vector Bitwise OR NOT
// VORN (register form): Vd = Vn | ~Vm. Same shape as VBICd/VBICq above but
// with 'or' instead of 'and' and a different op21_20 field (0b11 vs 0b01).
3934 def VORNd : N3VX<0, 0, 0b11, 0b0001, 0, 1, (outs DPR:$Vd),
3935 (ins DPR:$Vn, DPR:$Vm), N3RegFrm, IIC_VBINiD,
3936 "vorn", "$Vd, $Vn, $Vm", "",
3937 [(set DPR:$Vd, (v2i32 (or DPR:$Vn,
3938 (vnotd DPR:$Vm))))]>;
// 128-bit Q-register variant.
3939 def VORNq : N3VX<0, 0, 0b11, 0b0001, 1, 1, (outs QPR:$Vd),
3940 (ins QPR:$Vn, QPR:$Vm), N3RegFrm, IIC_VBINiQ,
3941 "vorn", "$Vd, $Vn, $Vm", "",
3942 [(set QPR:$Vd, (v4i32 (or QPR:$Vn,
3943 (vnotq QPR:$Vm))))]>;
3945 // VMVN : Vector Bitwise NOT (Immediate)
3947 let isReMaterializable = 1 in {
3949 def VMVNv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 1, 1, (outs DPR:$Vd),
3950 (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
3951 "vmvn", "i16", "$Vd, $SIMM", "",
3952 [(set DPR:$Vd, (v4i16 (NEONvmvnImm timm:$SIMM)))]> {
3953 let Inst{9} = SIMM{9};
3956 def VMVNv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 1, 1, (outs QPR:$Vd),
3957 (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
3958 "vmvn", "i16", "$Vd, $SIMM", "",
3959 [(set QPR:$Vd, (v8i16 (NEONvmvnImm timm:$SIMM)))]> {
3960 let Inst{9} = SIMM{9};
3963 def VMVNv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 1, 1, (outs DPR:$Vd),
3964 (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
3965 "vmvn", "i32", "$Vd, $SIMM", "",
3966 [(set DPR:$Vd, (v2i32 (NEONvmvnImm timm:$SIMM)))]> {
3967 let Inst{11-8} = SIMM{11-8};
3970 def VMVNv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 1, 1, (outs QPR:$Vd),
3971 (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
3972 "vmvn", "i32", "$Vd, $SIMM", "",
3973 [(set QPR:$Vd, (v4i32 (NEONvmvnImm timm:$SIMM)))]> {
3974 let Inst{11-8} = SIMM{11-8};
3978 // VMVN : Vector Bitwise NOT
// VMVN (register form): bitwise NOT of a whole vector.
3979 def VMVNd : N2VX<0b11, 0b11, 0b00, 0b00, 0b01011, 0, 0,
3980 (outs DPR:$Vd), (ins DPR:$Vm), IIC_VSUBiD,
3981 "vmvn", "$Vd, $Vm", "",
3982 [(set DPR:$Vd, (v2i32 (vnotd DPR:$Vm)))]>;
3983 def VMVNq : N2VX<0b11, 0b11, 0b00, 0b00, 0b01011, 1, 0,
3984 (outs QPR:$Vd), (ins QPR:$Vm), IIC_VSUBiD,
3985 "vmvn", "$Vd, $Vm", "",
3986 [(set QPR:$Vd, (v4i32 (vnotq QPR:$Vm)))]>;
// NOTE(review): these Pats repeat the patterns already attached to the
// VMVNd/VMVNq definitions above (lines 3982/3986); they look redundant.
3987 def : Pat<(v2i32 (vnotd DPR:$src)), (VMVNd DPR:$src)>;
3988 def : Pat<(v4i32 (vnotq QPR:$src)), (VMVNq QPR:$src)>;
3990 // VBSL : Vector Bitwise Select
3991 def VBSLd : N3VX<1, 0, 0b01, 0b0001, 0, 1, (outs DPR:$Vd),
3992 (ins DPR:$src1, DPR:$Vn, DPR:$Vm),
3993 N3RegFrm, IIC_VCNTiD,
3994 "vbsl", "$Vd, $Vn, $Vm", "$src1 = $Vd",
3996 (v2i32 (NEONvbsl DPR:$src1, DPR:$Vn, DPR:$Vm)))]>;
3998 def : Pat<(v2i32 (or (and DPR:$Vn, DPR:$Vd),
3999 (and DPR:$Vm, (vnotd DPR:$Vd)))),
4000 (VBSLd DPR:$Vd, DPR:$Vn, DPR:$Vm)>;
4002 def VBSLq : N3VX<1, 0, 0b01, 0b0001, 1, 1, (outs QPR:$Vd),
4003 (ins QPR:$src1, QPR:$Vn, QPR:$Vm),
4004 N3RegFrm, IIC_VCNTiQ,
4005 "vbsl", "$Vd, $Vn, $Vm", "$src1 = $Vd",
4007 (v4i32 (NEONvbsl QPR:$src1, QPR:$Vn, QPR:$Vm)))]>;
4009 def : Pat<(v4i32 (or (and QPR:$Vn, QPR:$Vd),
4010 (and QPR:$Vm, (vnotq QPR:$Vd)))),
4011 (VBSLq QPR:$Vd, QPR:$Vn, QPR:$Vm)>;
4013 // VBIF : Vector Bitwise Insert if False
4014 // like VBSL but with: "vbif $dst, $src3, $src1", "$src2 = $dst",
4015 // FIXME: This instruction's encoding MAY NOT BE correct.
4016 def VBIFd : N3VX<1, 0, 0b11, 0b0001, 0, 1,
4017 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm),
4018 N3RegFrm, IIC_VBINiD,
4019 "vbif", "$Vd, $Vn, $Vm", "$src1 = $Vd",
4021 def VBIFq : N3VX<1, 0, 0b11, 0b0001, 1, 1,
4022 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm),
4023 N3RegFrm, IIC_VBINiQ,
4024 "vbif", "$Vd, $Vn, $Vm", "$src1 = $Vd",
4027 // VBIT : Vector Bitwise Insert if True
4028 // like VBSL but with: "vbit $dst, $src2, $src1", "$src3 = $dst",
4029 // FIXME: This instruction's encoding MAY NOT BE correct.
4030 def VBITd : N3VX<1, 0, 0b10, 0b0001, 0, 1,
4031 (outs DPR:$Vd), (ins DPR:$src1, DPR:$Vn, DPR:$Vm),
4032 N3RegFrm, IIC_VBINiD,
4033 "vbit", "$Vd, $Vn, $Vm", "$src1 = $Vd",
4035 def VBITq : N3VX<1, 0, 0b10, 0b0001, 1, 1,
4036 (outs QPR:$Vd), (ins QPR:$src1, QPR:$Vn, QPR:$Vm),
4037 N3RegFrm, IIC_VBINiQ,
4038 "vbit", "$Vd, $Vn, $Vm", "$src1 = $Vd",
4041 // VBIT/VBIF are not yet implemented. The TwoAddress pass will not go looking
4042 // for equivalent operations with different register constraints; it just
// inserts copies.
4045 // Vector Absolute Differences.
4047 // VABD : Vector Absolute Difference
// Signed/unsigned integer forms come from the multiclass (one def per
// element size); the f32 forms are separate D/Q defs using the vabds
// intrinsic with floating-point vector types.
4048 defm VABDs : N3VInt_QHS<0, 0, 0b0111, 0, N3RegFrm,
4049 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4050 "vabd", "s", int_arm_neon_vabds, 1>;
4051 defm VABDu : N3VInt_QHS<1, 0, 0b0111, 0, N3RegFrm,
4052 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4053 "vabd", "u", int_arm_neon_vabdu, 1>;
4054 def VABDfd : N3VDInt<1, 0, 0b10, 0b1101, 0, N3RegFrm, IIC_VBIND,
4055 "vabd", "f32", v2f32, v2f32, int_arm_neon_vabds, 1>;
4056 def VABDfq : N3VQInt<1, 0, 0b10, 0b1101, 0, N3RegFrm, IIC_VBINQ,
4057 "vabd", "f32", v4f32, v4f32, int_arm_neon_vabds, 1>;
4059 // VABDL : Vector Absolute Difference Long (Q = | D - D |)
// zext is passed for BOTH signed and unsigned variants: |a - b| is always
// non-negative, so widening by zero-extension is correct either way.
4060 defm VABDLs : N3VLIntExt_QHS<0,1,0b0111,0, IIC_VSUBi4Q,
4061 "vabdl", "s", int_arm_neon_vabds, zext, 1>;
4062 defm VABDLu : N3VLIntExt_QHS<1,1,0b0111,0, IIC_VSUBi4Q,
4063 "vabdl", "u", int_arm_neon_vabdu, zext, 1>;
4065 // VABA : Vector Absolute Difference and Accumulate
// Built as vabd combined with an 'add' accumulate step.
4066 defm VABAs : N3VIntOp_QHS<0,0,0b0111,1, IIC_VABAD, IIC_VABAQ,
4067 "vaba", "s", int_arm_neon_vabds, add>;
4068 defm VABAu : N3VIntOp_QHS<1,0,0b0111,1, IIC_VABAD, IIC_VABAQ,
4069 "vaba", "u", int_arm_neon_vabdu, add>;
4071 // VABAL : Vector Absolute Difference and Accumulate Long (Q += | D - D |)
4072 defm VABALs : N3VLIntExtOp_QHS<0,1,0b0101,0, IIC_VABAD,
4073 "vabal", "s", int_arm_neon_vabds, zext, add>;
4074 defm VABALu : N3VLIntExtOp_QHS<1,1,0b0101,0, IIC_VABAD,
4075 "vabal", "u", int_arm_neon_vabdu, zext, add>;
4077 // Vector Maximum and Minimum.
4079 // VMAX : Vector Maximum
// Integer max, signed (op24 = 0) and unsigned (op24 = 1), all element sizes.
4080 defm VMAXs : N3VInt_QHS<0, 0, 0b0110, 0, N3RegFrm,
4081 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4082 "vmax", "s", int_arm_neon_vmaxs, 1>;
4083 defm VMAXu : N3VInt_QHS<1, 0, 0b0110, 0, N3RegFrm,
4084 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4085 "vmax", "u", int_arm_neon_vmaxu, 1>;
4086 def VMAXfd : N3VDInt<0, 0, 0b00, 0b1111, 0, N3RegFrm, IIC_VBIND,
4088 v2f32, v2f32, int_arm_neon_vmaxs, 1>;
4089 def VMAXfq : N3VQInt<0, 0, 0b00, 0b1111, 0, N3RegFrm, IIC_VBINQ,
4091 v4f32, v4f32, int_arm_neon_vmaxs, 1>;
4093 // VMIN : Vector Minimum
// Same encoding as VMAX (0b0110) but with op4 = 1 selecting the min form.
4094 defm VMINs : N3VInt_QHS<0, 0, 0b0110, 1, N3RegFrm,
4095 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4096 "vmin", "s", int_arm_neon_vmins, 1>;
4097 defm VMINu : N3VInt_QHS<1, 0, 0b0110, 1, N3RegFrm,
4098 IIC_VSUBi4D, IIC_VSUBi4D, IIC_VSUBi4Q, IIC_VSUBi4Q,
4099 "vmin", "u", int_arm_neon_vminu, 1>;
4100 def VMINfd : N3VDInt<0, 0, 0b10, 0b1111, 0, N3RegFrm, IIC_VBIND,
4102 v2f32, v2f32, int_arm_neon_vmins, 1>;
4103 def VMINfq : N3VQInt<0, 0, 0b10, 0b1111, 0, N3RegFrm, IIC_VBINQ,
4105 v4f32, v4f32, int_arm_neon_vmins, 1>;
4107 // Vector Pairwise Operations.
4109 // VPADD : Vector Pairwise Add
4110 def VPADDi8 : N3VDInt<0, 0, 0b00, 0b1011, 1, N3RegFrm, IIC_VSHLiD,
4112 v8i8, v8i8, int_arm_neon_vpadd, 0>;
4113 def VPADDi16 : N3VDInt<0, 0, 0b01, 0b1011, 1, N3RegFrm, IIC_VSHLiD,
4115 v4i16, v4i16, int_arm_neon_vpadd, 0>;
4116 def VPADDi32 : N3VDInt<0, 0, 0b10, 0b1011, 1, N3RegFrm, IIC_VSHLiD,
4118 v2i32, v2i32, int_arm_neon_vpadd, 0>;
// Floating-point pairwise add; D registers only (no Q form is defined here).
4119 def VPADDf : N3VDInt<1, 0, 0b00, 0b1101, 0, N3RegFrm,
4120 IIC_VPBIND, "vpadd", "f32",
4121 v2f32, v2f32, int_arm_neon_vpadd, 0>;
4123 // VPADDL : Vector Pairwise Add Long
// Widening pairwise add: adjacent element pairs sum into double-width lanes.
4124 defm VPADDLs : N2VPLInt_QHS<0b11, 0b11, 0b00, 0b00100, 0, "vpaddl", "s",
4125 int_arm_neon_vpaddls>;
4126 defm VPADDLu : N2VPLInt_QHS<0b11, 0b11, 0b00, 0b00101, 0, "vpaddl", "u",
4127 int_arm_neon_vpaddlu>;
4129 // VPADAL : Vector Pairwise Add and Accumulate Long
4130 defm VPADALs : N2VPLInt2_QHS<0b11, 0b11, 0b00, 0b01100, 0, "vpadal", "s",
4131 int_arm_neon_vpadals>;
4132 defm VPADALu : N2VPLInt2_QHS<0b11, 0b11, 0b00, 0b01101, 0, "vpadal", "u",
4133 int_arm_neon_vpadalu>;
4135 // VPMAX : Vector Pairwise Maximum
// Pairwise max/min exist only on D registers. Both share opcode 0b1010;
// the trailing op4 bit distinguishes vpmax (0) from vpmin (1).
4136 def VPMAXs8 : N3VDInt<0, 0, 0b00, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4137 "s8", v8i8, v8i8, int_arm_neon_vpmaxs, 0>;
4138 def VPMAXs16 : N3VDInt<0, 0, 0b01, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4139 "s16", v4i16, v4i16, int_arm_neon_vpmaxs, 0>;
4140 def VPMAXs32 : N3VDInt<0, 0, 0b10, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4141 "s32", v2i32, v2i32, int_arm_neon_vpmaxs, 0>;
4142 def VPMAXu8 : N3VDInt<1, 0, 0b00, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4143 "u8", v8i8, v8i8, int_arm_neon_vpmaxu, 0>;
4144 def VPMAXu16 : N3VDInt<1, 0, 0b01, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4145 "u16", v4i16, v4i16, int_arm_neon_vpmaxu, 0>;
4146 def VPMAXu32 : N3VDInt<1, 0, 0b10, 0b1010, 0, N3RegFrm, IIC_VSUBi4D, "vpmax",
4147 "u32", v2i32, v2i32, int_arm_neon_vpmaxu, 0>;
// f32 pairwise max/min use opcode 0b1111; vpmax has op21_20 = 0b00 and
// vpmin has op21_20 = 0b10.
4148 def VPMAXf : N3VDInt<1, 0, 0b00, 0b1111, 0, N3RegFrm, IIC_VPBIND, "vpmax",
4149 "f32", v2f32, v2f32, int_arm_neon_vpmaxs, 0>;
4151 // VPMIN : Vector Pairwise Minimum
4152 def VPMINs8 : N3VDInt<0, 0, 0b00, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4153 "s8", v8i8, v8i8, int_arm_neon_vpmins, 0>;
4154 def VPMINs16 : N3VDInt<0, 0, 0b01, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4155 "s16", v4i16, v4i16, int_arm_neon_vpmins, 0>;
4156 def VPMINs32 : N3VDInt<0, 0, 0b10, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4157 "s32", v2i32, v2i32, int_arm_neon_vpmins, 0>;
4158 def VPMINu8 : N3VDInt<1, 0, 0b00, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4159 "u8", v8i8, v8i8, int_arm_neon_vpminu, 0>;
4160 def VPMINu16 : N3VDInt<1, 0, 0b01, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4161 "u16", v4i16, v4i16, int_arm_neon_vpminu, 0>;
4162 def VPMINu32 : N3VDInt<1, 0, 0b10, 0b1010, 1, N3RegFrm, IIC_VSUBi4D, "vpmin",
4163 "u32", v2i32, v2i32, int_arm_neon_vpminu, 0>;
4164 def VPMINf : N3VDInt<1, 0, 0b10, 0b1111, 0, N3RegFrm, IIC_VPBIND, "vpmin",
4165 "f32", v2f32, v2f32, int_arm_neon_vpmins, 0>;
4167 // Vector Reciprocal and Reciprocal Square Root Estimate and Step.
4169 // VRECPE : Vector Reciprocal Estimate
// Estimate instructions are unary (N2V*Int); each exists in a u32 integer
// flavor and an f32 flavor, for both D and Q registers, and all map onto
// the same intrinsic with different vector types.
4170 def VRECPEd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0,
4171 IIC_VUNAD, "vrecpe", "u32",
4172 v2i32, v2i32, int_arm_neon_vrecpe>;
4173 def VRECPEq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0,
4174 IIC_VUNAQ, "vrecpe", "u32",
4175 v4i32, v4i32, int_arm_neon_vrecpe>;
4176 def VRECPEfd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01010, 0,
4177 IIC_VUNAD, "vrecpe", "f32",
4178 v2f32, v2f32, int_arm_neon_vrecpe>;
4179 def VRECPEfq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01010, 0,
4180 IIC_VUNAQ, "vrecpe", "f32",
4181 v4f32, v4f32, int_arm_neon_vrecpe>;
4183 // VRECPS : Vector Reciprocal Step
// Step instructions are binary (N3V*Int) and f32-only; used to refine the
// estimate via Newton-Raphson iteration.
4184 def VRECPSfd : N3VDInt<0, 0, 0b00, 0b1111, 1, N3RegFrm,
4185 IIC_VRECSD, "vrecps", "f32",
4186 v2f32, v2f32, int_arm_neon_vrecps, 1>;
4187 def VRECPSfq : N3VQInt<0, 0, 0b00, 0b1111, 1, N3RegFrm,
4188 IIC_VRECSQ, "vrecps", "f32",
4189 v4f32, v4f32, int_arm_neon_vrecps, 1>;
4191 // VRSQRTE : Vector Reciprocal Square Root Estimate
4192 def VRSQRTEd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01001, 0,
4193 IIC_VUNAD, "vrsqrte", "u32",
4194 v2i32, v2i32, int_arm_neon_vrsqrte>;
4195 def VRSQRTEq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01001, 0,
4196 IIC_VUNAQ, "vrsqrte", "u32",
4197 v4i32, v4i32, int_arm_neon_vrsqrte>;
4198 def VRSQRTEfd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01011, 0,
4199 IIC_VUNAD, "vrsqrte", "f32",
4200 v2f32, v2f32, int_arm_neon_vrsqrte>;
4201 def VRSQRTEfq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01011, 0,
4202 IIC_VUNAQ, "vrsqrte", "f32",
4203 v4f32, v4f32, int_arm_neon_vrsqrte>;
4205 // VRSQRTS : Vector Reciprocal Square Root Step
4206 def VRSQRTSfd : N3VDInt<0, 0, 0b10, 0b1111, 1, N3RegFrm,
4207 IIC_VRECSD, "vrsqrts", "f32",
4208 v2f32, v2f32, int_arm_neon_vrsqrts, 1>;
4209 def VRSQRTSfq : N3VQInt<0, 0, 0b10, 0b1111, 1, N3RegFrm,
4210 IIC_VRECSQ, "vrsqrts", "f32",
4211 v4f32, v4f32, int_arm_neon_vrsqrts, 1>;
4215 // VSHL : Vector Shift
// Register-shift form: shift amount comes from a second vector operand.
4216 defm VSHLs : N3VInt_QHSDSh<0, 0, 0b0100, 0, N3RegVShFrm,
4217 IIC_VSHLiD, IIC_VSHLiD, IIC_VSHLiQ, IIC_VSHLiQ,
4218 "vshl", "s", int_arm_neon_vshifts>;
4219 defm VSHLu : N3VInt_QHSDSh<1, 0, 0b0100, 0, N3RegVShFrm,
4220 IIC_VSHLiD, IIC_VSHLiD, IIC_VSHLiQ, IIC_VSHLiQ,
4221 "vshl", "u", int_arm_neon_vshiftu>;
4223 // VSHL : Vector Shift Left (Immediate)
4224 defm VSHLi : N2VShL_QHSD<0, 1, 0b0101, 1, IIC_VSHLiD, "vshl", "i", NEONvshl>;
4226 // VSHR : Vector Shift Right (Immediate)
// Right shifts by immediate need signed/unsigned variants (arithmetic vs
// logical shift); left shift by immediate does not.
4227 defm VSHRs : N2VShR_QHSD<0, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "s",NEONvshrs>;
4228 defm VSHRu : N2VShR_QHSD<1, 1, 0b0000, 1, IIC_VSHLiD, "vshr", "u",NEONvshru>;
4230 // VSHLL : Vector Shift Left Long
4231 defm VSHLLs : N2VLSh_QHS<0, 1, 0b1010, 0, 0, 1, "vshll", "s", NEONvshlls>;
4232 defm VSHLLu : N2VLSh_QHS<1, 1, 0b1010, 0, 0, 1, "vshll", "u", NEONvshllu>;
4234 // VSHLL : Vector Shift Left Long (with maximum shift count)
4235 class N2VLShMax<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
4236 bit op6, bit op4, string OpcodeStr, string Dt, ValueType ResTy,
4237 ValueType OpTy, SDNode OpNode>
4238 : N2VLSh<op24, op23, op11_8, op7, op6, op4, OpcodeStr, Dt,
4239 ResTy, OpTy, OpNode> {
4240 let Inst{21-16} = op21_16;
4241 let DecoderMethod = "DecodeVSHLMaxInstruction";
// VSHLL with shift count equal to the element width uses a distinct
// encoding (see N2VLShMax above); one def per source element size.
4243 def VSHLLi8 : N2VLShMax<1, 1, 0b110010, 0b0011, 0, 0, 0, "vshll", "i8",
4244 v8i16, v8i8, NEONvshlli>;
4245 def VSHLLi16 : N2VLShMax<1, 1, 0b110110, 0b0011, 0, 0, 0, "vshll", "i16",
4246 v4i32, v4i16, NEONvshlli>;
4247 def VSHLLi32 : N2VLShMax<1, 1, 0b111010, 0b0011, 0, 0, 0, "vshll", "i32",
4248 v2i64, v2i32, NEONvshlli>;
4250 // VSHRN : Vector Shift Right and Narrow
4251 defm VSHRN : N2VNSh_HSD<0,1,0b1000,0,0,1, IIC_VSHLiD, "vshrn", "i",
4254 // VRSHL : Vector Rounding Shift
// Rounding variants of the register-shift form (result rounded toward
// nearest rather than truncated).
4255 defm VRSHLs : N3VInt_QHSDSh<0, 0, 0b0101, 0, N3RegVShFrm,
4256 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4257 "vrshl", "s", int_arm_neon_vrshifts>;
4258 defm VRSHLu : N3VInt_QHSDSh<1, 0, 0b0101, 0, N3RegVShFrm,
4259 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4260 "vrshl", "u", int_arm_neon_vrshiftu>;
4261 // VRSHR : Vector Rounding Shift Right
4262 defm VRSHRs : N2VShR_QHSD<0,1,0b0010,1, IIC_VSHLi4D, "vrshr", "s",NEONvrshrs>;
4263 defm VRSHRu : N2VShR_QHSD<1,1,0b0010,1, IIC_VSHLi4D, "vrshr", "u",NEONvrshru>;
4265 // VRSHRN : Vector Rounding Shift Right and Narrow
4266 defm VRSHRN : N2VNSh_HSD<0, 1, 0b1000, 0, 1, 1, IIC_VSHLi4D, "vrshrn", "i",
4269 // VQSHL : Vector Saturating Shift
// Saturating register-shift form (op4 = 1 distinguishes it from VSHL).
4270 defm VQSHLs : N3VInt_QHSDSh<0, 0, 0b0100, 1, N3RegVShFrm,
4271 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4272 "vqshl", "s", int_arm_neon_vqshifts>;
4273 defm VQSHLu : N3VInt_QHSDSh<1, 0, 0b0100, 1, N3RegVShFrm,
4274 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4275 "vqshl", "u", int_arm_neon_vqshiftu>;
4276 // VQSHL : Vector Saturating Shift Left (Immediate)
4277 defm VQSHLsi : N2VShL_QHSD<0,1,0b0111,1, IIC_VSHLi4D, "vqshl", "s",NEONvqshls>;
4278 defm VQSHLui : N2VShL_QHSD<1,1,0b0111,1, IIC_VSHLi4D, "vqshl", "u",NEONvqshlu>;
4280 // VQSHLU : Vector Saturating Shift Left (Immediate, Unsigned)
// Signed input saturated to an unsigned result; note the "s" type suffix.
4281 defm VQSHLsu : N2VShL_QHSD<1,1,0b0110,1, IIC_VSHLi4D,"vqshlu","s",NEONvqshlsu>;
4283 // VQSHRN : Vector Saturating Shift Right and Narrow
4284 defm VQSHRNs : N2VNSh_HSD<0, 1, 0b1001, 0, 0, 1, IIC_VSHLi4D, "vqshrn", "s",
4286 defm VQSHRNu : N2VNSh_HSD<1, 1, 0b1001, 0, 0, 1, IIC_VSHLi4D, "vqshrn", "u",
4289 // VQSHRUN : Vector Saturating Shift Right and Narrow (Unsigned)
4290 defm VQSHRUN : N2VNSh_HSD<1, 1, 0b1000, 0, 0, 1, IIC_VSHLi4D, "vqshrun", "s",
4293 // VQRSHL : Vector Saturating Rounding Shift
// Combines the rounding (0b0101) and saturating (op4 = 1) behaviors.
4294 defm VQRSHLs : N3VInt_QHSDSh<0, 0, 0b0101, 1, N3RegVShFrm,
4295 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4296 "vqrshl", "s", int_arm_neon_vqrshifts>;
4297 defm VQRSHLu : N3VInt_QHSDSh<1, 0, 0b0101, 1, N3RegVShFrm,
4298 IIC_VSHLi4D, IIC_VSHLi4D, IIC_VSHLi4Q, IIC_VSHLi4Q,
4299 "vqrshl", "u", int_arm_neon_vqrshiftu>;
4301 // VQRSHRN : Vector Saturating Rounding Shift Right and Narrow
4302 defm VQRSHRNs : N2VNSh_HSD<0, 1, 0b1001, 0, 1, 1, IIC_VSHLi4D, "vqrshrn", "s",
4304 defm VQRSHRNu : N2VNSh_HSD<1, 1, 0b1001, 0, 1, 1, IIC_VSHLi4D, "vqrshrn", "u",
4307 // VQRSHRUN : Vector Saturating Rounding Shift Right and Narrow (Unsigned)
4308 defm VQRSHRUN : N2VNSh_HSD<1, 1, 0b1000, 0, 1, 1, IIC_VSHLi4D, "vqrshrun", "s",
4311 // VSRA : Vector Shift Right and Accumulate
// Shift-right-by-immediate whose result is added into the destination.
4312 defm VSRAs : N2VShAdd_QHSD<0, 1, 0b0001, 1, "vsra", "s", NEONvshrs>;
4313 defm VSRAu : N2VShAdd_QHSD<1, 1, 0b0001, 1, "vsra", "u", NEONvshru>;
4314 // VRSRA : Vector Rounding Shift Right and Accumulate
4315 defm VRSRAs : N2VShAdd_QHSD<0, 1, 0b0011, 1, "vrsra", "s", NEONvrshrs>;
4316 defm VRSRAu : N2VShAdd_QHSD<1, 1, 0b0011, 1, "vrsra", "u", NEONvrshru>;
4318 // VSLI : Vector Shift Left and Insert
4319 defm VSLI : N2VShInsL_QHSD<1, 1, 0b0101, 1, "vsli">;
4321 // VSRI : Vector Shift Right and Insert
4322 defm VSRI : N2VShInsR_QHSD<1, 1, 0b0100, 1, "vsri">;
4324 // Vector Absolute and Saturating Absolute.
4326 // VABS : Vector Absolute Value
4327 defm VABS : N2VInt_QHS<0b11, 0b11, 0b01, 0b00110, 0,
4328 IIC_VUNAiD, IIC_VUNAiQ, "vabs", "s",
// Floating-point absolute value, D and Q forms, via the vabs intrinsic.
4330 def VABSfd : N2VDInt<0b11, 0b11, 0b10, 0b01, 0b01110, 0,
4331 IIC_VUNAD, "vabs", "f32",
4332 v2f32, v2f32, int_arm_neon_vabs>;
4333 def VABSfq : N2VQInt<0b11, 0b11, 0b10, 0b01, 0b01110, 0,
4334 IIC_VUNAQ, "vabs", "f32",
4335 v4f32, v4f32, int_arm_neon_vabs>;
4337 // VQABS : Vector Saturating Absolute Value
// Saturating so that abs(INT_MIN) clamps instead of wrapping.
4338 defm VQABS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01110, 0,
4339 IIC_VQUNAiD, IIC_VQUNAiQ, "vqabs", "s",
4340 int_arm_neon_vqabs>;
// Negation expressed as (0 - x): these PatFrags match a 'sub' whose first
// operand is the all-zeros vector (bitconverted to the matching width).
4344 def vnegd : PatFrag<(ops node:$in),
4345 (sub (bitconvert (v2i32 NEONimmAllZerosV)), node:$in)>;
4346 def vnegq : PatFrag<(ops node:$in),
4347 (sub (bitconvert (v4i32 NEONimmAllZerosV)), node:$in)>;
// Helper classes: one integer-negate instruction per element size, for
// D and Q registers respectively.
4349 class VNEGD<bits<2> size, string OpcodeStr, string Dt, ValueType Ty>
4350 : N2V<0b11, 0b11, size, 0b01, 0b00111, 0, 0, (outs DPR:$Vd), (ins DPR:$Vm),
4351 IIC_VSHLiD, OpcodeStr, Dt, "$Vd, $Vm", "",
4352 [(set DPR:$Vd, (Ty (vnegd DPR:$Vm)))]>;
4353 class VNEGQ<bits<2> size, string OpcodeStr, string Dt, ValueType Ty>
4354 : N2V<0b11, 0b11, size, 0b01, 0b00111, 1, 0, (outs QPR:$Vd), (ins QPR:$Vm),
4355 IIC_VSHLiQ, OpcodeStr, Dt, "$Vd, $Vm", "",
4356 [(set QPR:$Vd, (Ty (vnegq QPR:$Vm)))]>;
4358 // VNEG : Vector Negate (integer)
4359 def VNEGs8d : VNEGD<0b00, "vneg", "s8", v8i8>;
4360 def VNEGs16d : VNEGD<0b01, "vneg", "s16", v4i16>;
4361 def VNEGs32d : VNEGD<0b10, "vneg", "s32", v2i32>;
4362 def VNEGs8q : VNEGQ<0b00, "vneg", "s8", v16i8>;
4363 def VNEGs16q : VNEGQ<0b01, "vneg", "s16", v8i16>;
4364 def VNEGs32q : VNEGQ<0b10, "vneg", "s32", v4i32>;
4366 // VNEG : Vector Negate (floating-point)
4367 def VNEGfd : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 0, 0,
4368 (outs DPR:$Vd), (ins DPR:$Vm), IIC_VUNAD,
4369 "vneg", "f32", "$Vd, $Vm", "",
4370 [(set DPR:$Vd, (v2f32 (fneg DPR:$Vm)))]>;
4371 def VNEGf32q : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 1, 0,
4372 (outs QPR:$Vd), (ins QPR:$Vm), IIC_VUNAQ,
4373 "vneg", "f32", "$Vd, $Vm", "",
4374 [(set QPR:$Vd, (v4f32 (fneg QPR:$Vm)))]>;
// Explicit type-specific patterns selecting the size-appropriate VNEG def.
4376 def : Pat<(v8i8 (vnegd DPR:$src)), (VNEGs8d DPR:$src)>;
4377 def : Pat<(v4i16 (vnegd DPR:$src)), (VNEGs16d DPR:$src)>;
4378 def : Pat<(v2i32 (vnegd DPR:$src)), (VNEGs32d DPR:$src)>;
4379 def : Pat<(v16i8 (vnegq QPR:$src)), (VNEGs8q QPR:$src)>;
4380 def : Pat<(v8i16 (vnegq QPR:$src)), (VNEGs16q QPR:$src)>;
4381 def : Pat<(v4i32 (vnegq QPR:$src)), (VNEGs32q QPR:$src)>;
4383 // VQNEG : Vector Saturating Negate
4384 defm VQNEG : N2VInt_QHS<0b11, 0b11, 0b00, 0b01111, 0,
4385 IIC_VQUNAiD, IIC_VQUNAiQ, "vqneg", "s",
4386 int_arm_neon_vqneg>;
4388 // Vector Bit Counting Operations.
4390 // VCLS : Vector Count Leading Sign Bits
4391 defm VCLS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01000, 0,
4392 IIC_VCNTiD, IIC_VCNTiQ, "vcls", "s",
4394 // VCLZ : Vector Count Leading Zeros
4395 defm VCLZ : N2VInt_QHS<0b11, 0b11, 0b00, 0b01001, 0,
4396 IIC_VCNTiD, IIC_VCNTiQ, "vclz", "i",
4398 // VCNT : Vector Count One Bits
// Population count per byte lane; only 8-bit element forms exist.
4399 def VCNTd : N2VDInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0,
4400 IIC_VCNTiD, "vcnt", "8",
4401 v8i8, v8i8, int_arm_neon_vcnt>;
4402 def VCNTq : N2VQInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0,
4403 IIC_VCNTiQ, "vcnt", "8",
4404 v16i8, v16i8, int_arm_neon_vcnt>;
// VSWP swaps register contents; empty pattern list, so these are only
// reachable via the assembler/disassembler, not instruction selection.
4407 def VSWPd : N2VX<0b11, 0b11, 0b00, 0b10, 0b00000, 0, 0,
4408 (outs DPR:$Vd), (ins DPR:$Vm), NoItinerary,
4409 "vswp", "$Vd, $Vm", "", []>;
4410 def VSWPq : N2VX<0b11, 0b11, 0b00, 0b10, 0b00000, 1, 0,
4411 (outs QPR:$Vd), (ins QPR:$Vm), NoItinerary,
4412 "vswp", "$Vd, $Vm", "", []>;
4414 // Vector Move Operations.
4416 // VMOV : Vector Move (Register)
// "vmov Vd, Vm" is encoded as "vorr Vd, Vm, Vm" (OR with itself).
4417 def : InstAlias<"vmov${p} $Vd, $Vm",
4418 (VORRd DPR:$Vd, DPR:$Vm, DPR:$Vm, pred:$p)>;
4419 def : InstAlias<"vmov${p} $Vd, $Vm",
4420 (VORRq QPR:$Vd, QPR:$Vm, QPR:$Vm, pred:$p)>;
4422 // VMOV : Vector Move (Immediate)
4424 let isReMaterializable = 1 in {
4425 def VMOVv8i8 : N1ModImm<1, 0b000, 0b1110, 0, 0, 0, 1, (outs DPR:$Vd),
4426 (ins nImmSplatI8:$SIMM), IIC_VMOVImm,
4427 "vmov", "i8", "$Vd, $SIMM", "",
4428 [(set DPR:$Vd, (v8i8 (NEONvmovImm timm:$SIMM)))]>;
4429 def VMOVv16i8 : N1ModImm<1, 0b000, 0b1110, 0, 1, 0, 1, (outs QPR:$Vd),
4430 (ins nImmSplatI8:$SIMM), IIC_VMOVImm,
4431 "vmov", "i8", "$Vd, $SIMM", "",
4432 [(set QPR:$Vd, (v16i8 (NEONvmovImm timm:$SIMM)))]>;
4434 def VMOVv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 0, 1, (outs DPR:$Vd),
4435 (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
4436 "vmov", "i16", "$Vd, $SIMM", "",
4437 [(set DPR:$Vd, (v4i16 (NEONvmovImm timm:$SIMM)))]> {
4438 let Inst{9} = SIMM{9};
4441 def VMOVv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 0, 1, (outs QPR:$Vd),
4442 (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
4443 "vmov", "i16", "$Vd, $SIMM", "",
4444 [(set QPR:$Vd, (v8i16 (NEONvmovImm timm:$SIMM)))]> {
4445 let Inst{9} = SIMM{9};
4448 def VMOVv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 0, 1, (outs DPR:$Vd),
4449 (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
4450 "vmov", "i32", "$Vd, $SIMM", "",
4451 [(set DPR:$Vd, (v2i32 (NEONvmovImm timm:$SIMM)))]> {
4452 let Inst{11-8} = SIMM{11-8};
4455 def VMOVv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 0, 1, (outs QPR:$Vd),
4456 (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
4457 "vmov", "i32", "$Vd, $SIMM", "",
4458 [(set QPR:$Vd, (v4i32 (NEONvmovImm timm:$SIMM)))]> {
4459 let Inst{11-8} = SIMM{11-8};
4462 def VMOVv1i64 : N1ModImm<1, 0b000, 0b1110, 0, 0, 1, 1, (outs DPR:$Vd),
4463 (ins nImmSplatI64:$SIMM), IIC_VMOVImm,
4464 "vmov", "i64", "$Vd, $SIMM", "",
4465 [(set DPR:$Vd, (v1i64 (NEONvmovImm timm:$SIMM)))]>;
4466 def VMOVv2i64 : N1ModImm<1, 0b000, 0b1110, 0, 1, 1, 1, (outs QPR:$Vd),
4467 (ins nImmSplatI64:$SIMM), IIC_VMOVImm,
4468 "vmov", "i64", "$Vd, $SIMM", "",
4469 [(set QPR:$Vd, (v2i64 (NEONvmovImm timm:$SIMM)))]>;
4470 } // isReMaterializable
4472 // VMOV : Vector Get Lane (move scalar to ARM core register)
4474 def VGETLNs8 : NVGetLane<{1,1,1,0,0,1,?,1}, 0b1011, {?,?},
4475 (outs GPR:$R), (ins DPR:$V, VectorIndex8:$lane),
4476 IIC_VMOVSI, "vmov", "s8", "$R, $V$lane",
4477 [(set GPR:$R, (NEONvgetlanes (v8i8 DPR:$V),
4479 let Inst{21} = lane{2};
4480 let Inst{6-5} = lane{1-0};
4482 def VGETLNs16 : NVGetLane<{1,1,1,0,0,0,?,1}, 0b1011, {?,1},
4483 (outs GPR:$R), (ins DPR:$V, VectorIndex16:$lane),
4484 IIC_VMOVSI, "vmov", "s16", "$R, $V$lane",
4485 [(set GPR:$R, (NEONvgetlanes (v4i16 DPR:$V),
4487 let Inst{21} = lane{1};
4488 let Inst{6} = lane{0};
4490 def VGETLNu8 : NVGetLane<{1,1,1,0,1,1,?,1}, 0b1011, {?,?},
4491 (outs GPR:$R), (ins DPR:$V, VectorIndex8:$lane),
4492 IIC_VMOVSI, "vmov", "u8", "$R, $V$lane",
4493 [(set GPR:$R, (NEONvgetlaneu (v8i8 DPR:$V),
4495 let Inst{21} = lane{2};
4496 let Inst{6-5} = lane{1-0};
4498 def VGETLNu16 : NVGetLane<{1,1,1,0,1,0,?,1}, 0b1011, {?,1},
4499 (outs GPR:$R), (ins DPR:$V, VectorIndex16:$lane),
4500 IIC_VMOVSI, "vmov", "u16", "$R, $V$lane",
4501 [(set GPR:$R, (NEONvgetlaneu (v4i16 DPR:$V),
4503 let Inst{21} = lane{1};
4504 let Inst{6} = lane{0};
4506 def VGETLNi32 : NVGetLane<{1,1,1,0,0,0,?,1}, 0b1011, 0b00,
4507 (outs GPR:$R), (ins DPR:$V, VectorIndex32:$lane),
4508 IIC_VMOVSI, "vmov", "32", "$R, $V$lane",
4509 [(set GPR:$R, (extractelt (v2i32 DPR:$V),
4511 let Inst{21} = lane{0};
4513 // def VGETLNf32: see FMRDH and FMRDL in ARMInstrVFP.td
// Q-register lane extraction is lowered by picking the containing D
// subregister (DSubReg_*_reg) and re-indexing the lane within that D
// register (SubReg_*_lane), then using the D-register VGETLN instruction.
4514 def : Pat<(NEONvgetlanes (v16i8 QPR:$src), imm:$lane),
4515 (VGETLNs8 (v8i8 (EXTRACT_SUBREG QPR:$src,
4516 (DSubReg_i8_reg imm:$lane))),
4517 (SubReg_i8_lane imm:$lane))>;
4518 def : Pat<(NEONvgetlanes (v8i16 QPR:$src), imm:$lane),
4519 (VGETLNs16 (v4i16 (EXTRACT_SUBREG QPR:$src,
4520 (DSubReg_i16_reg imm:$lane))),
4521 (SubReg_i16_lane imm:$lane))>;
4522 def : Pat<(NEONvgetlaneu (v16i8 QPR:$src), imm:$lane),
4523 (VGETLNu8 (v8i8 (EXTRACT_SUBREG QPR:$src,
4524 (DSubReg_i8_reg imm:$lane))),
4525 (SubReg_i8_lane imm:$lane))>;
4526 def : Pat<(NEONvgetlaneu (v8i16 QPR:$src), imm:$lane),
4527 (VGETLNu16 (v4i16 (EXTRACT_SUBREG QPR:$src,
4528 (DSubReg_i16_reg imm:$lane))),
4529 (SubReg_i16_lane imm:$lane))>;
4530 def : Pat<(extractelt (v4i32 QPR:$src), imm:$lane),
4531 (VGETLNi32 (v2i32 (EXTRACT_SUBREG QPR:$src,
4532 (DSubReg_i32_reg imm:$lane))),
4533 (SubReg_i32_lane imm:$lane))>;
// f32 lanes are extracted as S subregisters; the COPY_TO_REGCLASS keeps
// the source in a register class whose S subregs are addressable.
4534 def : Pat<(extractelt (v2f32 DPR:$src1), imm:$src2),
4535 (EXTRACT_SUBREG (v2f32 (COPY_TO_REGCLASS (v2f32 DPR:$src1),DPR_VFP2)),
4536 (SSubReg_f32_reg imm:$src2))>;
4537 def : Pat<(extractelt (v4f32 QPR:$src1), imm:$src2),
4538 (EXTRACT_SUBREG (v4f32 (COPY_TO_REGCLASS (v4f32 QPR:$src1),QPR_VFP2)),
4539 (SSubReg_f32_reg imm:$src2))>;
4540 //def : Pat<(extractelt (v2i64 QPR:$src1), imm:$src2),
4541 // (EXTRACT_SUBREG QPR:$src1, (DSubReg_f64_reg imm:$src2))>;
4542 def : Pat<(extractelt (v2f64 QPR:$src1), imm:$src2),
4543 (EXTRACT_SUBREG QPR:$src1, (DSubReg_f64_reg imm:$src2))>;
4546 // VMOV : Vector Set Lane (move ARM core register to scalar)
4548 let Constraints = "$src1 = $V" in {
4549 def VSETLNi8 : NVSetLane<{1,1,1,0,0,1,?,0}, 0b1011, {?,?}, (outs DPR:$V),
4550 (ins DPR:$src1, GPR:$R, VectorIndex8:$lane),
4551 IIC_VMOVISL, "vmov", "8", "$V$lane, $R",
4552 [(set DPR:$V, (vector_insert (v8i8 DPR:$src1),
4553 GPR:$R, imm:$lane))]> {
4554 let Inst{21} = lane{2};
4555 let Inst{6-5} = lane{1-0};
4557 def VSETLNi16 : NVSetLane<{1,1,1,0,0,0,?,0}, 0b1011, {?,1}, (outs DPR:$V),
4558 (ins DPR:$src1, GPR:$R, VectorIndex16:$lane),
4559 IIC_VMOVISL, "vmov", "16", "$V$lane, $R",
4560 [(set DPR:$V, (vector_insert (v4i16 DPR:$src1),
4561 GPR:$R, imm:$lane))]> {
4562 let Inst{21} = lane{1};
4563 let Inst{6} = lane{0};
4565 def VSETLNi32 : NVSetLane<{1,1,1,0,0,0,?,0}, 0b1011, 0b00, (outs DPR:$V),
4566 (ins DPR:$src1, GPR:$R, VectorIndex32:$lane),
4567 IIC_VMOVISL, "vmov", "32", "$V$lane, $R",
4568 [(set DPR:$V, (insertelt (v2i32 DPR:$src1),
4569 GPR:$R, imm:$lane))]> {
4570 let Inst{21} = lane{0};
// Q-register lane insertion mirrors extraction: pull out the containing D
// subregister, do the VSETLN on it, then INSERT_SUBREG it back into the Q
// register at the same D slot.
4573 def : Pat<(vector_insert (v16i8 QPR:$src1), GPR:$src2, imm:$lane),
4574 (v16i8 (INSERT_SUBREG QPR:$src1,
4575 (v8i8 (VSETLNi8 (v8i8 (EXTRACT_SUBREG QPR:$src1,
4576 (DSubReg_i8_reg imm:$lane))),
4577 GPR:$src2, (SubReg_i8_lane imm:$lane))),
4578 (DSubReg_i8_reg imm:$lane)))>;
4579 def : Pat<(vector_insert (v8i16 QPR:$src1), GPR:$src2, imm:$lane),
4580 (v8i16 (INSERT_SUBREG QPR:$src1,
4581 (v4i16 (VSETLNi16 (v4i16 (EXTRACT_SUBREG QPR:$src1,
4582 (DSubReg_i16_reg imm:$lane))),
4583 GPR:$src2, (SubReg_i16_lane imm:$lane))),
4584 (DSubReg_i16_reg imm:$lane)))>;
4585 def : Pat<(insertelt (v4i32 QPR:$src1), GPR:$src2, imm:$lane),
4586 (v4i32 (INSERT_SUBREG QPR:$src1,
4587 (v2i32 (VSETLNi32 (v2i32 (EXTRACT_SUBREG QPR:$src1,
4588 (DSubReg_i32_reg imm:$lane))),
4589 GPR:$src2, (SubReg_i32_lane imm:$lane))),
4590 (DSubReg_i32_reg imm:$lane)))>;
// f32 lanes are inserted directly as S subregisters (no VSETLN needed).
4592 def : Pat<(v2f32 (insertelt DPR:$src1, SPR:$src2, imm:$src3)),
4593 (INSERT_SUBREG (v2f32 (COPY_TO_REGCLASS DPR:$src1, DPR_VFP2)),
4594 SPR:$src2, (SSubReg_f32_reg imm:$src3))>;
4595 def : Pat<(v4f32 (insertelt QPR:$src1, SPR:$src2, imm:$src3)),
4596 (INSERT_SUBREG (v4f32 (COPY_TO_REGCLASS QPR:$src1, QPR_VFP2)),
4597 SPR:$src2, (SSubReg_f32_reg imm:$src3))>;
4599 //def : Pat<(v2i64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
4600 // (INSERT_SUBREG QPR:$src1, DPR:$src2, (DSubReg_f64_reg imm:$src3))>;
4601 def : Pat<(v2f64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
4602 (INSERT_SUBREG QPR:$src1, DPR:$src2, (DSubReg_f64_reg imm:$src3))>;
// scalar_to_vector: only lane 0 is defined, so float/double scalars are
// placed via INSERT_SUBREG into an IMPLICIT_DEF vector, and integer
// scalars via VSETLN at lane 0.
4604 def : Pat<(v2f32 (scalar_to_vector SPR:$src)),
4605 (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), SPR:$src, ssub_0)>;
4606 def : Pat<(v2f64 (scalar_to_vector (f64 DPR:$src))),
4607 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), DPR:$src, dsub_0)>;
4608 def : Pat<(v4f32 (scalar_to_vector SPR:$src)),
4609 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), SPR:$src, ssub_0)>;
4611 def : Pat<(v8i8 (scalar_to_vector GPR:$src)),
4612 (VSETLNi8 (v8i8 (IMPLICIT_DEF)), GPR:$src, (i32 0))>;
4613 def : Pat<(v4i16 (scalar_to_vector GPR:$src)),
4614 (VSETLNi16 (v4i16 (IMPLICIT_DEF)), GPR:$src, (i32 0))>;
4615 def : Pat<(v2i32 (scalar_to_vector GPR:$src)),
4616 (VSETLNi32 (v2i32 (IMPLICIT_DEF)), GPR:$src, (i32 0))>;
4618 def : Pat<(v16i8 (scalar_to_vector GPR:$src)),
4619 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4620 (VSETLNi8 (v8i8 (IMPLICIT_DEF)), GPR:$src, (i32 0)),
4622 def : Pat<(v8i16 (scalar_to_vector GPR:$src)),
4623 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
4624 (VSETLNi16 (v4i16 (IMPLICIT_DEF)), GPR:$src, (i32 0)),
4626 def : Pat<(v4i32 (scalar_to_vector GPR:$src)),
4627 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
4628 (VSETLNi32 (v2i32 (IMPLICIT_DEF)), GPR:$src, (i32 0)),
4631 // VDUP : Vector Duplicate (from ARM core register to all elements)
// Broadcast a GPR value into every lane; one class each for D and Q forms.
4633 class VDUPD<bits<8> opcod1, bits<2> opcod3, string Dt, ValueType Ty>
4634 : NVDup<opcod1, 0b1011, opcod3, (outs DPR:$V), (ins GPR:$R),
4635 IIC_VMOVIS, "vdup", Dt, "$V, $R",
4636 [(set DPR:$V, (Ty (NEONvdup (i32 GPR:$R))))]>;
4637 class VDUPQ<bits<8> opcod1, bits<2> opcod3, string Dt, ValueType Ty>
4638 : NVDup<opcod1, 0b1011, opcod3, (outs QPR:$V), (ins GPR:$R),
4639 IIC_VMOVIS, "vdup", Dt, "$V, $R",
4640 [(set QPR:$V, (Ty (NEONvdup (i32 GPR:$R))))]>;
4642 def VDUP8d : VDUPD<0b11101100, 0b00, "8", v8i8>;
4643 def VDUP16d : VDUPD<0b11101000, 0b01, "16", v4i16>;
4644 def VDUP32d : VDUPD<0b11101000, 0b00, "32", v2i32>;
4645 def VDUP8q : VDUPQ<0b11101110, 0b00, "8", v16i8>;
4646 def VDUP16q : VDUPQ<0b11101010, 0b01, "16", v8i16>;
4647 def VDUP32q : VDUPQ<0b11101010, 0b00, "32", v4i32>;
// f32 splat of a value that is already in a GPR reuses the 32-bit VDUP.
4649 def : Pat<(v2f32 (NEONvdup (f32 (bitconvert GPR:$R)))), (VDUP32d GPR:$R)>;
4650 def : Pat<(v4f32 (NEONvdup (f32 (bitconvert GPR:$R)))), (VDUP32q GPR:$R)>;
4652 // VDUP : Vector Duplicate Lane (from scalar to all elements)
4654 class VDUPLND<bits<4> op19_16, string OpcodeStr, string Dt,
4655 ValueType Ty, Operand IdxTy>
4656 : NVDupLane<op19_16, 0, (outs DPR:$Vd), (ins DPR:$Vm, IdxTy:$lane),
4657 IIC_VMOVD, OpcodeStr, Dt, "$Vd, $Vm$lane",
4658 [(set DPR:$Vd, (Ty (NEONvduplane (Ty DPR:$Vm), imm:$lane)))]>;
4660 class VDUPLNQ<bits<4> op19_16, string OpcodeStr, string Dt,
4661 ValueType ResTy, ValueType OpTy, Operand IdxTy>
4662 : NVDupLane<op19_16, 1, (outs QPR:$Vd), (ins DPR:$Vm, IdxTy:$lane),
4663 IIC_VMOVQ, OpcodeStr, Dt, "$Vd, $Vm$lane",
// NOTE(review): the Q pattern hard-codes VectorIndex32:$lane even though
// IdxTy varies with element size (the D class uses imm:$lane) — looks
// inconsistent; confirm against upstream.
4664 [(set QPR:$Vd, (ResTy (NEONvduplane (OpTy DPR:$Vm),
4665 VectorIndex32:$lane)))]>;
4667 // Inst{19-16} is partially specified depending on the element size.
4669 def VDUPLN8d : VDUPLND<{?,?,?,1}, "vdup", "8", v8i8, VectorIndex8> {
4671 let Inst{19-17} = lane{2-0};
4673 def VDUPLN16d : VDUPLND<{?,?,1,0}, "vdup", "16", v4i16, VectorIndex16> {
4675 let Inst{19-18} = lane{1-0};
4677 def VDUPLN32d : VDUPLND<{?,1,0,0}, "vdup", "32", v2i32, VectorIndex32> {
4679 let Inst{19} = lane{0};
4681 def VDUPLN8q : VDUPLNQ<{?,?,?,1}, "vdup", "8", v16i8, v8i8, VectorIndex8> {
4683 let Inst{19-17} = lane{2-0};
4685 def VDUPLN16q : VDUPLNQ<{?,?,1,0}, "vdup", "16", v8i16, v4i16, VectorIndex16> {
4687 let Inst{19-18} = lane{1-0};
4689 def VDUPLN32q : VDUPLNQ<{?,1,0,0}, "vdup", "32", v4i32, v2i32, VectorIndex32> {
4691 let Inst{19} = lane{0};
// Float lane-duplication reuses the i32 instructions (bit-identical elements).
4694 def : Pat<(v2f32 (NEONvduplane (v2f32 DPR:$Vm), imm:$lane)),
4695 (VDUPLN32d DPR:$Vm, imm:$lane)>;
4697 def : Pat<(v4f32 (NEONvduplane (v2f32 DPR:$Vm), imm:$lane)),
4698 (VDUPLN32q DPR:$Vm, imm:$lane)>;
// Duplicating a lane of a Q register: first extract the D subregister that
// contains the lane (DSubReg_*_reg), then duplicate from the lane index
// renumbered within that D register (SubReg_*_lane).
4700 def : Pat<(v16i8 (NEONvduplane (v16i8 QPR:$src), imm:$lane)),
4701 (v16i8 (VDUPLN8q (v8i8 (EXTRACT_SUBREG QPR:$src,
4702 (DSubReg_i8_reg imm:$lane))),
4703 (SubReg_i8_lane imm:$lane)))>;
4704 def : Pat<(v8i16 (NEONvduplane (v8i16 QPR:$src), imm:$lane)),
4705 (v8i16 (VDUPLN16q (v4i16 (EXTRACT_SUBREG QPR:$src,
4706 (DSubReg_i16_reg imm:$lane))),
4707 (SubReg_i16_lane imm:$lane)))>;
4708 def : Pat<(v4i32 (NEONvduplane (v4i32 QPR:$src), imm:$lane)),
4709 (v4i32 (VDUPLN32q (v2i32 (EXTRACT_SUBREG QPR:$src,
4710 (DSubReg_i32_reg imm:$lane))),
4711 (SubReg_i32_lane imm:$lane)))>;
4712 def : Pat<(v4f32 (NEONvduplane (v4f32 QPR:$src), imm:$lane)),
4713 (v4f32 (VDUPLN32q (v2f32 (EXTRACT_SUBREG QPR:$src,
4714 (DSubReg_i32_reg imm:$lane))),
4715 (SubReg_i32_lane imm:$lane)))>;
// Pseudos that splat an f32 held in an S register across a D/Q vector;
// expanded after register allocation (no encoding, empty asm string).
4717 def VDUPfdf : PseudoNeonI<(outs DPR:$dst), (ins SPR:$src), IIC_VMOVD, "",
4718 [(set DPR:$dst, (v2f32 (NEONvdup (f32 SPR:$src))))]>;
4719 def VDUPfqf : PseudoNeonI<(outs QPR:$dst), (ins SPR:$src), IIC_VMOVD, "",
4720 [(set QPR:$dst, (v4f32 (NEONvdup (f32 SPR:$src))))]>;
4722 // VMOVN : Vector Narrowing Move
// Plain narrowing is a truncate of each element.
4723 defm VMOVN : N2VN_HSD<0b11,0b11,0b10,0b00100,0,0, IIC_VMOVN,
4724 "vmovn", "i", trunc>;
4725 // VQMOVN : Vector Saturating Narrowing Move
// Three saturating variants: signed->signed, unsigned->unsigned, and
// signed->unsigned ("vqmovun"), each via its target intrinsic.
4726 defm VQMOVNs : N2VNInt_HSD<0b11,0b11,0b10,0b00101,0,0, IIC_VQUNAiD,
4727 "vqmovn", "s", int_arm_neon_vqmovns>;
4728 defm VQMOVNu : N2VNInt_HSD<0b11,0b11,0b10,0b00101,1,0, IIC_VQUNAiD,
4729 "vqmovn", "u", int_arm_neon_vqmovnu>;
4730 defm VQMOVNsu : N2VNInt_HSD<0b11,0b11,0b10,0b00100,1,0, IIC_VQUNAiD,
4731 "vqmovun", "s", int_arm_neon_vqmovnsu>;
4732 // VMOVL : Vector Lengthening Move
// Widening is sign- or zero-extension of each element.
4733 defm VMOVLs : N2VL_QHS<0b01,0b10100,0,1, "vmovl", "s", sext>;
4734 defm VMOVLu : N2VL_QHS<0b11,0b10100,0,1, "vmovl", "u", zext>;
4736 // Vector Conversions.
4738 // VCVT : Vector Convert Between Floating-Point and Integers
// D-register (64-bit, 2 elements) forms; conversions map directly onto the
// generic fp_to_sint / fp_to_uint / sint_to_fp / uint_to_fp nodes.
4739 def VCVTf2sd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt", "s32.f32",
4740 v2i32, v2f32, fp_to_sint>;
4741 def VCVTf2ud : N2VD<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt", "u32.f32",
4742 v2i32, v2f32, fp_to_uint>;
4743 def VCVTs2fd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt", "f32.s32",
4744 v2f32, v2i32, sint_to_fp>;
4745 def VCVTu2fd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt", "f32.u32",
4746 v2f32, v2i32, uint_to_fp>;
// Q-register (128-bit, 4 elements) forms of the same four conversions.
4748 def VCVTf2sq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt", "s32.f32",
4749 v4i32, v4f32, fp_to_sint>;
4750 def VCVTf2uq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt", "u32.f32",
4751 v4i32, v4f32, fp_to_uint>;
4752 def VCVTs2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt", "f32.s32",
4753 v4f32, v4i32, sint_to_fp>;
4754 def VCVTu2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt", "f32.u32",
4755 v4f32, v4i32, uint_to_fp>;
4757 // VCVT : Vector Convert Between Floating-Point and Fixed-Point.
// Fixed-point forms carry an immediate fraction-bit count, so they select via
// the ARM-specific intrinsics rather than the generic conversion nodes.
// D-register forms:
4758 def VCVTf2xsd : N2VCvtD<0, 1, 0b1111, 0, 1, "vcvt", "s32.f32",
4759 v2i32, v2f32, int_arm_neon_vcvtfp2fxs>;
4760 def VCVTf2xud : N2VCvtD<1, 1, 0b1111, 0, 1, "vcvt", "u32.f32",
4761 v2i32, v2f32, int_arm_neon_vcvtfp2fxu>;
4762 def VCVTxs2fd : N2VCvtD<0, 1, 0b1110, 0, 1, "vcvt", "f32.s32",
4763 v2f32, v2i32, int_arm_neon_vcvtfxs2fp>;
4764 def VCVTxu2fd : N2VCvtD<1, 1, 0b1110, 0, 1, "vcvt", "f32.u32",
4765 v2f32, v2i32, int_arm_neon_vcvtfxu2fp>;
// Q-register forms:
4767 def VCVTf2xsq : N2VCvtQ<0, 1, 0b1111, 0, 1, "vcvt", "s32.f32",
4768 v4i32, v4f32, int_arm_neon_vcvtfp2fxs>;
4769 def VCVTf2xuq : N2VCvtQ<1, 1, 0b1111, 0, 1, "vcvt", "u32.f32",
4770 v4i32, v4f32, int_arm_neon_vcvtfp2fxu>;
4771 def VCVTxs2fq : N2VCvtQ<0, 1, 0b1110, 0, 1, "vcvt", "f32.s32",
4772 v4f32, v4i32, int_arm_neon_vcvtfxs2fp>;
4773 def VCVTxu2fq : N2VCvtQ<1, 1, 0b1110, 0, 1, "vcvt", "f32.u32",
4774 v4f32, v4i32, int_arm_neon_vcvtfxu2fp>;
4776 // VCVT : Vector Convert Between Half-Precision and Single-Precision.
// Narrowing (f32->f16) and lengthening (f32<-f16) forms; both require the
// half-precision extension in addition to NEON (HasFP16 predicate).
4777 def VCVTf2h : N2VNInt<0b11, 0b11, 0b01, 0b10, 0b01100, 0, 0,
4778 IIC_VUNAQ, "vcvt", "f16.f32",
4779 v4i16, v4f32, int_arm_neon_vcvtfp2hf>,
4780 Requires<[HasNEON, HasFP16]>;
4781 def VCVTh2f : N2VLInt<0b11, 0b11, 0b01, 0b10, 0b01110, 0, 0,
4782 IIC_VUNAQ, "vcvt", "f32.f16",
4783 v4f32, v4i16, int_arm_neon_vcvthf2fp>,
4784 Requires<[HasNEON, HasFP16]>;
4788 // VREV64 : Vector Reverse elements within 64-bit doublewords
// op19_18 encodes the element size (00=8, 01=16, 10=32). D and Q templates
// differ only in register class, itinerary, and the Q bit of N2V.
4790 class VREV64D<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4791 : N2V<0b11, 0b11, op19_18, 0b00, 0b00000, 0, 0, (outs DPR:$Vd),
4792 (ins DPR:$Vm), IIC_VMOVD,
4793 OpcodeStr, Dt, "$Vd, $Vm", "",
4794 [(set DPR:$Vd, (Ty (NEONvrev64 (Ty DPR:$Vm))))]>;
4795 class VREV64Q<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4796 : N2V<0b11, 0b11, op19_18, 0b00, 0b00000, 1, 0, (outs QPR:$Vd),
4797 (ins QPR:$Vm), IIC_VMOVQ,
4798 OpcodeStr, Dt, "$Vd, $Vm", "",
4799 [(set QPR:$Vd, (Ty (NEONvrev64 (Ty QPR:$Vm))))]>;
4801 def VREV64d8 : VREV64D<0b00, "vrev64", "8", v8i8>;
4802 def VREV64d16 : VREV64D<0b01, "vrev64", "16", v4i16>;
4803 def VREV64d32 : VREV64D<0b10, "vrev64", "32", v2i32>;
// f32 reversal reuses the i32 instruction (element bits are untouched).
4804 def : Pat<(v2f32 (NEONvrev64 (v2f32 DPR:$Vm))), (VREV64d32 DPR:$Vm)>;
4806 def VREV64q8 : VREV64Q<0b00, "vrev64", "8", v16i8>;
4807 def VREV64q16 : VREV64Q<0b01, "vrev64", "16", v8i16>;
4808 def VREV64q32 : VREV64Q<0b10, "vrev64", "32", v4i32>;
4809 def : Pat<(v4f32 (NEONvrev64 (v4f32 QPR:$Vm))), (VREV64q32 QPR:$Vm)>;
4811 // VREV32 : Vector Reverse elements within 32-bit words
// Same shape as VREV64 with op22_7 field 0b00001. Only 8- and 16-bit element
// defs exist below: reversing 32-bit elements within a 32-bit word would be
// a no-op, so no 32-bit variant is defined.
4813 class VREV32D<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4814 : N2V<0b11, 0b11, op19_18, 0b00, 0b00001, 0, 0, (outs DPR:$Vd),
4815 (ins DPR:$Vm), IIC_VMOVD,
4816 OpcodeStr, Dt, "$Vd, $Vm", "",
4817 [(set DPR:$Vd, (Ty (NEONvrev32 (Ty DPR:$Vm))))]>;
4818 class VREV32Q<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4819 : N2V<0b11, 0b11, op19_18, 0b00, 0b00001, 1, 0, (outs QPR:$Vd),
4820 (ins QPR:$Vm), IIC_VMOVQ,
4821 OpcodeStr, Dt, "$Vd, $Vm", "",
4822 [(set QPR:$Vd, (Ty (NEONvrev32 (Ty QPR:$Vm))))]>;
4824 def VREV32d8 : VREV32D<0b00, "vrev32", "8", v8i8>;
4825 def VREV32d16 : VREV32D<0b01, "vrev32", "16", v4i16>;
4827 def VREV32q8 : VREV32Q<0b00, "vrev32", "8", v16i8>;
4828 def VREV32q16 : VREV32Q<0b01, "vrev32", "16", v8i16>;
4830 // VREV16 : Vector Reverse elements within 16-bit halfwords
// Only 8-bit element defs exist: larger elements would not fit (or would be
// a no-op) within a 16-bit halfword.
4832 class VREV16D<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4833 : N2V<0b11, 0b11, op19_18, 0b00, 0b00010, 0, 0, (outs DPR:$Vd),
4834 (ins DPR:$Vm), IIC_VMOVD,
4835 OpcodeStr, Dt, "$Vd, $Vm", "",
4836 [(set DPR:$Vd, (Ty (NEONvrev16 (Ty DPR:$Vm))))]>;
4837 class VREV16Q<bits<2> op19_18, string OpcodeStr, string Dt, ValueType Ty>
4838 : N2V<0b11, 0b11, op19_18, 0b00, 0b00010, 1, 0, (outs QPR:$Vd),
4839 (ins QPR:$Vm), IIC_VMOVQ,
4840 OpcodeStr, Dt, "$Vd, $Vm", "",
4841 [(set QPR:$Vd, (Ty (NEONvrev16 (Ty QPR:$Vm))))]>;
4843 def VREV16d8 : VREV16D<0b00, "vrev16", "8", v8i8>;
4844 def VREV16q8 : VREV16Q<0b00, "vrev16", "8", v16i8>;
4846 // Other Vector Shuffles.
4848 // Aligned extractions: really just dropping registers
// Extracting a D-sized subvector at a D-aligned start index from a Q register
// needs no instruction at all — it is an EXTRACT_SUBREG; LaneCVT maps the
// element start index to the dsub_0/dsub_1 subregister index.
4850 class AlignedVEXTq<ValueType DestTy, ValueType SrcTy, SDNodeXForm LaneCVT>
4851 : Pat<(DestTy (vector_extract_subvec (SrcTy QPR:$src), (i32 imm:$start))),
4852 (EXTRACT_SUBREG (SrcTy QPR:$src), (LaneCVT imm:$start))>;
4854 def : AlignedVEXTq<v8i8, v16i8, DSubReg_i8_reg>;
4856 def : AlignedVEXTq<v4i16, v8i16, DSubReg_i16_reg>;
4858 def : AlignedVEXTq<v2i32, v4i32, DSubReg_i32_reg>;
4860 def : AlignedVEXTq<v1i64, v2i64, DSubReg_f64_reg>;
4862 def : AlignedVEXTq<v2f32, v4f32, DSubReg_i32_reg>;
4865 // VEXT : Vector Extract
// Extracts a vector from the concatenation $Vm:$Vn starting at byte offset
// $index. The immediate always encodes a BYTE offset in Inst{11-8}, so the
// per-size defs below shift the element index into the right bit positions.
// (Some `bits<4> index;` declarations and closing braces are elided in this
// extract of the file.)
4867 class VEXTd<string OpcodeStr, string Dt, ValueType Ty>
4868 : N3V<0,1,0b11,{?,?,?,?},0,0, (outs DPR:$Vd),
4869 (ins DPR:$Vn, DPR:$Vm, i32imm:$index), NVExtFrm,
4870 IIC_VEXTD, OpcodeStr, Dt, "$Vd, $Vn, $Vm, $index", "",
4871 [(set DPR:$Vd, (Ty (NEONvext (Ty DPR:$Vn),
4872 (Ty DPR:$Vm), imm:$index)))]> {
4874 let Inst{11-8} = index{3-0};
4877 class VEXTq<string OpcodeStr, string Dt, ValueType Ty>
4878 : N3V<0,1,0b11,{?,?,?,?},1,0, (outs QPR:$Vd),
4879 (ins QPR:$Vn, QPR:$Vm, i32imm:$index), NVExtFrm,
4880 IIC_VEXTQ, OpcodeStr, Dt, "$Vd, $Vn, $Vm, $index", "",
4881 [(set QPR:$Vd, (Ty (NEONvext (Ty QPR:$Vn),
4882 (Ty QPR:$Vm), imm:$index)))]> {
4884 let Inst{11-8} = index{3-0};
// 8-bit elements: element index == byte index. 16-bit: byte offset is
// index*2, hence the one-bit left shift; 32-bit: index*4, two-bit shift
// with the low encoding bits forced to zero.
4887 def VEXTd8 : VEXTd<"vext", "8", v8i8> {
4888 let Inst{11-8} = index{3-0};
4890 def VEXTd16 : VEXTd<"vext", "16", v4i16> {
4891 let Inst{11-9} = index{2-0};
4894 def VEXTd32 : VEXTd<"vext", "32", v2i32> {
4895 let Inst{11-10} = index{1-0};
4896 let Inst{9-8} = 0b00;
4898 def : Pat<(v2f32 (NEONvext (v2f32 DPR:$Vn),
4901 (VEXTd32 DPR:$Vn, DPR:$Vm, imm:$index)>;
4903 def VEXTq8 : VEXTq<"vext", "8", v16i8> {
4904 let Inst{11-8} = index{3-0};
4906 def VEXTq16 : VEXTq<"vext", "16", v8i16> {
4907 let Inst{11-9} = index{2-0};
4910 def VEXTq32 : VEXTq<"vext", "32", v4i32> {
4911 let Inst{11-10} = index{1-0};
4912 let Inst{9-8} = 0b00;
4914 def : Pat<(v4f32 (NEONvext (v4f32 QPR:$Vn),
4917 (VEXTq32 QPR:$Vn, QPR:$Vm, imm:$index)>;
// Two-register permutes. Each def's first template argument encodes the
// element size (00=8, 01=16, 10=32); D forms use the default itinerary,
// Q forms name theirs explicitly.
4919 // VTRN : Vector Transpose
4921 def VTRNd8 : N2VDShuffle<0b00, 0b00001, "vtrn", "8">;
4922 def VTRNd16 : N2VDShuffle<0b01, 0b00001, "vtrn", "16">;
4923 def VTRNd32 : N2VDShuffle<0b10, 0b00001, "vtrn", "32">;
4925 def VTRNq8 : N2VQShuffle<0b00, 0b00001, IIC_VPERMQ, "vtrn", "8">;
4926 def VTRNq16 : N2VQShuffle<0b01, 0b00001, IIC_VPERMQ, "vtrn", "16">;
4927 def VTRNq32 : N2VQShuffle<0b10, 0b00001, IIC_VPERMQ, "vtrn", "32">;
4929 // VUZP : Vector Unzip (Deinterleave)
4931 def VUZPd8 : N2VDShuffle<0b00, 0b00010, "vuzp", "8">;
4932 def VUZPd16 : N2VDShuffle<0b01, 0b00010, "vuzp", "16">;
4933 def VUZPd32 : N2VDShuffle<0b10, 0b00010, "vuzp", "32">;
4935 def VUZPq8 : N2VQShuffle<0b00, 0b00010, IIC_VPERMQ3, "vuzp", "8">;
4936 def VUZPq16 : N2VQShuffle<0b01, 0b00010, IIC_VPERMQ3, "vuzp", "16">;
4937 def VUZPq32 : N2VQShuffle<0b10, 0b00010, IIC_VPERMQ3, "vuzp", "32">;
4939 // VZIP : Vector Zip (Interleave)
4941 def VZIPd8 : N2VDShuffle<0b00, 0b00011, "vzip", "8">;
4942 def VZIPd16 : N2VDShuffle<0b01, 0b00011, "vzip", "16">;
4943 def VZIPd32 : N2VDShuffle<0b10, 0b00011, "vzip", "32">;
4945 def VZIPq8 : N2VQShuffle<0b00, 0b00011, IIC_VPERMQ3, "vzip", "8">;
4946 def VZIPq16 : N2VQShuffle<0b01, 0b00011, IIC_VPERMQ3, "vzip", "16">;
4947 def VZIPq32 : N2VQShuffle<0b10, 0b00011, IIC_VPERMQ3, "vzip", "32">;
4949 // Vector Table Lookup and Table Extension.
// NOTE(review): several `def NAME` lines of this region are elided in this
// extract (the record bodies below begin directly with `: N3V<...>` /
// `: PseudoNeonI<...>`); the structure is: a one-register VTBL with a
// selection pattern, 2/3/4-register VTBL variants (assembly only), pseudos
// over Q/QQ table operands, then the same layout for VTBX, whose "$orig = $Vd"
// constraint ties the destination to the pass-through operand.
4951 // VTBL : Vector Table Lookup
4952 let DecoderMethod = "DecodeTBLInstruction" in {
4954 : N3V<1,1,0b11,0b1000,0,0, (outs DPR:$Vd),
4955 (ins VecListOneD:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTB1,
4956 "vtbl", "8", "$Vd, $Vn, $Vm", "",
4957 [(set DPR:$Vd, (v8i8 (int_arm_neon_vtbl1 VecListOneD:$Vn, DPR:$Vm)))]>;
// Multi-register table forms: the extra table registers must be allocated
// consecutively, hence hasExtraSrcRegAllocReq.
4958 let hasExtraSrcRegAllocReq = 1 in {
4960 : N3V<1,1,0b11,0b1001,0,0, (outs DPR:$Vd),
4961 (ins DPR:$Vn, DPR:$tbl2, DPR:$Vm), NVTBLFrm, IIC_VTB2,
4962 "vtbl", "8", "$Vd, \\{$Vn, $tbl2\\}, $Vm", "", []>;
4964 : N3V<1,1,0b11,0b1010,0,0, (outs DPR:$Vd),
4965 (ins DPR:$Vn, DPR:$tbl2, DPR:$tbl3, DPR:$Vm), NVTBLFrm, IIC_VTB3,
4966 "vtbl", "8", "$Vd, \\{$Vn, $tbl2, $tbl3\\}, $Vm", "", []>;
4968 : N3V<1,1,0b11,0b1011,0,0, (outs DPR:$Vd),
4969 (ins DPR:$Vn, DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$Vm),
4971 "vtbl", "8", "$Vd, \\{$Vn, $tbl2, $tbl3, $tbl4\\}, $Vm", "", []>;
4972 } // hasExtraSrcRegAllocReq = 1
// Pseudos taking the table as a single Q/QQ super-register operand.
4975 : PseudoNeonI<(outs DPR:$dst), (ins QPR:$tbl, DPR:$src), IIC_VTB2, "", []>;
4977 : PseudoNeonI<(outs DPR:$dst), (ins QQPR:$tbl, DPR:$src), IIC_VTB3, "", []>;
4979 : PseudoNeonI<(outs DPR:$dst), (ins QQPR:$tbl, DPR:$src), IIC_VTB4, "", []>;
4981 // VTBX : Vector Table Extension
4983 : N3V<1,1,0b11,0b1000,1,0, (outs DPR:$Vd),
4984 (ins DPR:$orig, VecListOneD:$Vn, DPR:$Vm), NVTBLFrm, IIC_VTBX1,
4985 "vtbx", "8", "$Vd, $Vn, $Vm", "$orig = $Vd",
4986 [(set DPR:$Vd, (v8i8 (int_arm_neon_vtbx1
4987 DPR:$orig, VecListOneD:$Vn, DPR:$Vm)))]>;
4988 let hasExtraSrcRegAllocReq = 1 in {
4990 : N3V<1,1,0b11,0b1001,1,0, (outs DPR:$Vd),
4991 (ins DPR:$orig, DPR:$Vn, DPR:$tbl2, DPR:$Vm), NVTBLFrm, IIC_VTBX2,
4992 "vtbx", "8", "$Vd, \\{$Vn, $tbl2\\}, $Vm", "$orig = $Vd", []>;
4994 : N3V<1,1,0b11,0b1010,1,0, (outs DPR:$Vd),
4995 (ins DPR:$orig, DPR:$Vn, DPR:$tbl2, DPR:$tbl3, DPR:$Vm),
4996 NVTBLFrm, IIC_VTBX3,
4997 "vtbx", "8", "$Vd, \\{$Vn, $tbl2, $tbl3\\}, $Vm",
5000 : N3V<1,1,0b11,0b1011,1,0, (outs DPR:$Vd), (ins DPR:$orig, DPR:$Vn,
5001 DPR:$tbl2, DPR:$tbl3, DPR:$tbl4, DPR:$Vm), NVTBLFrm, IIC_VTBX4,
5002 "vtbx", "8", "$Vd, \\{$Vn, $tbl2, $tbl3, $tbl4\\}, $Vm",
5004 } // hasExtraSrcRegAllocReq = 1
5007 : PseudoNeonI<(outs DPR:$dst), (ins DPR:$orig, QPR:$tbl, DPR:$src),
5008 IIC_VTBX2, "$orig = $dst", []>;
5010 : PseudoNeonI<(outs DPR:$dst), (ins DPR:$orig, QQPR:$tbl, DPR:$src),
5011 IIC_VTBX3, "$orig = $dst", []>;
5013 : PseudoNeonI<(outs DPR:$dst), (ins DPR:$orig, QQPR:$tbl, DPR:$src),
5014 IIC_VTBX4, "$orig = $dst", []>;
5015 } // DecoderMethod = "DecodeTBLInstruction"
5017 //===----------------------------------------------------------------------===//
5018 // NEON instructions for single-precision FP math
5019 //===----------------------------------------------------------------------===//
// These patterns implement scalar f32 arithmetic with NEON D-register
// instructions: the S-register operand is inserted into lane ssub_0 of an
// IMPLICIT_DEF v2f32 (constrained to DPR_VFP2 so the D register overlaps the
// S register file), the D-form Inst runs, and ssub_0 of the result is the
// scalar answer.
5021 class N2VSPat<SDNode OpNode, NeonI Inst>
5022 : NEONFPPat<(f32 (OpNode SPR:$a)),
5024 (v2f32 (COPY_TO_REGCLASS (Inst
5026 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5027 SPR:$a, ssub_0)), DPR_VFP2)), ssub_0)>;
5029 class N3VSPat<SDNode OpNode, NeonI Inst>
5030 : NEONFPPat<(f32 (OpNode SPR:$a, SPR:$b)),
5032 (v2f32 (COPY_TO_REGCLASS (Inst
5034 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5037 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5038 SPR:$b, ssub_0)), DPR_VFP2)), ssub_0)>;
// Fused form for multiply-accumulate/subtract: matches OpNode($acc, $a * $b).
5040 class N3VSMulOpPat<SDNode MulNode, SDNode OpNode, NeonI Inst>
5041 : NEONFPPat<(f32 (OpNode SPR:$acc, (f32 (MulNode SPR:$a, SPR:$b)))),
5043 (v2f32 (COPY_TO_REGCLASS (Inst
5045 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5048 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5051 (v2f32 (COPY_TO_REGCLASS (v2f32 (IMPLICIT_DEF)), DPR_VFP2)),
5052 SPR:$b, ssub_0)), DPR_VFP2)), ssub_0)>;
// Instantiations; the VMLA/VMLS ones are further gated on the
// UseNEONForFP / UseFPVMLx subtarget preferences.
5054 def : N3VSPat<fadd, VADDfd>;
5055 def : N3VSPat<fsub, VSUBfd>;
5056 def : N3VSPat<fmul, VMULfd>;
5057 def : N3VSMulOpPat<fmul, fadd, VMLAfd>,
5058 Requires<[HasNEON, UseNEONForFP, UseFPVMLx]>;
5059 def : N3VSMulOpPat<fmul, fsub, VMLSfd>,
5060 Requires<[HasNEON, UseNEONForFP, UseFPVMLx]>;
5061 def : N2VSPat<fabs, VABSfd>;
5062 def : N2VSPat<fneg, VNEGfd>;
5063 def : N3VSPat<NEONfmax, VMAXfd>;
5064 def : N3VSPat<NEONfmin, VMINfd>;
5065 def : N2VSPat<arm_ftosi, VCVTf2sd>;
5066 def : N2VSPat<arm_ftoui, VCVTf2ud>;
5067 def : N2VSPat<arm_sitof, VCVTs2fd>;
5068 def : N2VSPat<arm_uitof, VCVTu2fd>;
5070 //===----------------------------------------------------------------------===//
5071 // Non-Instruction Patterns
5072 //===----------------------------------------------------------------------===//
// bitconvert between any two vector (or f64) types of the same register class
// is a pure reinterpretation: the pattern result is the source register
// itself, so no instruction is ever emitted.
// 64-bit (DPR) reinterpretations:
5075 def : Pat<(v1i64 (bitconvert (v2i32 DPR:$src))), (v1i64 DPR:$src)>;
5076 def : Pat<(v1i64 (bitconvert (v4i16 DPR:$src))), (v1i64 DPR:$src)>;
5077 def : Pat<(v1i64 (bitconvert (v8i8 DPR:$src))), (v1i64 DPR:$src)>;
5078 def : Pat<(v1i64 (bitconvert (f64 DPR:$src))), (v1i64 DPR:$src)>;
5079 def : Pat<(v1i64 (bitconvert (v2f32 DPR:$src))), (v1i64 DPR:$src)>;
5080 def : Pat<(v2i32 (bitconvert (v1i64 DPR:$src))), (v2i32 DPR:$src)>;
5081 def : Pat<(v2i32 (bitconvert (v4i16 DPR:$src))), (v2i32 DPR:$src)>;
5082 def : Pat<(v2i32 (bitconvert (v8i8 DPR:$src))), (v2i32 DPR:$src)>;
5083 def : Pat<(v2i32 (bitconvert (f64 DPR:$src))), (v2i32 DPR:$src)>;
5084 def : Pat<(v2i32 (bitconvert (v2f32 DPR:$src))), (v2i32 DPR:$src)>;
5085 def : Pat<(v4i16 (bitconvert (v1i64 DPR:$src))), (v4i16 DPR:$src)>;
5086 def : Pat<(v4i16 (bitconvert (v2i32 DPR:$src))), (v4i16 DPR:$src)>;
5087 def : Pat<(v4i16 (bitconvert (v8i8 DPR:$src))), (v4i16 DPR:$src)>;
5088 def : Pat<(v4i16 (bitconvert (f64 DPR:$src))), (v4i16 DPR:$src)>;
5089 def : Pat<(v4i16 (bitconvert (v2f32 DPR:$src))), (v4i16 DPR:$src)>;
5090 def : Pat<(v8i8 (bitconvert (v1i64 DPR:$src))), (v8i8 DPR:$src)>;
5091 def : Pat<(v8i8 (bitconvert (v2i32 DPR:$src))), (v8i8 DPR:$src)>;
5092 def : Pat<(v8i8 (bitconvert (v4i16 DPR:$src))), (v8i8 DPR:$src)>;
5093 def : Pat<(v8i8 (bitconvert (f64 DPR:$src))), (v8i8 DPR:$src)>;
5094 def : Pat<(v8i8 (bitconvert (v2f32 DPR:$src))), (v8i8 DPR:$src)>;
5095 def : Pat<(f64 (bitconvert (v1i64 DPR:$src))), (f64 DPR:$src)>;
5096 def : Pat<(f64 (bitconvert (v2i32 DPR:$src))), (f64 DPR:$src)>;
5097 def : Pat<(f64 (bitconvert (v4i16 DPR:$src))), (f64 DPR:$src)>;
5098 def : Pat<(f64 (bitconvert (v8i8 DPR:$src))), (f64 DPR:$src)>;
5099 def : Pat<(f64 (bitconvert (v2f32 DPR:$src))), (f64 DPR:$src)>;
5100 def : Pat<(v2f32 (bitconvert (f64 DPR:$src))), (v2f32 DPR:$src)>;
5101 def : Pat<(v2f32 (bitconvert (v1i64 DPR:$src))), (v2f32 DPR:$src)>;
5102 def : Pat<(v2f32 (bitconvert (v2i32 DPR:$src))), (v2f32 DPR:$src)>;
5103 def : Pat<(v2f32 (bitconvert (v4i16 DPR:$src))), (v2f32 DPR:$src)>;
5104 def : Pat<(v2f32 (bitconvert (v8i8 DPR:$src))), (v2f32 DPR:$src)>;
// 128-bit (QPR) reinterpretations:
5106 def : Pat<(v2i64 (bitconvert (v4i32 QPR:$src))), (v2i64 QPR:$src)>;
5107 def : Pat<(v2i64 (bitconvert (v8i16 QPR:$src))), (v2i64 QPR:$src)>;
5108 def : Pat<(v2i64 (bitconvert (v16i8 QPR:$src))), (v2i64 QPR:$src)>;
5109 def : Pat<(v2i64 (bitconvert (v2f64 QPR:$src))), (v2i64 QPR:$src)>;
5110 def : Pat<(v2i64 (bitconvert (v4f32 QPR:$src))), (v2i64 QPR:$src)>;
5111 def : Pat<(v4i32 (bitconvert (v2i64 QPR:$src))), (v4i32 QPR:$src)>;
5112 def : Pat<(v4i32 (bitconvert (v8i16 QPR:$src))), (v4i32 QPR:$src)>;
5113 def : Pat<(v4i32 (bitconvert (v16i8 QPR:$src))), (v4i32 QPR:$src)>;
5114 def : Pat<(v4i32 (bitconvert (v2f64 QPR:$src))), (v4i32 QPR:$src)>;
5115 def : Pat<(v4i32 (bitconvert (v4f32 QPR:$src))), (v4i32 QPR:$src)>;
5116 def : Pat<(v8i16 (bitconvert (v2i64 QPR:$src))), (v8i16 QPR:$src)>;
5117 def : Pat<(v8i16 (bitconvert (v4i32 QPR:$src))), (v8i16 QPR:$src)>;
5118 def : Pat<(v8i16 (bitconvert (v16i8 QPR:$src))), (v8i16 QPR:$src)>;
5119 def : Pat<(v8i16 (bitconvert (v2f64 QPR:$src))), (v8i16 QPR:$src)>;
5120 def : Pat<(v8i16 (bitconvert (v4f32 QPR:$src))), (v8i16 QPR:$src)>;
5121 def : Pat<(v16i8 (bitconvert (v2i64 QPR:$src))), (v16i8 QPR:$src)>;
5122 def : Pat<(v16i8 (bitconvert (v4i32 QPR:$src))), (v16i8 QPR:$src)>;
5123 def : Pat<(v16i8 (bitconvert (v8i16 QPR:$src))), (v16i8 QPR:$src)>;
5124 def : Pat<(v16i8 (bitconvert (v2f64 QPR:$src))), (v16i8 QPR:$src)>;
5125 def : Pat<(v16i8 (bitconvert (v4f32 QPR:$src))), (v16i8 QPR:$src)>;
5126 def : Pat<(v4f32 (bitconvert (v2i64 QPR:$src))), (v4f32 QPR:$src)>;
5127 def : Pat<(v4f32 (bitconvert (v4i32 QPR:$src))), (v4f32 QPR:$src)>;
5128 def : Pat<(v4f32 (bitconvert (v8i16 QPR:$src))), (v4f32 QPR:$src)>;
5129 def : Pat<(v4f32 (bitconvert (v16i8 QPR:$src))), (v4f32 QPR:$src)>;
5130 def : Pat<(v4f32 (bitconvert (v2f64 QPR:$src))), (v4f32 QPR:$src)>;
5131 def : Pat<(v2f64 (bitconvert (v2i64 QPR:$src))), (v2f64 QPR:$src)>;
5132 def : Pat<(v2f64 (bitconvert (v4i32 QPR:$src))), (v2f64 QPR:$src)>;
5133 def : Pat<(v2f64 (bitconvert (v8i16 QPR:$src))), (v2f64 QPR:$src)>;
5134 def : Pat<(v2f64 (bitconvert (v16i8 QPR:$src))), (v2f64 QPR:$src)>;
5135 def : Pat<(v2f64 (bitconvert (v4f32 QPR:$src))), (v2f64 QPR:$src)>;