//===- ARMInstrNEON.td - NEON support for ARM -----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the ARM NEON instruction set.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// NEON-specific DAG Nodes.
//===----------------------------------------------------------------------===//
// Profile for NEON vector compares: i-vector result, two same-typed operands.
def SDTARMVCMP : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<1, 2>]>;

// NEON vector compare operations.
def NEONvceq  : SDNode<"ARMISD::VCEQ", SDTARMVCMP>;
def NEONvcge  : SDNode<"ARMISD::VCGE", SDTARMVCMP>;
def NEONvcgeu : SDNode<"ARMISD::VCGEU", SDTARMVCMP>;
def NEONvcgt  : SDNode<"ARMISD::VCGT", SDTARMVCMP>;
def NEONvcgtu : SDNode<"ARMISD::VCGTU", SDTARMVCMP>;
def NEONvtst  : SDNode<"ARMISD::VTST", SDTARMVCMP>;
// Types for vector shift by immediates. The "SHX" version is for long and
// narrow operations where the source and destination vectors have different
// types. The "SHINS" version is for shift and insert operations.
def SDTARMVSH    : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
                                        SDTCisVT<2, i32>]>;
def SDTARMVSHX   : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                        SDTCisVT<2, i32>]>;
def SDTARMVSHINS : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
                                        SDTCisSameAs<0, 2>, SDTCisVT<3, i32>]>;

// Vector shifts (left; right = signed/unsigned variants).
def NEONvshl      : SDNode<"ARMISD::VSHL", SDTARMVSH>;
def NEONvshrs     : SDNode<"ARMISD::VSHRs", SDTARMVSH>;
def NEONvshru     : SDNode<"ARMISD::VSHRu", SDTARMVSH>;
def NEONvshlls    : SDNode<"ARMISD::VSHLLs", SDTARMVSHX>;
def NEONvshllu    : SDNode<"ARMISD::VSHLLu", SDTARMVSHX>;
def NEONvshlli    : SDNode<"ARMISD::VSHLLi", SDTARMVSHX>;
def NEONvshrn     : SDNode<"ARMISD::VSHRN", SDTARMVSHX>;

// Vector rounding shifts.
def NEONvrshrs    : SDNode<"ARMISD::VRSHRs", SDTARMVSH>;
def NEONvrshru    : SDNode<"ARMISD::VRSHRu", SDTARMVSH>;
def NEONvrshrn    : SDNode<"ARMISD::VRSHRN", SDTARMVSHX>;

// Vector saturating shifts.
def NEONvqshls    : SDNode<"ARMISD::VQSHLs", SDTARMVSH>;
def NEONvqshlu    : SDNode<"ARMISD::VQSHLu", SDTARMVSH>;
def NEONvqshlsu   : SDNode<"ARMISD::VQSHLsu", SDTARMVSH>;
def NEONvqshrns   : SDNode<"ARMISD::VQSHRNs", SDTARMVSHX>;
def NEONvqshrnu   : SDNode<"ARMISD::VQSHRNu", SDTARMVSHX>;
def NEONvqshrnsu  : SDNode<"ARMISD::VQSHRNsu", SDTARMVSHX>;

// Vector saturating rounding shifts (narrowing).
def NEONvqrshrns  : SDNode<"ARMISD::VQRSHRNs", SDTARMVSHX>;
def NEONvqrshrnu  : SDNode<"ARMISD::VQRSHRNu", SDTARMVSHX>;
def NEONvqrshrnsu : SDNode<"ARMISD::VQRSHRNsu", SDTARMVSHX>;

// Vector shift-and-insert.
def NEONvsli      : SDNode<"ARMISD::VSLI", SDTARMVSHINS>;
def NEONvsri      : SDNode<"ARMISD::VSRI", SDTARMVSHINS>;
// Get a lane from a vector as an i32 (lane number is the i32 operand).
def SDTARMVGETLN  : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisInt<1>,
                                         SDTCisVT<2, i32>]>;
def NEONvgetlaneu : SDNode<"ARMISD::VGETLANEu", SDTARMVGETLN>;
def NEONvgetlanes : SDNode<"ARMISD::VGETLANEs", SDTARMVGETLN>;

// Duplicate a lane of a Q register across a vector.
def NEONvduplaneq : SDNode<"ARMISD::VDUPLANEQ",
                           SDTypeProfile<1, 2, [SDTCisVT<2, i32>]>>;
// Profiles for the multi-register structure loads: N same-typed results
// plus one pointer operand.
def SDTARMVLD2 : SDTypeProfile<2, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDTARMVLD3 : SDTypeProfile<3, 1, [SDTCisSameAs<0, 1>,
                                      SDTCisSameAs<0, 2>, SDTCisPtrTy<3>]>;
def SDTARMVLD4 : SDTypeProfile<4, 1, [SDTCisSameAs<0, 1>,
                                      SDTCisSameAs<0, 2>,
                                      SDTCisSameAs<0, 3>, SDTCisPtrTy<4>]>;
def NEONvld2d  : SDNode<"ARMISD::VLD2D", SDTARMVLD2,
                        [SDNPHasChain, SDNPMayLoad]>;
def NEONvld3d  : SDNode<"ARMISD::VLD3D", SDTARMVLD3,
                        [SDNPHasChain, SDNPMayLoad]>;
def NEONvld4d  : SDNode<"ARMISD::VLD4D", SDTARMVLD4,
                        [SDNPHasChain, SDNPMayLoad]>;
//===----------------------------------------------------------------------===//
// NEON operand definitions
//===----------------------------------------------------------------------===//
// addrmode_neonldstm := reg
//
/* TODO: Take advantage of vldm.
def addrmode_neonldstm : Operand<i32>,
                ComplexPattern<i32, 2, "SelectAddrModeNeonLdStM", []> {
  let PrintMethod = "printAddrNeonLdStMOperand";
  let MIOperandInfo = (ops GPR, i32imm);
}
*/
//===----------------------------------------------------------------------===//
// NEON load / store instructions
//===----------------------------------------------------------------------===//
/* TODO: Take advantage of vldm.
let mayLoad = 1 in {
def VLDMD : NI<(outs),
               (ins addrmode_neonldstm:$addr, reglist:$dst1, variable_ops),
               "vldm${addr:submode} ${addr:base}, $dst1",
               []> {
  let Inst{27-25} = 0b110;
  let Inst{20}    = 1;
  let Inst{11-9}  = 0b101;
}

def VLDMS : NI<(outs),
               (ins addrmode_neonldstm:$addr, reglist:$dst1, variable_ops),
               "vldm${addr:submode} ${addr:base}, $dst1",
               []> {
  let Inst{27-25} = 0b110;
  let Inst{20}    = 1;
  let Inst{11-9}  = 0b101;
}
}
*/
// Use vldmia to load a Q register as a D register pair.
def VLDRQ : NI<(outs QPR:$dst), (ins GPR:$addr),
               "vldmia $addr, ${dst:dregpair}",
               [(set QPR:$dst, (v2f64 (load GPR:$addr)))]> {
  let Inst{27-25} = 0b110;
  let Inst{24}    = 0; // P bit
  let Inst{23}    = 1; // U bit
  let Inst{20}    = 1; // L bit
  let Inst{11-9}  = 0b101;
}
// Use vstmia to store a Q register as a D register pair.
def VSTRQ : NI<(outs), (ins QPR:$src, GPR:$addr),
               "vstmia $addr, ${src:dregpair}",
               [(store (v2f64 QPR:$src), GPR:$addr)]> {
  let Inst{27-25} = 0b110;
  let Inst{24}    = 0; // P bit
  let Inst{23}    = 1; // U bit
  let Inst{20}    = 0; // L bit
  let Inst{11-9}  = 0b101;
}
// VLD1 : Vector Load (multiple single elements)
class VLD1D<string OpcodeStr, ValueType Ty, Intrinsic IntOp>
  : NLdSt<(outs DPR:$dst), (ins addrmode6:$addr),
          !strconcat(OpcodeStr, "\t\\{$dst\\}, $addr"),
          [(set DPR:$dst, (Ty (IntOp addrmode6:$addr)))]>;
class VLD1Q<string OpcodeStr, ValueType Ty, Intrinsic IntOp>
  : NLdSt<(outs QPR:$dst), (ins addrmode6:$addr),
          !strconcat(OpcodeStr, "\t${dst:dregpair}, $addr"),
          [(set QPR:$dst, (Ty (IntOp addrmode6:$addr)))]>;

def VLD1d8  : VLD1D<"vld1.8",  v8i8,  int_arm_neon_vld1i>;
def VLD1d16 : VLD1D<"vld1.16", v4i16, int_arm_neon_vld1i>;
def VLD1d32 : VLD1D<"vld1.32", v2i32, int_arm_neon_vld1i>;
def VLD1df  : VLD1D<"vld1.32", v2f32, int_arm_neon_vld1f>;
def VLD1d64 : VLD1D<"vld1.64", v1i64, int_arm_neon_vld1i>;

def VLD1q8  : VLD1Q<"vld1.8",  v16i8, int_arm_neon_vld1i>;
def VLD1q16 : VLD1Q<"vld1.16", v8i16, int_arm_neon_vld1i>;
def VLD1q32 : VLD1Q<"vld1.32", v4i32, int_arm_neon_vld1i>;
def VLD1qf  : VLD1Q<"vld1.32", v4f32, int_arm_neon_vld1f>;
def VLD1q64 : VLD1Q<"vld1.64", v2i64, int_arm_neon_vld1i>;
// VST1 : Vector Store (multiple single elements)
class VST1D<string OpcodeStr, ValueType Ty, Intrinsic IntOp>
  : NLdSt<(outs), (ins addrmode6:$addr, DPR:$src),
          !strconcat(OpcodeStr, "\t\\{$src\\}, $addr"),
          [(IntOp addrmode6:$addr, (Ty DPR:$src))]>;
class VST1Q<string OpcodeStr, ValueType Ty, Intrinsic IntOp>
  : NLdSt<(outs), (ins addrmode6:$addr, QPR:$src),
          !strconcat(OpcodeStr, "\t${src:dregpair}, $addr"),
          [(IntOp addrmode6:$addr, (Ty QPR:$src))]>;

def VST1d8  : VST1D<"vst1.8",  v8i8,  int_arm_neon_vst1i>;
def VST1d16 : VST1D<"vst1.16", v4i16, int_arm_neon_vst1i>;
def VST1d32 : VST1D<"vst1.32", v2i32, int_arm_neon_vst1i>;
def VST1df  : VST1D<"vst1.32", v2f32, int_arm_neon_vst1f>;
def VST1d64 : VST1D<"vst1.64", v1i64, int_arm_neon_vst1i>;

def VST1q8  : VST1Q<"vst1.8",  v16i8, int_arm_neon_vst1i>;
def VST1q16 : VST1Q<"vst1.16", v8i16, int_arm_neon_vst1i>;
def VST1q32 : VST1Q<"vst1.32", v4i32, int_arm_neon_vst1i>;
def VST1qf  : VST1Q<"vst1.32", v4f32, int_arm_neon_vst1f>;
def VST1q64 : VST1Q<"vst1.64", v2i64, int_arm_neon_vst1i>;
// VLD2 : Vector Load (multiple 2-element structures)
class VLD2D<string OpcodeStr>
  : NLdSt<(outs DPR:$dst1, DPR:$dst2), (ins addrmode6:$addr),
          !strconcat(OpcodeStr, "\t\\{$dst1,$dst2\\}, $addr"), []>;

def VLD2d8  : VLD2D<"vld2.8">;
def VLD2d16 : VLD2D<"vld2.16">;
def VLD2d32 : VLD2D<"vld2.32">;
def VLD2d64 : VLD2D<"vld2.64">;

// VLD3 : Vector Load (multiple 3-element structures)
class VLD3D<string OpcodeStr>
  : NLdSt<(outs DPR:$dst1, DPR:$dst2, DPR:$dst3), (ins addrmode6:$addr),
          !strconcat(OpcodeStr, "\t\\{$dst1,$dst2,$dst3\\}, $addr"), []>;

def VLD3d8  : VLD3D<"vld3.8">;
def VLD3d16 : VLD3D<"vld3.16">;
def VLD3d32 : VLD3D<"vld3.32">;
def VLD3d64 : VLD3D<"vld3.64">;

// VLD4 : Vector Load (multiple 4-element structures)
class VLD4D<string OpcodeStr>
  : NLdSt<(outs DPR:$dst1, DPR:$dst2, DPR:$dst3, DPR:$dst4),
          (ins addrmode6:$addr),
          !strconcat(OpcodeStr, "\t\\{$dst1,$dst2,$dst3,$dst4\\}, $addr"), []>;

def VLD4d8  : VLD4D<"vld4.8">;
def VLD4d16 : VLD4D<"vld4.16">;
def VLD4d32 : VLD4D<"vld4.32">;
def VLD4d64 : VLD4D<"vld4.64">;
//===----------------------------------------------------------------------===//
// NEON pattern fragments
//===----------------------------------------------------------------------===//
// Extract D sub-registers of Q registers.
// (arm_dsubreg_0 is 5; arm_dsubreg_1 is 6)
def SubReg_i8_reg  : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(5 + N->getZExtValue() / 8, MVT::i32);
}]>;
def SubReg_i16_reg : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(5 + N->getZExtValue() / 4, MVT::i32);
}]>;
def SubReg_i32_reg : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(5 + N->getZExtValue() / 2, MVT::i32);
}]>;
def SubReg_f64_reg : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(5 + N->getZExtValue(), MVT::i32);
}]>;

// Translate lane numbers from Q registers to D subregs.
def SubReg_i8_lane  : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() & 7, MVT::i32);
}]>;
def SubReg_i16_lane : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() & 3, MVT::i32);
}]>;
def SubReg_i32_lane : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue() & 1, MVT::i32);
}]>;
//===----------------------------------------------------------------------===//
// Instruction Classes
//===----------------------------------------------------------------------===//
// Basic 2-register operations, both double- and quad-register.
class N2VD<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
           bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
           ValueType ResTy, ValueType OpTy, SDNode OpNode>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$dst),
        (ins DPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set DPR:$dst, (ResTy (OpNode (OpTy DPR:$src))))]>;
class N2VQ<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
           bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
           ValueType ResTy, ValueType OpTy, SDNode OpNode>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$dst),
        (ins QPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set QPR:$dst, (ResTy (OpNode (OpTy QPR:$src))))]>;
// Basic 2-register intrinsics, both double- and quad-register.
class N2VDInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
              bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
              ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$dst),
        (ins DPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src))))]>;
class N2VQInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
              bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
              ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$dst),
        (ins QPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src))))]>;
// Basic 2-register operations, scalar single-precision:
// operate on a D register via insert/extract of an S sub-register.
class N2VDInts<SDNode OpNode, NeonI Inst>
  : NEONFPPat<(f32 (OpNode SPR:$a)),
              (EXTRACT_SUBREG (Inst (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
                                                   SPR:$a, arm_ssubreg_0)),
                              arm_ssubreg_0)>;
// Narrow 2-register intrinsics.
class N2VNInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
              bits<2> op17_16, bits<5> op11_7, bit op6, bit op4,
              string OpcodeStr, ValueType TyD, ValueType TyQ, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, op6, op4, (outs DPR:$dst),
        (ins QPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set DPR:$dst, (TyD (IntOp (TyQ QPR:$src))))]>;
// Long 2-register intrinsics.  (This is currently only used for VMOVL and is
// derived from N2VImm instead of N2V because of the way the size is encoded.)
class N2VLInt<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
              bit op6, bit op4, string OpcodeStr, ValueType TyQ, ValueType TyD,
              Intrinsic IntOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, op6, op4, (outs QPR:$dst),
           (ins DPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
           [(set QPR:$dst, (TyQ (IntOp (TyD DPR:$src))))]>;
// Basic 3-register operations, both double- and quad-register.
class N3VD<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
           string OpcodeStr, ValueType ResTy, ValueType OpTy,
           SDNode OpNode, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs DPR:$dst), (ins DPR:$src1, DPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set DPR:$dst, (ResTy (OpNode (OpTy DPR:$src1), (OpTy DPR:$src2))))]> {
  let isCommutable = Commutable;
}
class N3VQ<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
           string OpcodeStr, ValueType ResTy, ValueType OpTy,
           SDNode OpNode, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 1, op4,
        (outs QPR:$dst), (ins QPR:$src1, QPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set QPR:$dst, (ResTy (OpNode (OpTy QPR:$src1), (OpTy QPR:$src2))))]> {
  let isCommutable = Commutable;
}
// Basic 3-register operations, scalar single-precision
class N3VDs<SDNode OpNode, NeonI Inst>
  : NEONFPPat<(f32 (OpNode SPR:$a, SPR:$b)),
              (EXTRACT_SUBREG (Inst (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
                                                   SPR:$a, arm_ssubreg_0),
                                    (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
                                                   SPR:$b, arm_ssubreg_0)),
                              arm_ssubreg_0)>;
// Basic 3-register intrinsics, both double- and quad-register.
class N3VDInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
              string OpcodeStr, ValueType ResTy, ValueType OpTy,
              Intrinsic IntOp, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs DPR:$dst), (ins DPR:$src1, DPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src1), (OpTy DPR:$src2))))]> {
  let isCommutable = Commutable;
}
class N3VQInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
              string OpcodeStr, ValueType ResTy, ValueType OpTy,
              Intrinsic IntOp, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 1, op4,
        (outs QPR:$dst), (ins QPR:$src1, QPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src1), (OpTy QPR:$src2))))]> {
  let isCommutable = Commutable;
}
// Multiply-Add/Sub operations, both double- and quad-register.
// The destination is tied to $src1 (the accumulator).
class N3VDMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
                string OpcodeStr, ValueType Ty, SDNode MulOp, SDNode OpNode>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, DPR:$src3),
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3"), "$src1 = $dst",
        [(set DPR:$dst, (Ty (OpNode DPR:$src1,
                             (Ty (MulOp DPR:$src2, DPR:$src3)))))]>;
class N3VQMulOp<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
                string OpcodeStr, ValueType Ty, SDNode MulOp, SDNode OpNode>
  : N3V<op24, op23, op21_20, op11_8, 1, op4,
        (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, QPR:$src3),
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3"), "$src1 = $dst",
        [(set QPR:$dst, (Ty (OpNode QPR:$src1,
                             (Ty (MulOp QPR:$src2, QPR:$src3)))))]>;
// Multiply-Add/Sub operations, scalar single-precision
class N3VDMulOps<SDNode MulNode, SDNode OpNode, NeonI Inst>
  : NEONFPPat<(f32 (OpNode SPR:$acc,
                    (f32 (MulNode SPR:$a, SPR:$b)))),
              (EXTRACT_SUBREG (Inst (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
                                                   SPR:$acc, arm_ssubreg_0),
                                    (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
                                                   SPR:$a, arm_ssubreg_0),
                                    (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)),
                                                   SPR:$b, arm_ssubreg_0)),
                              arm_ssubreg_0)>;
// Neon 3-argument intrinsics, both double- and quad-register.
// The destination register is also used as the first source operand register.
class N3VDInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
               string OpcodeStr, ValueType ResTy, ValueType OpTy,
               Intrinsic IntOp>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, DPR:$src3),
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3"), "$src1 = $dst",
        [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src1),
                                      (OpTy DPR:$src2), (OpTy DPR:$src3))))]>;
class N3VQInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
               string OpcodeStr, ValueType ResTy, ValueType OpTy,
               Intrinsic IntOp>
  : N3V<op24, op23, op21_20, op11_8, 1, op4,
        (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, QPR:$src3),
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3"), "$src1 = $dst",
        [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src1),
                                      (OpTy QPR:$src2), (OpTy QPR:$src3))))]>;
// Neon Long 3-argument intrinsic.  The destination register is
// a quad-register and is also used as the first source operand register.
class N3VLInt3<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
               string OpcodeStr, ValueType TyQ, ValueType TyD, Intrinsic IntOp>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs QPR:$dst), (ins QPR:$src1, DPR:$src2, DPR:$src3),
        !strconcat(OpcodeStr, "\t$dst, $src2, $src3"), "$src1 = $dst",
        [(set QPR:$dst,
          (TyQ (IntOp (TyQ QPR:$src1), (TyD DPR:$src2), (TyD DPR:$src3))))]>;
// Narrowing 3-register intrinsics.
class N3VNInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
              string OpcodeStr, ValueType TyD, ValueType TyQ,
              Intrinsic IntOp, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs DPR:$dst), (ins QPR:$src1, QPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set DPR:$dst, (TyD (IntOp (TyQ QPR:$src1), (TyQ QPR:$src2))))]> {
  let isCommutable = Commutable;
}
// Long 3-register intrinsics.
class N3VLInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
              string OpcodeStr, ValueType TyQ, ValueType TyD,
              Intrinsic IntOp, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs QPR:$dst), (ins DPR:$src1, DPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set QPR:$dst, (TyQ (IntOp (TyD DPR:$src1), (TyD DPR:$src2))))]> {
  let isCommutable = Commutable;
}
// Wide 3-register intrinsics.
class N3VWInt<bit op24, bit op23, bits<2> op21_20, bits<4> op11_8, bit op4,
              string OpcodeStr, ValueType TyQ, ValueType TyD,
              Intrinsic IntOp, bit Commutable>
  : N3V<op24, op23, op21_20, op11_8, 0, op4,
        (outs QPR:$dst), (ins QPR:$src1, DPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src1, $src2"), "",
        [(set QPR:$dst, (TyQ (IntOp (TyQ QPR:$src1), (TyD DPR:$src2))))]> {
  let isCommutable = Commutable;
}
// Pairwise long 2-register intrinsics, both double- and quad-register.
class N2VDPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
                bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
                ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4, (outs DPR:$dst),
        (ins DPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src))))]>;
class N2VQPLInt<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
                bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
                ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4, (outs QPR:$dst),
        (ins QPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
        [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src))))]>;
// Pairwise long 2-register accumulate intrinsics,
// both double- and quad-register.
// The destination register is also used as the first source operand register.
class N2VDPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
                 bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
                 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 0, op4,
        (outs DPR:$dst), (ins DPR:$src1, DPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src2"), "$src1 = $dst",
        [(set DPR:$dst, (ResTy (IntOp (ResTy DPR:$src1), (OpTy DPR:$src2))))]>;
class N2VQPLInt2<bits<2> op24_23, bits<2> op21_20, bits<2> op19_18,
                 bits<2> op17_16, bits<5> op11_7, bit op4, string OpcodeStr,
                 ValueType ResTy, ValueType OpTy, Intrinsic IntOp>
  : N2V<op24_23, op21_20, op19_18, op17_16, op11_7, 1, op4,
        (outs QPR:$dst), (ins QPR:$src1, QPR:$src2),
        !strconcat(OpcodeStr, "\t$dst, $src2"), "$src1 = $dst",
        [(set QPR:$dst, (ResTy (IntOp (ResTy QPR:$src1), (OpTy QPR:$src2))))]>;
// Shift by immediate,
// both double- and quad-register.
class N2VDSh<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
             bit op4, string OpcodeStr, ValueType Ty, SDNode OpNode>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 0, op4,
           (outs DPR:$dst), (ins DPR:$src, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set DPR:$dst, (Ty (OpNode (Ty DPR:$src), (i32 imm:$SIMM))))]>;
class N2VQSh<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
             bit op4, string OpcodeStr, ValueType Ty, SDNode OpNode>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 1, op4,
           (outs QPR:$dst), (ins QPR:$src, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set QPR:$dst, (Ty (OpNode (Ty QPR:$src), (i32 imm:$SIMM))))]>;
// Long shift by immediate.
class N2VLSh<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
             bit op6, bit op4, string OpcodeStr, ValueType ResTy,
             ValueType OpTy, SDNode OpNode>
  : N2VImm<op24, op23, op21_16, op11_8, op7, op6, op4,
           (outs QPR:$dst), (ins DPR:$src, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set QPR:$dst, (ResTy (OpNode (OpTy DPR:$src),
                                          (i32 imm:$SIMM))))]>;

// Narrow shift by immediate.
class N2VNSh<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
             bit op6, bit op4, string OpcodeStr, ValueType ResTy,
             ValueType OpTy, SDNode OpNode>
  : N2VImm<op24, op23, op21_16, op11_8, op7, op6, op4,
           (outs DPR:$dst), (ins QPR:$src, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set DPR:$dst, (ResTy (OpNode (OpTy QPR:$src),
                                          (i32 imm:$SIMM))))]>;
// Shift right by immediate and accumulate,
// both double- and quad-register.
class N2VDShAdd<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
                bit op4, string OpcodeStr, ValueType Ty, SDNode ShOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 0, op4,
           (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src2, $SIMM"), "$src1 = $dst",
           [(set DPR:$dst, (Ty (add DPR:$src1,
                                (Ty (ShOp DPR:$src2, (i32 imm:$SIMM))))))]>;
class N2VQShAdd<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
                bit op4, string OpcodeStr, ValueType Ty, SDNode ShOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 1, op4,
           (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src2, $SIMM"), "$src1 = $dst",
           [(set QPR:$dst, (Ty (add QPR:$src1,
                                (Ty (ShOp QPR:$src2, (i32 imm:$SIMM))))))]>;
// Shift by immediate and insert,
// both double- and quad-register.
class N2VDShIns<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
                bit op4, string OpcodeStr, ValueType Ty, SDNode ShOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 0, op4,
           (outs DPR:$dst), (ins DPR:$src1, DPR:$src2, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src2, $SIMM"), "$src1 = $dst",
           [(set DPR:$dst, (Ty (ShOp DPR:$src1, DPR:$src2, (i32 imm:$SIMM))))]>;
class N2VQShIns<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
                bit op4, string OpcodeStr, ValueType Ty, SDNode ShOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 1, op4,
           (outs QPR:$dst), (ins QPR:$src1, QPR:$src2, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src2, $SIMM"), "$src1 = $dst",
           [(set QPR:$dst, (Ty (ShOp QPR:$src1, QPR:$src2, (i32 imm:$SIMM))))]>;
// Convert, with fractional bits immediate,
// both double- and quad-register.
class N2VCvtD<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
              bit op4, string OpcodeStr, ValueType ResTy, ValueType OpTy,
              Intrinsic IntOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 0, op4,
           (outs DPR:$dst), (ins DPR:$src, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set DPR:$dst, (ResTy (IntOp (OpTy DPR:$src), (i32 imm:$SIMM))))]>;
class N2VCvtQ<bit op24, bit op23, bits<6> op21_16, bits<4> op11_8, bit op7,
              bit op4, string OpcodeStr, ValueType ResTy, ValueType OpTy,
              Intrinsic IntOp>
  : N2VImm<op24, op23, op21_16, op11_8, op7, 1, op4,
           (outs QPR:$dst), (ins QPR:$src, i32imm:$SIMM),
           !strconcat(OpcodeStr, "\t$dst, $src, $SIMM"), "",
           [(set QPR:$dst, (ResTy (IntOp (OpTy QPR:$src), (i32 imm:$SIMM))))]>;
//===----------------------------------------------------------------------===//
// Instruction Definitions.
//===----------------------------------------------------------------------===//
// Neon 3-register vector operations.

// First with only element sizes of 8, 16 and 32 bits:
multiclass N3V_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                   string OpcodeStr, SDNode OpNode, bit Commutable = 0> {
  // 64-bit vector types.
  def v8i8  : N3VD<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                   v8i8, v8i8, OpNode, Commutable>;
  def v4i16 : N3VD<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr, "16"),
                   v4i16, v4i16, OpNode, Commutable>;
  def v2i32 : N3VD<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr, "32"),
                   v2i32, v2i32, OpNode, Commutable>;

  // 128-bit vector types.
  def v16i8 : N3VQ<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                   v16i8, v16i8, OpNode, Commutable>;
  def v8i16 : N3VQ<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr, "16"),
                   v8i16, v8i16, OpNode, Commutable>;
  def v4i32 : N3VQ<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr, "32"),
                   v4i32, v4i32, OpNode, Commutable>;
}
// ....then also with element size 64 bits:
multiclass N3V_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                    string OpcodeStr, SDNode OpNode, bit Commutable = 0>
  : N3V_QHS<op24, op23, op11_8, op4, OpcodeStr, OpNode, Commutable> {
  def v1i64 : N3VD<op24, op23, 0b11, op11_8, op4, !strconcat(OpcodeStr, "64"),
                   v1i64, v1i64, OpNode, Commutable>;
  def v2i64 : N3VQ<op24, op23, 0b11, op11_8, op4, !strconcat(OpcodeStr, "64"),
                   v2i64, v2i64, OpNode, Commutable>;
}
// Neon Narrowing 2-register vector intrinsics,
//   source operand element sizes of 16, 32 and 64 bits:
multiclass N2VNInt_HSD<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
                       bits<5> op11_7, bit op6, bit op4, string OpcodeStr,
                       Intrinsic IntOp> {
  def v8i8  : N2VNInt<op24_23, op21_20, 0b00, op17_16, op11_7, op6, op4,
                      !strconcat(OpcodeStr, "16"), v8i8, v8i16, IntOp>;
  def v4i16 : N2VNInt<op24_23, op21_20, 0b01, op17_16, op11_7, op6, op4,
                      !strconcat(OpcodeStr, "32"), v4i16, v4i32, IntOp>;
  def v2i32 : N2VNInt<op24_23, op21_20, 0b10, op17_16, op11_7, op6, op4,
                      !strconcat(OpcodeStr, "64"), v2i32, v2i64, IntOp>;
}
// Neon Lengthening 2-register vector intrinsic (currently specific to VMOVL).
//   source operand element sizes of 16, 32 and 64 bits:
multiclass N2VLInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op7, bit op6,
                       bit op4, string OpcodeStr, Intrinsic IntOp> {
  def v8i16 : N2VLInt<op24, op23, 0b001000, op11_8, op7, op6, op4,
                      !strconcat(OpcodeStr, "8"), v8i16, v8i8, IntOp>;
  def v4i32 : N2VLInt<op24, op23, 0b010000, op11_8, op7, op6, op4,
                      !strconcat(OpcodeStr, "16"), v4i32, v4i16, IntOp>;
  def v2i64 : N2VLInt<op24, op23, 0b100000, op11_8, op7, op6, op4,
                      !strconcat(OpcodeStr, "32"), v2i64, v2i32, IntOp>;
}
// Neon 3-register vector intrinsics.

// First with only element sizes of 16 and 32 bits:
multiclass N3VInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
                     string OpcodeStr, Intrinsic IntOp, bit Commutable = 0> {
  // 64-bit vector types.
  def v4i16 : N3VDInt<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr,"16"),
                      v4i16, v4i16, IntOp, Commutable>;
  def v2i32 : N3VDInt<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr,"32"),
                      v2i32, v2i32, IntOp, Commutable>;

  // 128-bit vector types.
  def v8i16 : N3VQInt<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr,"16"),
                      v8i16, v8i16, IntOp, Commutable>;
  def v4i32 : N3VQInt<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr,"32"),
                      v4i32, v4i32, IntOp, Commutable>;
}
// ....then also with element size of 8 bits:
multiclass N3VInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                      string OpcodeStr, Intrinsic IntOp, bit Commutable = 0>
  : N3VInt_HS<op24, op23, op11_8, op4, OpcodeStr, IntOp, Commutable> {
  def v8i8  : N3VDInt<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                      v8i8, v8i8, IntOp, Commutable>;
  def v16i8 : N3VQInt<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                      v16i8, v16i8, IntOp, Commutable>;
}
// ....then also with element size of 64 bits:
multiclass N3VInt_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                       string OpcodeStr, Intrinsic IntOp, bit Commutable = 0>
  : N3VInt_QHS<op24, op23, op11_8, op4, OpcodeStr, IntOp, Commutable> {
  def v1i64 : N3VDInt<op24, op23, 0b11, op11_8, op4, !strconcat(OpcodeStr,"64"),
                      v1i64, v1i64, IntOp, Commutable>;
  def v2i64 : N3VQInt<op24, op23, 0b11, op11_8, op4, !strconcat(OpcodeStr,"64"),
                      v2i64, v2i64, IntOp, Commutable>;
}
// Neon Narrowing 3-register vector intrinsics,
//   source operand element sizes of 16, 32 and 64 bits:
multiclass N3VNInt_HSD<bit op24, bit op23, bits<4> op11_8, bit op4,
                       string OpcodeStr, Intrinsic IntOp, bit Commutable = 0> {
  def v8i8  : N3VNInt<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr,"16"),
                      v8i8, v8i16, IntOp, Commutable>;
  def v4i16 : N3VNInt<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr,"32"),
                      v4i16, v4i32, IntOp, Commutable>;
  def v2i32 : N3VNInt<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr,"64"),
                      v2i32, v2i64, IntOp, Commutable>;
}
// Neon Long 3-register vector intrinsics.

// First with only element sizes of 16 and 32 bits:
multiclass N3VLInt_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
                      string OpcodeStr, Intrinsic IntOp, bit Commutable = 0> {
  def v4i32 : N3VLInt<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr,"16"),
                      v4i32, v4i16, IntOp, Commutable>;
  def v2i64 : N3VLInt<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr,"32"),
                      v2i64, v2i32, IntOp, Commutable>;
}
// ....then also with element size of 8 bits:
multiclass N3VLInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                       string OpcodeStr, Intrinsic IntOp, bit Commutable = 0>
  : N3VLInt_HS<op24, op23, op11_8, op4, OpcodeStr, IntOp, Commutable> {
  def v8i16 : N3VLInt<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                      v8i16, v8i8, IntOp, Commutable>;
}
// Neon Wide 3-register vector intrinsics,
//   source operand element sizes of 8, 16 and 32 bits:
multiclass N3VWInt_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
                       string OpcodeStr, Intrinsic IntOp, bit Commutable = 0> {
  def v8i16 : N3VWInt<op24, op23, 0b00, op11_8, op4, !strconcat(OpcodeStr, "8"),
                      v8i16, v8i8, IntOp, Commutable>;
  def v4i32 : N3VWInt<op24, op23, 0b01, op11_8, op4, !strconcat(OpcodeStr,"16"),
                      v4i32, v4i16, IntOp, Commutable>;
  def v2i64 : N3VWInt<op24, op23, 0b10, op11_8, op4, !strconcat(OpcodeStr,"32"),
                      v2i64, v2i32, IntOp, Commutable>;
}
713 // Neon Multiply-Op vector operations,
714 // element sizes of 8, 16 and 32 bits:
// Fused multiply-then-OpNode patterns (e.g. mul + add for VMLA, mul + sub
// for VMLS); instantiated for both 64-bit (D) and 128-bit (Q) vectors.
715 multiclass N3VMulOp_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
716 string OpcodeStr, SDNode OpNode> {
717 // 64-bit vector types.
718 def v8i8 : N3VDMulOp<op24, op23, 0b00, op11_8, op4,
719 !strconcat(OpcodeStr, "8"), v8i8, mul, OpNode>;
720 def v4i16 : N3VDMulOp<op24, op23, 0b01, op11_8, op4,
721 !strconcat(OpcodeStr, "16"), v4i16, mul, OpNode>;
722 def v2i32 : N3VDMulOp<op24, op23, 0b10, op11_8, op4,
723 !strconcat(OpcodeStr, "32"), v2i32, mul, OpNode>;
725 // 128-bit vector types.
726 def v16i8 : N3VQMulOp<op24, op23, 0b00, op11_8, op4,
727 !strconcat(OpcodeStr, "8"), v16i8, mul, OpNode>;
728 def v8i16 : N3VQMulOp<op24, op23, 0b01, op11_8, op4,
729 !strconcat(OpcodeStr, "16"), v8i16, mul, OpNode>;
730 def v4i32 : N3VQMulOp<op24, op23, 0b10, op11_8, op4,
731 !strconcat(OpcodeStr, "32"), v4i32, mul, OpNode>;
735 // Neon 3-argument intrinsics,
736 // element sizes of 8, 16 and 32 bits:
// Three-operand intrinsics where result and both sources share one element
// size; D and Q register variants are generated for each size.
737 multiclass N3VInt3_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
738 string OpcodeStr, Intrinsic IntOp> {
739 // 64-bit vector types.
740 def v8i8 : N3VDInt3<op24, op23, 0b00, op11_8, op4,
741 !strconcat(OpcodeStr, "8"), v8i8, v8i8, IntOp>;
742 def v4i16 : N3VDInt3<op24, op23, 0b01, op11_8, op4,
743 !strconcat(OpcodeStr, "16"), v4i16, v4i16, IntOp>;
744 def v2i32 : N3VDInt3<op24, op23, 0b10, op11_8, op4,
745 !strconcat(OpcodeStr, "32"), v2i32, v2i32, IntOp>;
747 // 128-bit vector types.
748 def v16i8 : N3VQInt3<op24, op23, 0b00, op11_8, op4,
749 !strconcat(OpcodeStr, "8"), v16i8, v16i8, IntOp>;
750 def v8i16 : N3VQInt3<op24, op23, 0b01, op11_8, op4,
751 !strconcat(OpcodeStr, "16"), v8i16, v8i16, IntOp>;
752 def v4i32 : N3VQInt3<op24, op23, 0b10, op11_8, op4,
753 !strconcat(OpcodeStr, "32"), v4i32, v4i32, IntOp>;
757 // Neon Long 3-argument intrinsics.
759 // First with only element sizes of 16 and 32 bits:
// Long accumulating intrinsics (e.g. VMLAL): wide Q result/accumulator,
// narrow D sources; suffix names the source element size.
760 multiclass N3VLInt3_HS<bit op24, bit op23, bits<4> op11_8, bit op4,
761 string OpcodeStr, Intrinsic IntOp> {
762 def v4i32 : N3VLInt3<op24, op23, 0b01, op11_8, op4,
763 !strconcat(OpcodeStr, "16"), v4i32, v4i16, IntOp>;
764 def v2i64 : N3VLInt3<op24, op23, 0b10, op11_8, op4,
765 !strconcat(OpcodeStr, "32"), v2i64, v2i32, IntOp>;
768 // ....then also with element size of 8 bits:
// Inherits the 16- and 32-bit defs from N3VLInt3_HS and adds the 8-bit case.
769 multiclass N3VLInt3_QHS<bit op24, bit op23, bits<4> op11_8, bit op4,
770 string OpcodeStr, Intrinsic IntOp>
771 : N3VLInt3_HS<op24, op23, op11_8, op4, OpcodeStr, IntOp> {
// The 8-bit-element def must use size bits 0b00. It previously used 0b01,
// which duplicated the encoding of the inherited v4i32 def and disagreed
// with every other *_QHS multiclass in this file (cf. N3VLInt_QHS, which
// uses 0b00 for its v8i16/v8i8 instantiation).
772 def v8i16 : N3VLInt3<op24, op23, 0b00, op11_8, op4,
773 !strconcat(OpcodeStr, "8"), v8i16, v8i8, IntOp>;
777 // Neon 2-register vector intrinsics,
778 // element sizes of 8, 16 and 32 bits:
// NOTE(review): the multiclass header appears truncated in this copy — the
// line declaring "Intrinsic IntOp> {" (original line 781) is missing here;
// verify against the original file before editing.
779 multiclass N2VInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
780 bits<5> op11_7, bit op4, string OpcodeStr,
782 // 64-bit vector types.
783 def v8i8 : N2VDInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
784 !strconcat(OpcodeStr, "8"), v8i8, v8i8, IntOp>;
785 def v4i16 : N2VDInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
786 !strconcat(OpcodeStr, "16"), v4i16, v4i16, IntOp>;
787 def v2i32 : N2VDInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
788 !strconcat(OpcodeStr, "32"), v2i32, v2i32, IntOp>;
790 // 128-bit vector types.
791 def v16i8 : N2VQInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
792 !strconcat(OpcodeStr, "8"), v16i8, v16i8, IntOp>;
793 def v8i16 : N2VQInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
794 !strconcat(OpcodeStr, "16"), v8i16, v8i16, IntOp>;
795 def v4i32 : N2VQInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
796 !strconcat(OpcodeStr, "32"), v4i32, v4i32, IntOp>;
800 // Neon Pairwise long 2-register intrinsics,
801 // element sizes of 8, 16 and 32 bits:
// Pairwise-long: adjacent element pairs are combined, so the result has
// half the lanes at twice the width (e.g. v8i8 -> v4i16).
802 multiclass N2VPLInt_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
803 bits<5> op11_7, bit op4,
804 string OpcodeStr, Intrinsic IntOp> {
805 // 64-bit vector types.
806 def v8i8 : N2VDPLInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
807 !strconcat(OpcodeStr, "8"), v4i16, v8i8, IntOp>;
808 def v4i16 : N2VDPLInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
809 !strconcat(OpcodeStr, "16"), v2i32, v4i16, IntOp>;
810 def v2i32 : N2VDPLInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
811 !strconcat(OpcodeStr, "32"), v1i64, v2i32, IntOp>;
813 // 128-bit vector types.
814 def v16i8 : N2VQPLInt<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
815 !strconcat(OpcodeStr, "8"), v8i16, v16i8, IntOp>;
816 def v8i16 : N2VQPLInt<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
817 !strconcat(OpcodeStr, "16"), v4i32, v8i16, IntOp>;
818 def v4i32 : N2VQPLInt<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
819 !strconcat(OpcodeStr, "32"), v2i64, v4i32, IntOp>;
823 // Neon Pairwise long 2-register accumulate intrinsics,
824 // element sizes of 8, 16 and 32 bits:
// Same lane mapping as N2VPLInt_QHS (pairs widened to half the lanes),
// but the N2V*PLInt2 base classes take an accumulator (e.g. VPADAL).
825 multiclass N2VPLInt2_QHS<bits<2> op24_23, bits<2> op21_20, bits<2> op17_16,
826 bits<5> op11_7, bit op4,
827 string OpcodeStr, Intrinsic IntOp> {
828 // 64-bit vector types.
829 def v8i8 : N2VDPLInt2<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
830 !strconcat(OpcodeStr, "8"), v4i16, v8i8, IntOp>;
831 def v4i16 : N2VDPLInt2<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
832 !strconcat(OpcodeStr, "16"), v2i32, v4i16, IntOp>;
833 def v2i32 : N2VDPLInt2<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
834 !strconcat(OpcodeStr, "32"), v1i64, v2i32, IntOp>;
836 // 128-bit vector types.
837 def v16i8 : N2VQPLInt2<op24_23, op21_20, 0b00, op17_16, op11_7, op4,
838 !strconcat(OpcodeStr, "8"), v8i16, v16i8, IntOp>;
839 def v8i16 : N2VQPLInt2<op24_23, op21_20, 0b01, op17_16, op11_7, op4,
840 !strconcat(OpcodeStr, "16"), v4i32, v8i16, IntOp>;
841 def v4i32 : N2VQPLInt2<op24_23, op21_20, 0b10, op17_16, op11_7, op4,
842 !strconcat(OpcodeStr, "32"), v2i64, v4i32, IntOp>;
846 // Neon 2-register vector shift by immediate,
847 // element sizes of 8, 16, 32 and 64 bits:
// The six-bit literal is the imm6 field pattern selecting the element size;
// the 64-bit defs pass imm6 = 0b000000 with the extra L bit set to 1.
848 multiclass N2VSh_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
849 string OpcodeStr, SDNode OpNode> {
850 // 64-bit vector types.
851 def v8i8 : N2VDSh<op24, op23, 0b001000, op11_8, 0, op4,
852 !strconcat(OpcodeStr, "8"), v8i8, OpNode>;
853 def v4i16 : N2VDSh<op24, op23, 0b010000, op11_8, 0, op4,
854 !strconcat(OpcodeStr, "16"), v4i16, OpNode>;
855 def v2i32 : N2VDSh<op24, op23, 0b100000, op11_8, 0, op4,
856 !strconcat(OpcodeStr, "32"), v2i32, OpNode>;
857 def v1i64 : N2VDSh<op24, op23, 0b000000, op11_8, 1, op4,
858 !strconcat(OpcodeStr, "64"), v1i64, OpNode>;
860 // 128-bit vector types.
861 def v16i8 : N2VQSh<op24, op23, 0b001000, op11_8, 0, op4,
862 !strconcat(OpcodeStr, "8"), v16i8, OpNode>;
863 def v8i16 : N2VQSh<op24, op23, 0b010000, op11_8, 0, op4,
864 !strconcat(OpcodeStr, "16"), v8i16, OpNode>;
865 def v4i32 : N2VQSh<op24, op23, 0b100000, op11_8, 0, op4,
866 !strconcat(OpcodeStr, "32"), v4i32, OpNode>;
867 def v2i64 : N2VQSh<op24, op23, 0b000000, op11_8, 1, op4,
868 !strconcat(OpcodeStr, "64"), v2i64, OpNode>;
872 // Neon Shift-Accumulate vector operations,
873 // element sizes of 8, 16, 32 and 64 bits:
// Same size/imm6 bit layout as N2VSh_QHSD; the N2V*ShAdd base classes fold
// the ShOp shift into an accumulate (used by VSRA/VRSRA below).
874 multiclass N2VShAdd_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
875 string OpcodeStr, SDNode ShOp> {
876 // 64-bit vector types.
877 def v8i8 : N2VDShAdd<op24, op23, 0b001000, op11_8, 0, op4,
878 !strconcat(OpcodeStr, "8"), v8i8, ShOp>;
879 def v4i16 : N2VDShAdd<op24, op23, 0b010000, op11_8, 0, op4,
880 !strconcat(OpcodeStr, "16"), v4i16, ShOp>;
881 def v2i32 : N2VDShAdd<op24, op23, 0b100000, op11_8, 0, op4,
882 !strconcat(OpcodeStr, "32"), v2i32, ShOp>;
883 def v1i64 : N2VDShAdd<op24, op23, 0b000000, op11_8, 1, op4,
884 !strconcat(OpcodeStr, "64"), v1i64, ShOp>;
886 // 128-bit vector types.
887 def v16i8 : N2VQShAdd<op24, op23, 0b001000, op11_8, 0, op4,
888 !strconcat(OpcodeStr, "8"), v16i8, ShOp>;
889 def v8i16 : N2VQShAdd<op24, op23, 0b010000, op11_8, 0, op4,
890 !strconcat(OpcodeStr, "16"), v8i16, ShOp>;
891 def v4i32 : N2VQShAdd<op24, op23, 0b100000, op11_8, 0, op4,
892 !strconcat(OpcodeStr, "32"), v4i32, ShOp>;
893 def v2i64 : N2VQShAdd<op24, op23, 0b000000, op11_8, 1, op4,
894 !strconcat(OpcodeStr, "64"), v2i64, ShOp>;
898 // Neon Shift-Insert vector operations,
899 // element sizes of 8, 16, 32 and 64 bits:
// Same size/imm6 bit layout as N2VSh_QHSD; the N2V*ShIns base classes
// implement shift-and-insert (used by VSLI/VSRI below).
900 multiclass N2VShIns_QHSD<bit op24, bit op23, bits<4> op11_8, bit op4,
901 string OpcodeStr, SDNode ShOp> {
902 // 64-bit vector types.
903 def v8i8 : N2VDShIns<op24, op23, 0b001000, op11_8, 0, op4,
904 !strconcat(OpcodeStr, "8"), v8i8, ShOp>;
905 def v4i16 : N2VDShIns<op24, op23, 0b010000, op11_8, 0, op4,
906 !strconcat(OpcodeStr, "16"), v4i16, ShOp>;
907 def v2i32 : N2VDShIns<op24, op23, 0b100000, op11_8, 0, op4,
908 !strconcat(OpcodeStr, "32"), v2i32, ShOp>;
909 def v1i64 : N2VDShIns<op24, op23, 0b000000, op11_8, 1, op4,
910 !strconcat(OpcodeStr, "64"), v1i64, ShOp>;
912 // 128-bit vector types.
913 def v16i8 : N2VQShIns<op24, op23, 0b001000, op11_8, 0, op4,
914 !strconcat(OpcodeStr, "8"), v16i8, ShOp>;
915 def v8i16 : N2VQShIns<op24, op23, 0b010000, op11_8, 0, op4,
916 !strconcat(OpcodeStr, "16"), v8i16, ShOp>;
917 def v4i32 : N2VQShIns<op24, op23, 0b100000, op11_8, 0, op4,
918 !strconcat(OpcodeStr, "32"), v4i32, ShOp>;
919 def v2i64 : N2VQShIns<op24, op23, 0b000000, op11_8, 1, op4,
920 !strconcat(OpcodeStr, "64"), v2i64, ShOp>;
923 //===----------------------------------------------------------------------===//
924 // Instruction Definitions.
925 //===----------------------------------------------------------------------===//
927 // Vector Add Operations.
929 // VADD : Vector Add (integer and floating-point)
930 defm VADD : N3V_QHSD<0, 0, 0b1000, 0, "vadd.i", add, 1>;
931 def VADDfd : N3VD<0, 0, 0b00, 0b1101, 0, "vadd.f32", v2f32, v2f32, fadd, 1>;
932 def VADDfq : N3VQ<0, 0, 0b00, 0b1101, 0, "vadd.f32", v4f32, v4f32, fadd, 1>;
933 // VADDL : Vector Add Long (Q = D + D)
934 defm VADDLs : N3VLInt_QHS<0,1,0b0000,0, "vaddl.s", int_arm_neon_vaddls, 1>;
935 defm VADDLu : N3VLInt_QHS<1,1,0b0000,0, "vaddl.u", int_arm_neon_vaddlu, 1>;
936 // VADDW : Vector Add Wide (Q = Q + D)
937 defm VADDWs : N3VWInt_QHS<0,1,0b0001,0, "vaddw.s", int_arm_neon_vaddws, 0>;
938 defm VADDWu : N3VWInt_QHS<1,1,0b0001,0, "vaddw.u", int_arm_neon_vaddwu, 0>;
939 // VHADD : Vector Halving Add
940 defm VHADDs : N3VInt_QHS<0,0,0b0000,0, "vhadd.s", int_arm_neon_vhadds, 1>;
941 defm VHADDu : N3VInt_QHS<1,0,0b0000,0, "vhadd.u", int_arm_neon_vhaddu, 1>;
942 // VRHADD : Vector Rounding Halving Add
943 defm VRHADDs : N3VInt_QHS<0,0,0b0001,0, "vrhadd.s", int_arm_neon_vrhadds, 1>;
944 defm VRHADDu : N3VInt_QHS<1,0,0b0001,0, "vrhadd.u", int_arm_neon_vrhaddu, 1>;
945 // VQADD : Vector Saturating Add
946 defm VQADDs : N3VInt_QHSD<0,0,0b0000,1, "vqadd.s", int_arm_neon_vqadds, 1>;
947 defm VQADDu : N3VInt_QHSD<1,0,0b0000,1, "vqadd.u", int_arm_neon_vqaddu, 1>;
948 // VADDHN : Vector Add and Narrow Returning High Half (D = Q + Q)
949 defm VADDHN : N3VNInt_HSD<0,1,0b0100,0, "vaddhn.i", int_arm_neon_vaddhn, 1>;
950 // VRADDHN : Vector Rounding Add and Narrow Returning High Half (D = Q + Q)
951 defm VRADDHN : N3VNInt_HSD<1,1,0b0100,0, "vraddhn.i", int_arm_neon_vraddhn, 1>;
953 // Vector Add Operations used for single-precision FP
// Pattern-only def: selects the scalar-f32 form of fadd onto VADDfd.
954 def : N3VDs<fadd, VADDfd>;
956 // Vector Multiply Operations.
958 // VMUL : Vector Multiply (integer, polynomial and floating-point)
959 defm VMUL : N3V_QHS<0, 0, 0b1001, 1, "vmul.i", mul, 1>;
// Polynomial multiply is only defined for 8-bit elements (p8).
960 def VMULpd : N3VDInt<1, 0, 0b00, 0b1001, 1, "vmul.p8", v8i8, v8i8,
961 int_arm_neon_vmulp, 1>;
962 def VMULpq : N3VQInt<1, 0, 0b00, 0b1001, 1, "vmul.p8", v16i8, v16i8,
963 int_arm_neon_vmulp, 1>;
964 def VMULfd : N3VD<1, 0, 0b00, 0b1101, 1, "vmul.f32", v2f32, v2f32, fmul, 1>;
965 def VMULfq : N3VQ<1, 0, 0b00, 0b1101, 1, "vmul.f32", v4f32, v4f32, fmul, 1>;
966 // VQDMULH : Vector Saturating Doubling Multiply Returning High Half
967 defm VQDMULH : N3VInt_HS<0,0,0b1011,0, "vqdmulh.s", int_arm_neon_vqdmulh, 1>;
968 // VQRDMULH : Vector Rounding Saturating Doubling Multiply Returning High Half
969 defm VQRDMULH : N3VInt_HS<1,0,0b1011,0, "vqrdmulh.s", int_arm_neon_vqrdmulh, 1>;
970 // VMULL : Vector Multiply Long (integer and polynomial) (Q = D * D)
971 defm VMULLs : N3VLInt_QHS<0,1,0b1100,0, "vmull.s", int_arm_neon_vmulls, 1>;
972 defm VMULLu : N3VLInt_QHS<1,1,0b1100,0, "vmull.u", int_arm_neon_vmullu, 1>;
973 def VMULLp : N3VLInt<0, 1, 0b00, 0b1110, 0, "vmull.p8", v8i16, v8i8,
974 int_arm_neon_vmullp, 1>;
975 // VQDMULL : Vector Saturating Doubling Multiply Long (Q = D * D)
976 defm VQDMULL : N3VLInt_HS<0,1,0b1101,0, "vqdmull.s", int_arm_neon_vqdmull, 1>;
978 // Vector Multiply Operations used for single-precision FP
// Pattern-only def: selects the scalar-f32 form of fmul onto VMULfd.
979 def : N3VDs<fmul, VMULfd>;
981 // Vector Multiply-Accumulate and Multiply-Subtract Operations.
983 // VMLA : Vector Multiply Accumulate (integer and floating-point)
984 defm VMLA : N3VMulOp_QHS<0, 0, 0b1001, 0, "vmla.i", add>;
985 def VMLAfd : N3VDMulOp<0, 0, 0b00, 0b1101, 1, "vmla.f32", v2f32, fmul, fadd>;
986 def VMLAfq : N3VQMulOp<0, 0, 0b00, 0b1101, 1, "vmla.f32", v4f32, fmul, fadd>;
987 // VMLAL : Vector Multiply Accumulate Long (Q += D * D)
988 defm VMLALs : N3VLInt3_QHS<0,1,0b1000,0, "vmlal.s", int_arm_neon_vmlals>;
989 defm VMLALu : N3VLInt3_QHS<1,1,0b1000,0, "vmlal.u", int_arm_neon_vmlalu>;
990 // VQDMLAL : Vector Saturating Doubling Multiply Accumulate Long (Q += D * D)
991 defm VQDMLAL : N3VLInt3_HS<0, 1, 0b1001, 0, "vqdmlal.s", int_arm_neon_vqdmlal>;
992 // VMLS : Vector Multiply Subtract (integer and floating-point)
// Integer VMLS is distinguished from integer VMLA by bit 24 (U = 1 in the
// ARM ARM A1 encoding). With op24 = 0 this defm emitted the identical
// encoding to VMLA above (same 0b1001 opcode, op4 = 0); set op24 = 1.
993 defm VMLS : N3VMulOp_QHS<1, 0, 0b1001, 0, "vmls.i", sub>;
994 def VMLSfd : N3VDMulOp<0, 0, 0b10, 0b1101, 1, "vmls.f32", v2f32, fmul, fsub>;
995 def VMLSfq : N3VQMulOp<0, 0, 0b10, 0b1101, 1, "vmls.f32", v4f32, fmul, fsub>;
996 // VMLSL : Vector Multiply Subtract Long (Q -= D * D)
997 defm VMLSLs : N3VLInt3_QHS<0,1,0b1010,0, "vmlsl.s", int_arm_neon_vmlsls>;
998 defm VMLSLu : N3VLInt3_QHS<1,1,0b1010,0, "vmlsl.u", int_arm_neon_vmlslu>;
999 // VQDMLSL : Vector Saturating Doubling Multiply Subtract Long (Q -= D * D)
1000 defm VQDMLSL : N3VLInt3_HS<0, 1, 0b1011, 0, "vqdmlsl.s", int_arm_neon_vqdmlsl>;
1002 // Vector Multiply-Accumulate/Subtract used for single-precision FP
// Pattern-only defs: select scalar-f32 fused multiply-add/sub onto the
// corresponding D-register instructions.
1003 def : N3VDMulOps<fmul, fadd, VMLAfd>;
1004 def : N3VDMulOps<fmul, fsub, VMLSfd>;
1006 // Vector Subtract Operations.
1008 // VSUB : Vector Subtract (integer and floating-point)
1009 defm VSUB : N3V_QHSD<1, 0, 0b1000, 0, "vsub.i", sub, 0>;
1010 def VSUBfd : N3VD<0, 0, 0b10, 0b1101, 0, "vsub.f32", v2f32, v2f32, fsub, 0>;
1011 def VSUBfq : N3VQ<0, 0, 0b10, 0b1101, 0, "vsub.f32", v4f32, v4f32, fsub, 0>;
1012 // VSUBL : Vector Subtract Long (Q = D - D)
1013 defm VSUBLs : N3VLInt_QHS<0,1,0b0010,0, "vsubl.s", int_arm_neon_vsubls, 1>;
1014 defm VSUBLu : N3VLInt_QHS<1,1,0b0010,0, "vsubl.u", int_arm_neon_vsublu, 1>;
1015 // VSUBW : Vector Subtract Wide (Q = Q - D)
1016 defm VSUBWs : N3VWInt_QHS<0,1,0b0011,0, "vsubw.s", int_arm_neon_vsubws, 0>;
1017 defm VSUBWu : N3VWInt_QHS<1,1,0b0011,0, "vsubw.u", int_arm_neon_vsubwu, 0>;
1018 // VHSUB : Vector Halving Subtract
1019 defm VHSUBs : N3VInt_QHS<0, 0, 0b0010, 0, "vhsub.s", int_arm_neon_vhsubs, 0>;
1020 defm VHSUBu : N3VInt_QHS<1, 0, 0b0010, 0, "vhsub.u", int_arm_neon_vhsubu, 0>;
1021 // VQSUB : Vector Saturing Subtract
1022 defm VQSUBs : N3VInt_QHSD<0, 0, 0b0010, 1, "vqsub.s", int_arm_neon_vqsubs, 0>;
1023 defm VQSUBu : N3VInt_QHSD<1, 0, 0b0010, 1, "vqsub.u", int_arm_neon_vqsubu, 0>;
1024 // VSUBHN : Vector Subtract and Narrow Returning High Half (D = Q - Q)
1025 defm VSUBHN : N3VNInt_HSD<0,1,0b0110,0, "vsubhn.i", int_arm_neon_vsubhn, 0>;
1026 // VRSUBHN : Vector Rounding Subtract and Narrow Returning High Half (D=Q-Q)
1027 defm VRSUBHN : N3VNInt_HSD<1,1,0b0110,0, "vrsubhn.i", int_arm_neon_vrsubhn, 0>;
1029 // Vector Sub Operations used for single-precision FP
// Pattern-only def: selects the scalar-f32 form of fsub onto VSUBfd.
1030 def : N3VDs<fsub, VSUBfd>;
1032 // Vector Comparisons.
// All comparisons produce integer masks (v2i32/v4i32) even for the f32
// source variants, matching the NEONvc* DAG nodes declared at the top.
1034 // VCEQ : Vector Compare Equal
1035 defm VCEQ : N3V_QHS<1, 0, 0b1000, 1, "vceq.i", NEONvceq, 1>;
1036 def VCEQfd : N3VD<0,0,0b00,0b1110,0, "vceq.f32", v2i32, v2f32, NEONvceq, 1>;
1037 def VCEQfq : N3VQ<0,0,0b00,0b1110,0, "vceq.f32", v4i32, v4f32, NEONvceq, 1>;
1038 // VCGE : Vector Compare Greater Than or Equal
1039 defm VCGEs : N3V_QHS<0, 0, 0b0011, 1, "vcge.s", NEONvcge, 0>;
1040 defm VCGEu : N3V_QHS<1, 0, 0b0011, 1, "vcge.u", NEONvcgeu, 0>;
1041 def VCGEfd : N3VD<1,0,0b00,0b1110,0, "vcge.f32", v2i32, v2f32, NEONvcge, 0>;
1042 def VCGEfq : N3VQ<1,0,0b00,0b1110,0, "vcge.f32", v4i32, v4f32, NEONvcge, 0>;
1043 // VCGT : Vector Compare Greater Than
1044 defm VCGTs : N3V_QHS<0, 0, 0b0011, 0, "vcgt.s", NEONvcgt, 0>;
1045 defm VCGTu : N3V_QHS<1, 0, 0b0011, 0, "vcgt.u", NEONvcgtu, 0>;
1046 def VCGTfd : N3VD<1,0,0b10,0b1110,0, "vcgt.f32", v2i32, v2f32, NEONvcgt, 0>;
1047 def VCGTfq : N3VQ<1,0,0b10,0b1110,0, "vcgt.f32", v4i32, v4f32, NEONvcgt, 0>;
1048 // VACGE : Vector Absolute Compare Greater Than or Equal (aka VCAGE)
1049 def VACGEd : N3VDInt<1, 0, 0b00, 0b1110, 1, "vacge.f32", v2i32, v2f32,
1050 int_arm_neon_vacged, 0>;
1051 def VACGEq : N3VQInt<1, 0, 0b00, 0b1110, 1, "vacge.f32", v4i32, v4f32,
1052 int_arm_neon_vacgeq, 0>;
1053 // VACGT : Vector Absolute Compare Greater Than (aka VCAGT)
1054 def VACGTd : N3VDInt<1, 0, 0b10, 0b1110, 1, "vacgt.f32", v2i32, v2f32,
1055 int_arm_neon_vacgtd, 0>;
1056 def VACGTq : N3VQInt<1, 0, 0b10, 0b1110, 1, "vacgt.f32", v4i32, v4f32,
1057 int_arm_neon_vacgtq, 0>;
1058 // VTST : Vector Test Bits
1059 defm VTST : N3V_QHS<0, 0, 0b1000, 1, "vtst.i", NEONvtst, 1>;
1061 // Vector Bitwise Operations.
// Bitwise ops are element-size agnostic; v2i32/v4i32 are used as the
// canonical types for pattern matching.
1063 // VAND : Vector Bitwise AND
1064 def VANDd : N3VD<0, 0, 0b00, 0b0001, 1, "vand", v2i32, v2i32, and, 1>;
1065 def VANDq : N3VQ<0, 0, 0b00, 0b0001, 1, "vand", v4i32, v4i32, and, 1>;
1067 // VEOR : Vector Bitwise Exclusive OR
1068 def VEORd : N3VD<1, 0, 0b00, 0b0001, 1, "veor", v2i32, v2i32, xor, 1>;
1069 def VEORq : N3VQ<1, 0, 0b00, 0b0001, 1, "veor", v4i32, v4i32, xor, 1>;
1071 // VORR : Vector Bitwise OR
1072 def VORRd : N3VD<0, 0, 0b10, 0b0001, 1, "vorr", v2i32, v2i32, or, 1>;
1073 def VORRq : N3VQ<0, 0, 0b10, 0b0001, 1, "vorr", v4i32, v4i32, or, 1>;
1075 // VBIC : Vector Bitwise Bit Clear (AND NOT)
1076 def VBICd : N3V<0, 0, 0b01, 0b0001, 0, 1, (outs DPR:$dst),
1077 (ins DPR:$src1, DPR:$src2), "vbic\t$dst, $src1, $src2", "",
1078 [(set DPR:$dst, (v2i32 (and DPR:$src1,(vnot DPR:$src2))))]>;
1079 def VBICq : N3V<0, 0, 0b01, 0b0001, 1, 1, (outs QPR:$dst),
1080 (ins QPR:$src1, QPR:$src2), "vbic\t$dst, $src1, $src2", "",
1081 [(set QPR:$dst, (v4i32 (and QPR:$src1,(vnot QPR:$src2))))]>;
1083 // VORN : Vector Bitwise OR NOT
1084 def VORNd : N3V<0, 0, 0b11, 0b0001, 0, 1, (outs DPR:$dst),
1085 (ins DPR:$src1, DPR:$src2), "vorn\t$dst, $src1, $src2", "",
1086 [(set DPR:$dst, (v2i32 (or DPR:$src1, (vnot DPR:$src2))))]>;
1087 def VORNq : N3V<0, 0, 0b11, 0b0001, 1, 1, (outs QPR:$dst),
1088 (ins QPR:$src1, QPR:$src2), "vorn\t$dst, $src1, $src2", "",
1089 [(set QPR:$dst, (v4i32 (or QPR:$src1, (vnot QPR:$src2))))]>;
1091 // VMVN : Vector Bitwise NOT
1092 def VMVNd : N2V<0b11, 0b11, 0b00, 0b00, 0b01011, 0, 0,
1093 (outs DPR:$dst), (ins DPR:$src), "vmvn\t$dst, $src", "",
1094 [(set DPR:$dst, (v2i32 (vnot DPR:$src)))]>;
1095 def VMVNq : N2V<0b11, 0b11, 0b00, 0b00, 0b01011, 1, 0,
1096 (outs QPR:$dst), (ins QPR:$src), "vmvn\t$dst, $src", "",
1097 [(set QPR:$dst, (v4i32 (vnot QPR:$src)))]>;
1098 def : Pat<(v2i32 (vnot_conv DPR:$src)), (VMVNd DPR:$src)>;
1099 def : Pat<(v4i32 (vnot_conv QPR:$src)), (VMVNq QPR:$src)>;
1101 // VBSL : Vector Bitwise Select
// Select pattern: (src2 & src1) | (src3 & ~src1), with src1 tied to dst.
// NOTE(review): the "[(set DPR:$dst," / "[(set QPR:$dst," lines (original
// lines 1105 and 1111) are missing from this copy — verify against the
// original file before editing these two defs.
1102 def VBSLd : N3V<1, 0, 0b01, 0b0001, 0, 1, (outs DPR:$dst),
1103 (ins DPR:$src1, DPR:$src2, DPR:$src3),
1104 "vbsl\t$dst, $src2, $src3", "$src1 = $dst",
1106 (v2i32 (or (and DPR:$src2, DPR:$src1),
1107 (and DPR:$src3, (vnot DPR:$src1)))))]>;
1108 def VBSLq : N3V<1, 0, 0b01, 0b0001, 1, 1, (outs QPR:$dst),
1109 (ins QPR:$src1, QPR:$src2, QPR:$src3),
1110 "vbsl\t$dst, $src2, $src3", "$src1 = $dst",
1112 (v4i32 (or (and QPR:$src2, QPR:$src1),
1113 (and QPR:$src3, (vnot QPR:$src1)))))]>;
1115 // VBIF : Vector Bitwise Insert if False
1116 // like VBSL but with: "vbif\t$dst, $src3, $src1", "$src2 = $dst",
1117 // VBIT : Vector Bitwise Insert if True
1118 // like VBSL but with: "vbit\t$dst, $src2, $src1", "$src3 = $dst",
1119 // These are not yet implemented. The TwoAddress pass will not go looking
1120 // for equivalent operations with different register constraints; it just
1123 // Vector Absolute Differences.
1125 // VABD : Vector Absolute Difference
1126 defm VABDs : N3VInt_QHS<0, 0, 0b0111, 0, "vabd.s", int_arm_neon_vabds, 0>;
1127 defm VABDu : N3VInt_QHS<1, 0, 0b0111, 0, "vabd.u", int_arm_neon_vabdu, 0>;
1128 def VABDfd : N3VDInt<1, 0, 0b10, 0b1101, 0, "vabd.f32", v2f32, v2f32,
1129 int_arm_neon_vabdf, 0>;
1130 def VABDfq : N3VQInt<1, 0, 0b10, 0b1101, 0, "vabd.f32", v4f32, v4f32,
1131 int_arm_neon_vabdf, 0>;
1133 // VABDL : Vector Absolute Difference Long (Q = | D - D |)
1134 defm VABDLs : N3VLInt_QHS<0,1,0b0111,0, "vabdl.s", int_arm_neon_vabdls, 0>;
1135 defm VABDLu : N3VLInt_QHS<1,1,0b0111,0, "vabdl.u", int_arm_neon_vabdlu, 0>;
1137 // VABA : Vector Absolute Difference and Accumulate
1138 defm VABAs : N3VInt3_QHS<0,1,0b0101,0, "vaba.s", int_arm_neon_vabas>;
1139 defm VABAu : N3VInt3_QHS<1,1,0b0101,0, "vaba.u", int_arm_neon_vabau>;
1141 // VABAL : Vector Absolute Difference and Accumulate Long (Q += | D - D |)
1142 defm VABALs : N3VLInt3_QHS<0,1,0b0101,0, "vabal.s", int_arm_neon_vabals>;
1143 defm VABALu : N3VLInt3_QHS<1,1,0b0101,0, "vabal.u", int_arm_neon_vabalu>;
1145 // Vector Maximum and Minimum.
1147 // VMAX : Vector Maximum
1148 defm VMAXs : N3VInt_QHS<0, 0, 0b0110, 0, "vmax.s", int_arm_neon_vmaxs, 1>;
1149 defm VMAXu : N3VInt_QHS<1, 0, 0b0110, 0, "vmax.u", int_arm_neon_vmaxu, 1>;
1150 def VMAXfd : N3VDInt<0, 0, 0b00, 0b1111, 0, "vmax.f32", v2f32, v2f32,
1151 int_arm_neon_vmaxf, 1>;
1152 def VMAXfq : N3VQInt<0, 0, 0b00, 0b1111, 0, "vmax.f32", v4f32, v4f32,
1153 int_arm_neon_vmaxf, 1>;
1155 // VMIN : Vector Minimum
// VMIN differs from VMAX by op4 (integer forms) or size bits 0b10 (f32).
1156 defm VMINs : N3VInt_QHS<0, 0, 0b0110, 1, "vmin.s", int_arm_neon_vmins, 1>;
1157 defm VMINu : N3VInt_QHS<1, 0, 0b0110, 1, "vmin.u", int_arm_neon_vminu, 1>;
1158 def VMINfd : N3VDInt<0, 0, 0b10, 0b1111, 0, "vmin.f32", v2f32, v2f32,
1159 int_arm_neon_vminf, 1>;
1160 def VMINfq : N3VQInt<0, 0, 0b10, 0b1111, 0, "vmin.f32", v4f32, v4f32,
1161 int_arm_neon_vminf, 1>;
1163 // Vector Pairwise Operations.
// Pairwise ops operate only on 64-bit (D) registers; there are no Q forms
// defined here for VPADD/VPMAX/VPMIN.
1165 // VPADD : Vector Pairwise Add
1166 def VPADDi8 : N3VDInt<0, 0, 0b00, 0b1011, 1, "vpadd.i8", v8i8, v8i8,
1167 int_arm_neon_vpaddi, 0>;
1168 def VPADDi16 : N3VDInt<0, 0, 0b01, 0b1011, 1, "vpadd.i16", v4i16, v4i16,
1169 int_arm_neon_vpaddi, 0>;
1170 def VPADDi32 : N3VDInt<0, 0, 0b10, 0b1011, 1, "vpadd.i32", v2i32, v2i32,
1171 int_arm_neon_vpaddi, 0>;
1172 def VPADDf : N3VDInt<1, 0, 0b00, 0b1101, 0, "vpadd.f32", v2f32, v2f32,
1173 int_arm_neon_vpaddf, 0>;
1175 // VPADDL : Vector Pairwise Add Long
1176 defm VPADDLs : N2VPLInt_QHS<0b11, 0b11, 0b00, 0b00100, 0, "vpaddl.s",
1177 int_arm_neon_vpaddls>;
1178 defm VPADDLu : N2VPLInt_QHS<0b11, 0b11, 0b00, 0b00101, 0, "vpaddl.u",
1179 int_arm_neon_vpaddlu>;
1181 // VPADAL : Vector Pairwise Add and Accumulate Long
1182 defm VPADALs : N2VPLInt2_QHS<0b11, 0b11, 0b00, 0b00100, 0, "vpadal.s",
1183 int_arm_neon_vpadals>;
1184 defm VPADALu : N2VPLInt2_QHS<0b11, 0b11, 0b00, 0b00101, 0, "vpadal.u",
1185 int_arm_neon_vpadalu>;
1187 // VPMAX : Vector Pairwise Maximum
1188 def VPMAXs8 : N3VDInt<0, 0, 0b00, 0b1010, 0, "vpmax.s8", v8i8, v8i8,
1189 int_arm_neon_vpmaxs, 0>;
1190 def VPMAXs16 : N3VDInt<0, 0, 0b01, 0b1010, 0, "vpmax.s16", v4i16, v4i16,
1191 int_arm_neon_vpmaxs, 0>;
1192 def VPMAXs32 : N3VDInt<0, 0, 0b10, 0b1010, 0, "vpmax.s32", v2i32, v2i32,
1193 int_arm_neon_vpmaxs, 0>;
1194 def VPMAXu8 : N3VDInt<1, 0, 0b00, 0b1010, 0, "vpmax.u8", v8i8, v8i8,
1195 int_arm_neon_vpmaxu, 0>;
1196 def VPMAXu16 : N3VDInt<1, 0, 0b01, 0b1010, 0, "vpmax.u16", v4i16, v4i16,
1197 int_arm_neon_vpmaxu, 0>;
1198 def VPMAXu32 : N3VDInt<1, 0, 0b10, 0b1010, 0, "vpmax.u32", v2i32, v2i32,
1199 int_arm_neon_vpmaxu, 0>;
1200 def VPMAXf : N3VDInt<1, 0, 0b00, 0b1111, 0, "vpmax.f32", v2f32, v2f32,
1201 int_arm_neon_vpmaxf, 0>;
1203 // VPMIN : Vector Pairwise Minimum
// Same layout as VPMAX with op4 = 1 (integer) or size bits 0b10 (f32).
1204 def VPMINs8 : N3VDInt<0, 0, 0b00, 0b1010, 1, "vpmin.s8", v8i8, v8i8,
1205 int_arm_neon_vpmins, 0>;
1206 def VPMINs16 : N3VDInt<0, 0, 0b01, 0b1010, 1, "vpmin.s16", v4i16, v4i16,
1207 int_arm_neon_vpmins, 0>;
1208 def VPMINs32 : N3VDInt<0, 0, 0b10, 0b1010, 1, "vpmin.s32", v2i32, v2i32,
1209 int_arm_neon_vpmins, 0>;
1210 def VPMINu8 : N3VDInt<1, 0, 0b00, 0b1010, 1, "vpmin.u8", v8i8, v8i8,
1211 int_arm_neon_vpminu, 0>;
1212 def VPMINu16 : N3VDInt<1, 0, 0b01, 0b1010, 1, "vpmin.u16", v4i16, v4i16,
1213 int_arm_neon_vpminu, 0>;
1214 def VPMINu32 : N3VDInt<1, 0, 0b10, 0b1010, 1, "vpmin.u32", v2i32, v2i32,
1215 int_arm_neon_vpminu, 0>;
1216 def VPMINf : N3VDInt<1, 0, 0b10, 0b1111, 0, "vpmin.f32", v2f32, v2f32,
1217 int_arm_neon_vpminf, 0>;
1219 // Vector Reciprocal and Reciprocal Square Root Estimate and Step.
1221 // VRECPE : Vector Reciprocal Estimate
1222 def VRECPEd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0, "vrecpe.u32",
1223 v2i32, v2i32, int_arm_neon_vrecpe>;
1224 def VRECPEq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01000, 0, "vrecpe.u32",
1225 v4i32, v4i32, int_arm_neon_vrecpe>;
1226 def VRECPEfd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01010, 0, "vrecpe.f32",
1227 v2f32, v2f32, int_arm_neon_vrecpef>;
1228 def VRECPEfq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01010, 0, "vrecpe.f32",
1229 v4f32, v4f32, int_arm_neon_vrecpef>;
1231 // VRECPS : Vector Reciprocal Step
1232 def VRECPSfd : N3VDInt<0, 0, 0b00, 0b1111, 1, "vrecps.f32", v2f32, v2f32,
1233 int_arm_neon_vrecps, 1>;
1234 def VRECPSfq : N3VQInt<0, 0, 0b00, 0b1111, 1, "vrecps.f32", v4f32, v4f32,
1235 int_arm_neon_vrecps, 1>;
1237 // VRSQRTE : Vector Reciprocal Square Root Estimate
1238 def VRSQRTEd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01001, 0, "vrsqrte.u32",
1239 v2i32, v2i32, int_arm_neon_vrsqrte>;
1240 def VRSQRTEq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01001, 0, "vrsqrte.u32",
1241 v4i32, v4i32, int_arm_neon_vrsqrte>;
1242 def VRSQRTEfd : N2VDInt<0b11, 0b11, 0b10, 0b11, 0b01011, 0, "vrsqrte.f32",
1243 v2f32, v2f32, int_arm_neon_vrsqrtef>;
1244 def VRSQRTEfq : N2VQInt<0b11, 0b11, 0b10, 0b11, 0b01011, 0, "vrsqrte.f32",
1245 v4f32, v4f32, int_arm_neon_vrsqrtef>;
1247 // VRSQRTS : Vector Reciprocal Square Root Step
1248 def VRSQRTSfd : N3VDInt<0, 0, 0b10, 0b1111, 1, "vrsqrts.f32", v2f32, v2f32,
1249 int_arm_neon_vrsqrts, 1>;
1250 def VRSQRTSfq : N3VQInt<0, 0, 0b10, 0b1111, 1, "vrsqrts.f32", v4f32, v4f32,
1251 int_arm_neon_vrsqrts, 1>;
// Vector shift instructions. Register-shift forms go through N3VInt_QHSD
// (intrinsics); immediate-shift forms go through N2VSh_QHSD / N2VLSh /
// N2VNSh with the NEONvsh* DAG nodes declared at the top of the file.
1255 // VSHL : Vector Shift
1256 defm VSHLs : N3VInt_QHSD<0, 0, 0b0100, 0, "vshl.s", int_arm_neon_vshifts, 0>;
1257 defm VSHLu : N3VInt_QHSD<1, 0, 0b0100, 0, "vshl.u", int_arm_neon_vshiftu, 0>;
1258 // VSHL : Vector Shift Left (Immediate)
1259 defm VSHLi : N2VSh_QHSD<0, 1, 0b0111, 1, "vshl.i", NEONvshl>;
1260 // VSHR : Vector Shift Right (Immediate)
1261 defm VSHRs : N2VSh_QHSD<0, 1, 0b0000, 1, "vshr.s", NEONvshrs>;
1262 defm VSHRu : N2VSh_QHSD<1, 1, 0b0000, 1, "vshr.u", NEONvshru>;
1264 // VSHLL : Vector Shift Left Long
1265 def VSHLLs8 : N2VLSh<0, 1, 0b001000, 0b1010, 0, 0, 1, "vshll.s8",
1266 v8i16, v8i8, NEONvshlls>;
1267 def VSHLLs16 : N2VLSh<0, 1, 0b010000, 0b1010, 0, 0, 1, "vshll.s16",
1268 v4i32, v4i16, NEONvshlls>;
1269 def VSHLLs32 : N2VLSh<0, 1, 0b100000, 0b1010, 0, 0, 1, "vshll.s32",
1270 v2i64, v2i32, NEONvshlls>;
1271 def VSHLLu8 : N2VLSh<1, 1, 0b001000, 0b1010, 0, 0, 1, "vshll.u8",
1272 v8i16, v8i8, NEONvshllu>;
1273 def VSHLLu16 : N2VLSh<1, 1, 0b010000, 0b1010, 0, 0, 1, "vshll.u16",
1274 v4i32, v4i16, NEONvshllu>;
1275 def VSHLLu32 : N2VLSh<1, 1, 0b100000, 0b1010, 0, 0, 1, "vshll.u32",
1276 v2i64, v2i32, NEONvshllu>;
1278 // VSHLL : Vector Shift Left Long (with maximum shift count)
1279 def VSHLLi8 : N2VLSh<1, 1, 0b110010, 0b0011, 0, 0, 0, "vshll.i8",
1280 v8i16, v8i8, NEONvshlli>;
1281 def VSHLLi16 : N2VLSh<1, 1, 0b110110, 0b0011, 0, 0, 0, "vshll.i16",
1282 v4i32, v4i16, NEONvshlli>;
1283 def VSHLLi32 : N2VLSh<1, 1, 0b111010, 0b0011, 0, 0, 0, "vshll.i32",
1284 v2i64, v2i32, NEONvshlli>;
1286 // VSHRN : Vector Shift Right and Narrow
1287 def VSHRN16 : N2VNSh<0, 1, 0b001000, 0b1000, 0, 0, 1, "vshrn.i16",
1288 v8i8, v8i16, NEONvshrn>;
1289 def VSHRN32 : N2VNSh<0, 1, 0b010000, 0b1000, 0, 0, 1, "vshrn.i32",
1290 v4i16, v4i32, NEONvshrn>;
1291 def VSHRN64 : N2VNSh<0, 1, 0b100000, 0b1000, 0, 0, 1, "vshrn.i64",
1292 v2i32, v2i64, NEONvshrn>;
1294 // VRSHL : Vector Rounding Shift
1295 defm VRSHLs : N3VInt_QHSD<0,0,0b0101,0, "vrshl.s", int_arm_neon_vrshifts, 0>;
1296 defm VRSHLu : N3VInt_QHSD<1,0,0b0101,0, "vrshl.u", int_arm_neon_vrshiftu, 0>;
1297 // VRSHR : Vector Rounding Shift Right
1298 defm VRSHRs : N2VSh_QHSD<0, 1, 0b0010, 1, "vrshr.s", NEONvrshrs>;
1299 defm VRSHRu : N2VSh_QHSD<1, 1, 0b0010, 1, "vrshr.u", NEONvrshru>;
1301 // VRSHRN : Vector Rounding Shift Right and Narrow
1302 def VRSHRN16 : N2VNSh<0, 1, 0b001000, 0b1000, 0, 1, 1, "vrshrn.i16",
1303 v8i8, v8i16, NEONvrshrn>;
1304 def VRSHRN32 : N2VNSh<0, 1, 0b010000, 0b1000, 0, 1, 1, "vrshrn.i32",
1305 v4i16, v4i32, NEONvrshrn>;
1306 def VRSHRN64 : N2VNSh<0, 1, 0b100000, 0b1000, 0, 1, 1, "vrshrn.i64",
1307 v2i32, v2i64, NEONvrshrn>;
1309 // VQSHL : Vector Saturating Shift
1310 defm VQSHLs : N3VInt_QHSD<0,0,0b0100,1, "vqshl.s", int_arm_neon_vqshifts, 0>;
1311 defm VQSHLu : N3VInt_QHSD<1,0,0b0100,1, "vqshl.u", int_arm_neon_vqshiftu, 0>;
1312 // VQSHL : Vector Saturating Shift Left (Immediate)
1313 defm VQSHLsi : N2VSh_QHSD<0, 1, 0b0111, 1, "vqshl.s", NEONvqshls>;
1314 defm VQSHLui : N2VSh_QHSD<1, 1, 0b0111, 1, "vqshl.u", NEONvqshlu>;
1315 // VQSHLU : Vector Saturating Shift Left (Immediate, Unsigned)
1316 defm VQSHLsu : N2VSh_QHSD<1, 1, 0b0110, 1, "vqshlu.s", NEONvqshlsu>;
1318 // VQSHRN : Vector Saturating Shift Right and Narrow
1319 def VQSHRNs16 : N2VNSh<0, 1, 0b001000, 0b1001, 0, 0, 1, "vqshrn.s16",
1320 v8i8, v8i16, NEONvqshrns>;
1321 def VQSHRNs32 : N2VNSh<0, 1, 0b010000, 0b1001, 0, 0, 1, "vqshrn.s32",
1322 v4i16, v4i32, NEONvqshrns>;
1323 def VQSHRNs64 : N2VNSh<0, 1, 0b100000, 0b1001, 0, 0, 1, "vqshrn.s64",
1324 v2i32, v2i64, NEONvqshrns>;
1325 def VQSHRNu16 : N2VNSh<1, 1, 0b001000, 0b1001, 0, 0, 1, "vqshrn.u16",
1326 v8i8, v8i16, NEONvqshrnu>;
1327 def VQSHRNu32 : N2VNSh<1, 1, 0b010000, 0b1001, 0, 0, 1, "vqshrn.u32",
1328 v4i16, v4i32, NEONvqshrnu>;
1329 def VQSHRNu64 : N2VNSh<1, 1, 0b100000, 0b1001, 0, 0, 1, "vqshrn.u64",
1330 v2i32, v2i64, NEONvqshrnu>;
1332 // VQSHRUN : Vector Saturating Shift Right and Narrow (Unsigned)
1333 def VQSHRUN16 : N2VNSh<1, 1, 0b001000, 0b1000, 0, 0, 1, "vqshrun.s16",
1334 v8i8, v8i16, NEONvqshrnsu>;
1335 def VQSHRUN32 : N2VNSh<1, 1, 0b010000, 0b1000, 0, 0, 1, "vqshrun.s32",
1336 v4i16, v4i32, NEONvqshrnsu>;
1337 def VQSHRUN64 : N2VNSh<1, 1, 0b100000, 0b1000, 0, 0, 1, "vqshrun.s64",
1338 v2i32, v2i64, NEONvqshrnsu>;
1340 // VQRSHL : Vector Saturating Rounding Shift
1341 defm VQRSHLs : N3VInt_QHSD<0, 0, 0b0101, 1, "vqrshl.s",
1342 int_arm_neon_vqrshifts, 0>;
1343 defm VQRSHLu : N3VInt_QHSD<1, 0, 0b0101, 1, "vqrshl.u",
1344 int_arm_neon_vqrshiftu, 0>;
1346 // VQRSHRN : Vector Saturating Rounding Shift Right and Narrow
1347 def VQRSHRNs16: N2VNSh<0, 1, 0b001000, 0b1001, 0, 1, 1, "vqrshrn.s16",
1348 v8i8, v8i16, NEONvqrshrns>;
1349 def VQRSHRNs32: N2VNSh<0, 1, 0b010000, 0b1001, 0, 1, 1, "vqrshrn.s32",
1350 v4i16, v4i32, NEONvqrshrns>;
1351 def VQRSHRNs64: N2VNSh<0, 1, 0b100000, 0b1001, 0, 1, 1, "vqrshrn.s64",
1352 v2i32, v2i64, NEONvqrshrns>;
1353 def VQRSHRNu16: N2VNSh<1, 1, 0b001000, 0b1001, 0, 1, 1, "vqrshrn.u16",
1354 v8i8, v8i16, NEONvqrshrnu>;
1355 def VQRSHRNu32: N2VNSh<1, 1, 0b010000, 0b1001, 0, 1, 1, "vqrshrn.u32",
1356 v4i16, v4i32, NEONvqrshrnu>;
1357 def VQRSHRNu64: N2VNSh<1, 1, 0b100000, 0b1001, 0, 1, 1, "vqrshrn.u64",
1358 v2i32, v2i64, NEONvqrshrnu>;
1360 // VQRSHRUN : Vector Saturating Rounding Shift Right and Narrow (Unsigned)
1361 def VQRSHRUN16: N2VNSh<1, 1, 0b001000, 0b1000, 0, 1, 1, "vqrshrun.s16",
1362 v8i8, v8i16, NEONvqrshrnsu>;
1363 def VQRSHRUN32: N2VNSh<1, 1, 0b010000, 0b1000, 0, 1, 1, "vqrshrun.s32",
1364 v4i16, v4i32, NEONvqrshrnsu>;
1365 def VQRSHRUN64: N2VNSh<1, 1, 0b100000, 0b1000, 0, 1, 1, "vqrshrun.s64",
1366 v2i32, v2i64, NEONvqrshrnsu>;
1368 // VSRA : Vector Shift Right and Accumulate
1369 defm VSRAs : N2VShAdd_QHSD<0, 1, 0b0001, 1, "vsra.s", NEONvshrs>;
1370 defm VSRAu : N2VShAdd_QHSD<1, 1, 0b0001, 1, "vsra.u", NEONvshru>;
1371 // VRSRA : Vector Rounding Shift Right and Accumulate
1372 defm VRSRAs : N2VShAdd_QHSD<0, 1, 0b0011, 1, "vrsra.s", NEONvrshrs>;
1373 defm VRSRAu : N2VShAdd_QHSD<1, 1, 0b0011, 1, "vrsra.u", NEONvrshru>;
1375 // VSLI : Vector Shift Left and Insert
// The "." suffix is completed by the size string added in N2VShIns_QHSD.
1376 defm VSLI : N2VShIns_QHSD<1, 1, 0b0101, 1, "vsli.", NEONvsli>;
1377 // VSRI : Vector Shift Right and Insert
1378 defm VSRI : N2VShIns_QHSD<1, 1, 0b0100, 1, "vsri.", NEONvsri>;
1380 // Vector Absolute and Saturating Absolute.
1382 // VABS : Vector Absolute Value
// NOTE(review): the continuation line carrying the intrinsic operand of this
// defm appears to be missing from this chunk — confirm against the full file.
1383 defm VABS : N2VInt_QHS<0b11, 0b11, 0b01, 0b00110, 0, "vabs.s",
// Floating-point absolute value, D- and Q-register variants, mapped to the
// vabsf intrinsic; the anonymous N2VDInts def below also matches generic fabs.
1385 def VABSfd : N2VDInt<0b11, 0b11, 0b10, 0b01, 0b01110, 0, "vabs.f32",
1386 v2f32, v2f32, int_arm_neon_vabsf>;
1387 def VABSfq : N2VQInt<0b11, 0b11, 0b10, 0b01, 0b01110, 0, "vabs.f32",
1388 v4f32, v4f32, int_arm_neon_vabsf>;
1389 def : N2VDInts<fabs, VABSfd>;
1391 // VQABS : Vector Saturating Absolute Value
1392 defm VQABS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01110, 0, "vqabs.s",
1393 int_arm_neon_vqabs>;
// Integer negation is matched as (sub 0, x). vneg matches a plain
// all-zeros build_vector; vneg_conv additionally matches one hidden behind
// a bitconvert (immAllZerosV_bc), which the extra Pat defs below need.
1397 def vneg : PatFrag<(ops node:$in), (sub immAllZerosV, node:$in)>;
1398 def vneg_conv : PatFrag<(ops node:$in), (sub immAllZerosV_bc, node:$in)>;
// Shared instruction templates for 64-bit (D) and 128-bit (Q) integer VNEG.
1400 class VNEGD<bits<2> size, string OpcodeStr, ValueType Ty>
1401 : N2V<0b11, 0b11, size, 0b01, 0b00111, 0, 0, (outs DPR:$dst), (ins DPR:$src),
1402 !strconcat(OpcodeStr, "\t$dst, $src"), "",
1403 [(set DPR:$dst, (Ty (vneg DPR:$src)))]>;
1404 class VNEGQ<bits<2> size, string OpcodeStr, ValueType Ty>
1405 : N2V<0b11, 0b11, size, 0b01, 0b00111, 1, 0, (outs QPR:$dst), (ins QPR:$src),
1406 !strconcat(OpcodeStr, "\t$dst, $src"), "",
1407 [(set QPR:$dst, (Ty (vneg QPR:$src)))]>;
1409 // VNEG : Vector Negate
1410 def VNEGs8d : VNEGD<0b00, "vneg.s8", v8i8>;
1411 def VNEGs16d : VNEGD<0b01, "vneg.s16", v4i16>;
1412 def VNEGs32d : VNEGD<0b10, "vneg.s32", v2i32>;
1413 def VNEGs8q : VNEGQ<0b00, "vneg.s8", v16i8>;
1414 def VNEGs16q : VNEGQ<0b01, "vneg.s16", v8i16>;
1415 def VNEGs32q : VNEGQ<0b10, "vneg.s32", v4i32>;
1417 // VNEG : Vector Negate (floating-point)
// FP negation uses the generic fneg node directly; the anonymous N2VDInts
// def also covers scalar fneg via VNEGf32d.
1418 def VNEGf32d : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 0, 0,
1419 (outs DPR:$dst), (ins DPR:$src), "vneg.f32\t$dst, $src", "",
1420 [(set DPR:$dst, (v2f32 (fneg DPR:$src)))]>;
1421 def VNEGf32q : N2V<0b11, 0b11, 0b10, 0b01, 0b01111, 1, 0,
1422 (outs QPR:$dst), (ins QPR:$src), "vneg.f32\t$dst, $src", "",
1423 [(set QPR:$dst, (v4f32 (fneg QPR:$src)))]>;
1424 def : N2VDInts<fneg, VNEGf32d>;
// Catch (sub (bitconvert 0), x) forms that the vneg PatFrag in the
// instruction patterns above does not match.
1426 def : Pat<(v8i8 (vneg_conv DPR:$src)), (VNEGs8d DPR:$src)>;
1427 def : Pat<(v4i16 (vneg_conv DPR:$src)), (VNEGs16d DPR:$src)>;
1428 def : Pat<(v2i32 (vneg_conv DPR:$src)), (VNEGs32d DPR:$src)>;
1429 def : Pat<(v16i8 (vneg_conv QPR:$src)), (VNEGs8q QPR:$src)>;
1430 def : Pat<(v8i16 (vneg_conv QPR:$src)), (VNEGs16q QPR:$src)>;
1431 def : Pat<(v4i32 (vneg_conv QPR:$src)), (VNEGs32q QPR:$src)>;
1433 // VQNEG : Vector Saturating Negate
1434 defm VQNEG : N2VInt_QHS<0b11, 0b11, 0b00, 0b01111, 0, "vqneg.s",
1435 int_arm_neon_vqneg>;
1437 // Vector Bit Counting Operations.
1439 // VCLS : Vector Count Leading Sign Bits
// NOTE(review): the intrinsic-operand continuation lines of the VCLS and
// VCLZ defms appear to be missing from this chunk — confirm against the
// full file (expected int_arm_neon_vcls / int_arm_neon_vclz).
1440 defm VCLS : N2VInt_QHS<0b11, 0b11, 0b00, 0b01000, 0, "vcls.s",
1442 // VCLZ : Vector Count Leading Zeros
1443 defm VCLZ : N2VInt_QHS<0b11, 0b11, 0b00, 0b01001, 0, "vclz.i",
1445 // VCNT : Vector Count One Bits
// Popcount is byte-wise only (v8i8/v16i8), matching the vcnt.8 mnemonic.
1446 def VCNTd : N2VDInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0, "vcnt.8",
1447 v8i8, v8i8, int_arm_neon_vcnt>;
1448 def VCNTq : N2VQInt<0b11, 0b11, 0b00, 0b00, 0b01010, 0, "vcnt.8",
1449 v16i8, v16i8, int_arm_neon_vcnt>;
1451 // Vector Move Operations.
1453 // VMOV : Vector Move (Register)
// Register-to-register moves carry no selection pattern ([]); they are
// emitted directly by copy lowering.
1455 def VMOVD : N3V<0, 0, 0b10, 0b0001, 0, 1, (outs DPR:$dst), (ins DPR:$src),
1456 "vmov\t$dst, $src", "", []>;
1457 def VMOVQ : N3V<0, 0, 0b10, 0b0001, 1, 1, (outs QPR:$dst), (ins QPR:$src),
1458 "vmov\t$dst, $src", "", []>;
1460 // VMOV : Vector Move (Immediate)
1462 // VMOV_get_imm8 xform function: convert build_vector to VMOV.i8 imm.
// Each VMOV_get_immN xform asks ARM::getVMOVImm for an N-byte-element
// encoding of the build_vector; the matching vmovImmN PatLeaf only matches
// when that call succeeds (non-null node).
// NOTE(review): the "}]>;" closers of several of these code blocks appear
// to be missing from this chunk — confirm against the full file.
1463 def VMOV_get_imm8 : SDNodeXForm<build_vector, [{
1464 return ARM::getVMOVImm(N, 1, *CurDAG);
1466 def vmovImm8 : PatLeaf<(build_vector), [{
1467 return ARM::getVMOVImm(N, 1, *CurDAG).getNode() != 0;
1470 // VMOV_get_imm16 xform function: convert build_vector to VMOV.i16 imm.
1471 def VMOV_get_imm16 : SDNodeXForm<build_vector, [{
1472 return ARM::getVMOVImm(N, 2, *CurDAG);
1474 def vmovImm16 : PatLeaf<(build_vector), [{
1475 return ARM::getVMOVImm(N, 2, *CurDAG).getNode() != 0;
1476 }], VMOV_get_imm16>;
1478 // VMOV_get_imm32 xform function: convert build_vector to VMOV.i32 imm.
1479 def VMOV_get_imm32 : SDNodeXForm<build_vector, [{
1480 return ARM::getVMOVImm(N, 4, *CurDAG);
1482 def vmovImm32 : PatLeaf<(build_vector), [{
1483 return ARM::getVMOVImm(N, 4, *CurDAG).getNode() != 0;
1484 }], VMOV_get_imm32>;
1486 // VMOV_get_imm64 xform function: convert build_vector to VMOV.i64 imm.
1487 def VMOV_get_imm64 : SDNodeXForm<build_vector, [{
1488 return ARM::getVMOVImm(N, 8, *CurDAG);
1490 def vmovImm64 : PatLeaf<(build_vector), [{
1491 return ARM::getVMOVImm(N, 8, *CurDAG).getNode() != 0;
1492 }], VMOV_get_imm64>;
1494 // Note: Some of the cmode bits in the following VMOV instructions need to
1495 // be encoded based on the immed values.
// Immediate splat moves: one D and one Q variant per element size, selecting
// on the vmovImmN PatLeaf above.
1497 def VMOVv8i8 : N1ModImm<1, 0b000, 0b1110, 0, 0, 0, 1, (outs DPR:$dst),
1498 (ins i8imm:$SIMM), "vmov.i8\t$dst, $SIMM", "",
1499 [(set DPR:$dst, (v8i8 vmovImm8:$SIMM))]>;
1500 def VMOVv16i8 : N1ModImm<1, 0b000, 0b1110, 0, 1, 0, 1, (outs QPR:$dst),
1501 (ins i8imm:$SIMM), "vmov.i8\t$dst, $SIMM", "",
1502 [(set QPR:$dst, (v16i8 vmovImm8:$SIMM))]>;
1504 def VMOVv4i16 : N1ModImm<1, 0b000, 0b1000, 0, 0, 0, 1, (outs DPR:$dst),
1505 (ins i16imm:$SIMM), "vmov.i16\t$dst, $SIMM", "",
1506 [(set DPR:$dst, (v4i16 vmovImm16:$SIMM))]>;
1507 def VMOVv8i16 : N1ModImm<1, 0b000, 0b1000, 0, 1, 0, 1, (outs QPR:$dst),
1508 (ins i16imm:$SIMM), "vmov.i16\t$dst, $SIMM", "",
1509 [(set QPR:$dst, (v8i16 vmovImm16:$SIMM))]>;
1511 def VMOVv2i32 : N1ModImm<1, 0b000, 0b0000, 0, 0, 0, 1, (outs DPR:$dst),
1512 (ins i32imm:$SIMM), "vmov.i32\t$dst, $SIMM", "",
1513 [(set DPR:$dst, (v2i32 vmovImm32:$SIMM))]>;
1514 def VMOVv4i32 : N1ModImm<1, 0b000, 0b0000, 0, 1, 0, 1, (outs QPR:$dst),
1515 (ins i32imm:$SIMM), "vmov.i32\t$dst, $SIMM", "",
1516 [(set QPR:$dst, (v4i32 vmovImm32:$SIMM))]>;
1518 def VMOVv1i64 : N1ModImm<1, 0b000, 0b1110, 0, 0, 1, 1, (outs DPR:$dst),
1519 (ins i64imm:$SIMM), "vmov.i64\t$dst, $SIMM", "",
1520 [(set DPR:$dst, (v1i64 vmovImm64:$SIMM))]>;
1521 def VMOVv2i64 : N1ModImm<1, 0b000, 0b1110, 0, 1, 1, 1, (outs QPR:$dst),
1522 (ins i64imm:$SIMM), "vmov.i64\t$dst, $SIMM", "",
1523 [(set QPR:$dst, (v2i64 vmovImm64:$SIMM))]>;
1525 // VMOV : Vector Get Lane (move scalar to ARM core register)
// Sign-/zero-extending (s8/s16/u8/u16) and plain 32-bit lane extracts from a
// D register into a GPR.
// NOTE(review): the "imm:$lane))]>;" continuation lines closing each pattern
// appear to be missing from this chunk — confirm against the full file.
1527 def VGETLNs8 : NVGetLane<0b11100101, 0b1011, 0b00,
1528 (outs GPR:$dst), (ins DPR:$src, i32imm:$lane),
1529 "vmov", ".s8\t$dst, $src[$lane]",
1530 [(set GPR:$dst, (NEONvgetlanes (v8i8 DPR:$src),
1532 def VGETLNs16 : NVGetLane<0b11100001, 0b1011, 0b01,
1533 (outs GPR:$dst), (ins DPR:$src, i32imm:$lane),
1534 "vmov", ".s16\t$dst, $src[$lane]",
1535 [(set GPR:$dst, (NEONvgetlanes (v4i16 DPR:$src),
1537 def VGETLNu8 : NVGetLane<0b11101101, 0b1011, 0b00,
1538 (outs GPR:$dst), (ins DPR:$src, i32imm:$lane),
1539 "vmov", ".u8\t$dst, $src[$lane]",
1540 [(set GPR:$dst, (NEONvgetlaneu (v8i8 DPR:$src),
1542 def VGETLNu16 : NVGetLane<0b11101001, 0b1011, 0b01,
1543 (outs GPR:$dst), (ins DPR:$src, i32imm:$lane),
1544 "vmov", ".u16\t$dst, $src[$lane]",
1545 [(set GPR:$dst, (NEONvgetlaneu (v4i16 DPR:$src),
1547 def VGETLNi32 : NVGetLane<0b11100001, 0b1011, 0b00,
1548 (outs GPR:$dst), (ins DPR:$src, i32imm:$lane),
1549 "vmov", ".32\t$dst, $src[$lane]",
1550 [(set GPR:$dst, (extractelt (v2i32 DPR:$src),
1552 // def VGETLNf32: see FMRDH and FMRDL in ARMInstrVFP.td
// Q-register extracts: pick the D subregister containing the lane
// (SubReg_*_reg) and renumber the lane within it (SubReg_*_lane), then use
// the D-register instruction.
1553 def : Pat<(NEONvgetlanes (v16i8 QPR:$src), imm:$lane),
1554 (VGETLNs8 (v8i8 (EXTRACT_SUBREG QPR:$src,
1555 (SubReg_i8_reg imm:$lane))),
1556 (SubReg_i8_lane imm:$lane))>;
1557 def : Pat<(NEONvgetlanes (v8i16 QPR:$src), imm:$lane),
1558 (VGETLNs16 (v4i16 (EXTRACT_SUBREG QPR:$src,
1559 (SubReg_i16_reg imm:$lane))),
1560 (SubReg_i16_lane imm:$lane))>;
1561 def : Pat<(NEONvgetlaneu (v16i8 QPR:$src), imm:$lane),
1562 (VGETLNu8 (v8i8 (EXTRACT_SUBREG QPR:$src,
1563 (SubReg_i8_reg imm:$lane))),
1564 (SubReg_i8_lane imm:$lane))>;
1565 def : Pat<(NEONvgetlaneu (v8i16 QPR:$src), imm:$lane),
1566 (VGETLNu16 (v4i16 (EXTRACT_SUBREG QPR:$src,
1567 (SubReg_i16_reg imm:$lane))),
1568 (SubReg_i16_lane imm:$lane))>;
1569 def : Pat<(extractelt (v4i32 QPR:$src), imm:$lane),
1570 (VGETLNi32 (v2i32 (EXTRACT_SUBREG QPR:$src,
1571 (SubReg_i32_reg imm:$lane))),
1572 (SubReg_i32_lane imm:$lane))>;
// 64-bit element extract is a pure subregister copy; the v2i64 form is
// commented out (presumably pending support) while v2f64 is live.
1573 //def : Pat<(extractelt (v2i64 QPR:$src1), imm:$src2),
1574 //          (EXTRACT_SUBREG QPR:$src1, (SubReg_f64_reg imm:$src2))>;
1575 def : Pat<(extractelt (v2f64 QPR:$src1), imm:$src2),
1576 (EXTRACT_SUBREG QPR:$src1, (SubReg_f64_reg imm:$src2))>;
1579 // VMOV : Vector Set Lane (move ARM core register to scalar)
// The destination vector is also an input (read-modify-write of one lane),
// hence the $src1 = $dst constraint.
// NOTE(review): the closing "}" of this let block appears to be missing from
// this chunk — confirm against the full file.
1581 let Constraints = "$src1 = $dst" in {
1582 def VSETLNi8 : NVSetLane<0b11100100, 0b1011, 0b00, (outs DPR:$dst),
1583 (ins DPR:$src1, GPR:$src2, i32imm:$lane),
1584 "vmov", ".8\t$dst[$lane], $src2",
1585 [(set DPR:$dst, (vector_insert (v8i8 DPR:$src1),
1586 GPR:$src2, imm:$lane))]>;
1587 def VSETLNi16 : NVSetLane<0b11100000, 0b1011, 0b01, (outs DPR:$dst),
1588 (ins DPR:$src1, GPR:$src2, i32imm:$lane),
1589 "vmov", ".16\t$dst[$lane], $src2",
1590 [(set DPR:$dst, (vector_insert (v4i16 DPR:$src1),
1591 GPR:$src2, imm:$lane))]>;
1592 def VSETLNi32 : NVSetLane<0b11100000, 0b1011, 0b00, (outs DPR:$dst),
1593 (ins DPR:$src1, GPR:$src2, i32imm:$lane),
1594 "vmov", ".32\t$dst[$lane], $src2",
1595 [(set DPR:$dst, (insertelt (v2i32 DPR:$src1),
1596 GPR:$src2, imm:$lane))]>;
// Q-register inserts: extract the D subregister holding the lane, set the
// lane with the D-register instruction, and insert the result back.
1598 def : Pat<(vector_insert (v16i8 QPR:$src1), GPR:$src2, imm:$lane),
1599 (v16i8 (INSERT_SUBREG QPR:$src1,
1600 (VSETLNi8 (v8i8 (EXTRACT_SUBREG QPR:$src1,
1601 (SubReg_i8_reg imm:$lane))),
1602 GPR:$src2, (SubReg_i8_lane imm:$lane)),
1603 (SubReg_i8_reg imm:$lane)))>;
1604 def : Pat<(vector_insert (v8i16 QPR:$src1), GPR:$src2, imm:$lane),
1605 (v8i16 (INSERT_SUBREG QPR:$src1,
1606 (VSETLNi16 (v4i16 (EXTRACT_SUBREG QPR:$src1,
1607 (SubReg_i16_reg imm:$lane))),
1608 GPR:$src2, (SubReg_i16_lane imm:$lane)),
1609 (SubReg_i16_reg imm:$lane)))>;
1610 def : Pat<(insertelt (v4i32 QPR:$src1), GPR:$src2, imm:$lane),
1611 (v4i32 (INSERT_SUBREG QPR:$src1,
1612 (VSETLNi32 (v2i32 (EXTRACT_SUBREG QPR:$src1,
1613 (SubReg_i32_reg imm:$lane))),
1614 GPR:$src2, (SubReg_i32_lane imm:$lane)),
1615 (SubReg_i32_reg imm:$lane)))>;
// 64-bit element insert is a pure subregister insert; the v2i64 form is
// commented out (presumably pending support) while v2f64 is live.
1617 //def : Pat<(v2i64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
1618 //          (INSERT_SUBREG QPR:$src1, DPR:$src2, (SubReg_f64_reg imm:$src3))>;
1619 def : Pat<(v2f64 (insertelt QPR:$src1, DPR:$src2, imm:$src3)),
1620 (INSERT_SUBREG QPR:$src1, DPR:$src2, (SubReg_f64_reg imm:$src3))>;
1622 // VDUP : Vector Duplicate (from ARM core register to all elements)
// splat_lo matches a vector_shuffle that splats element 0 of its first
// operand; combined with scalar_to_vector it recognizes a GPR broadcast.
1624 def splat_lo : PatFrag<(ops node:$lhs, node:$rhs),
1625 (vector_shuffle node:$lhs, node:$rhs), [{
1626 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1627 return SVOp->isSplat() && SVOp->getSplatIndex() == 0;
// D- and Q-register templates for GPR-to-all-lanes duplication.
1630 class VDUPD<bits<8> opcod1, bits<2> opcod3, string asmSize, ValueType Ty>
1631 : NVDup<opcod1, 0b1011, opcod3, (outs DPR:$dst), (ins GPR:$src),
1632 "vdup", !strconcat(asmSize, "\t$dst, $src"),
1633 [(set DPR:$dst, (Ty (splat_lo (scalar_to_vector GPR:$src), undef)))]>;
1634 class VDUPQ<bits<8> opcod1, bits<2> opcod3, string asmSize, ValueType Ty>
1635 : NVDup<opcod1, 0b1011, opcod3, (outs QPR:$dst), (ins GPR:$src),
1636 "vdup", !strconcat(asmSize, "\t$dst, $src"),
1637 [(set QPR:$dst, (Ty (splat_lo (scalar_to_vector GPR:$src), undef)))]>;
1639 def VDUP8d : VDUPD<0b11101100, 0b00, ".8", v8i8>;
1640 def VDUP16d : VDUPD<0b11101000, 0b01, ".16", v4i16>;
1641 def VDUP32d : VDUPD<0b11101000, 0b00, ".32", v2i32>;
1642 def VDUP8q : VDUPQ<0b11101110, 0b00, ".8", v16i8>;
1643 def VDUP16q : VDUPQ<0b11101010, 0b01, ".16", v8i16>;
1644 def VDUP32q : VDUPQ<0b11101010, 0b00, ".32", v4i32>;
// f32 variants go through (bitconvert GPR) since the source is a core reg.
// NOTE(review): the scalar_to_vector / closing lines of these two patterns
// appear to be missing from this chunk — confirm against the full file.
1646 def VDUPfd : NVDup<0b11101000, 0b1011, 0b00, (outs DPR:$dst), (ins GPR:$src),
1647 "vdup", ".32\t$dst, $src",
1648 [(set DPR:$dst, (v2f32 (splat_lo
1650 (f32 (bitconvert GPR:$src))),
1652 def VDUPfq : NVDup<0b11101010, 0b1011, 0b00, (outs QPR:$dst), (ins GPR:$src),
1653 "vdup", ".32\t$dst, $src",
1654 [(set QPR:$dst, (v4f32 (splat_lo
1656 (f32 (bitconvert GPR:$src))),
1659 // VDUP : Vector Duplicate Lane (from scalar to all elements)
// splat_lane matches any splat shuffle and, via SHUFFLE_get_splat_lane,
// turns the splat index into the $lane immediate operand.
1661 def SHUFFLE_get_splat_lane : SDNodeXForm<vector_shuffle, [{
1662 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1663 return CurDAG->getTargetConstant(SVOp->getSplatIndex(), MVT::i32);
1666 def splat_lane : PatFrag<(ops node:$lhs, node:$rhs),
1667 (vector_shuffle node:$lhs, node:$rhs), [{
1668 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1669 return SVOp->isSplat();
1670 }], SHUFFLE_get_splat_lane>;
// D-register lane duplication: source and result are both D registers.
1672 class VDUPLND<bits<2> op19_18, bits<2> op17_16, string OpcodeStr, ValueType Ty>
1673 : N2V<0b11, 0b11, op19_18, op17_16, 0b11000, 0, 0,
1674 (outs DPR:$dst), (ins DPR:$src, i32imm:$lane),
1675 !strconcat(OpcodeStr, "\t$dst, $src[$lane]"), "",
1676 [(set DPR:$dst, (Ty (splat_lane:$lane DPR:$src, undef)))]>;
1678 // vector_shuffle requires that the source and destination types match, so
1679 // VDUP to a 128-bit result uses a target-specific VDUPLANEQ node.
1680 class VDUPLNQ<bits<2> op19_18, bits<2> op17_16, string OpcodeStr,
1681 ValueType ResTy, ValueType OpTy>
1682 : N2V<0b11, 0b11, op19_18, op17_16, 0b11000, 1, 0,
1683 (outs QPR:$dst), (ins DPR:$src, i32imm:$lane),
1684 !strconcat(OpcodeStr, "\t$dst, $src[$lane]"), "",
1685 [(set QPR:$dst, (ResTy (NEONvduplaneq (OpTy DPR:$src), imm:$lane)))]>;
// Integer and f32 variants; f32 shares the .32 encoding with i32.
1687 def VDUPLN8d : VDUPLND<0b00, 0b01, "vdup.8", v8i8>;
1688 def VDUPLN16d : VDUPLND<0b00, 0b10, "vdup.16", v4i16>;
1689 def VDUPLN32d : VDUPLND<0b01, 0b00, "vdup.32", v2i32>;
1690 def VDUPLNfd : VDUPLND<0b01, 0b00, "vdup.32", v2f32>;
1691 def VDUPLN8q : VDUPLNQ<0b00, 0b01, "vdup.8", v16i8, v8i8>;
1692 def VDUPLN16q : VDUPLNQ<0b00, 0b10, "vdup.16", v8i16, v4i16>;
1693 def VDUPLN32q : VDUPLNQ<0b01, 0b00, "vdup.32", v4i32, v2i32>;
1694 def VDUPLNfq : VDUPLNQ<0b01, 0b00, "vdup.32", v4f32, v2f32>;
1696 // VMOVN : Vector Narrowing Move
// Narrowing (_HSD) and lengthening (_QHS) moves are pure intrinsic maps.
1697 defm VMOVN : N2VNInt_HSD<0b11,0b11,0b10,0b00100,0,0, "vmovn.i",
1698 int_arm_neon_vmovn>;
1699 // VQMOVN : Vector Saturating Narrowing Move
// s = signed saturate, u = unsigned saturate, su = signed input with
// unsigned saturated result (vqmovun mnemonic).
1700 defm VQMOVNs : N2VNInt_HSD<0b11,0b11,0b10,0b00101,0,0, "vqmovn.s",
1701 int_arm_neon_vqmovns>;
1702 defm VQMOVNu : N2VNInt_HSD<0b11,0b11,0b10,0b00101,1,0, "vqmovn.u",
1703 int_arm_neon_vqmovnu>;
1704 defm VQMOVNsu : N2VNInt_HSD<0b11,0b11,0b10,0b00100,1,0, "vqmovun.s",
1705 int_arm_neon_vqmovnsu>;
1706 // VMOVL : Vector Lengthening Move
1707 defm VMOVLs : N2VLInt_QHS<0,1,0b1010,0,0,1, "vmovl.s", int_arm_neon_vmovls>;
1708 defm VMOVLu : N2VLInt_QHS<1,1,0b1010,0,0,1, "vmovl.u", int_arm_neon_vmovlu>;
1710 // Vector Conversions.
1712 // VCVT : Vector Convert Between Floating-Point and Integers
// Plain int<->f32 conversions map directly onto the generic fp_to_sint /
// fp_to_uint / sint_to_fp / uint_to_fp nodes; D-register variants first.
1713 def VCVTf2sd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt.s32.f32",
1714 v2i32, v2f32, fp_to_sint>;
1715 def VCVTf2ud : N2VD<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt.u32.f32",
1716 v2i32, v2f32, fp_to_uint>;
1717 def VCVTs2fd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt.f32.s32",
1718 v2f32, v2i32, sint_to_fp>;
1719 def VCVTu2fd : N2VD<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt.f32.u32",
1720 v2f32, v2i32, uint_to_fp>;
// Q-register variants of the same four conversions.
1722 def VCVTf2sq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01110, 0, "vcvt.s32.f32",
1723 v4i32, v4f32, fp_to_sint>;
1724 def VCVTf2uq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01111, 0, "vcvt.u32.f32",
1725 v4i32, v4f32, fp_to_uint>;
1726 def VCVTs2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01100, 0, "vcvt.f32.s32",
1727 v4f32, v4i32, sint_to_fp>;
1728 def VCVTu2fq : N2VQ<0b11, 0b11, 0b10, 0b11, 0b01101, 0, "vcvt.f32.u32",
1729 v4f32, v4i32, uint_to_fp>;
1731 // VCVT : Vector Convert Between Floating-Point and Fixed-Point.
1732 // Note: Some of the opcode bits in the following VCVT instructions need to
1733 // be encoded based on the immed values.
// Fixed-point forms take a fraction-bits immediate and so map to the
// dedicated vcvt fixed-point intrinsics rather than generic nodes.
1734 def VCVTf2xsd : N2VCvtD<0, 1, 0b000000, 0b1111, 0, 1, "vcvt.s32.f32",
1735 v2i32, v2f32, int_arm_neon_vcvtfp2fxs>;
1736 def VCVTf2xud : N2VCvtD<1, 1, 0b000000, 0b1111, 0, 1, "vcvt.u32.f32",
1737 v2i32, v2f32, int_arm_neon_vcvtfp2fxu>;
1738 def VCVTxs2fd : N2VCvtD<0, 1, 0b000000, 0b1110, 0, 1, "vcvt.f32.s32",
1739 v2f32, v2i32, int_arm_neon_vcvtfxs2fp>;
1740 def VCVTxu2fd : N2VCvtD<1, 1, 0b000000, 0b1110, 0, 1, "vcvt.f32.u32",
1741 v2f32, v2i32, int_arm_neon_vcvtfxu2fp>;
1743 def VCVTf2xsq : N2VCvtQ<0, 1, 0b000000, 0b1111, 0, 1, "vcvt.s32.f32",
1744 v4i32, v4f32, int_arm_neon_vcvtfp2fxs>;
1745 def VCVTf2xuq : N2VCvtQ<1, 1, 0b000000, 0b1111, 0, 1, "vcvt.u32.f32",
1746 v4i32, v4f32, int_arm_neon_vcvtfp2fxu>;
1747 def VCVTxs2fq : N2VCvtQ<0, 1, 0b000000, 0b1110, 0, 1, "vcvt.f32.s32",
1748 v4f32, v4i32, int_arm_neon_vcvtfxs2fp>;
1749 def VCVTxu2fq : N2VCvtQ<1, 1, 0b000000, 0b1110, 0, 1, "vcvt.f32.u32",
1750 v4f32, v4i32, int_arm_neon_vcvtfxu2fp>;
1752 // VREV : Vector Reverse
// PatFrags matching element-reversal shuffles; the width argument to
// ARM::isVREVMask gives the size (in bits) of the region being reversed.
// NOTE(review): the "}]>;" closers of these PatFrag bodies appear to be
// missing from this chunk — confirm against the full file.
1754 def vrev64_shuffle : PatFrag<(ops node:$in),
1755 (vector_shuffle node:$in, undef), [{
1756 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1757 return ARM::isVREVMask(SVOp, 64);
1760 def vrev32_shuffle : PatFrag<(ops node:$in),
1761 (vector_shuffle node:$in, undef), [{
1762 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1763 return ARM::isVREVMask(SVOp, 32);
1766 def vrev16_shuffle : PatFrag<(ops node:$in),
1767 (vector_shuffle node:$in, undef), [{
1768 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1769 return ARM::isVREVMask(SVOp, 16);
1772 // VREV64 : Vector Reverse elements within 64-bit doublewords
1774 class VREV64D<bits<2> op19_18, string OpcodeStr, ValueType Ty>
1775 : N2V<0b11, 0b11, op19_18, 0b00, 0b00000, 0, 0, (outs DPR:$dst),
1776 (ins DPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
1777 [(set DPR:$dst, (Ty (vrev64_shuffle (Ty DPR:$src))))]>;
1778 class VREV64Q<bits<2> op19_18, string OpcodeStr, ValueType Ty>
1779 : N2V<0b11, 0b11, op19_18, 0b00, 0b00000, 1, 0, (outs QPR:$dst),
1780 (ins QPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
1781 [(set QPR:$dst, (Ty (vrev64_shuffle (Ty QPR:$src))))]>;
1783 def VREV64d8 : VREV64D<0b00, "vrev64.8", v8i8>;
1784 def VREV64d16 : VREV64D<0b01, "vrev64.16", v4i16>;
1785 def VREV64d32 : VREV64D<0b10, "vrev64.32", v2i32>;
1786 def VREV64df : VREV64D<0b10, "vrev64.32", v2f32>;
1788 def VREV64q8 : VREV64Q<0b00, "vrev64.8", v16i8>;
1789 def VREV64q16 : VREV64Q<0b01, "vrev64.16", v8i16>;
1790 def VREV64q32 : VREV64Q<0b10, "vrev64.32", v4i32>;
1791 def VREV64qf : VREV64Q<0b10, "vrev64.32", v4f32>;
1793 // VREV32 : Vector Reverse elements within 32-bit words
// Only 8- and 16-bit elements make sense inside a 32-bit word.
1795 class VREV32D<bits<2> op19_18, string OpcodeStr, ValueType Ty>
1796 : N2V<0b11, 0b11, op19_18, 0b00, 0b00001, 0, 0, (outs DPR:$dst),
1797 (ins DPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
1798 [(set DPR:$dst, (Ty (vrev32_shuffle (Ty DPR:$src))))]>;
1799 class VREV32Q<bits<2> op19_18, string OpcodeStr, ValueType Ty>
1800 : N2V<0b11, 0b11, op19_18, 0b00, 0b00001, 1, 0, (outs QPR:$dst),
1801 (ins QPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
1802 [(set QPR:$dst, (Ty (vrev32_shuffle (Ty QPR:$src))))]>;
1804 def VREV32d8 : VREV32D<0b00, "vrev32.8", v8i8>;
1805 def VREV32d16 : VREV32D<0b01, "vrev32.16", v4i16>;
1807 def VREV32q8 : VREV32Q<0b00, "vrev32.8", v16i8>;
1808 def VREV32q16 : VREV32Q<0b01, "vrev32.16", v8i16>;
1810 // VREV16 : Vector Reverse elements within 16-bit halfwords
// Only 8-bit elements fit inside a 16-bit halfword.
1812 class VREV16D<bits<2> op19_18, string OpcodeStr, ValueType Ty>
1813 : N2V<0b11, 0b11, op19_18, 0b00, 0b00010, 0, 0, (outs DPR:$dst),
1814 (ins DPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
1815 [(set DPR:$dst, (Ty (vrev16_shuffle (Ty DPR:$src))))]>;
1816 class VREV16Q<bits<2> op19_18, string OpcodeStr, ValueType Ty>
1817 : N2V<0b11, 0b11, op19_18, 0b00, 0b00010, 1, 0, (outs QPR:$dst),
1818 (ins QPR:$src), !strconcat(OpcodeStr, "\t$dst, $src"), "",
1819 [(set QPR:$dst, (Ty (vrev16_shuffle (Ty QPR:$src))))]>;
1821 def VREV16d8 : VREV16D<0b00, "vrev16.8", v8i8>;
1822 def VREV16q8 : VREV16Q<0b00, "vrev16.8", v16i8>;
1824 //===----------------------------------------------------------------------===//
1825 // Non-Instruction Patterns
1826 //===----------------------------------------------------------------------===//
// bitconvert between any two types occupying the same register class is a
// no-op at the register level: the output pattern is just the input register
// retyped. First the 64-bit (DPR) types, then the 128-bit (QPR) types.
1829 def : Pat<(v1i64 (bitconvert (v2i32 DPR:$src))), (v1i64 DPR:$src)>;
1830 def : Pat<(v1i64 (bitconvert (v4i16 DPR:$src))), (v1i64 DPR:$src)>;
1831 def : Pat<(v1i64 (bitconvert (v8i8  DPR:$src))), (v1i64 DPR:$src)>;
1832 def : Pat<(v1i64 (bitconvert (f64   DPR:$src))), (v1i64 DPR:$src)>;
1833 def : Pat<(v1i64 (bitconvert (v2f32 DPR:$src))), (v1i64 DPR:$src)>;
1834 def : Pat<(v2i32 (bitconvert (v1i64 DPR:$src))), (v2i32 DPR:$src)>;
1835 def : Pat<(v2i32 (bitconvert (v4i16 DPR:$src))), (v2i32 DPR:$src)>;
1836 def : Pat<(v2i32 (bitconvert (v8i8  DPR:$src))), (v2i32 DPR:$src)>;
1837 def : Pat<(v2i32 (bitconvert (f64   DPR:$src))), (v2i32 DPR:$src)>;
1838 def : Pat<(v2i32 (bitconvert (v2f32 DPR:$src))), (v2i32 DPR:$src)>;
1839 def : Pat<(v4i16 (bitconvert (v1i64 DPR:$src))), (v4i16 DPR:$src)>;
1840 def : Pat<(v4i16 (bitconvert (v2i32 DPR:$src))), (v4i16 DPR:$src)>;
1841 def : Pat<(v4i16 (bitconvert (v8i8  DPR:$src))), (v4i16 DPR:$src)>;
1842 def : Pat<(v4i16 (bitconvert (f64   DPR:$src))), (v4i16 DPR:$src)>;
1843 def : Pat<(v4i16 (bitconvert (v2f32 DPR:$src))), (v4i16 DPR:$src)>;
1844 def : Pat<(v8i8  (bitconvert (v1i64 DPR:$src))), (v8i8  DPR:$src)>;
1845 def : Pat<(v8i8  (bitconvert (v2i32 DPR:$src))), (v8i8  DPR:$src)>;
1846 def : Pat<(v8i8  (bitconvert (v4i16 DPR:$src))), (v8i8  DPR:$src)>;
1847 def : Pat<(v8i8  (bitconvert (f64   DPR:$src))), (v8i8  DPR:$src)>;
1848 def : Pat<(v8i8  (bitconvert (v2f32 DPR:$src))), (v8i8  DPR:$src)>;
1849 def : Pat<(f64   (bitconvert (v1i64 DPR:$src))), (f64   DPR:$src)>;
1850 def : Pat<(f64   (bitconvert (v2i32 DPR:$src))), (f64   DPR:$src)>;
1851 def : Pat<(f64   (bitconvert (v4i16 DPR:$src))), (f64   DPR:$src)>;
1852 def : Pat<(f64   (bitconvert (v8i8  DPR:$src))), (f64   DPR:$src)>;
1853 def : Pat<(f64   (bitconvert (v2f32 DPR:$src))), (f64   DPR:$src)>;
1854 def : Pat<(v2f32 (bitconvert (f64   DPR:$src))), (v2f32 DPR:$src)>;
1855 def : Pat<(v2f32 (bitconvert (v1i64 DPR:$src))), (v2f32 DPR:$src)>;
1856 def : Pat<(v2f32 (bitconvert (v2i32 DPR:$src))), (v2f32 DPR:$src)>;
1857 def : Pat<(v2f32 (bitconvert (v4i16 DPR:$src))), (v2f32 DPR:$src)>;
1858 def : Pat<(v2f32 (bitconvert (v8i8  DPR:$src))), (v2f32 DPR:$src)>;
// 128-bit (QPR) type pairs.
1860 def : Pat<(v2i64 (bitconvert (v4i32 QPR:$src))), (v2i64 QPR:$src)>;
1861 def : Pat<(v2i64 (bitconvert (v8i16 QPR:$src))), (v2i64 QPR:$src)>;
1862 def : Pat<(v2i64 (bitconvert (v16i8 QPR:$src))), (v2i64 QPR:$src)>;
1863 def : Pat<(v2i64 (bitconvert (v2f64 QPR:$src))), (v2i64 QPR:$src)>;
1864 def : Pat<(v2i64 (bitconvert (v4f32 QPR:$src))), (v2i64 QPR:$src)>;
1865 def : Pat<(v4i32 (bitconvert (v2i64 QPR:$src))), (v4i32 QPR:$src)>;
1866 def : Pat<(v4i32 (bitconvert (v8i16 QPR:$src))), (v4i32 QPR:$src)>;
1867 def : Pat<(v4i32 (bitconvert (v16i8 QPR:$src))), (v4i32 QPR:$src)>;
1868 def : Pat<(v4i32 (bitconvert (v2f64 QPR:$src))), (v4i32 QPR:$src)>;
1869 def : Pat<(v4i32 (bitconvert (v4f32 QPR:$src))), (v4i32 QPR:$src)>;
1870 def : Pat<(v8i16 (bitconvert (v2i64 QPR:$src))), (v8i16 QPR:$src)>;
1871 def : Pat<(v8i16 (bitconvert (v4i32 QPR:$src))), (v8i16 QPR:$src)>;
1872 def : Pat<(v8i16 (bitconvert (v16i8 QPR:$src))), (v8i16 QPR:$src)>;
1873 def : Pat<(v8i16 (bitconvert (v2f64 QPR:$src))), (v8i16 QPR:$src)>;
1874 def : Pat<(v8i16 (bitconvert (v4f32 QPR:$src))), (v8i16 QPR:$src)>;
1875 def : Pat<(v16i8 (bitconvert (v2i64 QPR:$src))), (v16i8 QPR:$src)>;
1876 def : Pat<(v16i8 (bitconvert (v4i32 QPR:$src))), (v16i8 QPR:$src)>;
1877 def : Pat<(v16i8 (bitconvert (v8i16 QPR:$src))), (v16i8 QPR:$src)>;
1878 def : Pat<(v16i8 (bitconvert (v2f64 QPR:$src))), (v16i8 QPR:$src)>;
1879 def : Pat<(v16i8 (bitconvert (v4f32 QPR:$src))), (v16i8 QPR:$src)>;
1880 def : Pat<(v4f32 (bitconvert (v2i64 QPR:$src))), (v4f32 QPR:$src)>;
1881 def : Pat<(v4f32 (bitconvert (v4i32 QPR:$src))), (v4f32 QPR:$src)>;
1882 def : Pat<(v4f32 (bitconvert (v8i16 QPR:$src))), (v4f32 QPR:$src)>;
1883 def : Pat<(v4f32 (bitconvert (v16i8 QPR:$src))), (v4f32 QPR:$src)>;
1884 def : Pat<(v4f32 (bitconvert (v2f64 QPR:$src))), (v4f32 QPR:$src)>;
1885 def : Pat<(v2f64 (bitconvert (v2i64 QPR:$src))), (v2f64 QPR:$src)>;
1886 def : Pat<(v2f64 (bitconvert (v4i32 QPR:$src))), (v2f64 QPR:$src)>;
1887 def : Pat<(v2f64 (bitconvert (v8i16 QPR:$src))), (v2f64 QPR:$src)>;
1888 def : Pat<(v2f64 (bitconvert (v16i8 QPR:$src))), (v2f64 QPR:$src)>;
1889 def : Pat<(v2f64 (bitconvert (v4f32 QPR:$src))), (v2f64 QPR:$src)>;