//===-- AArch64InstrNEON.td - NEON support for AArch64 -----*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes the AArch64 NEON instruction set.
//
//===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // NEON-specific DAG Nodes.
16 //===----------------------------------------------------------------------===//
// (outs Result), (ins Mask, TrueElts, FalseElts)
// Bitwise select; the BSL patterns below map (Rn & Rd) | (Rm & ~Rd) onto it,
// so operand 1 is the select mask.
def Neon_bsl : SDNode<"AArch64ISD::NEON_BSL", SDTypeProfile<1, 3,
[SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
SDTCisSameAs<0, 3>]>>;
// (outs Result), (ins Imm, OpCmode)
def SDT_Neon_movi : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
def Neon_movi : SDNode<"AArch64ISD::NEON_MOVIMM", SDT_Neon_movi>;
def Neon_mvni : SDNode<"AArch64ISD::NEON_MVNIMM", SDT_Neon_movi>;
// (outs Result), (ins Imm)
def Neon_fmovi : SDNode<"AArch64ISD::NEON_FMOVIMM", SDTypeProfile<1, 1,
[SDTCisVec<0>, SDTCisVT<1, i32>]>>;
// (outs Result), (ins LHS, RHS, CondCode)
def Neon_cmp : SDNode<"AArch64ISD::NEON_CMP", SDTypeProfile<1, 3,
[SDTCisVec<0>, SDTCisSameAs<1, 2>]>>;
// (outs Result), (ins LHS, 0/0.0 constant, CondCode)
def Neon_cmpz : SDNode<"AArch64ISD::NEON_CMPZ", SDTypeProfile<1, 3,
[SDTCisVec<0>, SDTCisVec<1>]>>;
// (outs Result), (ins LHS, RHS)
def Neon_tst : SDNode<"AArch64ISD::NEON_TST", SDTypeProfile<1, 2,
[SDTCisVec<0>, SDTCisSameAs<1, 2>]>>;
// (outs Result), (ins Imm)
def Neon_dupImm : SDNode<"AArch64ISD::NEON_DUPIMM", SDTypeProfile<1, 1,
[SDTCisVec<0>, SDTCisVT<1, i32>]>>;
// Profile for the saturating/rounding shift-by-immediate nodes below:
// (outs Result), (ins Vector, ShiftAmount). The constraint list was truncated
// in this copy; restore the i32 shift-amount operand and close the list.
def SDTARMVSH : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
                                     SDTCisVT<2, i32>]>;
def Neon_sqrshlImm : SDNode<"AArch64ISD::NEON_QSHLs", SDTARMVSH>;
def Neon_uqrshlImm : SDNode<"AArch64ISD::NEON_QSHLu", SDTARMVSH>;
// (outs Result), (ins Vector, LaneIndex)
def Neon_vduplane : SDNode<"AArch64ISD::NEON_VDUPLANE", SDTypeProfile<1, 2,
[SDTCisVec<0>, SDTCisVec<1>, SDTCisVT<2, i64>]>>;
//===----------------------------------------------------------------------===//
// Multiclass definitions
//===----------------------------------------------------------------------===//
// Multiclass NeonI_3VSame_B_sizes: three-register same-type instructions that
// exist only for the byte arrangements (8B and 16B). The trailing Commutable
// flag feeds isCommutable. The default-parameter line, the NoItinerary
// operands, and the closing braces were truncated in this copy; restored here.
multiclass NeonI_3VSame_B_sizes<bit u, bits<2> size, bits<5> opcode,
                                string asmop, SDPatternOperator opnode8B,
                                SDPatternOperator opnode16B,
                                bit Commutable = 0>
{
  let isCommutable = Commutable in {
    def _8B :  NeonI_3VSame<0b0, u, size, opcode,
               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
               asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
               [(set (v8i8 VPR64:$Rd),
                  (v8i8 (opnode8B (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
               NoItinerary>;

    def _16B : NeonI_3VSame<0b1, u, size, opcode,
               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
               asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
               [(set (v16i8 VPR128:$Rd),
                  (v16i8 (opnode16B (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
               NoItinerary>;
  }
}
// Multiclass NeonI_3VSame_HS_sizes: three-register same-type instructions for
// the halfword and word arrangements (4H/8H/2S/4S). Truncated default
// parameter, NoItinerary operands, and closing braces restored.
multiclass NeonI_3VSame_HS_sizes<bit u, bits<5> opcode,
                                 string asmop, SDPatternOperator opnode,
                                 bit Commutable = 0>
{
  let isCommutable = Commutable in {
    def _4H : NeonI_3VSame<0b0, u, 0b01, opcode,
              (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
              asmop # "\t$Rd.4h, $Rn.4h, $Rm.4h",
              [(set (v4i16 VPR64:$Rd),
                 (v4i16 (opnode (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))))],
              NoItinerary>;

    def _8H : NeonI_3VSame<0b1, u, 0b01, opcode,
              (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
              asmop # "\t$Rd.8h, $Rn.8h, $Rm.8h",
              [(set (v8i16 VPR128:$Rd),
                 (v8i16 (opnode (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))))],
              NoItinerary>;

    def _2S : NeonI_3VSame<0b0, u, 0b10, opcode,
              (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
              asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
              [(set (v2i32 VPR64:$Rd),
                 (v2i32 (opnode (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))))],
              NoItinerary>;

    def _4S : NeonI_3VSame<0b1, u, 0b10, opcode,
              (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
              asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
              [(set (v4i32 VPR128:$Rd),
                 (v4i32 (opnode (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))))],
              NoItinerary>;
  }
}
// Multiclass NeonI_3VSame_BHS_sizes: extends the HS sizes with the byte
// arrangements (8B/16B). Truncated default parameter, opening/closing braces,
// and NoItinerary operands restored.
multiclass NeonI_3VSame_BHS_sizes<bit u, bits<5> opcode,
                                  string asmop, SDPatternOperator opnode,
                                  bit Commutable = 0>
  : NeonI_3VSame_HS_sizes<u, opcode, asmop, opnode, Commutable>
{
  let isCommutable = Commutable in {
    def _8B : NeonI_3VSame<0b0, u, 0b00, opcode,
              (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
              asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
              [(set (v8i8 VPR64:$Rd),
                 (v8i8 (opnode (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
              NoItinerary>;

    def _16B : NeonI_3VSame<0b1, u, 0b00, opcode,
               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
               asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
               [(set (v16i8 VPR128:$Rd),
                  (v16i8 (opnode (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
               NoItinerary>;
  }
}
// Multiclass NeonI_3VSame_BHSD_sizes: extends the BHS sizes with the
// doubleword arrangement (2D). Truncated default parameter, braces, and
// NoItinerary operand restored.
multiclass NeonI_3VSame_BHSD_sizes<bit u, bits<5> opcode,
                                   string asmop, SDPatternOperator opnode,
                                   bit Commutable = 0>
  : NeonI_3VSame_BHS_sizes<u, opcode, asmop, opnode, Commutable>
{
  let isCommutable = Commutable in {
    def _2D : NeonI_3VSame<0b1, u, 0b11, opcode,
              (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
              asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
              [(set (v2i64 VPR128:$Rd),
                 (v2i64 (opnode (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))))],
              NoItinerary>;
  }
}
// Multiclass NeonI_3VSame_SD_sizes: Operand types are floating point types,
// but Result types can be integer or floating point types (the FP compares
// below instantiate it with vNiM result types). Truncated opening brace,
// NoItinerary operands, and closing braces restored.
multiclass NeonI_3VSame_SD_sizes<bit u, bit size, bits<5> opcode,
                                 string asmop, SDPatternOperator opnode2S,
                                 SDPatternOperator opnode4S,
                                 SDPatternOperator opnode2D,
                                 ValueType ResTy2S, ValueType ResTy4S,
                                 ValueType ResTy2D, bit Commutable = 0>
{
  let isCommutable = Commutable in {
    def _2S : NeonI_3VSame<0b0, u, {size, 0b0}, opcode,
              (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
              asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
              [(set (ResTy2S VPR64:$Rd),
                 (ResTy2S (opnode2S (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))))],
              NoItinerary>;

    def _4S : NeonI_3VSame<0b1, u, {size, 0b0}, opcode,
              (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
              asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
              [(set (ResTy4S VPR128:$Rd),
                 (ResTy4S (opnode4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))))],
              NoItinerary>;

    def _2D : NeonI_3VSame<0b1, u, {size, 0b1}, opcode,
              (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
              asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
              [(set (ResTy2D VPR128:$Rd),
                 (ResTy2D (opnode2D (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))))],
              NoItinerary>;
  }
}
186 //===----------------------------------------------------------------------===//
187 // Instruction Definitions
188 //===----------------------------------------------------------------------===//
190 // Vector Arithmetic Instructions
// Vector Add (Integer and Floating-Point)
// Final template argument is the Commutable flag (1 = isCommutable).
defm ADDvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b10000, "add", add, 1>;
defm FADDvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11010, "fadd", fadd, fadd, fadd,
v2f32, v4f32, v2f64, 1>;
// Vector Sub (Integer and Floating-Point)
defm SUBvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b10000, "sub", sub, 0>;
defm FSUBvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11010, "fsub", fsub, fsub, fsub,
v2f32, v4f32, v2f64, 0>;
// Vector Multiply (Integer and Floating-Point)
defm MULvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10011, "mul", mul, 1>;
defm FMULvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11011, "fmul", fmul, fmul, fmul,
v2f32, v4f32, v2f64, 1>;
// Vector Multiply (Polynomial)
// PMUL exists only for byte arrangements, hence the B_sizes multiclass.
defm PMULvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b10011, "pmul",
int_arm_neon_vmulp, int_arm_neon_vmulp, 1>;
215 // Vector Multiply-accumulate and Multiply-subtract (Integer)
217 // class NeonI_3VSame_Constraint_impl: NeonI_3VSame with no data type and
218 // two operands constraints.
// NeonI_3VSame_Constraint_impl: NeonI_3VSame with no data-type suffix handling
// and a tied-operand constraint ($src is the accumulator, tied to $Rd).
// Truncated NoItinerary operand and record body braces restored.
class NeonI_3VSame_Constraint_impl<string asmop, string asmlane,
                                   RegisterOperand VPRC, ValueType OpTy, bit q,
                                   bit u, bits<2> size, bits<5> opcode,
                                   SDPatternOperator opnode>
  : NeonI_3VSame<q, u, size, opcode,
                 (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, VPRC:$Rm),
                 asmop # "\t$Rd" # asmlane # ", $Rn" # asmlane # ", $Rm" # asmlane,
                 [(set (OpTy VPRC:$Rd),
                    (OpTy (opnode (OpTy VPRC:$src), (OpTy VPRC:$Rn),
                                  (OpTy VPRC:$Rm))))],
                 NoItinerary> {
  let Constraints = "$src = $Rd";
}
// Neon_mla/Neon_mls match Ra +/- (Rn * Rm); the Ra operand becomes the tied
// $src accumulator of NeonI_3VSame_Constraint_impl.
def Neon_mla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
(add node:$Ra, (mul node:$Rn, node:$Rm))>;
def Neon_mls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
(sub node:$Ra, (mul node:$Rn, node:$Rm))>;
def MLAvvv_8B: NeonI_3VSame_Constraint_impl<"mla", ".8b", VPR64, v8i8,
0b0, 0b0, 0b00, 0b10010, Neon_mla>;
def MLAvvv_16B: NeonI_3VSame_Constraint_impl<"mla", ".16b", VPR128, v16i8,
0b1, 0b0, 0b00, 0b10010, Neon_mla>;
def MLAvvv_4H: NeonI_3VSame_Constraint_impl<"mla", ".4h", VPR64, v4i16,
0b0, 0b0, 0b01, 0b10010, Neon_mla>;
def MLAvvv_8H: NeonI_3VSame_Constraint_impl<"mla", ".8h", VPR128, v8i16,
0b1, 0b0, 0b01, 0b10010, Neon_mla>;
def MLAvvv_2S: NeonI_3VSame_Constraint_impl<"mla", ".2s", VPR64, v2i32,
0b0, 0b0, 0b10, 0b10010, Neon_mla>;
def MLAvvv_4S: NeonI_3VSame_Constraint_impl<"mla", ".4s", VPR128, v4i32,
0b1, 0b0, 0b10, 0b10010, Neon_mla>;
def MLSvvv_8B: NeonI_3VSame_Constraint_impl<"mls", ".8b", VPR64, v8i8,
0b0, 0b1, 0b00, 0b10010, Neon_mls>;
def MLSvvv_16B: NeonI_3VSame_Constraint_impl<"mls", ".16b", VPR128, v16i8,
0b1, 0b1, 0b00, 0b10010, Neon_mls>;
def MLSvvv_4H: NeonI_3VSame_Constraint_impl<"mls", ".4h", VPR64, v4i16,
0b0, 0b1, 0b01, 0b10010, Neon_mls>;
def MLSvvv_8H: NeonI_3VSame_Constraint_impl<"mls", ".8h", VPR128, v8i16,
0b1, 0b1, 0b01, 0b10010, Neon_mls>;
def MLSvvv_2S: NeonI_3VSame_Constraint_impl<"mls", ".2s", VPR64, v2i32,
0b0, 0b1, 0b10, 0b10010, Neon_mls>;
def MLSvvv_4S: NeonI_3VSame_Constraint_impl<"mls", ".4s", VPR128, v4i32,
0b1, 0b1, 0b10, 0b10010, Neon_mls>;
264 // Vector Multiply-accumulate and Multiply-subtract (Floating Point)
// Unfused forms: Ra +/- (Rn * Rm); only selected under UseFusedMAC (below).
def Neon_fmla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
(fadd node:$Ra, (fmul node:$Rn, node:$Rm))>;
def Neon_fmls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
(fsub node:$Ra, (fmul node:$Rn, node:$Rm))>;
// Fused multiply-add/subtract: the separate-fmul patterns are only legal when
// fusion is allowed. Truncated closing brace of the `let Predicates` scope and
// the second comment line restored.
let Predicates = [HasNEON, UseFusedMAC] in {
def FMLAvvv_2S: NeonI_3VSame_Constraint_impl<"fmla", ".2s", VPR64, v2f32,
                                             0b0, 0b0, 0b00, 0b11001, Neon_fmla>;
def FMLAvvv_4S: NeonI_3VSame_Constraint_impl<"fmla", ".4s", VPR128, v4f32,
                                             0b1, 0b0, 0b00, 0b11001, Neon_fmla>;
def FMLAvvv_2D: NeonI_3VSame_Constraint_impl<"fmla", ".2d", VPR128, v2f64,
                                             0b1, 0b0, 0b01, 0b11001, Neon_fmla>;

def FMLSvvv_2S: NeonI_3VSame_Constraint_impl<"fmls", ".2s", VPR64, v2f32,
                                             0b0, 0b0, 0b10, 0b11001, Neon_fmls>;
def FMLSvvv_4S: NeonI_3VSame_Constraint_impl<"fmls", ".4s", VPR128, v4f32,
                                             0b1, 0b0, 0b10, 0b11001, Neon_fmls>;
def FMLSvvv_2D: NeonI_3VSame_Constraint_impl<"fmls", ".2d", VPR128, v2f64,
                                             0b1, 0b0, 0b11, 0b11001, Neon_fmls>;
}

// We're also allowed to match the fma instruction regardless of compile
// options.
def : Pat<(v2f32 (fma VPR64:$Rn, VPR64:$Rm, VPR64:$Ra)),
          (FMLAvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
def : Pat<(v4f32 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
          (FMLAvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
def : Pat<(v2f64 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
          (FMLAvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;

def : Pat<(v2f32 (fma (fneg VPR64:$Rn), VPR64:$Rm, VPR64:$Ra)),
          (FMLSvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
def : Pat<(v4f32 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
          (FMLSvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
def : Pat<(v2f64 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
          (FMLSvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
// Vector Divide (Floating-Point)
defm FDIVvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11111, "fdiv", fdiv, fdiv, fdiv,
v2f32, v4f32, v2f64, 0>;
// Vector Bitwise Operations
// Vector Bitwise AND
defm ANDvvv : NeonI_3VSame_B_sizes<0b0, 0b00, 0b00011, "and", and, and, 1>;
// Vector Bitwise Exclusive OR
defm EORvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b00011, "eor", xor, xor, 1>;
// Vector Bitwise OR
defm ORRvvv : NeonI_3VSame_B_sizes<0b0, 0b10, 0b00011, "orr", or, or, 1>;
// ORR disassembled as MOV if Vn==Vm
// Vector Move - register
// Alias for ORR if Vn=Vm.
// FIXME: This is actually the preferred syntax but TableGen can't deal with
// custom printing of aliases.
def : NeonInstAlias<"mov $Rd.8b, $Rn.8b",
(ORRvvv_8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rn), 0>;
def : NeonInstAlias<"mov $Rd.16b, $Rn.16b",
(ORRvvv_16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rn), 0>;
// Matches a NEON_MOVIMM node whose decoded value is an all-ones byte splat
// (EltBits == 8, EltVal == 0xff), i.e. a vector of all-set bits. The
// `unsigned EltBits;` declaration and the closing `}]>;` were truncated in
// this copy; restored here.
def Neon_immAllOnes: PatLeaf<(Neon_movi (i32 timm), (i32 imm)), [{
  ConstantSDNode *ImmConstVal = cast<ConstantSDNode>(N->getOperand(0));
  ConstantSDNode *OpCmodeConstVal = cast<ConstantSDNode>(N->getOperand(1));
  unsigned EltBits;
  uint64_t EltVal = A64Imms::decodeNeonModImm(ImmConstVal->getZExtValue(),
    OpCmodeConstVal->getZExtValue(), EltBits);
  return (EltBits == 8 && EltVal == 0xff);
}]>;
// Vector NOT expressed as XOR with an all-ones immediate splat, plus the
// ORN (or-not) and BIC (and-not) combinations built from it.
def Neon_not8B : PatFrag<(ops node:$in),
(xor node:$in, (bitconvert (v8i8 Neon_immAllOnes)))>;
def Neon_not16B : PatFrag<(ops node:$in),
(xor node:$in, (bitconvert (v16i8 Neon_immAllOnes)))>;
def Neon_orn8B : PatFrag<(ops node:$Rn, node:$Rm),
(or node:$Rn, (Neon_not8B node:$Rm))>;
def Neon_orn16B : PatFrag<(ops node:$Rn, node:$Rm),
(or node:$Rn, (Neon_not16B node:$Rm))>;
def Neon_bic8B : PatFrag<(ops node:$Rn, node:$Rm),
(and node:$Rn, (Neon_not8B node:$Rm))>;
def Neon_bic16B : PatFrag<(ops node:$Rn, node:$Rm),
(and node:$Rn, (Neon_not16B node:$Rm))>;
// Vector Bitwise OR NOT - register
defm ORNvvv : NeonI_3VSame_B_sizes<0b0, 0b11, 0b00011, "orn",
Neon_orn8B, Neon_orn16B, 0>;
// Vector Bitwise Bit Clear (AND NOT) - register
defm BICvvv : NeonI_3VSame_B_sizes<0b0, 0b01, 0b00011, "bic",
Neon_bic8B, Neon_bic16B, 0>;
// Maps the non-byte vector types onto the byte-arrangement bitwise
// instructions (bitwise ops are type-agnostic). The `Instruction INST8B,`
// parameter line and the closing brace were truncated; restored here.
multiclass Neon_bitwise2V_patterns<SDPatternOperator opnode8B,
                                   SDPatternOperator opnode16B,
                                   Instruction INST8B,
                                   Instruction INST16B> {
  def : Pat<(v2i32 (opnode8B VPR64:$Rn, VPR64:$Rm)),
            (INST8B VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v4i16 (opnode8B VPR64:$Rn, VPR64:$Rm)),
            (INST8B VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v1i64 (opnode8B VPR64:$Rn, VPR64:$Rm)),
            (INST8B VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v4i32 (opnode16B VPR128:$Rn, VPR128:$Rm)),
            (INST16B VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v8i16 (opnode16B VPR128:$Rn, VPR128:$Rm)),
            (INST16B VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v2i64 (opnode16B VPR128:$Rn, VPR128:$Rm)),
            (INST16B VPR128:$Rn, VPR128:$Rm)>;
}
// Additional patterns for bitwise instructions AND, EOR, ORR, BIC, ORN
defm : Neon_bitwise2V_patterns<and, and, ANDvvv_8B, ANDvvv_16B>;
defm : Neon_bitwise2V_patterns<or, or, ORRvvv_8B, ORRvvv_16B>;
defm : Neon_bitwise2V_patterns<xor, xor, EORvvv_8B, EORvvv_16B>;
defm : Neon_bitwise2V_patterns<Neon_bic8B, Neon_bic16B, BICvvv_8B, BICvvv_16B>;
defm : Neon_bitwise2V_patterns<Neon_orn8B, Neon_orn16B, ORNvvv_8B, ORNvvv_16B>;
// Vector Bitwise Select
// $src (tied to $Rd) is the select mask.
def BSLvvv_8B : NeonI_3VSame_Constraint_impl<"bsl", ".8b", VPR64, v8i8,
0b0, 0b1, 0b01, 0b00011, Neon_bsl>;
def BSLvvv_16B : NeonI_3VSame_Constraint_impl<"bsl", ".16b", VPR128, v16i8,
0b1, 0b1, 0b01, 0b00011, Neon_bsl>;
// Maps additional vector types, the expanded (Rn&Rd)|(Rm&~Rd) form, and the
// llvm.arm.neon.vbsl intrinsics onto the BSL instructions. The
// `Instruction INST8B,` parameter line and the closing brace were truncated;
// restored here.
multiclass Neon_bitwise3V_patterns<SDPatternOperator opnode,
                                   Instruction INST8B,
                                   Instruction INST16B> {
  // Disassociate type from instruction definition
  def : Pat<(v2i32 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v4i16 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v1i64 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v4i32 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v8i16 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v2i64 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;

  // Allow to match BSL instruction pattern with non-constant operand
  def : Pat<(v8i8 (or (and VPR64:$Rn, VPR64:$Rd),
                      (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
            (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v4i16 (or (and VPR64:$Rn, VPR64:$Rd),
                       (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
            (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v2i32 (or (and VPR64:$Rn, VPR64:$Rd),
                       (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
            (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v1i64 (or (and VPR64:$Rn, VPR64:$Rd),
                       (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
            (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v16i8 (or (and VPR128:$Rn, VPR128:$Rd),
                       (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
            (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v8i16 (or (and VPR128:$Rn, VPR128:$Rd),
                       (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
            (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v4i32 (or (and VPR128:$Rn, VPR128:$Rd),
                       (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
            (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v2i64 (or (and VPR128:$Rn, VPR128:$Rd),
                       (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
            (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;

  // Allow to match llvm.arm.* intrinsics.
  def : Pat<(v8i8 (int_arm_neon_vbsl (v8i8 VPR64:$src),
                    (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v4i16 (int_arm_neon_vbsl (v4i16 VPR64:$src),
                    (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v2i32 (int_arm_neon_vbsl (v2i32 VPR64:$src),
                    (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v1i64 (int_arm_neon_vbsl (v1i64 VPR64:$src),
                    (v1i64 VPR64:$Rn), (v1i64 VPR64:$Rm))),
            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v2f32 (int_arm_neon_vbsl (v2f32 VPR64:$src),
                    (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))),
            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v16i8 (int_arm_neon_vbsl (v16i8 VPR128:$src),
                    (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v8i16 (int_arm_neon_vbsl (v8i16 VPR128:$src),
                    (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v4i32 (int_arm_neon_vbsl (v4i32 VPR128:$src),
                    (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v2i64 (int_arm_neon_vbsl (v2i64 VPR128:$src),
                    (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v4f32 (int_arm_neon_vbsl (v4f32 VPR128:$src),
                    (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v2f64 (int_arm_neon_vbsl (v2f64 VPR128:$src),
                    (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
}
// Additional patterns for bitwise instruction BSL
defm: Neon_bitwise3V_patterns<Neon_bsl, BSLvvv_8B, BSLvvv_16B>;
// Predicate always returns false, so BIT/BIF get assembler/disassembler
// support but no selection patterns of their own.
def Neon_NoBSLop : PatFrag<(ops node:$src, node:$Rn, node:$Rm),
(Neon_bsl node:$src, node:$Rn, node:$Rm),
[{ (void)N; return false; }]>;
// Vector Bitwise Insert if True
def BITvvv_8B : NeonI_3VSame_Constraint_impl<"bit", ".8b", VPR64, v8i8,
0b0, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
def BITvvv_16B : NeonI_3VSame_Constraint_impl<"bit", ".16b", VPR128, v16i8,
0b1, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
// Vector Bitwise Insert if False
def BIFvvv_8B : NeonI_3VSame_Constraint_impl<"bif", ".8b", VPR64, v8i8,
0b0, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
def BIFvvv_16B : NeonI_3VSame_Constraint_impl<"bif", ".16b", VPR128, v16i8,
0b1, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
// Vector Absolute Difference and Accumulate (Signed, Unsigned)
// Ra + |Rn - Rm|; Ra becomes the tied $src accumulator operand.
def Neon_uaba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
(add node:$Ra, (int_arm_neon_vabdu node:$Rn, node:$Rm))>;
def Neon_saba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
(add node:$Ra, (int_arm_neon_vabds node:$Rn, node:$Rm))>;
// Vector Absolute Difference and Accumulate (Unsigned)
def UABAvvv_8B : NeonI_3VSame_Constraint_impl<"uaba", ".8b", VPR64, v8i8,
0b0, 0b1, 0b00, 0b01111, Neon_uaba>;
def UABAvvv_16B : NeonI_3VSame_Constraint_impl<"uaba", ".16b", VPR128, v16i8,
0b1, 0b1, 0b00, 0b01111, Neon_uaba>;
def UABAvvv_4H : NeonI_3VSame_Constraint_impl<"uaba", ".4h", VPR64, v4i16,
0b0, 0b1, 0b01, 0b01111, Neon_uaba>;
def UABAvvv_8H : NeonI_3VSame_Constraint_impl<"uaba", ".8h", VPR128, v8i16,
0b1, 0b1, 0b01, 0b01111, Neon_uaba>;
def UABAvvv_2S : NeonI_3VSame_Constraint_impl<"uaba", ".2s", VPR64, v2i32,
0b0, 0b1, 0b10, 0b01111, Neon_uaba>;
def UABAvvv_4S : NeonI_3VSame_Constraint_impl<"uaba", ".4s", VPR128, v4i32,
0b1, 0b1, 0b10, 0b01111, Neon_uaba>;
// Vector Absolute Difference and Accumulate (Signed)
def SABAvvv_8B : NeonI_3VSame_Constraint_impl<"saba", ".8b", VPR64, v8i8,
0b0, 0b0, 0b00, 0b01111, Neon_saba>;
def SABAvvv_16B : NeonI_3VSame_Constraint_impl<"saba", ".16b", VPR128, v16i8,
0b1, 0b0, 0b00, 0b01111, Neon_saba>;
def SABAvvv_4H : NeonI_3VSame_Constraint_impl<"saba", ".4h", VPR64, v4i16,
0b0, 0b0, 0b01, 0b01111, Neon_saba>;
def SABAvvv_8H : NeonI_3VSame_Constraint_impl<"saba", ".8h", VPR128, v8i16,
0b1, 0b0, 0b01, 0b01111, Neon_saba>;
def SABAvvv_2S : NeonI_3VSame_Constraint_impl<"saba", ".2s", VPR64, v2i32,
0b0, 0b0, 0b10, 0b01111, Neon_saba>;
def SABAvvv_4S : NeonI_3VSame_Constraint_impl<"saba", ".4s", VPR128, v4i32,
0b1, 0b0, 0b10, 0b01111, Neon_saba>;
// Vector Absolute Difference (Signed, Unsigned)
defm UABDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01110, "uabd", int_arm_neon_vabdu, 0>;
defm SABDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01110, "sabd", int_arm_neon_vabds, 0>;
// Vector Absolute Difference (Floating Point)
// Reuses the vabds intrinsic with FP vector types for all three arrangements.
defm FABDvvv: NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11010, "fabd",
int_arm_neon_vabds, int_arm_neon_vabds,
int_arm_neon_vabds, v2f32, v4f32, v2f64, 0>;
// Vector Reciprocal Step (Floating Point)
// NeonI_3VSame_SD_sizes takes one operator per arrangement (2S/4S/2D); the
// third `int_arm_neon_vrecps` line was truncated in this copy — restored.
defm FRECPSvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11111, "frecps",
                                       int_arm_neon_vrecps, int_arm_neon_vrecps,
                                       int_arm_neon_vrecps,
                                       v2f32, v4f32, v2f64, 0>;
// Vector Reciprocal Square Root Step (Floating Point)
defm FRSQRTSvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11111, "frsqrts",
int_arm_neon_vrsqrts,
int_arm_neon_vrsqrts,
int_arm_neon_vrsqrts,
v2f32, v4f32, v2f64, 0>;
// Vector Comparisons
// Each PatFrag pins the NEON_CMP node's CondCode operand to one comparison.
def Neon_cmeq : PatFrag<(ops node:$lhs, node:$rhs),
(Neon_cmp node:$lhs, node:$rhs, SETEQ)>;
def Neon_cmphs : PatFrag<(ops node:$lhs, node:$rhs),
(Neon_cmp node:$lhs, node:$rhs, SETUGE)>;
def Neon_cmge : PatFrag<(ops node:$lhs, node:$rhs),
(Neon_cmp node:$lhs, node:$rhs, SETGE)>;
def Neon_cmhi : PatFrag<(ops node:$lhs, node:$rhs),
(Neon_cmp node:$lhs, node:$rhs, SETUGT)>;
def Neon_cmgt : PatFrag<(ops node:$lhs, node:$rhs),
(Neon_cmp node:$lhs, node:$rhs, SETGT)>;
575 // NeonI_compare_aliases class: swaps register operands to implement
576 // comparison aliases, e.g., CMLE is alias for CMGE with operands reversed.
// NeonI_compare_aliases class: swaps register operands to implement
// comparison aliases, e.g., CMLE is alias for CMGE with operands reversed.
// The asm-string continuation line (", $Rm" # asmlane,) was truncated in this
// copy; restored here. Note $Rm/$Rn are deliberately swapped in the result.
class NeonI_compare_aliases<string asmop, string asmlane,
                            Instruction inst, RegisterOperand VPRC>
  : NeonInstAlias<asmop # "\t$Rd" # asmlane #", $Rn" # asmlane #
                    ", $Rm" # asmlane,
                  (inst VPRC:$Rd, VPRC:$Rm, VPRC:$Rn), 0b0>;
583 // Vector Comparisons (Integer)
585 // Vector Compare Mask Equal (Integer)
// Vector Compare Mask Equal (Integer) — CMEQ is commutative; the closing
// brace of this `let` scope was truncated in this copy and is restored so the
// flag does not leak onto the following (non-commutative) compares.
let isCommutable =1 in {
defm CMEQvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b10001, "cmeq", Neon_cmeq, 0>;
}
// Vector Compare Mask Higher or Same (Unsigned Integer)
defm CMHSvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00111, "cmhs", Neon_cmphs, 0>;
// Vector Compare Mask Greater Than or Equal (Integer)
defm CMGEvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00111, "cmge", Neon_cmge, 0>;
// Vector Compare Mask Higher (Unsigned Integer)
defm CMHIvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00110, "cmhi", Neon_cmhi, 0>;
// Vector Compare Mask Greater Than (Integer)
defm CMGTvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00110, "cmgt", Neon_cmgt, 0>;
// Vector Compare Mask Bitwise Test (Integer)
defm CMTSTvvv: NeonI_3VSame_BHSD_sizes<0b0, 0b10001, "cmtst", Neon_tst, 0>;
// The following are pure assembler aliases: NeonI_compare_aliases emits the
// reversed-operand form of the underlying instruction.
// Vector Compare Mask Less or Same (Unsigned Integer)
// CMLS is alias for CMHS with operands reversed.
def CMLSvvv_8B : NeonI_compare_aliases<"cmls", ".8b", CMHSvvv_8B, VPR64>;
def CMLSvvv_16B : NeonI_compare_aliases<"cmls", ".16b", CMHSvvv_16B, VPR128>;
def CMLSvvv_4H : NeonI_compare_aliases<"cmls", ".4h", CMHSvvv_4H, VPR64>;
def CMLSvvv_8H : NeonI_compare_aliases<"cmls", ".8h", CMHSvvv_8H, VPR128>;
def CMLSvvv_2S : NeonI_compare_aliases<"cmls", ".2s", CMHSvvv_2S, VPR64>;
def CMLSvvv_4S : NeonI_compare_aliases<"cmls", ".4s", CMHSvvv_4S, VPR128>;
def CMLSvvv_2D : NeonI_compare_aliases<"cmls", ".2d", CMHSvvv_2D, VPR128>;
// Vector Compare Mask Less Than or Equal (Integer)
// CMLE is alias for CMGE with operands reversed.
def CMLEvvv_8B : NeonI_compare_aliases<"cmle", ".8b", CMGEvvv_8B, VPR64>;
def CMLEvvv_16B : NeonI_compare_aliases<"cmle", ".16b", CMGEvvv_16B, VPR128>;
def CMLEvvv_4H : NeonI_compare_aliases<"cmle", ".4h", CMGEvvv_4H, VPR64>;
def CMLEvvv_8H : NeonI_compare_aliases<"cmle", ".8h", CMGEvvv_8H, VPR128>;
def CMLEvvv_2S : NeonI_compare_aliases<"cmle", ".2s", CMGEvvv_2S, VPR64>;
def CMLEvvv_4S : NeonI_compare_aliases<"cmle", ".4s", CMGEvvv_4S, VPR128>;
def CMLEvvv_2D : NeonI_compare_aliases<"cmle", ".2d", CMGEvvv_2D, VPR128>;
// Vector Compare Mask Lower (Unsigned Integer)
// CMLO is alias for CMHI with operands reversed.
def CMLOvvv_8B : NeonI_compare_aliases<"cmlo", ".8b", CMHIvvv_8B, VPR64>;
def CMLOvvv_16B : NeonI_compare_aliases<"cmlo", ".16b", CMHIvvv_16B, VPR128>;
def CMLOvvv_4H : NeonI_compare_aliases<"cmlo", ".4h", CMHIvvv_4H, VPR64>;
def CMLOvvv_8H : NeonI_compare_aliases<"cmlo", ".8h", CMHIvvv_8H, VPR128>;
def CMLOvvv_2S : NeonI_compare_aliases<"cmlo", ".2s", CMHIvvv_2S, VPR64>;
def CMLOvvv_4S : NeonI_compare_aliases<"cmlo", ".4s", CMHIvvv_4S, VPR128>;
def CMLOvvv_2D : NeonI_compare_aliases<"cmlo", ".2d", CMHIvvv_2D, VPR128>;
// Vector Compare Mask Less Than (Integer)
// CMLT is alias for CMGT with operands reversed.
def CMLTvvv_8B : NeonI_compare_aliases<"cmlt", ".8b", CMGTvvv_8B, VPR64>;
def CMLTvvv_16B : NeonI_compare_aliases<"cmlt", ".16b", CMGTvvv_16B, VPR128>;
def CMLTvvv_4H : NeonI_compare_aliases<"cmlt", ".4h", CMGTvvv_4H, VPR64>;
def CMLTvvv_8H : NeonI_compare_aliases<"cmlt", ".8h", CMGTvvv_8H, VPR128>;
def CMLTvvv_2S : NeonI_compare_aliases<"cmlt", ".2s", CMGTvvv_2S, VPR64>;
def CMLTvvv_4S : NeonI_compare_aliases<"cmlt", ".4s", CMGTvvv_4S, VPR128>;
def CMLTvvv_2D : NeonI_compare_aliases<"cmlt", ".2d", CMGTvvv_2D, VPR128>;
// Operand class for the literal #0 used by the compare-against-zero forms.
// The opening brace, Name assignment, and closing braces were truncated in
// this copy; restored here (Name value inferred from convention — verify).
def neon_uimm0_asmoperand : AsmOperandClass
{
  let Name = "UImm0";
  let PredicateMethod = "isUImm<0>";
  let RenderMethod = "addImmOperands";
}

def neon_uimm0 : Operand<i32>, ImmLeaf<i32, [{return Imm == 0;}]> {
  let ParserMatchClass = neon_uimm0_asmoperand;
  let PrintMethod = "printNeonUImm0Operand";
}
// Multiclass NeonI_cmpz_sizes: integer compare-against-zero for every
// arrangement; $Imm is constrained to the literal 0. Truncated opening brace,
// NoItinerary operands, and closing braces restored.
multiclass NeonI_cmpz_sizes<bit u, bits<5> opcode, string asmop, CondCode CC>
{
  def _8B : NeonI_2VMisc<0b0, u, 0b00, opcode,
            (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
            asmop # "\t$Rd.8b, $Rn.8b, $Imm",
            [(set (v8i8 VPR64:$Rd),
               (v8i8 (Neon_cmpz (v8i8 VPR64:$Rn), (i32 imm:$Imm), CC)))],
            NoItinerary>;

  def _16B : NeonI_2VMisc<0b1, u, 0b00, opcode,
             (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
             asmop # "\t$Rd.16b, $Rn.16b, $Imm",
             [(set (v16i8 VPR128:$Rd),
                (v16i8 (Neon_cmpz (v16i8 VPR128:$Rn), (i32 imm:$Imm), CC)))],
             NoItinerary>;

  def _4H : NeonI_2VMisc<0b0, u, 0b01, opcode,
            (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
            asmop # "\t$Rd.4h, $Rn.4h, $Imm",
            [(set (v4i16 VPR64:$Rd),
               (v4i16 (Neon_cmpz (v4i16 VPR64:$Rn), (i32 imm:$Imm), CC)))],
            NoItinerary>;

  def _8H : NeonI_2VMisc<0b1, u, 0b01, opcode,
            (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
            asmop # "\t$Rd.8h, $Rn.8h, $Imm",
            [(set (v8i16 VPR128:$Rd),
               (v8i16 (Neon_cmpz (v8i16 VPR128:$Rn), (i32 imm:$Imm), CC)))],
            NoItinerary>;

  def _2S : NeonI_2VMisc<0b0, u, 0b10, opcode,
            (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
            asmop # "\t$Rd.2s, $Rn.2s, $Imm",
            [(set (v2i32 VPR64:$Rd),
               (v2i32 (Neon_cmpz (v2i32 VPR64:$Rn), (i32 imm:$Imm), CC)))],
            NoItinerary>;

  def _4S : NeonI_2VMisc<0b1, u, 0b10, opcode,
            (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
            asmop # "\t$Rd.4s, $Rn.4s, $Imm",
            [(set (v4i32 VPR128:$Rd),
               (v4i32 (Neon_cmpz (v4i32 VPR128:$Rn), (i32 imm:$Imm), CC)))],
            NoItinerary>;

  def _2D : NeonI_2VMisc<0b1, u, 0b11, opcode,
            (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
            asmop # "\t$Rd.2d, $Rn.2d, $Imm",
            [(set (v2i64 VPR128:$Rd),
               (v2i64 (Neon_cmpz (v2i64 VPR128:$Rn), (i32 imm:$Imm), CC)))],
            NoItinerary>;
}
// Vector Compare Mask Equal to Zero (Integer)
defm CMEQvvi : NeonI_cmpz_sizes<0b0, 0b01001, "cmeq", SETEQ>;
// Vector Compare Mask Greater Than or Equal to Zero (Signed Integer)
defm CMGEvvi : NeonI_cmpz_sizes<0b1, 0b01000, "cmge", SETGE>;
// Vector Compare Mask Greater Than Zero (Signed Integer)
defm CMGTvvi : NeonI_cmpz_sizes<0b0, 0b01000, "cmgt", SETGT>;
// Vector Compare Mask Less Than or Equal To Zero (Signed Integer)
defm CMLEvvi : NeonI_cmpz_sizes<0b1, 0b01001, "cmle", SETLE>;
// Vector Compare Mask Less Than Zero (Signed Integer)
defm CMLTvvi : NeonI_cmpz_sizes<0b0, 0b01010, "cmlt", SETLT>;
726 // Vector Comparisons (Floating Point)
728 // Vector Compare Mask Equal (Floating Point)
// Vector Compare Mask Equal (Floating Point) — FCMEQ is commutative; the
// closing brace of this `let` scope was truncated in this copy and is
// restored so the flag does not leak onto FCMGE/FCMGT below.
let isCommutable =1 in {
defm FCMEQvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11100, "fcmeq", Neon_cmeq,
                                      Neon_cmeq, Neon_cmeq,
                                      v2i32, v4i32, v2i64, 0>;
}
// Vector Compare Mask Greater Than Or Equal (Floating Point)
// Result types are integer mask vectors (v2i32/v4i32/v2i64).
defm FCMGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11100, "fcmge", Neon_cmge,
Neon_cmge, Neon_cmge,
v2i32, v4i32, v2i64, 0>;
// Vector Compare Mask Greater Than (Floating Point)
defm FCMGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11100, "fcmgt", Neon_cmgt,
Neon_cmgt, Neon_cmgt,
v2i32, v4i32, v2i64, 0>;
// Vector Compare Mask Less Than Or Equal (Floating Point)
// FCMLE is alias for FCMGE with operands reversed.
def FCMLEvvv_2S : NeonI_compare_aliases<"fcmle", ".2s", FCMGEvvv_2S, VPR64>;
def FCMLEvvv_4S : NeonI_compare_aliases<"fcmle", ".4s", FCMGEvvv_4S, VPR128>;
def FCMLEvvv_2D : NeonI_compare_aliases<"fcmle", ".2d", FCMGEvvv_2D, VPR128>;
// Vector Compare Mask Less Than (Floating Point)
// FCMLT is alias for FCMGT with operands reversed.
def FCMLTvvv_2S : NeonI_compare_aliases<"fcmlt", ".2s", FCMGTvvv_2S, VPR64>;
def FCMLTvvv_4S : NeonI_compare_aliases<"fcmlt", ".4s", FCMGTvvv_4S, VPR128>;
def FCMLTvvv_2D : NeonI_compare_aliases<"fcmlt", ".2d", FCMGTvvv_2D, VPR128>;
// Multiclass NeonI_fpcmpz_sizes: floating-point compare against literal 0.0
// (fpz32 operand) producing integer mask vectors. Truncated opening brace,
// NoItinerary operands, and closing braces restored.
multiclass NeonI_fpcmpz_sizes<bit u, bit size, bits<5> opcode,
                              string asmop, CondCode CC>
{
  def _2S : NeonI_2VMisc<0b0, u, {size, 0b0}, opcode,
            (outs VPR64:$Rd), (ins VPR64:$Rn, fpz32:$FPImm),
            asmop # "\t$Rd.2s, $Rn.2s, $FPImm",
            [(set (v2i32 VPR64:$Rd),
               (v2i32 (Neon_cmpz (v2f32 VPR64:$Rn), (f32 fpimm:$FPImm), CC)))],
            NoItinerary>;

  def _4S : NeonI_2VMisc<0b1, u, {size, 0b0}, opcode,
            (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
            asmop # "\t$Rd.4s, $Rn.4s, $FPImm",
            [(set (v4i32 VPR128:$Rd),
               (v4i32 (Neon_cmpz (v4f32 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))],
            NoItinerary>;

  def _2D : NeonI_2VMisc<0b1, u, {size, 0b1}, opcode,
            (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
            asmop # "\t$Rd.2d, $Rn.2d, $FPImm",
            [(set (v2i64 VPR128:$Rd),
               (v2i64 (Neon_cmpz (v2f64 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))],
            NoItinerary>;
}
783 // Vector Compare Mask Equal to Zero (Floating Point)
784 defm FCMEQvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01101, "fcmeq", SETEQ>;
786 // Vector Compare Mask Greater Than or Equal to Zero (Floating Point)
787 defm FCMGEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01100, "fcmge", SETGE>;
789 // Vector Compare Mask Greater Than Zero (Floating Point)
790 defm FCMGTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01100, "fcmgt", SETGT>;
792 // Vector Compare Mask Less Than or Equal To Zero (Floating Point)
793 defm FCMLEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01101, "fcmle", SETLE>;
795 // Vector Compare Mask Less Than Zero (Floating Point)
796 defm FCMLTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01110, "fcmlt", SETLT>;
// Absolute compares: compare |Vn| against |Vm|; selected from the
// ARM/AArch64 vacge/vacgt intrinsics per element width.
798 // Vector Absolute Comparisons (Floating Point)
800 // Vector Absolute Compare Mask Greater Than Or Equal (Floating Point)
801 defm FACGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11101, "facge",
802 int_arm_neon_vacged, int_arm_neon_vacgeq,
803 int_aarch64_neon_vacgeq,
804 v2i32, v4i32, v2i64, 0>;
806 // Vector Absolute Compare Mask Greater Than (Floating Point)
807 defm FACGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11101, "facgt",
808 int_arm_neon_vacgtd, int_arm_neon_vacgtq,
809 int_aarch64_neon_vacgtq,
810 v2i32, v4i32, v2i64, 0>;
812 // Vector Absolute Compare Mask Less Than Or Equal (Floating Point)
813 // FACLE is alias for FACGE with operands reversed.
814 def FACLEvvv_2S : NeonI_compare_aliases<"facle", ".2s", FACGEvvv_2S, VPR64>;
815 def FACLEvvv_4S : NeonI_compare_aliases<"facle", ".4s", FACGEvvv_4S, VPR128>;
816 def FACLEvvv_2D : NeonI_compare_aliases<"facle", ".2d", FACGEvvv_2D, VPR128>;
818 // Vector Absolute Compare Mask Less Than (Floating Point)
819 // FACLT is alias for FACGT with operands reversed.
820 def FACLTvvv_2S : NeonI_compare_aliases<"faclt", ".2s", FACGTvvv_2S, VPR64>;
821 def FACLTvvv_4S : NeonI_compare_aliases<"faclt", ".4s", FACGTvvv_4S, VPR128>;
822 def FACLTvvv_2D : NeonI_compare_aliases<"faclt", ".2d", FACGTvvv_2D, VPR128>;
// Integer three-same-operand arithmetic, instantiated from the shared
// NeonI_3VSame_* multiclasses. The trailing 1/0 argument presumably marks
// commutativity of the operation — TODO confirm against the multiclass def.
824 // Vector halving add (Integer Signed, Unsigned)
825 defm SHADDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00000, "shadd",
826 int_arm_neon_vhadds, 1>;
827 defm UHADDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00000, "uhadd",
828 int_arm_neon_vhaddu, 1>;
830 // Vector halving sub (Integer Signed, Unsigned)
831 defm SHSUBvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00100, "shsub",
832 int_arm_neon_vhsubs, 0>;
833 defm UHSUBvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00100, "uhsub",
834 int_arm_neon_vhsubu, 0>;
836 // Vector rounding halving add (Integer Signed, Unsigned)
837 defm SRHADDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00010, "srhadd",
838 int_arm_neon_vrhadds, 1>;
839 defm URHADDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00010, "urhadd",
840 int_arm_neon_vrhaddu, 1>;
842 // Vector Saturating add (Integer Signed, Unsigned)
843 defm SQADDvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00001, "sqadd",
844 int_arm_neon_vqadds, 1>;
845 defm UQADDvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00001, "uqadd",
846 int_arm_neon_vqaddu, 1>;
848 // Vector Saturating sub (Integer Signed, Unsigned)
849 defm SQSUBvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00101, "sqsub",
850 int_arm_neon_vqsubs, 1>;
851 defm UQSUBvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00101, "uqsub",
852 int_arm_neon_vqsubu, 1>;
854 // Vector Shift Left (Signed and Unsigned Integer)
855 defm SSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01000, "sshl",
856 int_arm_neon_vshifts, 1>;
857 defm USHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01000, "ushl",
858 int_arm_neon_vshiftu, 1>;
860 // Vector Saturating Shift Left (Signed and Unsigned Integer)
861 defm SQSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01001, "sqshl",
862 int_arm_neon_vqshifts, 1>;
863 defm UQSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01001, "uqshl",
864 int_arm_neon_vqshiftu, 1>;
866 // Vector Rounding Shift Left (Signed and Unsigned Integer)
867 defm SRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01010, "srshl",
868 int_arm_neon_vrshifts, 1>;
869 defm URSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01010, "urshl",
870 int_arm_neon_vrshiftu, 1>;
872 // Vector Saturating Rounding Shift Left (Signed and Unsigned Integer)
873 defm SQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01011, "sqrshl",
874 int_arm_neon_vqrshifts, 1>;
875 defm UQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01011, "uqrshl",
876 int_arm_neon_vqrshiftu, 1>;
// Integer and floating-point max/min, plus their pairwise forms.
// The FP variants pass the same intrinsic for all three element widths;
// the ARM vmaxs/vmins/vpmaxs/vpmins intrinsics are type-overloaded.
878 // Vector Maximum (Signed and Unsigned Integer)
879 defm SMAXvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01100, "smax", int_arm_neon_vmaxs, 1>;
880 defm UMAXvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01100, "umax", int_arm_neon_vmaxu, 1>;
882 // Vector Minimum (Signed and Unsigned Integer)
883 defm SMINvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01101, "smin", int_arm_neon_vmins, 1>;
884 defm UMINvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01101, "umin", int_arm_neon_vminu, 1>;
886 // Vector Maximum (Floating Point)
887 defm FMAXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11110, "fmax",
888 int_arm_neon_vmaxs, int_arm_neon_vmaxs,
889 int_arm_neon_vmaxs, v2f32, v4f32, v2f64, 1>;
891 // Vector Minimum (Floating Point)
892 defm FMINvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11110, "fmin",
893 int_arm_neon_vmins, int_arm_neon_vmins,
894 int_arm_neon_vmins, v2f32, v4f32, v2f64, 1>;
896 // Vector maxNum (Floating Point) - prefer a number over a quiet NaN
897 defm FMAXNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11000, "fmaxnm",
898 int_aarch64_neon_vmaxnm,
899 int_aarch64_neon_vmaxnm,
900 int_aarch64_neon_vmaxnm,
901 v2f32, v4f32, v2f64, 1>;
903 // Vector minNum (Floating Point) - prefer a number over a quiet NaN
904 defm FMINNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11000, "fminnm",
905 int_aarch64_neon_vminnm,
906 int_aarch64_neon_vminnm,
907 int_aarch64_neon_vminnm,
908 v2f32, v4f32, v2f64, 1>;
910 // Vector Maximum Pairwise (Signed and Unsigned Integer)
911 defm SMAXPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10100, "smaxp", int_arm_neon_vpmaxs, 1>;
912 defm UMAXPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10100, "umaxp", int_arm_neon_vpmaxu, 1>;
914 // Vector Minimum Pairwise (Signed and Unsigned Integer)
915 defm SMINPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10101, "sminp", int_arm_neon_vpmins, 1>;
916 defm UMINPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10101, "uminp", int_arm_neon_vpminu, 1>;
918 // Vector Maximum Pairwise (Floating Point)
919 defm FMAXPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11110, "fmaxp",
920 int_arm_neon_vpmaxs, int_arm_neon_vpmaxs,
921 int_arm_neon_vpmaxs, v2f32, v4f32, v2f64, 1>;
923 // Vector Minimum Pairwise (Floating Point)
924 defm FMINPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11110, "fminp",
925 int_arm_neon_vpmins, int_arm_neon_vpmins,
926 int_arm_neon_vpmins, v2f32, v4f32, v2f64, 1>;
928 // Vector maxNum Pairwise (Floating Point) - prefer a number over a quiet NaN
929 defm FMAXNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11000, "fmaxnmp",
930 int_aarch64_neon_vpmaxnm,
931 int_aarch64_neon_vpmaxnm,
932 int_aarch64_neon_vpmaxnm,
933 v2f32, v4f32, v2f64, 1>;
935 // Vector minNum Pairwise (Floating Point) - prefer a number over a quiet NaN
936 defm FMINNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11000, "fminnmp",
937 int_aarch64_neon_vpminnm,
938 int_aarch64_neon_vpminnm,
939 int_aarch64_neon_vpminnm,
940 v2f32, v4f32, v2f64, 1>;
// Pairwise addition and multiply variants.
942 // Vector Addition Pairwise (Integer)
943 defm ADDP : NeonI_3VSame_BHSD_sizes<0b0, 0b10111, "addp", int_arm_neon_vpadd, 1>;
945 // Vector Addition Pairwise (Floating Point)
// NOTE(review): the three intrinsic arguments of this defm (original lines
// 947-949) are missing from this listing.
946 defm FADDP : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11010, "faddp",
950 v2f32, v4f32, v2f64, 1>;
952 // Vector Saturating Doubling Multiply High
953 defm SQDMULHvvv : NeonI_3VSame_HS_sizes<0b0, 0b10110, "sqdmulh",
954 int_arm_neon_vqdmulh, 1>;
956 // Vector Saturating Rounding Doubling Multiply High
957 defm SQRDMULHvvv : NeonI_3VSame_HS_sizes<0b1, 0b10110, "sqrdmulh",
958 int_arm_neon_vqrdmulh, 1>;
960 // Vector Multiply Extended (Floating Point)
961 defm FMULXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11011, "fmulx",
962 int_aarch64_neon_vmulx,
963 int_aarch64_neon_vmulx,
964 int_aarch64_neon_vmulx,
965 v2f32, v4f32, v2f64, 1>;
967 // Vector Immediate Instructions
// Asm-parser operand classes for the modified-immediate shift operators
// (LSL/MSL); method names are synthesized per PREFIX.
969 multiclass neon_mov_imm_shift_asmoperands<string PREFIX>
971 def _asmoperand : AsmOperandClass
973 let Name = "NeonMovImmShift" # PREFIX;
974 let RenderMethod = "addNeonMovImmShift" # PREFIX # "Operands";
975 let PredicateMethod = "isNeonMovImmShift" # PREFIX;
979 // Definition of vector immediates shift operands
981 // The selectable use-cases extract the shift operation
982 // information from the OpCmode fields encoded in the immediate.
// NOTE(review): the declaration of HasShift (and of ShiftImm) is missing from
// this listing — presumably `bool HasShift = A64Imms::decodeNeonModShiftImm(...)`.
983 def neon_mod_shift_imm_XFORM : SDNodeXForm<imm, [{
984 uint64_t OpCmode = N->getZExtValue();
986 unsigned ShiftOnesIn;
988 A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
989 if (!HasShift) return SDValue();
990 return CurDAG->getTargetConstant(ShiftImm, MVT::i32);
993 // Vector immediates shift operands which accept LSL and MSL
994 // shift operators with shift value in the range of 0, 8, 16, 24 (LSL),
995 // or 0, 8 (LSLH) or 8, 16 (MSL).
996 defm neon_mov_imm_LSL : neon_mov_imm_shift_asmoperands<"LSL">;
997 defm neon_mov_imm_MSL : neon_mov_imm_shift_asmoperands<"MSL">;
998 // LSLH restricts shift amount to 0, 8 out of 0, 8, 16, 24
999 defm neon_mov_imm_LSLH : neon_mov_imm_shift_asmoperands<"LSLH">;
// Selection-side operands for the modified-immediate shifts. The ImmLeaf
// predicate distinguishes LSL (ShiftOnesIn clear) from MSL (ShiftOnesIn set);
// LSLH reuses the LSL printer/decoder but binds the LSLH asm operand class.
1001 multiclass neon_mov_imm_shift_operands<string PREFIX,
1002 string HALF, string ISHALF, code pred>
1004 def _operand : Operand<i32>, ImmLeaf<i32, pred, neon_mod_shift_imm_XFORM>
1007 "printNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
1009 "DecodeNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
1010 let ParserMatchClass =
1011 !cast<AsmOperandClass>("neon_mov_imm_" # PREFIX # HALF # "_asmoperand");
1015 defm neon_mov_imm_LSL : neon_mov_imm_shift_operands<"LSL", "", "false", [{
1017 unsigned ShiftOnesIn;
1019 A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1020 return (HasShift && !ShiftOnesIn);
1023 defm neon_mov_imm_MSL : neon_mov_imm_shift_operands<"MSL", "", "false", [{
1025 unsigned ShiftOnesIn;
1027 A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1028 return (HasShift && ShiftOnesIn);
1031 defm neon_mov_imm_LSLH : neon_mov_imm_shift_operands<"LSL", "H", "true", [{
1033 unsigned ShiftOnesIn;
1035 A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1036 return (HasShift && !ShiftOnesIn);
// Plain unsigned-immediate operand classes used by the modified-immediate
// instructions. (The `let Name = ...` lines of the first three defs are
// missing from this listing.)
1039 def neon_uimm1_asmoperand : AsmOperandClass
1042 let PredicateMethod = "isUImm<1>";
1043 let RenderMethod = "addImmOperands";
1046 def neon_uimm2_asmoperand : AsmOperandClass
1049 let PredicateMethod = "isUImm<2>";
1050 let RenderMethod = "addImmOperands";
1053 def neon_uimm8_asmoperand : AsmOperandClass
1056 let PredicateMethod = "isUImm<8>";
1057 let RenderMethod = "addImmOperands";
// ImmLeaf accepts any value; range checking is done by the asm operand class.
1060 def neon_uimm8 : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
1061 let ParserMatchClass = neon_uimm8_asmoperand;
1062 let PrintMethod = "printNeonUImm8Operand";
1065 def neon_uimm64_mask_asmoperand : AsmOperandClass
1067 let Name = "NeonUImm64Mask";
1068 let PredicateMethod = "isNeonUImm64Mask";
1069 let RenderMethod = "addNeonUImm64MaskOperands";
1072 // MCOperand for 64-bit bytemask with each byte having only the
1073 // value 0x00 and 0xff is encoded as an unsigned 8-bit value
1074 def neon_uimm64_mask : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
1075 let ParserMatchClass = neon_uimm64_mask_asmoperand;
1076 let PrintMethod = "printNeonUImm64MaskOperand";
// MOVI/MVNI with an LSL-shifted 8-bit immediate. Word forms encode the shift
// in cmode bits 2:1; halfword forms use the single LSLH bit.
1079 multiclass NeonI_mov_imm_lsl_sizes<string asmop, bit op,
1080 SDPatternOperator opnode>
1082 // shift zeros, per word
1083 def _2S : NeonI_1VModImm<0b0, op,
1085 (ins neon_uimm8:$Imm,
1086 neon_mov_imm_LSL_operand:$Simm),
1087 !strconcat(asmop, " $Rd.2s, $Imm$Simm"),
1088 [(set (v2i32 VPR64:$Rd),
1089 (v2i32 (opnode (timm:$Imm),
1090 (neon_mov_imm_LSL_operand:$Simm))))],
1093 let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
1096 def _4S : NeonI_1VModImm<0b1, op,
1098 (ins neon_uimm8:$Imm,
1099 neon_mov_imm_LSL_operand:$Simm),
1100 !strconcat(asmop, " $Rd.4s, $Imm$Simm"),
1101 [(set (v4i32 VPR128:$Rd),
1102 (v4i32 (opnode (timm:$Imm),
1103 (neon_mov_imm_LSL_operand:$Simm))))],
1106 let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
1109 // shift zeros, per halfword
1110 def _4H : NeonI_1VModImm<0b0, op,
1112 (ins neon_uimm8:$Imm,
1113 neon_mov_imm_LSLH_operand:$Simm),
1114 !strconcat(asmop, " $Rd.4h, $Imm$Simm"),
1115 [(set (v4i16 VPR64:$Rd),
1116 (v4i16 (opnode (timm:$Imm),
1117 (neon_mov_imm_LSLH_operand:$Simm))))],
1120 let cmode = {0b1, 0b0, Simm, 0b0};
1123 def _8H : NeonI_1VModImm<0b1, op,
1125 (ins neon_uimm8:$Imm,
1126 neon_mov_imm_LSLH_operand:$Simm),
1127 !strconcat(asmop, " $Rd.8h, $Imm$Simm"),
1128 [(set (v8i16 VPR128:$Rd),
1129 (v8i16 (opnode (timm:$Imm),
1130 (neon_mov_imm_LSLH_operand:$Simm))))],
1133 let cmode = {0b1, 0b0, Simm, 0b0};
// BIC/ORR with shifted immediate: read-modify-write forms, so $src is tied
// to $Rd. opnode is the logical op, neonopnode materializes the immediate.
1137 multiclass NeonI_mov_imm_with_constraint_lsl_sizes<string asmop, bit op,
1138 SDPatternOperator opnode,
1139 SDPatternOperator neonopnode>
1141 let Constraints = "$src = $Rd" in {
1142 // shift zeros, per word
1143 def _2S : NeonI_1VModImm<0b0, op,
1145 (ins VPR64:$src, neon_uimm8:$Imm,
1146 neon_mov_imm_LSL_operand:$Simm),
1147 !strconcat(asmop, " $Rd.2s, $Imm$Simm"),
1148 [(set (v2i32 VPR64:$Rd),
1149 (v2i32 (opnode (v2i32 VPR64:$src),
1150 (v2i32 (bitconvert (v2i32 (neonopnode timm:$Imm,
1151 neon_mov_imm_LSL_operand:$Simm)))))))],
1154 let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
1157 def _4S : NeonI_1VModImm<0b1, op,
1159 (ins VPR128:$src, neon_uimm8:$Imm,
1160 neon_mov_imm_LSL_operand:$Simm),
1161 !strconcat(asmop, " $Rd.4s, $Imm$Simm"),
1162 [(set (v4i32 VPR128:$Rd),
1163 (v4i32 (opnode (v4i32 VPR128:$src),
1164 (v4i32 (bitconvert (v4i32 (neonopnode timm:$Imm,
1165 neon_mov_imm_LSL_operand:$Simm)))))))],
1168 let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
1171 // shift zeros, per halfword
// NOTE(review): the _4H and _8H ins lists use neon_mov_imm_LSLH_operand but
// their selection patterns reference neon_mov_imm_LSL_operand — inconsistent
// with NeonI_mov_imm_lsl_sizes above, which uses LSLH in both places. Verify.
1174 def _4H : NeonI_1VModImm<0b0, op,
1174 (ins VPR64:$src, neon_uimm8:$Imm,
1175 neon_mov_imm_LSLH_operand:$Simm),
1176 !strconcat(asmop, " $Rd.4h, $Imm$Simm"),
1177 [(set (v4i16 VPR64:$Rd),
1178 (v4i16 (opnode (v4i16 VPR64:$src),
1179 (v4i16 (bitconvert (v4i16 (neonopnode timm:$Imm,
1180 neon_mov_imm_LSL_operand:$Simm)))))))],
1183 let cmode = {0b1, 0b0, Simm, 0b1};
1186 def _8H : NeonI_1VModImm<0b1, op,
1188 (ins VPR128:$src, neon_uimm8:$Imm,
1189 neon_mov_imm_LSLH_operand:$Simm),
1190 !strconcat(asmop, " $Rd.8h, $Imm$Simm"),
1191 [(set (v8i16 VPR128:$Rd),
1192 (v8i16 (opnode (v8i16 VPR128:$src),
1193 (v8i16 (bitconvert (v8i16 (neonopnode timm:$Imm,
1194 neon_mov_imm_LSL_operand:$Simm)))))))],
1197 let cmode = {0b1, 0b0, Simm, 0b1};
// MOVI/MVNI with MSL ("shift ones in") operator — word-size only; the MSL
// bit occupies cmode bit 0.
1202 multiclass NeonI_mov_imm_msl_sizes<string asmop, bit op,
1203 SDPatternOperator opnode>
1205 // shift ones, per word
1206 def _2S : NeonI_1VModImm<0b0, op,
1208 (ins neon_uimm8:$Imm,
1209 neon_mov_imm_MSL_operand:$Simm),
1210 !strconcat(asmop, " $Rd.2s, $Imm$Simm"),
1211 [(set (v2i32 VPR64:$Rd),
1212 (v2i32 (opnode (timm:$Imm),
1213 (neon_mov_imm_MSL_operand:$Simm))))],
1216 let cmode = {0b1, 0b1, 0b0, Simm};
1219 def _4S : NeonI_1VModImm<0b1, op,
1221 (ins neon_uimm8:$Imm,
1222 neon_mov_imm_MSL_operand:$Simm),
1223 !strconcat(asmop, " $Rd.4s, $Imm$Simm"),
1224 [(set (v4i32 VPR128:$Rd),
1225 (v4i32 (opnode (timm:$Imm),
1226 (neon_mov_imm_MSL_operand:$Simm))))],
1229 let cmode = {0b1, 0b1, 0b0, Simm};
// Instantiations of the shifted modified-immediate multiclasses, plus
// selection patterns that turn (and x, movi 0xff[,LSL 8]) into BIC.
1233 // Vector Move Immediate Shifted
1234 let isReMaterializable = 1 in {
1235 defm MOVIvi_lsl : NeonI_mov_imm_lsl_sizes<"movi", 0b0, Neon_movi>;
1238 // Vector Move Inverted Immediate Shifted
1239 let isReMaterializable = 1 in {
1240 defm MVNIvi_lsl : NeonI_mov_imm_lsl_sizes<"mvni", 0b1, Neon_mvni>;
1243 // Vector Bitwise Bit Clear (AND NOT) - immediate
1244 let isReMaterializable = 1 in {
1245 defm BICvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"bic", 0b1,
1249 // Vector Bitwise OR - immediate
1251 let isReMaterializable = 1 in {
1252 defm ORRvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"orr", 0b0,
1256 // Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
1257 // LowerBUILD_VECTOR favors lowering MOVI over MVNI.
1258 // BIC immediate instructions selection requires additional patterns to
1259 // transform Neon_movi operands into BIC immediate operands
// XForm: flip the encoded LSLH shift bit (0 <-> 1), because a MOVI of 0xff
// at one shift position equals a BIC mask at the other position.
1261 def neon_mov_imm_LSLH_transform_XFORM : SDNodeXForm<imm, [{
1262 uint64_t OpCmode = N->getZExtValue();
1264 unsigned ShiftOnesIn;
1265 (void)A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
1266 // LSLH restricts shift amount to 0, 8 which are encoded as 0 and 1
1267 // Transform encoded shift amount 0 to 1 and 1 to 0.
1268 return CurDAG->getTargetConstant(!ShiftImm, MVT::i32);
1271 def neon_mov_imm_LSLH_transform_operand
1274 unsigned ShiftOnesIn;
1276 A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1277 return (HasShift && !ShiftOnesIn); }],
1278 neon_mov_imm_LSLH_transform_XFORM>;
1280 // Transform (and A, (4h Neon_movi 0xff)) -> BIC 4h (A, 0x00, LSL 8)
1281 // Transform (and A, (4h Neon_movi 0xff LSL #8)) -> BIC 4h (A, 0x00)
1282 def : Pat<(v4i16 (and VPR64:$src,
1283 (v4i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
1284 (BICvi_lsl_4H VPR64:$src, 0,
1285 neon_mov_imm_LSLH_transform_operand:$Simm)>;
1287 // Transform (and A, (8h Neon_movi 0xff)) -> BIC 8h (A, 0x00, LSL 8)
1288 // Transform (and A, (8h Neon_movi 0xff LSL #8)) -> BIC 8h (A, 0x00)
1289 def : Pat<(v8i16 (and VPR128:$src,
1290 (v8i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
1291 (BICvi_lsl_8H VPR128:$src, 0,
1292 neon_mov_imm_LSLH_transform_operand:$Simm)>;
// Match a logical op against an immediate materialized into 4h/8h lanes,
// viewed through a bitconvert from other vector types; select the 4H/8H
// BIC/ORR immediate instruction. (The INST4H parameter line, original 1297,
// is missing from this listing.)
1295 multiclass Neon_bitwiseVi_patterns<SDPatternOperator opnode,
1296 SDPatternOperator neonopnode,
1298 Instruction INST8H> {
1299 def : Pat<(v8i8 (opnode VPR64:$src,
1300 (bitconvert(v4i16 (neonopnode timm:$Imm,
1301 neon_mov_imm_LSLH_operand:$Simm))))),
1302 (INST4H VPR64:$src, neon_uimm8:$Imm,
1303 neon_mov_imm_LSLH_operand:$Simm)>;
1304 def : Pat<(v1i64 (opnode VPR64:$src,
1305 (bitconvert(v4i16 (neonopnode timm:$Imm,
1306 neon_mov_imm_LSLH_operand:$Simm))))),
1307 (INST4H VPR64:$src, neon_uimm8:$Imm,
1308 neon_mov_imm_LSLH_operand:$Simm)>;
1310 def : Pat<(v16i8 (opnode VPR128:$src,
1311 (bitconvert(v8i16 (neonopnode timm:$Imm,
1312 neon_mov_imm_LSLH_operand:$Simm))))),
1313 (INST8H VPR128:$src, neon_uimm8:$Imm,
1314 neon_mov_imm_LSLH_operand:$Simm)>;
1315 def : Pat<(v4i32 (opnode VPR128:$src,
1316 (bitconvert(v8i16 (neonopnode timm:$Imm,
1317 neon_mov_imm_LSLH_operand:$Simm))))),
1318 (INST8H VPR128:$src, neon_uimm8:$Imm,
1319 neon_mov_imm_LSLH_operand:$Simm)>;
1320 def : Pat<(v2i64 (opnode VPR128:$src,
1321 (bitconvert(v8i16 (neonopnode timm:$Imm,
1322 neon_mov_imm_LSLH_operand:$Simm))))),
1323 (INST8H VPR128:$src, neon_uimm8:$Imm,
1324 neon_mov_imm_LSLH_operand:$Simm)>;
1327 // Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
// NOTE(review): the BIC instantiation passes `or` as opnode, identical to the
// ORR one below. Since BIC is (and x, ~imm) = (and x, (mvni imm)), this looks
// like it should be `and` — confirm against the DAG nodes actually produced.
1328 defm : Neon_bitwiseVi_patterns<or, Neon_mvni, BICvi_lsl_4H, BICvi_lsl_8H>;
1330 // Additional patterns for Vector Bitwise OR - immediate
1331 defm : Neon_bitwiseVi_patterns<or, Neon_movi, ORRvi_lsl_4H, ORRvi_lsl_8H>;
// MSL-form MOVI/MVNI, plus no-shift asm aliases (shift operand fixed to 0)
// for every LSL-form modified-immediate instruction.
1334 // Vector Move Immediate Masked
1335 let isReMaterializable = 1 in {
1336 defm MOVIvi_msl : NeonI_mov_imm_msl_sizes<"movi", 0b0, Neon_movi>;
1339 // Vector Move Inverted Immediate Masked
1340 let isReMaterializable = 1 in {
1341 defm MVNIvi_msl : NeonI_mov_imm_msl_sizes<"mvni", 0b1, Neon_mvni>;
1344 class NeonI_mov_imm_lsl_aliases<string asmop, string asmlane,
1345 Instruction inst, RegisterOperand VPRC>
1346 : NeonInstAlias<!strconcat(asmop, " $Rd," # asmlane # ", $Imm"),
1347 (inst VPRC:$Rd, neon_uimm8:$Imm, 0), 0b0>;
1349 // Aliases for Vector Move Immediate Shifted
1350 def : NeonI_mov_imm_lsl_aliases<"movi", ".2s", MOVIvi_lsl_2S, VPR64>;
1351 def : NeonI_mov_imm_lsl_aliases<"movi", ".4s", MOVIvi_lsl_4S, VPR128>;
1352 def : NeonI_mov_imm_lsl_aliases<"movi", ".4h", MOVIvi_lsl_4H, VPR64>;
1353 def : NeonI_mov_imm_lsl_aliases<"movi", ".8h", MOVIvi_lsl_8H, VPR128>;
1355 // Aliases for Vector Move Inverted Immediate Shifted
1356 def : NeonI_mov_imm_lsl_aliases<"mvni", ".2s", MVNIvi_lsl_2S, VPR64>;
1357 def : NeonI_mov_imm_lsl_aliases<"mvni", ".4s", MVNIvi_lsl_4S, VPR128>;
1358 def : NeonI_mov_imm_lsl_aliases<"mvni", ".4h", MVNIvi_lsl_4H, VPR64>;
1359 def : NeonI_mov_imm_lsl_aliases<"mvni", ".8h", MVNIvi_lsl_8H, VPR128>;
1361 // Aliases for Vector Bitwise Bit Clear (AND NOT) - immediate
1362 def : NeonI_mov_imm_lsl_aliases<"bic", ".2s", BICvi_lsl_2S, VPR64>;
1363 def : NeonI_mov_imm_lsl_aliases<"bic", ".4s", BICvi_lsl_4S, VPR128>;
1364 def : NeonI_mov_imm_lsl_aliases<"bic", ".4h", BICvi_lsl_4H, VPR64>;
1365 def : NeonI_mov_imm_lsl_aliases<"bic", ".8h", BICvi_lsl_8H, VPR128>;
1367 // Aliases for Vector Bitwise OR - immediate
1368 def : NeonI_mov_imm_lsl_aliases<"orr", ".2s", ORRvi_lsl_2S, VPR64>;
1369 def : NeonI_mov_imm_lsl_aliases<"orr", ".4s", ORRvi_lsl_4S, VPR128>;
1370 def : NeonI_mov_imm_lsl_aliases<"orr", ".4h", ORRvi_lsl_4H, VPR64>;
1371 def : NeonI_mov_imm_lsl_aliases<"orr", ".8h", ORRvi_lsl_8H, VPR128>;
// Per-byte MOVI, 64-bit bytemask MOVI forms, and FMOV vector immediates.
1373 // Vector Move Immediate - per byte
1374 let isReMaterializable = 1 in {
1375 def MOVIvi_8B : NeonI_1VModImm<0b0, 0b0,
1376 (outs VPR64:$Rd), (ins neon_uimm8:$Imm),
1377 "movi\t$Rd.8b, $Imm",
1378 [(set (v8i8 VPR64:$Rd),
1379 (v8i8 (Neon_movi (timm:$Imm), (i32 imm))))],
1384 def MOVIvi_16B : NeonI_1VModImm<0b1, 0b0,
1385 (outs VPR128:$Rd), (ins neon_uimm8:$Imm),
1386 "movi\t$Rd.16b, $Imm",
1387 [(set (v16i8 VPR128:$Rd),
1388 (v16i8 (Neon_movi (timm:$Imm), (i32 imm))))],
1394 // Vector Move Immediate - bytemask, per double word
1395 let isReMaterializable = 1 in {
1396 def MOVIvi_2D : NeonI_1VModImm<0b1, 0b1,
1397 (outs VPR128:$Rd), (ins neon_uimm64_mask:$Imm),
// NOTE(review): stray space after "\t" in this asm string ("movi\t $Rd.2d"),
// unlike the 8b/16b forms above — verify whether intentional.
1398 "movi\t $Rd.2d, $Imm",
1399 [(set (v2i64 VPR128:$Rd),
1400 (v2i64 (Neon_movi (timm:$Imm), (i32 imm))))],
1406 // Vector Move Immediate - bytemask, one doubleword
1408 let isReMaterializable = 1 in {
1409 def MOVIdi : NeonI_1VModImm<0b0, 0b1,
1410 (outs FPR64:$Rd), (ins neon_uimm64_mask:$Imm),
1412 [(set (f64 FPR64:$Rd),
1414 (v1i64 (Neon_movi (timm:$Imm), (i32 imm))))))],
1420 // Vector Floating Point Move Immediate
1422 class NeonI_FMOV_impl<string asmlane, RegisterOperand VPRC, ValueType OpTy,
1423 Operand immOpType, bit q, bit op>
1424 : NeonI_1VModImm<q, op,
1425 (outs VPRC:$Rd), (ins immOpType:$Imm),
1426 "fmov\t$Rd" # asmlane # ", $Imm",
1427 [(set (OpTy VPRC:$Rd),
1428 (OpTy (Neon_fmovi (timm:$Imm))))],
1433 let isReMaterializable = 1 in {
1434 def FMOVvi_2S : NeonI_FMOV_impl<".2s", VPR64, v2f32, fmov32_operand, 0b0, 0b0>;
1435 def FMOVvi_4S : NeonI_FMOV_impl<".4s", VPR128, v4f32, fmov32_operand, 0b1, 0b0>;
1436 def FMOVvi_2D : NeonI_FMOV_impl<".2d", VPR128, v2f64, fmov64_operand, 0b1, 0b1>;
1439 // Vector Shift (Immediate)
1440 // Immediate in [0, 63]
1441 def imm0_63 : Operand<i32> {
1442 let ParserMatchClass = uimm6_asmoperand;
1445 // Shift Right Immediate - A shift right immediate is encoded differently from
1446 // other shift immediates. The immh:immb field is encoded like so:
1449 // 8 immh:immb<6:3> = '0001xxx', <imm> is encoded in immh:immb<2:0>
1450 // 16 immh:immb<6:4> = '001xxxx', <imm> is encoded in immh:immb<3:0>
1451 // 32 immh:immb<6:5> = '01xxxxx', <imm> is encoded in immh:immb<4:0>
1452 // 64 immh:immb<6> = '1xxxxxx', <imm> is encoded in immh:immb<5:0>
// Asm operand + operand classes per element width; encoder/decoder/parser
// method names are synthesized from OFFSET.
1453 class shr_imm_asmoperands<string OFFSET> : AsmOperandClass {
1454 let Name = "ShrImm" # OFFSET;
1455 let RenderMethod = "addImmOperands";
1456 let DiagnosticType = "ShrImm" # OFFSET;
1459 class shr_imm<string OFFSET> : Operand<i32> {
1460 let EncoderMethod = "getShiftRightImm" # OFFSET;
1461 let DecoderMethod = "DecodeShiftRightImm" # OFFSET;
1462 let ParserMatchClass =
1463 !cast<AsmOperandClass>("shr_imm" # OFFSET # "_asmoperand");
1466 def shr_imm8_asmoperand : shr_imm_asmoperands<"8">;
1467 def shr_imm16_asmoperand : shr_imm_asmoperands<"16">;
1468 def shr_imm32_asmoperand : shr_imm_asmoperands<"32">;
1469 def shr_imm64_asmoperand : shr_imm_asmoperands<"64">;
1471 def shr_imm8 : shr_imm<"8">;
1472 def shr_imm16 : shr_imm<"16">;
1473 def shr_imm32 : shr_imm<"32">;
1474 def shr_imm64 : shr_imm<"64">;
// Generic 2-reg shift-by-immediate instruction; the shift amount is splatted
// via Neon_dupImm and combined with OpNode (shl/sra/srl).
1476 class N2VShift<bit q, bit u, bits<5> opcode, string asmop, string T,
1477 RegisterOperand VPRC, ValueType Ty, Operand ImmTy, SDNode OpNode>
1478 : NeonI_2VShiftImm<q, u, opcode,
1479 (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
1480 asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1481 [(set (Ty VPRC:$Rd),
1482 (Ty (OpNode (Ty VPRC:$Rn),
1483 (Ty (Neon_dupImm (i32 imm:$Imm))))))],
// Shift-left variants: immediate operands are plain uimm3/4/5 (or imm0_63);
// the immh prefix bits select the element size.
1486 multiclass NeonI_N2VShL<bit u, bits<5> opcode, string asmop> {
1487 // 64-bit vector types.
1488 def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, uimm3, shl> {
1489 let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
1492 def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, uimm4, shl> {
1493 let Inst{22-20} = 0b001; // immh:immb = 001xxxx
1496 def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, uimm5, shl> {
1497 let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
1500 // 128-bit vector types.
1501 def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, uimm3, shl> {
1502 let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
1505 def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, uimm4, shl> {
1506 let Inst{22-20} = 0b001; // immh:immb = 001xxxx
1509 def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, uimm5, shl> {
1510 let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
1513 def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, imm0_63, shl> {
1514 let Inst{22} = 0b1; // immh:immb = 1xxxxxx
// Shift-right variants: use the shr_imm* operands (amount in [1, width]).
1518 multiclass NeonI_N2VShR<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
1519 def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1521 let Inst{22-19} = 0b0001;
1524 def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1526 let Inst{22-20} = 0b001;
1529 def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1531 let Inst{22-21} = 0b01;
1534 def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1536 let Inst{22-19} = 0b0001;
1539 def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1541 let Inst{22-20} = 0b001;
1544 def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1546 let Inst{22-21} = 0b01;
1549 def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
1556 defm SHLvvi : NeonI_N2VShL<0b0, 0b01010, "shl">;
1559 defm SSHRvvi : NeonI_N2VShR<0b0, 0b00000, "sshr", sra>;
1560 defm USHRvvi : NeonI_N2VShR<0b1, 0b00000, "ushr", srl>;
// PatFrags extracting the high or low half of a 128-bit vector.
// (The low-half fragments' index operands, and the capitalization of
// Neon_low* vs Neon_High*, differ between the two groups as written.)
1562 def Neon_High16B : PatFrag<(ops node:$in),
1563 (extract_subvector (v16i8 node:$in), (iPTR 8))>;
1564 def Neon_High8H : PatFrag<(ops node:$in),
1565 (extract_subvector (v8i16 node:$in), (iPTR 4))>;
1566 def Neon_High4S : PatFrag<(ops node:$in),
1567 (extract_subvector (v4i32 node:$in), (iPTR 2))>;
1569 def Neon_low8H : PatFrag<(ops node:$in),
1570 (v4i16 (extract_subvector (v8i16 node:$in),
1572 def Neon_low4S : PatFrag<(ops node:$in),
1573 (v2i32 (extract_subvector (v4i32 node:$in),
1575 def Neon_low4f : PatFrag<(ops node:$in),
1576 (v2f32 (extract_subvector (v4f32 node:$in),
// Widening shift-left-long: extend (sext/zext) the 64-bit source, then
// shift left by the immediate. The "2" (High) forms read the upper half of
// a 128-bit source via the Neon_High* fragments.
1579 class N2VShiftLong<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1580 string SrcT, ValueType DestTy, ValueType SrcTy,
1581 Operand ImmTy, SDPatternOperator ExtOp>
1582 : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
1583 (ins VPR64:$Rn, ImmTy:$Imm),
1584 asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
1585 [(set (DestTy VPR128:$Rd),
1587 (DestTy (ExtOp (SrcTy VPR64:$Rn))),
1588 (DestTy (Neon_dupImm (i32 imm:$Imm))))))],
1591 class N2VShiftLongHigh<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1592 string SrcT, ValueType DestTy, ValueType SrcTy,
1593 int StartIndex, Operand ImmTy,
1594 SDPatternOperator ExtOp, PatFrag getTop>
1595 : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
1596 (ins VPR128:$Rn, ImmTy:$Imm),
1597 asmop # "2\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
1598 [(set (DestTy VPR128:$Rd),
1601 (SrcTy (getTop VPR128:$Rn)))),
1602 (DestTy (Neon_dupImm (i32 imm:$Imm))))))],
1605 multiclass NeonI_N2VShLL<string prefix, bit u, bits<5> opcode, string asmop,
1607 // 64-bit vector types.
1608 def _8B : N2VShiftLong<0b0, u, opcode, asmop, "8h", "8b", v8i16, v8i8,
1610 let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
1613 def _4H : N2VShiftLong<0b0, u, opcode, asmop, "4s", "4h", v4i32, v4i16,
1615 let Inst{22-20} = 0b001; // immh:immb = 001xxxx
1618 def _2S : N2VShiftLong<0b0, u, opcode, asmop, "2d", "2s", v2i64, v2i32,
1620 let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
1623 // 128-bit vector types
1624 def _16B : N2VShiftLongHigh<0b1, u, opcode, asmop, "8h", "16b",
1625 v8i16, v8i8, 8, uimm3, ExtOp, Neon_High16B> {
1626 let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
1629 def _8H : N2VShiftLongHigh<0b1, u, opcode, asmop, "4s", "8h",
1630 v4i32, v4i16, 4, uimm4, ExtOp, Neon_High8H> {
1631 let Inst{22-20} = 0b001; // immh:immb = 001xxxx
1634 def _4S : N2VShiftLongHigh<0b1, u, opcode, asmop, "2d", "4s",
1635 v2i64, v2i32, 2, uimm5, ExtOp, Neon_High4S> {
1636 let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
1639 // Use other patterns to match when the immediate is 0.
1640 def : Pat<(v8i16 (ExtOp (v8i8 VPR64:$Rn))),
1641 (!cast<Instruction>(prefix # "_8B") VPR64:$Rn, 0)>;
1643 def : Pat<(v4i32 (ExtOp (v4i16 VPR64:$Rn))),
1644 (!cast<Instruction>(prefix # "_4H") VPR64:$Rn, 0)>;
1646 def : Pat<(v2i64 (ExtOp (v2i32 VPR64:$Rn))),
1647 (!cast<Instruction>(prefix # "_2S") VPR64:$Rn, 0)>;
1649 def : Pat<(v8i16 (ExtOp (v8i8 (Neon_High16B VPR128:$Rn)))),
1650 (!cast<Instruction>(prefix # "_16B") VPR128:$Rn, 0)>;
1652 def : Pat<(v4i32 (ExtOp (v4i16 (Neon_High8H VPR128:$Rn)))),
1653 (!cast<Instruction>(prefix # "_8H") VPR128:$Rn, 0)>;
1655 def : Pat<(v2i64 (ExtOp (v2i32 (Neon_High4S VPR128:$Rn)))),
1656 (!cast<Instruction>(prefix # "_4S") VPR128:$Rn, 0)>;
1660 defm SSHLLvvi : NeonI_N2VShLL<"SSHLLvvi", 0b0, 0b10100, "sshll", sext>;
1661 defm USHLLvvi : NeonI_N2VShLL<"USHLLvvi", 0b1, 0b10100, "ushll", zext>;
1663 // Rounding/Saturating shift
// Like N2VShift but selected from an intrinsic-style SDPatternOperator
// instead of a plain shift SDNode.
1664 class N2VShift_RQ<bit q, bit u, bits<5> opcode, string asmop, string T,
1665 RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
1666 SDPatternOperator OpNode>
1667 : NeonI_2VShiftImm<q, u, opcode,
1668 (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
1669 asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1670 [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$Rn),
1674 // shift right (vector by immediate)
1675 multiclass NeonI_N2VShR_RQ<bit u, bits<5> opcode, string asmop,
1676 SDPatternOperator OpNode> {
1677 def _8B : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1679 let Inst{22-19} = 0b0001;
1682 def _4H : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1684 let Inst{22-20} = 0b001;
1687 def _2S : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1689 let Inst{22-21} = 0b01;
1692 def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1694 let Inst{22-19} = 0b0001;
1697 def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1699 let Inst{22-20} = 0b001;
1702 def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1704 let Inst{22-21} = 0b01;
1707 def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
// Shift-left flavor of the same multiclass (uimm operands instead of shr_imm).
1713 multiclass NeonI_N2VShL_Q<bit u, bits<5> opcode, string asmop,
1714 SDPatternOperator OpNode> {
1715 // 64-bit vector types.
1716 def _8B : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, uimm3,
1718 let Inst{22-19} = 0b0001;
1721 def _4H : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, uimm4,
1723 let Inst{22-20} = 0b001;
1726 def _2S : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, uimm5,
1728 let Inst{22-21} = 0b01;
1731 // 128-bit vector types.
1732 def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, uimm3,
1734 let Inst{22-19} = 0b0001;
1737 def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, uimm4,
1739 let Inst{22-20} = 0b001;
1742 def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, uimm5,
1744 let Inst{22-21} = 0b01;
1747 def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, imm0_63,
1753 // Rounding shift right
1754 defm SRSHRvvi : NeonI_N2VShR_RQ<0b0, 0b00100, "srshr",
1755 int_aarch64_neon_vsrshr>;
1756 defm URSHRvvi : NeonI_N2VShR_RQ<0b1, 0b00100, "urshr",
1757 int_aarch64_neon_vurshr>;
1759 // Saturating shift left unsigned
1760 defm SQSHLUvvi : NeonI_N2VShL_Q<0b1, 0b01100, "sqshlu", int_aarch64_neon_vsqshlu>;
1762 // Saturating shift left
1763 defm SQSHLvvi : NeonI_N2VShL_Q<0b0, 0b01110, "sqshl", Neon_sqrshlImm>;
1764 defm UQSHLvvi : NeonI_N2VShL_Q<0b1, 0b01110, "uqshl", Neon_uqrshlImm>;
// Base class for shift-and-accumulate: shift each element of $Rn by the
// immediate (splatted via Neon_dupImm), then add into the accumulator.
// "$src = $Rd" ties the accumulator input to the destination register.
1766 class N2VShiftAdd<bit q, bit u, bits<5> opcode, string asmop, string T,
1767 RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
1769 : NeonI_2VShiftImm<q, u, opcode,
1770 (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
1771 asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1772 [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
1773 (Ty (OpNode (Ty VPRC:$Rn),
1774 (Ty (Neon_dupImm (i32 imm:$Imm))))))))],
1776 let Constraints = "$src = $Rd";
1779 // Shift Right accumulate
1780 multiclass NeonI_N2VShRAdd<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
1781 def _8B : N2VShiftAdd<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1783 let Inst{22-19} = 0b0001;
1786 def _4H : N2VShiftAdd<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1788 let Inst{22-20} = 0b001;
1791 def _2S : N2VShiftAdd<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1793 let Inst{22-21} = 0b01;
1796 def _16B : N2VShiftAdd<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1798 let Inst{22-19} = 0b0001;
1801 def _8H : N2VShiftAdd<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1803 let Inst{22-20} = 0b001;
1806 def _4S : N2VShiftAdd<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1808 let Inst{22-21} = 0b01;
1811 def _2D : N2VShiftAdd<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
1817 // Shift right and accumulate
// SSRA/USRA: plain (non-rounding) arithmetic/logical shift right, then add.
1818 defm SSRAvvi : NeonI_N2VShRAdd<0, 0b00010, "ssra", sra>;
1819 defm USRAvvi : NeonI_N2VShRAdd<1, 0b00010, "usra", srl>;
1821 // Rounding shift accumulate
// Like N2VShiftAdd, but OpNode takes the shift amount directly as an i32
// (matching the vsrshr/vurshr intrinsic signatures) instead of a splatted
// vector immediate.
1822 class N2VShiftAdd_R<bit q, bit u, bits<5> opcode, string asmop, string T,
1823 RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
1824 SDPatternOperator OpNode>
1825 : NeonI_2VShiftImm<q, u, opcode,
1826 (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
1827 asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1828 [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
1829 (Ty (OpNode (Ty VPRC:$Rn), (i32 imm:$Imm))))))],
1831 let Constraints = "$src = $Rd";
1834 multiclass NeonI_N2VShRAdd_R<bit u, bits<5> opcode, string asmop,
1835 SDPatternOperator OpNode> {
1836 def _8B : N2VShiftAdd_R<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1838 let Inst{22-19} = 0b0001;
1841 def _4H : N2VShiftAdd_R<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1843 let Inst{22-20} = 0b001;
1846 def _2S : N2VShiftAdd_R<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1848 let Inst{22-21} = 0b01;
1851 def _16B : N2VShiftAdd_R<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1853 let Inst{22-19} = 0b0001;
1856 def _8H : N2VShiftAdd_R<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1858 let Inst{22-20} = 0b001;
1861 def _4S : N2VShiftAdd_R<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1863 let Inst{22-21} = 0b01;
1866 def _2D : N2VShiftAdd_R<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
1872 // Rounding shift right and accumulate
1873 defm SRSRAvvi : NeonI_N2VShRAdd_R<0, 0b00110, "srsra", int_aarch64_neon_vsrshr>;
1874 defm URSRAvvi : NeonI_N2VShRAdd_R<1, 0b00110, "ursra", int_aarch64_neon_vurshr>;
1876 // Shift insert by immediate
// Base class for SLI/SRI: the operator takes ($src, $Rn, imm), where $src
// is the previous destination value (tied via Constraints) — per the ARM
// SLI/SRI semantics the bits not written by the shifted $Rn are preserved
// from $src.
1877 class N2VShiftIns<bit q, bit u, bits<5> opcode, string asmop, string T,
1878 RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
1879 SDPatternOperator OpNode>
1880 : NeonI_2VShiftImm<q, u, opcode,
1881 (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
1882 asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1883 [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$src), (Ty VPRC:$Rn),
1886 let Constraints = "$src = $Rd";
1889 // shift left insert (vector by immediate)
1890 multiclass NeonI_N2VShLIns<bit u, bits<5> opcode, string asmop> {
1891 def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, uimm3,
1892 int_aarch64_neon_vsli> {
1893 let Inst{22-19} = 0b0001;
1896 def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, uimm4,
1897 int_aarch64_neon_vsli> {
1898 let Inst{22-20} = 0b001;
1901 def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, uimm5,
1902 int_aarch64_neon_vsli> {
1903 let Inst{22-21} = 0b01;
1906 // 128-bit vector types
1907 def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, uimm3,
1908 int_aarch64_neon_vsli> {
1909 let Inst{22-19} = 0b0001;
1912 def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, uimm4,
1913 int_aarch64_neon_vsli> {
1914 let Inst{22-20} = 0b001;
1917 def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, uimm5,
1918 int_aarch64_neon_vsli> {
1919 let Inst{22-21} = 0b01;
1922 def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, imm0_63,
1923 int_aarch64_neon_vsli> {
1928 // shift right insert (vector by immediate)
// Same structure as the SLI multiclass above, but using the vsri intrinsic
// and the right-shift immediate operands (shr_immN, range 1..bits).
1929 multiclass NeonI_N2VShRIns<bit u, bits<5> opcode, string asmop> {
1930 // 64-bit vector types.
1931 def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1932 int_aarch64_neon_vsri> {
1933 let Inst{22-19} = 0b0001;
1936 def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1937 int_aarch64_neon_vsri> {
1938 let Inst{22-20} = 0b001;
1941 def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1942 int_aarch64_neon_vsri> {
1943 let Inst{22-21} = 0b01;
1946 // 128-bit vector types
1947 def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1948 int_aarch64_neon_vsri> {
1949 let Inst{22-19} = 0b0001;
1952 def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1953 int_aarch64_neon_vsri> {
1954 let Inst{22-20} = 0b001;
1957 def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1958 int_aarch64_neon_vsri> {
1959 let Inst{22-21} = 0b01;
1962 def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
1963 int_aarch64_neon_vsri> {
1968 // Shift left and insert
1969 defm SLIvvi : NeonI_N2VShLIns<0b1, 0b01010, "sli">;
1971 // Shift right and insert
1972 defm SRIvvi : NeonI_N2VShRIns<0b1, 0b01000, "sri">;
// Narrowing shift-right base class: 128-bit source, 64-bit (narrow) result.
// No pattern is attached here; selection patterns are defined separately
// below (Neon_shiftNarrow_*).
1974 class N2VShR_Narrow<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1975 string SrcT, Operand ImmTy>
1976 : NeonI_2VShiftImm<q, u, opcode,
1977 (outs VPR64:$Rd), (ins VPR128:$Rn, ImmTy:$Imm),
1978 asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
// "High" (…2) variant: writes the upper half of a 128-bit destination, so
// the existing low half is threaded through the tied $src operand.
1981 class N2VShR_Narrow_Hi<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1982 string SrcT, Operand ImmTy>
1983 : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
1984 (ins VPR128:$src, VPR128:$Rn, ImmTy:$Imm),
1985 asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
1987 let Constraints = "$src = $Rd";
1990 // shift right narrow (vector by immediate)
1991 multiclass NeonI_N2VShR_Narrow<bit u, bits<5> opcode, string asmop> {
1992 def _8B : N2VShR_Narrow<0b0, u, opcode, asmop, "8b", "8h", shr_imm8> {
1993 let Inst{22-19} = 0b0001;
1996 def _4H : N2VShR_Narrow<0b0, u, opcode, asmop, "4h", "4s", shr_imm16> {
1997 let Inst{22-20} = 0b001;
2000 def _2S : N2VShR_Narrow<0b0, u, opcode, asmop, "2s", "2d", shr_imm32> {
2001 let Inst{22-21} = 0b01;
2004 // Shift Narrow High
2005 def _16B : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "16b", "8h",
2007 let Inst{22-19} = 0b0001;
2010 def _8H : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "8h", "4s",
2012 let Inst{22-20} = 0b001;
2015 def _4S : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "4s", "2d",
2017 let Inst{22-21} = 0b01;
2021 // Shift right narrow
2022 defm SHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10000, "shrn">;
2024 // Shift right narrow (prefix Q is saturating, prefix R is rounding)
2025 defm QSHRUNvvi :NeonI_N2VShR_Narrow<0b1, 0b10000, "sqshrun">;
2026 defm RSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10001, "rshrn">;
2027 defm QRSHRUNvvi : NeonI_N2VShR_Narrow<0b1, 0b10001, "sqrshrun">;
2028 defm SQSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10010, "sqshrn">;
2029 defm UQSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10010, "uqshrn">;
2030 defm SQRSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10011, "sqrshrn">;
2031 defm UQRSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10011, "uqrshrn">;
// PatFrags that combine two 64-bit halves into one 128-bit vector via
// concat_vectors ($Rm becomes the low half, $Rn the high half).
2033 def Neon_combine_2D : PatFrag<(ops node:$Rm, node:$Rn),
2034 (v2i64 (concat_vectors (v1i64 node:$Rm),
2035 (v1i64 node:$Rn)))>;
2036 def Neon_combine_8H : PatFrag<(ops node:$Rm, node:$Rn),
2037 (v8i16 (concat_vectors (v4i16 node:$Rm),
2038 (v4i16 node:$Rn)))>;
2039 def Neon_combine_4S : PatFrag<(ops node:$Rm, node:$Rn),
2040 (v4i32 (concat_vectors (v2i32 node:$Rm),
2041 (v2i32 node:$Rn)))>;
2042 def Neon_combine_4f : PatFrag<(ops node:$Rm, node:$Rn),
2043 (v4f32 (concat_vectors (v2f32 node:$Rm),
2044 (v2f32 node:$Rn)))>;
2045 def Neon_combine_2d : PatFrag<(ops node:$Rm, node:$Rn),
2046 (v2f64 (concat_vectors (v1f64 node:$Rm),
2047 (v1f64 node:$Rn)))>;
// PatFrags matching a per-element logical (srl) or arithmetic (sra)
// right shift by a splatted i32 immediate (Neon_dupImm).
2049 def Neon_lshrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
2050 (v8i16 (srl (v8i16 node:$lhs),
2051 (v8i16 (Neon_dupImm (i32 node:$rhs)))))>;
2052 def Neon_lshrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
2053 (v4i32 (srl (v4i32 node:$lhs),
2054 (v4i32 (Neon_dupImm (i32 node:$rhs)))))>;
2055 def Neon_lshrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
2056 (v2i64 (srl (v2i64 node:$lhs),
2057 (v2i64 (Neon_dupImm (i32 node:$rhs)))))>;
2058 def Neon_ashrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
2059 (v8i16 (sra (v8i16 node:$lhs),
2060 (v8i16 (Neon_dupImm (i32 node:$rhs)))))>;
2061 def Neon_ashrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
2062 (v4i32 (sra (v4i32 node:$lhs),
2063 (v4i32 (Neon_dupImm (i32 node:$rhs)))))>;
2064 def Neon_ashrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
2065 (v2i64 (sra (v2i64 node:$lhs),
2066 (v2i64 (Neon_dupImm (i32 node:$rhs)))))>;
2068 // Normal shift right narrow is matched by IR (srl/sra, trunc, concat_vectors)
// Maps IR of the form trunc(shift(x, splat imm)) onto SHRN. The "2" (high)
// forms additionally match the concat with the existing low half; the
// SUBREG_TO_REG places the 64-bit $src into the low half of the 128-bit
// destination register.
2069 multiclass Neon_shiftNarrow_patterns<string shr> {
2070 def : Pat<(v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H") VPR128:$Rn,
2072 (SHRNvvi_8B VPR128:$Rn, imm:$Imm)>;
2073 def : Pat<(v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S") VPR128:$Rn,
2075 (SHRNvvi_4H VPR128:$Rn, imm:$Imm)>;
2076 def : Pat<(v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D") VPR128:$Rn,
2078 (SHRNvvi_2S VPR128:$Rn, imm:$Imm)>;
2080 def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2081 (v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H")
2082 VPR128:$Rn, imm:$Imm)))))),
2083 (SHRNvvi_16B (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2084 VPR128:$Rn, imm:$Imm)>;
2085 def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2086 (v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S")
2087 VPR128:$Rn, imm:$Imm)))))),
2088 (SHRNvvi_8H (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2089 VPR128:$Rn, imm:$Imm)>;
2090 def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2091 (v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D")
2092 VPR128:$Rn, imm:$Imm)))))),
2093 (SHRNvvi_4S (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2094 VPR128:$Rn, imm:$Imm)>;
// Same structure for the saturating/rounding narrow-shift intrinsics:
// "op" is the intrinsic and "prefix" names the instruction family.
2097 multiclass Neon_shiftNarrow_QR_patterns<SDPatternOperator op, string prefix> {
2098 def : Pat<(v8i8 (op (v8i16 VPR128:$Rn), imm:$Imm)),
2099 (!cast<Instruction>(prefix # "_8B") VPR128:$Rn, imm:$Imm)>;
2100 def : Pat<(v4i16 (op (v4i32 VPR128:$Rn), imm:$Imm)),
2101 (!cast<Instruction>(prefix # "_4H") VPR128:$Rn, imm:$Imm)>;
2102 def : Pat<(v2i32 (op (v2i64 VPR128:$Rn), imm:$Imm)),
2103 (!cast<Instruction>(prefix # "_2S") VPR128:$Rn, imm:$Imm)>;
2105 def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2106 (v1i64 (bitconvert (v8i8 (op (v8i16 VPR128:$Rn), imm:$Imm))))),
2107 (!cast<Instruction>(prefix # "_16B")
2108 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2109 VPR128:$Rn, imm:$Imm)>;
2110 def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2111 (v1i64 (bitconvert (v4i16 (op (v4i32 VPR128:$Rn), imm:$Imm))))),
2112 (!cast<Instruction>(prefix # "_8H")
2113 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2114 VPR128:$Rn, imm:$Imm)>;
2115 def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2116 (v1i64 (bitconvert (v2i32 (op (v2i64 VPR128:$Rn), imm:$Imm))))),
2117 (!cast<Instruction>(prefix # "_4S")
2118 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2119 VPR128:$Rn, imm:$Imm)>;
2122 defm : Neon_shiftNarrow_patterns<"lshr">;
2123 defm : Neon_shiftNarrow_patterns<"ashr">;
2125 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrun, "QSHRUNvvi">;
2126 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vrshrn, "RSHRNvvi">;
2127 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrun, "QRSHRUNvvi">;
2128 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrn, "SQSHRNvvi">;
2129 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqshrn, "UQSHRNvvi">;
2130 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrn, "SQRSHRNvvi">;
2131 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqrshrn, "UQRSHRNvvi">;
2133 // Convert between fixed-point and floating-point
// Base class: conversion with an immediate #fbits operand; DestTy/SrcTy
// give the (integer <-> FP) vector types in either direction.
2134 class N2VCvt_Fx<bit q, bit u, bits<5> opcode, string asmop, string T,
2135 RegisterOperand VPRC, ValueType DestTy, ValueType SrcTy,
2136 Operand ImmTy, SDPatternOperator IntOp>
2137 : NeonI_2VShiftImm<q, u, opcode,
2138 (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
2139 asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
2140 [(set (DestTy VPRC:$Rd), (DestTy (IntOp (SrcTy VPRC:$Rn),
// Fixed-point -> floating-point (scvtf/ucvtf with #fbits).
2144 multiclass NeonI_N2VCvt_Fx2fp<bit u, bits<5> opcode, string asmop,
2145 SDPatternOperator IntOp> {
2146 def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2f32, v2i32,
2148 let Inst{22-21} = 0b01;
2151 def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4f32, v4i32,
2153 let Inst{22-21} = 0b01;
2156 def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2f64, v2i64,
// Floating-point -> fixed-point (fcvtzs/fcvtzu with #fbits).
2162 multiclass NeonI_N2VCvt_Fp2fx<bit u, bits<5> opcode, string asmop,
2163 SDPatternOperator IntOp> {
2164 def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2i32, v2f32,
2166 let Inst{22-21} = 0b01;
2169 def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4i32, v4f32,
2171 let Inst{22-21} = 0b01;
2174 def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2i64, v2f64,
2180 // Convert fixed-point to floating-point
2181 defm VCVTxs2f : NeonI_N2VCvt_Fx2fp<0, 0b11100, "scvtf",
2182 int_arm_neon_vcvtfxs2fp>;
2183 defm VCVTxu2f : NeonI_N2VCvt_Fx2fp<1, 0b11100, "ucvtf",
2184 int_arm_neon_vcvtfxu2fp>;
2186 // Convert floating-point to fixed-point
2187 defm VCVTf2xs : NeonI_N2VCvt_Fp2fx<0, 0b11111, "fcvtzs",
2188 int_arm_neon_vcvtfp2fxs>;
2189 defm VCVTf2xu : NeonI_N2VCvt_Fp2fx<1, 0b11111, "fcvtzu",
2190 int_arm_neon_vcvtfp2fxu>;
// PatFrags that extend (sext/zext) the high half of a 128-bit vector to a
// full-width vector; instantiated below as NI_sext_high / NI_zext_high and
// used by the long/wide "2" instruction patterns.
2192 multiclass Neon_sshll2_0<SDNode ext>
2194 def _v8i8 : PatFrag<(ops node:$Rn),
2195 (v8i16 (ext (v8i8 (Neon_High16B node:$Rn))))>;
2196 def _v4i16 : PatFrag<(ops node:$Rn),
2197 (v4i32 (ext (v4i16 (Neon_High8H node:$Rn))))>;
2198 def _v2i32 : PatFrag<(ops node:$Rn),
2199 (v2i64 (ext (v2i32 (Neon_High4S node:$Rn))))>;
2202 defm NI_sext_high : Neon_sshll2_0<sext>;
2203 defm NI_zext_high : Neon_sshll2_0<zext>;
2205 // The following are for the instruction class (3V Diff)
2207 // normal long/long2 pattern
// Long form: both operands are extended (via "ext") to the wider element
// type before the operation; result is always 128-bit.
2208 class NeonI_3VDL<bit q, bit u, bits<2> size, bits<4> opcode,
2209 string asmop, string ResS, string OpS,
2210 SDPatternOperator opnode, SDPatternOperator ext,
2211 RegisterOperand OpVPR,
2212 ValueType ResTy, ValueType OpTy>
2213 : NeonI_3VDiff<q, u, size, opcode,
2214 (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2215 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2216 [(set (ResTy VPR128:$Rd),
2217 (ResTy (opnode (ResTy (ext (OpTy OpVPR:$Rn))),
2218 (ResTy (ext (OpTy OpVPR:$Rm))))))],
// Signed long: extends the low 64-bit halves with sext.
2221 multiclass NeonI_3VDL_s<bit u, bits<4> opcode,
2222 string asmop, SDPatternOperator opnode,
2225 let isCommutable = Commutable in {
2226 def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2227 opnode, sext, VPR64, v8i16, v8i8>;
2228 def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2229 opnode, sext, VPR64, v4i32, v4i16>;
2230 def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2231 opnode, sext, VPR64, v2i64, v2i32>;
// Signed long2: operates on the high halves via NI_sext_high_*.
2235 multiclass NeonI_3VDL2_s<bit u, bits<4> opcode,
2236 string asmop, SDPatternOperator opnode,
2239 let isCommutable = Commutable in {
2240 def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2241 opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
2242 def _4s8h : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2243 opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
2244 def _2d4s : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2245 opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
// Unsigned long: same as the signed variants but with zext.
2249 multiclass NeonI_3VDL_u<bit u, bits<4> opcode,
2250 string asmop, SDPatternOperator opnode,
2253 let isCommutable = Commutable in {
2254 def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2255 opnode, zext, VPR64, v8i16, v8i8>;
2256 def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2257 opnode, zext, VPR64, v4i32, v4i16>;
2258 def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2259 opnode, zext, VPR64, v2i64, v2i32>;
2263 multiclass NeonI_3VDL2_u<bit u, bits<4> opcode,
2264 string asmop, SDPatternOperator opnode,
2267 let isCommutable = Commutable in {
2268 def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2269 opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
2270 def _4s8h : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2271 opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
2272 def _2d4s : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2273 opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
// Add is commutable (1); sub is not (0).
2277 defm SADDLvvv : NeonI_3VDL_s<0b0, 0b0000, "saddl", add, 1>;
2278 defm UADDLvvv : NeonI_3VDL_u<0b1, 0b0000, "uaddl", add, 1>;
2280 defm SADDL2vvv : NeonI_3VDL2_s<0b0, 0b0000, "saddl2", add, 1>;
2281 defm UADDL2vvv : NeonI_3VDL2_u<0b1, 0b0000, "uaddl2", add, 1>;
2283 defm SSUBLvvv : NeonI_3VDL_s<0b0, 0b0010, "ssubl", sub, 0>;
2284 defm USUBLvvv : NeonI_3VDL_u<0b1, 0b0010, "usubl", sub, 0>;
2286 defm SSUBL2vvv : NeonI_3VDL2_s<0b0, 0b0010, "ssubl2", sub, 0>;
2287 defm USUBL2vvv : NeonI_3VDL2_u<0b1, 0b0010, "usubl2", sub, 0>;
2289 // normal wide/wide2 pattern
// Wide form: only the second operand ($Rm) is extended; $Rn is already at
// the wide element type (note $Rn is printed with ResS, not OpS).
2290 class NeonI_3VDW<bit q, bit u, bits<2> size, bits<4> opcode,
2291 string asmop, string ResS, string OpS,
2292 SDPatternOperator opnode, SDPatternOperator ext,
2293 RegisterOperand OpVPR,
2294 ValueType ResTy, ValueType OpTy>
2295 : NeonI_3VDiff<q, u, size, opcode,
2296 (outs VPR128:$Rd), (ins VPR128:$Rn, OpVPR:$Rm),
2297 asmop # "\t$Rd." # ResS # ", $Rn." # ResS # ", $Rm." # OpS,
2298 [(set (ResTy VPR128:$Rd),
2299 (ResTy (opnode (ResTy VPR128:$Rn),
2300 (ResTy (ext (OpTy OpVPR:$Rm))))))],
// Signed wide: sext the low 64-bit half of $Rm.
2303 multiclass NeonI_3VDW_s<bit u, bits<4> opcode,
2304 string asmop, SDPatternOperator opnode>
2306 def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2307 opnode, sext, VPR64, v8i16, v8i8>;
2308 def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2309 opnode, sext, VPR64, v4i32, v4i16>;
2310 def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2311 opnode, sext, VPR64, v2i64, v2i32>;
2314 defm SADDWvvv : NeonI_3VDW_s<0b0, 0b0001, "saddw", add>;
2315 defm SSUBWvvv : NeonI_3VDW_s<0b0, 0b0011, "ssubw", sub>;
// Signed wide2: sext the high half of a 128-bit $Rm.
2317 multiclass NeonI_3VDW2_s<bit u, bits<4> opcode,
2318 string asmop, SDPatternOperator opnode>
2320 def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2321 opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
2322 def _4s8h : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2323 opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
2324 def _2d4s : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2325 opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
2328 defm SADDW2vvv : NeonI_3VDW2_s<0b0, 0b0001, "saddw2", add>;
2329 defm SSUBW2vvv : NeonI_3VDW2_s<0b0, 0b0011, "ssubw2", sub>;
// Unsigned wide / wide2: same shape with zext / NI_zext_high_*.
2331 multiclass NeonI_3VDW_u<bit u, bits<4> opcode,
2332 string asmop, SDPatternOperator opnode>
2334 def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2335 opnode, zext, VPR64, v8i16, v8i8>;
2336 def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2337 opnode, zext, VPR64, v4i32, v4i16>;
2338 def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2339 opnode, zext, VPR64, v2i64, v2i32>;
2342 defm UADDWvvv : NeonI_3VDW_u<0b1, 0b0001, "uaddw", add>;
2343 defm USUBWvvv : NeonI_3VDW_u<0b1, 0b0011, "usubw", sub>;
2345 multiclass NeonI_3VDW2_u<bit u, bits<4> opcode,
2346 string asmop, SDPatternOperator opnode>
2348 def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2349 opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
2350 def _4s8h : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2351 opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
2352 def _2d4s : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2353 opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
2356 defm UADDW2vvv : NeonI_3VDW2_u<0b1, 0b0001, "uaddw2", add>;
2357 defm USUBW2vvv : NeonI_3VDW2_u<0b1, 0b0011, "usubw2", sub>;
2359 // Get the high half part of the vector element.
// Implemented as: logical shift right each wide element by half its width
// (8/16/32 bits), then truncate to the narrow element type.
2360 multiclass NeonI_get_high
2362 def _8h : PatFrag<(ops node:$Rn),
2363 (v8i8 (trunc (v8i16 (srl (v8i16 node:$Rn),
2364 (v8i16 (Neon_dupImm 8))))))>;
2365 def _4s : PatFrag<(ops node:$Rn),
2366 (v4i16 (trunc (v4i32 (srl (v4i32 node:$Rn),
2367 (v4i32 (Neon_dupImm 16))))))>;
2368 def _2d : PatFrag<(ops node:$Rn),
2369 (v2i32 (trunc (v2i64 (srl (v2i64 node:$Rn),
2370 (v2i64 (Neon_dupImm 32))))))>;
2373 defm NI_get_hi : NeonI_get_high;
2375 // pattern for addhn/subhn with 2 operands
// add/sub the wide operands, then take the high half of each element via
// the get_hi fragment; the narrow result lands in a 64-bit register.
2376 class NeonI_3VDN_addhn_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
2377 string asmop, string ResS, string OpS,
2378 SDPatternOperator opnode, SDPatternOperator get_hi,
2379 ValueType ResTy, ValueType OpTy>
2380 : NeonI_3VDiff<q, u, size, opcode,
2381 (outs VPR64:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
2382 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2383 [(set (ResTy VPR64:$Rd),
2385 (OpTy (opnode (OpTy VPR128:$Rn),
2386 (OpTy VPR128:$Rm))))))],
2389 multiclass NeonI_3VDN_addhn_2Op<bit u, bits<4> opcode,
2390 string asmop, SDPatternOperator opnode,
2393 let isCommutable = Commutable in {
2394 def _8b8h : NeonI_3VDN_addhn_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
2395 opnode, NI_get_hi_8h, v8i8, v8i16>;
2396 def _4h4s : NeonI_3VDN_addhn_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
2397 opnode, NI_get_hi_4s, v4i16, v4i32>;
2398 def _2s2d : NeonI_3VDN_addhn_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
2399 opnode, NI_get_hi_2d, v2i32, v2i64>;
2403 defm ADDHNvvv : NeonI_3VDN_addhn_2Op<0b0, 0b0100, "addhn", add, 1>;
2404 defm SUBHNvvv : NeonI_3VDN_addhn_2Op<0b0, 0b0110, "subhn", sub, 0>;
2406 // pattern for operation with 2 operands
// Generic two-operand 3VDiff class; register classes and types are fully
// parameterized so it covers both narrow (128->64) and long (64->128) uses.
2407 class NeonI_3VD_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
2408 string asmop, string ResS, string OpS,
2409 SDPatternOperator opnode,
2410 RegisterOperand ResVPR, RegisterOperand OpVPR,
2411 ValueType ResTy, ValueType OpTy>
2412 : NeonI_3VDiff<q, u, size, opcode,
2413 (outs ResVPR:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2414 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2415 [(set (ResTy ResVPR:$Rd),
2416 (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))],
2419 // normal narrow pattern
2420 multiclass NeonI_3VDN_2Op<bit u, bits<4> opcode,
2421 string asmop, SDPatternOperator opnode,
2424 let isCommutable = Commutable in {
2425 def _8b8h : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
2426 opnode, VPR64, VPR128, v8i8, v8i16>;
2427 def _4h4s : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
2428 opnode, VPR64, VPR128, v4i16, v4i32>;
2429 def _2s2d : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
2430 opnode, VPR64, VPR128, v2i32, v2i64>;
2434 defm RADDHNvvv : NeonI_3VDN_2Op<0b1, 0b0100, "raddhn", int_arm_neon_vraddhn, 1>;
2435 defm RSUBHNvvv : NeonI_3VDN_2Op<0b1, 0b0110, "rsubhn", int_arm_neon_vrsubhn, 0>;
2437 // pattern for acle intrinsic with 3 operands
// No pattern is attached here (selection happens via the separate
// NarrowHighHalfPat patterns below); the tied $src supplies the low half.
2438 class NeonI_3VDN_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
2439 string asmop, string ResS, string OpS>
2440 : NeonI_3VDiff<q, u, size, opcode,
2441 (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn, VPR128:$Rm),
2442 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2444 let Constraints = "$src = $Rd";
2445 let neverHasSideEffects = 1;
2448 multiclass NeonI_3VDN_3Op_v1<bit u, bits<4> opcode,
2450 def _16b8h : NeonI_3VDN_3Op<0b1, u, 0b00, opcode, asmop, "16b", "8h">;
2451 def _8h4s : NeonI_3VDN_3Op<0b1, u, 0b01, opcode, asmop, "8h", "4s">;
2452 def _4s2d : NeonI_3VDN_3Op<0b1, u, 0b10, opcode, asmop, "4s", "2d">;
2455 defm ADDHN2vvv : NeonI_3VDN_3Op_v1<0b0, 0b0100, "addhn2">;
2456 defm SUBHN2vvv : NeonI_3VDN_3Op_v1<0b0, 0b0110, "subhn2">;
2458 defm RADDHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0100, "raddhn2">;
2459 defm RSUBHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0110, "rsubhn2">;
2461 // Patterns have to be separate because there's a SUBREG_TO_REG in the output
// Matches "combine(old low half, narrow-op result)" and selects the
// corresponding "…2" instruction, placing the 64-bit $src into the low
// half of the 128-bit destination via SUBREG_TO_REG.
2463 class NarrowHighHalfPat<Instruction INST, ValueType DstTy, ValueType SrcTy,
2464 SDPatternOperator coreop>
2465 : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2466 (v1i64 (bitconvert (DstTy (coreop (SrcTy VPR128:$Rn),
2467 (SrcTy VPR128:$Rm)))))),
2468 (INST (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2469 VPR128:$Rn, VPR128:$Rm)>;
// addhn2: high half of an add.
2472 def : NarrowHighHalfPat<ADDHN2vvv_16b8h, v8i8, v8i16,
2473 BinOpFrag<(NI_get_hi_8h (add node:$LHS, node:$RHS))>>;
2474 def : NarrowHighHalfPat<ADDHN2vvv_8h4s, v4i16, v4i32,
2475 BinOpFrag<(NI_get_hi_4s (add node:$LHS, node:$RHS))>>;
2476 def : NarrowHighHalfPat<ADDHN2vvv_4s2d, v2i32, v2i64,
2477 BinOpFrag<(NI_get_hi_2d (add node:$LHS, node:$RHS))>>;
// subhn2: high half of a sub.
2480 def : NarrowHighHalfPat<SUBHN2vvv_16b8h, v8i8, v8i16,
2481 BinOpFrag<(NI_get_hi_8h (sub node:$LHS, node:$RHS))>>;
2482 def : NarrowHighHalfPat<SUBHN2vvv_8h4s, v4i16, v4i32,
2483 BinOpFrag<(NI_get_hi_4s (sub node:$LHS, node:$RHS))>>;
2484 def : NarrowHighHalfPat<SUBHN2vvv_4s2d, v2i32, v2i64,
2485 BinOpFrag<(NI_get_hi_2d (sub node:$LHS, node:$RHS))>>;
// raddhn2 / rsubhn2: matched directly on the rounding intrinsics.
2488 def : NarrowHighHalfPat<RADDHN2vvv_16b8h, v8i8, v8i16, int_arm_neon_vraddhn>;
2489 def : NarrowHighHalfPat<RADDHN2vvv_8h4s, v4i16, v4i32, int_arm_neon_vraddhn>;
2490 def : NarrowHighHalfPat<RADDHN2vvv_4s2d, v2i32, v2i64, int_arm_neon_vraddhn>;
2493 def : NarrowHighHalfPat<RSUBHN2vvv_16b8h, v8i8, v8i16, int_arm_neon_vrsubhn>;
2494 def : NarrowHighHalfPat<RSUBHN2vvv_8h4s, v4i16, v4i32, int_arm_neon_vrsubhn>;
2495 def : NarrowHighHalfPat<RSUBHN2vvv_4s2d, v2i32, v2i64, int_arm_neon_vrsubhn>;
2497 // patterns that need to zero-extend their result
// The operator produces a narrow (OpSTy) result that is then zext'ed to
// the wide ResTy — used for sabdl/uabdl, whose absolute difference is
// non-negative.
2498 class NeonI_3VDL_Ext<bit q, bit u, bits<2> size, bits<4> opcode,
2499 string asmop, string ResS, string OpS,
2500 SDPatternOperator opnode,
2501 RegisterOperand OpVPR,
2502 ValueType ResTy, ValueType OpTy, ValueType OpSTy>
2503 : NeonI_3VDiff<q, u, size, opcode,
2504 (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2505 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2506 [(set (ResTy VPR128:$Rd),
2507 (ResTy (zext (OpSTy (opnode (OpTy OpVPR:$Rn),
2508 (OpTy OpVPR:$Rm))))))],
2511 multiclass NeonI_3VDL_zext<bit u, bits<4> opcode,
2512 string asmop, SDPatternOperator opnode,
2515 let isCommutable = Commutable in {
2516 def _8h8b : NeonI_3VDL_Ext<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2517 opnode, VPR64, v8i16, v8i8, v8i8>;
2518 def _4s4h : NeonI_3VDL_Ext<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2519 opnode, VPR64, v4i32, v4i16, v4i16>;
2520 def _2d2s : NeonI_3VDL_Ext<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2521 opnode, VPR64, v2i64, v2i32, v2i32>;
2525 defm SABDLvvv : NeonI_3VDL_zext<0b0, 0b0111, "sabdl", int_arm_neon_vabds, 1>;
2526 defm UABDLvvv : NeonI_3VDL_zext<0b1, 0b0111, "uabdl", int_arm_neon_vabdu, 1>;
// Wraps an operator so it applies to the high halves of both 128-bit
// operands; instantiated below for abd/mull/qdmull/pmull.
2528 multiclass NeonI_Op_High<SDPatternOperator op>
2530 def _16B : PatFrag<(ops node:$Rn, node:$Rm),
2531 (op (v8i8 (Neon_High16B node:$Rn)), (v8i8 (Neon_High16B node:$Rm)))>;
2532 def _8H : PatFrag<(ops node:$Rn, node:$Rm),
2533 (op (v4i16 (Neon_High8H node:$Rn)), (v4i16 (Neon_High8H node:$Rm)))>;
2534 def _4S : PatFrag<(ops node:$Rn, node:$Rm),
2535 (op (v2i32 (Neon_High4S node:$Rn)), (v2i32 (Neon_High4S node:$Rm)))>;
2539 defm NI_sabdl_hi : NeonI_Op_High<int_arm_neon_vabds>;
2540 defm NI_uabdl_hi : NeonI_Op_High<int_arm_neon_vabdu>;
2541 defm NI_smull_hi : NeonI_Op_High<int_arm_neon_vmulls>;
2542 defm NI_umull_hi : NeonI_Op_High<int_arm_neon_vmullu>;
2543 defm NI_qdmull_hi : NeonI_Op_High<int_arm_neon_vqdmull>;
2544 defm NI_pmull_hi : NeonI_Op_High<int_arm_neon_vmullp>;
// sabdl2/uabdl2: high-half variants; "opnode" is a string so the per-size
// _16B/_8H/_4S PatFrag can be looked up with !cast.
2546 multiclass NeonI_3VDL_Abd_u<bit u, bits<4> opcode,
2547 string asmop, string opnode,
2550 let isCommutable = Commutable in {
2551 def _8h8b : NeonI_3VDL_Ext<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2552 !cast<PatFrag>(opnode # "_16B"),
2553 VPR128, v8i16, v16i8, v8i8>;
2554 def _4s4h : NeonI_3VDL_Ext<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2555 !cast<PatFrag>(opnode # "_8H"),
2556 VPR128, v4i32, v8i16, v4i16>;
2557 def _2d2s : NeonI_3VDL_Ext<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2558 !cast<PatFrag>(opnode # "_4S"),
2559 VPR128, v2i64, v4i32, v2i32>;
2563 defm SABDL2vvv : NeonI_3VDL_Abd_u<0b0, 0b0111, "sabdl2", "NI_sabdl_hi", 1>;
2564 defm UABDL2vvv : NeonI_3VDL_Abd_u<0b1, 0b0111, "uabdl2", "NI_uabdl_hi", 1>;
2566 // For patterns that chain two operators together.
// Here: "subop" computes a narrow result (e.g. absolute difference) that
// is zero-extended and then combined into the $src accumulator by
// "opnode" — the sabal/uabal shape.
2567 class NeonI_3VDL_Aba<bit q, bit u, bits<2> size, bits<4> opcode,
2568 string asmop, string ResS, string OpS,
2569 SDPatternOperator opnode, SDPatternOperator subop,
2570 RegisterOperand OpVPR,
2571 ValueType ResTy, ValueType OpTy, ValueType OpSTy>
2572 : NeonI_3VDiff<q, u, size, opcode,
2573 (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
2574 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2575 [(set (ResTy VPR128:$Rd),
2577 (ResTy VPR128:$src),
2578 (ResTy (zext (OpSTy (subop (OpTy OpVPR:$Rn),
2579 (OpTy OpVPR:$Rm))))))))],
2581 let Constraints = "$src = $Rd";
2584 multiclass NeonI_3VDL_Aba_v1<bit u, bits<4> opcode,
2585 string asmop, SDPatternOperator opnode,
2586 SDPatternOperator subop>
2588 def _8h8b : NeonI_3VDL_Aba<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2589 opnode, subop, VPR64, v8i16, v8i8, v8i8>;
2590 def _4s4h : NeonI_3VDL_Aba<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2591 opnode, subop, VPR64, v4i32, v4i16, v4i16>;
2592 def _2d2s : NeonI_3VDL_Aba<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2593 opnode, subop, VPR64, v2i64, v2i32, v2i32>;
2596 defm SABALvvv : NeonI_3VDL_Aba_v1<0b0, 0b0101, "sabal",
2597 add, int_arm_neon_vabds>;
2598 defm UABALvvv : NeonI_3VDL_Aba_v1<0b1, 0b0101, "uabal",
2599 add, int_arm_neon_vabdu>;
// sabal2/uabal2: high-half variants via the NI_*abdl_hi PatFrags.
2601 multiclass NeonI_3VDL2_Aba_v1<bit u, bits<4> opcode,
2602 string asmop, SDPatternOperator opnode,
2605 def _8h8b : NeonI_3VDL_Aba<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2606 opnode, !cast<PatFrag>(subop # "_16B"),
2607 VPR128, v8i16, v16i8, v8i8>;
2608 def _4s4h : NeonI_3VDL_Aba<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2609 opnode, !cast<PatFrag>(subop # "_8H"),
2610 VPR128, v4i32, v8i16, v4i16>;
2611 def _2d2s : NeonI_3VDL_Aba<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2612 opnode, !cast<PatFrag>(subop # "_4S"),
2613 VPR128, v2i64, v4i32, v2i32>;
2616 defm SABAL2vvv : NeonI_3VDL2_Aba_v1<0b0, 0b0101, "sabal2", add,
2618 defm UABAL2vvv : NeonI_3VDL2_Aba_v1<0b1, 0b0101, "uabal2", add,
2621 // Long pattern with 2 operands
2622 multiclass NeonI_3VDL_2Op<bit u, bits<4> opcode,
2623 string asmop, SDPatternOperator opnode,
2626 let isCommutable = Commutable in {
2627 def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2628 opnode, VPR128, VPR64, v8i16, v8i8>;
2629 def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2630 opnode, VPR128, VPR64, v4i32, v4i16>;
2631 def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2632 opnode, VPR128, VPR64, v2i64, v2i32>;
2636 defm SMULLvvv : NeonI_3VDL_2Op<0b0, 0b1100, "smull", int_arm_neon_vmulls, 1>;
2637 defm UMULLvvv : NeonI_3VDL_2Op<0b1, 0b1100, "umull", int_arm_neon_vmullu, 1>;
// Widening multiply on the high halves of two 128-bit sources ("...2" forms):
// both inputs and the result live in VPR128; `opnode` receives the full
// 128-bit operands and is expected to select the high half itself.
2639 class NeonI_3VDL2_2Op_mull<bit q, bit u, bits<2> size, bits<4> opcode,
2640 string asmop, string ResS, string OpS,
2641 SDPatternOperator opnode,
2642 ValueType ResTy, ValueType OpTy>
2643 : NeonI_3VDiff<q, u, size, opcode,
2644 (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
2645 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2646 [(set (ResTy VPR128:$Rd),
2647 (ResTy (opnode (OpTy VPR128:$Rn), (OpTy VPR128:$Rm))))],
// Instantiates the high-half mull class for 16b/8h/4s sources, resolving the
// per-size "_16B"/"_8H"/"_4S" PatFrag from the `opnode` name string.
2651 multiclass NeonI_3VDL2_2Op_mull_v1<bit u, bits<4> opcode,
2656 let isCommutable = Commutable in {
2657 def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2658 !cast<PatFrag>(opnode # "_16B"),
2660 def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2661 !cast<PatFrag>(opnode # "_8H"),
2663 def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2664 !cast<PatFrag>(opnode # "_4S"),
// NOTE(review): continuation lines of these defms are elided in this extract.
2669 defm SMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b0, 0b1100, "smull2",
2671 defm UMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b1, 0b1100, "umull2",
2674 // Long pattern with 3 operands
// Widening multiply-accumulate: Rd (128-bit) is tied to $src and accumulates
// the widened product of two 64-bit operands.
2675 class NeonI_3VDL_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
2676 string asmop, string ResS, string OpS,
2677 SDPatternOperator opnode,
2678 ValueType ResTy, ValueType OpTy>
2679 : NeonI_3VDiff<q, u, size, opcode,
2680 (outs VPR128:$Rd), (ins VPR128:$src, VPR64:$Rn, VPR64:$Rm),
2681 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2682 [(set (ResTy VPR128:$Rd),
2684 (ResTy VPR128:$src),
2685 (OpTy VPR64:$Rn), (OpTy VPR64:$Rm))))],
// Tie accumulator input to the destination register.
2687 let Constraints = "$src = $Rd";
2690 multiclass NeonI_3VDL_3Op_v1<bit u, bits<4> opcode,
2691 string asmop, SDPatternOperator opnode>
2693 def _8h8b : NeonI_3VDL_3Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2694 opnode, v8i16, v8i8>;
2695 def _4s4h : NeonI_3VDL_3Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2696 opnode, v4i32, v4i16>;
2697 def _2d2s : NeonI_3VDL_3Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2698 opnode, v2i64, v2i32>;
// PatFrags fusing accumulate (add/sub) with the widening multiply intrinsic;
// $Rd is the accumulator, $Rn/$Rm the multiply operands.
// NOTE(review): the add/sub wrapper lines are elided in this extract.
2701 def Neon_smlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
2703 (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
2705 def Neon_umlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
2707 (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
2709 def Neon_smlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
2711 (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
2713 def Neon_umlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
2715 (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
// Widening multiply-accumulate / multiply-subtract (low halves).
2717 defm SMLALvvv : NeonI_3VDL_3Op_v1<0b0, 0b1000, "smlal", Neon_smlal>;
2718 defm UMLALvvv : NeonI_3VDL_3Op_v1<0b1, 0b1000, "umlal", Neon_umlal>;
2720 defm SMLSLvvv : NeonI_3VDL_3Op_v1<0b0, 0b1010, "smlsl", Neon_smlsl>;
2721 defm UMLSLvvv : NeonI_3VDL_3Op_v1<0b1, 0b1010, "umlsl", Neon_umlsl>;
// High-half widening multiply-accumulate/subtract: `opnode` produces the
// widened product of the high halves, `subop` (add or sub) folds it into the
// tied accumulator $src.
2723 class NeonI_3VDL2_3Op_mlas<bit q, bit u, bits<2> size, bits<4> opcode,
2724 string asmop, string ResS, string OpS,
2725 SDPatternOperator subop, SDPatternOperator opnode,
2726 RegisterOperand OpVPR,
2727 ValueType ResTy, ValueType OpTy>
2728 : NeonI_3VDiff<q, u, size, opcode,
2729 (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
2730 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2731 [(set (ResTy VPR128:$Rd),
2733 (ResTy VPR128:$src),
2734 (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))))],
// Tie accumulator input to the destination register.
2736 let Constraints = "$src = $Rd";
// Instantiates the class above for the three ".2" element-size combinations,
// resolving the "_16B"/"_8H"/"_4S" high-half mull PatFrag by name.
2739 multiclass NeonI_3VDL2_3Op_mlas_v1<bit u, bits<4> opcode,
2741 SDPatternOperator subop,
2744 def _8h16b : NeonI_3VDL2_3Op_mlas<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2745 subop, !cast<PatFrag>(opnode # "_16B"),
2746 VPR128, v8i16, v16i8>;
2747 def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2748 subop, !cast<PatFrag>(opnode # "_8H"),
2749 VPR128, v4i32, v8i16>;
2750 def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2751 subop, !cast<PatFrag>(opnode # "_4S"),
2752 VPR128, v2i64, v4i32>;
// smlal2/umlal2 accumulate with `add`; smlsl2/umlsl2 with `sub`.
2755 defm SMLAL2vvv : NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1000, "smlal2",
2756 add, "NI_smull_hi">;
2757 defm UMLAL2vvv : NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1000, "umlal2",
2758 add, "NI_umull_hi">;
2760 defm SMLSL2vvv : NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1010, "smlsl2",
2761 sub, "NI_smull_hi">;
2762 defm UMLSL2vvv : NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1010, "umlsl2",
2763 sub, "NI_umull_hi">;
// Saturating doubling multiply-accumulate/subtract (low halves): reuses the
// 3-op mlas class with 64-bit operands (q=0b0) and int_arm_neon_vqdmull as
// the multiply; `opnode` is the saturating accumulate (vqadds/vqsubs).
// Only 4h and 2s element sizes exist for sqdmlal/sqdmlsl.
2765 multiclass NeonI_3VDL_qdmlal_3Op_v2<bit u, bits<4> opcode,
2766 string asmop, SDPatternOperator opnode>
2768 def _4s4h : NeonI_3VDL2_3Op_mlas<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2769 opnode, int_arm_neon_vqdmull,
2770 VPR64, v4i32, v4i16>;
2771 def _2d2s : NeonI_3VDL2_3Op_mlas<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2772 opnode, int_arm_neon_vqdmull,
2773 VPR64, v2i64, v2i32>;
2776 defm SQDMLALvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1001, "sqdmlal",
2777 int_arm_neon_vqadds>;
2778 defm SQDMLSLvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1011, "sqdmlsl",
2779 int_arm_neon_vqsubs>;
// Widening 2-operand variant restricted to 4h and 2s sources (used by
// sqdmull, which has no byte-element form).
2781 multiclass NeonI_3VDL_v2<bit u, bits<4> opcode,
2782 string asmop, SDPatternOperator opnode,
2785 let isCommutable = Commutable in {
2786 def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2787 opnode, VPR128, VPR64, v4i32, v4i16>;
2788 def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2789 opnode, VPR128, VPR64, v2i64, v2i32>;
// Signed saturating doubling multiply long (commutable).
2793 defm SQDMULLvvv : NeonI_3VDL_v2<0b0, 0b1101, "sqdmull",
2794 int_arm_neon_vqdmull, 1>;
// High-half sqdmull2: 8h and 4s source forms only, with the per-size
// high-half PatFrag resolved from the `opnode` name string.
2796 multiclass NeonI_3VDL2_2Op_mull_v2<bit u, bits<4> opcode,
2801 let isCommutable = Commutable in {
2802 def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2803 !cast<PatFrag>(opnode # "_8H"),
2805 def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2806 !cast<PatFrag>(opnode # "_4S"),
// NOTE(review): the continuation line of this defm is elided in this extract.
2811 defm SQDMULL2vvv : NeonI_3VDL2_2Op_mull_v2<0b0, 0b1101, "sqdmull2",
// High-half saturating doubling multiply-accumulate/subtract: NI_qdmull_hi_*
// supplies the high-half multiply, `opnode` the saturating accumulate.
2814 multiclass NeonI_3VDL2_3Op_qdmlal_v2<bit u, bits<4> opcode,
2816 SDPatternOperator opnode>
2818 def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2819 opnode, NI_qdmull_hi_8H,
2820 VPR128, v4i32, v8i16>;
2821 def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2822 opnode, NI_qdmull_hi_4S,
2823 VPR128, v2i64, v4i32>;
2826 defm SQDMLAL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1001, "sqdmlal2",
2827 int_arm_neon_vqadds>;
2828 defm SQDMLSL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1011, "sqdmlsl2",
2829 int_arm_neon_vqsubs>;
// Polynomial multiply long: only the byte-element (8h<-8b) form exists.
2831 multiclass NeonI_3VDL_v3<bit u, bits<4> opcode,
2832 string asmop, SDPatternOperator opnode,
2835 let isCommutable = Commutable in {
2836 def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2837 opnode, VPR128, VPR64, v8i16, v8i8>;
2841 defm PMULLvvv : NeonI_3VDL_v3<0b0, 0b1110, "pmull", int_arm_neon_vmullp, 1>;
// High-half pmull2: byte-element form only.
2843 multiclass NeonI_3VDL2_2Op_mull_v3<bit u, bits<4> opcode,
2848 let isCommutable = Commutable in {
2849 def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2850 !cast<PatFrag>(opnode # "_16B"),
// NOTE(review): the continuation line of this defm is elided in this extract.
2855 defm PMULL2vvv : NeonI_3VDL2_2Op_mull_v3<0b0, 0b1110, "pmull2",
2858 // End of implementation for instruction class (3V Diff)
2860 // Scalar Arithmetic
// Scalar 3-register same-size instruction, D (64-bit) element only.
2862 class NeonI_Scalar3Same_D_size<bit u, bits<5> opcode, string asmop>
2863 : NeonI_Scalar3Same<u, 0b11, opcode,
2864 (outs FPR64:$Rd), (ins FPR64:$Rn, FPR64:$Rm),
2865 !strconcat(asmop, " $Rd, $Rn, $Rm"),
// Scalar 3-register same-size instructions across all four element sizes:
// b (8-bit), h (16-bit), s (32-bit), d (64-bit).
2869 multiclass NeonI_Scalar3Same_BHSD_sizes<bit u, bits<5> opcode,
2870 string asmop, bit Commutable = 0>
2872 let isCommutable = Commutable in {
2873 def bbb : NeonI_Scalar3Same<u, 0b00, opcode,
2874 (outs FPR8:$Rd), (ins FPR8:$Rn, FPR8:$Rm),
2875 !strconcat(asmop, " $Rd, $Rn, $Rm"),
2878 def hhh : NeonI_Scalar3Same<u, 0b01, opcode,
2879 (outs FPR16:$Rd), (ins FPR16:$Rn, FPR16:$Rm),
2880 !strconcat(asmop, " $Rd, $Rn, $Rm"),
2883 def sss : NeonI_Scalar3Same<u, 0b10, opcode,
2884 (outs FPR32:$Rd), (ins FPR32:$Rn, FPR32:$Rm),
2885 !strconcat(asmop, " $Rd, $Rn, $Rm"),
2888 def ddd : NeonI_Scalar3Same<u, 0b11, opcode,
2889 (outs FPR64:$Rd), (ins FPR64:$Rn, FPR64:$Rm),
2890 !strconcat(asmop, " $Rd, $Rn, $Rm"),
// Selection pattern mapping a v1i64 scalar operation onto the D-register
// instruction INSTD.
2896 multiclass Neon_Scalar_D_size_patterns<SDPatternOperator opnode,
2897 Instruction INSTD> {
2898 def : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
2899 (INSTD FPR64:$Rn, FPR64:$Rm)>;
// Extends the D-size patterns with the v1i8/v1i16/v1i32 element sizes,
// mapping onto the matching B/H/S instructions.
2902 multiclass Neon_Scalar_BHSD_size_patterns<SDPatternOperator opnode,
2903 Instruction INSTB, Instruction INSTH,
2904 Instruction INSTS, Instruction INSTD>
2905 : Neon_Scalar_D_size_patterns<opnode, INSTD> {
2906 def: Pat<(v1i8 (opnode (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
2907 (INSTB FPR8:$Rn, FPR8:$Rm)>;
2909 def: Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
2910 (INSTH FPR16:$Rn, FPR16:$Rm)>;
2912 def: Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
2913 (INSTS FPR32:$Rn, FPR32:$Rm)>;
2916 // Scalar Integer Add
2917 let isCommutable = 1 in {
2918 def ADDddd : NeonI_Scalar3Same_D_size<0b0, 0b10000, "add">;
2921 // Scalar Integer Sub
2922 def SUBddd : NeonI_Scalar3Same_D_size<0b1, 0b10000, "sub">;
2924 // Pattern for Scalar Integer Add and Sub with D register only
2925 defm : Neon_Scalar_D_size_patterns<add, ADDddd>;
2926 defm : Neon_Scalar_D_size_patterns<sub, SUBddd>;
2928 // Patterns to match llvm.aarch64.* intrinsic for Scalar Add, Sub
// Signed and unsigned intrinsic variants select the same instruction, since
// 64-bit add/sub are sign-agnostic.
2929 defm : Neon_Scalar_D_size_patterns<int_aarch64_neon_vaddds, ADDddd>;
2930 defm : Neon_Scalar_D_size_patterns<int_aarch64_neon_vadddu, ADDddd>;
2931 defm : Neon_Scalar_D_size_patterns<int_aarch64_neon_vsubds, SUBddd>;
2932 defm : Neon_Scalar_D_size_patterns<int_aarch64_neon_vsubdu, SUBddd>;
2934 // Scalar Integer Saturating Add (Signed, Unsigned)
2935 defm SQADD : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00001, "sqadd", 1>;
2936 defm UQADD : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00001, "uqadd", 1>;
2938 // Scalar Integer Saturating Sub (Signed, Unsigned)
2939 defm SQSUB : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00101, "sqsub", 0>;
2940 defm UQSUB : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00101, "uqsub", 0>;
2942 // Patterns to match llvm.arm.* intrinsic for
2943 // Scalar Integer Saturating Add, Sub (Signed, Unsigned)
2944 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vqadds, SQADDddd>;
2945 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vqaddu, UQADDddd>;
2946 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vqsubs, SQSUBddd>;
2947 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vqsubu, UQSUBddd>;
2949 // Patterns to match llvm.aarch64.* intrinsic for
2950 // Scalar Integer Saturating Add, Sub (Signed, Unsigned)
2951 defm : Neon_Scalar_BHSD_size_patterns<int_aarch64_neon_vqadds, SQADDbbb, SQADDhhh,
2952 SQADDsss, SQADDddd>;
2953 defm : Neon_Scalar_BHSD_size_patterns<int_aarch64_neon_vqaddu, UQADDbbb, UQADDhhh,
2954 UQADDsss, UQADDddd>;
2955 defm : Neon_Scalar_BHSD_size_patterns<int_aarch64_neon_vqsubs, SQSUBbbb, SQSUBhhh,
2956 SQSUBsss, SQSUBddd>;
2957 defm : Neon_Scalar_BHSD_size_patterns<int_aarch64_neon_vqsubu, UQSUBbbb, UQSUBhhh,
2958 UQSUBsss, UQSUBddd>;
2960 // Scalar Integer Shift Left (Signed, Unsigned)
2961 def SSHLddd : NeonI_Scalar3Same_D_size<0b0, 0b01000, "sshl">;
2962 def USHLddd : NeonI_Scalar3Same_D_size<0b1, 0b01000, "ushl">;
2964 // Patterns to match llvm.arm.* intrinsic for
2965 // Scalar Integer Shift Left (Signed, Unsigned)
2966 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vshifts, SSHLddd>;
2967 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vshiftu, USHLddd>;
2969 // Patterns to match llvm.aarch64.* intrinsic for
2970 // Scalar Integer Shift Left (Signed, Unsigned)
2971 defm : Neon_Scalar_D_size_patterns<int_aarch64_neon_vshlds, SSHLddd>;
2972 defm : Neon_Scalar_D_size_patterns<int_aarch64_neon_vshldu, USHLddd>;
2974 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
2975 defm SQSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01001, "sqshl", 0>;
2976 defm UQSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01001, "uqshl", 0>;
2978 // Patterns to match llvm.aarch64.* intrinsic for
2979 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
2980 defm : Neon_Scalar_BHSD_size_patterns<int_aarch64_neon_vqshls, SQSHLbbb, SQSHLhhh,
2981 SQSHLsss, SQSHLddd>;
2982 defm : Neon_Scalar_BHSD_size_patterns<int_aarch64_neon_vqshlu, UQSHLbbb, UQSHLhhh,
2983 UQSHLsss, UQSHLddd>;
2985 // Patterns to match llvm.arm.* intrinsic for
2986 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
2987 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vqshifts, SQSHLddd>;
2988 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vqshiftu, UQSHLddd>;
2990 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
2991 def SRSHLddd: NeonI_Scalar3Same_D_size<0b0, 0b01010, "srshl">;
2992 def URSHLddd: NeonI_Scalar3Same_D_size<0b1, 0b01010, "urshl">;
2994 // Patterns to match llvm.aarch64.* intrinsic for
2995 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
2996 defm : Neon_Scalar_D_size_patterns<int_aarch64_neon_vrshlds, SRSHLddd>;
2997 defm : Neon_Scalar_D_size_patterns<int_aarch64_neon_vrshldu, URSHLddd>;
2999 // Patterns to match llvm.arm.* intrinsic for
3000 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
3001 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vrshifts, SRSHLddd>;
3002 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vrshiftu, URSHLddd>;
3004 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
3005 defm SQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01011, "sqrshl", 0>;
3006 defm UQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01011, "uqrshl", 0>;
3008 // Patterns to match llvm.aarch64.* intrinsic for
3009 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
3010 defm : Neon_Scalar_BHSD_size_patterns<int_aarch64_neon_vqrshls, SQRSHLbbb, SQRSHLhhh,
3011 SQRSHLsss, SQRSHLddd>;
3012 defm : Neon_Scalar_BHSD_size_patterns<int_aarch64_neon_vqrshlu, UQRSHLbbb, UQRSHLhhh,
3013 UQRSHLsss, UQRSHLddd>;
3015 // Patterns to match llvm.arm.* intrinsic for
3016 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
3017 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vqrshifts, SQRSHLddd>;
3018 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vqrshiftu, UQRSHLddd>;
3020 // Scalar Reduce Pairwise
// Pairwise reduction producing a scalar D result from a .2d vector source.
3022 multiclass NeonI_ScalarPair_D_sizes<bit u, bit size, bits<5> opcode,
3023 string asmop, bit Commutable = 0> {
3024 let isCommutable = Commutable in {
3025 def _D_2D : NeonI_ScalarPair<u, {size, 0b1}, opcode,
3026 (outs FPR64:$Rd), (ins VPR128:$Rn),
3027 !strconcat(asmop, " $Rd, $Rn.2d"),
// Adds the scalar S result from a .2s vector source on top of the D form.
3033 multiclass NeonI_ScalarPair_SD_sizes<bit u, bit size, bits<5> opcode,
3034 string asmop, bit Commutable = 0>
3035 : NeonI_ScalarPair_D_sizes<u, size, opcode, asmop, Commutable> {
3036 let isCommutable = Commutable in {
3037 def _S_2S : NeonI_ScalarPair<u, {size, 0b0}, opcode,
3038 (outs FPR32:$Rd), (ins VPR64:$Rn),
3039 !strconcat(asmop, " $Rd, $Rn.2s"),
3045 // Scalar Reduce Addition Pairwise (Integer) with
3046 // Pattern to match llvm.arm.* intrinsic
3047 defm ADDPvv : NeonI_ScalarPair_D_sizes<0b0, 0b1, 0b11011, "addp", 0>;
3049 // Pattern to match llvm.aarch64.* intrinsic for
3050 // Scalar Reduce Addition Pairwise (Integer)
3051 def : Pat<(v1i64 (int_aarch64_neon_vpadd (v2i64 VPR128:$Rn))),
3052 (ADDPvv_D_2D VPR128:$Rn)>;
3054 // Scalar Reduce Addition Pairwise (Floating Point)
3055 defm FADDPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01101, "faddp", 0>;
3057 // Scalar Reduce Maximum Pairwise (Floating Point)
3058 defm FMAXPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01111, "fmaxp", 0>;
3060 // Scalar Reduce Minimum Pairwise (Floating Point)
3061 defm FMINPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01111, "fminp", 0>;
3063 // Scalar Reduce maxNum Pairwise (Floating Point)
3064 defm FMAXNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01100, "fmaxnmp", 0>;
3066 // Scalar Reduce minNum Pairwise (Floating Point)
3067 defm FMINNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01100, "fminnmp", 0>;
// Maps the S-result (v2f32 source) intrinsic onto INSTS and the D-result
// (v2f64 source) intrinsic onto INSTD.
3069 multiclass Neon_ScalarPair_SD_size_patterns<SDPatternOperator opnodeS,
3070 SDPatternOperator opnodeD,
3072 Instruction INSTD> {
3073 def : Pat<(v1f32 (opnodeS (v2f32 VPR64:$Rn))),
3075 def : Pat<(v1f64 (opnodeD (v2f64 VPR128:$Rn))),
3076 (INSTD VPR128:$Rn)>;
3079 // Patterns to match llvm.aarch64.* intrinsic for
3080 // Scalar Reduce Add, Max, Min, MaxNum, MinNum Pairwise (Floating Point)
3081 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfadd,
3082 int_aarch64_neon_vpfaddq, FADDPvv_S_2S, FADDPvv_D_2D>;
3084 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmax,
3085 int_aarch64_neon_vpmaxq, FMAXPvv_S_2S, FMAXPvv_D_2D>;
3087 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmin,
3088 int_aarch64_neon_vpminq, FMINPvv_S_2S, FMINPvv_D_2D>;
3090 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfmaxnm,
3091 int_aarch64_neon_vpfmaxnmq, FMAXNMPvv_S_2S, FMAXNMPvv_D_2D>;
3093 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfminnm,
3094 int_aarch64_neon_vpfminnmq, FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
3098 //===----------------------------------------------------------------------===//
3099 // Non-Instruction Patterns
3100 //===----------------------------------------------------------------------===//
3102 // 64-bit vector bitcasts...
// All bitconverts between same-width register classes are no-ops: the pattern
// result simply reinterprets the source register with the new type.
3104 def : Pat<(v1i64 (bitconvert (v8i8 VPR64:$src))), (v1i64 VPR64:$src)>;
3105 def : Pat<(v2f32 (bitconvert (v8i8 VPR64:$src))), (v2f32 VPR64:$src)>;
3106 def : Pat<(v2i32 (bitconvert (v8i8 VPR64:$src))), (v2i32 VPR64:$src)>;
3107 def : Pat<(v4i16 (bitconvert (v8i8 VPR64:$src))), (v4i16 VPR64:$src)>;
3109 def : Pat<(v1i64 (bitconvert (v4i16 VPR64:$src))), (v1i64 VPR64:$src)>;
3110 def : Pat<(v2i32 (bitconvert (v4i16 VPR64:$src))), (v2i32 VPR64:$src)>;
3111 def : Pat<(v2f32 (bitconvert (v4i16 VPR64:$src))), (v2f32 VPR64:$src)>;
3112 def : Pat<(v8i8 (bitconvert (v4i16 VPR64:$src))), (v8i8 VPR64:$src)>;
3114 def : Pat<(v1i64 (bitconvert (v2i32 VPR64:$src))), (v1i64 VPR64:$src)>;
3115 def : Pat<(v2f32 (bitconvert (v2i32 VPR64:$src))), (v2f32 VPR64:$src)>;
3116 def : Pat<(v4i16 (bitconvert (v2i32 VPR64:$src))), (v4i16 VPR64:$src)>;
3117 def : Pat<(v8i8 (bitconvert (v2i32 VPR64:$src))), (v8i8 VPR64:$src)>;
3119 def : Pat<(v1i64 (bitconvert (v2f32 VPR64:$src))), (v1i64 VPR64:$src)>;
3120 def : Pat<(v2i32 (bitconvert (v2f32 VPR64:$src))), (v2i32 VPR64:$src)>;
3121 def : Pat<(v4i16 (bitconvert (v2f32 VPR64:$src))), (v4i16 VPR64:$src)>;
3122 def : Pat<(v8i8 (bitconvert (v2f32 VPR64:$src))), (v8i8 VPR64:$src)>;
3124 def : Pat<(v2f32 (bitconvert (v1i64 VPR64:$src))), (v2f32 VPR64:$src)>;
3125 def : Pat<(v2i32 (bitconvert (v1i64 VPR64:$src))), (v2i32 VPR64:$src)>;
3126 def : Pat<(v4i16 (bitconvert (v1i64 VPR64:$src))), (v4i16 VPR64:$src)>;
3127 def : Pat<(v8i8 (bitconvert (v1i64 VPR64:$src))), (v8i8 VPR64:$src)>;
3129 // ..and 128-bit vector bitcasts...
3131 def : Pat<(v2f64 (bitconvert (v16i8 VPR128:$src))), (v2f64 VPR128:$src)>;
3132 def : Pat<(v2i64 (bitconvert (v16i8 VPR128:$src))), (v2i64 VPR128:$src)>;
3133 def : Pat<(v4f32 (bitconvert (v16i8 VPR128:$src))), (v4f32 VPR128:$src)>;
3134 def : Pat<(v4i32 (bitconvert (v16i8 VPR128:$src))), (v4i32 VPR128:$src)>;
3135 def : Pat<(v8i16 (bitconvert (v16i8 VPR128:$src))), (v8i16 VPR128:$src)>;
3137 def : Pat<(v2f64 (bitconvert (v8i16 VPR128:$src))), (v2f64 VPR128:$src)>;
3138 def : Pat<(v2i64 (bitconvert (v8i16 VPR128:$src))), (v2i64 VPR128:$src)>;
3139 def : Pat<(v4i32 (bitconvert (v8i16 VPR128:$src))), (v4i32 VPR128:$src)>;
3140 def : Pat<(v4f32 (bitconvert (v8i16 VPR128:$src))), (v4f32 VPR128:$src)>;
3141 def : Pat<(v16i8 (bitconvert (v8i16 VPR128:$src))), (v16i8 VPR128:$src)>;
3143 def : Pat<(v2f64 (bitconvert (v4i32 VPR128:$src))), (v2f64 VPR128:$src)>;
3144 def : Pat<(v2i64 (bitconvert (v4i32 VPR128:$src))), (v2i64 VPR128:$src)>;
3145 def : Pat<(v4f32 (bitconvert (v4i32 VPR128:$src))), (v4f32 VPR128:$src)>;
3146 def : Pat<(v8i16 (bitconvert (v4i32 VPR128:$src))), (v8i16 VPR128:$src)>;
3147 def : Pat<(v16i8 (bitconvert (v4i32 VPR128:$src))), (v16i8 VPR128:$src)>;
3149 def : Pat<(v2f64 (bitconvert (v4f32 VPR128:$src))), (v2f64 VPR128:$src)>;
3150 def : Pat<(v2i64 (bitconvert (v4f32 VPR128:$src))), (v2i64 VPR128:$src)>;
3151 def : Pat<(v4i32 (bitconvert (v4f32 VPR128:$src))), (v4i32 VPR128:$src)>;
3152 def : Pat<(v8i16 (bitconvert (v4f32 VPR128:$src))), (v8i16 VPR128:$src)>;
3153 def : Pat<(v16i8 (bitconvert (v4f32 VPR128:$src))), (v16i8 VPR128:$src)>;
3155 def : Pat<(v2f64 (bitconvert (v2i64 VPR128:$src))), (v2f64 VPR128:$src)>;
3156 def : Pat<(v4f32 (bitconvert (v2i64 VPR128:$src))), (v4f32 VPR128:$src)>;
3157 def : Pat<(v4i32 (bitconvert (v2i64 VPR128:$src))), (v4i32 VPR128:$src)>;
3158 def : Pat<(v8i16 (bitconvert (v2i64 VPR128:$src))), (v8i16 VPR128:$src)>;
3159 def : Pat<(v16i8 (bitconvert (v2i64 VPR128:$src))), (v16i8 VPR128:$src)>;
3161 def : Pat<(v2i64 (bitconvert (v2f64 VPR128:$src))), (v2i64 VPR128:$src)>;
3162 def : Pat<(v4f32 (bitconvert (v2f64 VPR128:$src))), (v4f32 VPR128:$src)>;
3163 def : Pat<(v4i32 (bitconvert (v2f64 VPR128:$src))), (v4i32 VPR128:$src)>;
3164 def : Pat<(v8i16 (bitconvert (v2f64 VPR128:$src))), (v8i16 VPR128:$src)>;
3165 def : Pat<(v16i8 (bitconvert (v2f64 VPR128:$src))), (v16i8 VPR128:$src)>;
3168 // ...and scalar bitcasts...
3169 def : Pat<(f16 (bitconvert (v1i16 FPR16:$src))), (f16 FPR16:$src)>;
3170 def : Pat<(f32 (bitconvert (v1i32 FPR32:$src))), (f32 FPR32:$src)>;
3171 def : Pat<(f64 (bitconvert (v1i64 FPR64:$src))), (f64 FPR64:$src)>;
3172 def : Pat<(f32 (bitconvert (v1f32 FPR32:$src))), (f32 FPR32:$src)>;
3173 def : Pat<(f64 (bitconvert (v1f64 FPR64:$src))), (f64 FPR64:$src)>;
// Crossing the FPR/GPR register banks requires an actual FMOV instruction.
3175 def : Pat<(i64 (bitconvert (v1i64 FPR64:$src))), (FMOVxd $src)>;
3176 def : Pat<(i32 (bitconvert (v1i32 FPR32:$src))), (FMOVws $src)>;
3178 def : Pat<(v8i8 (bitconvert (v1i64 VPR64:$src))), (v8i8 VPR64:$src)>;
3179 def : Pat<(v4i16 (bitconvert (v1i64 VPR64:$src))), (v4i16 VPR64:$src)>;
3180 def : Pat<(v2i32 (bitconvert (v1i64 VPR64:$src))), (v2i32 VPR64:$src)>;
3182 def : Pat<(f64 (bitconvert (v8i8 VPR64:$src))), (f64 VPR64:$src)>;
3183 def : Pat<(f64 (bitconvert (v4i16 VPR64:$src))), (f64 VPR64:$src)>;
3184 def : Pat<(f64 (bitconvert (v2i32 VPR64:$src))), (f64 VPR64:$src)>;
3185 def : Pat<(f64 (bitconvert (v2f32 VPR64:$src))), (f64 VPR64:$src)>;
3186 def : Pat<(f64 (bitconvert (v1i64 VPR64:$src))), (f64 VPR64:$src)>;
3188 def : Pat<(f128 (bitconvert (v16i8 VPR128:$src))), (f128 VPR128:$src)>;
3189 def : Pat<(f128 (bitconvert (v8i16 VPR128:$src))), (f128 VPR128:$src)>;
3190 def : Pat<(f128 (bitconvert (v4i32 VPR128:$src))), (f128 VPR128:$src)>;
3191 def : Pat<(f128 (bitconvert (v2i64 VPR128:$src))), (f128 VPR128:$src)>;
3192 def : Pat<(f128 (bitconvert (v4f32 VPR128:$src))), (f128 VPR128:$src)>;
3193 def : Pat<(f128 (bitconvert (v2f64 VPR128:$src))), (f128 VPR128:$src)>;
3195 def : Pat<(v1i16 (bitconvert (f16 FPR16:$src))), (v1i16 FPR16:$src)>;
3196 def : Pat<(v1i32 (bitconvert (f32 FPR32:$src))), (v1i32 FPR32:$src)>;
3197 def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
3198 def : Pat<(v1f32 (bitconvert (f32 FPR32:$src))), (v1f32 FPR32:$src)>;
3199 def : Pat<(v1f64 (bitconvert (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
// GPR -> FPR bank crossing also needs an FMOV.
3201 def : Pat<(v1i64 (bitconvert (i64 GPR64:$src))), (FMOVdx $src)>;
3202 def : Pat<(v1i32 (bitconvert (i32 GPR32:$src))), (FMOVsw $src)>;
3204 def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
3205 def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
3206 def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
3207 def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
3208 def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
3210 def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))), (v16i8 FPR128:$src)>;
3211 def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))), (v8i16 FPR128:$src)>;
3212 def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))), (v4i32 FPR128:$src)>;
3213 def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))), (v2i64 FPR128:$src)>;
3214 def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))), (v4f32 FPR128:$src)>;
3215 def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))), (v2f64 FPR128:$src)>;
// Bare (un-prefixed) unsigned immediate operands used as lane indices.
// uimm0 is the only one that constrains the value in the ImmLeaf; the wider
// ones accept any value here and rely on the operand's asm parser class for
// range checking.
3217 def neon_uimm0_bare : Operand<i64>,
3218 ImmLeaf<i64, [{return Imm == 0;}]> {
3219 let ParserMatchClass = neon_uimm0_asmoperand;
3220 let PrintMethod = "printNeonUImm8OperandBare";
3223 def neon_uimm1_bare : Operand<i64>,
3224 ImmLeaf<i64, [{(void)Imm; return true;}]> {
3225 let ParserMatchClass = neon_uimm1_asmoperand;
3226 let PrintMethod = "printNeonUImm8OperandBare";
3229 def neon_uimm2_bare : Operand<i64>,
3230 ImmLeaf<i64, [{(void)Imm; return true;}]> {
3231 let ParserMatchClass = neon_uimm2_asmoperand;
3232 let PrintMethod = "printNeonUImm8OperandBare";
3235 def neon_uimm3_bare : Operand<i64>,
3236 ImmLeaf<i64, [{(void)Imm; return true;}]> {
3237 let ParserMatchClass = uimm3_asmoperand;
3238 let PrintMethod = "printNeonUImm8OperandBare";
3241 def neon_uimm4_bare : Operand<i64>,
3242 ImmLeaf<i64, [{(void)Imm; return true;}]> {
3243 let ParserMatchClass = uimm4_asmoperand;
3244 let PrintMethod = "printNeonUImm8OperandBare";
// INS (general register to vector element): inserts GPR $Rn into lane $Imm of
// the tied 128-bit vector, matching the `vector_insert` DAG node.
// NOTE(review): interior pattern lines are elided in this extract.
3247 class NeonI_INS_main<string asmop, string Res, ValueType ResTy,
3248 RegisterClass OpGPR, ValueType OpTy, Operand OpImm>
3249 : NeonI_copy<0b1, 0b0, 0b0011,
3250 (outs VPR128:$Rd), (ins VPR128:$src, OpGPR:$Rn, OpImm:$Imm),
3251 asmop # "\t$Rd." # Res # "[$Imm], $Rn",
3252 [(set (ResTy VPR128:$Rd),
3253 (ResTy (vector_insert
3254 (ResTy VPR128:$src),
// Tie the unmodified lanes' source to the destination register.
3259 let Constraints = "$src = $Rd";
3262 // The followings are for instruction class (3V Elem)
// By-element (lane-indexed) accumulating operation: $Rd is tied to $src, and
// the index/register fields are filled in by the per-size instantiations.
3266 class NI_2VE<bit q, bit u, bits<2> size, bits<4> opcode,
3267 string asmop, string ResS, string OpS, string EleOpS,
3268 Operand OpImm, RegisterOperand ResVPR,
3269 RegisterOperand OpVPR, RegisterOperand EleOpVPR>
3270 : NeonI_2VElem<q, u, size, opcode,
3271 (outs ResVPR:$Rd), (ins ResVPR:$src, OpVPR:$Rn,
3272 EleOpVPR:$Re, OpImm:$Index),
3273 asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
3274 ", $Re." # EleOpS # "[$Index]",
3280 let Constraints = "$src = $Rd";
// Per-size instantiations; each size encodes the lane index into different
// instruction bits (S elements: bits 11/21; H elements: bits 11/21/20).
3283 multiclass NI_2VE_v1<bit u, bits<4> opcode, string asmop>
3285 // vector register class for element is always 128-bit to cover the max index
3286 def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
3287 neon_uimm2_bare, VPR64, VPR64, VPR128> {
3288 let Inst{11} = {Index{1}};
3289 let Inst{21} = {Index{0}};
3290 let Inst{20-16} = Re;
3293 def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
3294 neon_uimm2_bare, VPR128, VPR128, VPR128> {
3295 let Inst{11} = {Index{1}};
3296 let Inst{21} = {Index{0}};
3297 let Inst{20-16} = Re;
3300 // Index operations on 16-bit(H) elements are restricted to using v0-v15.
3301 def _4h8h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
3302 neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
3303 let Inst{11} = {Index{2}};
3304 let Inst{21} = {Index{1}};
3305 let Inst{20} = {Index{0}};
3306 let Inst{19-16} = Re{3-0};
3309 def _8h8h : NI_2VE<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
3310 neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
3311 let Inst{11} = {Index{2}};
3312 let Inst{21} = {Index{1}};
3313 let Inst{20} = {Index{0}};
3314 let Inst{19-16} = Re{3-0};
// Lane-indexed multiply-accumulate / multiply-subtract.
3318 defm MLAvve : NI_2VE_v1<0b1, 0b0000, "mla">;
3319 defm MLSvve : NI_2VE_v1<0b1, 0b0100, "mls">;
3321 // Pattern for lane in 128-bit vector
// `coreop` duplicates the selected lane of $Re across a vector before the
// accumulating `op` is applied.
3322 class NI_2VE_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
3323 RegisterOperand ResVPR, RegisterOperand OpVPR,
3324 RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
3325 ValueType EleOpTy, SDPatternOperator coreop>
3326 : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
3327 (OpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
3328 (INST ResVPR:$src, OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
3330 // Pattern for lane in 64-bit vector
// The 64-bit element register is widened to 128-bit with SUBREG_TO_REG so the
// same 128-bit-element instruction can be used.
3331 class NI_2VE_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
3332 RegisterOperand ResVPR, RegisterOperand OpVPR,
3333 RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
3334 ValueType EleOpTy, SDPatternOperator coreop>
3335 : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
3336 (OpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
3337 (INST ResVPR:$src, OpVPR:$Rn,
3338 (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
// Lane patterns for accumulating by-element ops across all size combinations.
3340 multiclass NI_2VE_v1_pat<string subop, SDPatternOperator op>
3342 def : NI_2VE_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
3343 op, VPR64, VPR64, VPR128, v2i32, v2i32, v4i32,
3344 BinOpFrag<(Neon_vduplane
3345 (Neon_low4S node:$LHS), node:$RHS)>>;
3347 def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
3348 op, VPR128, VPR128, VPR128, v4i32, v4i32, v4i32,
3349 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3351 def : NI_2VE_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
3352 op, VPR64, VPR64, VPR128Lo, v4i16, v4i16, v8i16,
3353 BinOpFrag<(Neon_vduplane
3354 (Neon_low8H node:$LHS), node:$RHS)>>;
3356 def : NI_2VE_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
3357 op, VPR128, VPR128, VPR128Lo, v8i16, v8i16, v8i16,
3358 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3360 // Index can only be half of the max value for lane in 64-bit vector
3362 def : NI_2VE_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
3363 op, VPR64, VPR64, VPR64, v2i32, v2i32, v2i32,
3364 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3366 def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4s"), neon_uimm1_bare,
3367 op, VPR128, VPR128, VPR64, v4i32, v4i32, v2i32,
3368 BinOpFrag<(Neon_vduplane
3369 (Neon_combine_4S node:$LHS, undef),
3372 def : NI_2VE_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
3373 op, VPR64, VPR64, VPR64Lo, v4i16, v4i16, v4i16,
3374 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3376 def : NI_2VE_lane<!cast<Instruction>(subop # "_8h8h"), neon_uimm2_bare,
3377 op, VPR128, VPR128, VPR64Lo, v8i16, v8i16, v4i16,
3378 BinOpFrag<(Neon_vduplane
3379 (Neon_combine_8H node:$LHS, undef),
// Lane-indexed MLA/MLS selection patterns.
3383 defm MLA_lane_v1 : NI_2VE_v1_pat<"MLAvve", Neon_mla>;
3384 defm MLS_lane_v1 : NI_2VE_v1_pat<"MLSvve", Neon_mls>;
// By-element (lane-indexed) operation without an accumulator: two source
// operands only, no tied $src.
3386 class NI_2VE_2op<bit q, bit u, bits<2> size, bits<4> opcode,
3387 string asmop, string ResS, string OpS, string EleOpS,
3388 Operand OpImm, RegisterOperand ResVPR,
3389 RegisterOperand OpVPR, RegisterOperand EleOpVPR>
3390 : NeonI_2VElem<q, u, size, opcode,
3391 (outs ResVPR:$Rd), (ins OpVPR:$Rn,
3392 EleOpVPR:$Re, OpImm:$Index),
3393 asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
3394 ", $Re." # EleOpS # "[$Index]",
// Per-size instantiations with the same index-bit encodings as NI_2VE_v1.
3401 multiclass NI_2VE_v1_2op<bit u, bits<4> opcode, string asmop>
3403 // vector register class for element is always 128-bit to cover the max index
3404 def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
3405 neon_uimm2_bare, VPR64, VPR64, VPR128> {
3406 let Inst{11} = {Index{1}};
3407 let Inst{21} = {Index{0}};
3408 let Inst{20-16} = Re;
3411 def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
3412 neon_uimm2_bare, VPR128, VPR128, VPR128> {
3413 let Inst{11} = {Index{1}};
3414 let Inst{21} = {Index{0}};
3415 let Inst{20-16} = Re;
3418 // Index operations on 16-bit(H) elements are restricted to using v0-v15.
3419 def _4h8h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
3420 neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
3421 let Inst{11} = {Index{2}};
3422 let Inst{21} = {Index{1}};
3423 let Inst{20} = {Index{0}};
3424 let Inst{19-16} = Re{3-0};
3427 def _8h8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
3428 neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
3429 let Inst{11} = {Index{2}};
3430 let Inst{21} = {Index{1}};
3431 let Inst{20} = {Index{0}};
3432 let Inst{19-16} = Re{3-0};
// Lane-indexed multiply and saturating (rounding) doubling multiply high.
3436 defm MULve : NI_2VE_v1_2op<0b0, 0b1000, "mul">;
3437 defm SQDMULHve : NI_2VE_v1_2op<0b0, 0b1100, "sqdmulh">;
3438 defm SQRDMULHve : NI_2VE_v1_2op<0b0, 0b1101, "sqrdmulh">;
3440 // Pattern for lane in 128-bit vector
// Matches (op Rn, (coreop Re, Index)) where the element vector Re is
// already 128-bit, so it can feed the instruction directly.
3441 class NI_2VE_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
3442 RegisterOperand OpVPR, RegisterOperand EleOpVPR,
3443 ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
3444 SDPatternOperator coreop>
3445 : Pat<(ResTy (op (OpTy OpVPR:$Rn),
3446 (OpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
3447 (INST OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
3449 // Pattern for lane in 64-bit vector
// Same as above but the element vector is 64-bit, so it is first widened
// to 128-bit with SUBREG_TO_REG before being fed to the instruction.
3450 class NI_2VE_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
3451 RegisterOperand OpVPR, RegisterOperand EleOpVPR,
3452 ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
3453 SDPatternOperator coreop>
3454 : Pat<(ResTy (op (OpTy OpVPR:$Rn),
3455 (OpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
3457 (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
// ISel patterns mapping integer multiply-by-lane DAGs onto the Variant 1
// two-operand instructions defined above (both 128-bit and 64-bit lanes).
3459 multiclass NI_2VE_mul_v1_pat<string subop, SDPatternOperator op>
3461 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
3462 op, VPR64, VPR128, v2i32, v2i32, v4i32,
3463 BinOpFrag<(Neon_vduplane
3464 (Neon_low4S node:$LHS), node:$RHS)>>;
3466 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
3467 op, VPR128, VPR128, v4i32, v4i32, v4i32,
3468 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3470 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
3471 op, VPR64, VPR128Lo, v4i16, v4i16, v8i16,
3472 BinOpFrag<(Neon_vduplane
3473 (Neon_low8H node:$LHS), node:$RHS)>>;
3475 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
3476 op, VPR128, VPR128Lo, v8i16, v8i16, v8i16,
3477 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3479 // Index can only be half of the max value for lane in 64-bit vector
3481 def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
3482 op, VPR64, VPR64, v2i32, v2i32, v2i32,
3483 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3485 def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4s"), neon_uimm1_bare,
3486 op, VPR128, VPR64, v4i32, v4i32, v2i32,
3487 BinOpFrag<(Neon_vduplane
3488 (Neon_combine_4S node:$LHS, undef),
3491 def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
3492 op, VPR64, VPR64Lo, v4i16, v4i16, v4i16,
3493 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3495 def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_8h8h"), neon_uimm2_bare,
3496 op, VPR128, VPR64Lo, v8i16, v8i16, v4i16,
3497 BinOpFrag<(Neon_vduplane
3498 (Neon_combine_8H node:$LHS, undef),
3502 defm MUL_lane_v1 : NI_2VE_mul_v1_pat<"MULve", mul>;
3503 defm SQDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQDMULHve", int_arm_neon_vqdmulh>;
3504 defm SQRDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQRDMULHve", int_arm_neon_vqrdmulh>;
// Variant 2 (floating point), two-operand "multiply by element" multiclass.
// Covers S (2S/4S) and D (2D) element sizes; note no _1d2d form exists.
// Instantiated below for FMUL and FMULX.
3508 multiclass NI_2VE_v2_2op<bit u, bits<4> opcode, string asmop>
3510 // vector register class for element is always 128-bit to cover the max index
3511 def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
3512 neon_uimm2_bare, VPR64, VPR64, VPR128> {
3513 let Inst{11} = {Index{1}};
3514 let Inst{21} = {Index{0}};
3515 let Inst{20-16} = Re;
3518 def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
3519 neon_uimm2_bare, VPR128, VPR128, VPR128> {
3520 let Inst{11} = {Index{1}};
3521 let Inst{21} = {Index{0}};
3522 let Inst{20-16} = Re;
3525 // _1d2d doesn't exist!
3527 def _2d2d : NI_2VE_2op<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
3528 neon_uimm1_bare, VPR128, VPR128, VPR128> {
// 64-bit(D) element: only a single lane-index bit, placed in Inst{11}.
3529 let Inst{11} = {Index{0}};
3531 let Inst{20-16} = Re;
3535 defm FMULve : NI_2VE_v2_2op<0b0, 0b1001, "fmul">;
3536 defm FMULXve : NI_2VE_v2_2op<0b1, 0b1001, "fmulx">;
// Pattern for the 2d2d case with a v1f64 element: the 64-bit Re is widened
// with SUBREG_TO_REG and lane 0 is selected (the only valid lane).
3538 class NI_2VE_mul_lane_2d<Instruction INST, Operand OpImm, SDPatternOperator op,
3539 RegisterOperand OpVPR, RegisterOperand EleOpVPR,
3540 ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
3541 SDPatternOperator coreop>
3542 : Pat<(ResTy (op (OpTy OpVPR:$Rn),
3543 (OpTy (coreop (EleOpTy EleOpVPR:$Re), (EleOpTy EleOpVPR:$Re))))),
3545 (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), 0)>;
// ISel patterns mapping FP multiply-by-lane DAGs onto the Variant 2
// two-operand instructions (128-bit lanes, then halved-index 64-bit lanes).
3547 multiclass NI_2VE_mul_v2_pat<string subop, SDPatternOperator op>
3549 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
3550 op, VPR64, VPR128, v2f32, v2f32, v4f32,
3551 BinOpFrag<(Neon_vduplane
3552 (Neon_low4f node:$LHS), node:$RHS)>>;
3554 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
3555 op, VPR128, VPR128, v4f32, v4f32, v4f32,
3556 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3558 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
3559 op, VPR128, VPR128, v2f64, v2f64, v2f64,
3560 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3562 // Index can only be half of the max value for lane in 64-bit vector
3564 def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
3565 op, VPR64, VPR64, v2f32, v2f32, v2f32,
3566 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3568 def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4s"), neon_uimm1_bare,
3569 op, VPR128, VPR64, v4f32, v4f32, v2f32,
3570 BinOpFrag<(Neon_vduplane
3571 (Neon_combine_4f node:$LHS, undef),
3574 def : NI_2VE_mul_lane_2d<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
3575 op, VPR128, VPR64, v2f64, v2f64, v1f64,
3576 BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
3579 defm FMUL_lane_v2 : NI_2VE_mul_v2_pat<"FMULve", fmul>;
3580 defm FMULX_lane_v2 : NI_2VE_mul_v2_pat<"FMULXve", int_aarch64_neon_vmulx>;
3582 // The following are patterns using fma
3583 // -ffp-contract=fast generates fma
// Variant 2 (floating point), three-operand (accumulating) "by element"
// multiclass.  Same size/encoding layout as NI_2VE_v2_2op, but based on
// NI_2VE which carries an accumulator operand.  Used for FMLA/FMLS.
3585 multiclass NI_2VE_v2<bit u, bits<4> opcode, string asmop>
3587 // vector register class for element is always 128-bit to cover the max index
3588 def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
3589 neon_uimm2_bare, VPR64, VPR64, VPR128> {
3590 let Inst{11} = {Index{1}};
3591 let Inst{21} = {Index{0}};
3592 let Inst{20-16} = Re;
3595 def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
3596 neon_uimm2_bare, VPR128, VPR128, VPR128> {
3597 let Inst{11} = {Index{1}};
3598 let Inst{21} = {Index{0}};
3599 let Inst{20-16} = Re;
3602 // _1d2d doesn't exist!
3604 def _2d2d : NI_2VE<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
3605 neon_uimm1_bare, VPR128, VPR128, VPR128> {
3606 let Inst{11} = {Index{0}};
3608 let Inst{20-16} = Re;
3612 defm FMLAvve : NI_2VE_v2<0b0, 0b0001, "fmla">;
3613 defm FMLSvve : NI_2VE_v2<0b0, 0b0101, "fmls">;
3615 // Pattern for lane in 128-bit vector
// "swap" classes: the duplicated-lane multiplicand appears as the FIRST
// operand of `op` in the DAG (the fma operand order), while the
// instruction takes (accumulator, Rn, Re, Index).
3616 class NI_2VEswap_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
3617 RegisterOperand ResVPR, RegisterOperand OpVPR,
3618 ValueType ResTy, ValueType OpTy,
3619 SDPatternOperator coreop>
3620 : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
3621 (ResTy ResVPR:$src), (ResTy ResVPR:$Rn))),
3622 (INST ResVPR:$src, ResVPR:$Rn, OpVPR:$Re, OpImm:$Index)>;
3624 // Pattern for lane in 64-bit vector
// 64-bit element vector: widen Re with SUBREG_TO_REG before use.
3625 class NI_2VEswap_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
3626 RegisterOperand ResVPR, RegisterOperand OpVPR,
3627 ValueType ResTy, ValueType OpTy,
3628 SDPatternOperator coreop>
3629 : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
3630 (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
3631 (INST ResVPR:$src, ResVPR:$Rn,
3632 (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), OpImm:$Index)>;
3634 // Pattern for lane in 64-bit vector
// 2d2d special case: coreop combines the scalar element with itself, the
// instruction then always selects lane 0 of the widened register.
3635 class NI_2VEswap_lane_2d2d<Instruction INST, Operand OpImm,
3636 SDPatternOperator op,
3637 RegisterOperand ResVPR, RegisterOperand OpVPR,
3638 ValueType ResTy, ValueType OpTy,
3639 SDPatternOperator coreop>
3640 : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (OpTy OpVPR:$Re))),
3641 (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
3642 (INST ResVPR:$src, ResVPR:$Rn,
3643 (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), 0)>;
// Select fma-by-lane DAGs onto FMLA by-element instructions.
3646 multiclass NI_2VE_fma_v2_pat<string subop, SDPatternOperator op>
3648 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
3649 neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
3650 BinOpFrag<(Neon_vduplane
3651 (Neon_low4f node:$LHS), node:$RHS)>>;
3653 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
3654 neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
3655 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3657 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
3658 neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
3659 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3661 // Index can only be half of the max value for lane in 64-bit vector
3663 def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
3664 neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
3665 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3667 def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
3668 neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
3669 BinOpFrag<(Neon_vduplane
3670 (Neon_combine_4f node:$LHS, undef),
3673 def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
3674 neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
3675 BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
3678 defm FMLA_lane_v2_s : NI_2VE_fma_v2_pat<"FMLAvve", fma>;
// Select fma-with-negation DAGs onto FMLS by-element instructions.  Each
// size has two patterns because the fneg may appear either outside the
// lane-duplicate or on the vector being duplicated.
3680 multiclass NI_2VE_fms_v2_pat<string subop, SDPatternOperator op>
3682 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
3683 neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
3684 BinOpFrag<(fneg (Neon_vduplane
3685 (Neon_low4f node:$LHS), node:$RHS))>>;
3687 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
3688 neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
3689 BinOpFrag<(Neon_vduplane
3690 (Neon_low4f (fneg node:$LHS)),
3693 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
3694 neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
3695 BinOpFrag<(fneg (Neon_vduplane
3696 node:$LHS, node:$RHS))>>;
3698 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
3699 neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
3700 BinOpFrag<(Neon_vduplane
3701 (fneg node:$LHS), node:$RHS)>>;
3703 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
3704 neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
3705 BinOpFrag<(fneg (Neon_vduplane
3706 node:$LHS, node:$RHS))>>;
3708 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
3709 neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
3710 BinOpFrag<(Neon_vduplane
3711 (fneg node:$LHS), node:$RHS)>>;
3713 // Index can only be half of the max value for lane in 64-bit vector
3715 def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
3716 neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
3717 BinOpFrag<(fneg (Neon_vduplane
3718 node:$LHS, node:$RHS))>>;
3720 def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
3721 neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
3722 BinOpFrag<(Neon_vduplane
3723 (fneg node:$LHS), node:$RHS)>>;
3725 def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
3726 neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
3727 BinOpFrag<(fneg (Neon_vduplane
3728 (Neon_combine_4f node:$LHS, undef),
3731 def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
3732 neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
3733 BinOpFrag<(Neon_vduplane
3734 (Neon_combine_4f (fneg node:$LHS), undef),
3737 def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
3738 neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
3739 BinOpFrag<(fneg (Neon_combine_2d
3740 node:$LHS, node:$RHS))>>;
3742 def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
3743 neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
3744 BinOpFrag<(Neon_combine_2d
3745 (fneg node:$LHS), (fneg node:$RHS))>>;
3748 defm FMLS_lane_v2_s : NI_2VE_fms_v2_pat<"FMLSvve", fma>;
3750 // Variant 3: Long type
3751 // E.g. SMLAL : 4S/4H/H (v0-v15), 2D/2S/S
3752 // SMLAL2: 4S/8H/H (v0-v15), 2D/4S/S
// Variant 3 (long), three-operand "by element" multiclass: result element
// is twice the source element width.  The asmop # "2" forms are the
// second-half (".2") variants operating on the high half of a 128-bit Rn.
3754 multiclass NI_2VE_v3<bit u, bits<4> opcode, string asmop>
3756 // vector register class for element is always 128-bit to cover the max index
3757 def _2d2s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
3758 neon_uimm2_bare, VPR128, VPR64, VPR128> {
3759 let Inst{11} = {Index{1}};
3760 let Inst{21} = {Index{0}};
3761 let Inst{20-16} = Re;
3764 def _2d4s : NI_2VE<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
3765 neon_uimm2_bare, VPR128, VPR128, VPR128> {
3766 let Inst{11} = {Index{1}};
3767 let Inst{21} = {Index{0}};
3768 let Inst{20-16} = Re;
3771 // Index operations on 16-bit(H) elements are restricted to using v0-v15.
3772 def _4s8h : NI_2VE<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
3773 neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
3774 let Inst{11} = {Index{2}};
3775 let Inst{21} = {Index{1}};
3776 let Inst{20} = {Index{0}};
3777 let Inst{19-16} = Re{3-0};
3780 def _4s4h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
3781 neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
3782 let Inst{11} = {Index{2}};
3783 let Inst{21} = {Index{1}};
3784 let Inst{20} = {Index{0}};
3785 let Inst{19-16} = Re{3-0};
// Long multiply-accumulate by element (three-operand forms).
3789 defm SMLALvve : NI_2VE_v3<0b0, 0b0010, "smlal">;
3790 defm UMLALvve : NI_2VE_v3<0b1, 0b0010, "umlal">;
3791 defm SMLSLvve : NI_2VE_v3<0b0, 0b0110, "smlsl">;
3792 defm UMLSLvve : NI_2VE_v3<0b1, 0b0110, "umlsl">;
3793 defm SQDMLALvve : NI_2VE_v3<0b0, 0b0011, "sqdmlal">;
3794 defm SQDMLSLvve : NI_2VE_v3<0b0, 0b0111, "sqdmlsl">;
// Variant 3 (long), two-operand "multiply by element" multiclass.  Same
// size/encoding layout as NI_2VE_v3 but without the accumulator operand.
// Instantiated below for SMULL, UMULL and SQDMULL.
3796 multiclass NI_2VE_v3_2op<bit u, bits<4> opcode, string asmop>
3798 // vector register class for element is always 128-bit to cover the max index
3799 def _2d2s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
3800 neon_uimm2_bare, VPR128, VPR64, VPR128> {
3801 let Inst{11} = {Index{1}};
3802 let Inst{21} = {Index{0}};
3803 let Inst{20-16} = Re;
3806 def _2d4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
3807 neon_uimm2_bare, VPR128, VPR128, VPR128> {
3808 let Inst{11} = {Index{1}};
3809 let Inst{21} = {Index{0}};
3810 let Inst{20-16} = Re;
3813 // Index operations on 16-bit(H) elements are restricted to using v0-v15.
3814 def _4s8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
3815 neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
3816 let Inst{11} = {Index{2}};
3817 let Inst{21} = {Index{1}};
3818 let Inst{20} = {Index{0}};
3819 let Inst{19-16} = Re{3-0};
3822 def _4s4h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
3823 neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
3824 let Inst{11} = {Index{2}};
3825 let Inst{21} = {Index{1}};
3826 let Inst{20} = {Index{0}};
3827 let Inst{19-16} = Re{3-0};
3831 defm SMULLve : NI_2VE_v3_2op<0b0, 0b1010, "smull">;
3832 defm UMULLve : NI_2VE_v3_2op<0b1, 0b1010, "umull">;
3833 defm SQDMULLve : NI_2VE_v3_2op<0b0, 0b1011, "sqdmull">;
3835 // Pattern for lane in 128-bit vector
// "2" (second-half) accumulating long patterns: `hiop` extracts the high
// half of the 128-bit Rn before the widening op is applied.
3836 class NI_2VEL2_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
3837 RegisterOperand EleOpVPR, ValueType ResTy,
3838 ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
3839 SDPatternOperator hiop, SDPatternOperator coreop>
3840 : Pat<(ResTy (op (ResTy VPR128:$src),
3841 (HalfOpTy (hiop (OpTy VPR128:$Rn))),
3842 (HalfOpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
3843 (INST VPR128:$src, VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
3845 // Pattern for lane in 64-bit vector
// As above, widening the 64-bit element register with SUBREG_TO_REG.
3846 class NI_2VEL2_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
3847 RegisterOperand EleOpVPR, ValueType ResTy,
3848 ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
3849 SDPatternOperator hiop, SDPatternOperator coreop>
3850 : Pat<(ResTy (op (ResTy VPR128:$src),
3851 (HalfOpTy (hiop (OpTy VPR128:$Rn))),
3852 (HalfOpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
3853 (INST VPR128:$src, VPR128:$Rn,
3854 (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
// Select long multiply-accumulate-by-lane DAGs (SMLAL/UMLAL/SMLSL/UMLSL)
// onto the Variant 3 three-operand instructions, including the second-half
// ("2") forms via NI_2VEL2_*.
3856 multiclass NI_2VEL_v3_pat<string subop, SDPatternOperator op>
3858 def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
3859 op, VPR128, VPR64, VPR128Lo, v4i32, v4i16, v8i16,
3860 BinOpFrag<(Neon_vduplane
3861 (Neon_low8H node:$LHS), node:$RHS)>>;
3863 def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
3864 op, VPR128, VPR64, VPR128, v2i64, v2i32, v4i32,
3865 BinOpFrag<(Neon_vduplane
3866 (Neon_low4S node:$LHS), node:$RHS)>>;
3868 def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
3869 op, VPR128Lo, v4i32, v8i16, v8i16, v4i16, Neon_High8H,
3870 BinOpFrag<(Neon_vduplane
3871 (Neon_low8H node:$LHS), node:$RHS)>>;
3873 def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
3874 op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S,
3875 BinOpFrag<(Neon_vduplane
3876 (Neon_low4S node:$LHS), node:$RHS)>>;
3878 // Index can only be half of the max value for lane in 64-bit vector
3880 def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
3881 op, VPR128, VPR64, VPR64Lo, v4i32, v4i16, v4i16,
3882 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3884 def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
3885 op, VPR128, VPR64, VPR64, v2i64, v2i32, v2i32,
3886 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3888 def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
3889 op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H,
3890 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3892 def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
3893 op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S,
3894 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3897 defm SMLAL_lane_v3 : NI_2VEL_v3_pat<"SMLALvve", Neon_smlal>;
3898 defm UMLAL_lane_v3 : NI_2VEL_v3_pat<"UMLALvve", Neon_umlal>;
3899 defm SMLSL_lane_v3 : NI_2VEL_v3_pat<"SMLSLvve", Neon_smlsl>;
3900 defm UMLSL_lane_v3 : NI_2VEL_v3_pat<"UMLSLvve", Neon_umlsl>;
3902 // Pattern for lane in 128-bit vector
// Non-accumulating second-half long multiply patterns (no $src operand).
3903 class NI_2VEL2_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
3904 RegisterOperand EleOpVPR, ValueType ResTy,
3905 ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
3906 SDPatternOperator hiop, SDPatternOperator coreop>
3908 (HalfOpTy (hiop (OpTy VPR128:$Rn))),
3909 (HalfOpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
3910 (INST VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
3912 // Pattern for lane in 64-bit vector
// As above, widening the 64-bit element register with SUBREG_TO_REG.
3913 class NI_2VEL2_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
3914 RegisterOperand EleOpVPR, ValueType ResTy,
3915 ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
3916 SDPatternOperator hiop, SDPatternOperator coreop>
3918 (HalfOpTy (hiop (OpTy VPR128:$Rn))),
3919 (HalfOpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
3921 (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
// Select long multiply-by-lane DAGs (SMULL/UMULL/SQDMULL) onto the
// Variant 3 two-operand instructions.
3923 multiclass NI_2VEL_mul_v3_pat<string subop, SDPatternOperator op>
3925 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
3926 op, VPR64, VPR128Lo, v4i32, v4i16, v8i16,
3927 BinOpFrag<(Neon_vduplane
3928 (Neon_low8H node:$LHS), node:$RHS)>>;
3930 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
3931 op, VPR64, VPR128, v2i64, v2i32, v4i32,
3932 BinOpFrag<(Neon_vduplane
3933 (Neon_low4S node:$LHS), node:$RHS)>>;
3935 def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
3936 op, VPR128Lo, v4i32, v8i16, v8i16, v4i16,
3938 BinOpFrag<(Neon_vduplane
3939 (Neon_low8H node:$LHS), node:$RHS)>>;
3941 def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
3942 op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S,
3943 BinOpFrag<(Neon_vduplane
3944 (Neon_low4S node:$LHS), node:$RHS)>>;
3946 // Index can only be half of the max value for lane in 64-bit vector
3948 def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
3949 op, VPR64, VPR64Lo, v4i32, v4i16, v4i16,
3950 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3952 def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
3953 op, VPR64, VPR64, v2i64, v2i32, v2i32,
3954 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3956 def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
3957 op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H,
3958 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3960 def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
3961 op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S,
3962 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3965 defm SMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SMULLve", int_arm_neon_vmulls>;
3966 defm UMULL_lane_v3 : NI_2VEL_mul_v3_pat<"UMULLve", int_arm_neon_vmullu>;
3967 defm SQDMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SQDMULLve", int_arm_neon_vqdmull>;
// PatFrags combining a saturating add/sub (op) with a vqdmull, i.e. the
// DAG shape of sqdmlal/sqdmlsl, for 4S and 2D result types.
3969 multiclass NI_qdma<SDPatternOperator op>
3971 def _4s : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
3973 (v4i32 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
3975 def _2d : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
3977 (v2i64 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
3980 defm Neon_qdmlal : NI_qdma<int_arm_neon_vqadds>;
3981 defm Neon_qdmlsl : NI_qdma<int_arm_neon_vqsubs>;
// Select sqdmlal/sqdmlsl-by-lane DAGs (built from the NI_qdma PatFrags,
// looked up by name) onto the SQDMLAL/SQDMLSL by-element instructions.
3983 multiclass NI_2VEL_v3_qdma_pat<string subop, string op>
3985 def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
3986 !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR128Lo,
3987 v4i32, v4i16, v8i16,
3988 BinOpFrag<(Neon_vduplane
3989 (Neon_low8H node:$LHS), node:$RHS)>>;
3991 def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
3992 !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR128,
3993 v2i64, v2i32, v4i32,
3994 BinOpFrag<(Neon_vduplane
3995 (Neon_low4S node:$LHS), node:$RHS)>>;
3997 def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
3998 !cast<PatFrag>(op # "_4s"), VPR128Lo,
3999 v4i32, v8i16, v8i16, v4i16, Neon_High8H,
4000 BinOpFrag<(Neon_vduplane
4001 (Neon_low8H node:$LHS), node:$RHS)>>;
4003 def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
4004 !cast<PatFrag>(op # "_2d"), VPR128,
4005 v2i64, v4i32, v4i32, v2i32, Neon_High4S,
4006 BinOpFrag<(Neon_vduplane
4007 (Neon_low4S node:$LHS), node:$RHS)>>;
4009 // Index can only be half of the max value for lane in 64-bit vector
4011 def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
4012 !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR64Lo,
4013 v4i32, v4i16, v4i16,
4014 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
4016 def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
4017 !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR64,
4018 v2i64, v2i32, v2i32,
4019 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
4021 def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
4022 !cast<PatFrag>(op # "_4s"), VPR64Lo,
4023 v4i32, v8i16, v4i16, v4i16, Neon_High8H,
4024 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
4026 def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
4027 !cast<PatFrag>(op # "_2d"), VPR64,
4028 v2i64, v4i32, v2i32, v2i32, Neon_High4S,
4029 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
4032 defm SQDMLAL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLALvve", "Neon_qdmlal">;
4033 defm SQDMLSL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLSLvve", "Neon_qdmlsl">;
4035 // End of implementation for instruction class (3V Elem)
4037 //Insert element (vector, from main)
// INS (general register to vector element), one def per element size.
// Inst{20-16} is the imm5 field: lane index bits followed by the size
// marker bit pattern (b:xxxx1, h:xxx10, s:xx100, d:x1000).
4038 def INSbw : NeonI_INS_main<"ins", "b", v16i8, GPR32, i32,
4040 let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
4042 def INShw : NeonI_INS_main<"ins", "h", v8i16, GPR32, i32,
4044 let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
4046 def INSsw : NeonI_INS_main<"ins", "s", v4i32, GPR32, i32,
4048 let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
4050 def INSdx : NeonI_INS_main<"ins", "d", v2i64, GPR64, i64,
4052 let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
// vector_insert on a 64-bit vector: widen the source with SUBREG_TO_REG,
// perform the 128-bit INS, then extract the low 64 bits again.
4055 class Neon_INS_main_pattern <ValueType ResTy,ValueType ExtResTy,
4056 RegisterClass OpGPR, ValueType OpTy,
4057 Operand OpImm, Instruction INS>
4058 : Pat<(ResTy (vector_insert
4062 (ResTy (EXTRACT_SUBREG
4063 (ExtResTy (INS (ExtResTy (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
4064 OpGPR:$Rn, OpImm:$Imm)), sub_64))>;
4066 def INSbw_pattern : Neon_INS_main_pattern<v8i8, v16i8, GPR32, i32,
4067 neon_uimm3_bare, INSbw>;
4068 def INShw_pattern : Neon_INS_main_pattern<v4i16, v8i16, GPR32, i32,
4069 neon_uimm2_bare, INShw>;
4070 def INSsw_pattern : Neon_INS_main_pattern<v2i32, v4i32, GPR32, i32,
4071 neon_uimm1_bare, INSsw>;
4072 def INSdx_pattern : Neon_INS_main_pattern<v1i64, v2i64, GPR64, i64,
4073 neon_uimm0_bare, INSdx>;
// INS (element to element): inserts lane Immn of Rn into lane Immd of Rd.
// Tied operand: destination register doubles as the insertion source.
4075 class NeonI_INS_element<string asmop, string Res, ValueType ResTy,
4076 Operand ResImm, ValueType MidTy>
4077 : NeonI_insert<0b1, 0b1,
4078 (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn,
4079 ResImm:$Immd, ResImm:$Immn),
4080 asmop # "\t$Rd." # Res # "[$Immd], $Rn." # Res # "[$Immn]",
4081 [(set (ResTy VPR128:$Rd),
4082 (ResTy (vector_insert
4083 (ResTy VPR128:$src),
4084 (MidTy (vector_extract
4089 let Constraints = "$src = $Rd";
4094 //Insert element (vector, from element)
// imm5 (Inst{20-16}) encodes the destination lane + size marker; imm4
// (Inst{14-11}) encodes the source lane, with low bits unspecified for
// wider elements.
4095 def INSELb : NeonI_INS_element<"ins", "b", v16i8, neon_uimm4_bare, i32> {
4096 let Inst{20-16} = {Immd{3}, Immd{2}, Immd{1}, Immd{0}, 0b1};
4097 let Inst{14-11} = {Immn{3}, Immn{2}, Immn{1}, Immn{0}};
4099 def INSELh : NeonI_INS_element<"ins", "h", v8i16, neon_uimm3_bare, i32> {
4100 let Inst{20-16} = {Immd{2}, Immd{1}, Immd{0}, 0b1, 0b0};
4101 let Inst{14-12} = {Immn{2}, Immn{1}, Immn{0}};
4102 // bit 11 is unspecified.
4104 def INSELs : NeonI_INS_element<"ins", "s", v4i32, neon_uimm2_bare, i32> {
4105 let Inst{20-16} = {Immd{1}, Immd{0}, 0b1, 0b0, 0b0};
4106 let Inst{14-13} = {Immn{1}, Immn{0}};
4107 // bits 11-12 are unspecified.
4109 def INSELd : NeonI_INS_element<"ins", "d", v2i64, neon_uimm1_bare, i64> {
4110 let Inst{20-16} = {Immd, 0b1, 0b0, 0b0, 0b0};
4111 let Inst{14} = Immn{0};
4112 // bits 11-13 are unspecified.
// Element-to-element insert patterns for the 64-bit ("narrow", NaTy) and
// 128-bit ("stored", StTy) vector type pairs; 64-bit operands are widened
// with SUBREG_TO_REG and the result narrowed back with EXTRACT_SUBREG.
4115 multiclass Neon_INS_elt_pattern <ValueType NaTy, Operand NaImm,
4116 ValueType MidTy, ValueType StTy,
4117 Operand StImm, Instruction INS> {
4118 def : Pat<(NaTy (vector_insert
4120 (MidTy (vector_extract
4124 (NaTy (EXTRACT_SUBREG
4126 (StTy (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
4132 def : Pat<(StTy (vector_insert
4134 (MidTy (vector_extract
4140 (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
4144 def : Pat<(NaTy (vector_insert
4146 (MidTy (vector_extract
4150 (NaTy (EXTRACT_SUBREG
4152 (StTy (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
4153 (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
4159 defm INSb_pattern : Neon_INS_elt_pattern<v8i8, neon_uimm3_bare, i32,
4160 v16i8, neon_uimm4_bare, INSELb>;
4161 defm INSh_pattern : Neon_INS_elt_pattern<v4i16, neon_uimm2_bare, i32,
4162 v8i16, neon_uimm3_bare, INSELh>;
4163 defm INSs_pattern : Neon_INS_elt_pattern<v2i32, neon_uimm1_bare, i32,
4164 v4i32, neon_uimm2_bare, INSELs>;
4165 defm INSd_pattern : Neon_INS_elt_pattern<v1i64, neon_uimm0_bare, i64,
4166 v2i64, neon_uimm1_bare, INSELd>;
// SMOV: move vector element to general register with sign extension.
// Q selects 32-bit (W) vs 64-bit (X) destination forms.
4168 class NeonI_SMOV<string asmop, string Res, bit Q,
4169 ValueType OpTy, ValueType eleTy,
4170 Operand OpImm, RegisterClass ResGPR, ValueType ResTy>
4171 : NeonI_copy<Q, 0b0, 0b0101,
4172 (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
4173 asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
4174 [(set (ResTy ResGPR:$Rd),
4176 (ResTy (vector_extract
4177 (OpTy VPR128:$Rn), (OpImm:$Imm))),
4183 //Signed integer move (main, from element)
// imm5 in Inst{20-16}: lane index bits plus the element-size marker.
4184 def SMOVwb : NeonI_SMOV<"smov", "b", 0b0, v16i8, i8, neon_uimm4_bare,
4186 let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
4188 def SMOVwh : NeonI_SMOV<"smov", "h", 0b0, v8i16, i16, neon_uimm3_bare,
4190 let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
4192 def SMOVxb : NeonI_SMOV<"smov", "b", 0b1, v16i8, i8, neon_uimm4_bare,
4194 let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
4196 def SMOVxh : NeonI_SMOV<"smov", "h", 0b1, v8i16, i16, neon_uimm3_bare,
4198 let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
4200 def SMOVxs : NeonI_SMOV<"smov", "s", 0b1, v4i32, i32, neon_uimm2_bare,
4202 let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
// Match the various sext / sext_inreg shapes of a lane extract onto the
// X-form SMOVs, for both 128-bit (StTy) and widened 64-bit (NaTy) inputs.
4205 multiclass Neon_SMOVx_pattern <ValueType StTy, ValueType NaTy,
4206 ValueType eleTy, Operand StImm, Operand NaImm,
4207 Instruction SMOVI> {
4208 def : Pat<(i64 (sext_inreg
4210 (i32 (vector_extract
4211 (StTy VPR128:$Rn), (StImm:$Imm))))),
4213 (SMOVI VPR128:$Rn, StImm:$Imm)>;
4215 def : Pat<(i64 (sext
4216 (i32 (vector_extract
4217 (StTy VPR128:$Rn), (StImm:$Imm))))),
4218 (SMOVI VPR128:$Rn, StImm:$Imm)>;
4220 def : Pat<(i64 (sext_inreg
4221 (i64 (vector_extract
4222 (NaTy VPR64:$Rn), (NaImm:$Imm))),
4224 (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
4227 def : Pat<(i64 (sext_inreg
4229 (i32 (vector_extract
4230 (NaTy VPR64:$Rn), (NaImm:$Imm))))),
4232 (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
4235 def : Pat<(i64 (sext
4236 (i32 (vector_extract
4237 (NaTy VPR64:$Rn), (NaImm:$Imm))))),
4238 (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
4242 defm SMOVxb_pattern : Neon_SMOVx_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
4243 neon_uimm3_bare, SMOVxb>;
4244 defm SMOVxh_pattern : Neon_SMOVx_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
4245 neon_uimm2_bare, SMOVxh>;
4246 defm SMOVxs_pattern : Neon_SMOVx_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
4247 neon_uimm1_bare, SMOVxs>;
// W-form SMOV pattern for a 64-bit input vector, widened before the move.
4249 class Neon_SMOVw_pattern <ValueType StTy, ValueType NaTy,
4250 ValueType eleTy, Operand StImm, Operand NaImm,
4252 : Pat<(i32 (sext_inreg
4253 (i32 (vector_extract
4254 (NaTy VPR64:$Rn), (NaImm:$Imm))),
4256 (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
4259 def SMOVwb_pattern : Neon_SMOVw_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
4260 neon_uimm3_bare, SMOVwb>;
4261 def SMOVwh_pattern : Neon_SMOVw_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
4262 neon_uimm2_bare, SMOVwh>;
// UMOV: move vector element to general register (zero extension is
// implicit in the W-form register write).
4265 class NeonI_UMOV<string asmop, string Res, bit Q,
4266 ValueType OpTy, Operand OpImm,
4267 RegisterClass ResGPR, ValueType ResTy>
4268 : NeonI_copy<Q, 0b0, 0b0111,
4269 (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
4270 asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
4271 [(set (ResTy ResGPR:$Rd),
4272 (ResTy (vector_extract
4273 (OpTy VPR128:$Rn), (OpImm:$Imm))))],
4278 //Unsigned integer move (main, from element)
// imm5 in Inst{20-16}: lane index bits plus the element-size marker.
4279 def UMOVwb : NeonI_UMOV<"umov", "b", 0b0, v16i8, neon_uimm4_bare,
4281 let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
4283 def UMOVwh : NeonI_UMOV<"umov", "h", 0b0, v8i16, neon_uimm3_bare,
4285 let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
4287 def UMOVws : NeonI_UMOV<"umov", "s", 0b0, v4i32, neon_uimm2_bare,
4289 let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
4291 def UMOVxd : NeonI_UMOV<"umov", "d", 0b1, v2i64, neon_uimm1_bare,
4293 let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
// UMOV pattern for a 64-bit input vector: widen with SUBREG_TO_REG first.
// (Parameter is named SMOVI but a UMOV instruction is passed in.)
4296 class Neon_UMOV_pattern <ValueType StTy, ValueType NaTy, ValueType ResTy,
4297 Operand StImm, Operand NaImm,
4299 : Pat<(ResTy (vector_extract
4300 (NaTy VPR64:$Rn), NaImm:$Imm)),
4301 (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
4304 def UMOVwb_pattern : Neon_UMOV_pattern<v16i8, v8i8, i32, neon_uimm4_bare,
4305 neon_uimm3_bare, UMOVwb>;
4306 def UMOVwh_pattern : Neon_UMOV_pattern<v8i16, v4i16, i32, neon_uimm3_bare,
4307 neon_uimm2_bare, UMOVwh>;
4308 def UMOVws_pattern : Neon_UMOV_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
4309 neon_uimm1_bare, UMOVws>;
// Zero-extended lane extracts selected directly onto UMOV; 64-bit input
// vectors are widened with SUBREG_TO_REG first.
4312 (i32 (vector_extract
4313 (v16i8 VPR128:$Rn), (neon_uimm4_bare:$Imm))),
4315 (UMOVwb VPR128:$Rn, neon_uimm4_bare:$Imm)>;
4318 (i32 (vector_extract
4319 (v8i16 VPR128:$Rn), (neon_uimm3_bare:$Imm))),
4321 (UMOVwh VPR128:$Rn, neon_uimm3_bare:$Imm)>;
4323 def : Pat<(i64 (zext
4324 (i32 (vector_extract
4325 (v2i64 VPR128:$Rn), (neon_uimm1_bare:$Imm))))),
4326 (UMOVxd VPR128:$Rn, neon_uimm1_bare:$Imm)>;
4329 (i32 (vector_extract
4330 (v8i8 VPR64:$Rn), (neon_uimm3_bare:$Imm))),
4332 (UMOVwb (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
4333 neon_uimm3_bare:$Imm)>;
4336 (i32 (vector_extract
4337 (v4i16 VPR64:$Rn), (neon_uimm2_bare:$Imm))),
4339 (UMOVwh (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
4340 neon_uimm2_bare:$Imm)>;
4342 def : Pat<(i64 (zext
4343 (i32 (vector_extract
4344 (v1i64 VPR64:$Rn), (neon_uimm0_bare:$Imm))))),
4345 (UMOVxd (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
4346 neon_uimm0_bare:$Imm)>;
4348 // Additional copy patterns for scalar types
// Extract of lane 0 from a one-element vector: widen the scalar FPR and
// read it back (b/h), or use a plain FMOV for s/d element sizes.
4349 def : Pat<(i32 (vector_extract (v1i8 FPR8:$Rn), (i64 0))),
4351 (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8)), (i64 0))>;
4353 def : Pat<(i32 (vector_extract (v1i16 FPR16:$Rn), (i64 0))),
4355 (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16)), (i64 0))>;
4357 def : Pat<(i32 (vector_extract (v1i32 FPR32:$Rn), (i64 0))),
4358 (FMOVws FPR32:$Rn)>;
4360 def : Pat<(i64 (vector_extract (v1i64 FPR64:$Rn), (i64 0))),
4361 (FMOVxd FPR64:$Rn)>;
4363 def : Pat<(f64 (vector_extract (v1f64 FPR64:$Rn), (i64 0))),
4366 def : Pat<(f32 (vector_extract (v1f32 FPR32:$Rn), (i64 0))),
// scalar_to_vector: insert into lane 0 of an IMPLICIT_DEF 128-bit vector,
// then narrow to the one-element vector type.
4369 def : Pat<(v1i8 (scalar_to_vector GPR32:$Rn)),
4370 (v1i8 (EXTRACT_SUBREG (v16i8
4371 (INSbw (v16i8 (IMPLICIT_DEF)), $Rn, (i64 0))),
4374 def : Pat<(v1i16 (scalar_to_vector GPR32:$Rn)),
4375 (v1i16 (EXTRACT_SUBREG (v8i16
4376 (INShw (v8i16 (IMPLICIT_DEF)), $Rn, (i64 0))),
4379 def : Pat<(v1i32 (scalar_to_vector GPR32:$src)),
4382 def : Pat<(v1i64 (scalar_to_vector GPR64:$src)),