1 //===-- AArch64InstrNEON.td - NEON support for AArch64 -----*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the AArch64 NEON instruction set.
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // NEON-specific DAG Nodes.
16 //===----------------------------------------------------------------------===//
// NEON bitwise-select DAG node: one vector result and three operands, all
// constrained to the same vector type as the result.
17 def Neon_bsl       : SDNode<"AArch64ISD::NEON_BSL", SDTypeProfile<1, 3,
18                       [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
19                       SDTCisSameAs<0, 3>]>>;
21 // (outs Result), (ins Imm, OpCmode)
// Shared profile for the immediate-move nodes: vector result and two inputs;
// only the first input (Imm) is constrained to i32 by the profile.
22 def SDT_Neon_movi : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
24 def Neon_movi : SDNode<"AArch64ISD::NEON_MOVIMM", SDT_Neon_movi>;
26 def Neon_mvni : SDNode<"AArch64ISD::NEON_MVNIMM", SDT_Neon_movi>;
28 // (outs Result), (ins Imm)
// Floating-point move-immediate node: vector result, single i32 input.
29 def Neon_fmovi : SDNode<"AArch64ISD::NEON_FMOVIMM", SDTypeProfile<1, 1,
30                         [SDTCisVec<0>, SDTCisVT<1, i32>]>>;
32 // (outs Result), (ins LHS, RHS, CondCode)
// Vector compare: operands 1 and 2 must match; the condition-code operand's
// type is left unconstrained by the profile.
33 def Neon_cmp : SDNode<"AArch64ISD::NEON_CMP", SDTypeProfile<1, 3,
34                  [SDTCisVec<0>,  SDTCisSameAs<1, 2>]>>;
36 // (outs Result), (ins LHS, 0/0.0 constant, CondCode)
// Compare-against-zero: only the result and the first operand are
// constrained to be vectors.
37 def Neon_cmpz : SDNode<"AArch64ISD::NEON_CMPZ", SDTypeProfile<1, 3,
38                  [SDTCisVec<0>,  SDTCisVec<1>]>>;
40 // (outs Result), (ins LHS, RHS)
// Bitwise-test compare (used by CMTST below).
41 def Neon_tst : SDNode<"AArch64ISD::NEON_TST", SDTypeProfile<1, 2,
42                  [SDTCisVec<0>,  SDTCisSameAs<1, 2>]>>;
// Duplicate-immediate node: vector result from an i32 immediate.
44 def Neon_dupImm : SDNode<"AArch64ISD::NEON_DUPIMM", SDTypeProfile<1, 1,
45                     [SDTCisVec<0>, SDTCisVT<1, i32>]>>;
47 def SDTARMVSH : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
// Signed/unsigned saturating-shift nodes sharing the SDTARMVSH profile
// defined above. NOTE(review): names say "rshl" but the ISD opcodes are
// NEON_QSHLs/NEON_QSHLu — confirm intended semantics against ISelLowering.
49 def Neon_sqrshlImm   : SDNode<"AArch64ISD::NEON_QSHLs", SDTARMVSH>;
50 def Neon_uqrshlImm   : SDNode<"AArch64ISD::NEON_QSHLu", SDTARMVSH>;
// Duplicate-lane node: vector result, vector input and an i64 lane index.
52 def Neon_vduplane : SDNode<"AArch64ISD::NEON_VDUPLANE", SDTypeProfile<1, 2,
53                       [SDTCisVec<0>, SDTCisVec<1>, SDTCisVT<2, i64>]>>;
55 //===----------------------------------------------------------------------===//
57 //===----------------------------------------------------------------------===//
59 multiclass NeonI_3VSame_B_sizes<bit u, bits<2> size, bits<5> opcode,
60 string asmop, SDPatternOperator opnode8B,
61 SDPatternOperator opnode16B,
64 let isCommutable = Commutable in {
65 def _8B : NeonI_3VSame<0b0, u, size, opcode,
66 (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
67 asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
68 [(set (v8i8 VPR64:$Rd),
69 (v8i8 (opnode8B (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
72 def _16B : NeonI_3VSame<0b1, u, size, opcode,
73 (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
74 asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
75 [(set (v16i8 VPR128:$Rd),
76 (v16i8 (opnode16B (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
82 multiclass NeonI_3VSame_HS_sizes<bit u, bits<5> opcode,
83 string asmop, SDPatternOperator opnode,
86 let isCommutable = Commutable in {
87 def _4H : NeonI_3VSame<0b0, u, 0b01, opcode,
88 (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
89 asmop # "\t$Rd.4h, $Rn.4h, $Rm.4h",
90 [(set (v4i16 VPR64:$Rd),
91 (v4i16 (opnode (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))))],
94 def _8H : NeonI_3VSame<0b1, u, 0b01, opcode,
95 (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
96 asmop # "\t$Rd.8h, $Rn.8h, $Rm.8h",
97 [(set (v8i16 VPR128:$Rd),
98 (v8i16 (opnode (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))))],
101 def _2S : NeonI_3VSame<0b0, u, 0b10, opcode,
102 (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
103 asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
104 [(set (v2i32 VPR64:$Rd),
105 (v2i32 (opnode (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))))],
108 def _4S : NeonI_3VSame<0b1, u, 0b10, opcode,
109 (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
110 asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
111 [(set (v4i32 VPR128:$Rd),
112 (v4i32 (opnode (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))))],
116 multiclass NeonI_3VSame_BHS_sizes<bit u, bits<5> opcode,
117 string asmop, SDPatternOperator opnode,
119 : NeonI_3VSame_HS_sizes<u, opcode, asmop, opnode, Commutable>
121 let isCommutable = Commutable in {
122 def _8B : NeonI_3VSame<0b0, u, 0b00, opcode,
123 (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
124 asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
125 [(set (v8i8 VPR64:$Rd),
126 (v8i8 (opnode (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
129 def _16B : NeonI_3VSame<0b1, u, 0b00, opcode,
130 (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
131 asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
132 [(set (v16i8 VPR128:$Rd),
133 (v16i8 (opnode (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
138 multiclass NeonI_3VSame_BHSD_sizes<bit u, bits<5> opcode,
139 string asmop, SDPatternOperator opnode,
141 : NeonI_3VSame_BHS_sizes<u, opcode, asmop, opnode, Commutable>
143 let isCommutable = Commutable in {
144 def _2D : NeonI_3VSame<0b1, u, 0b11, opcode,
145 (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
146 asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
147 [(set (v2i64 VPR128:$Rd),
148 (v2i64 (opnode (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))))],
153 // Multiclass NeonI_3VSame_SD_sizes: Operand types are floating point types,
154 // but Result types can be integer or floating point types.
155 multiclass NeonI_3VSame_SD_sizes<bit u, bit size, bits<5> opcode,
156 string asmop, SDPatternOperator opnode2S,
157 SDPatternOperator opnode4S,
158 SDPatternOperator opnode2D,
159 ValueType ResTy2S, ValueType ResTy4S,
160 ValueType ResTy2D, bit Commutable = 0>
162 let isCommutable = Commutable in {
163 def _2S : NeonI_3VSame<0b0, u, {size, 0b0}, opcode,
164 (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
165 asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
166 [(set (ResTy2S VPR64:$Rd),
167 (ResTy2S (opnode2S (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))))],
170 def _4S : NeonI_3VSame<0b1, u, {size, 0b0}, opcode,
171 (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
172 asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
173 [(set (ResTy4S VPR128:$Rd),
174 (ResTy4S (opnode4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))))],
177 def _2D : NeonI_3VSame<0b1, u, {size, 0b1}, opcode,
178 (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
179 asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
180 [(set (ResTy2D VPR128:$Rd),
181 (ResTy2D (opnode2D (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))))],
186 //===----------------------------------------------------------------------===//
187 // Instruction Definitions
188 //===----------------------------------------------------------------------===//
190 // Vector Arithmetic Instructions
192 // Vector Add (Integer and Floating-Point)
// The trailing 1/0 argument is the Commutable flag consumed by the
// NeonI_3VSame_* multiclasses (it drives "let isCommutable").
194 defm ADDvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b10000, "add", add, 1>;
195 defm FADDvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11010, "fadd", fadd, fadd, fadd,
196                                      v2f32, v4f32, v2f64, 1>;
198 // Vector Sub (Integer and Floating-Point)
200 defm SUBvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b10000, "sub", sub, 0>;
201 defm FSUBvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11010, "fsub", fsub, fsub, fsub,
202                                      v2f32, v4f32, v2f64, 0>;
204 // Vector Multiply (Integer and Floating-Point)
206 defm MULvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b10011, "mul", mul, 1>;
207 defm FMULvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11011, "fmul", fmul, fmul, fmul,
208                                      v2f32, v4f32, v2f64, 1>;
210 // Vector Multiply (Polynomial)
// Polynomial multiply reuses the ARM NEON intrinsic for both 8B and 16B.
212 defm PMULvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b10011, "pmul",
213                                     int_arm_neon_vmulp, int_arm_neon_vmulp, 1>;
215 // Vector Multiply-accumulate and Multiply-subtract (Integer)
217 // class NeonI_3VSame_Constraint_impl: NeonI_3VSame with no data type and
218 // two operands constraints.
219 class NeonI_3VSame_Constraint_impl<string asmop, string asmlane,
220 RegisterOperand VPRC, ValueType OpTy, bit q, bit u, bits<2> size,
221 bits<5> opcode, SDPatternOperator opnode>
222 : NeonI_3VSame<q, u, size, opcode,
223 (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, VPRC:$Rm),
224 asmop # "\t$Rd" # asmlane # ", $Rn" # asmlane # ", $Rm" # asmlane,
225 [(set (OpTy VPRC:$Rd),
226 (OpTy (opnode (OpTy VPRC:$src), (OpTy VPRC:$Rn), (OpTy VPRC:$Rm))))],
228 let Constraints = "$src = $Rd";
// Multiply-accumulate / multiply-subtract fragments:
// mla: Ra + (Rn * Rm);  mls: Ra - (Rn * Rm).
231 def Neon_mla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
232                        (add node:$Ra, (mul node:$Rn, node:$Rm))>;
234 def Neon_mls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
235                        (sub node:$Ra, (mul node:$Rn, node:$Rm))>;
// MLA/MLS accumulate into the destination: NeonI_3VSame_Constraint_impl ties
// $src to $Rd ("$src = $Rd"), so the pattern reads and writes the same register.
238 def MLAvvv_8B:  NeonI_3VSame_Constraint_impl<"mla", ".8b",  VPR64,  v8i8,
239                                              0b0, 0b0, 0b00, 0b10010, Neon_mla>;
240 def MLAvvv_16B: NeonI_3VSame_Constraint_impl<"mla", ".16b", VPR128, v16i8,
241                                              0b1, 0b0, 0b00, 0b10010, Neon_mla>;
242 def MLAvvv_4H:  NeonI_3VSame_Constraint_impl<"mla", ".4h",  VPR64,  v4i16,
243                                              0b0, 0b0, 0b01, 0b10010, Neon_mla>;
244 def MLAvvv_8H:  NeonI_3VSame_Constraint_impl<"mla", ".8h",  VPR128, v8i16,
245                                              0b1, 0b0, 0b01, 0b10010, Neon_mla>;
246 def MLAvvv_2S:  NeonI_3VSame_Constraint_impl<"mla", ".2s",  VPR64,  v2i32,
247                                              0b0, 0b0, 0b10, 0b10010, Neon_mla>;
248 def MLAvvv_4S:  NeonI_3VSame_Constraint_impl<"mla", ".4s",  VPR128, v4i32,
249                                              0b1, 0b0, 0b10, 0b10010, Neon_mla>;
// MLS differs from MLA only in the u bit (0b1 instead of 0b0).
251 def MLSvvv_8B:  NeonI_3VSame_Constraint_impl<"mls", ".8b",  VPR64,  v8i8,
252                                              0b0, 0b1, 0b00, 0b10010, Neon_mls>;
253 def MLSvvv_16B: NeonI_3VSame_Constraint_impl<"mls", ".16b", VPR128, v16i8,
254                                              0b1, 0b1, 0b00, 0b10010, Neon_mls>;
255 def MLSvvv_4H:  NeonI_3VSame_Constraint_impl<"mls", ".4h",  VPR64,  v4i16,
256                                              0b0, 0b1, 0b01, 0b10010, Neon_mls>;
257 def MLSvvv_8H:  NeonI_3VSame_Constraint_impl<"mls", ".8h",  VPR128, v8i16,
258                                              0b1, 0b1, 0b01, 0b10010, Neon_mls>;
259 def MLSvvv_2S:  NeonI_3VSame_Constraint_impl<"mls", ".2s",  VPR64,  v2i32,
260                                              0b0, 0b1, 0b10, 0b10010, Neon_mls>;
261 def MLSvvv_4S:  NeonI_3VSame_Constraint_impl<"mls", ".4s",  VPR128, v4i32,
262                                              0b1, 0b1, 0b10, 0b10010, Neon_mls>;
264 // Vector Multiply-accumulate and Multiply-subtract (Floating Point)
// Floating-point multiply-accumulate / multiply-subtract fragments
// (separate fadd/fmul, matched only under UseFusedMAC below):
// fmla: Ra + (Rn * Rm);  fmls: Ra - (Rn * Rm).
266 def Neon_fmla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
267                         (fadd node:$Ra, (fmul node:$Rn, node:$Rm))>;
269 def Neon_fmls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
270                         (fsub node:$Ra, (fmul node:$Rn, node:$Rm))>;
272 let Predicates = [HasNEON, UseFusedMAC] in {
// Fused multiply-accumulate/subtract; guarded by [HasNEON, UseFusedMAC]
// via the enclosing "let Predicates" (opened on the preceding line).
273 def FMLAvvv_2S: NeonI_3VSame_Constraint_impl<"fmla", ".2s",  VPR64,  v2f32,
274                                              0b0, 0b0, 0b00, 0b11001, Neon_fmla>;
275 def FMLAvvv_4S: NeonI_3VSame_Constraint_impl<"fmla", ".4s",  VPR128, v4f32,
276                                              0b1, 0b0, 0b00, 0b11001, Neon_fmla>;
277 def FMLAvvv_2D: NeonI_3VSame_Constraint_impl<"fmla", ".2d",  VPR128, v2f64,
278                                              0b1, 0b0, 0b01, 0b11001, Neon_fmla>;
280 def FMLSvvv_2S: NeonI_3VSame_Constraint_impl<"fmls", ".2s",  VPR64,  v2f32,
281                                              0b0, 0b0, 0b10, 0b11001, Neon_fmls>;
282 def FMLSvvv_4S: NeonI_3VSame_Constraint_impl<"fmls", ".4s",  VPR128, v4f32,
283                                              0b1, 0b0, 0b10, 0b11001, Neon_fmls>;
284 def FMLSvvv_2D: NeonI_3VSame_Constraint_impl<"fmls", ".2d",  VPR128, v2f64,
285                                              0b1, 0b0, 0b11, 0b11001, Neon_fmls>;
288 // We're also allowed to match the fma instruction regardless of compile options.
// Map the generic fma node onto FMLA; note the operand reorder: DAG order
// (Rn, Rm, Ra) becomes instruction order (Ra, Rn, Rm).
290 def : Pat<(v2f32 (fma VPR64:$Rn, VPR64:$Rm, VPR64:$Ra)),
291           (FMLAvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
292 def : Pat<(v4f32 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
293           (FMLAvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
294 def : Pat<(v2f64 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
295           (FMLAvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
// fma with a negated first operand selects FMLS.
297 def : Pat<(v2f32 (fma (fneg VPR64:$Rn), VPR64:$Rm, VPR64:$Ra)),
298           (FMLSvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
299 def : Pat<(v4f32 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
300           (FMLSvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
301 def : Pat<(v2f64 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
302           (FMLSvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
304 // Vector Divide (Floating-Point)
// Floating-point divide; not commutable (trailing 0).
306 defm FDIVvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11111, "fdiv", fdiv, fdiv, fdiv,
307                                      v2f32, v4f32, v2f64, 0>;
309 // Vector Bitwise Operations
311 // Vector Bitwise AND
// Bitwise ops only come in byte arrangements (8B/16B); all are commutable.
313 defm ANDvvv : NeonI_3VSame_B_sizes<0b0, 0b00, 0b00011, "and", and, and, 1>;
315 // Vector Bitwise Exclusive OR
317 defm EORvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b00011, "eor", xor, xor, 1>;
321 defm ORRvvv : NeonI_3VSame_B_sizes<0b0, 0b10, 0b00011, "orr", or, or, 1>;
323 // ORR disassembled as MOV if Vn==Vm
325 // Vector Move - register
326 // Alias for ORR if Vn=Vm.
327 // FIXME: This is actually the preferred syntax but TableGen can't deal with
328 // custom printing of aliases.
// "mov Vd, Vn" is encoded as ORR Vd, Vn, Vn (emit-preference 0: the alias is
// accepted by the assembler but not used for printing).
329 def : NeonInstAlias<"mov $Rd.8b, $Rn.8b",
330                     (ORRvvv_8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rn), 0>;
331 def : NeonInstAlias<"mov $Rd.16b, $Rn.16b",
332                     (ORRvvv_16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rn), 0>;
334 def Neon_immAllOnes: PatLeaf<(Neon_movi (i32 timm), (i32 imm)), [{
335 ConstantSDNode *ImmConstVal = cast<ConstantSDNode>(N->getOperand(0));
336 ConstantSDNode *OpCmodeConstVal = cast<ConstantSDNode>(N->getOperand(1));
338 uint64_t EltVal = A64Imms::decodeNeonModImm(ImmConstVal->getZExtValue(),
339 OpCmodeConstVal->getZExtValue(), EltBits);
340 return (EltBits == 8 && EltVal == 0xff);
// NOT is expressed as XOR with the all-ones movi (Neon_immAllOnes, defined
// just above); ORN/BIC are then built on top of these fragments.
344 def Neon_not8B  : PatFrag<(ops node:$in),
345                           (xor node:$in, (bitconvert (v8i8 Neon_immAllOnes)))>;
346 def Neon_not16B : PatFrag<(ops node:$in),
347                           (xor node:$in, (bitconvert (v16i8 Neon_immAllOnes)))>;
// orn: Rn | ~Rm
349 def Neon_orn8B : PatFrag<(ops node:$Rn, node:$Rm),
350                          (or node:$Rn, (Neon_not8B node:$Rm))>;
352 def Neon_orn16B : PatFrag<(ops node:$Rn, node:$Rm),
353                           (or node:$Rn, (Neon_not16B node:$Rm))>;
// bic: Rn & ~Rm
355 def Neon_bic8B : PatFrag<(ops node:$Rn, node:$Rm),
356                          (and node:$Rn, (Neon_not8B node:$Rm))>;
358 def Neon_bic16B : PatFrag<(ops node:$Rn, node:$Rm),
359                           (and node:$Rn, (Neon_not16B node:$Rm))>;
362 // Vector Bitwise OR NOT - register
// ORN/BIC are not commutable (the second operand is the inverted one).
364 defm ORNvvv : NeonI_3VSame_B_sizes<0b0, 0b11, 0b00011, "orn",
365                                    Neon_orn8B, Neon_orn16B, 0>;
367 // Vector Bitwise Bit Clear (AND NOT) - register
369 defm BICvvv : NeonI_3VSame_B_sizes<0b0, 0b01, 0b00011, "bic",
370                                    Neon_bic8B, Neon_bic16B, 0>;
372 multiclass Neon_bitwise2V_patterns<SDPatternOperator opnode8B,
373 SDPatternOperator opnode16B,
375 Instruction INST16B> {
376 def : Pat<(v2i32 (opnode8B VPR64:$Rn, VPR64:$Rm)),
377 (INST8B VPR64:$Rn, VPR64:$Rm)>;
378 def : Pat<(v4i16 (opnode8B VPR64:$Rn, VPR64:$Rm)),
379 (INST8B VPR64:$Rn, VPR64:$Rm)>;
380 def : Pat<(v1i64 (opnode8B VPR64:$Rn, VPR64:$Rm)),
381 (INST8B VPR64:$Rn, VPR64:$Rm)>;
382 def : Pat<(v4i32 (opnode16B VPR128:$Rn, VPR128:$Rm)),
383 (INST16B VPR128:$Rn, VPR128:$Rm)>;
384 def : Pat<(v8i16 (opnode16B VPR128:$Rn, VPR128:$Rm)),
385 (INST16B VPR128:$Rn, VPR128:$Rm)>;
386 def : Pat<(v2i64 (opnode16B VPR128:$Rn, VPR128:$Rm)),
387 (INST16B VPR128:$Rn, VPR128:$Rm)>;
390 // Additional patterns for bitwise instructions AND, EOR, ORR, BIC, ORN
// Extend each byte-arranged bitwise instruction to the other integer vector
// types via Neon_bitwise2V_patterns (defined above).
391 defm : Neon_bitwise2V_patterns<and, and, ANDvvv_8B, ANDvvv_16B>;
392 defm : Neon_bitwise2V_patterns<or,  or,  ORRvvv_8B, ORRvvv_16B>;
393 defm : Neon_bitwise2V_patterns<xor, xor, EORvvv_8B, EORvvv_16B>;
394 defm : Neon_bitwise2V_patterns<Neon_bic8B, Neon_bic16B, BICvvv_8B, BICvvv_16B>;
395 defm : Neon_bitwise2V_patterns<Neon_orn8B, Neon_orn16B, ORNvvv_8B, ORNvvv_16B>;
397 // Vector Bitwise Select
// BSL ties $src to $Rd (via NeonI_3VSame_Constraint_impl) and selects from
// the Neon_bsl node defined at the top of this file.
398 def BSLvvv_8B  : NeonI_3VSame_Constraint_impl<"bsl", ".8b",  VPR64, v8i8,
399                                               0b0, 0b1, 0b01, 0b00011, Neon_bsl>;
401 def BSLvvv_16B : NeonI_3VSame_Constraint_impl<"bsl", ".16b", VPR128, v16i8,
402                                               0b1, 0b1, 0b01, 0b00011, Neon_bsl>;
404 multiclass Neon_bitwise3V_patterns<SDPatternOperator opnode,
406 Instruction INST16B> {
407 // Disassociate type from instruction definition
408 def : Pat<(v2i32 (opnode VPR64:$src,VPR64:$Rn, VPR64:$Rm)),
409 (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
410 def : Pat<(v4i16 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
411 (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
412 def : Pat<(v1i64 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
413 (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
414 def : Pat<(v4i32 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
415 (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
416 def : Pat<(v8i16 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
417 (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
418 def : Pat<(v2i64 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
419 (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
421 // Allow to match BSL instruction pattern with non-constant operand
422 def : Pat<(v8i8 (or (and VPR64:$Rn, VPR64:$Rd),
423 (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
424 (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
425 def : Pat<(v4i16 (or (and VPR64:$Rn, VPR64:$Rd),
426 (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
427 (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
428 def : Pat<(v2i32 (or (and VPR64:$Rn, VPR64:$Rd),
429 (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
430 (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
431 def : Pat<(v1i64 (or (and VPR64:$Rn, VPR64:$Rd),
432 (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
433 (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
434 def : Pat<(v16i8 (or (and VPR128:$Rn, VPR128:$Rd),
435 (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
436 (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
437 def : Pat<(v8i16 (or (and VPR128:$Rn, VPR128:$Rd),
438 (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
439 (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
440 def : Pat<(v4i32 (or (and VPR128:$Rn, VPR128:$Rd),
441 (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
442 (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
443 def : Pat<(v2i64 (or (and VPR128:$Rn, VPR128:$Rd),
444 (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
445 (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
447 // Allow to match llvm.arm.* intrinsics.
448 def : Pat<(v8i8 (int_arm_neon_vbsl (v8i8 VPR64:$src),
449 (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
450 (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
451 def : Pat<(v4i16 (int_arm_neon_vbsl (v4i16 VPR64:$src),
452 (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
453 (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
454 def : Pat<(v2i32 (int_arm_neon_vbsl (v2i32 VPR64:$src),
455 (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
456 (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
457 def : Pat<(v1i64 (int_arm_neon_vbsl (v1i64 VPR64:$src),
458 (v1i64 VPR64:$Rn), (v1i64 VPR64:$Rm))),
459 (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
460 def : Pat<(v2f32 (int_arm_neon_vbsl (v2f32 VPR64:$src),
461 (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))),
462 (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
463 def : Pat<(v16i8 (int_arm_neon_vbsl (v16i8 VPR128:$src),
464 (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
465 (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
466 def : Pat<(v8i16 (int_arm_neon_vbsl (v8i16 VPR128:$src),
467 (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
468 (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
469 def : Pat<(v4i32 (int_arm_neon_vbsl (v4i32 VPR128:$src),
470 (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
471 (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
472 def : Pat<(v2i64 (int_arm_neon_vbsl (v2i64 VPR128:$src),
473 (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
474 (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
475 def : Pat<(v4f32 (int_arm_neon_vbsl (v4f32 VPR128:$src),
476 (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))),
477 (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
478 def : Pat<(v2f64 (int_arm_neon_vbsl (v2f64 VPR128:$src),
479 (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))),
480 (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
483 // Additional patterns for bitwise instruction BSL
// Instantiate the 3-operand bitwise patterns for BSL.
484 defm: Neon_bitwise3V_patterns<Neon_bsl, BSLvvv_8B, BSLvvv_16B>;
// A fragment that never matches (predicate returns false): BIT/BIF below use
// it so the instructions exist for the assembler without any ISel pattern.
486 def Neon_NoBSLop : PatFrag<(ops node:$src, node:$Rn, node:$Rm),
487                            (Neon_bsl node:$src, node:$Rn, node:$Rm),
488                            [{ (void)N; return false; }]>;
490 // Vector Bitwise Insert if True
// BIT/BIF are defined with the never-matching Neon_NoBSLop, i.e. they are
// assembler-only here and not produced by instruction selection.
492 def BITvvv_8B  : NeonI_3VSame_Constraint_impl<"bit", ".8b", VPR64, v8i8,
493                                               0b0, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
494 def BITvvv_16B : NeonI_3VSame_Constraint_impl<"bit", ".16b", VPR128, v16i8,
495                                               0b1, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
497 // Vector Bitwise Insert if False
499 def BIFvvv_8B  : NeonI_3VSame_Constraint_impl<"bif", ".8b", VPR64, v8i8,
500                                               0b0, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
501 def BIFvvv_16B : NeonI_3VSame_Constraint_impl<"bif", ".16b", VPR128, v16i8,
502                                               0b1, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
504 // Vector Absolute Difference and Accumulate (Signed, Unsigned)
// Absolute-difference-and-accumulate fragments:
// Ra + vabd{u,s}(Rn, Rm), built from the ARM NEON vabd intrinsics.
506 def Neon_uaba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
507                         (add node:$Ra, (int_arm_neon_vabdu node:$Rn, node:$Rm))>;
508 def Neon_saba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
509                         (add node:$Ra, (int_arm_neon_vabds node:$Rn, node:$Rm))>;
511 // Vector Absolute Difference and Accumulate (Unsigned)
// UABA/SABA accumulate into $Rd ($src tied to $Rd by the Constraint_impl
// class); the u bit distinguishes unsigned (0b1) from signed (0b0).
512 def UABAvvv_8B : NeonI_3VSame_Constraint_impl<"uaba", ".8b",  VPR64,  v8i8,
513                     0b0, 0b1, 0b00, 0b01111, Neon_uaba>;
514 def UABAvvv_16B : NeonI_3VSame_Constraint_impl<"uaba", ".16b", VPR128, v16i8,
515                     0b1, 0b1, 0b00, 0b01111, Neon_uaba>;
516 def UABAvvv_4H : NeonI_3VSame_Constraint_impl<"uaba", ".4h",  VPR64,  v4i16,
517                     0b0, 0b1, 0b01, 0b01111, Neon_uaba>;
518 def UABAvvv_8H : NeonI_3VSame_Constraint_impl<"uaba", ".8h",  VPR128, v8i16,
519                     0b1, 0b1, 0b01, 0b01111, Neon_uaba>;
520 def UABAvvv_2S : NeonI_3VSame_Constraint_impl<"uaba", ".2s",  VPR64,  v2i32,
521                     0b0, 0b1, 0b10, 0b01111, Neon_uaba>;
522 def UABAvvv_4S : NeonI_3VSame_Constraint_impl<"uaba", ".4s",  VPR128, v4i32,
523                     0b1, 0b1, 0b10, 0b01111, Neon_uaba>;
525 // Vector Absolute Difference and Accumulate (Signed)
526 def SABAvvv_8B : NeonI_3VSame_Constraint_impl<"saba", ".8b",  VPR64,  v8i8,
527                     0b0, 0b0, 0b00, 0b01111, Neon_saba>;
528 def SABAvvv_16B : NeonI_3VSame_Constraint_impl<"saba", ".16b", VPR128, v16i8,
529                     0b1, 0b0, 0b00, 0b01111, Neon_saba>;
530 def SABAvvv_4H : NeonI_3VSame_Constraint_impl<"saba", ".4h",  VPR64,  v4i16,
531                     0b0, 0b0, 0b01, 0b01111, Neon_saba>;
532 def SABAvvv_8H : NeonI_3VSame_Constraint_impl<"saba", ".8h",  VPR128, v8i16,
533                     0b1, 0b0, 0b01, 0b01111, Neon_saba>;
534 def SABAvvv_2S : NeonI_3VSame_Constraint_impl<"saba", ".2s",  VPR64,  v2i32,
535                     0b0, 0b0, 0b10, 0b01111, Neon_saba>;
536 def SABAvvv_4S : NeonI_3VSame_Constraint_impl<"saba", ".4s",  VPR128, v4i32,
537                     0b1, 0b0, 0b10, 0b01111, Neon_saba>;
540 // Vector Absolute Difference (Signed, Unsigned)
// Integer absolute difference, via the ARM NEON vabd intrinsics.
541 defm UABDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01110, "uabd", int_arm_neon_vabdu, 0>;
542 defm SABDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01110, "sabd", int_arm_neon_vabds, 0>;
544 // Vector Absolute Difference (Floating Point)
// FP FABD reuses int_arm_neon_vabds for all three arrangements (the ARM
// intrinsic is overloaded on the vector element type).
545 defm FABDvvv: NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11010, "fabd",
546                                     int_arm_neon_vabds, int_arm_neon_vabds,
547                                     int_arm_neon_vabds, v2f32, v4f32, v2f64, 0>;
549 // Vector Reciprocal Step (Floating Point)
550 defm FRECPSvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11111, "frecps",
551 int_arm_neon_vrecps, int_arm_neon_vrecps,
553 v2f32, v4f32, v2f64, 0>;
555 // Vector Reciprocal Square Root Step (Floating Point)
// Reciprocal square-root step, same ARM intrinsic for all arrangements.
556 defm FRSQRTSvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11111, "frsqrts",
557                                         int_arm_neon_vrsqrts,
558                                         int_arm_neon_vrsqrts,
559                                         int_arm_neon_vrsqrts,
560                                         v2f32, v4f32, v2f64, 0>;
562 // Vector Comparisons
// Specializations of the Neon_cmp node with a fixed condition code:
// cmeq=SETEQ, cmhs=SETUGE, cmge=SETGE, cmhi=SETUGT, cmgt=SETGT.
564 def Neon_cmeq : PatFrag<(ops node:$lhs, node:$rhs),
565                         (Neon_cmp node:$lhs, node:$rhs, SETEQ)>;
566 def Neon_cmphs : PatFrag<(ops node:$lhs, node:$rhs),
567                          (Neon_cmp node:$lhs, node:$rhs, SETUGE)>;
568 def Neon_cmge : PatFrag<(ops node:$lhs, node:$rhs),
569                         (Neon_cmp node:$lhs, node:$rhs, SETGE)>;
570 def Neon_cmhi : PatFrag<(ops node:$lhs, node:$rhs),
571                         (Neon_cmp node:$lhs, node:$rhs, SETUGT)>;
572 def Neon_cmgt : PatFrag<(ops node:$lhs, node:$rhs),
573                         (Neon_cmp node:$lhs, node:$rhs, SETGT)>;
575 // NeonI_compare_aliases class: swaps register operands to implement
576 // comparison aliases, e.g., CMLE is alias for CMGE with operands reversed.
577 class NeonI_compare_aliases<string asmop, string asmlane,
578 Instruction inst, RegisterOperand VPRC>
579 : NeonInstAlias<asmop # "\t$Rd" # asmlane #", $Rn" # asmlane #
581 (inst VPRC:$Rd, VPRC:$Rm, VPRC:$Rn), 0b0>;
583 // Vector Comparisons (Integer)
585 // Vector Compare Mask Equal (Integer)
// NOTE(review): commutability is forced via this surrounding "let" rather
// than the multiclass's Commutable parameter (passed as 0 here); the closing
// brace follows outside this excerpt.
586 let isCommutable =1 in {
587 defm CMEQvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b10001, "cmeq", Neon_cmeq, 0>;
590 // Vector Compare Mask Higher or Same (Unsigned Integer)
// Integer compare-mask instructions; each selects its Neon_cm* fragment
// (fixed condition code) defined above.
591 defm CMHSvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00111, "cmhs", Neon_cmphs, 0>;
593 // Vector Compare Mask Greater Than or Equal (Integer)
594 defm CMGEvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00111, "cmge", Neon_cmge, 0>;
596 // Vector Compare Mask Higher (Unsigned Integer)
597 defm CMHIvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00110, "cmhi", Neon_cmhi, 0>;
599 // Vector Compare Mask Greater Than (Integer)
600 defm CMGTvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00110, "cmgt", Neon_cmgt, 0>;
602 // Vector Compare Mask Bitwise Test (Integer)
603 defm CMTSTvvv: NeonI_3VSame_BHSD_sizes<0b0, 0b10001, "cmtst", Neon_tst, 0>;
605 // Vector Compare Mask Less or Same (Unsigned Integer)
606 // CMLS is alias for CMHS with operands reversed.
607 def CMLSvvv_8B : NeonI_compare_aliases<"cmls", ".8b", CMHSvvv_8B, VPR64>;
608 def CMLSvvv_16B : NeonI_compare_aliases<"cmls", ".16b", CMHSvvv_16B, VPR128>;
609 def CMLSvvv_4H : NeonI_compare_aliases<"cmls", ".4h", CMHSvvv_4H, VPR64>;
610 def CMLSvvv_8H : NeonI_compare_aliases<"cmls", ".8h", CMHSvvv_8H, VPR128>;
611 def CMLSvvv_2S : NeonI_compare_aliases<"cmls", ".2s", CMHSvvv_2S, VPR64>;
612 def CMLSvvv_4S : NeonI_compare_aliases<"cmls", ".4s", CMHSvvv_4S, VPR128>;
613 def CMLSvvv_2D : NeonI_compare_aliases<"cmls", ".2d", CMHSvvv_2D, VPR128>;
615 // Vector Compare Mask Less Than or Equal (Integer)
616 // CMLE is alias for CMGE with operands reversed.
617 def CMLEvvv_8B : NeonI_compare_aliases<"cmle", ".8b", CMGEvvv_8B, VPR64>;
618 def CMLEvvv_16B : NeonI_compare_aliases<"cmle", ".16b", CMGEvvv_16B, VPR128>;
619 def CMLEvvv_4H : NeonI_compare_aliases<"cmle", ".4h", CMGEvvv_4H, VPR64>;
620 def CMLEvvv_8H : NeonI_compare_aliases<"cmle", ".8h", CMGEvvv_8H, VPR128>;
621 def CMLEvvv_2S : NeonI_compare_aliases<"cmle", ".2s", CMGEvvv_2S, VPR64>;
622 def CMLEvvv_4S : NeonI_compare_aliases<"cmle", ".4s", CMGEvvv_4S, VPR128>;
623 def CMLEvvv_2D : NeonI_compare_aliases<"cmle", ".2d", CMGEvvv_2D, VPR128>;
625 // Vector Compare Mask Lower (Unsigned Integer)
626 // CMLO is alias for CMHI with operands reversed.
627 def CMLOvvv_8B : NeonI_compare_aliases<"cmlo", ".8b", CMHIvvv_8B, VPR64>;
628 def CMLOvvv_16B : NeonI_compare_aliases<"cmlo", ".16b", CMHIvvv_16B, VPR128>;
629 def CMLOvvv_4H : NeonI_compare_aliases<"cmlo", ".4h", CMHIvvv_4H, VPR64>;
630 def CMLOvvv_8H : NeonI_compare_aliases<"cmlo", ".8h", CMHIvvv_8H, VPR128>;
631 def CMLOvvv_2S : NeonI_compare_aliases<"cmlo", ".2s", CMHIvvv_2S, VPR64>;
632 def CMLOvvv_4S : NeonI_compare_aliases<"cmlo", ".4s", CMHIvvv_4S, VPR128>;
633 def CMLOvvv_2D : NeonI_compare_aliases<"cmlo", ".2d", CMHIvvv_2D, VPR128>;
635 // Vector Compare Mask Less Than (Integer)
636 // CMLT is alias for CMGT with operands reversed.
637 def CMLTvvv_8B : NeonI_compare_aliases<"cmlt", ".8b", CMGTvvv_8B, VPR64>;
638 def CMLTvvv_16B : NeonI_compare_aliases<"cmlt", ".16b", CMGTvvv_16B, VPR128>;
639 def CMLTvvv_4H : NeonI_compare_aliases<"cmlt", ".4h", CMGTvvv_4H, VPR64>;
640 def CMLTvvv_8H : NeonI_compare_aliases<"cmlt", ".8h", CMGTvvv_8H, VPR128>;
641 def CMLTvvv_2S : NeonI_compare_aliases<"cmlt", ".2s", CMGTvvv_2S, VPR64>;
642 def CMLTvvv_4S : NeonI_compare_aliases<"cmlt", ".4s", CMGTvvv_4S, VPR128>;
643 def CMLTvvv_2D : NeonI_compare_aliases<"cmlt", ".2d", CMGTvvv_2D, VPR128>;
646 def neon_uimm0_asmoperand : AsmOperandClass
649 let PredicateMethod = "isUImm<0>";
650 let RenderMethod = "addImmOperands";
653 def neon_uimm0 : Operand<i32>, ImmLeaf<i32, [{return Imm == 0;}]> {
654 let ParserMatchClass = neon_uimm0_asmoperand;
655 let PrintMethod = "printNeonUImm0Operand";
659 multiclass NeonI_cmpz_sizes<bit u, bits<5> opcode, string asmop, CondCode CC>
661 def _8B : NeonI_2VMisc<0b0, u, 0b00, opcode,
662 (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
663 asmop # "\t$Rd.8b, $Rn.8b, $Imm",
664 [(set (v8i8 VPR64:$Rd),
665 (v8i8 (Neon_cmpz (v8i8 VPR64:$Rn), (i32 imm:$Imm), CC)))],
668 def _16B : NeonI_2VMisc<0b1, u, 0b00, opcode,
669 (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
670 asmop # "\t$Rd.16b, $Rn.16b, $Imm",
671 [(set (v16i8 VPR128:$Rd),
672 (v16i8 (Neon_cmpz (v16i8 VPR128:$Rn), (i32 imm:$Imm), CC)))],
675 def _4H : NeonI_2VMisc<0b0, u, 0b01, opcode,
676 (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
677 asmop # "\t$Rd.4h, $Rn.4h, $Imm",
678 [(set (v4i16 VPR64:$Rd),
679 (v4i16 (Neon_cmpz (v4i16 VPR64:$Rn), (i32 imm:$Imm), CC)))],
682 def _8H : NeonI_2VMisc<0b1, u, 0b01, opcode,
683 (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
684 asmop # "\t$Rd.8h, $Rn.8h, $Imm",
685 [(set (v8i16 VPR128:$Rd),
686 (v8i16 (Neon_cmpz (v8i16 VPR128:$Rn), (i32 imm:$Imm), CC)))],
689 def _2S : NeonI_2VMisc<0b0, u, 0b10, opcode,
690 (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
691 asmop # "\t$Rd.2s, $Rn.2s, $Imm",
692 [(set (v2i32 VPR64:$Rd),
693 (v2i32 (Neon_cmpz (v2i32 VPR64:$Rn), (i32 imm:$Imm), CC)))],
696 def _4S : NeonI_2VMisc<0b1, u, 0b10, opcode,
697 (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
698 asmop # "\t$Rd.4s, $Rn.4s, $Imm",
699 [(set (v4i32 VPR128:$Rd),
700 (v4i32 (Neon_cmpz (v4i32 VPR128:$Rn), (i32 imm:$Imm), CC)))],
703 def _2D : NeonI_2VMisc<0b1, u, 0b11, opcode,
704 (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
705 asmop # "\t$Rd.2d, $Rn.2d, $Imm",
706 [(set (v2i64 VPR128:$Rd),
707 (v2i64 (Neon_cmpz (v2i64 VPR128:$Rn), (i32 imm:$Imm), CC)))],
711 // Vector Compare Mask Equal to Zero (Integer)
// Compare-against-zero forms (the "vvi" suffix: two vectors plus the
// mandatory #0 immediate), built on NeonI_cmpz_sizes above.
712 defm CMEQvvi : NeonI_cmpz_sizes<0b0, 0b01001, "cmeq", SETEQ>;
714 // Vector Compare Mask Greater Than or Equal to Zero (Signed Integer)
715 defm CMGEvvi : NeonI_cmpz_sizes<0b1, 0b01000, "cmge", SETGE>;
717 // Vector Compare Mask Greater Than Zero (Signed Integer)
718 defm CMGTvvi : NeonI_cmpz_sizes<0b0, 0b01000, "cmgt", SETGT>;
720 // Vector Compare Mask Less Than or Equal To Zero (Signed Integer)
721 defm CMLEvvi : NeonI_cmpz_sizes<0b1, 0b01001, "cmle", SETLE>;
723 // Vector Compare Mask Less Than Zero (Signed Integer)
724 defm CMLTvvi : NeonI_cmpz_sizes<0b0, 0b01010, "cmlt", SETLT>;
726 // Vector Comparisons (Floating Point)
728 // Vector Compare Mask Equal (Floating Point)
// FP compares produce integer mask results (v2i32/v4i32/v2i64) from FP
// inputs — hence the SD_sizes result-type arguments. NOTE(review): as with
// CMEQ, commutability comes from this "let" (Commutable arg is 0); the
// closing brace follows outside this excerpt.
729 let isCommutable =1 in {
730 defm FCMEQvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11100, "fcmeq", Neon_cmeq,
731                                       Neon_cmeq, Neon_cmeq,
732                                       v2i32, v4i32, v2i64, 0>;
735 // Vector Compare Mask Greater Than Or Equal (Floating Point)
// FP ordered compares; integer mask result types, FP operand types.
736 defm FCMGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11100, "fcmge", Neon_cmge,
737                                       Neon_cmge, Neon_cmge,
738                                       v2i32, v4i32, v2i64, 0>;
740 // Vector Compare Mask Greater Than (Floating Point)
741 defm FCMGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11100, "fcmgt", Neon_cmgt,
742                                       Neon_cmgt, Neon_cmgt,
743                                       v2i32, v4i32, v2i64, 0>;
745 // Vector Compare Mask Less Than Or Equal (Floating Point)
746 // FCMLE is alias for FCMGE with operands reversed.
// FCMLE/FCMLT are assembler aliases of FCMGE/FCMGT with swapped operands.
747 def FCMLEvvv_2S  : NeonI_compare_aliases<"fcmle", ".2s",  FCMGEvvv_2S,  VPR64>;
748 def FCMLEvvv_4S  : NeonI_compare_aliases<"fcmle", ".4s",  FCMGEvvv_4S,  VPR128>;
749 def FCMLEvvv_2D  : NeonI_compare_aliases<"fcmle", ".2d",  FCMGEvvv_2D,  VPR128>;
751 // Vector Compare Mask Less Than (Floating Point)
752 // FCMLT is alias for FCMGT with operands reversed.
753 def FCMLTvvv_2S  : NeonI_compare_aliases<"fcmlt", ".2s",  FCMGTvvv_2S,  VPR64>;
754 def FCMLTvvv_4S  : NeonI_compare_aliases<"fcmlt", ".4s",  FCMGTvvv_4S,  VPR128>;
755 def FCMLTvvv_2D  : NeonI_compare_aliases<"fcmlt", ".2d",  FCMGTvvv_2D,  VPR128>;
758 multiclass NeonI_fpcmpz_sizes<bit u, bit size, bits<5> opcode,
759 string asmop, CondCode CC>
761 def _2S : NeonI_2VMisc<0b0, u, {size, 0b0}, opcode,
762 (outs VPR64:$Rd), (ins VPR64:$Rn, fpz32:$FPImm),
763 asmop # "\t$Rd.2s, $Rn.2s, $FPImm",
764 [(set (v2i32 VPR64:$Rd),
765 (v2i32 (Neon_cmpz (v2f32 VPR64:$Rn), (f32 fpimm:$FPImm), CC)))],
768 def _4S : NeonI_2VMisc<0b1, u, {size, 0b0}, opcode,
769 (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
770 asmop # "\t$Rd.4s, $Rn.4s, $FPImm",
771 [(set (v4i32 VPR128:$Rd),
772 (v4i32 (Neon_cmpz (v4f32 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))],
775 def _2D : NeonI_2VMisc<0b1, u, {size, 0b1}, opcode,
776 (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
777 asmop # "\t$Rd.2d, $Rn.2d, $FPImm",
778 [(set (v2i64 VPR128:$Rd),
779 (v2i64 (Neon_cmpz (v2f64 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))],
783 // Vector Compare Mask Equal to Zero (Floating Point)
784 defm FCMEQvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01101, "fcmeq", SETEQ>;
786 // Vector Compare Mask Greater Than or Equal to Zero (Floating Point)
787 defm FCMGEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01100, "fcmge", SETGE>;
789 // Vector Compare Mask Greater Than Zero (Floating Point)
790 defm FCMGTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01100, "fcmgt", SETGT>;
792 // Vector Compare Mask Less Than or Equal To Zero (Floating Point)
793 defm FCMLEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01101, "fcmle", SETLE>;
795 // Vector Compare Mask Less Than Zero (Floating Point)
796 defm FCMLTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01110, "fcmlt", SETLT>;
798 // Vector Absolute Comparisons (Floating Point)
800 // Vector Absolute Compare Mask Greater Than Or Equal (Floating Point)
801 defm FACGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11101, "facge",
802 int_arm_neon_vacged, int_arm_neon_vacgeq,
803 int_aarch64_neon_vacgeq,
804 v2i32, v4i32, v2i64, 0>;
806 // Vector Absolute Compare Mask Greater Than (Floating Point)
807 defm FACGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11101, "facgt",
808 int_arm_neon_vacgtd, int_arm_neon_vacgtq,
809 int_aarch64_neon_vacgtq,
810 v2i32, v4i32, v2i64, 0>;
812 // Vector Absolute Compare Mask Less Than Or Equal (Floating Point)
813 // FACLE is alias for FACGE with operands reversed.
814 def FACLEvvv_2S : NeonI_compare_aliases<"facle", ".2s", FACGEvvv_2S, VPR64>;
815 def FACLEvvv_4S : NeonI_compare_aliases<"facle", ".4s", FACGEvvv_4S, VPR128>;
816 def FACLEvvv_2D : NeonI_compare_aliases<"facle", ".2d", FACGEvvv_2D, VPR128>;
818 // Vector Absolute Compare Mask Less Than (Floating Point)
819 // FACLT is alias for FACGT with operands reversed.
820 def FACLTvvv_2S : NeonI_compare_aliases<"faclt", ".2s", FACGTvvv_2S, VPR64>;
821 def FACLTvvv_4S : NeonI_compare_aliases<"faclt", ".4s", FACGTvvv_4S, VPR128>;
822 def FACLTvvv_2D : NeonI_compare_aliases<"faclt", ".2d", FACGTvvv_2D, VPR128>;
824 // Vector halving add (Integer Signed, Unsigned)
825 defm SHADDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00000, "shadd",
826 int_arm_neon_vhadds, 1>;
827 defm UHADDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00000, "uhadd",
828 int_arm_neon_vhaddu, 1>;
830 // Vector halving sub (Integer Signed, Unsigned)
831 defm SHSUBvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00100, "shsub",
832 int_arm_neon_vhsubs, 0>;
833 defm UHSUBvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00100, "uhsub",
834 int_arm_neon_vhsubu, 0>;
836 // Vector rounding halving add (Integer Signed, Unsigned)
837 defm SRHADDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00010, "srhadd",
838 int_arm_neon_vrhadds, 1>;
839 defm URHADDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00010, "urhadd",
840 int_arm_neon_vrhaddu, 1>;
842 // Vector Saturating add (Integer Signed, Unsigned)
843 defm SQADDvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00001, "sqadd",
844 int_arm_neon_vqadds, 1>;
845 defm UQADDvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00001, "uqadd",
846 int_arm_neon_vqaddu, 1>;
848 // Vector Saturating sub (Integer Signed, Unsigned)
849 defm SQSUBvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00101, "sqsub",
850 int_arm_neon_vqsubs, 1>;
851 defm UQSUBvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00101, "uqsub",
852 int_arm_neon_vqsubu, 1>;
854 // Vector Shift Left (Signed and Unsigned Integer)
855 defm SSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01000, "sshl",
856 int_arm_neon_vshifts, 1>;
857 defm USHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01000, "ushl",
858 int_arm_neon_vshiftu, 1>;
860 // Vector Saturating Shift Left (Signed and Unsigned Integer)
861 defm SQSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01001, "sqshl",
862 int_arm_neon_vqshifts, 1>;
863 defm UQSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01001, "uqshl",
864 int_arm_neon_vqshiftu, 1>;
866 // Vector Rounding Shift Left (Signed and Unsigned Integer)
867 defm SRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01010, "srshl",
868 int_arm_neon_vrshifts, 1>;
869 defm URSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01010, "urshl",
870 int_arm_neon_vrshiftu, 1>;
872 // Vector Saturating Rounding Shift Left (Signed and Unsigned Integer)
873 defm SQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01011, "sqrshl",
874 int_arm_neon_vqrshifts, 1>;
875 defm UQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01011, "uqrshl",
876 int_arm_neon_vqrshiftu, 1>;
878 // Vector Maximum (Signed and Unsigned Integer)
// Per-lane integer max/min over the B/H/S element sizes, lowered through the
// shared ARM NEON intrinsics (vmaxs/vmaxu/vmins/vminu).
// NOTE(review): the trailing '1' appears to be a commutativity flag (compare
// the '0' passed for SHSUB above) — confirm against NeonI_3VSame_BHS_sizes.
879 defm SMAXvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01100, "smax", int_arm_neon_vmaxs, 1>;
880 defm UMAXvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01100, "umax", int_arm_neon_vmaxu, 1>;
882 // Vector Minimum (Signed and Unsigned Integer)
883 defm SMINvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01101, "smin", int_arm_neon_vmins, 1>;
884 defm UMINvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01101, "umin", int_arm_neon_vminu, 1>;
886 // Vector Maximum (Floating Point)
887 defm FMAXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11110, "fmax",
888 int_arm_neon_vmaxs, int_arm_neon_vmaxs,
889 int_arm_neon_vmaxs, v2f32, v4f32, v2f64, 1>;
891 // Vector Minimum (Floating Point)
892 defm FMINvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11110, "fmin",
893 int_arm_neon_vmins, int_arm_neon_vmins,
894 int_arm_neon_vmins, v2f32, v4f32, v2f64, 1>;
896 // Vector maxNum (Floating Point) - prefer a number over a quiet NaN
897 defm FMAXNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11000, "fmaxnm",
898 int_aarch64_neon_vmaxnm,
899 int_aarch64_neon_vmaxnm,
900 int_aarch64_neon_vmaxnm,
901 v2f32, v4f32, v2f64, 1>;
903 // Vector minNum (Floating Point) - prefer a number over a quiet NaN
904 defm FMINNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11000, "fminnm",
905 int_aarch64_neon_vminnm,
906 int_aarch64_neon_vminnm,
907 int_aarch64_neon_vminnm,
908 v2f32, v4f32, v2f64, 1>;
910 // Vector Maximum Pairwise (Signed and Unsigned Integer)
911 defm SMAXPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10100, "smaxp", int_arm_neon_vpmaxs, 1>;
912 defm UMAXPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10100, "umaxp", int_arm_neon_vpmaxu, 1>;
914 // Vector Minimum Pairwise (Signed and Unsigned Integer)
915 defm SMINPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10101, "sminp", int_arm_neon_vpmins, 1>;
916 defm UMINPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10101, "uminp", int_arm_neon_vpminu, 1>;
918 // Vector Maximum Pairwise (Floating Point)
919 defm FMAXPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11110, "fmaxp",
920 int_arm_neon_vpmaxs, int_arm_neon_vpmaxs,
921 int_arm_neon_vpmaxs, v2f32, v4f32, v2f64, 1>;
923 // Vector Minimum Pairwise (Floating Point)
924 defm FMINPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11110, "fminp",
925 int_arm_neon_vpmins, int_arm_neon_vpmins,
926 int_arm_neon_vpmins, v2f32, v4f32, v2f64, 1>;
928 // Vector maxNum Pairwise (Floating Point) - prefer a number over a quiet NaN
929 defm FMAXNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11000, "fmaxnmp",
930 int_aarch64_neon_vpmaxnm,
931 int_aarch64_neon_vpmaxnm,
932 int_aarch64_neon_vpmaxnm,
933 v2f32, v4f32, v2f64, 1>;
935 // Vector minNum Pairwise (Floating Point) - prefer a number over a quiet NaN
936 defm FMINNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11000, "fminnmp",
937 int_aarch64_neon_vpminnm,
938 int_aarch64_neon_vpminnm,
939 int_aarch64_neon_vpminnm,
940 v2f32, v4f32, v2f64, 1>;
942 // Vector Addition Pairwise (Integer)
943 defm ADDP : NeonI_3VSame_BHSD_sizes<0b0, 0b10111, "addp", int_arm_neon_vpadd, 1>;
945 // Vector Addition Pairwise (Floating Point)
946 defm FADDP : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11010, "faddp",
950 v2f32, v4f32, v2f64, 1>;
952 // Vector Saturating Doubling Multiply High
953 defm SQDMULHvvv : NeonI_3VSame_HS_sizes<0b0, 0b10110, "sqdmulh",
954 int_arm_neon_vqdmulh, 1>;
956 // Vector Saturating Rounding Doubling Multiply High
957 defm SQRDMULHvvv : NeonI_3VSame_HS_sizes<0b1, 0b10110, "sqrdmulh",
958 int_arm_neon_vqrdmulh, 1>;
960 // Vector Multiply Extended (Floating Point)
961 defm FMULXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11011, "fmulx",
962 int_aarch64_neon_vmulx,
963 int_aarch64_neon_vmulx,
964 int_aarch64_neon_vmulx,
965 v2f32, v4f32, v2f64, 1>;
967 // Vector Immediate Instructions
969 multiclass neon_mov_imm_shift_asmoperands<string PREFIX>
971 def _asmoperand : AsmOperandClass
973 let Name = "NeonMovImmShift" # PREFIX;
974 let RenderMethod = "addNeonMovImmShift" # PREFIX # "Operands";
975 let PredicateMethod = "isNeonMovImmShift" # PREFIX;
979 // Definition of vector immediates shift operands
981 // The selectable use-cases extract the shift operation
982 // information from the OpCmode fields encoded in the immediate.
983 def neon_mod_shift_imm_XFORM : SDNodeXForm<imm, [{
984 uint64_t OpCmode = N->getZExtValue();
986 unsigned ShiftOnesIn;
988 A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
989 if (!HasShift) return SDValue();
990 return CurDAG->getTargetConstant(ShiftImm, MVT::i32);
993 // Vector immediates shift operands which accept LSL and MSL
994 // shift operators with shift value in the range of 0, 8, 16, 24 (LSL),
995 // or 0, 8 (LSLH) or 8, 16 (MSL).
996 defm neon_mov_imm_LSL : neon_mov_imm_shift_asmoperands<"LSL">;
997 defm neon_mov_imm_MSL : neon_mov_imm_shift_asmoperands<"MSL">;
998 // LSLH restricts shift amount to 0, 8 out of 0, 8, 16, 24
999 defm neon_mov_imm_LSLH : neon_mov_imm_shift_asmoperands<"LSLH">;
1001 multiclass neon_mov_imm_shift_operands<string PREFIX,
1002 string HALF, string ISHALF, code pred>
1004 def _operand : Operand<i32>, ImmLeaf<i32, pred, neon_mod_shift_imm_XFORM>
1007 "printNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
1009 "DecodeNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
1010 let ParserMatchClass =
1011 !cast<AsmOperandClass>("neon_mov_imm_" # PREFIX # HALF # "_asmoperand");
1015 defm neon_mov_imm_LSL : neon_mov_imm_shift_operands<"LSL", "", "false", [{
1017 unsigned ShiftOnesIn;
1019 A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1020 return (HasShift && !ShiftOnesIn);
1023 defm neon_mov_imm_MSL : neon_mov_imm_shift_operands<"MSL", "", "false", [{
1025 unsigned ShiftOnesIn;
1027 A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1028 return (HasShift && ShiftOnesIn);
1031 defm neon_mov_imm_LSLH : neon_mov_imm_shift_operands<"LSL", "H", "true", [{
1033 unsigned ShiftOnesIn;
1035 A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1036 return (HasShift && !ShiftOnesIn);
1039 def neon_uimm1_asmoperand : AsmOperandClass
1042 let PredicateMethod = "isUImm<1>";
1043 let RenderMethod = "addImmOperands";
1046 def neon_uimm2_asmoperand : AsmOperandClass
1049 let PredicateMethod = "isUImm<2>";
1050 let RenderMethod = "addImmOperands";
1053 def neon_uimm8_asmoperand : AsmOperandClass
1056 let PredicateMethod = "isUImm<8>";
1057 let RenderMethod = "addImmOperands";
1060 def neon_uimm8 : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
1061 let ParserMatchClass = neon_uimm8_asmoperand;
1062 let PrintMethod = "printNeonUImm8Operand";
1065 def neon_uimm64_mask_asmoperand : AsmOperandClass
1067 let Name = "NeonUImm64Mask";
1068 let PredicateMethod = "isNeonUImm64Mask";
1069 let RenderMethod = "addNeonUImm64MaskOperands";
1072 // MCOperand for 64-bit bytemask with each byte having only the
1073 // value 0x00 and 0xff is encoded as an unsigned 8-bit value
1074 def neon_uimm64_mask : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
1075 let ParserMatchClass = neon_uimm64_mask_asmoperand;
1076 let PrintMethod = "printNeonUImm64MaskOperand";
1079 multiclass NeonI_mov_imm_lsl_sizes<string asmop, bit op,
1080 SDPatternOperator opnode>
1082 // shift zeros, per word
1083 def _2S : NeonI_1VModImm<0b0, op,
1085 (ins neon_uimm8:$Imm,
1086 neon_mov_imm_LSL_operand:$Simm),
1087 !strconcat(asmop, " $Rd.2s, $Imm$Simm"),
1088 [(set (v2i32 VPR64:$Rd),
1089 (v2i32 (opnode (timm:$Imm),
1090 (neon_mov_imm_LSL_operand:$Simm))))],
1093 let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
1096 def _4S : NeonI_1VModImm<0b1, op,
1098 (ins neon_uimm8:$Imm,
1099 neon_mov_imm_LSL_operand:$Simm),
1100 !strconcat(asmop, " $Rd.4s, $Imm$Simm"),
1101 [(set (v4i32 VPR128:$Rd),
1102 (v4i32 (opnode (timm:$Imm),
1103 (neon_mov_imm_LSL_operand:$Simm))))],
1106 let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
1109 // shift zeros, per halfword
1110 def _4H : NeonI_1VModImm<0b0, op,
1112 (ins neon_uimm8:$Imm,
1113 neon_mov_imm_LSLH_operand:$Simm),
1114 !strconcat(asmop, " $Rd.4h, $Imm$Simm"),
1115 [(set (v4i16 VPR64:$Rd),
1116 (v4i16 (opnode (timm:$Imm),
1117 (neon_mov_imm_LSLH_operand:$Simm))))],
1120 let cmode = {0b1, 0b0, Simm, 0b0};
1123 def _8H : NeonI_1VModImm<0b1, op,
1125 (ins neon_uimm8:$Imm,
1126 neon_mov_imm_LSLH_operand:$Simm),
1127 !strconcat(asmop, " $Rd.8h, $Imm$Simm"),
1128 [(set (v8i16 VPR128:$Rd),
1129 (v8i16 (opnode (timm:$Imm),
1130 (neon_mov_imm_LSLH_operand:$Simm))))],
1133 let cmode = {0b1, 0b0, Simm, 0b0};
1137 multiclass NeonI_mov_imm_with_constraint_lsl_sizes<string asmop, bit op,
1138 SDPatternOperator opnode,
1139 SDPatternOperator neonopnode>
1141 let Constraints = "$src = $Rd" in {
1142 // shift zeros, per word
1143 def _2S : NeonI_1VModImm<0b0, op,
1145 (ins VPR64:$src, neon_uimm8:$Imm,
1146 neon_mov_imm_LSL_operand:$Simm),
1147 !strconcat(asmop, " $Rd.2s, $Imm$Simm"),
1148 [(set (v2i32 VPR64:$Rd),
1149 (v2i32 (opnode (v2i32 VPR64:$src),
1150 (v2i32 (bitconvert (v2i32 (neonopnode timm:$Imm,
1151 neon_mov_imm_LSL_operand:$Simm)))))))],
1154 let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
1157 def _4S : NeonI_1VModImm<0b1, op,
1159 (ins VPR128:$src, neon_uimm8:$Imm,
1160 neon_mov_imm_LSL_operand:$Simm),
1161 !strconcat(asmop, " $Rd.4s, $Imm$Simm"),
1162 [(set (v4i32 VPR128:$Rd),
1163 (v4i32 (opnode (v4i32 VPR128:$src),
1164 (v4i32 (bitconvert (v4i32 (neonopnode timm:$Imm,
1165 neon_mov_imm_LSL_operand:$Simm)))))))],
1168 let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
1171 // shift zeros, per halfword
1172 def _4H : NeonI_1VModImm<0b0, op,
1174 (ins VPR64:$src, neon_uimm8:$Imm,
1175 neon_mov_imm_LSLH_operand:$Simm),
1176 !strconcat(asmop, " $Rd.4h, $Imm$Simm"),
1177 [(set (v4i16 VPR64:$Rd),
1178 (v4i16 (opnode (v4i16 VPR64:$src),
1179 (v4i16 (bitconvert (v4i16 (neonopnode timm:$Imm,
1180 neon_mov_imm_LSL_operand:$Simm)))))))],
1183 let cmode = {0b1, 0b0, Simm, 0b1};
1186 def _8H : NeonI_1VModImm<0b1, op,
1188 (ins VPR128:$src, neon_uimm8:$Imm,
1189 neon_mov_imm_LSLH_operand:$Simm),
1190 !strconcat(asmop, " $Rd.8h, $Imm$Simm"),
1191 [(set (v8i16 VPR128:$Rd),
1192 (v8i16 (opnode (v8i16 VPR128:$src),
1193 (v8i16 (bitconvert (v8i16 (neonopnode timm:$Imm,
1194 neon_mov_imm_LSL_operand:$Simm)))))))],
1197 let cmode = {0b1, 0b0, Simm, 0b1};
1202 multiclass NeonI_mov_imm_msl_sizes<string asmop, bit op,
1203 SDPatternOperator opnode>
1205 // shift ones, per word
1206 def _2S : NeonI_1VModImm<0b0, op,
1208 (ins neon_uimm8:$Imm,
1209 neon_mov_imm_MSL_operand:$Simm),
1210 !strconcat(asmop, " $Rd.2s, $Imm$Simm"),
1211 [(set (v2i32 VPR64:$Rd),
1212 (v2i32 (opnode (timm:$Imm),
1213 (neon_mov_imm_MSL_operand:$Simm))))],
1216 let cmode = {0b1, 0b1, 0b0, Simm};
1219 def _4S : NeonI_1VModImm<0b1, op,
1221 (ins neon_uimm8:$Imm,
1222 neon_mov_imm_MSL_operand:$Simm),
1223 !strconcat(asmop, " $Rd.4s, $Imm$Simm"),
1224 [(set (v4i32 VPR128:$Rd),
1225 (v4i32 (opnode (timm:$Imm),
1226 (neon_mov_imm_MSL_operand:$Simm))))],
1229 let cmode = {0b1, 0b1, 0b0, Simm};
1233 // Vector Move Immediate Shifted
1234 let isReMaterializable = 1 in {
1235 defm MOVIvi_lsl : NeonI_mov_imm_lsl_sizes<"movi", 0b0, Neon_movi>;
1238 // Vector Move Inverted Immediate Shifted
1239 let isReMaterializable = 1 in {
1240 defm MVNIvi_lsl : NeonI_mov_imm_lsl_sizes<"mvni", 0b1, Neon_mvni>;
1243 // Vector Bitwise Bit Clear (AND NOT) - immediate
1244 let isReMaterializable = 1 in {
1245 defm BICvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"bic", 0b1,
1249 // Vector Bitwise OR - immediate
1251 let isReMaterializable = 1 in {
1252 defm ORRvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"orr", 0b0,
1256 // Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
1257 // LowerBUILD_VECTOR favors lowering MOVI over MVNI.
1258 // BIC immediate instructions selection requires additional patterns to
1259 // transform Neon_movi operands into BIC immediate operands
1261 def neon_mov_imm_LSLH_transform_XFORM : SDNodeXForm<imm, [{
1262 uint64_t OpCmode = N->getZExtValue();
1264 unsigned ShiftOnesIn;
1265 (void)A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
1266 // LSLH restricts shift amount to 0, 8 which are encoded as 0 and 1
1267 // Transform encoded shift amount 0 to 1 and 1 to 0.
1268 return CurDAG->getTargetConstant(!ShiftImm, MVT::i32);
1271 def neon_mov_imm_LSLH_transform_operand
1274 unsigned ShiftOnesIn;
1276 A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1277 return (HasShift && !ShiftOnesIn); }],
1278 neon_mov_imm_LSLH_transform_XFORM>;
1280 // Transform (and A, (4h Neon_movi 0xff)) -> BIC 4h (A, 0x00, LSL 8)
1281 // Transform (and A, (4h Neon_movi 0xff LSL #8)) -> BIC 4h (A, 0x00)
1282 def : Pat<(v4i16 (and VPR64:$src,
1283 (v4i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
1284 (BICvi_lsl_4H VPR64:$src, 0,
1285 neon_mov_imm_LSLH_transform_operand:$Simm)>;
1287 // Transform (and A, (8h Neon_movi 0xff)) -> BIC 8h (A, 0x00, LSL 8)
1288 // Transform (and A, (8h Neon_movi 0xff LSL #8)) -> BIC 8h (A, 0x00)
1289 def : Pat<(v8i16 (and VPR128:$src,
1290 (v8i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
1291 (BICvi_lsl_8H VPR128:$src, 0,
1292 neon_mov_imm_LSLH_transform_operand:$Simm)>;
1295 multiclass Neon_bitwiseVi_patterns<SDPatternOperator opnode,
1296 SDPatternOperator neonopnode,
1298 Instruction INST8H> {
1299 def : Pat<(v8i8 (opnode VPR64:$src,
1300 (bitconvert(v4i16 (neonopnode timm:$Imm,
1301 neon_mov_imm_LSLH_operand:$Simm))))),
1302 (INST4H VPR64:$src, neon_uimm8:$Imm,
1303 neon_mov_imm_LSLH_operand:$Simm)>;
1304 def : Pat<(v1i64 (opnode VPR64:$src,
1305 (bitconvert(v4i16 (neonopnode timm:$Imm,
1306 neon_mov_imm_LSLH_operand:$Simm))))),
1307 (INST4H VPR64:$src, neon_uimm8:$Imm,
1308 neon_mov_imm_LSLH_operand:$Simm)>;
1310 def : Pat<(v16i8 (opnode VPR128:$src,
1311 (bitconvert(v8i16 (neonopnode timm:$Imm,
1312 neon_mov_imm_LSLH_operand:$Simm))))),
1313 (INST8H VPR128:$src, neon_uimm8:$Imm,
1314 neon_mov_imm_LSLH_operand:$Simm)>;
1315 def : Pat<(v4i32 (opnode VPR128:$src,
1316 (bitconvert(v8i16 (neonopnode timm:$Imm,
1317 neon_mov_imm_LSLH_operand:$Simm))))),
1318 (INST8H VPR128:$src, neon_uimm8:$Imm,
1319 neon_mov_imm_LSLH_operand:$Simm)>;
1320 def : Pat<(v2i64 (opnode VPR128:$src,
1321 (bitconvert(v8i16 (neonopnode timm:$Imm,
1322 neon_mov_imm_LSLH_operand:$Simm))))),
1323 (INST8H VPR128:$src, neon_uimm8:$Imm,
1324 neon_mov_imm_LSLH_operand:$Simm)>;
1327 // Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
1328 defm : Neon_bitwiseVi_patterns<or, Neon_mvni, BICvi_lsl_4H, BICvi_lsl_8H>;
1330 // Additional patterns for Vector Bitwise OR - immediate
1331 defm : Neon_bitwiseVi_patterns<or, Neon_movi, ORRvi_lsl_4H, ORRvi_lsl_8H>;
1334 // Vector Move Immediate Masked
1335 let isReMaterializable = 1 in {
1336 defm MOVIvi_msl : NeonI_mov_imm_msl_sizes<"movi", 0b0, Neon_movi>;
1339 // Vector Move Inverted Immediate Masked
1340 let isReMaterializable = 1 in {
1341 defm MVNIvi_msl : NeonI_mov_imm_msl_sizes<"mvni", 0b1, Neon_mvni>;
1344 class NeonI_mov_imm_lsl_aliases<string asmop, string asmlane,
1345 Instruction inst, RegisterOperand VPRC>
1346 : NeonInstAlias<!strconcat(asmop, " $Rd," # asmlane # ", $Imm"),
1347 (inst VPRC:$Rd, neon_uimm8:$Imm, 0), 0b0>;
1349 // Aliases for Vector Move Immediate Shifted
1350 def : NeonI_mov_imm_lsl_aliases<"movi", ".2s", MOVIvi_lsl_2S, VPR64>;
1351 def : NeonI_mov_imm_lsl_aliases<"movi", ".4s", MOVIvi_lsl_4S, VPR128>;
1352 def : NeonI_mov_imm_lsl_aliases<"movi", ".4h", MOVIvi_lsl_4H, VPR64>;
1353 def : NeonI_mov_imm_lsl_aliases<"movi", ".8h", MOVIvi_lsl_8H, VPR128>;
1355 // Aliases for Vector Move Inverted Immediate Shifted
1356 def : NeonI_mov_imm_lsl_aliases<"mvni", ".2s", MVNIvi_lsl_2S, VPR64>;
1357 def : NeonI_mov_imm_lsl_aliases<"mvni", ".4s", MVNIvi_lsl_4S, VPR128>;
1358 def : NeonI_mov_imm_lsl_aliases<"mvni", ".4h", MVNIvi_lsl_4H, VPR64>;
1359 def : NeonI_mov_imm_lsl_aliases<"mvni", ".8h", MVNIvi_lsl_8H, VPR128>;
1361 // Aliases for Vector Bitwise Bit Clear (AND NOT) - immediate
1362 def : NeonI_mov_imm_lsl_aliases<"bic", ".2s", BICvi_lsl_2S, VPR64>;
1363 def : NeonI_mov_imm_lsl_aliases<"bic", ".4s", BICvi_lsl_4S, VPR128>;
1364 def : NeonI_mov_imm_lsl_aliases<"bic", ".4h", BICvi_lsl_4H, VPR64>;
1365 def : NeonI_mov_imm_lsl_aliases<"bic", ".8h", BICvi_lsl_8H, VPR128>;
1367 // Aliases for Vector Bitwise OR - immediate
1368 def : NeonI_mov_imm_lsl_aliases<"orr", ".2s", ORRvi_lsl_2S, VPR64>;
1369 def : NeonI_mov_imm_lsl_aliases<"orr", ".4s", ORRvi_lsl_4S, VPR128>;
1370 def : NeonI_mov_imm_lsl_aliases<"orr", ".4h", ORRvi_lsl_4H, VPR64>;
1371 def : NeonI_mov_imm_lsl_aliases<"orr", ".8h", ORRvi_lsl_8H, VPR128>;
1373 // Vector Move Immediate - per byte
1374 let isReMaterializable = 1 in {
1375 def MOVIvi_8B : NeonI_1VModImm<0b0, 0b0,
1376 (outs VPR64:$Rd), (ins neon_uimm8:$Imm),
1377 "movi\t$Rd.8b, $Imm",
1378 [(set (v8i8 VPR64:$Rd),
1379 (v8i8 (Neon_movi (timm:$Imm), (i32 imm))))],
1384 def MOVIvi_16B : NeonI_1VModImm<0b1, 0b0,
1385 (outs VPR128:$Rd), (ins neon_uimm8:$Imm),
1386 "movi\t$Rd.16b, $Imm",
1387 [(set (v16i8 VPR128:$Rd),
1388 (v16i8 (Neon_movi (timm:$Imm), (i32 imm))))],
1394 // Vector Move Immediate - bytemask, per double word
1395 let isReMaterializable = 1 in {
1396 def MOVIvi_2D : NeonI_1VModImm<0b1, 0b1,
1397 (outs VPR128:$Rd), (ins neon_uimm64_mask:$Imm),
1398 "movi\t $Rd.2d, $Imm",
1399 [(set (v2i64 VPR128:$Rd),
1400 (v2i64 (Neon_movi (timm:$Imm), (i32 imm))))],
1406 // Vector Move Immediate - bytemask, one doubleword
1408 let isReMaterializable = 1 in {
1409 def MOVIdi : NeonI_1VModImm<0b0, 0b1,
1410 (outs FPR64:$Rd), (ins neon_uimm64_mask:$Imm),
1412 [(set (f64 FPR64:$Rd),
1414 (v1i64 (Neon_movi (timm:$Imm), (i32 imm))))))],
1420 // Vector Floating Point Move Immediate
1422 class NeonI_FMOV_impl<string asmlane, RegisterOperand VPRC, ValueType OpTy,
1423 Operand immOpType, bit q, bit op>
1424 : NeonI_1VModImm<q, op,
1425 (outs VPRC:$Rd), (ins immOpType:$Imm),
1426 "fmov\t$Rd" # asmlane # ", $Imm",
1427 [(set (OpTy VPRC:$Rd),
1428 (OpTy (Neon_fmovi (timm:$Imm))))],
1433 let isReMaterializable = 1 in {
1434 def FMOVvi_2S : NeonI_FMOV_impl<".2s", VPR64, v2f32, fmov32_operand, 0b0, 0b0>;
1435 def FMOVvi_4S : NeonI_FMOV_impl<".4s", VPR128, v4f32, fmov32_operand, 0b1, 0b0>;
1436 def FMOVvi_2D : NeonI_FMOV_impl<".2d", VPR128, v2f64, fmov64_operand, 0b1, 0b1>;
1439 // Vector Shift (Immediate)
1440 // Immediate in [0, 63]
1441 def imm0_63 : Operand<i32> {
1442 let ParserMatchClass = uimm6_asmoperand;
1445 // Shift Right Immediate - A shift right immediate is encoded differently from
1446 // other shift immediates. The immh:immb field is encoded like so:
1449 // 8 immh:immb<6:3> = '0001xxx', <imm> is encoded in immh:immb<2:0>
1450 // 16 immh:immb<6:4> = '001xxxx', <imm> is encoded in immh:immb<3:0>
1451 // 32 immh:immb<6:5> = '01xxxxx', <imm> is encoded in immh:immb<4:0>
1452 // 64 immh:immb<6> = '1xxxxxx', <imm> is encoded in immh:immb<5:0>
1453 class shr_imm_asmoperands<string OFFSET> : AsmOperandClass {
1454 let Name = "ShrImm" # OFFSET;
1455 let RenderMethod = "addImmOperands";
1456 let DiagnosticType = "ShrImm" # OFFSET;
1459 class shr_imm<string OFFSET> : Operand<i32> {
1460 let EncoderMethod = "getShiftRightImm" # OFFSET;
1461 let DecoderMethod = "DecodeShiftRightImm" # OFFSET;
1462 let ParserMatchClass =
1463 !cast<AsmOperandClass>("shr_imm" # OFFSET # "_asmoperand");
1466 def shr_imm8_asmoperand : shr_imm_asmoperands<"8">;
1467 def shr_imm16_asmoperand : shr_imm_asmoperands<"16">;
1468 def shr_imm32_asmoperand : shr_imm_asmoperands<"32">;
1469 def shr_imm64_asmoperand : shr_imm_asmoperands<"64">;
// Concrete shift-right immediate operands, one per element width, each wired
// to its matching asmoperand/encoder/decoder via the shr_imm class above
// (immh:immb scheme documented in the comment block preceding
// shr_imm_asmoperands).
1471 def shr_imm8 : shr_imm<"8">;
1472 def shr_imm16 : shr_imm<"16">;
1473 def shr_imm32 : shr_imm<"32">;
1474 def shr_imm64 : shr_imm<"64">;
1476 class N2VShift<bit q, bit u, bits<5> opcode, string asmop, string T,
1477 RegisterOperand VPRC, ValueType Ty, Operand ImmTy, SDNode OpNode>
1478 : NeonI_2VShiftImm<q, u, opcode,
1479 (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
1480 asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1481 [(set (Ty VPRC:$Rd),
1482 (Ty (OpNode (Ty VPRC:$Rn),
1483 (Ty (Neon_dupImm (i32 imm:$Imm))))))],
1486 multiclass NeonI_N2VShL<bit u, bits<5> opcode, string asmop> {
1487 // 64-bit vector types.
1488 def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, uimm3, shl> {
1489 let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
1492 def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, uimm4, shl> {
1493 let Inst{22-20} = 0b001; // immh:immb = 001xxxx
1496 def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, uimm5, shl> {
1497 let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
1500 // 128-bit vector types.
1501 def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, uimm3, shl> {
1502 let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
1505 def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, uimm4, shl> {
1506 let Inst{22-20} = 0b001; // immh:immb = 001xxxx
1509 def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, uimm5, shl> {
1510 let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
1513 def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, imm0_63, shl> {
1514 let Inst{22} = 0b1; // immh:immb = 1xxxxxx
1518 multiclass NeonI_N2VShR<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
1519 def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1521 let Inst{22-19} = 0b0001;
1524 def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1526 let Inst{22-20} = 0b001;
1529 def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1531 let Inst{22-21} = 0b01;
1534 def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1536 let Inst{22-19} = 0b0001;
1539 def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1541 let Inst{22-20} = 0b001;
1544 def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1546 let Inst{22-21} = 0b01;
1549 def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
// Vector shift by immediate. SHL selects on the generic 'shl' node (via
// NeonI_N2VShL); SSHR/USHR select on the DAG 'sra' (arithmetic) and 'srl'
// (logical) right-shift nodes respectively.
1556 defm SHLvvi : NeonI_N2VShL<0b0, 0b01010, "shl">;
1559 defm SSHRvvi : NeonI_N2VShR<0b0, 0b00000, "sshr", sra>;
1560 defm USHRvvi : NeonI_N2VShR<0b1, 0b00000, "ushr", srl>;
1562 def Neon_High16B : PatFrag<(ops node:$in),
1563 (extract_subvector (v16i8 node:$in), (iPTR 8))>;
1564 def Neon_High8H : PatFrag<(ops node:$in),
1565 (extract_subvector (v8i16 node:$in), (iPTR 4))>;
1566 def Neon_High4S : PatFrag<(ops node:$in),
1567 (extract_subvector (v4i32 node:$in), (iPTR 2))>;
1569 def Neon_low8H : PatFrag<(ops node:$in),
1570 (v4i16 (extract_subvector (v8i16 node:$in),
1572 def Neon_low4S : PatFrag<(ops node:$in),
1573 (v2i32 (extract_subvector (v4i32 node:$in),
1575 def Neon_low4f : PatFrag<(ops node:$in),
1576 (v2f32 (extract_subvector (v4f32 node:$in),
1579 class N2VShiftLong<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1580 string SrcT, ValueType DestTy, ValueType SrcTy,
1581 Operand ImmTy, SDPatternOperator ExtOp>
1582 : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
1583 (ins VPR64:$Rn, ImmTy:$Imm),
1584 asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
1585 [(set (DestTy VPR128:$Rd),
1587 (DestTy (ExtOp (SrcTy VPR64:$Rn))),
1588 (DestTy (Neon_dupImm (i32 imm:$Imm))))))],
1591 class N2VShiftLongHigh<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1592 string SrcT, ValueType DestTy, ValueType SrcTy,
1593 int StartIndex, Operand ImmTy,
1594 SDPatternOperator ExtOp, PatFrag getTop>
1595 : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
1596 (ins VPR128:$Rn, ImmTy:$Imm),
1597 asmop # "2\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
1598 [(set (DestTy VPR128:$Rd),
1601 (SrcTy (getTop VPR128:$Rn)))),
1602 (DestTy (Neon_dupImm (i32 imm:$Imm))))))],
1605 multiclass NeonI_N2VShLL<string prefix, bit u, bits<5> opcode, string asmop,
1607 // 64-bit vector types.
1608 def _8B : N2VShiftLong<0b0, u, opcode, asmop, "8h", "8b", v8i16, v8i8,
1610 let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
1613 def _4H : N2VShiftLong<0b0, u, opcode, asmop, "4s", "4h", v4i32, v4i16,
1615 let Inst{22-20} = 0b001; // immh:immb = 001xxxx
1618 def _2S : N2VShiftLong<0b0, u, opcode, asmop, "2d", "2s", v2i64, v2i32,
1620 let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
1623 // 128-bit vector types
1624 def _16B : N2VShiftLongHigh<0b1, u, opcode, asmop, "8h", "16b",
1625 v8i16, v8i8, 8, uimm3, ExtOp, Neon_High16B> {
1626 let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
1629 def _8H : N2VShiftLongHigh<0b1, u, opcode, asmop, "4s", "8h",
1630 v4i32, v4i16, 4, uimm4, ExtOp, Neon_High8H> {
1631 let Inst{22-20} = 0b001; // immh:immb = 001xxxx
1634 def _4S : N2VShiftLongHigh<0b1, u, opcode, asmop, "2d", "4s",
1635 v2i64, v2i32, 2, uimm5, ExtOp, Neon_High4S> {
1636 let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
1639 // Use other patterns to match when the immediate is 0.
1640 def : Pat<(v8i16 (ExtOp (v8i8 VPR64:$Rn))),
1641 (!cast<Instruction>(prefix # "_8B") VPR64:$Rn, 0)>;
1643 def : Pat<(v4i32 (ExtOp (v4i16 VPR64:$Rn))),
1644 (!cast<Instruction>(prefix # "_4H") VPR64:$Rn, 0)>;
1646 def : Pat<(v2i64 (ExtOp (v2i32 VPR64:$Rn))),
1647 (!cast<Instruction>(prefix # "_2S") VPR64:$Rn, 0)>;
1649 def : Pat<(v8i16 (ExtOp (v8i8 (Neon_High16B VPR128:$Rn)))),
1650 (!cast<Instruction>(prefix # "_16B") VPR128:$Rn, 0)>;
1652 def : Pat<(v4i32 (ExtOp (v4i16 (Neon_High8H VPR128:$Rn)))),
1653 (!cast<Instruction>(prefix # "_8H") VPR128:$Rn, 0)>;
1655 def : Pat<(v2i64 (ExtOp (v2i32 (Neon_High4S VPR128:$Rn)))),
1656 (!cast<Instruction>(prefix # "_4S") VPR128:$Rn, 0)>;
// Widening shift-left long: the extension operator (sext for SSHLL, zext for
// USHLL) is folded with the shift by NeonI_N2VShLL, which also emits
// zero-shift patterns so a plain extend selects these instructions.
1660 defm SSHLLvvi : NeonI_N2VShLL<"SSHLLvvi", 0b0, 0b10100, "sshll", sext>;
1661 defm USHLLvvi : NeonI_N2VShLL<"USHLLvvi", 0b1, 0b10100, "ushll", zext>;
1663 // Rounding/Saturating shift
1664 class N2VShift_RQ<bit q, bit u, bits<5> opcode, string asmop, string T,
1665 RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
1666 SDPatternOperator OpNode>
1667 : NeonI_2VShiftImm<q, u, opcode,
1668 (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
1669 asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1670 [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$Rn),
1674 // shift right (vector by immediate)
1675 multiclass NeonI_N2VShR_RQ<bit u, bits<5> opcode, string asmop,
1676 SDPatternOperator OpNode> {
1677 def _8B : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1679 let Inst{22-19} = 0b0001;
1682 def _4H : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1684 let Inst{22-20} = 0b001;
1687 def _2S : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1689 let Inst{22-21} = 0b01;
1692 def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1694 let Inst{22-19} = 0b0001;
1697 def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1699 let Inst{22-20} = 0b001;
1702 def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1704 let Inst{22-21} = 0b01;
1707 def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
// Multiclass for saturating vector shift-left by immediate.  Unlike the
// shift-right variants it uses plain unsigned immediates (uimm3/4/5,
// imm0_63) since a left shift of 0..elemsize-1 is what the encoding allows.
1713 multiclass NeonI_N2VShL_Q<bit u, bits<5> opcode, string asmop,
1714 SDPatternOperator OpNode> {
1715 // 64-bit vector types.
1716 def _8B : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, uimm3,
1718 let Inst{22-19} = 0b0001;
1721 def _4H : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, uimm4,
1723 let Inst{22-20} = 0b001;
1726 def _2S : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, uimm5,
1728 let Inst{22-21} = 0b01;
1731 // 128-bit vector types.
1732 def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, uimm3,
1734 let Inst{22-19} = 0b0001;
1737 def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, uimm4,
1739 let Inst{22-20} = 0b001;
1742 def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, uimm5,
1744 let Inst{22-21} = 0b01;
1747 def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, imm0_63,
1753 // Rounding shift right
1754 defm SRSHRvvi : NeonI_N2VShR_RQ<0b0, 0b00100, "srshr",
1755 int_aarch64_neon_vsrshr>;
1756 defm URSHRvvi : NeonI_N2VShR_RQ<0b1, 0b00100, "urshr",
1757 int_aarch64_neon_vurshr>;
1759 // Saturating shift left unsigned
1760 defm SQSHLUvvi : NeonI_N2VShL_Q<0b1, 0b01100, "sqshlu", int_aarch64_neon_vsqshlu>;
1762 // Saturating shift left
1763 defm SQSHLvvi : NeonI_N2VShL_Q<0b0, 0b01110, "sqshl", Neon_sqrshlImm>;
1764 defm UQSHLvvi : NeonI_N2VShL_Q<0b1, 0b01110, "uqshl", Neon_uqrshlImm>;
// Base class for shift-right-and-accumulate: Rd = src + OpNode(Rn, dup(Imm)).
// $src is tied to $Rd (see Constraints) because the instruction reads and
// writes the destination register.
1766 class N2VShiftAdd<bit q, bit u, bits<5> opcode, string asmop, string T,
1767 RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
1769 : NeonI_2VShiftImm<q, u, opcode,
1770 (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
1771 asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1772 [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
1773 (Ty (OpNode (Ty VPRC:$Rn),
1774 (Ty (Neon_dupImm (i32 imm:$Imm))))))))],
1776 let Constraints = "$src = $Rd";
1779 // Shift Right accumulate
// Per-arrangement SSRA/USRA variants; OpNode is sra (signed) or srl
// (unsigned) at the instantiations below.
1780 multiclass NeonI_N2VShRAdd<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
1781 def _8B : N2VShiftAdd<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1783 let Inst{22-19} = 0b0001;
1786 def _4H : N2VShiftAdd<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1788 let Inst{22-20} = 0b001;
1791 def _2S : N2VShiftAdd<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1793 let Inst{22-21} = 0b01;
1796 def _16B : N2VShiftAdd<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1798 let Inst{22-19} = 0b0001;
1801 def _8H : N2VShiftAdd<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1803 let Inst{22-20} = 0b001;
1806 def _4S : N2VShiftAdd<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1808 let Inst{22-21} = 0b01;
1811 def _2D : N2VShiftAdd<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
1817 // Shift right and accumulate
1818 defm SSRAvvi : NeonI_N2VShRAdd<0, 0b00010, "ssra", sra>;
1819 defm USRAvvi : NeonI_N2VShRAdd<1, 0b00010, "usra", srl>;
1821 // Rounding shift accumulate
// Like N2VShiftAdd, but the shift operator is an intrinsic taking the raw
// i32 immediate directly (no Neon_dupImm splat) — used for the rounding
// shifts srsra/ursra below.
1822 class N2VShiftAdd_R<bit q, bit u, bits<5> opcode, string asmop, string T,
1823 RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
1824 SDPatternOperator OpNode>
1825 : NeonI_2VShiftImm<q, u, opcode,
1826 (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
1827 asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1828 [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
1829 (Ty (OpNode (Ty VPRC:$Rn), (i32 imm:$Imm))))))],
1831 let Constraints = "$src = $Rd";
1834 multiclass NeonI_N2VShRAdd_R<bit u, bits<5> opcode, string asmop,
1835 SDPatternOperator OpNode> {
1836 def _8B : N2VShiftAdd_R<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1838 let Inst{22-19} = 0b0001;
1841 def _4H : N2VShiftAdd_R<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1843 let Inst{22-20} = 0b001;
1846 def _2S : N2VShiftAdd_R<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1848 let Inst{22-21} = 0b01;
1851 def _16B : N2VShiftAdd_R<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1853 let Inst{22-19} = 0b0001;
1856 def _8H : N2VShiftAdd_R<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1858 let Inst{22-20} = 0b001;
1861 def _4S : N2VShiftAdd_R<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1863 let Inst{22-21} = 0b01;
1866 def _2D : N2VShiftAdd_R<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
1872 // Rounding shift right and accumulate
1873 defm SRSRAvvi : NeonI_N2VShRAdd_R<0, 0b00110, "srsra", int_aarch64_neon_vsrshr>;
1874 defm URSRAvvi : NeonI_N2VShRAdd_R<1, 0b00110, "ursra", int_aarch64_neon_vurshr>;
1876 // Shift insert by immediate
// Base class for SLI/SRI: the intrinsic takes ($src, $Rn, Imm) because the
// result merges shifted bits of Rn into the existing destination value;
// hence the $src = $Rd tie.
1877 class N2VShiftIns<bit q, bit u, bits<5> opcode, string asmop, string T,
1878 RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
1879 SDPatternOperator OpNode>
1880 : NeonI_2VShiftImm<q, u, opcode,
1881 (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
1882 asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1883 [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$src), (Ty VPRC:$Rn),
1886 let Constraints = "$src = $Rd";
1889 // shift left insert (vector by immediate)
// Uses unsigned left-shift immediates (uimm3/4/5, imm0_63) and the
// int_aarch64_neon_vsli intrinsic for every arrangement.
1890 multiclass NeonI_N2VShLIns<bit u, bits<5> opcode, string asmop> {
1891 def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, uimm3,
1892 int_aarch64_neon_vsli> {
1893 let Inst{22-19} = 0b0001;
1896 def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, uimm4,
1897 int_aarch64_neon_vsli> {
1898 let Inst{22-20} = 0b001;
1901 def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, uimm5,
1902 int_aarch64_neon_vsli> {
1903 let Inst{22-21} = 0b01;
1906 // 128-bit vector types
1907 def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, uimm3,
1908 int_aarch64_neon_vsli> {
1909 let Inst{22-19} = 0b0001;
1912 def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, uimm4,
1913 int_aarch64_neon_vsli> {
1914 let Inst{22-20} = 0b001;
1917 def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, uimm5,
1918 int_aarch64_neon_vsli> {
1919 let Inst{22-21} = 0b01;
1922 def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, imm0_63,
1923 int_aarch64_neon_vsli> {
1928 // shift right insert (vector by immediate)
// Mirror of NeonI_N2VShLIns but with shr_imm* right-shift immediates and
// the int_aarch64_neon_vsri intrinsic.
1929 multiclass NeonI_N2VShRIns<bit u, bits<5> opcode, string asmop> {
1930 // 64-bit vector types.
1931 def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1932 int_aarch64_neon_vsri> {
1933 let Inst{22-19} = 0b0001;
1936 def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1937 int_aarch64_neon_vsri> {
1938 let Inst{22-20} = 0b001;
1941 def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1942 int_aarch64_neon_vsri> {
1943 let Inst{22-21} = 0b01;
1946 // 128-bit vector types
1947 def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1948 int_aarch64_neon_vsri> {
1949 let Inst{22-19} = 0b0001;
1952 def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1953 int_aarch64_neon_vsri> {
1954 let Inst{22-20} = 0b001;
1957 def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1958 int_aarch64_neon_vsri> {
1959 let Inst{22-21} = 0b01;
1962 def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
1963 int_aarch64_neon_vsri> {
1968 // Shift left and insert
1969 defm SLIvvi : NeonI_N2VShLIns<0b1, 0b01010, "sli">;
1971 // Shift right and insert
1972 defm SRIvvi : NeonI_N2VShRIns<0b1, 0b01000, "sri">;
// Narrowing shift right: 128-bit source, 64-bit result (low-half form).
// No pattern here; selection is done via the Pat multiclasses further down.
1974 class N2VShR_Narrow<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1975 string SrcT, Operand ImmTy>
1976 : NeonI_2VShiftImm<q, u, opcode,
1977 (outs VPR64:$Rd), (ins VPR128:$Rn, ImmTy:$Imm),
1978 asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
// High-half ("2") form: writes the upper half of a 128-bit destination,
// so it also reads the destination ($src tied to $Rd).
1981 class N2VShR_Narrow_Hi<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1982 string SrcT, Operand ImmTy>
1983 : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
1984 (ins VPR128:$src, VPR128:$Rn, ImmTy:$Imm),
1985 asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
1987 let Constraints = "$src = $Rd";
1990 // shift right narrow (vector by immediate)
1991 multiclass NeonI_N2VShR_Narrow<bit u, bits<5> opcode, string asmop> {
1992 def _8B : N2VShR_Narrow<0b0, u, opcode, asmop, "8b", "8h", shr_imm8> {
1993 let Inst{22-19} = 0b0001;
1996 def _4H : N2VShR_Narrow<0b0, u, opcode, asmop, "4h", "4s", shr_imm16> {
1997 let Inst{22-20} = 0b001;
2000 def _2S : N2VShR_Narrow<0b0, u, opcode, asmop, "2s", "2d", shr_imm32> {
2001 let Inst{22-21} = 0b01;
2004 // Shift Narrow High
2005 def _16B : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "16b", "8h",
2007 let Inst{22-19} = 0b0001;
2010 def _8H : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "8h", "4s",
2012 let Inst{22-20} = 0b001;
2015 def _4S : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "4s", "2d",
2017 let Inst{22-21} = 0b01;
2021 // Shift right narrow
2022 defm SHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10000, "shrn">;
2024 // Shift right narrow (prefix Q is saturating, prefix R is rounding)
2025 defm QSHRUNvvi :NeonI_N2VShR_Narrow<0b1, 0b10000, "sqshrun">;
2026 defm RSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10001, "rshrn">;
2027 defm QRSHRUNvvi : NeonI_N2VShR_Narrow<0b1, 0b10001, "sqrshrun">;
2028 defm SQSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10010, "sqshrn">;
2029 defm UQSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10010, "uqshrn">;
2030 defm SQRSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10011, "sqrshrn">;
2031 defm UQRSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10011, "uqrshrn">;
// PatFrags that combine two 64-bit halves into one 128-bit vector via
// concat_vectors, one per element type (2D/8H/4S integer, 4f/2d float).
2033 def Neon_combine_2D : PatFrag<(ops node:$Rm, node:$Rn),
2034 (v2i64 (concat_vectors (v1i64 node:$Rm),
2035 (v1i64 node:$Rn)))>;
2036 def Neon_combine_8H : PatFrag<(ops node:$Rm, node:$Rn),
2037 (v8i16 (concat_vectors (v4i16 node:$Rm),
2038 (v4i16 node:$Rn)))>;
2039 def Neon_combine_4S : PatFrag<(ops node:$Rm, node:$Rn),
2040 (v4i32 (concat_vectors (v2i32 node:$Rm),
2041 (v2i32 node:$Rn)))>;
2042 def Neon_combine_4f : PatFrag<(ops node:$Rm, node:$Rn),
2043 (v4f32 (concat_vectors (v2f32 node:$Rm),
2044 (v2f32 node:$Rn)))>;
2045 def Neon_combine_2d : PatFrag<(ops node:$Rm, node:$Rn),
2046 (v2f64 (concat_vectors (v1f64 node:$Rm),
2047 (v1f64 node:$Rn)))>;
// Logical (lshr) and arithmetic (ashr) shift-right-by-splatted-immediate
// fragments used by the narrowing-shift patterns below.
2049 def Neon_lshrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
2050 (v8i16 (srl (v8i16 node:$lhs),
2051 (v8i16 (Neon_dupImm (i32 node:$rhs)))))>;
2052 def Neon_lshrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
2053 (v4i32 (srl (v4i32 node:$lhs),
2054 (v4i32 (Neon_dupImm (i32 node:$rhs)))))>;
2055 def Neon_lshrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
2056 (v2i64 (srl (v2i64 node:$lhs),
2057 (v2i64 (Neon_dupImm (i32 node:$rhs)))))>;
2058 def Neon_ashrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
2059 (v8i16 (sra (v8i16 node:$lhs),
2060 (v8i16 (Neon_dupImm (i32 node:$rhs)))))>;
2061 def Neon_ashrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
2062 (v4i32 (sra (v4i32 node:$lhs),
2063 (v4i32 (Neon_dupImm (i32 node:$rhs)))))>;
2064 def Neon_ashrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
2065 (v2i64 (sra (v2i64 node:$lhs),
2066 (v2i64 (Neon_dupImm (i32 node:$rhs)))))>;
2068 // Normal shift right narrow is matched by IR (srl/sra, trunc, concat_vectors)
// Selects SHRN for (trunc (srl/sra x, splat Imm)); `shr` is "lshr" or
// "ashr" and picks the matching PatFrag by name via !cast.  The high-half
// patterns wrap the result in Neon_combine_2D and feed the low half in
// through SUBREG_TO_REG.
2069 multiclass Neon_shiftNarrow_patterns<string shr> {
2070 def : Pat<(v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H") VPR128:$Rn,
2072 (SHRNvvi_8B VPR128:$Rn, imm:$Imm)>;
2073 def : Pat<(v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S") VPR128:$Rn,
2075 (SHRNvvi_4H VPR128:$Rn, imm:$Imm)>;
2076 def : Pat<(v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D") VPR128:$Rn,
2078 (SHRNvvi_2S VPR128:$Rn, imm:$Imm)>;
2080 def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2081 (v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H")
2082 VPR128:$Rn, imm:$Imm)))))),
2083 (SHRNvvi_16B (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2084 VPR128:$Rn, imm:$Imm)>;
2085 def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2086 (v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S")
2087 VPR128:$Rn, imm:$Imm)))))),
2088 (SHRNvvi_8H (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2089 VPR128:$Rn, imm:$Imm)>;
2090 def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2091 (v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D")
2092 VPR128:$Rn, imm:$Imm)))))),
2093 (SHRNvvi_4S (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2094 VPR128:$Rn, imm:$Imm)>;
// Same structure for the saturating/rounding narrow intrinsics; `op` is
// the intrinsic and `prefix` names the instruction family to cast to.
2097 multiclass Neon_shiftNarrow_QR_patterns<SDPatternOperator op, string prefix> {
2098 def : Pat<(v8i8 (op (v8i16 VPR128:$Rn), imm:$Imm)),
2099 (!cast<Instruction>(prefix # "_8B") VPR128:$Rn, imm:$Imm)>;
2100 def : Pat<(v4i16 (op (v4i32 VPR128:$Rn), imm:$Imm)),
2101 (!cast<Instruction>(prefix # "_4H") VPR128:$Rn, imm:$Imm)>;
2102 def : Pat<(v2i32 (op (v2i64 VPR128:$Rn), imm:$Imm)),
2103 (!cast<Instruction>(prefix # "_2S") VPR128:$Rn, imm:$Imm)>;
2105 def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2106 (v1i64 (bitconvert (v8i8 (op (v8i16 VPR128:$Rn), imm:$Imm))))),
2107 (!cast<Instruction>(prefix # "_16B")
2108 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2109 VPR128:$Rn, imm:$Imm)>;
2110 def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2111 (v1i64 (bitconvert (v4i16 (op (v4i32 VPR128:$Rn), imm:$Imm))))),
2112 (!cast<Instruction>(prefix # "_8H")
2113 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2114 VPR128:$Rn, imm:$Imm)>;
2115 def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2116 (v1i64 (bitconvert (v2i32 (op (v2i64 VPR128:$Rn), imm:$Imm))))),
2117 (!cast<Instruction>(prefix # "_4S")
2118 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2119 VPR128:$Rn, imm:$Imm)>;
2122 defm : Neon_shiftNarrow_patterns<"lshr">;
2123 defm : Neon_shiftNarrow_patterns<"ashr">;
2125 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrun, "QSHRUNvvi">;
2126 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vrshrn, "RSHRNvvi">;
2127 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrun, "QRSHRUNvvi">;
2128 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrn, "SQSHRNvvi">;
2129 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqshrn, "UQSHRNvvi">;
2130 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrn, "SQRSHRNvvi">;
2131 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqrshrn, "UQRSHRNvvi">;
2133 // Convert fixed-point and floating-point
// Base class for fixed-point <-> floating-point conversions with an
// immediate number of fraction bits ($Imm).
2134 class N2VCvt_Fx<bit q, bit u, bits<5> opcode, string asmop, string T,
2135 RegisterOperand VPRC, ValueType DestTy, ValueType SrcTy,
2136 Operand ImmTy, SDPatternOperator IntOp>
2137 : NeonI_2VShiftImm<q, u, opcode,
2138 (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
2139 asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
2140 [(set (DestTy VPRC:$Rd), (DestTy (IntOp (SrcTy VPRC:$Rn),
// Fixed-point -> floating-point (scvtf/ucvtf) variants: 2s/4s/2d.
2144 multiclass NeonI_N2VCvt_Fx2fp<bit u, bits<5> opcode, string asmop,
2145 SDPatternOperator IntOp> {
2146 def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2f32, v2i32,
2148 let Inst{22-21} = 0b01;
2151 def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4f32, v4i32,
2153 let Inst{22-21} = 0b01;
2156 def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2f64, v2i64,
// Floating-point -> fixed-point (fcvtzs/fcvtzu) variants: 2s/4s/2d.
2162 multiclass NeonI_N2VCvt_Fp2fx<bit u, bits<5> opcode, string asmop,
2163 SDPatternOperator IntOp> {
2164 def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2i32, v2f32,
2166 let Inst{22-21} = 0b01;
2169 def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4i32, v4f32,
2171 let Inst{22-21} = 0b01;
2174 def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2i64, v2f64,
2180 // Convert fixed-point to floating-point
2181 defm VCVTxs2f : NeonI_N2VCvt_Fx2fp<0, 0b11100, "scvtf",
2182 int_arm_neon_vcvtfxs2fp>;
2183 defm VCVTxu2f : NeonI_N2VCvt_Fx2fp<1, 0b11100, "ucvtf",
2184 int_arm_neon_vcvtfxu2fp>;
2186 // Convert floating-point to fixed-point
2187 defm VCVTf2xs : NeonI_N2VCvt_Fp2fx<0, 0b11111, "fcvtzs",
2188 int_arm_neon_vcvtfp2fxs>;
2189 defm VCVTf2xu : NeonI_N2VCvt_Fp2fx<1, 0b11111, "fcvtzu",
2190 int_arm_neon_vcvtfp2fxu>;
// PatFrags that extend (sext/zext) the HIGH half of a 128-bit vector to the
// next-wider element type; instantiated as NI_sext_high_* / NI_zext_high_*.
2192 multiclass Neon_sshll2_0<SDNode ext>
2194 def _v8i8 : PatFrag<(ops node:$Rn),
2195 (v8i16 (ext (v8i8 (Neon_High16B node:$Rn))))>;
2196 def _v4i16 : PatFrag<(ops node:$Rn),
2197 (v4i32 (ext (v4i16 (Neon_High8H node:$Rn))))>;
2198 def _v2i32 : PatFrag<(ops node:$Rn),
2199 (v2i64 (ext (v2i32 (Neon_High4S node:$Rn))))>;
2202 defm NI_sext_high : Neon_sshll2_0<sext>;
2203 defm NI_zext_high : Neon_sshll2_0<zext>;
2206 //===----------------------------------------------------------------------===//
2207 // Multiclasses for NeonI_Across
2208 //===----------------------------------------------------------------------===//
// Across-lanes reductions whose result widens by one step (saddlv/uaddlv):
// the scalar destination is one element size wider than the source lanes.
2212 multiclass NeonI_2VAcross_1<bit u, bits<5> opcode,
2213 string asmop, SDPatternOperator opnode>
2215 def _1h8b: NeonI_2VAcross<0b0, u, 0b00, opcode,
2216 (outs FPR16:$Rd), (ins VPR64:$Rn),
2217 asmop # "\t$Rd, $Rn.8b",
2218 [(set (v1i16 FPR16:$Rd),
2219 (v1i16 (opnode (v8i8 VPR64:$Rn))))],
2222 def _1h16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
2223 (outs FPR16:$Rd), (ins VPR128:$Rn),
2224 asmop # "\t$Rd, $Rn.16b",
2225 [(set (v1i16 FPR16:$Rd),
2226 (v1i16 (opnode (v16i8 VPR128:$Rn))))],
2229 def _1s4h: NeonI_2VAcross<0b0, u, 0b01, opcode,
2230 (outs FPR32:$Rd), (ins VPR64:$Rn),
2231 asmop # "\t$Rd, $Rn.4h",
2232 [(set (v1i32 FPR32:$Rd),
2233 (v1i32 (opnode (v4i16 VPR64:$Rn))))],
2236 def _1s8h: NeonI_2VAcross<0b1, u, 0b01, opcode,
2237 (outs FPR32:$Rd), (ins VPR128:$Rn),
2238 asmop # "\t$Rd, $Rn.8h",
2239 [(set (v1i32 FPR32:$Rd),
2240 (v1i32 (opnode (v8i16 VPR128:$Rn))))],
2243 // _1d2s doesn't exist!
2245 def _1d4s: NeonI_2VAcross<0b1, u, 0b10, opcode,
2246 (outs FPR64:$Rd), (ins VPR128:$Rn),
2247 asmop # "\t$Rd, $Rn.4s",
2248 [(set (v1i64 FPR64:$Rd),
2249 (v1i64 (opnode (v4i32 VPR128:$Rn))))],
2253 defm SADDLV : NeonI_2VAcross_1<0b0, 0b00011, "saddlv", int_aarch64_neon_saddlv>;
2254 defm UADDLV : NeonI_2VAcross_1<0b1, 0b00011, "uaddlv", int_aarch64_neon_uaddlv>;
// Across-lanes reductions with same-width scalar result (smaxv/umaxv/
// sminv/uminv/addv): destination element size equals source lane size.
2258 multiclass NeonI_2VAcross_2<bit u, bits<5> opcode,
2259 string asmop, SDPatternOperator opnode>
2261 def _1b8b: NeonI_2VAcross<0b0, u, 0b00, opcode,
2262 (outs FPR8:$Rd), (ins VPR64:$Rn),
2263 asmop # "\t$Rd, $Rn.8b",
2264 [(set (v1i8 FPR8:$Rd),
2265 (v1i8 (opnode (v8i8 VPR64:$Rn))))],
2268 def _1b16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
2269 (outs FPR8:$Rd), (ins VPR128:$Rn),
2270 asmop # "\t$Rd, $Rn.16b",
2271 [(set (v1i8 FPR8:$Rd),
2272 (v1i8 (opnode (v16i8 VPR128:$Rn))))],
2275 def _1h4h: NeonI_2VAcross<0b0, u, 0b01, opcode,
2276 (outs FPR16:$Rd), (ins VPR64:$Rn),
2277 asmop # "\t$Rd, $Rn.4h",
2278 [(set (v1i16 FPR16:$Rd),
2279 (v1i16 (opnode (v4i16 VPR64:$Rn))))],
2282 def _1h8h: NeonI_2VAcross<0b1, u, 0b01, opcode,
2283 (outs FPR16:$Rd), (ins VPR128:$Rn),
2284 asmop # "\t$Rd, $Rn.8h",
2285 [(set (v1i16 FPR16:$Rd),
2286 (v1i16 (opnode (v8i16 VPR128:$Rn))))],
2289 // _1s2s doesn't exist!
2291 def _1s4s: NeonI_2VAcross<0b1, u, 0b10, opcode,
2292 (outs FPR32:$Rd), (ins VPR128:$Rn),
2293 asmop # "\t$Rd, $Rn.4s",
2294 [(set (v1i32 FPR32:$Rd),
2295 (v1i32 (opnode (v4i32 VPR128:$Rn))))],
2299 defm SMAXV : NeonI_2VAcross_2<0b0, 0b01010, "smaxv", int_aarch64_neon_smaxv>;
2300 defm UMAXV : NeonI_2VAcross_2<0b1, 0b01010, "umaxv", int_aarch64_neon_umaxv>;
2302 defm SMINV : NeonI_2VAcross_2<0b0, 0b11010, "sminv", int_aarch64_neon_sminv>;
2303 defm UMINV : NeonI_2VAcross_2<0b1, 0b11010, "uminv", int_aarch64_neon_uminv>;
2305 defm ADDV : NeonI_2VAcross_2<0b0, 0b11011, "addv", int_aarch64_neon_vaddv>;
// Floating-point across-lanes reductions: only the 4s -> s form exists
// (fmaxnmv/fminnmv/fmaxv/fminv); min vs max is selected by `size`.
2309 multiclass NeonI_2VAcross_3<bit u, bits<5> opcode, bits<2> size,
2310 string asmop, SDPatternOperator opnode>
2312 def _1s4s: NeonI_2VAcross<0b1, u, size, opcode,
2313 (outs FPR32:$Rd), (ins VPR128:$Rn),
2314 asmop # "\t$Rd, $Rn.4s",
2315 [(set (v1f32 FPR32:$Rd),
2316 (v1f32 (opnode (v4f32 VPR128:$Rn))))],
2320 defm FMAXNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b00, "fmaxnmv",
2321 int_aarch64_neon_vmaxnmv>;
2322 defm FMINNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b10, "fminnmv",
2323 int_aarch64_neon_vminnmv>;
2325 defm FMAXV : NeonI_2VAcross_3<0b1, 0b01111, 0b00, "fmaxv",
2326 int_aarch64_neon_vmaxv>;
2327 defm FMINV : NeonI_2VAcross_3<0b1, 0b01111, 0b10, "fminv",
2328 int_aarch64_neon_vminv>;
2330 // The following definitions are for the instruction class (3V Diff)
2332 // normal long/long2 pattern
// Long form: both 64-bit (or high-half 128-bit) operands are extended via
// `ext` before `opnode` produces the 128-bit result.
2333 class NeonI_3VDL<bit q, bit u, bits<2> size, bits<4> opcode,
2334 string asmop, string ResS, string OpS,
2335 SDPatternOperator opnode, SDPatternOperator ext,
2336 RegisterOperand OpVPR,
2337 ValueType ResTy, ValueType OpTy>
2338 : NeonI_3VDiff<q, u, size, opcode,
2339 (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2340 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2341 [(set (ResTy VPR128:$Rd),
2342 (ResTy (opnode (ResTy (ext (OpTy OpVPR:$Rn))),
2343 (ResTy (ext (OpTy OpVPR:$Rm))))))],
// Signed long (low half): extend with sext.  Commutable controls
// isCommutable for add-like opnodes.
2346 multiclass NeonI_3VDL_s<bit u, bits<4> opcode,
2347 string asmop, SDPatternOperator opnode,
2350 let isCommutable = Commutable in {
2351 def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2352 opnode, sext, VPR64, v8i16, v8i8>;
2353 def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2354 opnode, sext, VPR64, v4i32, v4i16>;
2355 def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2356 opnode, sext, VPR64, v2i64, v2i32>;
// Signed long2 (high half): extend with the NI_sext_high_* PatFrags.
2360 multiclass NeonI_3VDL2_s<bit u, bits<4> opcode,
2361 string asmop, SDPatternOperator opnode,
2364 let isCommutable = Commutable in {
2365 def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2366 opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
2367 def _4s8h : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2368 opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
2369 def _2d4s : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2370 opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
// Unsigned long (low half): zext.
2374 multiclass NeonI_3VDL_u<bit u, bits<4> opcode,
2375 string asmop, SDPatternOperator opnode,
2378 let isCommutable = Commutable in {
2379 def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2380 opnode, zext, VPR64, v8i16, v8i8>;
2381 def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2382 opnode, zext, VPR64, v4i32, v4i16>;
2383 def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2384 opnode, zext, VPR64, v2i64, v2i32>;
// Unsigned long2 (high half): NI_zext_high_*.
2388 multiclass NeonI_3VDL2_u<bit u, bits<4> opcode,
2389 string asmop, SDPatternOperator opnode,
2392 let isCommutable = Commutable in {
2393 def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2394 opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
2395 def _4s8h : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2396 opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
2397 def _2d4s : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2398 opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
2402 defm SADDLvvv :  NeonI_3VDL_s<0b0, 0b0000, "saddl", add, 1>;
2403 defm UADDLvvv :  NeonI_3VDL_u<0b1, 0b0000, "uaddl", add, 1>;
2405 defm SADDL2vvv :  NeonI_3VDL2_s<0b0, 0b0000, "saddl2", add, 1>;
2406 defm UADDL2vvv :  NeonI_3VDL2_u<0b1, 0b0000, "uaddl2", add, 1>;
2408 defm SSUBLvvv :  NeonI_3VDL_s<0b0, 0b0010, "ssubl", sub, 0>;
2409 defm USUBLvvv :  NeonI_3VDL_u<0b1, 0b0010, "usubl", sub, 0>;
2411 defm SSUBL2vvv :  NeonI_3VDL2_s<0b0, 0b0010, "ssubl2", sub, 0>;
2412 defm USUBL2vvv :  NeonI_3VDL2_u<0b1, 0b0010, "usubl2", sub, 0>;
2414 // normal wide/wide2 pattern
// Wide form: Rn is already wide (VPR128); only Rm is extended before the
// operation.  Note the asm uses ResS for both Rd and Rn.
2415 class NeonI_3VDW<bit q, bit u, bits<2> size, bits<4> opcode,
2416 string asmop, string ResS, string OpS,
2417 SDPatternOperator opnode, SDPatternOperator ext,
2418 RegisterOperand OpVPR,
2419 ValueType ResTy, ValueType OpTy>
2420 : NeonI_3VDiff<q, u, size, opcode,
2421 (outs VPR128:$Rd), (ins VPR128:$Rn, OpVPR:$Rm),
2422 asmop # "\t$Rd." # ResS # ", $Rn." # ResS # ", $Rm." # OpS,
2423 [(set (ResTy VPR128:$Rd),
2424 (ResTy (opnode (ResTy VPR128:$Rn),
2425 (ResTy (ext (OpTy OpVPR:$Rm))))))],
// Signed wide (low half of Rm, sext).
2428 multiclass NeonI_3VDW_s<bit u, bits<4> opcode,
2429 string asmop, SDPatternOperator opnode>
2431 def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2432 opnode, sext, VPR64, v8i16, v8i8>;
2433 def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2434 opnode, sext, VPR64, v4i32, v4i16>;
2435 def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2436 opnode, sext, VPR64, v2i64, v2i32>;
2439 defm SADDWvvv :  NeonI_3VDW_s<0b0, 0b0001, "saddw", add>;
2440 defm SSUBWvvv :  NeonI_3VDW_s<0b0, 0b0011, "ssubw", sub>;
// Signed wide2 (high half of Rm).
2442 multiclass NeonI_3VDW2_s<bit u, bits<4> opcode,
2443 string asmop, SDPatternOperator opnode>
2445 def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2446 opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
2447 def _4s8h : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2448 opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
2449 def _2d4s : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2450 opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
2453 defm SADDW2vvv :  NeonI_3VDW2_s<0b0, 0b0001, "saddw2", add>;
2454 defm SSUBW2vvv :  NeonI_3VDW2_s<0b0, 0b0011, "ssubw2", sub>;
// Unsigned wide (zext).
2456 multiclass NeonI_3VDW_u<bit u, bits<4> opcode,
2457 string asmop, SDPatternOperator opnode>
2459 def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2460 opnode, zext, VPR64, v8i16, v8i8>;
2461 def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2462 opnode, zext, VPR64, v4i32, v4i16>;
2463 def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2464 opnode, zext, VPR64, v2i64, v2i32>;
2467 defm UADDWvvv :  NeonI_3VDW_u<0b1, 0b0001, "uaddw", add>;
2468 defm USUBWvvv :  NeonI_3VDW_u<0b1, 0b0011, "usubw", sub>;
// Unsigned wide2 (high half, NI_zext_high_*).
2470 multiclass NeonI_3VDW2_u<bit u, bits<4> opcode,
2471 string asmop, SDPatternOperator opnode>
2473 def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2474 opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
2475 def _4s8h : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2476 opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
2477 def _2d4s : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2478 opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
2481 defm UADDW2vvv :  NeonI_3VDW2_u<0b1, 0b0001, "uaddw2", add>;
2482 defm USUBW2vvv :  NeonI_3VDW2_u<0b1, 0b0011, "usubw2", sub>;
2484 // Get the high half part of the vector element.
// PatFrags extracting the upper half of each lane: shift right by half the
// element width (8/16/32) then truncate to the narrower type.
2485 multiclass NeonI_get_high
2487 def _8h : PatFrag<(ops node:$Rn),
2488 (v8i8 (trunc (v8i16 (srl (v8i16 node:$Rn),
2489 (v8i16 (Neon_dupImm 8))))))>;
2490 def _4s : PatFrag<(ops node:$Rn),
2491 (v4i16 (trunc (v4i32 (srl (v4i32 node:$Rn),
2492 (v4i32 (Neon_dupImm 16))))))>;
2493 def _2d : PatFrag<(ops node:$Rn),
2494 (v2i32 (trunc (v2i64 (srl (v2i64 node:$Rn),
2495 (v2i64 (Neon_dupImm 32))))))>;
2498 defm NI_get_hi : NeonI_get_high;
2500 // pattern for addhn/subhn with 2 operands
// Narrowing high-half: opnode (add/sub) runs at the wide type and get_hi
// extracts the upper half of each result lane into the narrow destination.
2501 class NeonI_3VDN_addhn_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
2502 string asmop, string ResS, string OpS,
2503 SDPatternOperator opnode, SDPatternOperator get_hi,
2504 ValueType ResTy, ValueType OpTy>
2505 : NeonI_3VDiff<q, u, size, opcode,
2506 (outs VPR64:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
2507 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2508 [(set (ResTy VPR64:$Rd),
2510 (OpTy (opnode (OpTy VPR128:$Rn),
2511 (OpTy VPR128:$Rm))))))],
2514 multiclass NeonI_3VDN_addhn_2Op<bit u, bits<4> opcode,
2515 string asmop, SDPatternOperator opnode,
2518 let isCommutable = Commutable in {
2519 def _8b8h : NeonI_3VDN_addhn_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
2520 opnode, NI_get_hi_8h, v8i8, v8i16>;
2521 def _4h4s : NeonI_3VDN_addhn_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
2522 opnode, NI_get_hi_4s, v4i16, v4i32>;
2523 def _2s2d : NeonI_3VDN_addhn_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
2524 opnode, NI_get_hi_2d, v2i32, v2i64>;
2528 defm ADDHNvvv :  NeonI_3VDN_addhn_2Op<0b0, 0b0100, "addhn", add, 1>;
2529 defm SUBHNvvv :  NeonI_3VDN_addhn_2Op<0b0, 0b0110, "subhn", sub, 0>;
2531 // pattern for operation with 2 operands
// Generic two-operand 3VDiff class; result/operand register classes and
// types are fully parameterized.
2532 class NeonI_3VD_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
2533 string asmop, string ResS, string OpS,
2534 SDPatternOperator opnode,
2535 RegisterOperand ResVPR, RegisterOperand OpVPR,
2536 ValueType ResTy, ValueType OpTy>
2537 : NeonI_3VDiff<q, u, size, opcode,
2538 (outs ResVPR:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2539 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2540 [(set (ResTy ResVPR:$Rd),
2541 (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))],
2544 // normal narrow pattern
// Narrow variants (raddhn/rsubhn): 128-bit operands, 64-bit result.
2545 multiclass NeonI_3VDN_2Op<bit u, bits<4> opcode,
2546 string asmop, SDPatternOperator opnode,
2549 let isCommutable = Commutable in {
2550 def _8b8h : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
2551 opnode, VPR64, VPR128, v8i8, v8i16>;
2552 def _4h4s : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
2553 opnode, VPR64, VPR128, v4i16, v4i32>;
2554 def _2s2d : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
2555 opnode, VPR64, VPR128, v2i32, v2i64>;
2559 defm RADDHNvvv : NeonI_3VDN_2Op<0b1, 0b0100, "raddhn", int_arm_neon_vraddhn, 1>;
2560 defm RSUBHNvvv : NeonI_3VDN_2Op<0b1, 0b0110, "rsubhn", int_arm_neon_vrsubhn, 0>;
2562 // pattern for acle intrinsic with 3 operands
// High-half narrow ("2") instructions: no selection pattern here (hence
// neverHasSideEffects); matching is done by NarrowHighHalfPat below.
2563 class NeonI_3VDN_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
2564 string asmop, string ResS, string OpS>
2565 : NeonI_3VDiff<q, u, size, opcode,
2566 (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn, VPR128:$Rm),
2567 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2569 let Constraints = "$src = $Rd";
2570 let neverHasSideEffects = 1;
2573 multiclass NeonI_3VDN_3Op_v1<bit u, bits<4> opcode,
2575 def _16b8h : NeonI_3VDN_3Op<0b1, u, 0b00, opcode, asmop, "16b", "8h">;
2576 def _8h4s : NeonI_3VDN_3Op<0b1, u, 0b01, opcode, asmop, "8h", "4s">;
2577 def _4s2d : NeonI_3VDN_3Op<0b1, u, 0b10, opcode, asmop, "4s", "2d">;
2580 defm ADDHN2vvv :  NeonI_3VDN_3Op_v1<0b0, 0b0100, "addhn2">;
2581 defm SUBHN2vvv :  NeonI_3VDN_3Op_v1<0b0, 0b0110, "subhn2">;
2583 defm RADDHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0100, "raddhn2">;
2584 defm RSUBHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0110, "rsubhn2">;
2586 // Patterns have to be separate because there's a SUBREG_TO_REG in the output
// Matches "combine(low, narrow-op(Rn, Rm))" and selects the "2" form,
// moving the low half in through SUBREG_TO_REG.
2588 class NarrowHighHalfPat<Instruction INST, ValueType DstTy, ValueType SrcTy,
2589 SDPatternOperator coreop>
2590 : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2591 (v1i64 (bitconvert (DstTy (coreop (SrcTy VPR128:$Rn),
2592 (SrcTy VPR128:$Rm)))))),
2593 (INST (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2594 VPR128:$Rn, VPR128:$Rm)>;
2597 def : NarrowHighHalfPat<ADDHN2vvv_16b8h, v8i8, v8i16,
2598 BinOpFrag<(NI_get_hi_8h (add node:$LHS, node:$RHS))>>;
2599 def : NarrowHighHalfPat<ADDHN2vvv_8h4s, v4i16, v4i32,
2600 BinOpFrag<(NI_get_hi_4s (add node:$LHS, node:$RHS))>>;
2601 def : NarrowHighHalfPat<ADDHN2vvv_4s2d, v2i32, v2i64,
2602 BinOpFrag<(NI_get_hi_2d (add node:$LHS, node:$RHS))>>;
2605 def : NarrowHighHalfPat<SUBHN2vvv_16b8h, v8i8, v8i16,
2606 BinOpFrag<(NI_get_hi_8h (sub node:$LHS, node:$RHS))>>;
2607 def : NarrowHighHalfPat<SUBHN2vvv_8h4s, v4i16, v4i32,
2608 BinOpFrag<(NI_get_hi_4s (sub node:$LHS, node:$RHS))>>;
2609 def : NarrowHighHalfPat<SUBHN2vvv_4s2d, v2i32, v2i64,
2610 BinOpFrag<(NI_get_hi_2d (sub node:$LHS, node:$RHS))>>;
2613 def : NarrowHighHalfPat<RADDHN2vvv_16b8h, v8i8, v8i16, int_arm_neon_vraddhn>;
2614 def : NarrowHighHalfPat<RADDHN2vvv_8h4s, v4i16, v4i32, int_arm_neon_vraddhn>;
2615 def : NarrowHighHalfPat<RADDHN2vvv_4s2d, v2i32, v2i64, int_arm_neon_vraddhn>;
2618 def : NarrowHighHalfPat<RSUBHN2vvv_16b8h, v8i8, v8i16, int_arm_neon_vrsubhn>;
2619 def : NarrowHighHalfPat<RSUBHN2vvv_8h4s, v4i16, v4i32, int_arm_neon_vrsubhn>;
2620 def : NarrowHighHalfPat<RSUBHN2vvv_4s2d, v2i32, v2i64, int_arm_neon_vrsubhn>;
2622 // pattern that need to extend result
// Operation runs at the narrow type (OpSTy is the scalar-element vector
// type of the operation result) and the result is zero-extended to ResTy.
2623 class NeonI_3VDL_Ext<bit q, bit u, bits<2> size, bits<4> opcode,
2624 string asmop, string ResS, string OpS,
2625 SDPatternOperator opnode,
2626 RegisterOperand OpVPR,
2627 ValueType ResTy, ValueType OpTy, ValueType OpSTy>
2628 : NeonI_3VDiff<q, u, size, opcode,
2629 (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2630 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2631 [(set (ResTy VPR128:$Rd),
2632 (ResTy (zext (OpSTy (opnode (OpTy OpVPR:$Rn),
2633 (OpTy OpVPR:$Rm))))))],
2636 multiclass NeonI_3VDL_zext<bit u, bits<4> opcode,
2637 string asmop, SDPatternOperator opnode,
2640 let isCommutable = Commutable in {
2641 def _8h8b : NeonI_3VDL_Ext<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2642 opnode, VPR64, v8i16, v8i8, v8i8>;
2643 def _4s4h : NeonI_3VDL_Ext<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2644 opnode, VPR64, v4i32, v4i16, v4i16>;
2645 def _2d2s : NeonI_3VDL_Ext<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2646 opnode, VPR64, v2i64, v2i32, v2i32>;
2650 defm SABDLvvv : NeonI_3VDL_zext<0b0, 0b0111, "sabdl", int_arm_neon_vabds, 1>;
2651 defm UABDLvvv : NeonI_3VDL_zext<0b1, 0b0111, "uabdl", int_arm_neon_vabdu, 1>;
// PatFrags applying `op` to the HIGH halves of two 128-bit operands;
// used to build the "*2" (second-half) instruction patterns.
2653 multiclass NeonI_Op_High<SDPatternOperator op>
2655 def _16B : PatFrag<(ops node:$Rn, node:$Rm),
2656 (op (v8i8 (Neon_High16B node:$Rn)), (v8i8 (Neon_High16B node:$Rm)))>;
2657 def _8H : PatFrag<(ops node:$Rn, node:$Rm),
2658 (op (v4i16 (Neon_High8H node:$Rn)), (v4i16 (Neon_High8H node:$Rm)))>;
2659 def _4S : PatFrag<(ops node:$Rn, node:$Rm),
2660 (op (v2i32 (Neon_High4S node:$Rn)), (v2i32 (Neon_High4S node:$Rm)))>;
2664 defm NI_sabdl_hi : NeonI_Op_High<int_arm_neon_vabds>;
2665 defm NI_uabdl_hi : NeonI_Op_High<int_arm_neon_vabdu>;
2666 defm NI_smull_hi : NeonI_Op_High<int_arm_neon_vmulls>;
2667 defm NI_umull_hi : NeonI_Op_High<int_arm_neon_vmullu>;
2668 defm NI_qdmull_hi : NeonI_Op_High<int_arm_neon_vqdmull>;
2669 defm NI_pmull_hi : NeonI_Op_High<int_arm_neon_vmullp>;
// Second-part (high-half) absolute-difference long: `opnode` is the string
// name of a NeonI_Op_High PatFrag family, resolved per arrangement via !cast.
2671 multiclass NeonI_3VDL_Abd_u<bit u, bits<4> opcode,
2672 string asmop, string opnode,
2675 let isCommutable = Commutable in {
2676 def _8h8b : NeonI_3VDL_Ext<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2677 !cast<PatFrag>(opnode # "_16B"),
2678 VPR128, v8i16, v16i8, v8i8>;
2679 def _4s4h : NeonI_3VDL_Ext<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2680 !cast<PatFrag>(opnode # "_8H"),
2681 VPR128, v4i32, v8i16, v4i16>;
2682 def _2d2s : NeonI_3VDL_Ext<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2683 !cast<PatFrag>(opnode # "_4S"),
2684 VPR128, v2i64, v4i32, v2i32>;
// SABDL2/UABDL2: absolute-difference long, upper halves.
2688 defm SABDL2vvv : NeonI_3VDL_Abd_u<0b0, 0b0111, "sabdl2", "NI_sabdl_hi", 1>;
2689 defm UABDL2vvv : NeonI_3VDL_Abd_u<0b1, 0b0111, "uabdl2", "NI_uabdl_hi", 1>;
2691 // For patterns that need two operators chained (accumulate of a widened op).
// Accumulating long form: destination also appears as the $src input
// (tied via Constraints below); the zero-extended `subop` result is combined
// with the accumulator by `opnode`.
2692 class NeonI_3VDL_Aba<bit q, bit u, bits<2> size, bits<4> opcode,
2693 string asmop, string ResS, string OpS,
2694 SDPatternOperator opnode, SDPatternOperator subop,
2695 RegisterOperand OpVPR,
2696 ValueType ResTy, ValueType OpTy, ValueType OpSTy>
2697 : NeonI_3VDiff<q, u, size, opcode,
2698 (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
2699 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2700 [(set (ResTy VPR128:$Rd),
2702 (ResTy VPR128:$src),
2703 (ResTy (zext (OpSTy (subop (OpTy OpVPR:$Rn),
2704 (OpTy OpVPR:$Rm))))))))],
2706 let Constraints = "$src = $Rd";
// First-half arrangements of the accumulating absolute-difference long form.
2709 multiclass NeonI_3VDL_Aba_v1<bit u, bits<4> opcode,
2710 string asmop, SDPatternOperator opnode,
2711 SDPatternOperator subop>
2713 def _8h8b : NeonI_3VDL_Aba<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2714 opnode, subop, VPR64, v8i16, v8i8, v8i8>;
2715 def _4s4h : NeonI_3VDL_Aba<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2716 opnode, subop, VPR64, v4i32, v4i16, v4i16>;
2717 def _2d2s : NeonI_3VDL_Aba<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2718 opnode, subop, VPR64, v2i64, v2i32, v2i32>;
// SABAL/UABAL: signed/unsigned absolute-difference accumulate long.
2721 defm SABALvvv : NeonI_3VDL_Aba_v1<0b0, 0b0101, "sabal",
2722 add, int_arm_neon_vabds>;
2723 defm UABALvvv : NeonI_3VDL_Aba_v1<0b1, 0b0101, "uabal",
2724 add, int_arm_neon_vabdu>;
// Second-part (high-half) accumulating absolute-difference long; `subop`
// names a NeonI_Op_High PatFrag family, resolved per arrangement.
2726 multiclass NeonI_3VDL2_Aba_v1<bit u, bits<4> opcode,
2727 string asmop, SDPatternOperator opnode,
2730 def _8h8b : NeonI_3VDL_Aba<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2731 opnode, !cast<PatFrag>(subop # "_16B"),
2732 VPR128, v8i16, v16i8, v8i8>;
2733 def _4s4h : NeonI_3VDL_Aba<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2734 opnode, !cast<PatFrag>(subop # "_8H"),
2735 VPR128, v4i32, v8i16, v4i16>;
2736 def _2d2s : NeonI_3VDL_Aba<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2737 opnode, !cast<PatFrag>(subop # "_4S"),
2738 VPR128, v2i64, v4i32, v2i32>;
// SABAL2/UABAL2: absolute-difference accumulate long, upper halves.
2741 defm SABAL2vvv : NeonI_3VDL2_Aba_v1<0b0, 0b0101, "sabal2", add,
2743 defm UABAL2vvv : NeonI_3VDL2_Aba_v1<0b1, 0b0101, "uabal2", add,
2746 // Long pattern with 2 operands
// First-half widening multiply forms (64-bit sources, 128-bit destination).
2747 multiclass NeonI_3VDL_2Op<bit u, bits<4> opcode,
2748 string asmop, SDPatternOperator opnode,
2751 let isCommutable = Commutable in {
2752 def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2753 opnode, VPR128, VPR64, v8i16, v8i8>;
2754 def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2755 opnode, VPR128, VPR64, v4i32, v4i16>;
2756 def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2757 opnode, VPR128, VPR64, v2i64, v2i32>;
// SMULL/UMULL: signed/unsigned multiply long (first halves).
2761 defm SMULLvvv : NeonI_3VDL_2Op<0b0, 0b1100, "smull", int_arm_neon_vmulls, 1>;
2762 defm UMULLvvv : NeonI_3VDL_2Op<0b1, 0b1100, "umull", int_arm_neon_vmullu, 1>;
// Second-part widening multiply: both sources are 128-bit; `opnode` is
// expected to select the operation on the sources' high halves.
2764 class NeonI_3VDL2_2Op_mull<bit q, bit u, bits<2> size, bits<4> opcode,
2765 string asmop, string ResS, string OpS,
2766 SDPatternOperator opnode,
2767 ValueType ResTy, ValueType OpTy>
2768 : NeonI_3VDiff<q, u, size, opcode,
2769 (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
2770 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2771 [(set (ResTy VPR128:$Rd),
2772 (ResTy (opnode (OpTy VPR128:$Rn), (OpTy VPR128:$Rm))))],
// Arrangements for the integer second-part multiply-long instructions.
2776 multiclass NeonI_3VDL2_2Op_mull_v1<bit u, bits<4> opcode,
2781 let isCommutable = Commutable in {
2782 def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2783 !cast<PatFrag>(opnode # "_16B"),
2785 def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2786 !cast<PatFrag>(opnode # "_8H"),
2788 def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2789 !cast<PatFrag>(opnode # "_4S"),
// SMULL2/UMULL2: multiply long, upper halves.
2794 defm SMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b0, 0b1100, "smull2",
2796 defm UMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b1, 0b1100, "umull2",
2799 // Long pattern with 3 operands
// Accumulating widening form: $src is tied to $Rd (see Constraints) and
// `opnode` is a 3-operand PatFrag (accumulator, Rn, Rm), e.g. Neon_smlal.
2800 class NeonI_3VDL_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
2801 string asmop, string ResS, string OpS,
2802 SDPatternOperator opnode,
2803 ValueType ResTy, ValueType OpTy>
2804 : NeonI_3VDiff<q, u, size, opcode,
2805 (outs VPR128:$Rd), (ins VPR128:$src, VPR64:$Rn, VPR64:$Rm),
2806 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2807 [(set (ResTy VPR128:$Rd),
2809 (ResTy VPR128:$src),
2810 (OpTy VPR64:$Rn), (OpTy VPR64:$Rm))))],
2812 let Constraints = "$src = $Rd";
// First-half arrangements of the multiply-accumulate/subtract long form.
2815 multiclass NeonI_3VDL_3Op_v1<bit u, bits<4> opcode,
2816 string asmop, SDPatternOperator opnode>
2818 def _8h8b : NeonI_3VDL_3Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2819 opnode, v8i16, v8i8>;
2820 def _4s4h : NeonI_3VDL_3Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2821 opnode, v4i32, v4i16>;
2822 def _2d2s : NeonI_3VDL_3Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2823 opnode, v2i64, v2i32>;
// PatFrags combining an accumulator with a signed/unsigned multiply-long.
// NOTE(review): the accumulate/subtract operand line of each PatFrag body is
// not visible in this chunk — confirm add/sub shape against the full source.
2826 def Neon_smlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
2828 (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
2830 def Neon_umlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
2832 (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
2834 def Neon_smlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
2836 (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
2838 def Neon_umlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
2840 (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
// SMLAL/UMLAL/SMLSL/UMLSL: multiply-accumulate/subtract long (first halves).
2842 defm SMLALvvv : NeonI_3VDL_3Op_v1<0b0, 0b1000, "smlal", Neon_smlal>;
2843 defm UMLALvvv : NeonI_3VDL_3Op_v1<0b1, 0b1000, "umlal", Neon_umlal>;
2845 defm SMLSLvvv : NeonI_3VDL_3Op_v1<0b0, 0b1010, "smlsl", Neon_smlsl>;
2846 defm UMLSLvvv : NeonI_3VDL_3Op_v1<0b1, 0b1010, "umlsl", Neon_umlsl>;
// Accumulating second-part form: `subop` (add/sub) combines the tied
// accumulator with the result of `opnode` on the 128-bit sources.
2848 class NeonI_3VDL2_3Op_mlas<bit q, bit u, bits<2> size, bits<4> opcode,
2849 string asmop, string ResS, string OpS,
2850 SDPatternOperator subop, SDPatternOperator opnode,
2851 RegisterOperand OpVPR,
2852 ValueType ResTy, ValueType OpTy>
2853 : NeonI_3VDiff<q, u, size, opcode,
2854 (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
2855 asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2856 [(set (ResTy VPR128:$Rd),
2858 (ResTy VPR128:$src),
2859 (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))))],
2861 let Constraints = "$src = $Rd";
// Arrangements for the integer second-part multiply-accumulate/subtract.
2864 multiclass NeonI_3VDL2_3Op_mlas_v1<bit u, bits<4> opcode,
2866 SDPatternOperator subop,
2869 def _8h16b : NeonI_3VDL2_3Op_mlas<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2870 subop, !cast<PatFrag>(opnode # "_16B"),
2871 VPR128, v8i16, v16i8>;
2872 def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2873 subop, !cast<PatFrag>(opnode # "_8H"),
2874 VPR128, v4i32, v8i16>;
2875 def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2876 subop, !cast<PatFrag>(opnode # "_4S"),
2877 VPR128, v2i64, v4i32>;
// SMLAL2/UMLAL2 accumulate (add); SMLSL2/UMLSL2 subtract (sub).
2880 defm SMLAL2vvv : NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1000, "smlal2",
2881 add, "NI_smull_hi">;
2882 defm UMLAL2vvv : NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1000, "umlal2",
2883 add, "NI_umull_hi">;
2885 defm SMLSL2vvv : NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1010, "smlsl2",
2886 sub, "NI_smull_hi">;
2887 defm UMLSL2vvv : NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1010, "umlsl2",
2888 sub, "NI_umull_hi">;
// SQDMLAL/SQDMLSL (first halves): saturating-doubling multiply long combined
// with a saturating add/sub (`opnode`) into the tied accumulator.
// Only 4h->4s and 2d<-2s arrangements exist (no byte form for qdmull).
2890 multiclass NeonI_3VDL_qdmlal_3Op_v2<bit u, bits<4> opcode,
2891 string asmop, SDPatternOperator opnode>
2893 def _4s4h : NeonI_3VDL2_3Op_mlas<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2894 opnode, int_arm_neon_vqdmull,
2895 VPR64, v4i32, v4i16>;
2896 def _2d2s : NeonI_3VDL2_3Op_mlas<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2897 opnode, int_arm_neon_vqdmull,
2898 VPR64, v2i64, v2i32>;
2901 defm SQDMLALvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1001, "sqdmlal",
2902 int_arm_neon_vqadds>;
2903 defm SQDMLSLvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1011, "sqdmlsl",
2904 int_arm_neon_vqsubs>;
// SQDMULL (first halves): saturating-doubling multiply long, 2 operands.
2906 multiclass NeonI_3VDL_v2<bit u, bits<4> opcode,
2907 string asmop, SDPatternOperator opnode,
2910 let isCommutable = Commutable in {
2911 def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2912 opnode, VPR128, VPR64, v4i32, v4i16>;
2913 def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2914 opnode, VPR128, VPR64, v2i64, v2i32>;
2918 defm SQDMULLvvv : NeonI_3VDL_v2<0b0, 0b1101, "sqdmull",
2919 int_arm_neon_vqdmull, 1>;
// SQDMULL2: saturating-doubling multiply long on the upper halves.
2921 multiclass NeonI_3VDL2_2Op_mull_v2<bit u, bits<4> opcode,
2926 let isCommutable = Commutable in {
2927 def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2928 !cast<PatFrag>(opnode # "_8H"),
2930 def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2931 !cast<PatFrag>(opnode # "_4S"),
2936 defm SQDMULL2vvv : NeonI_3VDL2_2Op_mull_v2<0b0, 0b1101, "sqdmull2",
// SQDMLAL2/SQDMLSL2: upper-half saturating-doubling multiply accumulate.
2939 multiclass NeonI_3VDL2_3Op_qdmlal_v2<bit u, bits<4> opcode,
2941 SDPatternOperator opnode>
2943 def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2944 opnode, NI_qdmull_hi_8H,
2945 VPR128, v4i32, v8i16>;
2946 def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2947 opnode, NI_qdmull_hi_4S,
2948 VPR128, v2i64, v4i32>;
2951 defm SQDMLAL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1001, "sqdmlal2",
2952 int_arm_neon_vqadds>;
2953 defm SQDMLSL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1011, "sqdmlsl2",
2954 int_arm_neon_vqsubs>;
// PMULL: polynomial multiply long — byte arrangement only (8b->8h).
2956 multiclass NeonI_3VDL_v3<bit u, bits<4> opcode,
2957 string asmop, SDPatternOperator opnode,
2960 let isCommutable = Commutable in {
2961 def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2962 opnode, VPR128, VPR64, v8i16, v8i8>;
2966 defm PMULLvvv : NeonI_3VDL_v3<0b0, 0b1110, "pmull", int_arm_neon_vmullp, 1>;
// PMULL2: polynomial multiply long on the upper halves (16b->8h).
2968 multiclass NeonI_3VDL2_2Op_mull_v3<bit u, bits<4> opcode,
2973 let isCommutable = Commutable in {
2974 def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2975 !cast<PatFrag>(opnode # "_16B"),
2980 defm PMULL2vvv : NeonI_3VDL2_2Op_mull_v3<0b0, 0b1110, "pmull2",
2983 // End of implementation for instruction class (3V Diff)
2985 // Scalar Arithmetic
// Scalar three-same-register form, D (64-bit) size only.
2987 class NeonI_Scalar3Same_D_size<bit u, bits<5> opcode, string asmop>
2988 : NeonI_Scalar3Same<u, 0b11, opcode,
2989 (outs FPR64:$Rd), (ins FPR64:$Rn, FPR64:$Rm),
2990 !strconcat(asmop, " $Rd, $Rn, $Rm"),
// Scalar three-same-register form at all four element sizes (B/H/S/D).
2994 multiclass NeonI_Scalar3Same_BHSD_sizes<bit u, bits<5> opcode,
2995 string asmop, bit Commutable = 0>
2997 let isCommutable = Commutable in {
2998 def bbb : NeonI_Scalar3Same<u, 0b00, opcode,
2999 (outs FPR8:$Rd), (ins FPR8:$Rn, FPR8:$Rm),
3000 !strconcat(asmop, " $Rd, $Rn, $Rm"),
3003 def hhh : NeonI_Scalar3Same<u, 0b01, opcode,
3004 (outs FPR16:$Rd), (ins FPR16:$Rn, FPR16:$Rm),
3005 !strconcat(asmop, " $Rd, $Rn, $Rm"),
3008 def sss : NeonI_Scalar3Same<u, 0b10, opcode,
3009 (outs FPR32:$Rd), (ins FPR32:$Rn, FPR32:$Rm),
3010 !strconcat(asmop, " $Rd, $Rn, $Rm"),
3013 def ddd : NeonI_Scalar3Same<u, 0b11, opcode,
3014 (outs FPR64:$Rd), (ins FPR64:$Rn, FPR64:$Rm),
3015 !strconcat(asmop, " $Rd, $Rn, $Rm"),
// Selects a 1-element-vector (v1i64) op onto the given D-register instruction.
3021 multiclass Neon_Scalar_D_size_patterns<SDPatternOperator opnode,
3022 Instruction INSTD> {
3023 def : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
3024 (INSTD FPR64:$Rn, FPR64:$Rm)>;
// As above, plus v1i8/v1i16/v1i32 mappings onto the B/H/S instructions.
3027 multiclass Neon_Scalar_BHSD_size_patterns<SDPatternOperator opnode,
3028 Instruction INSTB, Instruction INSTH,
3029 Instruction INSTS, Instruction INSTD>
3030 : Neon_Scalar_D_size_patterns<opnode, INSTD> {
3031 def: Pat<(v1i8 (opnode (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
3032 (INSTB FPR8:$Rn, FPR8:$Rm)>;
3034 def: Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
3035 (INSTH FPR16:$Rn, FPR16:$Rm)>;
3037 def: Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
3038 (INSTS FPR32:$Rn, FPR32:$Rm)>;
3041 // Scalar Integer Add
3042 let isCommutable = 1 in {
3043 def ADDddd : NeonI_Scalar3Same_D_size<0b0, 0b10000, "add">;
3046 // Scalar Integer Sub
3047 def SUBddd : NeonI_Scalar3Same_D_size<0b1, 0b10000, "sub">;
3049 // Pattern for Scalar Integer Add and Sub with D register only
3050 defm : Neon_Scalar_D_size_patterns<add, ADDddd>;
3051 defm : Neon_Scalar_D_size_patterns<sub, SUBddd>;
3053 // Patterns to match llvm.aarch64.* intrinsic for Scalar Add, Sub
// Signed and unsigned variants map to the same instruction for add/sub.
3054 defm : Neon_Scalar_D_size_patterns<int_aarch64_neon_vaddds, ADDddd>;
3055 defm : Neon_Scalar_D_size_patterns<int_aarch64_neon_vadddu, ADDddd>;
3056 defm : Neon_Scalar_D_size_patterns<int_aarch64_neon_vsubds, SUBddd>;
3057 defm : Neon_Scalar_D_size_patterns<int_aarch64_neon_vsubdu, SUBddd>;
3059 // Scalar Integer Saturating Add (Signed, Unsigned)
3060 defm SQADD : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00001, "sqadd", 1>;
3061 defm UQADD : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00001, "uqadd", 1>;
3063 // Scalar Integer Saturating Sub (Signed, Unsigned)
3064 defm SQSUB : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00101, "sqsub", 0>;
3065 defm UQSUB : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00101, "uqsub", 0>;
3067 // Patterns to match llvm.arm.* intrinsic for
3068 // Scalar Integer Saturating Add, Sub (Signed, Unsigned)
3069 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vqadds, SQADDddd>;
3070 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vqaddu, UQADDddd>;
3071 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vqsubs, SQSUBddd>;
3072 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vqsubu, UQSUBddd>;
3074 // Patterns to match llvm.aarch64.* intrinsic for
3075 // Scalar Integer Saturating Add, Sub (Signed, Unsigned)
3076 defm : Neon_Scalar_BHSD_size_patterns<int_aarch64_neon_vqadds, SQADDbbb, SQADDhhh,
3077 SQADDsss, SQADDddd>;
3078 defm : Neon_Scalar_BHSD_size_patterns<int_aarch64_neon_vqaddu, UQADDbbb, UQADDhhh,
3079 UQADDsss, UQADDddd>;
3080 defm : Neon_Scalar_BHSD_size_patterns<int_aarch64_neon_vqsubs, SQSUBbbb, SQSUBhhh,
3081 SQSUBsss, SQSUBddd>;
3082 defm : Neon_Scalar_BHSD_size_patterns<int_aarch64_neon_vqsubu, UQSUBbbb, UQSUBhhh,
3083 UQSUBsss, UQSUBddd>;
3085 // Scalar Integer Shift Left (Signed, Unsigned)
3086 def SSHLddd : NeonI_Scalar3Same_D_size<0b0, 0b01000, "sshl">;
3087 def USHLddd : NeonI_Scalar3Same_D_size<0b1, 0b01000, "ushl">;
3089 // Patterns to match llvm.arm.* intrinsic for
3090 // Scalar Integer Shift Left (Signed, Unsigned)
3091 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vshifts, SSHLddd>;
3092 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vshiftu, USHLddd>;
3094 // Patterns to match llvm.aarch64.* intrinsic for
3095 // Scalar Integer Shift Left (Signed, Unsigned)
3096 defm : Neon_Scalar_D_size_patterns<int_aarch64_neon_vshlds, SSHLddd>;
3097 defm : Neon_Scalar_D_size_patterns<int_aarch64_neon_vshldu, USHLddd>;
3099 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
3100 defm SQSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01001, "sqshl", 0>;
3101 defm UQSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01001, "uqshl", 0>;
3103 // Patterns to match llvm.aarch64.* intrinsic for
3104 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
3105 defm : Neon_Scalar_BHSD_size_patterns<int_aarch64_neon_vqshls, SQSHLbbb, SQSHLhhh,
3106 SQSHLsss, SQSHLddd>;
3107 defm : Neon_Scalar_BHSD_size_patterns<int_aarch64_neon_vqshlu, UQSHLbbb, UQSHLhhh,
3108 UQSHLsss, UQSHLddd>;
3110 // Patterns to match llvm.arm.* intrinsic for
3111 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
3112 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vqshifts, SQSHLddd>;
3113 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vqshiftu, UQSHLddd>;
3115 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
3116 def SRSHLddd: NeonI_Scalar3Same_D_size<0b0, 0b01010, "srshl">;
3117 def URSHLddd: NeonI_Scalar3Same_D_size<0b1, 0b01010, "urshl">;
3119 // Patterns to match llvm.aarch64.* intrinsic for
3120 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
3121 defm : Neon_Scalar_D_size_patterns<int_aarch64_neon_vrshlds, SRSHLddd>;
3122 defm : Neon_Scalar_D_size_patterns<int_aarch64_neon_vrshldu, URSHLddd>;
3124 // Patterns to match llvm.arm.* intrinsic for
3125 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
3126 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vrshifts, SRSHLddd>;
3127 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vrshiftu, URSHLddd>;
3129 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
3130 defm SQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01011, "sqrshl", 0>;
3131 defm UQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01011, "uqrshl", 0>;
3133 // Patterns to match llvm.aarch64.* intrinsic for
3134 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
3135 defm : Neon_Scalar_BHSD_size_patterns<int_aarch64_neon_vqrshls, SQRSHLbbb, SQRSHLhhh,
3136 SQRSHLsss, SQRSHLddd>;
3137 defm : Neon_Scalar_BHSD_size_patterns<int_aarch64_neon_vqrshlu, UQRSHLbbb, UQRSHLhhh,
3138 UQRSHLsss, UQRSHLddd>;
3140 // Patterns to match llvm.arm.* intrinsic for
3141 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
3142 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vqrshifts, SQRSHLddd>;
3143 defm : Neon_Scalar_D_size_patterns<int_arm_neon_vqrshiftu, UQRSHLddd>;
3145 // Scalar Reduce Pairwise
// Pairwise reduce across a 2-element vector into one scalar register.
3147 multiclass NeonI_ScalarPair_D_sizes<bit u, bit size, bits<5> opcode,
3148 string asmop, bit Commutable = 0> {
3149 let isCommutable = Commutable in {
3150 def _D_2D : NeonI_ScalarPair<u, {size, 0b1}, opcode,
3151 (outs FPR64:$Rd), (ins VPR128:$Rn),
3152 !strconcat(asmop, " $Rd, $Rn.2d"),
// Adds the 32-bit (S from 2S) form on top of the D form above.
3158 multiclass NeonI_ScalarPair_SD_sizes<bit u, bit size, bits<5> opcode,
3159 string asmop, bit Commutable = 0>
3160 : NeonI_ScalarPair_D_sizes<u, size, opcode, asmop, Commutable> {
3161 let isCommutable = Commutable in {
3162 def _S_2S : NeonI_ScalarPair<u, {size, 0b0}, opcode,
3163 (outs FPR32:$Rd), (ins VPR64:$Rn),
3164 !strconcat(asmop, " $Rd, $Rn.2s"),
3170 // Scalar Reduce Addition Pairwise (Integer) with
3171 // Pattern to match llvm.arm.* intrinsic
3172 defm ADDPvv : NeonI_ScalarPair_D_sizes<0b0, 0b1, 0b11011, "addp", 0>;
3174 // Pattern to match llvm.aarch64.* intrinsic for
3175 // Scalar Reduce Addition Pairwise (Integer)
3176 def : Pat<(v1i64 (int_aarch64_neon_vpadd (v2i64 VPR128:$Rn))),
3177 (ADDPvv_D_2D VPR128:$Rn)>;
3179 // Scalar Reduce Addition Pairwise (Floating Point)
3180 defm FADDPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01101, "faddp", 0>;
3182 // Scalar Reduce Maximum Pairwise (Floating Point)
3183 defm FMAXPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01111, "fmaxp", 0>;
3185 // Scalar Reduce Minimum Pairwise (Floating Point)
3186 defm FMINPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01111, "fminp", 0>;
3188 // Scalar Reduce maxNum Pairwise (Floating Point)
3189 defm FMAXNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01100, "fmaxnmp", 0>;
3191 // Scalar Reduce minNum Pairwise (Floating Point)
3192 defm FMINNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01100, "fminnmp", 0>;
// Maps the S (v2f32->v1f32) and D (v2f64->v1f64) pairwise intrinsics onto
// the corresponding instructions.
3194 multiclass Neon_ScalarPair_SD_size_patterns<SDPatternOperator opnodeS,
3195 SDPatternOperator opnodeD,
3197 Instruction INSTD> {
3198 def : Pat<(v1f32 (opnodeS (v2f32 VPR64:$Rn))),
3200 def : Pat<(v1f64 (opnodeD (v2f64 VPR128:$Rn))),
3201 (INSTD VPR128:$Rn)>;
3204 // Patterns to match llvm.aarch64.* intrinsic for
3205 // Scalar Reduce Add, Max, Min, MaxNum, MinNum Pairwise (Floating Point)
3206 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfadd,
3207 int_aarch64_neon_vpfaddq, FADDPvv_S_2S, FADDPvv_D_2D>;
3209 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmax,
3210 int_aarch64_neon_vpmaxq, FMAXPvv_S_2S, FMAXPvv_D_2D>;
3212 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmin,
3213 int_aarch64_neon_vpminq, FMINPvv_S_2S, FMINPvv_D_2D>;
3215 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfmaxnm,
3216 int_aarch64_neon_vpfmaxnmq, FMAXNMPvv_S_2S, FMAXNMPvv_D_2D>;
3218 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfminnm,
3219 int_aarch64_neon_vpfminnmq, FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
3223 //===----------------------------------------------------------------------===//
3224 // Non-Instruction Patterns
3225 //===----------------------------------------------------------------------===//
3227 // 64-bit vector bitcasts...
// All same-width bitconverts are no-ops on the register file: the pattern
// result is just the source register reinterpreted at the new type.
3229 def : Pat<(v1i64 (bitconvert (v8i8 VPR64:$src))), (v1i64 VPR64:$src)>;
3230 def : Pat<(v2f32 (bitconvert (v8i8 VPR64:$src))), (v2f32 VPR64:$src)>;
3231 def : Pat<(v2i32 (bitconvert (v8i8 VPR64:$src))), (v2i32 VPR64:$src)>;
3232 def : Pat<(v4i16 (bitconvert (v8i8 VPR64:$src))), (v4i16 VPR64:$src)>;
3234 def : Pat<(v1i64 (bitconvert (v4i16 VPR64:$src))), (v1i64 VPR64:$src)>;
3235 def : Pat<(v2i32 (bitconvert (v4i16 VPR64:$src))), (v2i32 VPR64:$src)>;
3236 def : Pat<(v2f32 (bitconvert (v4i16 VPR64:$src))), (v2f32 VPR64:$src)>;
3237 def : Pat<(v8i8 (bitconvert (v4i16 VPR64:$src))), (v8i8 VPR64:$src)>;
3239 def : Pat<(v1i64 (bitconvert (v2i32 VPR64:$src))), (v1i64 VPR64:$src)>;
3240 def : Pat<(v2f32 (bitconvert (v2i32 VPR64:$src))), (v2f32 VPR64:$src)>;
3241 def : Pat<(v4i16 (bitconvert (v2i32 VPR64:$src))), (v4i16 VPR64:$src)>;
3242 def : Pat<(v8i8 (bitconvert (v2i32 VPR64:$src))), (v8i8 VPR64:$src)>;
3244 def : Pat<(v1i64 (bitconvert (v2f32 VPR64:$src))), (v1i64 VPR64:$src)>;
3245 def : Pat<(v2i32 (bitconvert (v2f32 VPR64:$src))), (v2i32 VPR64:$src)>;
3246 def : Pat<(v4i16 (bitconvert (v2f32 VPR64:$src))), (v4i16 VPR64:$src)>;
3247 def : Pat<(v8i8 (bitconvert (v2f32 VPR64:$src))), (v8i8 VPR64:$src)>;
3249 def : Pat<(v2f32 (bitconvert (v1i64 VPR64:$src))), (v2f32 VPR64:$src)>;
3250 def : Pat<(v2i32 (bitconvert (v1i64 VPR64:$src))), (v2i32 VPR64:$src)>;
3251 def : Pat<(v4i16 (bitconvert (v1i64 VPR64:$src))), (v4i16 VPR64:$src)>;
3252 def : Pat<(v8i8 (bitconvert (v1i64 VPR64:$src))), (v8i8 VPR64:$src)>;
3254 // ..and 128-bit vector bitcasts...
3256 def : Pat<(v2f64 (bitconvert (v16i8 VPR128:$src))), (v2f64 VPR128:$src)>;
3257 def : Pat<(v2i64 (bitconvert (v16i8 VPR128:$src))), (v2i64 VPR128:$src)>;
3258 def : Pat<(v4f32 (bitconvert (v16i8 VPR128:$src))), (v4f32 VPR128:$src)>;
3259 def : Pat<(v4i32 (bitconvert (v16i8 VPR128:$src))), (v4i32 VPR128:$src)>;
3260 def : Pat<(v8i16 (bitconvert (v16i8 VPR128:$src))), (v8i16 VPR128:$src)>;
3262 def : Pat<(v2f64 (bitconvert (v8i16 VPR128:$src))), (v2f64 VPR128:$src)>;
3263 def : Pat<(v2i64 (bitconvert (v8i16 VPR128:$src))), (v2i64 VPR128:$src)>;
3264 def : Pat<(v4i32 (bitconvert (v8i16 VPR128:$src))), (v4i32 VPR128:$src)>;
3265 def : Pat<(v4f32 (bitconvert (v8i16 VPR128:$src))), (v4f32 VPR128:$src)>;
3266 def : Pat<(v16i8 (bitconvert (v8i16 VPR128:$src))), (v16i8 VPR128:$src)>;
3268 def : Pat<(v2f64 (bitconvert (v4i32 VPR128:$src))), (v2f64 VPR128:$src)>;
3269 def : Pat<(v2i64 (bitconvert (v4i32 VPR128:$src))), (v2i64 VPR128:$src)>;
3270 def : Pat<(v4f32 (bitconvert (v4i32 VPR128:$src))), (v4f32 VPR128:$src)>;
3271 def : Pat<(v8i16 (bitconvert (v4i32 VPR128:$src))), (v8i16 VPR128:$src)>;
3272 def : Pat<(v16i8 (bitconvert (v4i32 VPR128:$src))), (v16i8 VPR128:$src)>;
3274 def : Pat<(v2f64 (bitconvert (v4f32 VPR128:$src))), (v2f64 VPR128:$src)>;
3275 def : Pat<(v2i64 (bitconvert (v4f32 VPR128:$src))), (v2i64 VPR128:$src)>;
3276 def : Pat<(v4i32 (bitconvert (v4f32 VPR128:$src))), (v4i32 VPR128:$src)>;
3277 def : Pat<(v8i16 (bitconvert (v4f32 VPR128:$src))), (v8i16 VPR128:$src)>;
3278 def : Pat<(v16i8 (bitconvert (v4f32 VPR128:$src))), (v16i8 VPR128:$src)>;
3280 def : Pat<(v2f64 (bitconvert (v2i64 VPR128:$src))), (v2f64 VPR128:$src)>;
3281 def : Pat<(v4f32 (bitconvert (v2i64 VPR128:$src))), (v4f32 VPR128:$src)>;
3282 def : Pat<(v4i32 (bitconvert (v2i64 VPR128:$src))), (v4i32 VPR128:$src)>;
3283 def : Pat<(v8i16 (bitconvert (v2i64 VPR128:$src))), (v8i16 VPR128:$src)>;
3284 def : Pat<(v16i8 (bitconvert (v2i64 VPR128:$src))), (v16i8 VPR128:$src)>;
3286 def : Pat<(v2i64 (bitconvert (v2f64 VPR128:$src))), (v2i64 VPR128:$src)>;
3287 def : Pat<(v4f32 (bitconvert (v2f64 VPR128:$src))), (v4f32 VPR128:$src)>;
3288 def : Pat<(v4i32 (bitconvert (v2f64 VPR128:$src))), (v4i32 VPR128:$src)>;
3289 def : Pat<(v8i16 (bitconvert (v2f64 VPR128:$src))), (v8i16 VPR128:$src)>;
3290 def : Pat<(v16i8 (bitconvert (v2f64 VPR128:$src))), (v16i8 VPR128:$src)>;
3293 // ...and scalar bitcasts...
3294 def : Pat<(f16 (bitconvert (v1i16 FPR16:$src))), (f16 FPR16:$src)>;
3295 def : Pat<(f32 (bitconvert (v1i32 FPR32:$src))), (f32 FPR32:$src)>;
3296 def : Pat<(f64 (bitconvert (v1i64 FPR64:$src))), (f64 FPR64:$src)>;
3297 def : Pat<(f32 (bitconvert (v1f32 FPR32:$src))), (f32 FPR32:$src)>;
3298 def : Pat<(f64 (bitconvert (v1f64 FPR64:$src))), (f64 FPR64:$src)>;
// GPR <-> FPR bitcasts need a real move (FMOV) between register files.
3300 def : Pat<(i64 (bitconvert (v1i64 FPR64:$src))), (FMOVxd $src)>;
3301 def : Pat<(i32 (bitconvert (v1i32 FPR32:$src))), (FMOVws $src)>;
3303 def : Pat<(v8i8 (bitconvert (v1i64 VPR64:$src))), (v8i8 VPR64:$src)>;
3304 def : Pat<(v4i16 (bitconvert (v1i64 VPR64:$src))), (v4i16 VPR64:$src)>;
3305 def : Pat<(v2i32 (bitconvert (v1i64 VPR64:$src))), (v2i32 VPR64:$src)>;
3307 def : Pat<(f64 (bitconvert (v8i8 VPR64:$src))), (f64 VPR64:$src)>;
3308 def : Pat<(f64 (bitconvert (v4i16 VPR64:$src))), (f64 VPR64:$src)>;
3309 def : Pat<(f64 (bitconvert (v2i32 VPR64:$src))), (f64 VPR64:$src)>;
3310 def : Pat<(f64 (bitconvert (v2f32 VPR64:$src))), (f64 VPR64:$src)>;
3311 def : Pat<(f64 (bitconvert (v1i64 VPR64:$src))), (f64 VPR64:$src)>;
3313 def : Pat<(f128 (bitconvert (v16i8 VPR128:$src))), (f128 VPR128:$src)>;
3314 def : Pat<(f128 (bitconvert (v8i16 VPR128:$src))), (f128 VPR128:$src)>;
3315 def : Pat<(f128 (bitconvert (v4i32 VPR128:$src))), (f128 VPR128:$src)>;
3316 def : Pat<(f128 (bitconvert (v2i64 VPR128:$src))), (f128 VPR128:$src)>;
3317 def : Pat<(f128 (bitconvert (v4f32 VPR128:$src))), (f128 VPR128:$src)>;
3318 def : Pat<(f128 (bitconvert (v2f64 VPR128:$src))), (f128 VPR128:$src)>;
3320 def : Pat<(v1i16 (bitconvert (f16 FPR16:$src))), (v1i16 FPR16:$src)>;
3321 def : Pat<(v1i32 (bitconvert (f32 FPR32:$src))), (v1i32 FPR32:$src)>;
3322 def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
3323 def : Pat<(v1f32 (bitconvert (f32 FPR32:$src))), (v1f32 FPR32:$src)>;
3324 def : Pat<(v1f64 (bitconvert (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
3326 def : Pat<(v1i64 (bitconvert (i64 GPR64:$src))), (FMOVdx $src)>;
3327 def : Pat<(v1i32 (bitconvert (i32 GPR32:$src))), (FMOVsw $src)>;
3329 def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
3330 def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
3331 def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
3332 def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
3333 def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
3335 def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))), (v16i8 FPR128:$src)>;
3336 def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))), (v8i16 FPR128:$src)>;
3337 def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))), (v4i32 FPR128:$src)>;
3338 def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))), (v2i64 FPR128:$src)>;
3339 def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))), (v4f32 FPR128:$src)>;
3340 def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))), (v2f64 FPR128:$src)>;
// Bare unsigned-immediate lane-index operands (printed without a '#').
// uimm0 is range-checked here; the wider ones accept any immediate at the
// ImmLeaf level and rely on the parser's match class for range checking.
3342 def neon_uimm0_bare : Operand<i64>,
3343 ImmLeaf<i64, [{return Imm == 0;}]> {
3344 let ParserMatchClass = neon_uimm0_asmoperand;
3345 let PrintMethod = "printNeonUImm8OperandBare";
3348 def neon_uimm1_bare : Operand<i64>,
3349 ImmLeaf<i64, [{(void)Imm; return true;}]> {
3350 let ParserMatchClass = neon_uimm1_asmoperand;
3351 let PrintMethod = "printNeonUImm8OperandBare";
3354 def neon_uimm2_bare : Operand<i64>,
3355 ImmLeaf<i64, [{(void)Imm; return true;}]> {
3356 let ParserMatchClass = neon_uimm2_asmoperand;
3357 let PrintMethod = "printNeonUImm8OperandBare";
3360 def neon_uimm3_bare : Operand<i64>,
3361 ImmLeaf<i64, [{(void)Imm; return true;}]> {
3362 let ParserMatchClass = uimm3_asmoperand;
3363 let PrintMethod = "printNeonUImm8OperandBare";
3366 def neon_uimm4_bare : Operand<i64>,
3367 ImmLeaf<i64, [{(void)Imm; return true;}]> {
3368 let ParserMatchClass = uimm4_asmoperand;
3369 let PrintMethod = "printNeonUImm8OperandBare";
// INS (general register): insert a GPR value into one lane of a 128-bit
// vector; the remaining lanes come from the tied $src operand.
3372 class NeonI_INS_main<string asmop, string Res, ValueType ResTy,
3373 RegisterClass OpGPR, ValueType OpTy, Operand OpImm>
3374 : NeonI_copy<0b1, 0b0, 0b0011,
3375 (outs VPR128:$Rd), (ins VPR128:$src, OpGPR:$Rn, OpImm:$Imm),
3376 asmop # "\t$Rd." # Res # "[$Imm], $Rn",
3377 [(set (ResTy VPR128:$Rd),
3378 (ResTy (vector_insert
3379 (ResTy VPR128:$src),
3384 let Constraints = "$src = $Rd";
3387 // The following are for the instruction class (3V Elem)
// By-element (lane-indexed) accumulating form: $Re supplies the element,
// $Index selects the lane, and the destination is tied to $src.
3391 class NI_2VE<bit q, bit u, bits<2> size, bits<4> opcode,
3392 string asmop, string ResS, string OpS, string EleOpS,
3393 Operand OpImm, RegisterOperand ResVPR,
3394 RegisterOperand OpVPR, RegisterOperand EleOpVPR>
3395 : NeonI_2VElem<q, u, size, opcode,
3396 (outs ResVPR:$Rd), (ins ResVPR:$src, OpVPR:$Rn,
3397 EleOpVPR:$Re, OpImm:$Index),
3398 asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
3399 ", $Re." # EleOpS # "[$Index]",
3405 let Constraints = "$src = $Rd";
// Arrangements of the by-element accumulating form; each def encodes its
// lane index bits and the (possibly restricted) element register field.
3408 multiclass NI_2VE_v1<bit u, bits<4> opcode, string asmop>
3410 // vector register class for element is always 128-bit to cover the max index
3411 def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
3412 neon_uimm2_bare, VPR64, VPR64, VPR128> {
3413 let Inst{11} = {Index{1}};
3414 let Inst{21} = {Index{0}};
3415 let Inst{20-16} = Re;
3418 def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
3419 neon_uimm2_bare, VPR128, VPR128, VPR128> {
3420 let Inst{11} = {Index{1}};
3421 let Inst{21} = {Index{0}};
3422 let Inst{20-16} = Re;
3425 // Index operations on 16-bit(H) elements are restricted to using v0-v15.
3426 def _4h8h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
3427 neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
3428 let Inst{11} = {Index{2}};
3429 let Inst{21} = {Index{1}};
3430 let Inst{20} = {Index{0}};
3431 let Inst{19-16} = Re{3-0};
3434 def _8h8h : NI_2VE<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
3435 neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
3436 let Inst{11} = {Index{2}};
3437 let Inst{21} = {Index{1}};
3438 let Inst{20} = {Index{0}};
3439 let Inst{19-16} = Re{3-0};
// MLA/MLS by element.
3443 defm MLAvve : NI_2VE_v1<0b1, 0b0000, "mla">;
3444 defm MLSvve : NI_2VE_v1<0b1, 0b0100, "mls">;
3446 // Pattern for lane in 128-bit vector
// `coreop` extracts/duplicates the indexed element from $Re before `op`.
3447 class NI_2VE_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
3448 RegisterOperand ResVPR, RegisterOperand OpVPR,
3449 RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
3450 ValueType EleOpTy, SDPatternOperator coreop>
3451 : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
3452 (OpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
3453 (INST ResVPR:$src, OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
3455 // Pattern for lane in 64-bit vector
// The 64-bit element register is widened with SUBREG_TO_REG because the
// instruction's element operand is a 128-bit register class.
3456 class NI_2VE_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
3457 RegisterOperand ResVPR, RegisterOperand OpVPR,
3458 RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
3459 ValueType EleOpTy, SDPatternOperator coreop>
3460 : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
3461 (OpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
3462 (INST ResVPR:$src, OpVPR:$Rn,
3463 (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
// Selection patterns for all arrangements of a by-element accumulating op.
3465 multiclass NI_2VE_v1_pat<string subop, SDPatternOperator op>
3467 def : NI_2VE_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
3468 op, VPR64, VPR64, VPR128, v2i32, v2i32, v4i32,
3469 BinOpFrag<(Neon_vduplane
3470 (Neon_low4S node:$LHS), node:$RHS)>>;
3472 def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
3473 op, VPR128, VPR128, VPR128, v4i32, v4i32, v4i32,
3474 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3476 def : NI_2VE_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
3477 op, VPR64, VPR64, VPR128Lo, v4i16, v4i16, v8i16,
3478 BinOpFrag<(Neon_vduplane
3479 (Neon_low8H node:$LHS), node:$RHS)>>;
3481 def : NI_2VE_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
3482 op, VPR128, VPR128, VPR128Lo, v8i16, v8i16, v8i16,
3483 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3485 // Index can only be half of the max value for lane in 64-bit vector
3487 def : NI_2VE_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
3488 op, VPR64, VPR64, VPR64, v2i32, v2i32, v2i32,
3489 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3491 def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4s"), neon_uimm1_bare,
3492 op, VPR128, VPR128, VPR64, v4i32, v4i32, v2i32,
3493 BinOpFrag<(Neon_vduplane
3494 (Neon_combine_4S node:$LHS, undef),
3497 def : NI_2VE_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
3498 op, VPR64, VPR64, VPR64Lo, v4i16, v4i16, v4i16,
3499 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3501 def : NI_2VE_lane<!cast<Instruction>(subop # "_8h8h"), neon_uimm2_bare,
3502 op, VPR128, VPR128, VPR64Lo, v8i16, v8i16, v4i16,
3503 BinOpFrag<(Neon_vduplane
3504 (Neon_combine_8H node:$LHS, undef),
// Lane patterns for MLA/MLS by element.
3508 defm MLA_lane_v1 : NI_2VE_v1_pat<"MLAvve", Neon_mla>;
3509 defm MLS_lane_v1 : NI_2VE_v1_pat<"MLSvve", Neon_mls>;
// Base class for two-operand (no accumulator) by-element instructions:
// Rd = asmop(Rn, Re[Index]).  Builds the NeonI_2VElem encoding and the
// "asmop\tRd.ResS, Rn.OpS, Re.EleOpS[Index]" assembly string.
3511 class NI_2VE_2op<bit q, bit u, bits<2> size, bits<4> opcode,
3512 string asmop, string ResS, string OpS, string EleOpS,
3513 Operand OpImm, RegisterOperand ResVPR,
3514 RegisterOperand OpVPR, RegisterOperand EleOpVPR>
3515 : NeonI_2VElem<q, u, size, opcode,
3516 (outs ResVPR:$Rd), (ins OpVPR:$Rn,
3517 EleOpVPR:$Re, OpImm:$Index),
3518 asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
3519 ", $Re." # EleOpS # "[$Index]",
// Two-operand by-element instructions, integer variant: 2S/4S shapes use the
// full 5-bit Re field; 4H/8H shapes pack the 3-bit index into Inst{11,21,20}
// and restrict Re to 4 bits (v0-v15 via VPR128Lo).
3526 multiclass NI_2VE_v1_2op<bit u, bits<4> opcode, string asmop>
3528 // vector register class for element is always 128-bit to cover the max index
3529 def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
3530 neon_uimm2_bare, VPR64, VPR64, VPR128> {
3531 let Inst{11} = {Index{1}};
3532 let Inst{21} = {Index{0}};
3533 let Inst{20-16} = Re;
3536 def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
3537 neon_uimm2_bare, VPR128, VPR128, VPR128> {
3538 let Inst{11} = {Index{1}};
3539 let Inst{21} = {Index{0}};
3540 let Inst{20-16} = Re;
3543 // Index operations on 16-bit(H) elements are restricted to using v0-v15.
3544 def _4h8h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
3545 neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
3546 let Inst{11} = {Index{2}};
3547 let Inst{21} = {Index{1}};
3548 let Inst{20} = {Index{0}};
3549 let Inst{19-16} = Re{3-0};
3552 def _8h8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
3553 neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
3554 let Inst{11} = {Index{2}};
3555 let Inst{21} = {Index{1}};
3556 let Inst{20} = {Index{0}};
3557 let Inst{19-16} = Re{3-0};
// Instantiate MUL / SQDMULH / SQRDMULH by-element instructions.
3561 defm MULve : NI_2VE_v1_2op<0b0, 0b1000, "mul">;
3562 defm SQDMULHve : NI_2VE_v1_2op<0b0, 0b1100, "sqdmulh">;
3563 defm SQRDMULHve : NI_2VE_v1_2op<0b0, 0b1101, "sqrdmulh">;
3565 // Pattern for lane in 128-bit vector
// Two-operand form of NI_2VE_laneq (no accumulator input): matches
// (op Rn, coreop(Re, Index)) with a 128-bit element register.
3566 class NI_2VE_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
3567 RegisterOperand OpVPR, RegisterOperand EleOpVPR,
3568 ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
3569 SDPatternOperator coreop>
3570 : Pat<(ResTy (op (OpTy OpVPR:$Rn),
3571 (OpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
3572 (INST OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
3574 // Pattern for lane in 64-bit vector
// 64-bit element register is widened with SUBREG_TO_REG before use.
3575 class NI_2VE_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
3576 RegisterOperand OpVPR, RegisterOperand EleOpVPR,
3577 ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
3578 SDPatternOperator coreop>
3579 : Pat<(ResTy (op (OpTy OpVPR:$Rn),
3580 (OpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
3582 (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
// Selection patterns for the two-operand integer by-element instructions,
// covering both 128-bit-lane (laneq) and 64-bit-lane (lane) sources.
3584 multiclass NI_2VE_mul_v1_pat<string subop, SDPatternOperator op>
3586 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
3587 op, VPR64, VPR128, v2i32, v2i32, v4i32,
3588 BinOpFrag<(Neon_vduplane
3589 (Neon_low4S node:$LHS), node:$RHS)>>;
3591 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
3592 op, VPR128, VPR128, v4i32, v4i32, v4i32,
3593 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3595 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
3596 op, VPR64, VPR128Lo, v4i16, v4i16, v8i16,
3597 BinOpFrag<(Neon_vduplane
3598 (Neon_low8H node:$LHS), node:$RHS)>>;
3600 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
3601 op, VPR128, VPR128Lo, v8i16, v8i16, v8i16,
3602 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3604 // Index can only be half of the max value for lane in 64-bit vector
3606 def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
3607 op, VPR64, VPR64, v2i32, v2i32, v2i32,
3608 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3610 def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4s"), neon_uimm1_bare,
3611 op, VPR128, VPR64, v4i32, v4i32, v2i32,
3612 BinOpFrag<(Neon_vduplane
3613 (Neon_combine_4S node:$LHS, undef),
3616 def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
3617 op, VPR64, VPR64Lo, v4i16, v4i16, v4i16,
3618 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3620 def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_8h8h"), neon_uimm2_bare,
3621 op, VPR128, VPR64Lo, v8i16, v8i16, v4i16,
3622 BinOpFrag<(Neon_vduplane
3623 (Neon_combine_8H node:$LHS, undef),
// Instantiate patterns for MUL / SQDMULH / SQRDMULH by-element instructions.
3627 defm MUL_lane_v1 : NI_2VE_mul_v1_pat<"MULve", mul>;
3628 defm SQDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQDMULHve", int_arm_neon_vqdmulh>;
3629 defm SQRDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQRDMULHve", int_arm_neon_vqrdmulh>;
// Two-operand by-element instructions, floating-point variant: 2S/4S shapes
// plus a 2D shape with a 1-bit index.  There is no 1D form (see comment).
3633 multiclass NI_2VE_v2_2op<bit u, bits<4> opcode, string asmop>
3635 // vector register class for element is always 128-bit to cover the max index
3636 def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
3637 neon_uimm2_bare, VPR64, VPR64, VPR128> {
3638 let Inst{11} = {Index{1}};
3639 let Inst{21} = {Index{0}};
3640 let Inst{20-16} = Re;
3643 def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
3644 neon_uimm2_bare, VPR128, VPR128, VPR128> {
3645 let Inst{11} = {Index{1}};
3646 let Inst{21} = {Index{0}};
3647 let Inst{20-16} = Re;
3650 // _1d2d doesn't exist!
3652 def _2d2d : NI_2VE_2op<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
3653 neon_uimm1_bare, VPR128, VPR128, VPR128> {
3654 let Inst{11} = {Index{0}};
3656 let Inst{20-16} = Re;
// Instantiate FMUL / FMULX by-element instructions.
3660 defm FMULve : NI_2VE_v2_2op<0b0, 0b1001, "fmul">;
3661 defm FMULXve : NI_2VE_v2_2op<0b1, 0b1001, "fmulx">;
// 2D-shape variant of NI_2VE_mul_lane: the element operand appears twice in
// the matched dag (coreop(Re, Re)); the widened Re is selected with lane 0.
3663 class NI_2VE_mul_lane_2d<Instruction INST, Operand OpImm, SDPatternOperator op,
3664 RegisterOperand OpVPR, RegisterOperand EleOpVPR,
3665 ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
3666 SDPatternOperator coreop>
3667 : Pat<(ResTy (op (OpTy OpVPR:$Rn),
3668 (OpTy (coreop (EleOpTy EleOpVPR:$Re), (EleOpTy EleOpVPR:$Re))))),
3670 (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), 0)>;
// Selection patterns for the two-operand floating-point by-element
// instructions (laneq for 128-bit lane sources, lane for 64-bit sources,
// plus the special 2D/1D case via NI_2VE_mul_lane_2d).
3672 multiclass NI_2VE_mul_v2_pat<string subop, SDPatternOperator op>
3674 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
3675 op, VPR64, VPR128, v2f32, v2f32, v4f32,
3676 BinOpFrag<(Neon_vduplane
3677 (Neon_low4f node:$LHS), node:$RHS)>>;
3679 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
3680 op, VPR128, VPR128, v4f32, v4f32, v4f32,
3681 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3683 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
3684 op, VPR128, VPR128, v2f64, v2f64, v2f64,
3685 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3687 // Index can only be half of the max value for lane in 64-bit vector
3689 def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
3690 op, VPR64, VPR64, v2f32, v2f32, v2f32,
3691 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3693 def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4s"), neon_uimm1_bare,
3694 op, VPR128, VPR64, v4f32, v4f32, v2f32,
3695 BinOpFrag<(Neon_vduplane
3696 (Neon_combine_4f node:$LHS, undef),
3699 def : NI_2VE_mul_lane_2d<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
3700 op, VPR128, VPR64, v2f64, v2f64, v1f64,
3701 BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
// Instantiate patterns for FMUL / FMULX by-element instructions.
3704 defm FMUL_lane_v2 : NI_2VE_mul_v2_pat<"FMULve", fmul>;
3705 defm FMULX_lane_v2 : NI_2VE_mul_v2_pat<"FMULXve", int_aarch64_neon_vmulx>;
3707 // The following are patterns using fma
3708 // -ffp-contract=fast generates fma
// Three-operand (accumulating) floating-point by-element instructions:
// 2S/4S shapes with a 2-bit index and a 2D shape with a 1-bit index.
3710 multiclass NI_2VE_v2<bit u, bits<4> opcode, string asmop>
3712 // vector register class for element is always 128-bit to cover the max index
3713 def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
3714 neon_uimm2_bare, VPR64, VPR64, VPR128> {
3715 let Inst{11} = {Index{1}};
3716 let Inst{21} = {Index{0}};
3717 let Inst{20-16} = Re;
3720 def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
3721 neon_uimm2_bare, VPR128, VPR128, VPR128> {
3722 let Inst{11} = {Index{1}};
3723 let Inst{21} = {Index{0}};
3724 let Inst{20-16} = Re;
3727 // _1d2d doesn't exist!
3729 def _2d2d : NI_2VE<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
3730 neon_uimm1_bare, VPR128, VPR128, VPR128> {
3731 let Inst{11} = {Index{0}};
3733 let Inst{20-16} = Re;
// Instantiate FMLA / FMLS by-element instructions.
3737 defm FMLAvve : NI_2VE_v2<0b0, 0b0001, "fmla">;
3738 defm FMLSvve : NI_2VE_v2<0b0, 0b0101, "fmls">;
3740 // Pattern for lane in 128-bit vector
// "swap" variants: the duplicated element is the FIRST operand of op (as fma
// produces it), while the accumulator and multiplicand follow.
3741 class NI_2VEswap_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
3742 RegisterOperand ResVPR, RegisterOperand OpVPR,
3743 ValueType ResTy, ValueType OpTy,
3744 SDPatternOperator coreop>
3745 : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
3746 (ResTy ResVPR:$src), (ResTy ResVPR:$Rn))),
3747 (INST ResVPR:$src, ResVPR:$Rn, OpVPR:$Re, OpImm:$Index)>;
3749 // Pattern for lane in 64-bit vector
// As above, but widens the 64-bit element register with SUBREG_TO_REG.  Note
// the matched operand order here is (dup, Rn, src).
3750 class NI_2VEswap_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
3751 RegisterOperand ResVPR, RegisterOperand OpVPR,
3752 ValueType ResTy, ValueType OpTy,
3753 SDPatternOperator coreop>
3754 : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
3755 (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
3756 (INST ResVPR:$src, ResVPR:$Rn,
3757 (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), OpImm:$Index)>;
3759 // Pattern for lane in 64-bit vector
// 2D/1D case: the element appears twice in coreop and lane 0 is selected.
3760 class NI_2VEswap_lane_2d2d<Instruction INST, Operand OpImm,
3761 SDPatternOperator op,
3762 RegisterOperand ResVPR, RegisterOperand OpVPR,
3763 ValueType ResTy, ValueType OpTy,
3764 SDPatternOperator coreop>
3765 : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (OpTy OpVPR:$Re))),
3766 (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
3767 (INST ResVPR:$src, ResVPR:$Rn,
3768 (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), 0)>;
// fma-based selection patterns for FMLA by element, using the NI_2VEswap_*
// classes since fma places the duplicated lane as its first operand.
3771 multiclass NI_2VE_fma_v2_pat<string subop, SDPatternOperator op>
3773 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
3774 neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
3775 BinOpFrag<(Neon_vduplane
3776 (Neon_low4f node:$LHS), node:$RHS)>>;
3778 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
3779 neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
3780 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3782 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
3783 neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
3784 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3786 // Index can only be half of the max value for lane in 64-bit vector
3788 def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
3789 neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
3790 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
3792 def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
3793 neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
3794 BinOpFrag<(Neon_vduplane
3795 (Neon_combine_4f node:$LHS, undef),
3798 def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
3799 neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
3800 BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
3803 defm FMLA_lane_v2_s : NI_2VE_fma_v2_pat<"FMLAvve", fma>;
// fma-based selection patterns for FMLS by element.  Each shape gets two
// patterns because the fneg may appear either outside the duplicated lane
// or on the element vector before duplication.
3805 multiclass NI_2VE_fms_v2_pat<string subop, SDPatternOperator op>
3807 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
3808 neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
3809 BinOpFrag<(fneg (Neon_vduplane
3810 (Neon_low4f node:$LHS), node:$RHS))>>;
3812 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
3813 neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
3814 BinOpFrag<(Neon_vduplane
3815 (Neon_low4f (fneg node:$LHS)),
3818 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
3819 neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
3820 BinOpFrag<(fneg (Neon_vduplane
3821 node:$LHS, node:$RHS))>>;
3823 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
3824 neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
3825 BinOpFrag<(Neon_vduplane
3826 (fneg node:$LHS), node:$RHS)>>;
3828 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
3829 neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
3830 BinOpFrag<(fneg (Neon_vduplane
3831 node:$LHS, node:$RHS))>>;
3833 def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
3834 neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
3835 BinOpFrag<(Neon_vduplane
3836 (fneg node:$LHS), node:$RHS)>>;
3838 // Index can only be half of the max value for lane in 64-bit vector
3840 def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
3841 neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
3842 BinOpFrag<(fneg (Neon_vduplane
3843 node:$LHS, node:$RHS))>>;
3845 def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
3846 neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
3847 BinOpFrag<(Neon_vduplane
3848 (fneg node:$LHS), node:$RHS)>>;
3850 def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
3851 neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
3852 BinOpFrag<(fneg (Neon_vduplane
3853 (Neon_combine_4f node:$LHS, undef),
3856 def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
3857 neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
3858 BinOpFrag<(Neon_vduplane
3859 (Neon_combine_4f (fneg node:$LHS), undef),
3862 def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
3863 neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
3864 BinOpFrag<(fneg (Neon_combine_2d
3865 node:$LHS, node:$RHS))>>;
3867 def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
3868 neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
3869 BinOpFrag<(Neon_combine_2d
3870 (fneg node:$LHS), (fneg node:$RHS))>>;
3873 defm FMLS_lane_v2_s : NI_2VE_fms_v2_pat<"FMLSvve", fma>;
3875 // Variant 3: Long type
3876 // E.g. SMLAL : 4S/4H/H (v0-v15), 2D/2S/S
3877 //      SMLAL2: 4S/8H/H (v0-v15), 2D/4S/S
// Accumulating long by-element instructions: the "2" suffix forms read the
// high half of a 128-bit Rn; H-element forms restrict Re to v0-v15.
3879 multiclass NI_2VE_v3<bit u, bits<4> opcode, string asmop>
3881 // vector register class for element is always 128-bit to cover the max index
3882 def _2d2s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
3883 neon_uimm2_bare, VPR128, VPR64, VPR128> {
3884 let Inst{11} = {Index{1}};
3885 let Inst{21} = {Index{0}};
3886 let Inst{20-16} = Re;
3889 def _2d4s : NI_2VE<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
3890 neon_uimm2_bare, VPR128, VPR128, VPR128> {
3891 let Inst{11} = {Index{1}};
3892 let Inst{21} = {Index{0}};
3893 let Inst{20-16} = Re;
3896 // Index operations on 16-bit(H) elements are restricted to using v0-v15.
3897 def _4s8h : NI_2VE<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
3898 neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
3899 let Inst{11} = {Index{2}};
3900 let Inst{21} = {Index{1}};
3901 let Inst{20} = {Index{0}};
3902 let Inst{19-16} = Re{3-0};
3905 def _4s4h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
3906 neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
3907 let Inst{11} = {Index{2}};
3908 let Inst{21} = {Index{1}};
3909 let Inst{20} = {Index{0}};
3910 let Inst{19-16} = Re{3-0};
// Instantiate the accumulating long by-element instructions.
3914 defm SMLALvve : NI_2VE_v3<0b0, 0b0010, "smlal">;
3915 defm UMLALvve : NI_2VE_v3<0b1, 0b0010, "umlal">;
3916 defm SMLSLvve : NI_2VE_v3<0b0, 0b0110, "smlsl">;
3917 defm UMLSLvve : NI_2VE_v3<0b1, 0b0110, "umlsl">;
3918 defm SQDMLALvve : NI_2VE_v3<0b0, 0b0011, "sqdmlal">;
3919 defm SQDMLSLvve : NI_2VE_v3<0b0, 0b0111, "sqdmlsl">;
// Two-operand (non-accumulating) long by-element instructions; same shape
// and encoding layout as NI_2VE_v3 but built on NI_2VE_2op.
3921 multiclass NI_2VE_v3_2op<bit u, bits<4> opcode, string asmop>
3923 // vector register class for element is always 128-bit to cover the max index
3924 def _2d2s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
3925 neon_uimm2_bare, VPR128, VPR64, VPR128> {
3926 let Inst{11} = {Index{1}};
3927 let Inst{21} = {Index{0}};
3928 let Inst{20-16} = Re;
3931 def _2d4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
3932 neon_uimm2_bare, VPR128, VPR128, VPR128> {
3933 let Inst{11} = {Index{1}};
3934 let Inst{21} = {Index{0}};
3935 let Inst{20-16} = Re;
3938 // Index operations on 16-bit(H) elements are restricted to using v0-v15.
3939 def _4s8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
3940 neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
3941 let Inst{11} = {Index{2}};
3942 let Inst{21} = {Index{1}};
3943 let Inst{20} = {Index{0}};
3944 let Inst{19-16} = Re{3-0};
3947 def _4s4h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
3948 neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
3949 let Inst{11} = {Index{2}};
3950 let Inst{21} = {Index{1}};
3951 let Inst{20} = {Index{0}};
3952 let Inst{19-16} = Re{3-0};
// Instantiate SMULL / UMULL / SQDMULL by-element instructions.
3956 defm SMULLve : NI_2VE_v3_2op<0b0, 0b1010, "smull">;
3957 defm UMULLve : NI_2VE_v3_2op<0b1, 0b1010, "umull">;
3958 defm SQDMULLve : NI_2VE_v3_2op<0b0, 0b1011, "sqdmull">;
3960 // Pattern for lane in 128-bit vector
// "2" (high-half) accumulating long forms: hiop extracts the high half of
// the 128-bit Rn before it is combined with the duplicated element.
3961 class NI_2VEL2_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
3962 RegisterOperand EleOpVPR, ValueType ResTy,
3963 ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
3964 SDPatternOperator hiop, SDPatternOperator coreop>
3965 : Pat<(ResTy (op (ResTy VPR128:$src),
3966 (HalfOpTy (hiop (OpTy VPR128:$Rn))),
3967 (HalfOpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
3968 (INST VPR128:$src, VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
3970 // Pattern for lane in 64-bit vector
// As above, but the 64-bit element register is widened with SUBREG_TO_REG.
3971 class NI_2VEL2_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
3972 RegisterOperand EleOpVPR, ValueType ResTy,
3973 ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
3974 SDPatternOperator hiop, SDPatternOperator coreop>
3975 : Pat<(ResTy (op (ResTy VPR128:$src),
3976 (HalfOpTy (hiop (OpTy VPR128:$Rn))),
3977 (HalfOpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
3978 (INST VPR128:$src, VPR128:$Rn,
3979 (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
// Selection patterns for the accumulating long by-element instructions:
// NI_2VE_* for the low-half forms, NI_2VEL2_* for the high-half ("2") forms.
3981 multiclass NI_2VEL_v3_pat<string subop, SDPatternOperator op>
3983 def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
3984 op, VPR128, VPR64, VPR128Lo, v4i32, v4i16, v8i16,
3985 BinOpFrag<(Neon_vduplane
3986 (Neon_low8H node:$LHS), node:$RHS)>>;
3988 def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
3989 op, VPR128, VPR64, VPR128, v2i64, v2i32, v4i32,
3990 BinOpFrag<(Neon_vduplane
3991 (Neon_low4S node:$LHS), node:$RHS)>>;
3993 def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
3994 op, VPR128Lo, v4i32, v8i16, v8i16, v4i16, Neon_High8H,
3995 BinOpFrag<(Neon_vduplane
3996 (Neon_low8H node:$LHS), node:$RHS)>>;
3998 def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
3999 op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S,
4000 BinOpFrag<(Neon_vduplane
4001 (Neon_low4S node:$LHS), node:$RHS)>>;
4003 // Index can only be half of the max value for lane in 64-bit vector
4005 def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
4006 op, VPR128, VPR64, VPR64Lo, v4i32, v4i16, v4i16,
4007 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
4009 def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
4010 op, VPR128, VPR64, VPR64, v2i64, v2i32, v2i32,
4011 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
4013 def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
4014 op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H,
4015 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
4017 def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
4018 op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S,
4019 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
// Instantiate patterns for SMLAL / UMLAL / SMLSL / UMLSL by element.
4022 defm SMLAL_lane_v3 : NI_2VEL_v3_pat<"SMLALvve", Neon_smlal>;
4023 defm UMLAL_lane_v3 : NI_2VEL_v3_pat<"UMLALvve", Neon_umlal>;
4024 defm SMLSL_lane_v3 : NI_2VEL_v3_pat<"SMLSLvve", Neon_smlsl>;
4025 defm UMLSL_lane_v3 : NI_2VEL_v3_pat<"UMLSLvve", Neon_umlsl>;
4027 // Pattern for lane in 128-bit vector
// Two-operand (no accumulator) high-half long form: multiplies the hiop of
// Rn by the duplicated 128-bit-register element.
4028 class NI_2VEL2_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
4029 RegisterOperand EleOpVPR, ValueType ResTy,
4030 ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
4031 SDPatternOperator hiop, SDPatternOperator coreop>
4033 (HalfOpTy (hiop (OpTy VPR128:$Rn))),
4034 (HalfOpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
4035 (INST VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
4037 // Pattern for lane in 64-bit vector
// As above with the 64-bit element register widened via SUBREG_TO_REG.
4038 class NI_2VEL2_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
4039 RegisterOperand EleOpVPR, ValueType ResTy,
4040 ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
4041 SDPatternOperator hiop, SDPatternOperator coreop>
4043 (HalfOpTy (hiop (OpTy VPR128:$Rn))),
4044 (HalfOpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
4046 (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
// Selection patterns for the two-operand long by-element instructions
// (SMULL/UMULL/SQDMULL): low-half forms via NI_2VE_mul_*, high-half ("2")
// forms via NI_2VEL2_mul_*.
4048 multiclass NI_2VEL_mul_v3_pat<string subop, SDPatternOperator op>
4050 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
4051 op, VPR64, VPR128Lo, v4i32, v4i16, v8i16,
4052 BinOpFrag<(Neon_vduplane
4053 (Neon_low8H node:$LHS), node:$RHS)>>;
4055 def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
4056 op, VPR64, VPR128, v2i64, v2i32, v4i32,
4057 BinOpFrag<(Neon_vduplane
4058 (Neon_low4S node:$LHS), node:$RHS)>>;
4060 def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
4061 op, VPR128Lo, v4i32, v8i16, v8i16, v4i16,
4063 BinOpFrag<(Neon_vduplane
4064 (Neon_low8H node:$LHS), node:$RHS)>>;
4066 def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
4067 op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S,
4068 BinOpFrag<(Neon_vduplane
4069 (Neon_low4S node:$LHS), node:$RHS)>>;
4071 // Index can only be half of the max value for lane in 64-bit vector
4073 def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
4074 op, VPR64, VPR64Lo, v4i32, v4i16, v4i16,
4075 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
4077 def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
4078 op, VPR64, VPR64, v2i64, v2i32, v2i32,
4079 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
4081 def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
4082 op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H,
4083 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
4085 def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
4086 op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S,
4087 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
// Instantiate patterns for SMULL / UMULL / SQDMULL by element.
4090 defm SMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SMULLve", int_arm_neon_vmulls>;
4091 defm UMULL_lane_v3 : NI_2VEL_mul_v3_pat<"UMULLve", int_arm_neon_vmullu>;
4092 defm SQDMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SQDMULLve", int_arm_neon_vqdmull>;
// PatFrags combining a saturating accumulate/subtract op with vqdmull, used
// as the operator for the SQDMLAL/SQDMLSL by-element patterns below.
4094 multiclass NI_qdma<SDPatternOperator op>
4096 def _4s : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
4098 (v4i32 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
4100 def _2d : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
4102 (v2i64 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
4105 defm Neon_qdmlal : NI_qdma<int_arm_neon_vqadds>;
4106 defm Neon_qdmlsl : NI_qdma<int_arm_neon_vqsubs>;
// Selection patterns for SQDMLAL/SQDMLSL by element; `op` names the NI_qdma
// PatFrag family ("_4s"/"_2d" suffix selected per shape via !cast).
4108 multiclass NI_2VEL_v3_qdma_pat<string subop, string op>
4110 def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
4111 !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR128Lo,
4112 v4i32, v4i16, v8i16,
4113 BinOpFrag<(Neon_vduplane
4114 (Neon_low8H node:$LHS), node:$RHS)>>;
4116 def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
4117 !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR128,
4118 v2i64, v2i32, v4i32,
4119 BinOpFrag<(Neon_vduplane
4120 (Neon_low4S node:$LHS), node:$RHS)>>;
4122 def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
4123 !cast<PatFrag>(op # "_4s"), VPR128Lo,
4124 v4i32, v8i16, v8i16, v4i16, Neon_High8H,
4125 BinOpFrag<(Neon_vduplane
4126 (Neon_low8H node:$LHS), node:$RHS)>>;
4128 def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
4129 !cast<PatFrag>(op # "_2d"), VPR128,
4130 v2i64, v4i32, v4i32, v2i32, Neon_High4S,
4131 BinOpFrag<(Neon_vduplane
4132 (Neon_low4S node:$LHS), node:$RHS)>>;
4134 // Index can only be half of the max value for lane in 64-bit vector
4136 def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
4137 !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR64Lo,
4138 v4i32, v4i16, v4i16,
4139 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
4141 def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
4142 !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR64,
4143 v2i64, v2i32, v2i32,
4144 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
4146 def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
4147 !cast<PatFrag>(op # "_4s"), VPR64Lo,
4148 v4i32, v8i16, v4i16, v4i16, Neon_High8H,
4149 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
4151 def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
4152 !cast<PatFrag>(op # "_2d"), VPR64,
4153 v2i64, v4i32, v2i32, v2i32, Neon_High4S,
4154 BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
// Instantiate patterns for SQDMLAL / SQDMLSL by element.
4157 defm SQDMLAL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLALvve", "Neon_qdmlal">;
4158 defm SQDMLSL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLSLvve", "Neon_qdmlsl">;
4160 // End of implementation for instruction class (3V Elem)
4162 // Insert element (vector, from main): INS Vd.T[index], Rn.
// Each def fixes the element-size encoding in Inst{20-16} from the index.
4163 def INSbw : NeonI_INS_main<"ins", "b", v16i8, GPR32, i32,
4165 let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
4167 def INShw : NeonI_INS_main<"ins", "h", v8i16, GPR32, i32,
4169 let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
4171 def INSsw : NeonI_INS_main<"ins", "s", v4i32, GPR32, i32,
4173 let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
4175 def INSdx : NeonI_INS_main<"ins", "d", v2i64, GPR64, i64,
4177 let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
// Pattern mapping a 64-bit-vector insert onto the 128-bit INS instruction:
// widen $src with SUBREG_TO_REG, insert, then extract sub_64 back out.
4180 class Neon_INS_main_pattern <ValueType ResTy,ValueType ExtResTy,
4181 RegisterClass OpGPR, ValueType OpTy,
4182 Operand OpImm, Instruction INS>
4183 : Pat<(ResTy (vector_insert
4187 (ResTy (EXTRACT_SUBREG
4188 (ExtResTy (INS (ExtResTy (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
4189 OpGPR:$Rn, OpImm:$Imm)), sub_64))>;
4191 def INSbw_pattern : Neon_INS_main_pattern<v8i8, v16i8, GPR32, i32,
4192 neon_uimm3_bare, INSbw>;
4193 def INShw_pattern : Neon_INS_main_pattern<v4i16, v8i16, GPR32, i32,
4194 neon_uimm2_bare, INShw>;
4195 def INSsw_pattern : Neon_INS_main_pattern<v2i32, v4i32, GPR32, i32,
4196 neon_uimm1_bare, INSsw>;
4197 def INSdx_pattern : Neon_INS_main_pattern<v1i64, v2i64, GPR64, i64,
4198 neon_uimm0_bare, INSdx>;
// INS Vd.T[Immd], Vn.T[Immn]: insert one vector element into another vector.
// $src is tied to $Rd so the untouched lanes are preserved.
4200 class NeonI_INS_element<string asmop, string Res, ValueType ResTy,
4201 Operand ResImm, ValueType MidTy>
4202 : NeonI_insert<0b1, 0b1,
4203 (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn,
4204 ResImm:$Immd, ResImm:$Immn),
4205 asmop # "\t$Rd." # Res # "[$Immd], $Rn." # Res # "[$Immn]",
4206 [(set (ResTy VPR128:$Rd),
4207 (ResTy (vector_insert
4208 (ResTy VPR128:$src),
4209 (MidTy (vector_extract
4214 let Constraints = "$src = $Rd";
4219 // Insert element (vector, from element)
// Per-size defs encode the destination lane in Inst{20-16} and the source
// lane in the bits below 15; unused low bits are left unspecified.
4220 def INSELb : NeonI_INS_element<"ins", "b", v16i8, neon_uimm4_bare, i32> {
4221 let Inst{20-16} = {Immd{3}, Immd{2}, Immd{1}, Immd{0}, 0b1};
4222 let Inst{14-11} = {Immn{3}, Immn{2}, Immn{1}, Immn{0}};
4224 def INSELh : NeonI_INS_element<"ins", "h", v8i16, neon_uimm3_bare, i32> {
4225 let Inst{20-16} = {Immd{2}, Immd{1}, Immd{0}, 0b1, 0b0};
4226 let Inst{14-12} = {Immn{2}, Immn{1}, Immn{0}};
4227 // bit 11 is unspecified.
4229 def INSELs : NeonI_INS_element<"ins", "s", v4i32, neon_uimm2_bare, i32> {
4230 let Inst{20-16} = {Immd{1}, Immd{0}, 0b1, 0b0, 0b0};
4231 let Inst{14-13} = {Immn{1}, Immn{0}};
4232 // bits 11-12 are unspecified.
4234 def INSELd : NeonI_INS_element<"ins", "d", v2i64, neon_uimm1_bare, i64> {
4235 let Inst{20-16} = {Immd, 0b1, 0b0, 0b0, 0b0};
4236 let Inst{14} = Immn{0};
4237 // bits 11-13 are unspecified.
// Patterns mapping element-to-element inserts onto INSEL when one or both
// vectors are 64-bit (NaTy): operands are widened with SUBREG_TO_REG and,
// when the result is 64-bit, narrowed again with EXTRACT_SUBREG.
4240 multiclass Neon_INS_elt_pattern <ValueType NaTy, Operand NaImm,
4241 ValueType MidTy, ValueType StTy,
4242 Operand StImm, Instruction INS> {
4243 def : Pat<(NaTy (vector_insert
4245 (MidTy (vector_extract
4249 (NaTy (EXTRACT_SUBREG
4251 (StTy (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
4257 def : Pat<(StTy (vector_insert
4259 (MidTy (vector_extract
4265 (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
4269 def : Pat<(NaTy (vector_insert
4271 (MidTy (vector_extract
4275 (NaTy (EXTRACT_SUBREG
4277 (StTy (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
4278 (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
// Instantiate the narrow-vector insert patterns for each element size.
4284 defm INSb_pattern : Neon_INS_elt_pattern<v8i8, neon_uimm3_bare, i32,
4285 v16i8, neon_uimm4_bare, INSELb>;
4286 defm INSh_pattern : Neon_INS_elt_pattern<v4i16, neon_uimm2_bare, i32,
4287 v8i16, neon_uimm3_bare, INSELh>;
4288 defm INSs_pattern : Neon_INS_elt_pattern<v2i32, neon_uimm1_bare, i32,
4289 v4i32, neon_uimm2_bare, INSELs>;
4290 defm INSd_pattern : Neon_INS_elt_pattern<v1i64, neon_uimm0_bare, i64,
4291 v2i64, neon_uimm1_bare, INSELd>;
// SMOV Rd, Vn.T[Imm]: sign-extending move of one vector element to a GPR.
4293 class NeonI_SMOV<string asmop, string Res, bit Q,
4294 ValueType OpTy, ValueType eleTy,
4295 Operand OpImm, RegisterClass ResGPR, ValueType ResTy>
4296 : NeonI_copy<Q, 0b0, 0b0101,
4297 (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
4298 asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
4299 [(set (ResTy ResGPR:$Rd),
4301 (ResTy (vector_extract
4302 (OpTy VPR128:$Rn), (OpImm:$Imm))),
4308 // Signed integer move (main, from element)
// W-destination forms for b/h elements, X-destination forms for b/h/s.
4309 def SMOVwb : NeonI_SMOV<"smov", "b", 0b0, v16i8, i8, neon_uimm4_bare,
4311 let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
4313 def SMOVwh : NeonI_SMOV<"smov", "h", 0b0, v8i16, i16, neon_uimm3_bare,
4315 let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
4317 def SMOVxb : NeonI_SMOV<"smov", "b", 0b1, v16i8, i8, neon_uimm4_bare,
4319 let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
4321 def SMOVxh : NeonI_SMOV<"smov", "h", 0b1, v8i16, i16, neon_uimm3_bare,
4323 let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
4325 def SMOVxs : NeonI_SMOV<"smov", "s", 0b1, v4i32, i32, neon_uimm2_bare,
4327 let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
// Patterns selecting the X-register SMOV forms for sext / sext_inreg of an
// extracted element, from both 128-bit (StTy) and 64-bit (NaTy) vectors;
// the 64-bit source is first widened with SUBREG_TO_REG.
4330 multiclass Neon_SMOVx_pattern <ValueType StTy, ValueType NaTy,
4331 ValueType eleTy, Operand StImm, Operand NaImm,
4332 Instruction SMOVI> {
4333 def : Pat<(i64 (sext_inreg
4335 (i32 (vector_extract
4336 (StTy VPR128:$Rn), (StImm:$Imm))))),
4338 (SMOVI VPR128:$Rn, StImm:$Imm)>;
4340 def : Pat<(i64 (sext
4341 (i32 (vector_extract
4342 (StTy VPR128:$Rn), (StImm:$Imm))))),
4343 (SMOVI VPR128:$Rn, StImm:$Imm)>;
4345 def : Pat<(i64 (sext_inreg
4346 (i64 (vector_extract
4347 (NaTy VPR64:$Rn), (NaImm:$Imm))),
4349 (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
4352 def : Pat<(i64 (sext_inreg
4354 (i32 (vector_extract
4355 (NaTy VPR64:$Rn), (NaImm:$Imm))))),
4357 (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
4360 def : Pat<(i64 (sext
4361 (i32 (vector_extract
4362 (NaTy VPR64:$Rn), (NaImm:$Imm))))),
4363 (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
// Instantiate the X-register SMOV patterns per element size.
4367 defm SMOVxb_pattern : Neon_SMOVx_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
4368 neon_uimm3_bare, SMOVxb>;
4369 defm SMOVxh_pattern : Neon_SMOVx_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
4370 neon_uimm2_bare, SMOVxh>;
4371 defm SMOVxs_pattern : Neon_SMOVx_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
4372 neon_uimm1_bare, SMOVxs>;
// Pattern class for the 32-bit SMOVw forms: matches sext_inreg of a 32-bit
// lane extract from a 64-bit vector (NaTy), widening the VPR64 source to
// VPR128 with SUBREG_TO_REG before emitting the SMOV.
// NOTE(review): the Instruction parameter line and the pattern's result tail
// are missing from this extraction — code left byte-identical.
4374 class Neon_SMOVw_pattern <ValueType StTy, ValueType NaTy,
4375 ValueType eleTy, Operand StImm, Operand NaImm,
4377 : Pat<(i32 (sext_inreg
4378 (i32 (vector_extract
4379 (NaTy VPR64:$Rn), (NaImm:$Imm))),
4381 (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
// Instantiate the 32-bit SMOVw patterns for byte and half-word lanes of
// 64-bit source vectors (v8i8 and v4i16 halves of v16i8 and v8i16).
4384 def SMOVwb_pattern : Neon_SMOVw_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
4385 neon_uimm3_bare, SMOVwb>;
4386 def SMOVwh_pattern : Neon_SMOVw_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
4387 neon_uimm2_bare, SMOVwh>;
// Base class for UMOV (move vector lane to GPR without sign extension,
// per the "umov" mnemonic).  Built on the NeonI_copy format with op field
// 0b0111; the ISel pattern sets the destination GPR directly to the
// vector_extract of lane $Imm from the 128-bit source.
// NOTE(review): the class's trailing lines (remaining NeonI_copy arguments /
// closing bracket) are missing from this extraction.
4390 class NeonI_UMOV<string asmop, string Res, bit Q,
4391 ValueType OpTy, Operand OpImm,
4392 RegisterClass ResGPR, ValueType ResTy>
4393 : NeonI_copy<Q, 0b0, 0b0111,
4394 (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
4395 asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
4396 [(set (ResTy ResGPR:$Rd),
4397 (ResTy (vector_extract
4398 (OpTy VPR128:$Rn), (OpImm:$Imm))))],
4403 //Unsigned integer move (main, from element)
// UMOV{w}{b,h,s} move byte/half/word lanes into a W register (Q = 0b0);
// UMOVxd moves a doubleword lane into an X register (Q = 0b1).  As with
// SMOV, each "let Inst{20-16}" packs the lane index with a constant
// size-marker bit pattern into the imm5 field (lowest set constant bit
// position encodes the element size).
4404 def UMOVwb : NeonI_UMOV<"umov", "b", 0b0, v16i8, neon_uimm4_bare,
4406 let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
4408 def UMOVwh : NeonI_UMOV<"umov", "h", 0b0, v8i16, neon_uimm3_bare,
4410 let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
4412 def UMOVws : NeonI_UMOV<"umov", "s", 0b0, v4i32, neon_uimm2_bare,
4414 let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
4416 def UMOVxd : NeonI_UMOV<"umov", "d", 0b1, v2i64, neon_uimm1_bare,
4418 let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
// Pattern class mapping a plain lane extract from a 64-bit vector onto a
// UMOV: the VPR64 source is widened to VPR128 with SUBREG_TO_REG, then the
// instruction is emitted with the narrow lane-index operand.
// NOTE(review): the Instruction parameter is named SMOVI even though the
// instantiations below pass UMOV instructions — looks like a copy-paste
// carry-over from the SMOV classes; harmless but worth renaming upstream.
4421 class Neon_UMOV_pattern <ValueType StTy, ValueType NaTy, ValueType ResTy,
4422 Operand StImm, Operand NaImm,
4424 : Pat<(ResTy (vector_extract
4425 (NaTy VPR64:$Rn), NaImm:$Imm)),
4426 (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
// Instantiate the UMOV patterns for byte/half/word lanes of 64-bit vectors
// (v8i8/v4i16/v2i32 halves of v16i8/v8i16/v4i32), all producing i32.
4429 def UMOVwb_pattern : Neon_UMOV_pattern<v16i8, v8i8, i32, neon_uimm4_bare,
4430 neon_uimm3_bare, UMOVwb>;
4431 def UMOVwh_pattern : Neon_UMOV_pattern<v8i16, v4i16, i32, neon_uimm3_bare,
4432 neon_uimm2_bare, UMOVwh>;
4433 def UMOVws_pattern : Neon_UMOV_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
4434 neon_uimm1_bare, UMOVws>;
// Zero-extension patterns: lane extracts whose result is (implicitly or
// explicitly) zero-extended select directly to UMOV, since UMOV leaves the
// upper destination bits clear.  Covers 128-bit sources (used directly) and
// 64-bit sources (widened with SUBREG_TO_REG into sub_64 first).
// NOTE(review): the "def : Pat<(i64 (zext" / "(and ..." header lines of the
// first several patterns are missing from this extraction — the visible
// fragments are left byte-identical.
4437 (i32 (vector_extract
4438 (v16i8 VPR128:$Rn), (neon_uimm4_bare:$Imm))),
4440 (UMOVwb VPR128:$Rn, neon_uimm4_bare:$Imm)>;
4443 (i32 (vector_extract
4444 (v8i16 VPR128:$Rn), (neon_uimm3_bare:$Imm))),
4446 (UMOVwh VPR128:$Rn, neon_uimm3_bare:$Imm)>;
// zext of a doubleword-lane extract from a 128-bit vector -> UMOVxd.
4448 def : Pat<(i64 (zext
4449 (i32 (vector_extract
4450 (v2i64 VPR128:$Rn), (neon_uimm1_bare:$Imm))))),
4451 (UMOVxd VPR128:$Rn, neon_uimm1_bare:$Imm)>;
4454 (i32 (vector_extract
4455 (v8i8 VPR64:$Rn), (neon_uimm3_bare:$Imm))),
4457 (UMOVwb (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
4458 neon_uimm3_bare:$Imm)>;
4461 (i32 (vector_extract
4462 (v4i16 VPR64:$Rn), (neon_uimm2_bare:$Imm))),
4464 (UMOVwh (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
4465 neon_uimm2_bare:$Imm)>;
// zext of the single lane of a v1i64 in VPR64 -> widen then UMOVxd.
4467 def : Pat<(i64 (zext
4468 (i32 (vector_extract
4469 (v1i64 VPR64:$Rn), (neon_uimm0_bare:$Imm))))),
4470 (UMOVxd (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
4471 neon_uimm0_bare:$Imm)>;
4473 // Additional copy patterns for scalar types
// Extracting element 0 of a one-element vector held in a scalar FP register:
// byte/half sources are widened into the 128-bit register class with
// SUBREG_TO_REG (into sub_8 / sub_16) before the lane read; word and
// doubleword sources go straight through FMOV (FMOVws / FMOVxd) to a GPR.
// NOTE(review): the instruction-result lines of the first two patterns and
// the tails of the f64/f32 patterns are missing from this extraction.
4474 def : Pat<(i32 (vector_extract (v1i8 FPR8:$Rn), (i64 0))),
4476 (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8)), (i64 0))>;
4478 def : Pat<(i32 (vector_extract (v1i16 FPR16:$Rn), (i64 0))),
4480 (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16)), (i64 0))>;
4482 def : Pat<(i32 (vector_extract (v1i32 FPR32:$Rn), (i64 0))),
4483 (FMOVws FPR32:$Rn)>;
4485 def : Pat<(i64 (vector_extract (v1i64 FPR64:$Rn), (i64 0))),
4486 (FMOVxd FPR64:$Rn)>;
4488 def : Pat<(f64 (vector_extract (v1f64 FPR64:$Rn), (i64 0))),
4491 def : Pat<(f32 (vector_extract (v1f32 FPR32:$Rn), (i64 0))),
// scalar_to_vector of a GPR into a one-element vector: insert the GPR into
// lane 0 of an undefined 128-bit vector (INSbw / INShw), then EXTRACT_SUBREG
// down to the one-element scalar register class.
// NOTE(review): the EXTRACT_SUBREG subregister-index lines and the v1i32
// pattern's result are missing from this extraction.
4494 def : Pat<(v1i8 (scalar_to_vector GPR32:$Rn)),
4495 (v1i8 (EXTRACT_SUBREG (v16i8
4496 (INSbw (v16i8 (IMPLICIT_DEF)), $Rn, (i64 0))),
4499 def : Pat<(v1i16 (scalar_to_vector GPR32:$Rn)),
4500 (v1i16 (EXTRACT_SUBREG (v8i16
4501 (INShw (v8i16 (IMPLICIT_DEF)), $Rn, (i64 0))),
4504 def : Pat<(v1i32 (scalar_to_vector GPR32:$src)),
4507 def : Pat<(v1i64 (scalar_to_vector GPR64:$src)),