1 //===-- AArch64InstrNEON.td - NEON support for AArch64 -----*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file describes the AArch64 NEON instruction set.
12 //===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// NEON-specific DAG Nodes.
//===----------------------------------------------------------------------===//

// Vector bitwise select: (outs Result), (ins Op0, Op1, Op2); all four values
// share the same vector type.
def Neon_bsl       : SDNode<"AArch64ISD::NEON_BSL", SDTypeProfile<1, 3,
                      [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
                      SDTCisSameAs<0, 3>]>>;

// (outs Result), (ins Imm, OpCmode)
def SDT_Neon_movi : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVT<1, i32>]>;

def Neon_movi : SDNode<"AArch64ISD::NEON_MOVIMM", SDT_Neon_movi>;

def Neon_mvni : SDNode<"AArch64ISD::NEON_MVNIMM", SDT_Neon_movi>;

// (outs Result), (ins Imm)
def Neon_fmovi : SDNode<"AArch64ISD::NEON_FMOVIMM", SDTypeProfile<1, 1,
                        [SDTCisVec<0>, SDTCisVT<1, i32>]>>;

// (outs Result), (ins LHS, RHS, CondCode)
def Neon_cmp : SDNode<"AArch64ISD::NEON_CMP", SDTypeProfile<1, 3,
                      [SDTCisVec<0>, SDTCisSameAs<1, 2>]>>;

// (outs Result), (ins LHS, 0/0.0 constant, CondCode)
def Neon_cmpz : SDNode<"AArch64ISD::NEON_CMPZ", SDTypeProfile<1, 3,
                       [SDTCisVec<0>, SDTCisVec<1>]>>;

// (outs Result), (ins LHS, RHS)
def Neon_tst : SDNode<"AArch64ISD::NEON_TST", SDTypeProfile<1, 2,
                      [SDTCisVec<0>, SDTCisSameAs<1, 2>]>>;

// (outs Result), (ins Imm) — duplicate an immediate across all lanes.
def Neon_dupImm : SDNode<"AArch64ISD::NEON_DUPIMM", SDTypeProfile<1, 1,
                         [SDTCisVec<0>, SDTCisVT<1, i32>]>>;
//===----------------------------------------------------------------------===//
// Multiclasses
//===----------------------------------------------------------------------===//

// Three-register same-type ops over 8-bit elements only (8B and 16B
// arrangements). opnode8B/opnode16B select the pattern per register width.
// NOTE(review): the `bit Commutable` parameter, braces, and NoItinerary
// trailers were lost in the source mangling and are reconstructed here from
// the `isCommutable = Commutable` use and the seven-argument callers below —
// confirm against the NeonI_3VSame class definition.
multiclass NeonI_3VSame_B_sizes<bit u, bits<2> size, bits<5> opcode,
                                string asmop, SDPatternOperator opnode8B,
                                SDPatternOperator opnode16B,
                                bit Commutable = 0> {
  let isCommutable = Commutable in {
    def _8B : NeonI_3VSame<0b0, u, size, opcode,
                           (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
                           asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
                           [(set (v8i8 VPR64:$Rd),
                              (v8i8 (opnode8B (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
                           NoItinerary>;

    def _16B : NeonI_3VSame<0b1, u, size, opcode,
                            (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
                            asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
                            [(set (v16i8 VPR128:$Rd),
                               (v16i8 (opnode16B (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
                            NoItinerary>;
  }
}
// Three-register same-type ops over 16- and 32-bit elements
// (4H/8H/2S/4S arrangements), all sharing one pattern operator.
// NOTE(review): `bit Commutable`, braces, and NoItinerary trailers
// reconstructed from the mangled source — confirm against NeonI_3VSame.
multiclass NeonI_3VSame_HS_sizes<bit u, bits<5> opcode,
                                 string asmop, SDPatternOperator opnode,
                                 bit Commutable = 0> {
  let isCommutable = Commutable in {
    def _4H : NeonI_3VSame<0b0, u, 0b01, opcode,
                           (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
                           asmop # "\t$Rd.4h, $Rn.4h, $Rm.4h",
                           [(set (v4i16 VPR64:$Rd),
                              (v4i16 (opnode (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))))],
                           NoItinerary>;

    def _8H : NeonI_3VSame<0b1, u, 0b01, opcode,
                           (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
                           asmop # "\t$Rd.8h, $Rn.8h, $Rm.8h",
                           [(set (v8i16 VPR128:$Rd),
                              (v8i16 (opnode (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))))],
                           NoItinerary>;

    def _2S : NeonI_3VSame<0b0, u, 0b10, opcode,
                           (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
                           asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
                           [(set (v2i32 VPR64:$Rd),
                              (v2i32 (opnode (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))))],
                           NoItinerary>;

    def _4S : NeonI_3VSame<0b1, u, 0b10, opcode,
                           (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
                           asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
                           [(set (v4i32 VPR128:$Rd),
                              (v4i32 (opnode (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))))],
                           NoItinerary>;
  }
}
// Extends the H/S multiclass with the 8-bit arrangements (8B/16B).
// NOTE(review): `bit Commutable`, braces, and NoItinerary trailers
// reconstructed from the mangled source.
multiclass NeonI_3VSame_BHS_sizes<bit u, bits<5> opcode,
                                  string asmop, SDPatternOperator opnode,
                                  bit Commutable = 0>
  : NeonI_3VSame_HS_sizes<u, opcode, asmop, opnode, Commutable> {
  let isCommutable = Commutable in {
    def _8B : NeonI_3VSame<0b0, u, 0b00, opcode,
                           (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
                           asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
                           [(set (v8i8 VPR64:$Rd),
                              (v8i8 (opnode (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
                           NoItinerary>;

    def _16B : NeonI_3VSame<0b1, u, 0b00, opcode,
                            (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
                            asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
                            [(set (v16i8 VPR128:$Rd),
                               (v16i8 (opnode (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
                            NoItinerary>;
  }
}
// Extends the B/H/S multiclass with the 64-bit arrangement (2D).
// NOTE(review): `bit Commutable`, braces, and NoItinerary trailer
// reconstructed from the mangled source.
multiclass NeonI_3VSame_BHSD_sizes<bit u, bits<5> opcode,
                                   string asmop, SDPatternOperator opnode,
                                   bit Commutable = 0>
  : NeonI_3VSame_BHS_sizes<u, opcode, asmop, opnode, Commutable> {
  let isCommutable = Commutable in {
    def _2D : NeonI_3VSame<0b1, u, 0b11, opcode,
                           (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
                           asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
                           [(set (v2i64 VPR128:$Rd),
                              (v2i64 (opnode (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))))],
                           NoItinerary>;
  }
}
// Multiclass NeonI_3VSame_SD_sizes: Operand types are floating point types,
// but Result types can be integer or floating point types (the FP compare
// instructions below produce integer masks from FP operands).
// NOTE(review): braces and NoItinerary trailers reconstructed from the
// mangled source.
multiclass NeonI_3VSame_SD_sizes<bit u, bit size, bits<5> opcode,
                                 string asmop, SDPatternOperator opnode2S,
                                 SDPatternOperator opnode4S,
                                 SDPatternOperator opnode2D,
                                 ValueType ResTy2S, ValueType ResTy4S,
                                 ValueType ResTy2D, bit Commutable = 0> {
  let isCommutable = Commutable in {
    def _2S : NeonI_3VSame<0b0, u, {size, 0b0}, opcode,
                           (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
                           asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
                           [(set (ResTy2S VPR64:$Rd),
                              (ResTy2S (opnode2S (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))))],
                           NoItinerary>;

    def _4S : NeonI_3VSame<0b1, u, {size, 0b0}, opcode,
                           (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
                           asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
                           [(set (ResTy4S VPR128:$Rd),
                              (ResTy4S (opnode4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))))],
                           NoItinerary>;

    def _2D : NeonI_3VSame<0b1, u, {size, 0b1}, opcode,
                           (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
                           asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
                           [(set (ResTy2D VPR128:$Rd),
                              (ResTy2D (opnode2D (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))))],
                           NoItinerary>;
  }
}
//===----------------------------------------------------------------------===//
// Instruction Definitions
//===----------------------------------------------------------------------===//

// Vector Arithmetic Instructions

// Vector Add (Integer and Floating-Point)
defm ADDvvv  : NeonI_3VSame_BHSD_sizes<0b0, 0b10000, "add", add, 1>;
defm FADDvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11010, "fadd", fadd, fadd, fadd,
                                     v2f32, v4f32, v2f64, 1>;

// Vector Sub (Integer and Floating-Point)
defm SUBvvv  : NeonI_3VSame_BHSD_sizes<0b1, 0b10000, "sub", sub, 0>;
defm FSUBvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11010, "fsub", fsub, fsub, fsub,
                                     v2f32, v4f32, v2f64, 0>;

// Vector Multiply (Integer and Floating-Point)
defm MULvvv  : NeonI_3VSame_BHS_sizes<0b0, 0b10011, "mul", mul, 1>;
defm FMULvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11011, "fmul", fmul, fmul, fmul,
                                     v2f32, v4f32, v2f64, 1>;

// Vector Multiply (Polynomial)
defm PMULvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b10011, "pmul",
                                    int_arm_neon_vmulp, int_arm_neon_vmulp, 1>;
// Vector Multiply-accumulate and Multiply-subtract (Integer)

// class NeonI_3VSame_Constraint_impl: NeonI_3VSame with no data type and
// two operands constraints.
// NOTE(review): the NoItinerary operand and the record braces were lost in
// the source mangling and are reconstructed here.
class NeonI_3VSame_Constraint_impl<string asmop, string asmlane,
                                   RegisterClass VPRC, ValueType OpTy,
                                   bit q, bit u, bits<2> size, bits<5> opcode,
                                   SDPatternOperator opnode>
  : NeonI_3VSame<q, u, size, opcode,
                 (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, VPRC:$Rm),
                 asmop # "\t$Rd" # asmlane # ", $Rn" # asmlane # ", $Rm" # asmlane,
                 [(set (OpTy VPRC:$Rd),
                    (OpTy (opnode (OpTy VPRC:$src), (OpTy VPRC:$Rn), (OpTy VPRC:$Rm))))],
                 NoItinerary> {
  // $src is the accumulator input; tie it to the destination register.
  let Constraints = "$src = $Rd";
}
// Multiply-accumulate / multiply-subtract pattern fragments: Ra +/- Rn*Rm.
def Neon_mla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
                       (add node:$Ra, (mul node:$Rn, node:$Rm))>;

def Neon_mls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
                       (sub node:$Ra, (mul node:$Rn, node:$Rm))>;

def MLAvvv_8B:  NeonI_3VSame_Constraint_impl<"mla", ".8b",  VPR64,  v8i8,
                                             0b0, 0b0, 0b00, 0b10010, Neon_mla>;
def MLAvvv_16B: NeonI_3VSame_Constraint_impl<"mla", ".16b", VPR128, v16i8,
                                             0b1, 0b0, 0b00, 0b10010, Neon_mla>;
def MLAvvv_4H:  NeonI_3VSame_Constraint_impl<"mla", ".4h",  VPR64,  v4i16,
                                             0b0, 0b0, 0b01, 0b10010, Neon_mla>;
def MLAvvv_8H:  NeonI_3VSame_Constraint_impl<"mla", ".8h",  VPR128, v8i16,
                                             0b1, 0b0, 0b01, 0b10010, Neon_mla>;
def MLAvvv_2S:  NeonI_3VSame_Constraint_impl<"mla", ".2s",  VPR64,  v2i32,
                                             0b0, 0b0, 0b10, 0b10010, Neon_mla>;
def MLAvvv_4S:  NeonI_3VSame_Constraint_impl<"mla", ".4s",  VPR128, v4i32,
                                             0b1, 0b0, 0b10, 0b10010, Neon_mla>;

def MLSvvv_8B:  NeonI_3VSame_Constraint_impl<"mls", ".8b",  VPR64,  v8i8,
                                             0b0, 0b1, 0b00, 0b10010, Neon_mls>;
def MLSvvv_16B: NeonI_3VSame_Constraint_impl<"mls", ".16b", VPR128, v16i8,
                                             0b1, 0b1, 0b00, 0b10010, Neon_mls>;
def MLSvvv_4H:  NeonI_3VSame_Constraint_impl<"mls", ".4h",  VPR64,  v4i16,
                                             0b0, 0b1, 0b01, 0b10010, Neon_mls>;
def MLSvvv_8H:  NeonI_3VSame_Constraint_impl<"mls", ".8h",  VPR128, v8i16,
                                             0b1, 0b1, 0b01, 0b10010, Neon_mls>;
def MLSvvv_2S:  NeonI_3VSame_Constraint_impl<"mls", ".2s",  VPR64,  v2i32,
                                             0b0, 0b1, 0b10, 0b10010, Neon_mls>;
def MLSvvv_4S:  NeonI_3VSame_Constraint_impl<"mls", ".4s",  VPR128, v4i32,
                                             0b1, 0b1, 0b10, 0b10010, Neon_mls>;
// Vector Multiply-accumulate and Multiply-subtract (Floating Point)

def Neon_fmla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
                        (fadd node:$Ra, (fmul node:$Rn, node:$Rm))>;

def Neon_fmls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
                        (fsub node:$Ra, (fmul node:$Rn, node:$Rm))>;

// Only fuse separate fadd/fsub + fmul when fused MAC is enabled.
// NOTE(review): the closing brace of this `let` block was lost in the source
// mangling and is restored after FMLSvvv_2D.
let Predicates = [HasNEON, UseFusedMAC] in {
def FMLAvvv_2S: NeonI_3VSame_Constraint_impl<"fmla", ".2s", VPR64,  v2f32,
                                             0b0, 0b0, 0b00, 0b11001, Neon_fmla>;
def FMLAvvv_4S: NeonI_3VSame_Constraint_impl<"fmla", ".4s", VPR128, v4f32,
                                             0b1, 0b0, 0b00, 0b11001, Neon_fmla>;
def FMLAvvv_2D: NeonI_3VSame_Constraint_impl<"fmla", ".2d", VPR128, v2f64,
                                             0b1, 0b0, 0b01, 0b11001, Neon_fmla>;

def FMLSvvv_2S: NeonI_3VSame_Constraint_impl<"fmls", ".2s", VPR64,  v2f32,
                                             0b0, 0b0, 0b10, 0b11001, Neon_fmls>;
def FMLSvvv_4S: NeonI_3VSame_Constraint_impl<"fmls", ".4s", VPR128, v4f32,
                                             0b1, 0b0, 0b10, 0b11001, Neon_fmls>;
def FMLSvvv_2D: NeonI_3VSame_Constraint_impl<"fmls", ".2d", VPR128, v2f64,
                                             0b1, 0b0, 0b11, 0b11001, Neon_fmls>;
}
// We're also allowed to match the fma instruction regardless of compile
// options, since the generic fma node already implies fusion is permitted.
def : Pat<(v2f32 (fma VPR64:$Rn, VPR64:$Rm, VPR64:$Ra)),
          (FMLAvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
def : Pat<(v4f32 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
          (FMLAvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
def : Pat<(v2f64 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
          (FMLAvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;

// fma with a negated multiplicand maps onto the fused multiply-subtract.
def : Pat<(v2f32 (fma (fneg VPR64:$Rn), VPR64:$Rm, VPR64:$Ra)),
          (FMLSvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
def : Pat<(v4f32 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
          (FMLSvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
def : Pat<(v2f64 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
          (FMLSvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
// Vector Divide (Floating-Point)
defm FDIVvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11111, "fdiv", fdiv, fdiv, fdiv,
                                     v2f32, v4f32, v2f64, 0>;

// Vector Bitwise Operations

// Vector Bitwise AND
defm ANDvvv : NeonI_3VSame_B_sizes<0b0, 0b00, 0b00011, "and", and, and, 1>;

// Vector Bitwise Exclusive OR
defm EORvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b00011, "eor", xor, xor, 1>;

// Vector Bitwise OR
defm ORRvvv : NeonI_3VSame_B_sizes<0b0, 0b10, 0b00011, "orr", or, or, 1>;

// ORR disassembled as MOV if Vn==Vm

// Vector Move - register
// Alias for ORR if Vn=Vm and it is the preferred syntax
def : NeonInstAlias<"mov $Rd.8b, $Rn.8b",
                    (ORRvvv_8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rn)>;
def : NeonInstAlias<"mov $Rd.16b, $Rn.16b",
                    (ORRvvv_16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rn)>;
// Matches a NEON_MOVIMM whose decoded value is an all-ones byte splat
// (8-bit elements, value 0xff).
// NOTE(review): the `unsigned EltBits;` declaration and the `}]>;` terminator
// were lost in the source mangling and are restored here; EltBits is an
// out-parameter of A64Imms::decodeNeonModImm — confirm against that helper.
def Neon_immAllOnes: PatLeaf<(Neon_movi (i32 timm), (i32 imm)), [{
  ConstantSDNode *ImmConstVal = cast<ConstantSDNode>(N->getOperand(0));
  ConstantSDNode *OpCmodeConstVal = cast<ConstantSDNode>(N->getOperand(1));
  unsigned EltBits;
  uint64_t EltVal = A64Imms::decodeNeonModImm(ImmConstVal->getZExtValue(),
                                              OpCmodeConstVal->getZExtValue(),
                                              EltBits);
  return (EltBits == 8 && EltVal == 0xff);
}]>;
// Bitwise NOT expressed as XOR with an all-ones vector, per register width.
def Neon_not8B  : PatFrag<(ops node:$in),
                          (xor node:$in, (bitconvert (v8i8 Neon_immAllOnes)))>;
def Neon_not16B : PatFrag<(ops node:$in),
                          (xor node:$in, (bitconvert (v16i8 Neon_immAllOnes)))>;

// OR-NOT: Rn | ~Rm.
def Neon_orn8B  : PatFrag<(ops node:$Rn, node:$Rm),
                          (or node:$Rn, (Neon_not8B node:$Rm))>;
def Neon_orn16B : PatFrag<(ops node:$Rn, node:$Rm),
                          (or node:$Rn, (Neon_not16B node:$Rm))>;

// Bit-clear (AND-NOT): Rn & ~Rm.
def Neon_bic8B  : PatFrag<(ops node:$Rn, node:$Rm),
                          (and node:$Rn, (Neon_not8B node:$Rm))>;
def Neon_bic16B : PatFrag<(ops node:$Rn, node:$Rm),
                          (and node:$Rn, (Neon_not16B node:$Rm))>;

// Vector Bitwise OR NOT - register
defm ORNvvv : NeonI_3VSame_B_sizes<0b0, 0b11, 0b00011, "orn",
                                   Neon_orn8B, Neon_orn16B, 0>;

// Vector Bitwise Bit Clear (AND NOT) - register
defm BICvvv : NeonI_3VSame_B_sizes<0b0, 0b01, 0b00011, "bic",
                                   Neon_bic8B, Neon_bic16B, 0>;
// Maps the remaining vector types onto the byte-width bitwise instructions:
// bitwise ops are type-agnostic, so every 64-bit type selects the 8B form and
// every 128-bit type the 16B form.
// NOTE(review): the `Instruction INST8B,` parameter (referenced in the body)
// and the closing brace were lost in the source mangling and are restored.
multiclass Neon_bitwise2V_patterns<SDPatternOperator opnode8B,
                                   SDPatternOperator opnode16B,
                                   Instruction INST8B,
                                   Instruction INST16B> {
  def : Pat<(v2i32 (opnode8B VPR64:$Rn, VPR64:$Rm)),
            (INST8B VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v4i16 (opnode8B VPR64:$Rn, VPR64:$Rm)),
            (INST8B VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v1i64 (opnode8B VPR64:$Rn, VPR64:$Rm)),
            (INST8B VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v4i32 (opnode16B VPR128:$Rn, VPR128:$Rm)),
            (INST16B VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v8i16 (opnode16B VPR128:$Rn, VPR128:$Rm)),
            (INST16B VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v2i64 (opnode16B VPR128:$Rn, VPR128:$Rm)),
            (INST16B VPR128:$Rn, VPR128:$Rm)>;
}
// Additional patterns for bitwise instructions AND, EOR, ORR, BIC, ORN
defm : Neon_bitwise2V_patterns<and, and, ANDvvv_8B, ANDvvv_16B>;
defm : Neon_bitwise2V_patterns<or,  or,  ORRvvv_8B, ORRvvv_16B>;
defm : Neon_bitwise2V_patterns<xor, xor, EORvvv_8B, EORvvv_16B>;
defm : Neon_bitwise2V_patterns<Neon_bic8B, Neon_bic16B, BICvvv_8B, BICvvv_16B>;
defm : Neon_bitwise2V_patterns<Neon_orn8B, Neon_orn16B, ORNvvv_8B, ORNvvv_16B>;

// Vector Bitwise Select
def BSLvvv_8B  : NeonI_3VSame_Constraint_impl<"bsl", ".8b",  VPR64,  v8i8,
                                              0b0, 0b1, 0b01, 0b00011, Neon_bsl>;

def BSLvvv_16B : NeonI_3VSame_Constraint_impl<"bsl", ".16b", VPR128, v16i8,
                                              0b1, 0b1, 0b01, 0b00011, Neon_bsl>;
// Patterns selecting the three-operand bitwise-select instructions.
// NOTE(review): the `Instruction INST8B,` parameter (referenced in the body)
// and the closing brace were lost in the source mangling and are restored.
multiclass Neon_bitwise3V_patterns<SDPatternOperator opnode,
                                   Instruction INST8B,
                                   Instruction INST16B> {
  // Disassociate type from instruction definition
  def : Pat<(v2i32 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v4i16 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v1i64 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v4i32 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v8i16 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v2i64 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;

  // Allow to match BSL instruction pattern with non-constant operand:
  // (Rn & Rd) | (Rm & ~Rd) is the bitwise-select of Rn/Rm under mask Rd.
  def : Pat<(v8i8 (or (and VPR64:$Rn, VPR64:$Rd),
                      (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
            (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v4i16 (or (and VPR64:$Rn, VPR64:$Rd),
                       (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
            (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v2i32 (or (and VPR64:$Rn, VPR64:$Rd),
                       (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
            (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v1i64 (or (and VPR64:$Rn, VPR64:$Rd),
                       (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
            (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v16i8 (or (and VPR128:$Rn, VPR128:$Rd),
                       (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
            (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v8i16 (or (and VPR128:$Rn, VPR128:$Rd),
                       (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
            (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v4i32 (or (and VPR128:$Rn, VPR128:$Rd),
                       (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
            (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v2i64 (or (and VPR128:$Rn, VPR128:$Rd),
                       (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
            (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;

  // Allow to match llvm.arm.* intrinsics.
  def : Pat<(v8i8 (int_arm_neon_vbsl (v8i8 VPR64:$src),
                                     (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v4i16 (int_arm_neon_vbsl (v4i16 VPR64:$src),
                                      (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v2i32 (int_arm_neon_vbsl (v2i32 VPR64:$src),
                                      (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v1i64 (int_arm_neon_vbsl (v1i64 VPR64:$src),
                                      (v1i64 VPR64:$Rn), (v1i64 VPR64:$Rm))),
            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v2f32 (int_arm_neon_vbsl (v2f32 VPR64:$src),
                                      (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))),
            (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
  def : Pat<(v16i8 (int_arm_neon_vbsl (v16i8 VPR128:$src),
                                      (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v8i16 (int_arm_neon_vbsl (v8i16 VPR128:$src),
                                      (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v4i32 (int_arm_neon_vbsl (v4i32 VPR128:$src),
                                      (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v2i64 (int_arm_neon_vbsl (v2i64 VPR128:$src),
                                      (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v4f32 (int_arm_neon_vbsl (v4f32 VPR128:$src),
                                      (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
  def : Pat<(v2f64 (int_arm_neon_vbsl (v2f64 VPR128:$src),
                                      (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))),
            (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
}
// Additional patterns for bitwise instruction BSL
defm : Neon_bitwise3V_patterns<Neon_bsl, BSLvvv_8B, BSLvvv_16B>;

// PatFrag that never matches ((void)N; return false): gives BIT/BIF an
// operator for the instruction template without any ISel pattern — they are
// provided for the assembler/disassembler only.
def Neon_NoBSLop : PatFrag<(ops node:$src, node:$Rn, node:$Rm),
                           (Neon_bsl node:$src, node:$Rn, node:$Rm),
                           [{ (void)N; return false; }]>;

// Vector Bitwise Insert if True
def BITvvv_8B  : NeonI_3VSame_Constraint_impl<"bit", ".8b",  VPR64,  v8i8,
                                              0b0, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
def BITvvv_16B : NeonI_3VSame_Constraint_impl<"bit", ".16b", VPR128, v16i8,
                                              0b1, 0b1, 0b10, 0b00011, Neon_NoBSLop>;

// Vector Bitwise Insert if False
def BIFvvv_8B  : NeonI_3VSame_Constraint_impl<"bif", ".8b",  VPR64,  v8i8,
                                              0b0, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
def BIFvvv_16B : NeonI_3VSame_Constraint_impl<"bif", ".16b", VPR128, v16i8,
                                              0b1, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
// Vector Absolute Difference and Accumulate (Signed, Unsigned)

// Ra + |Rn - Rm| via the ARM absolute-difference intrinsics.
def Neon_uaba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
                        (add node:$Ra, (int_arm_neon_vabdu node:$Rn, node:$Rm))>;
def Neon_saba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
                        (add node:$Ra, (int_arm_neon_vabds node:$Rn, node:$Rm))>;

// Vector Absolute Difference and Accumulate (Unsigned)
def UABAvvv_8B  : NeonI_3VSame_Constraint_impl<"uaba", ".8b",  VPR64,  v8i8,
                                               0b0, 0b1, 0b00, 0b01111, Neon_uaba>;
def UABAvvv_16B : NeonI_3VSame_Constraint_impl<"uaba", ".16b", VPR128, v16i8,
                                               0b1, 0b1, 0b00, 0b01111, Neon_uaba>;
def UABAvvv_4H  : NeonI_3VSame_Constraint_impl<"uaba", ".4h",  VPR64,  v4i16,
                                               0b0, 0b1, 0b01, 0b01111, Neon_uaba>;
def UABAvvv_8H  : NeonI_3VSame_Constraint_impl<"uaba", ".8h",  VPR128, v8i16,
                                               0b1, 0b1, 0b01, 0b01111, Neon_uaba>;
def UABAvvv_2S  : NeonI_3VSame_Constraint_impl<"uaba", ".2s",  VPR64,  v2i32,
                                               0b0, 0b1, 0b10, 0b01111, Neon_uaba>;
def UABAvvv_4S  : NeonI_3VSame_Constraint_impl<"uaba", ".4s",  VPR128, v4i32,
                                               0b1, 0b1, 0b10, 0b01111, Neon_uaba>;

// Vector Absolute Difference and Accumulate (Signed)
def SABAvvv_8B  : NeonI_3VSame_Constraint_impl<"saba", ".8b",  VPR64,  v8i8,
                                               0b0, 0b0, 0b00, 0b01111, Neon_saba>;
def SABAvvv_16B : NeonI_3VSame_Constraint_impl<"saba", ".16b", VPR128, v16i8,
                                               0b1, 0b0, 0b00, 0b01111, Neon_saba>;
def SABAvvv_4H  : NeonI_3VSame_Constraint_impl<"saba", ".4h",  VPR64,  v4i16,
                                               0b0, 0b0, 0b01, 0b01111, Neon_saba>;
def SABAvvv_8H  : NeonI_3VSame_Constraint_impl<"saba", ".8h",  VPR128, v8i16,
                                               0b1, 0b0, 0b01, 0b01111, Neon_saba>;
def SABAvvv_2S  : NeonI_3VSame_Constraint_impl<"saba", ".2s",  VPR64,  v2i32,
                                               0b0, 0b0, 0b10, 0b01111, Neon_saba>;
def SABAvvv_4S  : NeonI_3VSame_Constraint_impl<"saba", ".4s",  VPR128, v4i32,
                                               0b1, 0b0, 0b10, 0b01111, Neon_saba>;
// Vector Absolute Difference (Signed, Unsigned)
defm UABDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01110, "uabd", int_arm_neon_vabdu, 0>;
defm SABDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01110, "sabd", int_arm_neon_vabds, 0>;

// Vector Absolute Difference (Floating Point)
defm FABDvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11010, "fabd",
                                     int_arm_neon_vabds, int_arm_neon_vabds,
                                     int_arm_neon_vabds, v2f32, v4f32, v2f64, 0>;

// Vector Reciprocal Step (Floating Point)
// NOTE(review): the 4S operand was dropped in the mangled source; restored as
// int_arm_neon_vrecps to satisfy the three-opnode signature of
// NeonI_3VSame_SD_sizes (cf. FRSQRTSvvv below, which uses one intrinsic for
// all three arrangements).
defm FRECPSvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11111, "frecps",
                                       int_arm_neon_vrecps,
                                       int_arm_neon_vrecps,
                                       int_arm_neon_vrecps,
                                       v2f32, v4f32, v2f64, 0>;

// Vector Reciprocal Square Root Step (Floating Point)
defm FRSQRTSvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11111, "frsqrts",
                                        int_arm_neon_vrsqrts,
                                        int_arm_neon_vrsqrts,
                                        int_arm_neon_vrsqrts,
                                        v2f32, v4f32, v2f64, 0>;
// Vector Comparisons

// PatFrags binding each condition code to the generic NEON_CMP node.
def Neon_cmeq  : PatFrag<(ops node:$lhs, node:$rhs),
                         (Neon_cmp node:$lhs, node:$rhs, SETEQ)>;
def Neon_cmphs : PatFrag<(ops node:$lhs, node:$rhs),
                         (Neon_cmp node:$lhs, node:$rhs, SETUGE)>;
def Neon_cmge  : PatFrag<(ops node:$lhs, node:$rhs),
                         (Neon_cmp node:$lhs, node:$rhs, SETGE)>;
def Neon_cmhi  : PatFrag<(ops node:$lhs, node:$rhs),
                         (Neon_cmp node:$lhs, node:$rhs, SETUGT)>;
def Neon_cmgt  : PatFrag<(ops node:$lhs, node:$rhs),
                         (Neon_cmp node:$lhs, node:$rhs, SETGT)>;

// NeonI_compare_aliases class: swaps register operands to implement
// comparison aliases, e.g., CMLE is alias for CMGE with operands reversed.
// NOTE(review): the `", $Rm"` asm-string fragment was lost in the source
// mangling; restored to match the three-operand result list below.
class NeonI_compare_aliases<string asmop, string asmlane,
                            Instruction inst, RegisterClass VPRC>
  : NeonInstAlias<asmop # "\t$Rd" # asmlane # ", $Rn" # asmlane #
                  ", $Rm" # asmlane,
                  (inst VPRC:$Rd, VPRC:$Rm, VPRC:$Rn), 0b0>;
// Vector Comparisons (Integer)

// Vector Compare Mask Equal (Integer)
// NOTE(review): the brace closing this `let` block was lost in the source
// mangling and is restored after CMEQvvv.
let isCommutable = 1 in {
defm CMEQvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b10001, "cmeq", Neon_cmeq, 0>;
}

// Vector Compare Mask Higher or Same (Unsigned Integer)
defm CMHSvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00111, "cmhs", Neon_cmphs, 0>;

// Vector Compare Mask Greater Than or Equal (Integer)
defm CMGEvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00111, "cmge", Neon_cmge, 0>;

// Vector Compare Mask Higher (Unsigned Integer)
defm CMHIvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00110, "cmhi", Neon_cmhi, 0>;

// Vector Compare Mask Greater Than (Integer)
defm CMGTvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00110, "cmgt", Neon_cmgt, 0>;

// Vector Compare Mask Bitwise Test (Integer)
defm CMTSTvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b10001, "cmtst", Neon_tst, 0>;
// Vector Compare Mask Less or Same (Unsigned Integer)
// CMLS is alias for CMHS with operands reversed.
def CMLSvvv_8B  : NeonI_compare_aliases<"cmls", ".8b",  CMHSvvv_8B,  VPR64>;
def CMLSvvv_16B : NeonI_compare_aliases<"cmls", ".16b", CMHSvvv_16B, VPR128>;
def CMLSvvv_4H  : NeonI_compare_aliases<"cmls", ".4h",  CMHSvvv_4H,  VPR64>;
def CMLSvvv_8H  : NeonI_compare_aliases<"cmls", ".8h",  CMHSvvv_8H,  VPR128>;
def CMLSvvv_2S  : NeonI_compare_aliases<"cmls", ".2s",  CMHSvvv_2S,  VPR64>;
def CMLSvvv_4S  : NeonI_compare_aliases<"cmls", ".4s",  CMHSvvv_4S,  VPR128>;
def CMLSvvv_2D  : NeonI_compare_aliases<"cmls", ".2d",  CMHSvvv_2D,  VPR128>;

// Vector Compare Mask Less Than or Equal (Integer)
// CMLE is alias for CMGE with operands reversed.
def CMLEvvv_8B  : NeonI_compare_aliases<"cmle", ".8b",  CMGEvvv_8B,  VPR64>;
def CMLEvvv_16B : NeonI_compare_aliases<"cmle", ".16b", CMGEvvv_16B, VPR128>;
def CMLEvvv_4H  : NeonI_compare_aliases<"cmle", ".4h",  CMGEvvv_4H,  VPR64>;
def CMLEvvv_8H  : NeonI_compare_aliases<"cmle", ".8h",  CMGEvvv_8H,  VPR128>;
def CMLEvvv_2S  : NeonI_compare_aliases<"cmle", ".2s",  CMGEvvv_2S,  VPR64>;
def CMLEvvv_4S  : NeonI_compare_aliases<"cmle", ".4s",  CMGEvvv_4S,  VPR128>;
def CMLEvvv_2D  : NeonI_compare_aliases<"cmle", ".2d",  CMGEvvv_2D,  VPR128>;

// Vector Compare Mask Lower (Unsigned Integer)
// CMLO is alias for CMHI with operands reversed.
def CMLOvvv_8B  : NeonI_compare_aliases<"cmlo", ".8b",  CMHIvvv_8B,  VPR64>;
def CMLOvvv_16B : NeonI_compare_aliases<"cmlo", ".16b", CMHIvvv_16B, VPR128>;
def CMLOvvv_4H  : NeonI_compare_aliases<"cmlo", ".4h",  CMHIvvv_4H,  VPR64>;
def CMLOvvv_8H  : NeonI_compare_aliases<"cmlo", ".8h",  CMHIvvv_8H,  VPR128>;
def CMLOvvv_2S  : NeonI_compare_aliases<"cmlo", ".2s",  CMHIvvv_2S,  VPR64>;
def CMLOvvv_4S  : NeonI_compare_aliases<"cmlo", ".4s",  CMHIvvv_4S,  VPR128>;
def CMLOvvv_2D  : NeonI_compare_aliases<"cmlo", ".2d",  CMHIvvv_2D,  VPR128>;

// Vector Compare Mask Less Than (Integer)
// CMLT is alias for CMGT with operands reversed.
def CMLTvvv_8B  : NeonI_compare_aliases<"cmlt", ".8b",  CMGTvvv_8B,  VPR64>;
def CMLTvvv_16B : NeonI_compare_aliases<"cmlt", ".16b", CMGTvvv_16B, VPR128>;
def CMLTvvv_4H  : NeonI_compare_aliases<"cmlt", ".4h",  CMGTvvv_4H,  VPR64>;
def CMLTvvv_8H  : NeonI_compare_aliases<"cmlt", ".8h",  CMGTvvv_8H,  VPR128>;
def CMLTvvv_2S  : NeonI_compare_aliases<"cmlt", ".2s",  CMGTvvv_2S,  VPR64>;
def CMLTvvv_4S  : NeonI_compare_aliases<"cmlt", ".4s",  CMGTvvv_4S,  VPR128>;
def CMLTvvv_2D  : NeonI_compare_aliases<"cmlt", ".2d",  CMGTvvv_2D,  VPR128>;
// Assembler operand class for the literal #0 immediate used by the
// compare-against-zero instructions.
// NOTE(review): the record braces (and the conventional `let Name`) were lost
// in the source mangling and are reconstructed — confirm the Name string
// against the AArch64 asm parser's expectations.
def neon_uimm0_asmoperand : AsmOperandClass {
  let Name = "UImm0";
  let PredicateMethod = "isUImm<0>";
  let RenderMethod = "addImmOperands";
}

// i32 operand that only matches the constant 0.
def neon_uimm0 : Operand<i32>, ImmLeaf<i32, [{return Imm == 0;}]> {
  let ParserMatchClass = neon_uimm0_asmoperand;
  let PrintMethod = "printNeonUImm0Operand";
}
// Integer compare-against-zero across all element arrangements; $Imm only
// ever matches the literal 0 (see neon_uimm0).
// NOTE(review): braces and NoItinerary trailers reconstructed from the
// mangled source — confirm against the NeonI_2VMisc class definition.
multiclass NeonI_cmpz_sizes<bit u, bits<5> opcode, string asmop, CondCode CC> {
  def _8B : NeonI_2VMisc<0b0, u, 0b00, opcode,
                         (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
                         asmop # "\t$Rd.8b, $Rn.8b, $Imm",
                         [(set (v8i8 VPR64:$Rd),
                            (v8i8 (Neon_cmpz (v8i8 VPR64:$Rn), (i32 imm:$Imm), CC)))],
                         NoItinerary>;

  def _16B : NeonI_2VMisc<0b1, u, 0b00, opcode,
                          (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
                          asmop # "\t$Rd.16b, $Rn.16b, $Imm",
                          [(set (v16i8 VPR128:$Rd),
                             (v16i8 (Neon_cmpz (v16i8 VPR128:$Rn), (i32 imm:$Imm), CC)))],
                          NoItinerary>;

  def _4H : NeonI_2VMisc<0b0, u, 0b01, opcode,
                         (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
                         asmop # "\t$Rd.4h, $Rn.4h, $Imm",
                         [(set (v4i16 VPR64:$Rd),
                            (v4i16 (Neon_cmpz (v4i16 VPR64:$Rn), (i32 imm:$Imm), CC)))],
                         NoItinerary>;

  def _8H : NeonI_2VMisc<0b1, u, 0b01, opcode,
                         (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
                         asmop # "\t$Rd.8h, $Rn.8h, $Imm",
                         [(set (v8i16 VPR128:$Rd),
                            (v8i16 (Neon_cmpz (v8i16 VPR128:$Rn), (i32 imm:$Imm), CC)))],
                         NoItinerary>;

  def _2S : NeonI_2VMisc<0b0, u, 0b10, opcode,
                         (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
                         asmop # "\t$Rd.2s, $Rn.2s, $Imm",
                         [(set (v2i32 VPR64:$Rd),
                            (v2i32 (Neon_cmpz (v2i32 VPR64:$Rn), (i32 imm:$Imm), CC)))],
                         NoItinerary>;

  def _4S : NeonI_2VMisc<0b1, u, 0b10, opcode,
                         (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
                         asmop # "\t$Rd.4s, $Rn.4s, $Imm",
                         [(set (v4i32 VPR128:$Rd),
                            (v4i32 (Neon_cmpz (v4i32 VPR128:$Rn), (i32 imm:$Imm), CC)))],
                         NoItinerary>;

  def _2D : NeonI_2VMisc<0b1, u, 0b11, opcode,
                         (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
                         asmop # "\t$Rd.2d, $Rn.2d, $Imm",
                         [(set (v2i64 VPR128:$Rd),
                            (v2i64 (Neon_cmpz (v2i64 VPR128:$Rn), (i32 imm:$Imm), CC)))],
                         NoItinerary>;
}
// Vector Compare Mask Equal to Zero (Integer)
defm CMEQvvi : NeonI_cmpz_sizes<0b0, 0b01001, "cmeq", SETEQ>;

// Vector Compare Mask Greater Than or Equal to Zero (Signed Integer)
defm CMGEvvi : NeonI_cmpz_sizes<0b1, 0b01000, "cmge", SETGE>;

// Vector Compare Mask Greater Than Zero (Signed Integer)
defm CMGTvvi : NeonI_cmpz_sizes<0b0, 0b01000, "cmgt", SETGT>;

// Vector Compare Mask Less Than or Equal To Zero (Signed Integer)
defm CMLEvvi : NeonI_cmpz_sizes<0b1, 0b01001, "cmle", SETLE>;

// Vector Compare Mask Less Than Zero (Signed Integer)
defm CMLTvvi : NeonI_cmpz_sizes<0b0, 0b01010, "cmlt", SETLT>;
// Vector Comparisons (Floating Point)

// Vector Compare Mask Equal (Floating Point)
// NOTE(review): the brace closing this `let` block was lost in the source
// mangling and is restored after FCMEQvvv.
let isCommutable = 1 in {
defm FCMEQvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11100, "fcmeq", Neon_cmeq,
                                      Neon_cmeq, Neon_cmeq,
                                      v2i32, v4i32, v2i64, 0>;
}

// Vector Compare Mask Greater Than Or Equal (Floating Point)
defm FCMGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11100, "fcmge", Neon_cmge,
                                      Neon_cmge, Neon_cmge,
                                      v2i32, v4i32, v2i64, 0>;

// Vector Compare Mask Greater Than (Floating Point)
defm FCMGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11100, "fcmgt", Neon_cmgt,
                                      Neon_cmgt, Neon_cmgt,
                                      v2i32, v4i32, v2i64, 0>;

// Vector Compare Mask Less Than Or Equal (Floating Point)
// FCMLE is alias for FCMGE with operands reversed.
def FCMLEvvv_2S : NeonI_compare_aliases<"fcmle", ".2s", FCMGEvvv_2S, VPR64>;
def FCMLEvvv_4S : NeonI_compare_aliases<"fcmle", ".4s", FCMGEvvv_4S, VPR128>;
def FCMLEvvv_2D : NeonI_compare_aliases<"fcmle", ".2d", FCMGEvvv_2D, VPR128>;

// Vector Compare Mask Less Than (Floating Point)
// FCMLT is alias for FCMGT with operands reversed.
def FCMLTvvv_2S : NeonI_compare_aliases<"fcmlt", ".2s", FCMGTvvv_2S, VPR64>;
def FCMLTvvv_4S : NeonI_compare_aliases<"fcmlt", ".4s", FCMGTvvv_4S, VPR128>;
def FCMLTvvv_2D : NeonI_compare_aliases<"fcmlt", ".2d", FCMGTvvv_2D, VPR128>;
// FP compare-against-zero (the $FPImm operand only ever matches 0.0);
// results are integer masks of the same lane width.
// NOTE(review): braces and NoItinerary trailers reconstructed from the
// mangled source — confirm against the NeonI_2VMisc class definition.
multiclass NeonI_fpcmpz_sizes<bit u, bit size, bits<5> opcode,
                              string asmop, CondCode CC> {
  def _2S : NeonI_2VMisc<0b0, u, {size, 0b0}, opcode,
                         (outs VPR64:$Rd), (ins VPR64:$Rn, fpz32:$FPImm),
                         asmop # "\t$Rd.2s, $Rn.2s, $FPImm",
                         [(set (v2i32 VPR64:$Rd),
                            (v2i32 (Neon_cmpz (v2f32 VPR64:$Rn), (f32 fpimm:$FPImm), CC)))],
                         NoItinerary>;

  def _4S : NeonI_2VMisc<0b1, u, {size, 0b0}, opcode,
                         (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
                         asmop # "\t$Rd.4s, $Rn.4s, $FPImm",
                         [(set (v4i32 VPR128:$Rd),
                            (v4i32 (Neon_cmpz (v4f32 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))],
                         NoItinerary>;

  def _2D : NeonI_2VMisc<0b1, u, {size, 0b1}, opcode,
                         (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
                         asmop # "\t$Rd.2d, $Rn.2d, $FPImm",
                         [(set (v2i64 VPR128:$Rd),
                            (v2i64 (Neon_cmpz (v2f64 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))],
                         NoItinerary>;
}
// Vector Compare Mask Equal to Zero (Floating Point)
defm FCMEQvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01101, "fcmeq", SETEQ>;

// Vector Compare Mask Greater Than or Equal to Zero (Floating Point)
defm FCMGEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01100, "fcmge", SETGE>;

// Vector Compare Mask Greater Than Zero (Floating Point)
defm FCMGTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01100, "fcmgt", SETGT>;

// Vector Compare Mask Less Than or Equal To Zero (Floating Point)
defm FCMLEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01101, "fcmle", SETLE>;

// Vector Compare Mask Less Than Zero (Floating Point)
defm FCMLTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01110, "fcmlt", SETLT>;
788 // Vector Absolute Comparisons (Floating Point)
790 // Vector Absolute Compare Mask Greater Than Or Equal (Floating Point)
791 defm FACGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11101, "facge",
792 int_arm_neon_vacged, int_arm_neon_vacgeq,
793 int_aarch64_neon_vacgeq,
794 v2i32, v4i32, v2i64, 0>;
796 // Vector Absolute Compare Mask Greater Than (Floating Point)
797 defm FACGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11101, "facgt",
798 int_arm_neon_vacgtd, int_arm_neon_vacgtq,
799 int_aarch64_neon_vacgtq,
800 v2i32, v4i32, v2i64, 0>;
802 // Vector Absolute Compare Mask Less Than Or Equal (Floating Point)
803 // FACLE is alias for FACGE with operands reversed.
804 def FACLEvvv_2S : NeonI_compare_aliases<"facle", ".2s", FACGEvvv_2S, VPR64>;
805 def FACLEvvv_4S : NeonI_compare_aliases<"facle", ".4s", FACGEvvv_4S, VPR128>;
806 def FACLEvvv_2D : NeonI_compare_aliases<"facle", ".2d", FACGEvvv_2D, VPR128>;
808 // Vector Absolute Compare Mask Less Than (Floating Point)
809 // FACLT is alias for FACGT with operands reversed.
810 def FACLTvvv_2S : NeonI_compare_aliases<"faclt", ".2s", FACGTvvv_2S, VPR64>;
811 def FACLTvvv_4S : NeonI_compare_aliases<"faclt", ".4s", FACGTvvv_4S, VPR128>;
812 def FACLTvvv_2D : NeonI_compare_aliases<"faclt", ".2d", FACGTvvv_2D, VPR128>;
// Integer halving add/sub and rounding halving add, selected from the ARM
// NEON halving intrinsics; the trailing flag marks the ops commutable
// (adds: 1) or not (subs: 0).
814 // Vector halving add (Integer Signed, Unsigned)
815 defm SHADDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00000, "shadd",
816 int_arm_neon_vhadds, 1>;
817 defm UHADDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00000, "uhadd",
818 int_arm_neon_vhaddu, 1>;
820 // Vector halving sub (Integer Signed, Unsigned)
821 defm SHSUBvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00100, "shsub",
822 int_arm_neon_vhsubs, 0>;
823 defm UHSUBvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00100, "uhsub",
824 int_arm_neon_vhsubu, 0>;
826 // Vector rounding halving add (Integer Signed, Unsigned)
827 defm SRHADDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b00010, "srhadd",
828 int_arm_neon_vrhadds, 1>;
829 defm URHADDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b00010, "urhadd",
830 int_arm_neon_vrhaddu, 1>;
// Integer saturating add/sub. The trailing flag is the Commutable bit
// (sets isCommutable on the generated instructions).
832 // Vector Saturating add (Integer Signed, Unsigned)
833 defm SQADDvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00001, "sqadd",
834 int_arm_neon_vqadds, 1>;
835 defm UQADDvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00001, "uqadd",
836 int_arm_neon_vqaddu, 1>;
838 // Vector Saturating sub (Integer Signed, Unsigned)
// Subtraction is not commutative, so Commutable must be 0 — consistent with
// SHSUBvvv/UHSUBvvv above and the scalar SQSUB/UQSUB defm later in this file.
// (The previous value of 1 would let the register allocator / two-address
// pass swap Rn and Rm, miscompiling sqsub/uqsub.)
839 defm SQSUBvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00101, "sqsub",
840 int_arm_neon_vqsubs, 0>;
841 defm UQSUBvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00101, "uqsub",
842 int_arm_neon_vqsubu, 0>;
// Register-controlled vector shifts and integer max/min.
// NOTE(review): all the *SHL defm below pass Commutable = 1, but shifts are
// not commutative (Rn is the data, Rm the shift amount) — confirm whether the
// flag is ignored by this multiclass or should be 0.
844 // Vector Shift Left (Signed and Unsigned Integer)
845 defm SSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01000, "sshl",
846 int_arm_neon_vshifts, 1>;
847 defm USHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01000, "ushl",
848 int_arm_neon_vshiftu, 1>;
850 // Vector Saturating Shift Left (Signed and Unsigned Integer)
851 defm SQSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01001, "sqshl",
852 int_arm_neon_vqshifts, 1>;
853 defm UQSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01001, "uqshl",
854 int_arm_neon_vqshiftu, 1>;
856 // Vector Rounding Shift Left (Signed and Unsigned Integer)
857 defm SRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01010, "srshl",
858 int_arm_neon_vrshifts, 1>;
859 defm URSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01010, "urshl",
860 int_arm_neon_vrshiftu, 1>;
862 // Vector Saturating Rounding Shift Left (Signed and Unsigned Integer)
863 defm SQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01011, "sqrshl",
864 int_arm_neon_vqrshifts, 1>;
865 defm UQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01011, "uqrshl",
866 int_arm_neon_vqrshiftu, 1>;
868 // Vector Maximum (Signed and Unsigned Integer)
869 defm SMAXvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01100, "smax", int_arm_neon_vmaxs, 1>;
870 defm UMAXvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01100, "umax", int_arm_neon_vmaxu, 1>;
872 // Vector Minimum (Signed and Unsigned Integer)
873 defm SMINvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01101, "smin", int_arm_neon_vmins, 1>;
874 defm UMINvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01101, "umin", int_arm_neon_vminu, 1>;
// Floating-point max/min, the IEEE 754-2008 maxNum/minNum variants, and the
// pairwise forms of all four.
876 // Vector Maximum (Floating Point)
877 defm FMAXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11110, "fmax",
878 int_arm_neon_vmaxs, int_arm_neon_vmaxs,
879 int_arm_neon_vmaxs, v2f32, v4f32, v2f64, 1>;
881 // Vector Minimum (Floating Point)
882 defm FMINvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11110, "fmin",
883 int_arm_neon_vmins, int_arm_neon_vmins,
884 int_arm_neon_vmins, v2f32, v4f32, v2f64, 1>;
886 // Vector maxNum (Floating Point) - prefer a number over a quiet NaN
887 defm FMAXNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11000, "fmaxnm",
888 int_aarch64_neon_vmaxnm,
889 int_aarch64_neon_vmaxnm,
890 int_aarch64_neon_vmaxnm,
891 v2f32, v4f32, v2f64, 1>;
893 // Vector minNum (Floating Point) - prefer a number over a quiet NaN
894 defm FMINNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11000, "fminnm",
895 int_aarch64_neon_vminnm,
896 int_aarch64_neon_vminnm,
897 int_aarch64_neon_vminnm,
898 v2f32, v4f32, v2f64, 1>;
900 // Vector Maximum Pairwise (Signed and Unsigned Integer)
901 defm SMAXPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10100, "smaxp", int_arm_neon_vpmaxs, 1>;
902 defm UMAXPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10100, "umaxp", int_arm_neon_vpmaxu, 1>;
904 // Vector Minimum Pairwise (Signed and Unsigned Integer)
905 defm SMINPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10101, "sminp", int_arm_neon_vpmins, 1>;
906 defm UMINPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10101, "uminp", int_arm_neon_vpminu, 1>;
908 // Vector Maximum Pairwise (Floating Point)
909 defm FMAXPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11110, "fmaxp",
910 int_arm_neon_vpmaxs, int_arm_neon_vpmaxs,
911 int_arm_neon_vpmaxs, v2f32, v4f32, v2f64, 1>;
913 // Vector Minimum Pairwise (Floating Point)
914 defm FMINPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11110, "fminp",
915 int_arm_neon_vpmins, int_arm_neon_vpmins,
916 int_arm_neon_vpmins, v2f32, v4f32, v2f64, 1>;
918 // Vector maxNum Pairwise (Floating Point) - prefer a number over a quiet NaN
919 defm FMAXNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11000, "fmaxnmp",
920 int_aarch64_neon_vpmaxnm,
921 int_aarch64_neon_vpmaxnm,
922 int_aarch64_neon_vpmaxnm,
923 v2f32, v4f32, v2f64, 1>;
925 // Vector minNum Pairwise (Floating Point) - prefer a number over a quiet NaN
926 defm FMINNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11000, "fminnmp",
927 int_aarch64_neon_vpminnm,
928 int_aarch64_neon_vpminnm,
929 int_aarch64_neon_vpminnm,
930 v2f32, v4f32, v2f64, 1>;
// Pairwise addition, saturating doubling multiply-high, and FMULX.
// NOTE(review): the FADDP defm below is missing its intrinsic argument lines
// in this extract (embedded lines 937-939 absent) — restore from the full file.
932 // Vector Addition Pairwise (Integer)
933 defm ADDP : NeonI_3VSame_BHSD_sizes<0b0, 0b10111, "addp", int_arm_neon_vpadd, 1>;
935 // Vector Addition Pairwise (Floating Point)
936 defm FADDP : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11010, "faddp",
940 v2f32, v4f32, v2f64, 1>;
942 // Vector Saturating Doubling Multiply High
943 defm SQDMULHvvv : NeonI_3VSame_HS_sizes<0b0, 0b10110, "sqdmulh",
944 int_arm_neon_vqdmulh, 1>;
946 // Vector Saturating Rounding Doubling Multiply High
947 defm SQRDMULHvvv : NeonI_3VSame_HS_sizes<0b1, 0b10110, "sqrdmulh",
948 int_arm_neon_vqrdmulh, 1>;
950 // Vector Multiply Extended (Floating Point)
951 defm FMULXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11011, "fmulx",
952 int_aarch64_neon_vmulx,
953 int_aarch64_neon_vmulx,
954 int_aarch64_neon_vmulx,
955 v2f32, v4f32, v2f64, 1>;
// Assembler operand classes and the SDNodeXForm used by the MOVI/MVNI family.
// The XForm decodes the OpCmode encoding back into a shift amount for
// instruction selection.
// NOTE(review): braces and some declarations (e.g. ShiftImm / HasShift) are
// missing from this extract — confirm against the full file.
957 // Vector Immediate Instructions
959 multiclass neon_mov_imm_shift_asmoperands<string PREFIX>
961 def _asmoperand : AsmOperandClass
963 let Name = "NeonMovImmShift" # PREFIX;
964 let RenderMethod = "addNeonMovImmShift" # PREFIX # "Operands";
965 let PredicateMethod = "isNeonMovImmShift" # PREFIX;
969 // Definition of vector immediates shift operands
971 // The selectable use-cases extract the shift operation
972 // information from the OpCmode fields encoded in the immediate.
973 def neon_mod_shift_imm_XFORM : SDNodeXForm<imm, [{
974 uint64_t OpCmode = N->getZExtValue();
976 unsigned ShiftOnesIn;
978 A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
979 if (!HasShift) return SDValue();
980 return CurDAG->getTargetConstant(ShiftImm, MVT::i32);
983 // Vector immediates shift operands which accept LSL and MSL
984 // shift operators with shift value in the range of 0, 8, 16, 24 (LSL),
985 // or 0, 8 (LSLH) or 8, 16 (MSL).
986 defm neon_mov_imm_LSL : neon_mov_imm_shift_asmoperands<"LSL">;
987 defm neon_mov_imm_MSL : neon_mov_imm_shift_asmoperands<"MSL">;
988 // LSLH restricts shift amount to 0, 8 out of 0, 8, 16, 24
989 defm neon_mov_imm_LSLH : neon_mov_imm_shift_asmoperands<"LSLH">;
// Operand definitions for the MOVI/MVNI shifted immediates; each defm's
// ImmLeaf predicate selects by the ShiftOnesIn bit (LSL shifts in zeros,
// MSL shifts in ones; LSLH is the halfword-restricted LSL form).
991 multiclass neon_mov_imm_shift_operands<string PREFIX,
992 string HALF, string ISHALF, code pred>
994 def _operand : Operand<i32>, ImmLeaf<i32, pred, neon_mod_shift_imm_XFORM>
997 "printNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
999 "DecodeNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
1000 let ParserMatchClass =
1001 !cast<AsmOperandClass>("neon_mov_imm_" # PREFIX # HALF # "_asmoperand");
1005 defm neon_mov_imm_LSL : neon_mov_imm_shift_operands<"LSL", "", "false", [{
1007 unsigned ShiftOnesIn;
1009 A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1010 return (HasShift && !ShiftOnesIn);
1013 defm neon_mov_imm_MSL : neon_mov_imm_shift_operands<"MSL", "", "false", [{
1015 unsigned ShiftOnesIn;
1017 A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1018 return (HasShift && ShiftOnesIn);
1021 defm neon_mov_imm_LSLH : neon_mov_imm_shift_operands<"LSL", "H", "true", [{
1023 unsigned ShiftOnesIn;
1025 A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1026 return (HasShift && !ShiftOnesIn);
// Plain 8-bit unsigned immediate operand and the 64-bit bytemask operand
// (each byte 0x00 or 0xff, encoded as one bit per byte in a uimm8).
1029 def neon_uimm8_asmoperand : AsmOperandClass
1032 let PredicateMethod = "isUImm<8>";
1033 let RenderMethod = "addImmOperands";
1036 def neon_uimm8 : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
1037 let ParserMatchClass = neon_uimm8_asmoperand;
1038 let PrintMethod = "printNeonUImm8Operand";
1041 def neon_uimm64_mask_asmoperand : AsmOperandClass
1043 let Name = "NeonUImm64Mask";
1044 let PredicateMethod = "isNeonUImm64Mask";
1045 let RenderMethod = "addNeonUImm64MaskOperands";
1048 // MCOperand for 64-bit bytemask with each byte having only the
1049 // value 0x00 and 0xff is encoded as an unsigned 8-bit value
1050 def neon_uimm64_mask : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
1051 let ParserMatchClass = neon_uimm64_mask_asmoperand;
1052 let PrintMethod = "printNeonUImm64MaskOperand";
// MOVI/MVNI with LSL-shifted immediate: per-word (2s/4s, 2-bit shift encoded
// in cmode{2:1}) and per-halfword (4h/8h, 1-bit shift encoded in cmode{1}).
1055 multiclass NeonI_mov_imm_lsl_sizes<string asmop, bit op,
1056 SDPatternOperator opnode>
1058 // shift zeros, per word
1059 def _2S : NeonI_1VModImm<0b0, op,
1061 (ins neon_uimm8:$Imm,
1062 neon_mov_imm_LSL_operand:$Simm),
1063 !strconcat(asmop, " $Rd.2s, $Imm$Simm"),
1064 [(set (v2i32 VPR64:$Rd),
1065 (v2i32 (opnode (timm:$Imm),
1066 (neon_mov_imm_LSL_operand:$Simm))))],
1069 let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
1072 def _4S : NeonI_1VModImm<0b1, op,
1074 (ins neon_uimm8:$Imm,
1075 neon_mov_imm_LSL_operand:$Simm),
1076 !strconcat(asmop, " $Rd.4s, $Imm$Simm"),
1077 [(set (v4i32 VPR128:$Rd),
1078 (v4i32 (opnode (timm:$Imm),
1079 (neon_mov_imm_LSL_operand:$Simm))))],
1082 let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
1085 // shift zeros, per halfword
1086 def _4H : NeonI_1VModImm<0b0, op,
1088 (ins neon_uimm8:$Imm,
1089 neon_mov_imm_LSLH_operand:$Simm),
1090 !strconcat(asmop, " $Rd.4h, $Imm$Simm"),
1091 [(set (v4i16 VPR64:$Rd),
1092 (v4i16 (opnode (timm:$Imm),
1093 (neon_mov_imm_LSLH_operand:$Simm))))],
1096 let cmode = {0b1, 0b0, Simm, 0b0};
1099 def _8H : NeonI_1VModImm<0b1, op,
1101 (ins neon_uimm8:$Imm,
1102 neon_mov_imm_LSLH_operand:$Simm),
1103 !strconcat(asmop, " $Rd.8h, $Imm$Simm"),
1104 [(set (v8i16 VPR128:$Rd),
1105 (v8i16 (opnode (timm:$Imm),
1106 (neon_mov_imm_LSLH_operand:$Simm))))],
1109 let cmode = {0b1, 0b0, Simm, 0b0};
// BIC/ORR with LSL-shifted immediate: read-modify-write forms, hence the
// "$src = $Rd" tied-operand constraint and the extra $src input.
// NOTE(review): the _4H/_8H defs take neon_mov_imm_LSLH_operand in (ins) but
// reference neon_mov_imm_LSL_operand inside the pattern — likely a latent
// inconsistency; confirm intended operand class before relying on selection.
1113 multiclass NeonI_mov_imm_with_constraint_lsl_sizes<string asmop, bit op,
1114 SDPatternOperator opnode,
1115 SDPatternOperator neonopnode>
1117 let Constraints = "$src = $Rd" in {
1118 // shift zeros, per word
1119 def _2S : NeonI_1VModImm<0b0, op,
1121 (ins VPR64:$src, neon_uimm8:$Imm,
1122 neon_mov_imm_LSL_operand:$Simm),
1123 !strconcat(asmop, " $Rd.2s, $Imm$Simm"),
1124 [(set (v2i32 VPR64:$Rd),
1125 (v2i32 (opnode (v2i32 VPR64:$src),
1126 (v2i32 (bitconvert (v2i32 (neonopnode timm:$Imm,
1127 neon_mov_imm_LSL_operand:$Simm)))))))],
1130 let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
1133 def _4S : NeonI_1VModImm<0b1, op,
1135 (ins VPR128:$src, neon_uimm8:$Imm,
1136 neon_mov_imm_LSL_operand:$Simm),
1137 !strconcat(asmop, " $Rd.4s, $Imm$Simm"),
1138 [(set (v4i32 VPR128:$Rd),
1139 (v4i32 (opnode (v4i32 VPR128:$src),
1140 (v4i32 (bitconvert (v4i32 (neonopnode timm:$Imm,
1141 neon_mov_imm_LSL_operand:$Simm)))))))],
1144 let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
1147 // shift zeros, per halfword
1148 def _4H : NeonI_1VModImm<0b0, op,
1150 (ins VPR64:$src, neon_uimm8:$Imm,
1151 neon_mov_imm_LSLH_operand:$Simm),
1152 !strconcat(asmop, " $Rd.4h, $Imm$Simm"),
1153 [(set (v4i16 VPR64:$Rd),
1154 (v4i16 (opnode (v4i16 VPR64:$src),
1155 (v4i16 (bitconvert (v4i16 (neonopnode timm:$Imm,
1156 neon_mov_imm_LSL_operand:$Simm)))))))],
1159 let cmode = {0b1, 0b0, Simm, 0b1};
1162 def _8H : NeonI_1VModImm<0b1, op,
1164 (ins VPR128:$src, neon_uimm8:$Imm,
1165 neon_mov_imm_LSLH_operand:$Simm),
1166 !strconcat(asmop, " $Rd.8h, $Imm$Simm"),
1167 [(set (v8i16 VPR128:$Rd),
1168 (v8i16 (opnode (v8i16 VPR128:$src),
1169 (v8i16 (bitconvert (v8i16 (neonopnode timm:$Imm,
1170 neon_mov_imm_LSL_operand:$Simm)))))))],
1173 let cmode = {0b1, 0b0, Simm, 0b1};
// MOVI/MVNI with MSL ("shift ones") immediate, per-word only; the MSL shift
// select bit goes in cmode{0}.
1178 multiclass NeonI_mov_imm_msl_sizes<string asmop, bit op,
1179 SDPatternOperator opnode>
1181 // shift ones, per word
1182 def _2S : NeonI_1VModImm<0b0, op,
1184 (ins neon_uimm8:$Imm,
1185 neon_mov_imm_MSL_operand:$Simm),
1186 !strconcat(asmop, " $Rd.2s, $Imm$Simm"),
1187 [(set (v2i32 VPR64:$Rd),
1188 (v2i32 (opnode (timm:$Imm),
1189 (neon_mov_imm_MSL_operand:$Simm))))],
1192 let cmode = {0b1, 0b1, 0b0, Simm};
1195 def _4S : NeonI_1VModImm<0b1, op,
1197 (ins neon_uimm8:$Imm,
1198 neon_mov_imm_MSL_operand:$Simm),
1199 !strconcat(asmop, " $Rd.4s, $Imm$Simm"),
1200 [(set (v4i32 VPR128:$Rd),
1201 (v4i32 (opnode (timm:$Imm),
1202 (neon_mov_imm_MSL_operand:$Simm))))],
1205 let cmode = {0b1, 0b1, 0b0, Simm};
// Instantiations of the LSL-immediate move/bitwise multiclasses. All are
// rematerializable (pure immediate producers / immediate-modifiers).
// NOTE(review): the BIC/ORR defm argument lists are truncated in this
// extract (missing opnode/neonopnode lines and closing braces).
1209 // Vector Move Immediate Shifted
1210 let isReMaterializable = 1 in {
1211 defm MOVIvi_lsl : NeonI_mov_imm_lsl_sizes<"movi", 0b0, Neon_movi>;
1214 // Vector Move Inverted Immediate Shifted
1215 let isReMaterializable = 1 in {
1216 defm MVNIvi_lsl : NeonI_mov_imm_lsl_sizes<"mvni", 0b1, Neon_mvni>;
1219 // Vector Bitwise Bit Clear (AND NOT) - immediate
1220 let isReMaterializable = 1 in {
1221 defm BICvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"bic", 0b1,
1225 // Vector Bitwise OR - immediate
1227 let isReMaterializable = 1 in {
1228 defm ORRvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"orr", 0b0,
// Select BIC from (and A, Neon_movi 0xff [LSL #8]): a MOVI of 0xff at one
// byte position is the complement of a BIC of 0x00 at the other, so the
// XForm flips the encoded LSLH shift amount (0 <-> 1).
1232 // Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
1233 // LowerBUILD_VECTOR favors lowering MOVI over MVNI.
1234 // BIC immediate instructions selection requires additional patterns to
1235 // transform Neon_movi operands into BIC immediate operands
1237 def neon_mov_imm_LSLH_transform_XFORM : SDNodeXForm<imm, [{
1238 uint64_t OpCmode = N->getZExtValue();
1240 unsigned ShiftOnesIn;
1241 (void)A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
1242 // LSLH restricts shift amount to 0, 8 which are encoded as 0 and 1
1243 // Transform encoded shift amount 0 to 1 and 1 to 0.
1244 return CurDAG->getTargetConstant(!ShiftImm, MVT::i32);
1247 def neon_mov_imm_LSLH_transform_operand
1250 unsigned ShiftOnesIn;
1252 A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1253 return (HasShift && !ShiftOnesIn); }],
1254 neon_mov_imm_LSLH_transform_XFORM>;
1256 // Transform (and A, (4h Neon_movi 0xff)) -> BIC 4h (A, 0x00, LSL 8)
1257 // Transform (and A, (4h Neon_movi 0xff LSL #8)) -> BIC 4h (A, 0x00)
1258 def : Pat<(v4i16 (and VPR64:$src,
1259 (v4i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
1260 (BICvi_lsl_4H VPR64:$src, 0,
1261 neon_mov_imm_LSLH_transform_operand:$Simm)>;
1263 // Transform (and A, (8h Neon_movi 8h 0xff)) -> BIC 8h (A, 0x00, LSL 8)
1264 // Transform (and A, (8h Neon_movi 0xff LSL #8)) -> BIC 8h (A, 0x00)
1265 def : Pat<(v8i16 (and VPR128:$src,
1266 (v8i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
1267 (BICvi_lsl_8H VPR128:$src, 0,
1268 neon_mov_imm_LSLH_transform_operand:$Simm)>;
// For vector types other than 4h/8h, the BIC/ORR-immediate instructions are
// still usable via a bitconvert of the halfword-shaped immediate; these
// patterns route v8i8/v1i64 to the 4H instruction and v16i8/v4i32/v2i64 to
// the 8H instruction.
1271 multiclass Neon_bitwiseVi_patterns<SDPatternOperator opnode,
1272 SDPatternOperator neonopnode,
1274 Instruction INST8H> {
1275 def : Pat<(v8i8 (opnode VPR64:$src,
1276 (bitconvert(v4i16 (neonopnode timm:$Imm,
1277 neon_mov_imm_LSLH_operand:$Simm))))),
1278 (INST4H VPR64:$src, neon_uimm8:$Imm,
1279 neon_mov_imm_LSLH_operand:$Simm)>;
1280 def : Pat<(v1i64 (opnode VPR64:$src,
1281 (bitconvert(v4i16 (neonopnode timm:$Imm,
1282 neon_mov_imm_LSLH_operand:$Simm))))),
1283 (INST4H VPR64:$src, neon_uimm8:$Imm,
1284 neon_mov_imm_LSLH_operand:$Simm)>;
1286 def : Pat<(v16i8 (opnode VPR128:$src,
1287 (bitconvert(v8i16 (neonopnode timm:$Imm,
1288 neon_mov_imm_LSLH_operand:$Simm))))),
1289 (INST8H VPR128:$src, neon_uimm8:$Imm,
1290 neon_mov_imm_LSLH_operand:$Simm)>;
1291 def : Pat<(v4i32 (opnode VPR128:$src,
1292 (bitconvert(v8i16 (neonopnode timm:$Imm,
1293 neon_mov_imm_LSLH_operand:$Simm))))),
1294 (INST8H VPR128:$src, neon_uimm8:$Imm,
1295 neon_mov_imm_LSLH_operand:$Simm)>;
1296 def : Pat<(v2i64 (opnode VPR128:$src,
1297 (bitconvert(v8i16 (neonopnode timm:$Imm,
1298 neon_mov_imm_LSLH_operand:$Simm))))),
1299 (INST8H VPR128:$src, neon_uimm8:$Imm,
1300 neon_mov_imm_LSLH_operand:$Simm)>;
1303 // Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
// BIC computes Rd = Rd & ~imm, so the source pattern must be headed by 'and',
// not 'or' — an OR node can never legally select to an AND-NOT instruction.
// Neon_mvni supplies the already-inverted immediate, mirroring the explicit
// (and ..., Neon_movi 255, ...) -> BICvi_lsl patterns defined above.
1304 defm : Neon_bitwiseVi_patterns<and, Neon_mvni, BICvi_lsl_4H, BICvi_lsl_8H>;
// ORR with immediate: (or A, (Neon_movi imm, shift)) selects ORRvi_lsl_*.
1306 // Additional patterns for Vector Bitwise OR - immediate
1307 defm : Neon_bitwiseVi_patterns<or, Neon_movi, ORRvi_lsl_4H, ORRvi_lsl_8H>;
// MSL-form MOVI/MVNI instantiations, plus the alias class that lets the
// shifted-immediate instructions be written without an explicit shift
// (the alias pins the shift operand to 0).
1310 // Vector Move Immediate Masked
1311 let isReMaterializable = 1 in {
1312 defm MOVIvi_msl : NeonI_mov_imm_msl_sizes<"movi", 0b0, Neon_movi>;
1315 // Vector Move Inverted Immediate Masked
1316 let isReMaterializable = 1 in {
1317 defm MVNIvi_msl : NeonI_mov_imm_msl_sizes<"mvni", 0b1, Neon_mvni>;
1320 class NeonI_mov_imm_lsl_aliases<string asmop, string asmlane,
1321 Instruction inst, RegisterClass VPRC>
1322 : NeonInstAlias<!strconcat(asmop, " $Rd," # asmlane # ", $Imm"),
1323 (inst VPRC:$Rd, neon_uimm8:$Imm, 0), 0b0>;
// Shift-less assembly aliases for all four immediate instruction families.
1325 // Aliases for Vector Move Immediate Shifted
1326 def : NeonI_mov_imm_lsl_aliases<"movi", ".2s", MOVIvi_lsl_2S, VPR64>;
1327 def : NeonI_mov_imm_lsl_aliases<"movi", ".4s", MOVIvi_lsl_4S, VPR128>;
1328 def : NeonI_mov_imm_lsl_aliases<"movi", ".4h", MOVIvi_lsl_4H, VPR64>;
1329 def : NeonI_mov_imm_lsl_aliases<"movi", ".8h", MOVIvi_lsl_8H, VPR128>;
1331 // Aliases for Vector Move Inverted Immediate Shifted
1332 def : NeonI_mov_imm_lsl_aliases<"mvni", ".2s", MVNIvi_lsl_2S, VPR64>;
1333 def : NeonI_mov_imm_lsl_aliases<"mvni", ".4s", MVNIvi_lsl_4S, VPR128>;
1334 def : NeonI_mov_imm_lsl_aliases<"mvni", ".4h", MVNIvi_lsl_4H, VPR64>;
1335 def : NeonI_mov_imm_lsl_aliases<"mvni", ".8h", MVNIvi_lsl_8H, VPR128>;
1337 // Aliases for Vector Bitwise Bit Clear (AND NOT) - immediate
1338 def : NeonI_mov_imm_lsl_aliases<"bic", ".2s", BICvi_lsl_2S, VPR64>;
1339 def : NeonI_mov_imm_lsl_aliases<"bic", ".4s", BICvi_lsl_4S, VPR128>;
1340 def : NeonI_mov_imm_lsl_aliases<"bic", ".4h", BICvi_lsl_4H, VPR64>;
1341 def : NeonI_mov_imm_lsl_aliases<"bic", ".8h", BICvi_lsl_8H, VPR128>;
1343 // Aliases for Vector Bitwise OR - immediate
1344 def : NeonI_mov_imm_lsl_aliases<"orr", ".2s", ORRvi_lsl_2S, VPR64>;
1345 def : NeonI_mov_imm_lsl_aliases<"orr", ".4s", ORRvi_lsl_4S, VPR128>;
1346 def : NeonI_mov_imm_lsl_aliases<"orr", ".4h", ORRvi_lsl_4H, VPR64>;
1347 def : NeonI_mov_imm_lsl_aliases<"orr", ".8h", ORRvi_lsl_8H, VPR128>;
1349 // Vector Move Immediate - per byte
1350 let isReMaterializable = 1 in {
1351 def MOVIvi_8B : NeonI_1VModImm<0b0, 0b0,
1352 (outs VPR64:$Rd), (ins neon_uimm8:$Imm),
1353 "movi\t$Rd.8b, $Imm",
1354 [(set (v8i8 VPR64:$Rd),
1355 (v8i8 (Neon_movi (timm:$Imm), (i32 imm))))],
1360 def MOVIvi_16B : NeonI_1VModImm<0b1, 0b0,
1361 (outs VPR128:$Rd), (ins neon_uimm8:$Imm),
1362 "movi\t$Rd.16b, $Imm",
1363 [(set (v16i8 VPR128:$Rd),
1364 (v16i8 (Neon_movi (timm:$Imm), (i32 imm))))],
1370 // Vector Move Immediate - bytemask, per double word
1371 let isReMaterializable = 1 in {
1372 def MOVIvi_2D : NeonI_1VModImm<0b1, 0b1,
1373 (outs VPR128:$Rd), (ins neon_uimm64_mask:$Imm),
1374 "movi\t $Rd.2d, $Imm",
1375 [(set (v2i64 VPR128:$Rd),
1376 (v2i64 (Neon_movi (timm:$Imm), (i32 imm))))],
1382 // Vector Move Immediate - bytemask, one doubleword
1384 let isReMaterializable = 1 in {
1385 def MOVIdi : NeonI_1VModImm<0b0, 0b1,
1386 (outs FPR64:$Rd), (ins neon_uimm64_mask:$Imm),
1388 [(set (f64 FPR64:$Rd),
1390 (v1i64 (Neon_movi (timm:$Imm), (i32 imm))))))],
// Floating-point vector move immediate (FMOV with the 8-bit float-immediate
// encoding); one class parameterized over lane arrangement and operand type.
1396 // Vector Floating Point Move Immediate
1398 class NeonI_FMOV_impl<string asmlane, RegisterClass VPRC, ValueType OpTy,
1399 Operand immOpType, bit q, bit op>
1400 : NeonI_1VModImm<q, op,
1401 (outs VPRC:$Rd), (ins immOpType:$Imm),
1402 "fmov\t$Rd" # asmlane # ", $Imm",
1403 [(set (OpTy VPRC:$Rd),
1404 (OpTy (Neon_fmovi (timm:$Imm))))],
1409 let isReMaterializable = 1 in {
1410 def FMOVvi_2S : NeonI_FMOV_impl<".2s", VPR64, v2f32, fmov32_operand, 0b0, 0b0>;
1411 def FMOVvi_4S : NeonI_FMOV_impl<".4s", VPR128, v4f32, fmov32_operand, 0b1, 0b0>;
1412 def FMOVvi_2D : NeonI_FMOV_impl<".2d", VPR128, v2f64, fmov64_operand, 0b1, 0b1>;
// Immediate shift-left base class: matches (shl Rn, (dup imm)) so a
// splatted constant shift amount selects the immediate form.
1415 // Vector Shift (Immediate)
1417 def imm0_63 : Operand<i32> {
1418 let ParserMatchClass = uimm6_asmoperand;
1421 class N2VShiftLeft<bit q, bit u, bits<5> opcode, string asmop, string T,
1422 RegisterClass VPRC, ValueType Ty, Operand ImmTy>
1423 : NeonI_2VShiftImm<q, u, opcode,
1424 (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
1425 asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1426 [(set (Ty VPRC:$Rd),
1427 (Ty (shl (Ty VPRC:$Rn),
1428 (Ty (Neon_dupImm (i32 imm:$Imm))))))],
// SHL-immediate for every arrangement; the immh prefix bits in Inst{22-19}
// encode the element size (the remaining bits hold the shift amount).
1431 multiclass NeonI_N2VShL<bit u, bits<5> opcode, string asmop> {
1432 // 64-bit vector types.
1433 def _8B : N2VShiftLeft<0b0, u, opcode, asmop, "8b", VPR64, v8i8, uimm3> {
1434 let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
1437 def _4H : N2VShiftLeft<0b0, u, opcode, asmop, "4h", VPR64, v4i16, uimm4> {
1438 let Inst{22-20} = 0b001; // immh:immb = 001xxxx
1441 def _2S : N2VShiftLeft<0b0, u, opcode, asmop, "2s", VPR64, v2i32, uimm5> {
1442 let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
1445 // 128-bit vector types.
1446 def _16B : N2VShiftLeft<0b1, u, opcode, asmop, "16b", VPR128, v16i8, uimm3> {
1447 let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
1450 def _8H : N2VShiftLeft<0b1, u, opcode, asmop, "8h", VPR128, v8i16, uimm4> {
1451 let Inst{22-20} = 0b001; // immh:immb = 001xxxx
1454 def _4S : N2VShiftLeft<0b1, u, opcode, asmop, "4s", VPR128, v4i32, uimm5> {
1455 let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
1458 def _2D : N2VShiftLeft<0b1, u, opcode, asmop, "2d", VPR128, v2i64, imm0_63> {
1459 let Inst{22} = 0b1; // immh:immb = 1xxxxxx
// PatFrags selecting the high half of a 128-bit vector, and the widening
// shift-left classes (SSHLL/USHLL): low-half form takes VPR64, high-half
// ("2" suffix) form extracts the top half of a VPR128 via getTop.
// NOTE(review): interior pattern lines (the shl wrapping the ExtOp) are
// missing from this extract.
1463 def Neon_top16B : PatFrag<(ops node:$in),
1464 (extract_subvector (v16i8 node:$in), (iPTR 8))>;
1465 def Neon_top8H : PatFrag<(ops node:$in),
1466 (extract_subvector (v8i16 node:$in), (iPTR 4))>;
1467 def Neon_top4S : PatFrag<(ops node:$in),
1468 (extract_subvector (v4i32 node:$in), (iPTR 2))>;
1470 class N2VShiftLong<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1471 string SrcT, ValueType DestTy, ValueType SrcTy,
1472 Operand ImmTy, SDPatternOperator ExtOp>
1473 : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
1474 (ins VPR64:$Rn, ImmTy:$Imm),
1475 asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
1476 [(set (DestTy VPR128:$Rd),
1478 (DestTy (ExtOp (SrcTy VPR64:$Rn))),
1479 (DestTy (Neon_dupImm (i32 imm:$Imm))))))],
1482 class N2VShiftLongHigh<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1483 string SrcT, ValueType DestTy, ValueType SrcTy,
1484 int StartIndex, Operand ImmTy,
1485 SDPatternOperator ExtOp, PatFrag getTop>
1486 : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
1487 (ins VPR128:$Rn, ImmTy:$Imm),
1488 asmop # "2\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
1489 [(set (DestTy VPR128:$Rd),
1492 (SrcTy (getTop VPR128:$Rn)))),
1493 (DestTy (Neon_dupImm (i32 imm:$Imm))))))],
// Widening shift-left-long multiclass plus zero-shift fallback patterns:
// a plain sext/zext (shift of 0) still selects the SHLL instruction with
// an immediate of 0.
1496 multiclass NeonI_N2VShLL<string prefix, bit u, bits<5> opcode, string asmop,
1498 // 64-bit vector types.
1499 def _8B : N2VShiftLong<0b0, u, opcode, asmop, "8h", "8b", v8i16, v8i8,
1501 let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
1504 def _4H : N2VShiftLong<0b0, u, opcode, asmop, "4s", "4h", v4i32, v4i16,
1506 let Inst{22-20} = 0b001; // immh:immb = 001xxxx
1509 def _2S : N2VShiftLong<0b0, u, opcode, asmop, "2d", "2s", v2i64, v2i32,
1511 let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
1514 // 128-bit vector types
1515 def _16B : N2VShiftLongHigh<0b1, u, opcode, asmop, "8h", "16b",
1516 v8i16, v8i8, 8, uimm3, ExtOp, Neon_top16B>{
1517 let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
1520 def _8H : N2VShiftLongHigh<0b1, u, opcode, asmop, "4s", "8h",
1521 v4i32, v4i16, 4, uimm4, ExtOp, Neon_top8H>{
1522 let Inst{22-20} = 0b001; // immh:immb = 001xxxx
1525 def _4S : N2VShiftLongHigh<0b1, u, opcode, asmop, "2d", "4s",
1526 v2i64, v2i32, 2, uimm5, ExtOp, Neon_top4S>{
1527 let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
1530 // Use other patterns to match when the immediate is 0.
1531 def : Pat<(v8i16 (ExtOp (v8i8 VPR64:$Rn))),
1532 (!cast<Instruction>(prefix # "_8B") VPR64:$Rn, 0)>;
1534 def : Pat<(v4i32 (ExtOp (v4i16 VPR64:$Rn))),
1535 (!cast<Instruction>(prefix # "_4H") VPR64:$Rn, 0)>;
1537 def : Pat<(v2i64 (ExtOp (v2i32 VPR64:$Rn))),
1538 (!cast<Instruction>(prefix # "_2S") VPR64:$Rn, 0)>;
1540 def : Pat<(v8i16 (ExtOp (v8i8 (Neon_top16B VPR128:$Rn)))),
1541 (!cast<Instruction>(prefix # "_16B") VPR128:$Rn, 0)>;
1543 def : Pat<(v4i32 (ExtOp (v4i16 (Neon_top8H VPR128:$Rn)))),
1544 (!cast<Instruction>(prefix # "_8H") VPR128:$Rn, 0)>;
1546 def : Pat<(v2i64 (ExtOp (v2i32 (Neon_top4S VPR128:$Rn)))),
1547 (!cast<Instruction>(prefix # "_4S") VPR128:$Rn, 0)>;
1550 // Shift left immediate
1551 defm SHLvvi : NeonI_N2VShL<0b0, 0b01010, "shl">;
1553 // Shift left long immediate
1554 defm SSHLLvvi : NeonI_N2VShLL<"SSHLLvvi", 0b0, 0b10100, "sshll", sext>;
1555 defm USHLLvvi : NeonI_N2VShLL<"USHLLvvi", 0b1, 0b10100, "ushll", zext>;
// Scalar three-same-operand classes: D-size-only form, the BHSD multiclass,
// and a pattern helper that lifts a v1i64 op onto the D-register instruction
// via EXTRACT_SUBREG/SUBREG_TO_REG.
1557 // Scalar Arithmetic
1559 class NeonI_Scalar3Same_D_size<bit u, bits<5> opcode, string asmop>
1560 : NeonI_Scalar3Same<u, 0b11, opcode,
1561 (outs FPR64:$Rd), (ins FPR64:$Rn, FPR64:$Rm),
1562 !strconcat(asmop, " $Rd, $Rn, $Rm"),
1566 multiclass NeonI_Scalar3Same_BHSD_sizes<bit u, bits<5> opcode,
1567 string asmop, bit Commutable = 0>
1569 let isCommutable = Commutable in {
1570 def bbb : NeonI_Scalar3Same<u, 0b00, opcode,
1571 (outs FPR8:$Rd), (ins FPR8:$Rn, FPR8:$Rm),
1572 !strconcat(asmop, " $Rd, $Rn, $Rm"),
1575 def hhh : NeonI_Scalar3Same<u, 0b01, opcode,
1576 (outs FPR16:$Rd), (ins FPR16:$Rn, FPR16:$Rm),
1577 !strconcat(asmop, " $Rd, $Rn, $Rm"),
1580 def sss : NeonI_Scalar3Same<u, 0b10, opcode,
1581 (outs FPR32:$Rd), (ins FPR32:$Rn, FPR32:$Rm),
1582 !strconcat(asmop, " $Rd, $Rn, $Rm"),
1585 def ddd : NeonI_Scalar3Same<u, 0b11, opcode,
1586 (outs FPR64:$Rd), (ins FPR64:$Rn, FPR64:$Rm),
1587 !strconcat(asmop, " $Rd, $Rn, $Rm"),
1593 class Neon_Scalar_D_size_patterns<SDPatternOperator opnode, Instruction INSTD>
1594 : Pat<(v1i64 (opnode (v1i64 VPR64:$Rn), (v1i64 VPR64:$Rm))),
1595 (SUBREG_TO_REG (i64 0),
1596 (INSTD (EXTRACT_SUBREG VPR64:$Rn, sub_64),
1597 (EXTRACT_SUBREG VPR64:$Rm, sub_64)),
1601 // Scalar Integer Add
1602 let isCommutable = 1 in {
1603 def ADDddd : NeonI_Scalar3Same_D_size<0b0, 0b10000, "add">;
1606 // Scalar Integer Sub
1607 def SUBddd : NeonI_Scalar3Same_D_size<0b1, 0b10000, "sub">;
1609 // Pattern for Scalar Integer Add and Sub with D register
1610 def : Neon_Scalar_D_size_patterns<add, ADDddd>;
1611 def : Neon_Scalar_D_size_patterns<sub, SUBddd>;
1613 // Scalar Integer Saturating Add (Signed, Unsigned)
1614 defm SQADD : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00001, "sqadd", 1>;
1615 defm UQADD : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00001, "uqadd", 1>;
1617 // Scalar Integer Saturating Sub (Signed, Unsigned)
1618 defm SQSUB : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00101, "sqsub", 0>;
1619 defm UQSUB : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00101, "uqsub", 0>;
1621 // Patterns for Scalar Integer Saturating Add, Sub with D register only
1622 def : Neon_Scalar_D_size_patterns<int_arm_neon_vqadds, SQADDddd>;
1623 def : Neon_Scalar_D_size_patterns<int_arm_neon_vqaddu, UQADDddd>;
1624 def : Neon_Scalar_D_size_patterns<int_arm_neon_vqsubs, SQSUBddd>;
1625 def : Neon_Scalar_D_size_patterns<int_arm_neon_vqsubu, UQSUBddd>;
// Scalar register-controlled shifts and their D-register patterns.
// NOTE(review): lines 1647/1648 below register the SAME source pattern
// (v1i64 shl) for two different instructions (SSHLddd and USHLddd); the
// second is ambiguous/unreachable and one of them likely should be removed
// or keyed to a distinct node — confirm against the full file.
1627 // Scalar Integer Shift Left (Signed, Unsigned)
1628 def SSHLddd : NeonI_Scalar3Same_D_size<0b0, 0b01000, "sshl">;
1629 def USHLddd : NeonI_Scalar3Same_D_size<0b1, 0b01000, "ushl">;
1631 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
1632 defm SQSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01001, "sqshl", 0>;
1633 defm UQSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01001, "uqshl", 0>;
1635 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
1636 def SRSHLddd: NeonI_Scalar3Same_D_size<0b0, 0b01010, "srshl">;
1637 def URSHLddd: NeonI_Scalar3Same_D_size<0b1, 0b01010, "urshl">;
1639 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
1640 defm SQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01011, "sqrshl", 0>;
1641 defm UQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01011, "uqrshl", 0>;
1643 // Patterns for Scalar Integer Shift Left, Saturating Shift Left,
1644 // Rounding Shift Left, Rounding Saturating Shift Left with D register only
1645 def : Neon_Scalar_D_size_patterns<int_arm_neon_vshifts, SSHLddd>;
1646 def : Neon_Scalar_D_size_patterns<int_arm_neon_vshiftu, USHLddd>;
1647 def : Neon_Scalar_D_size_patterns<shl, SSHLddd>;
1648 def : Neon_Scalar_D_size_patterns<shl, USHLddd>;
1649 def : Neon_Scalar_D_size_patterns<int_arm_neon_vqshifts, SQSHLddd>;
1650 def : Neon_Scalar_D_size_patterns<int_arm_neon_vqshiftu, UQSHLddd>;
1651 def : Neon_Scalar_D_size_patterns<int_arm_neon_vrshifts, SRSHLddd>;
1652 def : Neon_Scalar_D_size_patterns<int_arm_neon_vrshiftu, URSHLddd>;
1653 def : Neon_Scalar_D_size_patterns<int_arm_neon_vqrshifts, SQRSHLddd>;
1654 def : Neon_Scalar_D_size_patterns<int_arm_neon_vqrshiftu, UQRSHLddd>;
1657 //===----------------------------------------------------------------------===//
1658 // Non-Instruction Patterns
1659 //===----------------------------------------------------------------------===//
// 64-bit vector bitcasts...
//
// Every 64-bit vector type below lives in the same VPR64 register class,
// so a bitconvert between any two of them requires no instruction: each
// pattern simply re-types the source register in place.

def : Pat<(v1i64 (bitconvert (v8i8 VPR64:$src))), (v1i64 VPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v8i8 VPR64:$src))), (v2f32 VPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8 VPR64:$src))), (v2i32 VPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8 VPR64:$src))), (v4i16 VPR64:$src)>;

def : Pat<(v1i64 (bitconvert (v4i16 VPR64:$src))), (v1i64 VPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 VPR64:$src))), (v2i32 VPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4i16 VPR64:$src))), (v2f32 VPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v4i16 VPR64:$src))), (v8i8 VPR64:$src)>;

def : Pat<(v1i64 (bitconvert (v2i32 VPR64:$src))), (v1i64 VPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v2i32 VPR64:$src))), (v2f32 VPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 VPR64:$src))), (v4i16 VPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v2i32 VPR64:$src))), (v8i8 VPR64:$src)>;

def : Pat<(v1i64 (bitconvert (v2f32 VPR64:$src))), (v1i64 VPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v2f32 VPR64:$src))), (v2i32 VPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2f32 VPR64:$src))), (v4i16 VPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v2f32 VPR64:$src))), (v8i8 VPR64:$src)>;

def : Pat<(v2f32 (bitconvert (v1i64 VPR64:$src))), (v2f32 VPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v1i64 VPR64:$src))), (v2i32 VPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v1i64 VPR64:$src))), (v4i16 VPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v1i64 VPR64:$src))), (v8i8 VPR64:$src)>;
// ..and 128-bit vector bitcasts...
//
// Same idea as the 64-bit group above, for the 128-bit vector types in the
// VPR128 register class: a bitconvert is a pure reinterpretation of the
// same register, so each pattern emits no code.

def : Pat<(v2f64 (bitconvert (v16i8 VPR128:$src))), (v2f64 VPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 VPR128:$src))), (v2i64 VPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8 VPR128:$src))), (v4f32 VPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8 VPR128:$src))), (v4i32 VPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8 VPR128:$src))), (v8i16 VPR128:$src)>;

def : Pat<(v2f64 (bitconvert (v8i16 VPR128:$src))), (v2f64 VPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 VPR128:$src))), (v2i64 VPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 VPR128:$src))), (v4i32 VPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 VPR128:$src))), (v4f32 VPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8i16 VPR128:$src))), (v16i8 VPR128:$src)>;

def : Pat<(v2f64 (bitconvert (v4i32 VPR128:$src))), (v2f64 VPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4i32 VPR128:$src))), (v2i64 VPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v4i32 VPR128:$src))), (v4f32 VPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 VPR128:$src))), (v8i16 VPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 VPR128:$src))), (v16i8 VPR128:$src)>;

def : Pat<(v2f64 (bitconvert (v4f32 VPR128:$src))), (v2f64 VPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32 VPR128:$src))), (v2i64 VPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v4f32 VPR128:$src))), (v4i32 VPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 VPR128:$src))), (v8i16 VPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 VPR128:$src))), (v16i8 VPR128:$src)>;

def : Pat<(v2f64 (bitconvert (v2i64 VPR128:$src))), (v2f64 VPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64 VPR128:$src))), (v4f32 VPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64 VPR128:$src))), (v4i32 VPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64 VPR128:$src))), (v8i16 VPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64 VPR128:$src))), (v16i8 VPR128:$src)>;

def : Pat<(v2i64 (bitconvert (v2f64 VPR128:$src))), (v2i64 VPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64 VPR128:$src))), (v4f32 VPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2f64 VPR128:$src))), (v4i32 VPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2f64 VPR128:$src))), (v8i16 VPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2f64 VPR128:$src))), (v16i8 VPR128:$src)>;
// ...and scalar bitcasts...
//
// Vector -> scalar FP casts cross register classes, so they are lowered to
// subregister extractions rather than plain re-typing:
//  - 64-bit vectors (VPR64) -> f64 (FPR64) via the sub_64 subregister
//    (presumably the FPR64 view of the same physical register — confirm
//    against the register-class definitions);
//  - 128-bit vectors (VPR128) -> f128 (FPR128) via the sub_alias
//    subregister.
// Both are expected to fold to a register-class copy with no real code.
def : Pat<(f64 (bitconvert (v8i8 VPR64:$src))),
(f64 (EXTRACT_SUBREG (v8i8 VPR64:$src), sub_64))>;
def : Pat<(f64 (bitconvert (v4i16 VPR64:$src))),
(f64 (EXTRACT_SUBREG (v4i16 VPR64:$src), sub_64))>;
def : Pat<(f64 (bitconvert (v2i32 VPR64:$src))),
(f64 (EXTRACT_SUBREG (v2i32 VPR64:$src), sub_64))>;
def : Pat<(f64 (bitconvert (v2f32 VPR64:$src))),
(f64 (EXTRACT_SUBREG (v2f32 VPR64:$src), sub_64))>;
def : Pat<(f64 (bitconvert (v1i64 VPR64:$src))),
(f64 (EXTRACT_SUBREG (v1i64 VPR64:$src), sub_64))>;
def : Pat<(f128 (bitconvert (v16i8 VPR128:$src))),
(f128 (EXTRACT_SUBREG (v16i8 VPR128:$src), sub_alias))>;
def : Pat<(f128 (bitconvert (v8i16 VPR128:$src))),
(f128 (EXTRACT_SUBREG (v8i16 VPR128:$src), sub_alias))>;
def : Pat<(f128 (bitconvert (v4i32 VPR128:$src))),
(f128 (EXTRACT_SUBREG (v4i32 VPR128:$src), sub_alias))>;
def : Pat<(f128 (bitconvert (v2i64 VPR128:$src))),
(f128 (EXTRACT_SUBREG (v2i64 VPR128:$src), sub_alias))>;
def : Pat<(f128 (bitconvert (v4f32 VPR128:$src))),
(f128 (EXTRACT_SUBREG (v4f32 VPR128:$src), sub_alias))>;
def : Pat<(f128 (bitconvert (v2f64 VPR128:$src))),
(f128 (EXTRACT_SUBREG (v2f64 VPR128:$src), sub_alias))>;
// The reverse direction: reinterpret an f64 in FPR64 as a 64-bit vector by
// placing it into the sub_64 subregister of a VPR64 register with
// SUBREG_TO_REG. (The f128 -> 128-bit-vector patterns that follow use the
// same scheme with sub_alias.)
def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))),
(v8i8 (SUBREG_TO_REG (i64 0), (f64 FPR64:$src), sub_64))>;
def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))),
(v4i16 (SUBREG_TO_REG (i64 0), (f64 FPR64:$src), sub_64))>;
def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))),
(v2i32 (SUBREG_TO_REG (i64 0), (f64 FPR64:$src), sub_64))>;
def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))),
(v2f32 (SUBREG_TO_REG (i64 0), (f64 FPR64:$src), sub_64))>;
def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))),
(v1i64 (SUBREG_TO_REG (i64 0), (f64 FPR64:$src), sub_64))>;
1762 def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))),
1763 (v16i8 (SUBREG_TO_REG (i128 0), (f128 FPR128:$src),
1765 def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))),
1766 (v8i16 (SUBREG_TO_REG (i128 0), (f128 FPR128:$src),
1768 def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))),
1769 (v4i32 (SUBREG_TO_REG (i128 0), (f128 FPR128:$src),
1771 def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))),
1772 (v2i64 (SUBREG_TO_REG (i128 0), (f128 FPR128:$src),
1774 def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))),
1775 (v4f32 (SUBREG_TO_REG (i128 0), (f128 FPR128:$src),
1777 def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))),
1778 (v2f64 (SUBREG_TO_REG (i128 0), (f128 FPR128:$src),