Implement the MC layer of the AArch64 NEON instructions PMULL and PMULL2 with the 128-bit integer type.
lib/Target/AArch64/AArch64InstrNEON.td
1 //===-- AArch64InstrNEON.td - NEON support for AArch64 -----*- tablegen -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file describes the AArch64 NEON instruction set.
11 //
12 //===----------------------------------------------------------------------===//
13
14 //===----------------------------------------------------------------------===//
15 // NEON-specific DAG Nodes.
16 //===----------------------------------------------------------------------===//
17 def Neon_bsl       : SDNode<"AArch64ISD::NEON_BSL", SDTypeProfile<1, 3,
18                       [SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
19                       SDTCisSameAs<0, 3>]>>;
20
21 // (outs Result), (ins Imm, OpCmode)
22 def SDT_Neon_movi : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
23
24 def Neon_movi     : SDNode<"AArch64ISD::NEON_MOVIMM", SDT_Neon_movi>;
25
26 def Neon_mvni     : SDNode<"AArch64ISD::NEON_MVNIMM", SDT_Neon_movi>;
27
28 // (outs Result), (ins Imm)
29 def Neon_fmovi : SDNode<"AArch64ISD::NEON_FMOVIMM", SDTypeProfile<1, 1,
30                         [SDTCisVec<0>, SDTCisVT<1, i32>]>>;
31
32 // (outs Result), (ins LHS, RHS, CondCode)
33 def Neon_cmp : SDNode<"AArch64ISD::NEON_CMP", SDTypeProfile<1, 3,
34                  [SDTCisVec<0>,  SDTCisSameAs<1, 2>]>>;
35
36 // (outs Result), (ins LHS, 0/0.0 constant, CondCode)
37 def Neon_cmpz : SDNode<"AArch64ISD::NEON_CMPZ", SDTypeProfile<1, 3,
38                  [SDTCisVec<0>,  SDTCisVec<1>]>>;
39
40 // (outs Result), (ins LHS, RHS)
41 def Neon_tst : SDNode<"AArch64ISD::NEON_TST", SDTypeProfile<1, 2,
42                  [SDTCisVec<0>,  SDTCisSameAs<1, 2>]>>;
43
44 def SDTARMVSH : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
45                                      SDTCisVT<2, i32>]>;
46 def Neon_sqrshlImm   : SDNode<"AArch64ISD::NEON_QSHLs", SDTARMVSH>;
47 def Neon_uqrshlImm   : SDNode<"AArch64ISD::NEON_QSHLu", SDTARMVSH>;
48
49 def SDTVSHUF : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0, 1>]>;
50 def Neon_rev64    : SDNode<"AArch64ISD::NEON_REV64", SDTVSHUF>;
51 def Neon_rev32    : SDNode<"AArch64ISD::NEON_REV32", SDTVSHUF>;
52 def Neon_rev16    : SDNode<"AArch64ISD::NEON_REV16", SDTVSHUF>;
53 def Neon_vdup : SDNode<"AArch64ISD::NEON_VDUP", SDTypeProfile<1, 1,
54                        [SDTCisVec<0>]>>;
55 def Neon_vduplane : SDNode<"AArch64ISD::NEON_VDUPLANE", SDTypeProfile<1, 2,
56                            [SDTCisVec<0>, SDTCisVec<1>, SDTCisVT<2, i64>]>>;
57 def Neon_vextract : SDNode<"AArch64ISD::NEON_VEXTRACT", SDTypeProfile<1, 3,
58                            [SDTCisVec<0>,  SDTCisSameAs<0, 1>,
59                            SDTCisSameAs<0, 2>, SDTCisVT<3, i64>]>>;
60
61 //===----------------------------------------------------------------------===//
62 // Multiclasses
63 //===----------------------------------------------------------------------===//
64
65 multiclass NeonI_3VSame_B_sizes<bit u, bits<2> size,  bits<5> opcode,
66                                 string asmop, SDPatternOperator opnode8B,
67                                 SDPatternOperator opnode16B,
68                                 bit Commutable = 0> {
69   let isCommutable = Commutable in {
70     def _8B :  NeonI_3VSame<0b0, u, size, opcode,
71                (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
72                asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
73                [(set (v8i8 VPR64:$Rd),
74                   (v8i8 (opnode8B (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
75                NoItinerary>;
76
77     def _16B : NeonI_3VSame<0b1, u, size, opcode,
78                (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
79                asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
80                [(set (v16i8 VPR128:$Rd),
81                   (v16i8 (opnode16B (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
82                NoItinerary>;
83   }
84
85 }
86
87 multiclass NeonI_3VSame_HS_sizes<bit u, bits<5> opcode,
88                                   string asmop, SDPatternOperator opnode,
89                                   bit Commutable = 0> {
90   let isCommutable = Commutable in {
91     def _4H : NeonI_3VSame<0b0, u, 0b01, opcode,
92               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
93               asmop # "\t$Rd.4h, $Rn.4h, $Rm.4h",
94               [(set (v4i16 VPR64:$Rd),
95                  (v4i16 (opnode (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))))],
96               NoItinerary>;
97
98     def _8H : NeonI_3VSame<0b1, u, 0b01, opcode,
99               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
100               asmop # "\t$Rd.8h, $Rn.8h, $Rm.8h",
101               [(set (v8i16 VPR128:$Rd),
102                  (v8i16 (opnode (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))))],
103               NoItinerary>;
104
105     def _2S : NeonI_3VSame<0b0, u, 0b10, opcode,
106               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
107               asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
108               [(set (v2i32 VPR64:$Rd),
109                  (v2i32 (opnode (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))))],
110               NoItinerary>;
111
112     def _4S : NeonI_3VSame<0b1, u, 0b10, opcode,
113               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
114               asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
115               [(set (v4i32 VPR128:$Rd),
116                  (v4i32 (opnode (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))))],
117               NoItinerary>;
118   }
119 }
120 multiclass NeonI_3VSame_BHS_sizes<bit u, bits<5> opcode,
121                                   string asmop, SDPatternOperator opnode,
122                                   bit Commutable = 0>
123    : NeonI_3VSame_HS_sizes<u, opcode,  asmop, opnode, Commutable> {
124   let isCommutable = Commutable in {
125     def _8B :  NeonI_3VSame<0b0, u, 0b00, opcode,
126                (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
127                asmop # "\t$Rd.8b, $Rn.8b, $Rm.8b",
128                [(set (v8i8 VPR64:$Rd),
129                   (v8i8 (opnode (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))))],
130                NoItinerary>;
131
132     def _16B : NeonI_3VSame<0b1, u, 0b00, opcode,
133                (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
134                asmop # "\t$Rd.16b, $Rn.16b, $Rm.16b",
135                [(set (v16i8 VPR128:$Rd),
136                   (v16i8 (opnode (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))))],
137                NoItinerary>;
138   }
139 }
140
141 multiclass NeonI_3VSame_BHSD_sizes<bit u, bits<5> opcode,
142                                    string asmop, SDPatternOperator opnode,
143                                    bit Commutable = 0>
144    : NeonI_3VSame_BHS_sizes<u, opcode,  asmop, opnode, Commutable> {
145   let isCommutable = Commutable in {
146     def _2D : NeonI_3VSame<0b1, u, 0b11, opcode,
147               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
148               asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
149               [(set (v2i64 VPR128:$Rd),
150                  (v2i64 (opnode (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))))],
151               NoItinerary>;
152   }
153 }
154
155 // Multiclass NeonI_3VSame_SD_sizes: Operand types are floating point types,
156 // but Result types can be integer or floating point types.
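// For example, FADDvvv below instantiates this multiclass with matching
// operand and result types (v2f32, v4f32, v2f64), while FCMEQvvv produces
// integer mask results (v2i32, v4i32, v2i64) from the same floating-point
// operand types.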
157 multiclass NeonI_3VSame_SD_sizes<bit u, bit size, bits<5> opcode,
158                                  string asmop, SDPatternOperator opnode2S,
159                                  SDPatternOperator opnode4S,
160                                  SDPatternOperator opnode2D,
161                                  ValueType ResTy2S, ValueType ResTy4S,
162                                  ValueType ResTy2D, bit Commutable = 0> {
163   let isCommutable = Commutable in {
164     def _2S : NeonI_3VSame<0b0, u, {size, 0b0}, opcode,
165               (outs VPR64:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
166               asmop # "\t$Rd.2s, $Rn.2s, $Rm.2s",
167               [(set (ResTy2S VPR64:$Rd),
168                  (ResTy2S (opnode2S (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))))],
169               NoItinerary>;
170
171     def _4S : NeonI_3VSame<0b1, u, {size, 0b0}, opcode,
172               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
173               asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
174               [(set (ResTy4S VPR128:$Rd),
175                  (ResTy4S (opnode4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))))],
176               NoItinerary>;
177
178     def _2D : NeonI_3VSame<0b1, u, {size, 0b1}, opcode,
179               (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
180               asmop # "\t$Rd.2d, $Rn.2d, $Rm.2d",
181               [(set (ResTy2D VPR128:$Rd),
182                  (ResTy2D (opnode2D (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))))],
183                NoItinerary>;
184   }
185 }
186
187 //===----------------------------------------------------------------------===//
188 // Instruction Definitions
189 //===----------------------------------------------------------------------===//
190
191 // Vector Arithmetic Instructions
192
193 // Vector Add (Integer and Floating-Point)
194
195 defm ADDvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b10000, "add", add, 1>;
196 defm FADDvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11010, "fadd", fadd, fadd, fadd,
197                                      v2f32, v4f32, v2f64, 1>;
198
199 // Vector Sub (Integer and Floating-Point)
200
201 defm SUBvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b10000, "sub", sub, 0>;
202 defm FSUBvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11010, "fsub", fsub, fsub, fsub,
203                                      v2f32, v4f32, v2f64, 0>;
204
205 // Vector Multiply (Integer and Floating-Point)
206
207 defm MULvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b10011, "mul", mul, 1>;
208 defm FMULvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11011, "fmul", fmul, fmul, fmul,
209                                      v2f32, v4f32, v2f64, 1>;
210
211 // Vector Multiply (Polynomial)
212
213 defm PMULvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b10011, "pmul",
214                                     int_arm_neon_vmulp, int_arm_neon_vmulp, 1>;
215
216 // Vector Multiply-accumulate and Multiply-subtract (Integer)
217
218 // Class NeonI_3VSame_Constraint_impl: NeonI_3VSame with no data type and
219 // a constraint tying two operands ($src = $Rd).
220 class NeonI_3VSame_Constraint_impl<string asmop, string asmlane,
221   RegisterOperand VPRC, ValueType OpTy, bit q, bit u, bits<2> size, 
222   bits<5> opcode, SDPatternOperator opnode>
223   : NeonI_3VSame<q, u, size, opcode,
224     (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, VPRC:$Rm),
225     asmop # "\t$Rd" # asmlane # ", $Rn" # asmlane # ", $Rm" # asmlane,
226     [(set (OpTy VPRC:$Rd),
227        (OpTy (opnode (OpTy VPRC:$src), (OpTy VPRC:$Rn), (OpTy VPRC:$Rm))))],
228     NoItinerary> {
229   let Constraints = "$src = $Rd";
230 }
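// Because of the tied $src/$Rd operand, instructions built from this class
// both read and write Vd; e.g. the MLA definitions below compute
// Vd = Vd + Vn * Vm (see the Neon_mla PatFrag).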
231
232 def Neon_mla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
233                        (add node:$Ra, (mul node:$Rn, node:$Rm))>;
234
235 def Neon_mls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
236                        (sub node:$Ra, (mul node:$Rn, node:$Rm))>;
237
238
239 def MLAvvv_8B:  NeonI_3VSame_Constraint_impl<"mla", ".8b",  VPR64,  v8i8,
240                                              0b0, 0b0, 0b00, 0b10010, Neon_mla>;
241 def MLAvvv_16B: NeonI_3VSame_Constraint_impl<"mla", ".16b", VPR128, v16i8,
242                                              0b1, 0b0, 0b00, 0b10010, Neon_mla>;
243 def MLAvvv_4H:  NeonI_3VSame_Constraint_impl<"mla", ".4h",  VPR64,  v4i16,
244                                              0b0, 0b0, 0b01, 0b10010, Neon_mla>;
245 def MLAvvv_8H:  NeonI_3VSame_Constraint_impl<"mla", ".8h",  VPR128, v8i16,
246                                              0b1, 0b0, 0b01, 0b10010, Neon_mla>;
247 def MLAvvv_2S:  NeonI_3VSame_Constraint_impl<"mla", ".2s",  VPR64,  v2i32,
248                                              0b0, 0b0, 0b10, 0b10010, Neon_mla>;
249 def MLAvvv_4S:  NeonI_3VSame_Constraint_impl<"mla", ".4s",  VPR128, v4i32,
250                                              0b1, 0b0, 0b10, 0b10010, Neon_mla>;
251
252 def MLSvvv_8B:  NeonI_3VSame_Constraint_impl<"mls", ".8b",  VPR64,  v8i8,
253                                              0b0, 0b1, 0b00, 0b10010, Neon_mls>;
254 def MLSvvv_16B: NeonI_3VSame_Constraint_impl<"mls", ".16b", VPR128, v16i8,
255                                              0b1, 0b1, 0b00, 0b10010, Neon_mls>;
256 def MLSvvv_4H:  NeonI_3VSame_Constraint_impl<"mls", ".4h",  VPR64,  v4i16,
257                                              0b0, 0b1, 0b01, 0b10010, Neon_mls>;
258 def MLSvvv_8H:  NeonI_3VSame_Constraint_impl<"mls", ".8h",  VPR128, v8i16,
259                                              0b1, 0b1, 0b01, 0b10010, Neon_mls>;
260 def MLSvvv_2S:  NeonI_3VSame_Constraint_impl<"mls", ".2s",  VPR64,  v2i32,
261                                              0b0, 0b1, 0b10, 0b10010, Neon_mls>;
262 def MLSvvv_4S:  NeonI_3VSame_Constraint_impl<"mls", ".4s",  VPR128, v4i32,
263                                              0b1, 0b1, 0b10, 0b10010, Neon_mls>;
264
265 // Vector Multiply-accumulate and Multiply-subtract (Floating Point)
266
267 def Neon_fmla : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
268                         (fadd node:$Ra, (fmul node:$Rn, node:$Rm))>;
269
270 def Neon_fmls : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
271                         (fsub node:$Ra, (fmul node:$Rn, node:$Rm))>;
272
273 let Predicates = [HasNEON, UseFusedMAC] in {
274 def FMLAvvv_2S: NeonI_3VSame_Constraint_impl<"fmla", ".2s",  VPR64,  v2f32,
275                                              0b0, 0b0, 0b00, 0b11001, Neon_fmla>;
276 def FMLAvvv_4S: NeonI_3VSame_Constraint_impl<"fmla", ".4s",  VPR128, v4f32,
277                                              0b1, 0b0, 0b00, 0b11001, Neon_fmla>;
278 def FMLAvvv_2D: NeonI_3VSame_Constraint_impl<"fmla", ".2d",  VPR128, v2f64,
279                                              0b1, 0b0, 0b01, 0b11001, Neon_fmla>;
280
281 def FMLSvvv_2S: NeonI_3VSame_Constraint_impl<"fmls", ".2s",  VPR64,  v2f32,
282                                               0b0, 0b0, 0b10, 0b11001, Neon_fmls>;
283 def FMLSvvv_4S: NeonI_3VSame_Constraint_impl<"fmls", ".4s",  VPR128, v4f32,
284                                              0b1, 0b0, 0b10, 0b11001, Neon_fmls>;
285 def FMLSvvv_2D: NeonI_3VSame_Constraint_impl<"fmls", ".2d",  VPR128, v2f64,
286                                              0b1, 0b0, 0b11, 0b11001, Neon_fmls>;
287 }
288
289 // We're also allowed to match the fma instruction regardless of compile
290 // options.
291 def : Pat<(v2f32 (fma VPR64:$Rn, VPR64:$Rm, VPR64:$Ra)),
292           (FMLAvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
293 def : Pat<(v4f32 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
294           (FMLAvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
295 def : Pat<(v2f64 (fma VPR128:$Rn, VPR128:$Rm, VPR128:$Ra)),
296           (FMLAvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
297
298 def : Pat<(v2f32 (fma (fneg VPR64:$Rn), VPR64:$Rm, VPR64:$Ra)),
299           (FMLSvvv_2S VPR64:$Ra, VPR64:$Rn, VPR64:$Rm)>;
300 def : Pat<(v4f32 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
301           (FMLSvvv_4S VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
302 def : Pat<(v2f64 (fma (fneg VPR128:$Rn), VPR128:$Rm, VPR128:$Ra)),
303           (FMLSvvv_2D VPR128:$Ra, VPR128:$Rn, VPR128:$Rm)>;
304
305 // Vector Divide (Floating-Point)
306
307 defm FDIVvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11111, "fdiv", fdiv, fdiv, fdiv,
308                                      v2f32, v4f32, v2f64, 0>;
309
310 // Vector Bitwise Operations
311
312 // Vector Bitwise AND
313
314 defm ANDvvv : NeonI_3VSame_B_sizes<0b0, 0b00, 0b00011, "and", and, and, 1>;
315
316 // Vector Bitwise Exclusive OR
317
318 defm EORvvv : NeonI_3VSame_B_sizes<0b1, 0b00, 0b00011, "eor", xor, xor, 1>;
319
320 // Vector Bitwise OR
321
322 defm ORRvvv : NeonI_3VSame_B_sizes<0b0, 0b10, 0b00011, "orr", or, or, 1>;
323
324 // ORR disassembled as MOV if Vn==Vm
325
326 // Vector Move - register
327 // Alias for ORR if Vn=Vm.
328 // FIXME: This is actually the preferred syntax but TableGen can't deal with
329 // custom printing of aliases.
330 def : NeonInstAlias<"mov $Rd.8b, $Rn.8b",
331                     (ORRvvv_8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rn), 0>;
332 def : NeonInstAlias<"mov $Rd.16b, $Rn.16b",
333                     (ORRvvv_16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rn), 0>;
334
335 // The MOVI instruction takes two immediate operands.  The first is the
336 // immediate encoding, while the second is the cmode.  A cmode of 14, or
337 // 0b1110, produces a MOVI operation, rather than a MVNI, ORR, or BIC.
338 def Neon_AllZero : PatFrag<(ops), (Neon_movi (i32 0), (i32 14))>;
339 def Neon_AllOne : PatFrag<(ops), (Neon_movi (i32 255), (i32 14))>;
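// With cmode 14 the 8-bit immediate is replicated into every byte of the
// vector, so Neon_AllZero is a vector of 0x00 bytes and Neon_AllOne a vector
// of 0xff bytes, which is why Neon_AllOne serves as the all-ones mask in the
// Neon_not8B/Neon_not16B fragments below.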
340
341 def Neon_not8B  : PatFrag<(ops node:$in),
342                           (xor node:$in, (bitconvert (v8i8 Neon_AllOne)))>;
343 def Neon_not16B : PatFrag<(ops node:$in),
344                           (xor node:$in, (bitconvert (v16i8 Neon_AllOne)))>;
345
346 def Neon_orn8B : PatFrag<(ops node:$Rn, node:$Rm),
347                          (or node:$Rn, (Neon_not8B node:$Rm))>;
348
349 def Neon_orn16B : PatFrag<(ops node:$Rn, node:$Rm),
350                           (or node:$Rn, (Neon_not16B node:$Rm))>;
351
352 def Neon_bic8B : PatFrag<(ops node:$Rn, node:$Rm),
353                          (and node:$Rn, (Neon_not8B node:$Rm))>;
354
355 def Neon_bic16B : PatFrag<(ops node:$Rn, node:$Rm),
356                           (and node:$Rn, (Neon_not16B node:$Rm))>;
357
358
359 // Vector Bitwise OR NOT - register
360
361 defm ORNvvv : NeonI_3VSame_B_sizes<0b0, 0b11, 0b00011, "orn",
362                                    Neon_orn8B, Neon_orn16B, 0>;
363
364 // Vector Bitwise Bit Clear (AND NOT) - register
365
366 defm BICvvv : NeonI_3VSame_B_sizes<0b0, 0b01, 0b00011, "bic",
367                                    Neon_bic8B, Neon_bic16B, 0>;
368
369 multiclass Neon_bitwise2V_patterns<SDPatternOperator opnode8B,
370                                    SDPatternOperator opnode16B,
371                                    Instruction INST8B,
372                                    Instruction INST16B> {
373   def : Pat<(v2i32 (opnode8B VPR64:$Rn, VPR64:$Rm)),
374             (INST8B VPR64:$Rn, VPR64:$Rm)>;
375   def : Pat<(v4i16 (opnode8B VPR64:$Rn, VPR64:$Rm)),
376             (INST8B VPR64:$Rn, VPR64:$Rm)>;
377   def : Pat<(v1i64 (opnode8B VPR64:$Rn, VPR64:$Rm)),
378             (INST8B VPR64:$Rn, VPR64:$Rm)>;
379   def : Pat<(v4i32 (opnode16B VPR128:$Rn, VPR128:$Rm)),
380             (INST16B VPR128:$Rn, VPR128:$Rm)>;
381   def : Pat<(v8i16 (opnode16B VPR128:$Rn, VPR128:$Rm)),
382             (INST16B VPR128:$Rn, VPR128:$Rm)>;
383   def : Pat<(v2i64 (opnode16B VPR128:$Rn, VPR128:$Rm)),
384             (INST16B VPR128:$Rn, VPR128:$Rm)>;
385 }
386
387 // Additional patterns for bitwise instructions AND, EOR, ORR, BIC, ORN
388 defm : Neon_bitwise2V_patterns<and, and, ANDvvv_8B, ANDvvv_16B>;
389 defm : Neon_bitwise2V_patterns<or,  or,  ORRvvv_8B, ORRvvv_16B>;
390 defm : Neon_bitwise2V_patterns<xor, xor, EORvvv_8B, EORvvv_16B>;
391 defm : Neon_bitwise2V_patterns<Neon_bic8B, Neon_bic16B, BICvvv_8B, BICvvv_16B>;
392 defm : Neon_bitwise2V_patterns<Neon_orn8B, Neon_orn16B, ORNvvv_8B, ORNvvv_16B>;
393
394 //   Vector Bitwise Select
395 def BSLvvv_8B  : NeonI_3VSame_Constraint_impl<"bsl", ".8b",  VPR64, v8i8,
396                                               0b0, 0b1, 0b01, 0b00011, Neon_bsl>;
397
398 def BSLvvv_16B : NeonI_3VSame_Constraint_impl<"bsl", ".16b", VPR128, v16i8,
399                                               0b1, 0b1, 0b01, 0b00011, Neon_bsl>;
400
401 multiclass Neon_bitwise3V_patterns<SDPatternOperator opnode,
402                                    Instruction INST8B,
403                                    Instruction INST16B> {
404   // Disassociate type from instruction definition
405   def : Pat<(v2i32 (opnode VPR64:$src,VPR64:$Rn, VPR64:$Rm)),
406             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
407   def : Pat<(v4i16 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
408             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
409   def : Pat<(v1i64 (opnode VPR64:$src, VPR64:$Rn, VPR64:$Rm)),
410             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
411   def : Pat<(v4i32 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
412             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
413   def : Pat<(v8i16 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
414             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
415   def : Pat<(v2i64 (opnode VPR128:$src, VPR128:$Rn, VPR128:$Rm)),
416             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
417
418   // Allow matching the BSL instruction pattern with a non-constant operand
419   def : Pat<(v8i8 (or (and VPR64:$Rn, VPR64:$Rd),
420                     (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
421           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
422   def : Pat<(v4i16 (or (and VPR64:$Rn, VPR64:$Rd),
423                      (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
424           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
425   def : Pat<(v2i32 (or (and VPR64:$Rn, VPR64:$Rd),
426                      (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
427           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
428   def : Pat<(v1i64 (or (and VPR64:$Rn, VPR64:$Rd),
429                      (and VPR64:$Rm, (Neon_not8B VPR64:$Rd)))),
430           (INST8B VPR64:$Rd, VPR64:$Rn, VPR64:$Rm)>;
431   def : Pat<(v16i8 (or (and VPR128:$Rn, VPR128:$Rd),
432                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
433           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
434   def : Pat<(v8i16 (or (and VPR128:$Rn, VPR128:$Rd),
435                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
436           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
437   def : Pat<(v4i32 (or (and VPR128:$Rn, VPR128:$Rd),
438                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
439           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
440   def : Pat<(v2i64 (or (and VPR128:$Rn, VPR128:$Rd),
441                      (and VPR128:$Rm, (Neon_not16B VPR128:$Rd)))),
442           (INST16B VPR128:$Rd, VPR128:$Rn, VPR128:$Rm)>;
443
444   // Allow matching llvm.arm.* intrinsics.
445   def : Pat<(v8i8 (int_arm_neon_vbsl (v8i8 VPR64:$src),
446                     (v8i8 VPR64:$Rn), (v8i8 VPR64:$Rm))),
447             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
448   def : Pat<(v4i16 (int_arm_neon_vbsl (v4i16 VPR64:$src),
449                     (v4i16 VPR64:$Rn), (v4i16 VPR64:$Rm))),
450             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
451   def : Pat<(v2i32 (int_arm_neon_vbsl (v2i32 VPR64:$src),
452                     (v2i32 VPR64:$Rn), (v2i32 VPR64:$Rm))),
453             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
454   def : Pat<(v1i64 (int_arm_neon_vbsl (v1i64 VPR64:$src),
455                     (v1i64 VPR64:$Rn), (v1i64 VPR64:$Rm))),
456             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
457   def : Pat<(v2f32 (int_arm_neon_vbsl (v2f32 VPR64:$src),
458                     (v2f32 VPR64:$Rn), (v2f32 VPR64:$Rm))),
459             (INST8B VPR64:$src, VPR64:$Rn, VPR64:$Rm)>;
460   def : Pat<(v16i8 (int_arm_neon_vbsl (v16i8 VPR128:$src),
461                     (v16i8 VPR128:$Rn), (v16i8 VPR128:$Rm))),
462             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
463   def : Pat<(v8i16 (int_arm_neon_vbsl (v8i16 VPR128:$src),
464                     (v8i16 VPR128:$Rn), (v8i16 VPR128:$Rm))),
465             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
466   def : Pat<(v4i32 (int_arm_neon_vbsl (v4i32 VPR128:$src),
467                     (v4i32 VPR128:$Rn), (v4i32 VPR128:$Rm))),
468             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
469   def : Pat<(v2i64 (int_arm_neon_vbsl (v2i64 VPR128:$src),
470                     (v2i64 VPR128:$Rn), (v2i64 VPR128:$Rm))),
471             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
472   def : Pat<(v4f32 (int_arm_neon_vbsl (v4f32 VPR128:$src),
473                     (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rm))),
474             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
475   def : Pat<(v2f64 (int_arm_neon_vbsl (v2f64 VPR128:$src),
476                     (v2f64 VPR128:$Rn), (v2f64 VPR128:$Rm))),
477             (INST16B VPR128:$src, VPR128:$Rn, VPR128:$Rm)>;
478 }
479
480 // Additional patterns for bitwise instruction BSL
481 defm : Neon_bitwise3V_patterns<Neon_bsl, BSLvvv_8B, BSLvvv_16B>;
482
483 def Neon_NoBSLop : PatFrag<(ops node:$src, node:$Rn, node:$Rm),
484                            (Neon_bsl node:$src, node:$Rn, node:$Rm),
485                            [{ (void)N; return false; }]>;
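// The predicate above always returns false, so the BIT and BIF definitions
// below reuse the BSL-style encoding and tied-operand constraint without
// attaching any selection pattern of their own.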
486
487 // Vector Bitwise Insert if True
488
489 def BITvvv_8B  : NeonI_3VSame_Constraint_impl<"bit", ".8b", VPR64,   v8i8,
490                    0b0, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
491 def BITvvv_16B : NeonI_3VSame_Constraint_impl<"bit", ".16b", VPR128, v16i8,
492                    0b1, 0b1, 0b10, 0b00011, Neon_NoBSLop>;
493
494 // Vector Bitwise Insert if False
495
496 def BIFvvv_8B  : NeonI_3VSame_Constraint_impl<"bif", ".8b", VPR64,  v8i8,
497                                 0b0, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
498 def BIFvvv_16B : NeonI_3VSame_Constraint_impl<"bif", ".16b", VPR128, v16i8,
499                                 0b1, 0b1, 0b11, 0b00011, Neon_NoBSLop>;
500
501 // Vector Absolute Difference and Accumulate (Signed, Unsigned)
502
503 def Neon_uaba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
504                        (add node:$Ra, (int_arm_neon_vabdu node:$Rn, node:$Rm))>;
505 def Neon_saba : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
506                        (add node:$Ra, (int_arm_neon_vabds node:$Rn, node:$Rm))>;
507
508 // Vector Absolute Difference and Accumulate (Unsigned)
509 def UABAvvv_8B :  NeonI_3VSame_Constraint_impl<"uaba", ".8b",  VPR64,  v8i8,
510                     0b0, 0b1, 0b00, 0b01111, Neon_uaba>;
511 def UABAvvv_16B : NeonI_3VSame_Constraint_impl<"uaba", ".16b", VPR128, v16i8,
512                     0b1, 0b1, 0b00, 0b01111, Neon_uaba>;
513 def UABAvvv_4H :  NeonI_3VSame_Constraint_impl<"uaba", ".4h",  VPR64,  v4i16,
514                     0b0, 0b1, 0b01, 0b01111, Neon_uaba>;
515 def UABAvvv_8H :  NeonI_3VSame_Constraint_impl<"uaba", ".8h",  VPR128, v8i16,
516                     0b1, 0b1, 0b01, 0b01111, Neon_uaba>;
517 def UABAvvv_2S :  NeonI_3VSame_Constraint_impl<"uaba", ".2s",  VPR64,  v2i32,
518                     0b0, 0b1, 0b10, 0b01111, Neon_uaba>;
519 def UABAvvv_4S :  NeonI_3VSame_Constraint_impl<"uaba", ".4s",  VPR128, v4i32,
520                     0b1, 0b1, 0b10, 0b01111, Neon_uaba>;
521
522 // Vector Absolute Difference and Accumulate (Signed)
523 def SABAvvv_8B :  NeonI_3VSame_Constraint_impl<"saba", ".8b",  VPR64,  v8i8,
524                     0b0, 0b0, 0b00, 0b01111, Neon_saba>;
525 def SABAvvv_16B : NeonI_3VSame_Constraint_impl<"saba", ".16b", VPR128, v16i8,
526                     0b1, 0b0, 0b00, 0b01111, Neon_saba>;
527 def SABAvvv_4H :  NeonI_3VSame_Constraint_impl<"saba", ".4h",  VPR64,  v4i16,
528                     0b0, 0b0, 0b01, 0b01111, Neon_saba>;
529 def SABAvvv_8H :  NeonI_3VSame_Constraint_impl<"saba", ".8h",  VPR128, v8i16,
530                     0b1, 0b0, 0b01, 0b01111, Neon_saba>;
531 def SABAvvv_2S :  NeonI_3VSame_Constraint_impl<"saba", ".2s",  VPR64,  v2i32,
532                     0b0, 0b0, 0b10, 0b01111, Neon_saba>;
533 def SABAvvv_4S :  NeonI_3VSame_Constraint_impl<"saba", ".4s",  VPR128, v4i32,
534                     0b1, 0b0, 0b10, 0b01111, Neon_saba>;
535
536
537 // Vector Absolute Difference (Signed, Unsigned)
538 defm UABDvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01110, "uabd", int_arm_neon_vabdu, 0>;
539 defm SABDvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01110, "sabd", int_arm_neon_vabds, 0>;
540
541 // Vector Absolute Difference (Floating Point)
542 defm FABDvvv: NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11010, "fabd",
543                                     int_arm_neon_vabds, int_arm_neon_vabds,
544                                     int_arm_neon_vabds, v2f32, v4f32, v2f64, 0>;
545
546 // Vector Reciprocal Step (Floating Point)
547 defm FRECPSvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11111, "frecps",
548                                        int_arm_neon_vrecps, int_arm_neon_vrecps,
549                                        int_arm_neon_vrecps,
550                                        v2f32, v4f32, v2f64, 0>;
551
552 // Vector Reciprocal Square Root Step (Floating Point)
553 defm FRSQRTSvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11111, "frsqrts",
554                                         int_arm_neon_vrsqrts,
555                                         int_arm_neon_vrsqrts,
556                                         int_arm_neon_vrsqrts,
557                                         v2f32, v4f32, v2f64, 0>;
558
559 // Vector Comparisons
560
561 def Neon_cmeq : PatFrag<(ops node:$lhs, node:$rhs),
562                         (Neon_cmp node:$lhs, node:$rhs, SETEQ)>;
563 def Neon_cmphs : PatFrag<(ops node:$lhs, node:$rhs),
564                          (Neon_cmp node:$lhs, node:$rhs, SETUGE)>;
565 def Neon_cmge : PatFrag<(ops node:$lhs, node:$rhs),
566                         (Neon_cmp node:$lhs, node:$rhs, SETGE)>;
567 def Neon_cmhi : PatFrag<(ops node:$lhs, node:$rhs),
568                         (Neon_cmp node:$lhs, node:$rhs, SETUGT)>;
569 def Neon_cmgt : PatFrag<(ops node:$lhs, node:$rhs),
570                         (Neon_cmp node:$lhs, node:$rhs, SETGT)>;
571
572 // NeonI_compare_aliases class: swaps register operands to implement
573 // comparison aliases, e.g., CMLE is an alias for CMGE with operands reversed.
574 class NeonI_compare_aliases<string asmop, string asmlane,
575                             Instruction inst, RegisterOperand VPRC>
576   : NeonInstAlias<asmop # "\t$Rd" # asmlane #", $Rn" # asmlane #
577                     ", $Rm" # asmlane,
578                   (inst VPRC:$Rd, VPRC:$Rm, VPRC:$Rn), 0b0>;
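// For example, "cmle v0.8b, v1.8b, v2.8b" is accepted by the assembler and
// encoded as "cmge v0.8b, v2.8b, v1.8b".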
579
580 // Vector Comparisons (Integer)
581
582 // Vector Compare Mask Equal (Integer)
583 let isCommutable =1 in {
584 defm CMEQvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b10001, "cmeq", Neon_cmeq, 0>;
585 }
586
587 // Vector Compare Mask Higher or Same (Unsigned Integer)
588 defm CMHSvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00111, "cmhs", Neon_cmphs, 0>;
589
590 // Vector Compare Mask Greater Than or Equal (Integer)
591 defm CMGEvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00111, "cmge", Neon_cmge, 0>;
592
593 // Vector Compare Mask Higher (Unsigned Integer)
594 defm CMHIvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b00110, "cmhi", Neon_cmhi, 0>;
595
596 // Vector Compare Mask Greater Than (Integer)
597 defm CMGTvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b00110, "cmgt", Neon_cmgt, 0>;
598
599 // Vector Compare Mask Bitwise Test (Integer)
600 defm CMTSTvvv:  NeonI_3VSame_BHSD_sizes<0b0, 0b10001, "cmtst", Neon_tst, 0>;
601
602 // Vector Compare Mask Less or Same (Unsigned Integer)
603 // CMLS is alias for CMHS with operands reversed.
604 def CMLSvvv_8B  : NeonI_compare_aliases<"cmls", ".8b",  CMHSvvv_8B,  VPR64>;
605 def CMLSvvv_16B : NeonI_compare_aliases<"cmls", ".16b", CMHSvvv_16B, VPR128>;
606 def CMLSvvv_4H  : NeonI_compare_aliases<"cmls", ".4h",  CMHSvvv_4H,  VPR64>;
607 def CMLSvvv_8H  : NeonI_compare_aliases<"cmls", ".8h",  CMHSvvv_8H,  VPR128>;
608 def CMLSvvv_2S  : NeonI_compare_aliases<"cmls", ".2s",  CMHSvvv_2S,  VPR64>;
609 def CMLSvvv_4S  : NeonI_compare_aliases<"cmls", ".4s",  CMHSvvv_4S,  VPR128>;
610 def CMLSvvv_2D  : NeonI_compare_aliases<"cmls", ".2d",  CMHSvvv_2D,  VPR128>;
611
612 // Vector Compare Mask Less Than or Equal (Integer)
613 // CMLE is alias for CMGE with operands reversed.
614 def CMLEvvv_8B  : NeonI_compare_aliases<"cmle", ".8b",  CMGEvvv_8B,  VPR64>;
615 def CMLEvvv_16B : NeonI_compare_aliases<"cmle", ".16b", CMGEvvv_16B, VPR128>;
616 def CMLEvvv_4H  : NeonI_compare_aliases<"cmle", ".4h",  CMGEvvv_4H,  VPR64>;
617 def CMLEvvv_8H  : NeonI_compare_aliases<"cmle", ".8h",  CMGEvvv_8H,  VPR128>;
618 def CMLEvvv_2S  : NeonI_compare_aliases<"cmle", ".2s",  CMGEvvv_2S,  VPR64>;
619 def CMLEvvv_4S  : NeonI_compare_aliases<"cmle", ".4s",  CMGEvvv_4S,  VPR128>;
620 def CMLEvvv_2D  : NeonI_compare_aliases<"cmle", ".2d",  CMGEvvv_2D,  VPR128>;
621
622 // Vector Compare Mask Lower (Unsigned Integer)
623 // CMLO is alias for CMHI with operands reversed.
624 def CMLOvvv_8B  : NeonI_compare_aliases<"cmlo", ".8b",  CMHIvvv_8B,  VPR64>;
625 def CMLOvvv_16B : NeonI_compare_aliases<"cmlo", ".16b", CMHIvvv_16B, VPR128>;
626 def CMLOvvv_4H  : NeonI_compare_aliases<"cmlo", ".4h",  CMHIvvv_4H,  VPR64>;
627 def CMLOvvv_8H  : NeonI_compare_aliases<"cmlo", ".8h",  CMHIvvv_8H,  VPR128>;
628 def CMLOvvv_2S  : NeonI_compare_aliases<"cmlo", ".2s",  CMHIvvv_2S,  VPR64>;
629 def CMLOvvv_4S  : NeonI_compare_aliases<"cmlo", ".4s",  CMHIvvv_4S,  VPR128>;
630 def CMLOvvv_2D  : NeonI_compare_aliases<"cmlo", ".2d",  CMHIvvv_2D,  VPR128>;
631
632 // Vector Compare Mask Less Than (Integer)
633 // CMLT is alias for CMGT with operands reversed.
634 def CMLTvvv_8B  : NeonI_compare_aliases<"cmlt", ".8b",  CMGTvvv_8B,  VPR64>;
635 def CMLTvvv_16B : NeonI_compare_aliases<"cmlt", ".16b", CMGTvvv_16B, VPR128>;
636 def CMLTvvv_4H  : NeonI_compare_aliases<"cmlt", ".4h",  CMGTvvv_4H,  VPR64>;
637 def CMLTvvv_8H  : NeonI_compare_aliases<"cmlt", ".8h",  CMGTvvv_8H,  VPR128>;
638 def CMLTvvv_2S  : NeonI_compare_aliases<"cmlt", ".2s",  CMGTvvv_2S,  VPR64>;
639 def CMLTvvv_4S  : NeonI_compare_aliases<"cmlt", ".4s",  CMGTvvv_4S,  VPR128>;
640 def CMLTvvv_2D  : NeonI_compare_aliases<"cmlt", ".2d",  CMGTvvv_2D,  VPR128>;
641
642
643 def neon_uimm0_asmoperand : AsmOperandClass
644 {
645   let Name = "UImm0";
646   let PredicateMethod = "isUImm<0>";
647   let RenderMethod = "addImmOperands";
648 }
649
650 def neon_uimm0 : Operand<i32>, ImmLeaf<i32, [{return Imm == 0;}]> {
651   let ParserMatchClass = neon_uimm0_asmoperand;
652   let PrintMethod = "printNeonUImm0Operand";
653
654 }
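// The zero-compare instructions below therefore only ever accept a literal
// zero immediate, e.g. "cmeq v0.8b, v1.8b, #0".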
655
656 multiclass NeonI_cmpz_sizes<bit u, bits<5> opcode, string asmop, CondCode CC>
657 {
658   def _8B :  NeonI_2VMisc<0b0, u, 0b00, opcode,
659              (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
660              asmop # "\t$Rd.8b, $Rn.8b, $Imm",
661              [(set (v8i8 VPR64:$Rd),
662                 (v8i8 (Neon_cmpz (v8i8 VPR64:$Rn), (i32 imm:$Imm), CC)))],
663              NoItinerary>;
664
665   def _16B : NeonI_2VMisc<0b1, u, 0b00, opcode,
666              (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
667              asmop # "\t$Rd.16b, $Rn.16b, $Imm",
668              [(set (v16i8 VPR128:$Rd),
669                 (v16i8 (Neon_cmpz (v16i8 VPR128:$Rn), (i32 imm:$Imm), CC)))],
670              NoItinerary>;
671
672   def _4H : NeonI_2VMisc<0b0, u, 0b01, opcode,
673             (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
674             asmop # "\t$Rd.4h, $Rn.4h, $Imm",
675             [(set (v4i16 VPR64:$Rd),
676                (v4i16 (Neon_cmpz (v4i16 VPR64:$Rn), (i32 imm:$Imm), CC)))],
677             NoItinerary>;
678
679   def _8H : NeonI_2VMisc<0b1, u, 0b01, opcode,
680             (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
681             asmop # "\t$Rd.8h, $Rn.8h, $Imm",
682             [(set (v8i16 VPR128:$Rd),
683                (v8i16 (Neon_cmpz (v8i16 VPR128:$Rn), (i32 imm:$Imm), CC)))],
684             NoItinerary>;
685
686   def _2S : NeonI_2VMisc<0b0, u, 0b10, opcode,
687             (outs VPR64:$Rd), (ins VPR64:$Rn, neon_uimm0:$Imm),
688             asmop # "\t$Rd.2s, $Rn.2s, $Imm",
689             [(set (v2i32 VPR64:$Rd),
690                (v2i32 (Neon_cmpz (v2i32 VPR64:$Rn), (i32 imm:$Imm), CC)))],
691             NoItinerary>;
692
693   def _4S : NeonI_2VMisc<0b1, u, 0b10, opcode,
694             (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
695             asmop # "\t$Rd.4s, $Rn.4s, $Imm",
696             [(set (v4i32 VPR128:$Rd),
697                (v4i32 (Neon_cmpz (v4i32 VPR128:$Rn), (i32 imm:$Imm), CC)))],
698             NoItinerary>;
699
700   def _2D : NeonI_2VMisc<0b1, u, 0b11, opcode,
701             (outs VPR128:$Rd), (ins VPR128:$Rn, neon_uimm0:$Imm),
702             asmop # "\t$Rd.2d, $Rn.2d, $Imm",
703             [(set (v2i64 VPR128:$Rd),
704                (v2i64 (Neon_cmpz (v2i64 VPR128:$Rn), (i32 imm:$Imm), CC)))],
705             NoItinerary>;
706 }
707
708 // Vector Compare Mask Equal to Zero (Integer)
709 defm CMEQvvi : NeonI_cmpz_sizes<0b0, 0b01001, "cmeq", SETEQ>;
710
711 // Vector Compare Mask Greater Than or Equal to Zero (Signed Integer)
712 defm CMGEvvi : NeonI_cmpz_sizes<0b1, 0b01000, "cmge", SETGE>;
713
714 // Vector Compare Mask Greater Than Zero (Signed Integer)
715 defm CMGTvvi : NeonI_cmpz_sizes<0b0, 0b01000, "cmgt", SETGT>;
716
717 // Vector Compare Mask Less Than or Equal To Zero (Signed Integer)
718 defm CMLEvvi : NeonI_cmpz_sizes<0b1, 0b01001, "cmle", SETLE>;
719
720 // Vector Compare Mask Less Than Zero (Signed Integer)
721 defm CMLTvvi : NeonI_cmpz_sizes<0b0, 0b01010, "cmlt", SETLT>;
722
723 // Vector Comparisons (Floating Point)
724
725 // Vector Compare Mask Equal (Floating Point)
726 let isCommutable =1 in {
727 defm FCMEQvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11100, "fcmeq", Neon_cmeq,
728                                       Neon_cmeq, Neon_cmeq,
729                                       v2i32, v4i32, v2i64, 0>;
730 }
731
732 // Vector Compare Mask Greater Than Or Equal (Floating Point)
733 defm FCMGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11100, "fcmge", Neon_cmge,
734                                       Neon_cmge, Neon_cmge,
735                                       v2i32, v4i32, v2i64, 0>;
736
737 // Vector Compare Mask Greater Than (Floating Point)
738 defm FCMGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11100, "fcmgt", Neon_cmgt,
739                                       Neon_cmgt, Neon_cmgt,
740                                       v2i32, v4i32, v2i64, 0>;
741
742 // Vector Compare Mask Less Than Or Equal (Floating Point)
743 // FCMLE is alias for FCMGE with operands reversed.
744 def FCMLEvvv_2S  : NeonI_compare_aliases<"fcmle", ".2s",  FCMGEvvv_2S,  VPR64>;
745 def FCMLEvvv_4S  : NeonI_compare_aliases<"fcmle", ".4s",  FCMGEvvv_4S,  VPR128>;
746 def FCMLEvvv_2D  : NeonI_compare_aliases<"fcmle", ".2d",  FCMGEvvv_2D,  VPR128>;
747
748 // Vector Compare Mask Less Than (Floating Point)
749 // FCMLT is alias for FCMGT with operands reversed.
750 def FCMLTvvv_2S  : NeonI_compare_aliases<"fcmlt", ".2s",  FCMGTvvv_2S,  VPR64>;
751 def FCMLTvvv_4S  : NeonI_compare_aliases<"fcmlt", ".4s",  FCMGTvvv_4S,  VPR128>;
752 def FCMLTvvv_2D  : NeonI_compare_aliases<"fcmlt", ".2d",  FCMGTvvv_2D,  VPR128>;
753
754
755 multiclass NeonI_fpcmpz_sizes<bit u, bit size, bits<5> opcode,
756                               string asmop, CondCode CC>
757 {
758   def _2S : NeonI_2VMisc<0b0, u, {size, 0b0}, opcode,
759             (outs VPR64:$Rd), (ins VPR64:$Rn, fpz32:$FPImm),
760             asmop # "\t$Rd.2s, $Rn.2s, $FPImm",
761             [(set (v2i32 VPR64:$Rd),
762                (v2i32 (Neon_cmpz (v2f32 VPR64:$Rn), (f32 fpimm:$FPImm), CC)))],
763             NoItinerary>;
764
765   def _4S : NeonI_2VMisc<0b1, u, {size, 0b0}, opcode,
766             (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
767             asmop # "\t$Rd.4s, $Rn.4s, $FPImm",
768             [(set (v4i32 VPR128:$Rd),
769                (v4i32 (Neon_cmpz (v4f32 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))],
770             NoItinerary>;
771
772   def _2D : NeonI_2VMisc<0b1, u, {size, 0b1}, opcode,
773             (outs VPR128:$Rd), (ins VPR128:$Rn, fpz32:$FPImm),
774             asmop # "\t$Rd.2d, $Rn.2d, $FPImm",
775             [(set (v2i64 VPR128:$Rd),
776                (v2i64 (Neon_cmpz (v2f64 VPR128:$Rn), (f32 fpimm:$FPImm), CC)))],
777             NoItinerary>;
778 }
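// As with the integer forms above, the floating-point immediate here is
// always zero (see the Neon_cmpz node comment near the top of this file),
// e.g. "fcmeq v0.2s, v1.2s, #0.0".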
779
780 // Vector Compare Mask Equal to Zero (Floating Point)
781 defm FCMEQvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01101, "fcmeq", SETEQ>;
782
783 // Vector Compare Mask Greater Than or Equal to Zero (Floating Point)
784 defm FCMGEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01100, "fcmge", SETGE>;
785
786 // Vector Compare Mask Greater Than Zero (Floating Point)
787 defm FCMGTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01100, "fcmgt", SETGT>;
788
789 // Vector Compare Mask Less Than or Equal To Zero (Floating Point)
790 defm FCMLEvvi : NeonI_fpcmpz_sizes<0b1, 0b1, 0b01101, "fcmle", SETLE>;
791
792 // Vector Compare Mask Less Than Zero (Floating Point)
793 defm FCMLTvvi : NeonI_fpcmpz_sizes<0b0, 0b1, 0b01110, "fcmlt", SETLT>;
794
795 // Vector Absolute Comparisons (Floating Point)
796
797 // Vector Absolute Compare Mask Greater Than Or Equal (Floating Point)
798 defm FACGEvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11101, "facge",
799                                       int_arm_neon_vacged, int_arm_neon_vacgeq,
800                                       int_aarch64_neon_vacgeq,
801                                       v2i32, v4i32, v2i64, 0>;
802
803 // Vector Absolute Compare Mask Greater Than (Floating Point)
804 defm FACGTvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11101, "facgt",
805                                       int_arm_neon_vacgtd, int_arm_neon_vacgtq,
806                                       int_aarch64_neon_vacgtq,
807                                       v2i32, v4i32, v2i64, 0>;
808
809 // Vector Absolute Compare Mask Less Than Or Equal (Floating Point)
810 // FACLE is alias for FACGE with operands reversed.
811 def FACLEvvv_2S  : NeonI_compare_aliases<"facle", ".2s",  FACGEvvv_2S,  VPR64>;
812 def FACLEvvv_4S  : NeonI_compare_aliases<"facle", ".4s",  FACGEvvv_4S,  VPR128>;
813 def FACLEvvv_2D  : NeonI_compare_aliases<"facle", ".2d",  FACGEvvv_2D,  VPR128>;
814
815 // Vector Absolute Compare Mask Less Than (Floating Point)
816 // FACLT is alias for FACGT with operands reversed.
817 def FACLTvvv_2S  : NeonI_compare_aliases<"faclt", ".2s",  FACGTvvv_2S,  VPR64>;
818 def FACLTvvv_4S  : NeonI_compare_aliases<"faclt", ".4s",  FACGTvvv_4S,  VPR128>;
819 def FACLTvvv_2D  : NeonI_compare_aliases<"faclt", ".2d",  FACGTvvv_2D,  VPR128>;
820
821 // Vector halving add (Integer Signed, Unsigned)
822 defm SHADDvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00000, "shadd",
823                                         int_arm_neon_vhadds, 1>;
824 defm UHADDvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00000, "uhadd",
825                                         int_arm_neon_vhaddu, 1>;
826
827 // Vector halving sub (Integer Signed, Unsigned)
828 defm SHSUBvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00100, "shsub",
829                                         int_arm_neon_vhsubs, 0>;
830 defm UHSUBvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00100, "uhsub",
831                                         int_arm_neon_vhsubu, 0>;
832
833 // Vector rounding halving add (Integer Signed, Unsigned)
834 defm SRHADDvvv :  NeonI_3VSame_BHS_sizes<0b0, 0b00010, "srhadd",
835                                          int_arm_neon_vrhadds, 1>;
836 defm URHADDvvv :  NeonI_3VSame_BHS_sizes<0b1, 0b00010, "urhadd",
837                                          int_arm_neon_vrhaddu, 1>;
838
839 // Vector Saturating add (Integer Signed, Unsigned)
840 defm SQADDvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b00001, "sqadd",
841                    int_arm_neon_vqadds, 1>;
842 defm UQADDvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b00001, "uqadd",
843                    int_arm_neon_vqaddu, 1>;
844
845 // Vector Saturating sub (Integer Signed, Unsigned)
846 defm SQSUBvvv :  NeonI_3VSame_BHSD_sizes<0b0, 0b00101, "sqsub",
847                    int_arm_neon_vqsubs, 1>;
848 defm UQSUBvvv :  NeonI_3VSame_BHSD_sizes<0b1, 0b00101, "uqsub",
849                    int_arm_neon_vqsubu, 1>;
850
851 // Vector Shift Left (Signed and Unsigned Integer)
852 defm SSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01000, "sshl",
853                  int_arm_neon_vshifts, 1>;
854 defm USHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01000, "ushl",
855                  int_arm_neon_vshiftu, 1>;
856
857 // Vector Saturating Shift Left (Signed and Unsigned Integer)
858 defm SQSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01001, "sqshl",
859                   int_arm_neon_vqshifts, 1>;
860 defm UQSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01001, "uqshl",
861                   int_arm_neon_vqshiftu, 1>;
862
863 // Vector Rounding Shift Left (Signed and Unsigned Integer)
864 defm SRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01010, "srshl",
865                   int_arm_neon_vrshifts, 1>;
866 defm URSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01010, "urshl",
867                   int_arm_neon_vrshiftu, 1>;
868
869 // Vector Saturating Rounding Shift Left (Signed and Unsigned Integer)
870 defm SQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b0, 0b01011, "sqrshl",
871                    int_arm_neon_vqrshifts, 1>;
872 defm UQRSHLvvv : NeonI_3VSame_BHSD_sizes<0b1, 0b01011, "uqrshl",
873                    int_arm_neon_vqrshiftu, 1>;
874
875 // Vector Maximum (Signed and Unsigned Integer)
876 defm SMAXvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01100, "smax", int_arm_neon_vmaxs, 1>;
877 defm UMAXvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01100, "umax", int_arm_neon_vmaxu, 1>;
878
879 // Vector Minimum (Signed and Unsigned Integer)
880 defm SMINvvv : NeonI_3VSame_BHS_sizes<0b0, 0b01101, "smin", int_arm_neon_vmins, 1>;
881 defm UMINvvv : NeonI_3VSame_BHS_sizes<0b1, 0b01101, "umin", int_arm_neon_vminu, 1>;
882
883 // Vector Maximum (Floating Point)
884 defm FMAXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11110, "fmax",
885                                      int_arm_neon_vmaxs, int_arm_neon_vmaxs,
886                                      int_arm_neon_vmaxs, v2f32, v4f32, v2f64, 1>;
887
888 // Vector Minimum (Floating Point)
889 defm FMINvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11110, "fmin",
890                                      int_arm_neon_vmins, int_arm_neon_vmins,
891                                      int_arm_neon_vmins, v2f32, v4f32, v2f64, 1>;
892
893 // Vector maxNum (Floating Point) - prefer a number over a quiet NaN
894 defm FMAXNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11000, "fmaxnm",
895                                        int_aarch64_neon_vmaxnm,
896                                        int_aarch64_neon_vmaxnm,
897                                        int_aarch64_neon_vmaxnm,
898                                        v2f32, v4f32, v2f64, 1>;
899
900 // Vector minNum (Floating Point) - prefer a number over a quiet NaN
901 defm FMINNMvvv : NeonI_3VSame_SD_sizes<0b0, 0b1, 0b11000, "fminnm",
902                                        int_aarch64_neon_vminnm,
903                                        int_aarch64_neon_vminnm,
904                                        int_aarch64_neon_vminnm,
905                                        v2f32, v4f32, v2f64, 1>;
906
907 // Vector Maximum Pairwise (Signed and Unsigned Integer)
908 defm SMAXPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10100, "smaxp", int_arm_neon_vpmaxs, 1>;
909 defm UMAXPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10100, "umaxp", int_arm_neon_vpmaxu, 1>;
910
911 // Vector Minimum Pairwise (Signed and Unsigned Integer)
912 defm SMINPvvv : NeonI_3VSame_BHS_sizes<0b0, 0b10101, "sminp", int_arm_neon_vpmins, 1>;
913 defm UMINPvvv : NeonI_3VSame_BHS_sizes<0b1, 0b10101, "uminp", int_arm_neon_vpminu, 1>;
914
915 // Vector Maximum Pairwise (Floating Point)
916 defm FMAXPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11110, "fmaxp",
917                                      int_arm_neon_vpmaxs, int_arm_neon_vpmaxs,
918                                      int_arm_neon_vpmaxs, v2f32, v4f32, v2f64, 1>;
919
920 // Vector Minimum Pairwise (Floating Point)
921 defm FMINPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11110, "fminp",
922                                      int_arm_neon_vpmins, int_arm_neon_vpmins,
923                                      int_arm_neon_vpmins, v2f32, v4f32, v2f64, 1>;
924
925 // Vector maxNum Pairwise (Floating Point) - prefer a number over a quiet NaN
926 defm FMAXNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11000, "fmaxnmp",
927                                        int_aarch64_neon_vpmaxnm,
928                                        int_aarch64_neon_vpmaxnm,
929                                        int_aarch64_neon_vpmaxnm,
930                                        v2f32, v4f32, v2f64, 1>;
931
932 // Vector minNum Pairwise (Floating Point) - prefer a number over a quiet NaN
933 defm FMINNMPvvv : NeonI_3VSame_SD_sizes<0b1, 0b1, 0b11000, "fminnmp",
934                                        int_aarch64_neon_vpminnm,
935                                        int_aarch64_neon_vpminnm,
936                                        int_aarch64_neon_vpminnm,
937                                        v2f32, v4f32, v2f64, 1>;
938
939 // Vector Addition Pairwise (Integer)
940 defm ADDP : NeonI_3VSame_BHSD_sizes<0b0, 0b10111, "addp", int_arm_neon_vpadd, 1>;
941
942 // Vector Addition Pairwise (Floating Point)
943 defm FADDP : NeonI_3VSame_SD_sizes<0b1, 0b0, 0b11010, "faddp",
944                                        int_arm_neon_vpadd,
945                                        int_arm_neon_vpadd,
946                                        int_arm_neon_vpadd,
947                                        v2f32, v4f32, v2f64, 1>;
948
949 // Vector Saturating Doubling Multiply High
950 defm SQDMULHvvv : NeonI_3VSame_HS_sizes<0b0, 0b10110, "sqdmulh",
951                     int_arm_neon_vqdmulh, 1>;
952
953 // Vector Saturating Rounding Doubling Multiply High
954 defm SQRDMULHvvv : NeonI_3VSame_HS_sizes<0b1, 0b10110, "sqrdmulh",
955                      int_arm_neon_vqrdmulh, 1>;
956
957 // Vector Multiply Extended (Floating Point)
958 defm FMULXvvv : NeonI_3VSame_SD_sizes<0b0, 0b0, 0b11011, "fmulx",
959                                       int_aarch64_neon_vmulx,
960                                       int_aarch64_neon_vmulx,
961                                       int_aarch64_neon_vmulx,
962                                       v2f32, v4f32, v2f64, 1>;
963
964 // Vector Immediate Instructions
965
966 multiclass neon_mov_imm_shift_asmoperands<string PREFIX>
967 {
968   def _asmoperand : AsmOperandClass
969     {
970       let Name = "NeonMovImmShift" # PREFIX;
971       let RenderMethod = "addNeonMovImmShift" # PREFIX # "Operands";
972       let PredicateMethod = "isNeonMovImmShift" # PREFIX;
973     }
974 }
975
976 // Definition of vector immediate shift operands.
977
978 // The selectable operands below extract the shift operation
979 // information from the OpCmode field encoded in the immediate.
980 def neon_mod_shift_imm_XFORM : SDNodeXForm<imm, [{
981   uint64_t OpCmode = N->getZExtValue();
982   unsigned ShiftImm;
983   unsigned ShiftOnesIn;
984   unsigned HasShift =
985     A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
986   if (!HasShift) return SDValue();
987   return CurDAG->getTargetConstant(ShiftImm, MVT::i32);
988 }]>;
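// For the per-word LSL forms, for instance, the decoded ShiftImm is 0, 8, 16
// or 24: the two Simm bits that NeonI_mov_imm_lsl_sizes places into cmode,
// multiplied by 8.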
989
990 // Vector immediate shift operands which accept LSL and MSL shift
991 // operators, with shift values of 0, 8, 16 or 24 (LSL), 0 or 8 (LSLH),
992 // or 8 or 16 (MSL).
993 defm neon_mov_imm_LSL : neon_mov_imm_shift_asmoperands<"LSL">;
994 defm neon_mov_imm_MSL : neon_mov_imm_shift_asmoperands<"MSL">;
995 // LSLH restricts the shift amount to 0 or 8, out of 0, 8, 16, 24.
996 defm neon_mov_imm_LSLH : neon_mov_imm_shift_asmoperands<"LSLH">;
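// In assembly these appear as, e.g., "movi v0.4s, #0xab, lsl #16" (shift in
// zeros) or "movi v0.4s, #0xab, msl #8" (shift in ones).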
997
998 multiclass neon_mov_imm_shift_operands<string PREFIX,
999                                        string HALF, string ISHALF, code pred>
1000 {
1001    def _operand : Operand<i32>, ImmLeaf<i32, pred, neon_mod_shift_imm_XFORM>
1002     {
1003       let PrintMethod =
1004         "printNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
1005       let DecoderMethod =
1006         "DecodeNeonMovImmShiftOperand<A64SE::" # PREFIX # ", " # ISHALF # ">";
1007       let ParserMatchClass =
1008         !cast<AsmOperandClass>("neon_mov_imm_" # PREFIX # HALF # "_asmoperand");
1009     }
1010 }
1011
1012 defm neon_mov_imm_LSL  : neon_mov_imm_shift_operands<"LSL", "", "false", [{
1013   unsigned ShiftImm;
1014   unsigned ShiftOnesIn;
1015   unsigned HasShift =
1016     A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1017   return (HasShift && !ShiftOnesIn);
1018 }]>;
1019
1020 defm neon_mov_imm_MSL  : neon_mov_imm_shift_operands<"MSL", "", "false", [{
1021   unsigned ShiftImm;
1022   unsigned ShiftOnesIn;
1023   unsigned HasShift =
1024     A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1025   return (HasShift && ShiftOnesIn);
1026 }]>;
1027
1028 defm neon_mov_imm_LSLH  : neon_mov_imm_shift_operands<"LSL", "H", "true", [{
1029   unsigned ShiftImm;
1030   unsigned ShiftOnesIn;
1031   unsigned HasShift =
1032     A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1033   return (HasShift && !ShiftOnesIn);
1034 }]>;
1035
1036 def neon_uimm1_asmoperand : AsmOperandClass
1037 {
1038   let Name = "UImm1";
1039   let PredicateMethod = "isUImm<1>";
1040   let RenderMethod = "addImmOperands";
1041 }
1042
1043 def neon_uimm2_asmoperand : AsmOperandClass
1044 {
1045   let Name = "UImm2";
1046   let PredicateMethod = "isUImm<2>";
1047   let RenderMethod = "addImmOperands";
1048 }
1049
1050 def neon_uimm8_asmoperand : AsmOperandClass
1051 {
1052   let Name = "UImm8";
1053   let PredicateMethod = "isUImm<8>";
1054   let RenderMethod = "addImmOperands";
1055 }
1056
1057 def neon_uimm8 : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
1058   let ParserMatchClass = neon_uimm8_asmoperand;
1059   let PrintMethod = "printUImmHexOperand";
1060 }
1061
1062 def neon_uimm64_mask_asmoperand : AsmOperandClass
1063 {
1064   let Name = "NeonUImm64Mask";
1065   let PredicateMethod = "isNeonUImm64Mask";
1066   let RenderMethod = "addNeonUImm64MaskOperands";
1067 }
1068
1069 // MCOperand for a 64-bit bytemask in which each byte is either 0x00 or
1070 // 0xff; the mask is encoded as an unsigned 8-bit value.
1071 def neon_uimm64_mask : Operand<i32>, ImmLeaf<i32, [{(void)Imm; return true;}]> {
1072   let ParserMatchClass = neon_uimm64_mask_asmoperand;
1073   let PrintMethod = "printNeonUImm64MaskOperand";
1074 }
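// Bit i of the 8-bit value selects byte i of the 64-bit mask, so, for
// example, an immediate of 0x0f denotes the bytemask 0x00000000ffffffff.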
1075
1076 multiclass NeonI_mov_imm_lsl_sizes<string asmop, bit op,
1077                                    SDPatternOperator opnode>
1078 {
1079     // shift zeros, per word
1080     def _2S  : NeonI_1VModImm<0b0, op,
1081                               (outs VPR64:$Rd),
1082                               (ins neon_uimm8:$Imm,
1083                                 neon_mov_imm_LSL_operand:$Simm),
1084                               !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
1085                               [(set (v2i32 VPR64:$Rd),
1086                                  (v2i32 (opnode (timm:$Imm),
1087                                    (neon_mov_imm_LSL_operand:$Simm))))],
1088                               NoItinerary> {
1089        bits<2> Simm;
1090        let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
1091      }
1092
1093     def _4S  : NeonI_1VModImm<0b1, op,
1094                               (outs VPR128:$Rd),
1095                               (ins neon_uimm8:$Imm,
1096                                 neon_mov_imm_LSL_operand:$Simm),
1097                               !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
1098                               [(set (v4i32 VPR128:$Rd),
1099                                  (v4i32 (opnode (timm:$Imm),
1100                                    (neon_mov_imm_LSL_operand:$Simm))))],
1101                               NoItinerary> {
1102       bits<2> Simm;
1103       let cmode = {0b0, Simm{1}, Simm{0}, 0b0};
1104     }
1105
1106     // shift zeros, per halfword
1107     def _4H  : NeonI_1VModImm<0b0, op,
1108                               (outs VPR64:$Rd),
1109                               (ins neon_uimm8:$Imm,
1110                                 neon_mov_imm_LSLH_operand:$Simm),
1111                               !strconcat(asmop, "\t$Rd.4h, $Imm$Simm"),
1112                               [(set (v4i16 VPR64:$Rd),
1113                                  (v4i16 (opnode (timm:$Imm),
1114                                    (neon_mov_imm_LSLH_operand:$Simm))))],
1115                               NoItinerary> {
1116       bit  Simm;
1117       let cmode = {0b1, 0b0, Simm, 0b0};
1118     }
1119
1120     def _8H  : NeonI_1VModImm<0b1, op,
1121                               (outs VPR128:$Rd),
1122                               (ins neon_uimm8:$Imm,
1123                                 neon_mov_imm_LSLH_operand:$Simm),
1124                               !strconcat(asmop, "\t$Rd.8h, $Imm$Simm"),
1125                               [(set (v8i16 VPR128:$Rd),
1126                                  (v8i16 (opnode (timm:$Imm),
1127                                    (neon_mov_imm_LSLH_operand:$Simm))))],
1128                               NoItinerary> {
1129       bit Simm;
1130       let cmode = {0b1, 0b0, Simm, 0b0};
1131      }
1132 }
1133
1134 multiclass NeonI_mov_imm_with_constraint_lsl_sizes<string asmop, bit op,
1135                                                    SDPatternOperator opnode,
1136                                                    SDPatternOperator neonopnode>
1137 {
1138   let Constraints = "$src = $Rd" in {
1139     // shift zeros, per word
1140     def _2S  : NeonI_1VModImm<0b0, op,
1141                  (outs VPR64:$Rd),
1142                  (ins VPR64:$src, neon_uimm8:$Imm,
1143                    neon_mov_imm_LSL_operand:$Simm),
1144                  !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
1145                  [(set (v2i32 VPR64:$Rd),
1146                     (v2i32 (opnode (v2i32 VPR64:$src),
1147                       (v2i32 (bitconvert (v2i32 (neonopnode timm:$Imm,
1148                         neon_mov_imm_LSL_operand:$Simm)))))))],
1149                  NoItinerary> {
1150       bits<2> Simm;
1151       let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
1152     }
1153
1154     def _4S  : NeonI_1VModImm<0b1, op,
1155                  (outs VPR128:$Rd),
1156                  (ins VPR128:$src, neon_uimm8:$Imm,
1157                    neon_mov_imm_LSL_operand:$Simm),
1158                  !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
1159                  [(set (v4i32 VPR128:$Rd),
1160                     (v4i32 (opnode (v4i32 VPR128:$src),
1161                       (v4i32 (bitconvert (v4i32 (neonopnode timm:$Imm,
1162                         neon_mov_imm_LSL_operand:$Simm)))))))],
1163                  NoItinerary> {
1164       bits<2> Simm;
1165       let cmode = {0b0, Simm{1}, Simm{0}, 0b1};
1166     }
1167
1168     // shift zeros, per halfword
1169     def _4H  : NeonI_1VModImm<0b0, op,
1170                  (outs VPR64:$Rd),
1171                  (ins VPR64:$src, neon_uimm8:$Imm,
1172                    neon_mov_imm_LSLH_operand:$Simm),
1173                  !strconcat(asmop, "\t$Rd.4h, $Imm$Simm"),
1174                  [(set (v4i16 VPR64:$Rd),
1175                     (v4i16 (opnode (v4i16 VPR64:$src),
1176                        (v4i16 (bitconvert (v4i16 (neonopnode timm:$Imm,
1177                           neon_mov_imm_LSLH_operand:$Simm)))))))],
1178                  NoItinerary> {
1179       bit  Simm;
1180       let cmode = {0b1, 0b0, Simm, 0b1};
1181     }
1182
1183     def _8H  : NeonI_1VModImm<0b1, op,
1184                  (outs VPR128:$Rd),
1185                  (ins VPR128:$src, neon_uimm8:$Imm,
1186                    neon_mov_imm_LSLH_operand:$Simm),
1187                  !strconcat(asmop, "\t$Rd.8h, $Imm$Simm"),
1188                  [(set (v8i16 VPR128:$Rd),
1189                     (v8i16 (opnode (v8i16 VPR128:$src),
1190                       (v8i16 (bitconvert (v8i16 (neonopnode timm:$Imm,
1191                         neon_mov_imm_LSLH_operand:$Simm)))))))],
1192                  NoItinerary> {
1193       bit Simm;
1194       let cmode = {0b1, 0b0, Simm, 0b1};
1195     }
1196   }
1197 }
1198
1199 multiclass NeonI_mov_imm_msl_sizes<string asmop, bit op,
1200                                    SDPatternOperator opnode>
1201 {
1202     // shift ones, per word
1203     def _2S  : NeonI_1VModImm<0b0, op,
1204                              (outs VPR64:$Rd),
1205                              (ins neon_uimm8:$Imm,
1206                                neon_mov_imm_MSL_operand:$Simm),
1207                              !strconcat(asmop, "\t$Rd.2s, $Imm$Simm"),
1208                               [(set (v2i32 VPR64:$Rd),
1209                                  (v2i32 (opnode (timm:$Imm),
1210                                    (neon_mov_imm_MSL_operand:$Simm))))],
1211                              NoItinerary> {
1212       bit Simm;
1213       let cmode = {0b1, 0b1, 0b0, Simm};
1214     }
1215
1216     def _4S  : NeonI_1VModImm<0b1, op,
1217                               (outs VPR128:$Rd),
1218                               (ins neon_uimm8:$Imm,
1219                                 neon_mov_imm_MSL_operand:$Simm),
1220                               !strconcat(asmop, "\t$Rd.4s, $Imm$Simm"),
1221                               [(set (v4i32 VPR128:$Rd),
1222                                  (v4i32 (opnode (timm:$Imm),
1223                                    (neon_mov_imm_MSL_operand:$Simm))))],
1224                               NoItinerary> {
1225       bit Simm;
1226       let cmode = {0b1, 0b1, 0b0, Simm};
1227     }
1228 }
1229
1230 // Vector Move Immediate Shifted
1231 let isReMaterializable = 1 in {
1232 defm MOVIvi_lsl : NeonI_mov_imm_lsl_sizes<"movi", 0b0, Neon_movi>;
1233 }
1234
1235 // Vector Move Inverted Immediate Shifted
1236 let isReMaterializable = 1 in {
1237 defm MVNIvi_lsl : NeonI_mov_imm_lsl_sizes<"mvni", 0b1, Neon_mvni>;
1238 }
1239
1240 // Vector Bitwise Bit Clear (AND NOT) - immediate
1241 let isReMaterializable = 1 in {
1242 defm BICvi_lsl : NeonI_mov_imm_with_constraint_lsl_sizes<"bic", 0b1,
1243                                                          and, Neon_mvni>;
1244 }
1245
1246 // Vector Bitwise OR - immediate
1247
1248 let isReMaterializable = 1 in {
1249 defm ORRvi_lsl   : NeonI_mov_imm_with_constraint_lsl_sizes<"orr", 0b0,
1250                                                            or, Neon_movi>;
1251 }
1252
1253 // Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate.
1254 // LowerBUILD_VECTOR favors lowering MOVI over MVNI, so selecting the BIC
1255 // immediate instructions requires additional patterns that transform
1256 // Neon_movi operands into BIC immediate operands.
1257
1258 def neon_mov_imm_LSLH_transform_XFORM : SDNodeXForm<imm, [{
1259   uint64_t OpCmode = N->getZExtValue();
1260   unsigned ShiftImm;
1261   unsigned ShiftOnesIn;
1262   (void)A64Imms::decodeNeonModShiftImm(OpCmode, ShiftImm, ShiftOnesIn);
1263   // LSLH restricts the shift amount to 0 or 8, encoded as 0 and 1 respectively.
1264   // Transform the encoded shift amount 0 to 1 and 1 to 0.
1265   return CurDAG->getTargetConstant(!ShiftImm, MVT::i32);
1266 }]>;
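// For example, ANDing each halfword with 0x00ff (movi #0xff, LSL #0) keeps the
// low byte of each lane, which is the same as clearing the high byte, so the
// BIC form uses the opposite shift (#8); hence the encoded shift is inverted.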
1267
1268 def neon_mov_imm_LSLH_transform_operand
1269   : ImmLeaf<i32, [{
1270     unsigned ShiftImm;
1271     unsigned ShiftOnesIn;
1272     unsigned HasShift =
1273       A64Imms::decodeNeonModShiftImm(Imm, ShiftImm, ShiftOnesIn);
1274     return (HasShift && !ShiftOnesIn); }],
1275   neon_mov_imm_LSLH_transform_XFORM>;
1276
1277 // Transform (and A, (4h Neon_movi 0xff)) -> BIC 4h (A, 0x00, LSL 8)
1278 // Transform (and A, (4h Neon_movi 0xff LSL #8)) -> BIC 4h (A, 0x00)
1279 def : Pat<(v4i16 (and VPR64:$src,
1280             (v4i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
1281           (BICvi_lsl_4H VPR64:$src, 0,
1282             neon_mov_imm_LSLH_transform_operand:$Simm)>;
1283
1284 // Transform (and A, (8h Neon_movi 0xff)) -> BIC 8h (A, 0x00, LSL 8)
1285 // Transform (and A, (8h Neon_movi 0xff LSL #8)) -> BIC 8h (A, 0x00)
1286 def : Pat<(v8i16 (and VPR128:$src,
1287             (v8i16 (Neon_movi 255, neon_mov_imm_LSLH_transform_operand:$Simm)))),
1288           (BICvi_lsl_8H VPR128:$src, 0,
1289             neon_mov_imm_LSLH_transform_operand:$Simm)>;
1290
1291
1292 multiclass Neon_bitwiseVi_patterns<SDPatternOperator opnode,
1293                                    SDPatternOperator neonopnode,
1294                                    Instruction INST4H,
1295                                    Instruction INST8H> {
1296   def : Pat<(v8i8 (opnode VPR64:$src,
1297                     (bitconvert(v4i16 (neonopnode timm:$Imm,
1298                       neon_mov_imm_LSLH_operand:$Simm))))),
1299             (INST4H VPR64:$src, neon_uimm8:$Imm,
1300               neon_mov_imm_LSLH_operand:$Simm)>;
1301   def : Pat<(v1i64 (opnode VPR64:$src,
1302                   (bitconvert(v4i16 (neonopnode timm:$Imm,
1303                     neon_mov_imm_LSLH_operand:$Simm))))),
1304           (INST4H VPR64:$src, neon_uimm8:$Imm,
1305             neon_mov_imm_LSLH_operand:$Simm)>;
1306
1307   def : Pat<(v16i8 (opnode VPR128:$src,
1308                    (bitconvert(v8i16 (neonopnode timm:$Imm,
1309                      neon_mov_imm_LSLH_operand:$Simm))))),
1310           (INST8H VPR128:$src, neon_uimm8:$Imm,
1311             neon_mov_imm_LSLH_operand:$Simm)>;
1312   def : Pat<(v4i32 (opnode VPR128:$src,
1313                    (bitconvert(v8i16 (neonopnode timm:$Imm,
1314                      neon_mov_imm_LSLH_operand:$Simm))))),
1315           (INST8H VPR128:$src, neon_uimm8:$Imm,
1316             neon_mov_imm_LSLH_operand:$Simm)>;
1317   def : Pat<(v2i64 (opnode VPR128:$src,
1318                    (bitconvert(v8i16 (neonopnode timm:$Imm,
1319                      neon_mov_imm_LSLH_operand:$Simm))))),
1320           (INST8H VPR128:$src, neon_uimm8:$Imm,
1321             neon_mov_imm_LSLH_operand:$Simm)>;
1322 }
1323
1324 // Additional patterns for Vector Bitwise Bit Clear (AND NOT) - immediate
1325 defm : Neon_bitwiseVi_patterns<and, Neon_mvni, BICvi_lsl_4H, BICvi_lsl_8H>;
1326
1327 // Additional patterns for Vector Bitwise OR - immediate
1328 defm : Neon_bitwiseVi_patterns<or, Neon_movi, ORRvi_lsl_4H, ORRvi_lsl_8H>;
1329
1330
1331 // Vector Move Immediate Masked
1332 let isReMaterializable = 1 in {
1333 defm MOVIvi_msl : NeonI_mov_imm_msl_sizes<"movi", 0b0, Neon_movi>;
1334 }
1335
1336 // Vector Move Inverted Immediate Masked
1337 let isReMaterializable = 1 in {
1338 defm MVNIvi_msl : NeonI_mov_imm_msl_sizes<"mvni", 0b1, Neon_mvni>;
1339 }
1340
1341 class NeonI_mov_imm_lsl_aliases<string asmop, string asmlane,
1342                                 Instruction inst, RegisterOperand VPRC>
1343   : NeonInstAlias<!strconcat(asmop, "\t$Rd," # asmlane # ", $Imm"),
1344                         (inst VPRC:$Rd, neon_uimm8:$Imm,  0), 0b0>;
1345
1346 // Aliases for Vector Move Immediate Shifted
1347 def : NeonI_mov_imm_lsl_aliases<"movi", ".2s", MOVIvi_lsl_2S, VPR64>;
1348 def : NeonI_mov_imm_lsl_aliases<"movi", ".4s", MOVIvi_lsl_4S, VPR128>;
1349 def : NeonI_mov_imm_lsl_aliases<"movi", ".4h", MOVIvi_lsl_4H, VPR64>;
1350 def : NeonI_mov_imm_lsl_aliases<"movi", ".8h", MOVIvi_lsl_8H, VPR128>;
1351
1352 // Aliases for Vector Move Inverted Immediate Shifted
1353 def : NeonI_mov_imm_lsl_aliases<"mvni", ".2s", MVNIvi_lsl_2S, VPR64>;
1354 def : NeonI_mov_imm_lsl_aliases<"mvni", ".4s", MVNIvi_lsl_4S, VPR128>;
1355 def : NeonI_mov_imm_lsl_aliases<"mvni", ".4h", MVNIvi_lsl_4H, VPR64>;
1356 def : NeonI_mov_imm_lsl_aliases<"mvni", ".8h", MVNIvi_lsl_8H, VPR128>;
1357
1358 // Aliases for Vector Bitwise Bit Clear (AND NOT) - immediate
1359 def : NeonI_mov_imm_lsl_aliases<"bic", ".2s", BICvi_lsl_2S, VPR64>;
1360 def : NeonI_mov_imm_lsl_aliases<"bic", ".4s", BICvi_lsl_4S, VPR128>;
1361 def : NeonI_mov_imm_lsl_aliases<"bic", ".4h", BICvi_lsl_4H, VPR64>;
1362 def : NeonI_mov_imm_lsl_aliases<"bic", ".8h", BICvi_lsl_8H, VPR128>;
1363
1364 // Aliases for Vector Bitwise OR - immediate
1365 def : NeonI_mov_imm_lsl_aliases<"orr", ".2s", ORRvi_lsl_2S, VPR64>;
1366 def : NeonI_mov_imm_lsl_aliases<"orr", ".4s", ORRvi_lsl_4S, VPR128>;
1367 def : NeonI_mov_imm_lsl_aliases<"orr", ".4h", ORRvi_lsl_4H, VPR64>;
1368 def : NeonI_mov_imm_lsl_aliases<"orr", ".8h", ORRvi_lsl_8H, VPR128>;
1369
1370 //  Vector Move Immediate - per byte
1371 let isReMaterializable = 1 in {
1372 def MOVIvi_8B : NeonI_1VModImm<0b0, 0b0,
1373                                (outs VPR64:$Rd), (ins neon_uimm8:$Imm),
1374                                "movi\t$Rd.8b, $Imm",
1375                                [(set (v8i8 VPR64:$Rd),
1376                                   (v8i8 (Neon_movi (timm:$Imm), (i32 imm))))],
1377                                 NoItinerary> {
1378   let cmode = 0b1110;
1379 }
1380
1381 def MOVIvi_16B : NeonI_1VModImm<0b1, 0b0,
1382                                 (outs VPR128:$Rd), (ins neon_uimm8:$Imm),
1383                                 "movi\t$Rd.16b, $Imm",
1384                                 [(set (v16i8 VPR128:$Rd),
1385                                    (v16i8 (Neon_movi (timm:$Imm), (i32 imm))))],
1386                                  NoItinerary> {
1387   let cmode = 0b1110;
1388 }
1389 }
1390
1391 // Vector Move Immediate - bytemask, per double word
1392 let isReMaterializable = 1 in {
1393 def MOVIvi_2D : NeonI_1VModImm<0b1, 0b1,
1394                                (outs VPR128:$Rd), (ins neon_uimm64_mask:$Imm),
1395                                "movi\t$Rd.2d, $Imm",
1396                                [(set (v2i64 VPR128:$Rd),
1397                                   (v2i64 (Neon_movi (timm:$Imm), (i32 imm))))],
1398                                NoItinerary> {
1399   let cmode = 0b1110;
1400 }
1401 }
1402
1403 // Vector Move Immediate - bytemask, one doubleword
1404
1405 let isReMaterializable = 1 in {
1406 def MOVIdi : NeonI_1VModImm<0b0, 0b1,
1407                            (outs FPR64:$Rd), (ins neon_uimm64_mask:$Imm),
1408                            "movi\t$Rd, $Imm",
1409                            [(set (f64 FPR64:$Rd),
1410                               (f64 (bitconvert
1411                                 (v1i64 (Neon_movi (timm:$Imm), (i32 imm))))))],
1412                            NoItinerary> {
1413   let cmode = 0b1110;
1414 }
1415 }
1416
1417 // Vector Floating Point Move Immediate
1418
1419 class NeonI_FMOV_impl<string asmlane, RegisterOperand VPRC, ValueType OpTy,
1420                       Operand immOpType, bit q, bit op>
1421   : NeonI_1VModImm<q, op,
1422                    (outs VPRC:$Rd), (ins immOpType:$Imm),
1423                    "fmov\t$Rd" # asmlane # ", $Imm",
1424                    [(set (OpTy VPRC:$Rd),
1425                       (OpTy (Neon_fmovi (timm:$Imm))))],
1426                    NoItinerary> {
1427      let cmode = 0b1111;
1428    }
1429
1430 let isReMaterializable = 1 in {
1431 def FMOVvi_2S : NeonI_FMOV_impl<".2s", VPR64,  v2f32, fmov32_operand, 0b0, 0b0>;
1432 def FMOVvi_4S : NeonI_FMOV_impl<".4s", VPR128, v4f32, fmov32_operand, 0b1, 0b0>;
1433 def FMOVvi_2D : NeonI_FMOV_impl<".2d", VPR128, v2f64, fmov64_operand, 0b1, 0b1>;
1434 }
1435
1436 // Vector Shift (Immediate)
1437 // Immediate in [0, 63]
1438 def imm0_63 : Operand<i32> {
1439   let ParserMatchClass = uimm6_asmoperand;
1440 }
1441
1442 // Shift Right/Left Immediate - The immh:immb field of these shifts is encoded
1443 // as follows:
1444 //
1445 //    Offset    Encoding
1446 //     8        immh:immb<6:3> = '0001xxx', <imm> is encoded in immh:immb<2:0>
1447 //     16       immh:immb<6:4> = '001xxxx', <imm> is encoded in immh:immb<3:0>
1448 //     32       immh:immb<6:5> = '01xxxxx', <imm> is encoded in immh:immb<4:0>
1449 //     64       immh:immb<6>   = '1xxxxxx', <imm> is encoded in immh:immb<5:0>
1450 //
1451 // The shift right immediate amount, in the range 1 to element bits, is computed
1452 // as Offset - UInt(immh:immb).  The shift left immediate amount, in the range 0
1453 // to element bits - 1, is computed as UInt(immh:immb) - Offset.
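// For example, "sshr Vd.4h, Vn.4h, #3" uses Offset 16, so immh:immb =
// 16 - 3 = 13 = 0b0001101, while "shl Vd.8b, Vn.8b, #2" uses Offset 8, so
// immh:immb = 8 + 2 = 10 = 0b0001010.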
1454
1455 class shr_imm_asmoperands<string OFFSET> : AsmOperandClass {
1456   let Name = "ShrImm" # OFFSET;
1457   let RenderMethod = "addImmOperands";
1458   let DiagnosticType = "ShrImm" # OFFSET;
1459 }
1460
1461 class shr_imm<string OFFSET> : Operand<i32> {
1462   let EncoderMethod = "getShiftRightImm" # OFFSET;
1463   let DecoderMethod = "DecodeShiftRightImm" # OFFSET;
1464   let ParserMatchClass = 
1465     !cast<AsmOperandClass>("shr_imm" # OFFSET # "_asmoperand");
1466 }
1467
1468 def shr_imm8_asmoperand : shr_imm_asmoperands<"8">;
1469 def shr_imm16_asmoperand : shr_imm_asmoperands<"16">;
1470 def shr_imm32_asmoperand : shr_imm_asmoperands<"32">;
1471 def shr_imm64_asmoperand : shr_imm_asmoperands<"64">;
1472
1473 def shr_imm8 : shr_imm<"8">;
1474 def shr_imm16 : shr_imm<"16">;
1475 def shr_imm32 : shr_imm<"32">;
1476 def shr_imm64 : shr_imm<"64">;
1477
1478 class shl_imm_asmoperands<string OFFSET> : AsmOperandClass {
1479   let Name = "ShlImm" # OFFSET;
1480   let RenderMethod = "addImmOperands";
1481   let DiagnosticType = "ShlImm" # OFFSET;
1482 }
1483
1484 class shl_imm<string OFFSET> : Operand<i32> {
1485   let EncoderMethod = "getShiftLeftImm" # OFFSET;
1486   let DecoderMethod = "DecodeShiftLeftImm" # OFFSET;
1487   let ParserMatchClass = 
1488     !cast<AsmOperandClass>("shl_imm" # OFFSET # "_asmoperand");
1489 }
1490
1491 def shl_imm8_asmoperand : shl_imm_asmoperands<"8">;
1492 def shl_imm16_asmoperand : shl_imm_asmoperands<"16">;
1493 def shl_imm32_asmoperand : shl_imm_asmoperands<"32">;
1494 def shl_imm64_asmoperand : shl_imm_asmoperands<"64">;
1495
1496 def shl_imm8 : shl_imm<"8">;
1497 def shl_imm16 : shl_imm<"16">;
1498 def shl_imm32 : shl_imm<"32">;
1499 def shl_imm64 : shl_imm<"64">;
1500
1501 class N2VShift<bit q, bit u, bits<5> opcode, string asmop, string T,
1502                RegisterOperand VPRC, ValueType Ty, Operand ImmTy, SDNode OpNode>
1503   : NeonI_2VShiftImm<q, u, opcode,
1504                      (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
1505                      asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1506                      [(set (Ty VPRC:$Rd),
1507                         (Ty (OpNode (Ty VPRC:$Rn),
1508                           (Ty (Neon_vdup (i32 imm:$Imm))))))],
1509                      NoItinerary>;
1510
1511 multiclass NeonI_N2VShL<bit u, bits<5> opcode, string asmop> {
1512   // 64-bit vector types.
1513   def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, uimm3, shl> {
1514     let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
1515   }
1516
1517   def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, uimm4, shl> {
1518     let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
1519   }
1520
1521   def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, uimm5, shl> {
1522     let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
1523   }
1524
1525   // 128-bit vector types.
1526   def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, uimm3, shl> {
1527     let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
1528   }
1529
1530   def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, uimm4, shl> {
1531     let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
1532   }
1533
1534   def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, uimm5, shl> {
1535     let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
1536   }
1537
1538   def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, imm0_63, shl> {
1539     let Inst{22} = 0b1;        // immh:immb = 1xxxxxx
1540   }
1541 }
1542
1543 multiclass NeonI_N2VShR<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
1544   def _8B : N2VShift<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1545                      OpNode> {
1546     let Inst{22-19} = 0b0001;
1547   }
1548
1549   def _4H : N2VShift<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1550                      OpNode> {
1551     let Inst{22-20} = 0b001;
1552   }
1553
1554   def _2S : N2VShift<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1555                      OpNode> {
1556     let Inst{22-21} = 0b01;
1557   }
1558
1559   def _16B : N2VShift<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1560                       OpNode> {
1561     let Inst{22-19} = 0b0001;
1562   }
1563
1564   def _8H : N2VShift<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1565                      OpNode> {
1566     let Inst{22-20} = 0b001;
1567   }
1568
1569   def _4S : N2VShift<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1570                      OpNode> {
1571     let Inst{22-21} = 0b01;
1572   }
1573
1574   def _2D : N2VShift<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
1575                      OpNode> {
1576     let Inst{22} = 0b1;
1577   }
1578 }
1579
1580 // Shift left
1581 defm SHLvvi : NeonI_N2VShL<0b0, 0b01010, "shl">;
1582
1583 // Shift right
1584 defm SSHRvvi : NeonI_N2VShR<0b0, 0b00000, "sshr", sra>;
1585 defm USHRvvi : NeonI_N2VShR<0b1, 0b00000, "ushr", srl>;
1586
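// PatFrags extracting the high or low half of a 128-bit vector; the high-half
// fragments are used below by the second-half ("2") long-shift patterns.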
1587 def Neon_High16B : PatFrag<(ops node:$in),
1588                            (extract_subvector (v16i8 node:$in), (iPTR 8))>;
1589 def Neon_High8H  : PatFrag<(ops node:$in),
1590                            (extract_subvector (v8i16 node:$in), (iPTR 4))>;
1591 def Neon_High4S  : PatFrag<(ops node:$in),
1592                            (extract_subvector (v4i32 node:$in), (iPTR 2))>;
1593
1594 def Neon_low8H : PatFrag<(ops node:$in),
1595                          (v4i16 (extract_subvector (v8i16 node:$in),
1596                                                    (iPTR 0)))>;
1597 def Neon_low4S : PatFrag<(ops node:$in),
1598                          (v2i32 (extract_subvector (v4i32 node:$in),
1599                                                    (iPTR 0)))>;
1600 def Neon_low4f : PatFrag<(ops node:$in),
1601                          (v2f32 (extract_subvector (v4f32 node:$in),
1602                                                    (iPTR 0)))>;
1603
1604 def neon_uimm3_shift : Operand<i32>,
1605                          ImmLeaf<i32, [{return Imm < 8;}]> {
1606   let ParserMatchClass = uimm3_asmoperand;
1607 }
1608
1609 def neon_uimm4_shift : Operand<i32>,
1610                          ImmLeaf<i32, [{return Imm < 16;}]> {
1611   let ParserMatchClass = uimm4_asmoperand;
1612 }
1613
1614 def neon_uimm5_shift : Operand<i32>,
1615                          ImmLeaf<i32, [{return Imm < 32;}]> {
1616   let ParserMatchClass = uimm5_asmoperand;
1617 }
1618
1619 class N2VShiftLong<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1620                    string SrcT, ValueType DestTy, ValueType SrcTy,
1621                    Operand ImmTy, SDPatternOperator ExtOp>
1622   : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
1623                      (ins VPR64:$Rn, ImmTy:$Imm),
1624                      asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
1625                      [(set (DestTy VPR128:$Rd),
1626                         (DestTy (shl
1627                           (DestTy (ExtOp (SrcTy VPR64:$Rn))),
1628                             (DestTy (Neon_vdup (i32 ImmTy:$Imm))))))],
1629                      NoItinerary>;
1630
1631 class N2VShiftLongHigh<bit q, bit u, bits<5> opcode, string asmop, string DestT,
1632                        string SrcT, ValueType DestTy, ValueType SrcTy,
1633                        int StartIndex, Operand ImmTy,
1634                        SDPatternOperator ExtOp, PatFrag getTop>
1635   : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
1636                      (ins VPR128:$Rn, ImmTy:$Imm),
1637                      asmop # "2\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
1638                      [(set (DestTy VPR128:$Rd),
1639                         (DestTy (shl
1640                           (DestTy (ExtOp
1641                             (SrcTy (getTop VPR128:$Rn)))),
1642                               (DestTy (Neon_vdup (i32 ImmTy:$Imm))))))],
1643                      NoItinerary>;
1644
1645 multiclass NeonI_N2VShLL<string prefix, bit u, bits<5> opcode, string asmop,
1646                          SDNode ExtOp> {
1647   // 64-bit vector types.
1648   def _8B : N2VShiftLong<0b0, u, opcode, asmop, "8h", "8b", v8i16, v8i8,
1649                          neon_uimm3_shift, ExtOp> {
1650     let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
1651   }
1652
1653   def _4H : N2VShiftLong<0b0, u, opcode, asmop, "4s", "4h", v4i32, v4i16,
1654                          neon_uimm4_shift, ExtOp> {
1655     let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
1656   }
1657
1658   def _2S : N2VShiftLong<0b0, u, opcode, asmop, "2d", "2s", v2i64, v2i32,
1659                          neon_uimm5_shift, ExtOp> {
1660     let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
1661   }
1662
1663   // 128-bit vector types
1664   def _16B : N2VShiftLongHigh<0b1, u, opcode, asmop, "8h", "16b", v8i16, v8i8,
1665                               8, neon_uimm3_shift, ExtOp, Neon_High16B> {
1666     let Inst{22-19} = 0b0001;  // immh:immb = 0001xxx
1667   }
1668
1669   def _8H : N2VShiftLongHigh<0b1, u, opcode, asmop, "4s", "8h", v4i32, v4i16,
1670                              4, neon_uimm4_shift, ExtOp, Neon_High8H> {
1671     let Inst{22-20} = 0b001;   // immh:immb = 001xxxx
1672   }
1673
1674   def _4S : N2VShiftLongHigh<0b1, u, opcode, asmop, "2d", "4s", v2i64, v2i32,
1675                              2, neon_uimm5_shift, ExtOp, Neon_High4S> {
1676     let Inst{22-21} = 0b01;    // immh:immb = 01xxxxx
1677   }
1678
1679   // When the shift amount is 0 the 'shl' is folded away, so match the plain
1680   // extend and emit the instruction with an immediate of 0.
1680   def : Pat<(v8i16 (ExtOp (v8i8 VPR64:$Rn))),
1681             (!cast<Instruction>(prefix # "_8B") VPR64:$Rn, 0)>;
1682
1683   def : Pat<(v4i32 (ExtOp (v4i16 VPR64:$Rn))),
1684             (!cast<Instruction>(prefix # "_4H") VPR64:$Rn, 0)>;
1685
1686   def : Pat<(v2i64 (ExtOp (v2i32 VPR64:$Rn))),
1687             (!cast<Instruction>(prefix # "_2S") VPR64:$Rn, 0)>;
1688
1689   def : Pat<(v8i16 (ExtOp (v8i8 (Neon_High16B VPR128:$Rn)))),
1690             (!cast<Instruction>(prefix # "_16B") VPR128:$Rn, 0)>;
1691
1692   def : Pat<(v4i32 (ExtOp (v4i16 (Neon_High8H VPR128:$Rn)))),
1693             (!cast<Instruction>(prefix # "_8H") VPR128:$Rn, 0)>;
1694
1695   def : Pat<(v2i64 (ExtOp (v2i32 (Neon_High4S VPR128:$Rn)))),
1696             (!cast<Instruction>(prefix # "_4S") VPR128:$Rn, 0)>;
1697 }
1698
1699 // Shift left long
1700 defm SSHLLvvi : NeonI_N2VShLL<"SSHLLvvi", 0b0, 0b10100, "sshll", sext>;
1701 defm USHLLvvi : NeonI_N2VShLL<"USHLLvvi", 0b1, 0b10100, "ushll", zext>;
1702
1703 // Rounding/Saturating shift
1704 class N2VShift_RQ<bit q, bit u, bits<5> opcode, string asmop, string T,
1705                   RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
1706                   SDPatternOperator OpNode>
1707   : NeonI_2VShiftImm<q, u, opcode,
1708                      (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
1709                      asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1710                      [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$Rn),
1711                         (i32 imm:$Imm))))],
1712                      NoItinerary>;
1713
1714 // shift right (vector by immediate)
1715 multiclass NeonI_N2VShR_RQ<bit u, bits<5> opcode, string asmop,
1716                            SDPatternOperator OpNode> {
1717   def _8B  : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1718                          OpNode> {
1719     let Inst{22-19} = 0b0001;
1720   }
1721
1722   def _4H  : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1723                          OpNode> {
1724     let Inst{22-20} = 0b001;
1725   }
1726
1727   def _2S  : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1728                          OpNode> {
1729     let Inst{22-21} = 0b01;
1730   }
1731
1732   def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1733                          OpNode> {
1734     let Inst{22-19} = 0b0001;
1735   }
1736
1737   def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1738                         OpNode> {
1739     let Inst{22-20} = 0b001;
1740   }
1741
1742   def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1743                         OpNode> {
1744     let Inst{22-21} = 0b01;
1745   }
1746
1747   def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
1748                         OpNode> {
1749     let Inst{22} = 0b1;
1750   }
1751 }
1752
1753 multiclass NeonI_N2VShL_Q<bit u, bits<5> opcode, string asmop,
1754                           SDPatternOperator OpNode> {
1755   // 64-bit vector types.
1756   def _8B : N2VShift_RQ<0b0, u, opcode, asmop, "8b", VPR64, v8i8, uimm3,
1757                         OpNode> {
1758     let Inst{22-19} = 0b0001;
1759   }
1760
1761   def _4H : N2VShift_RQ<0b0, u, opcode, asmop, "4h", VPR64, v4i16, uimm4,
1762                         OpNode> {
1763     let Inst{22-20} = 0b001;
1764   }
1765
1766   def _2S : N2VShift_RQ<0b0, u, opcode, asmop, "2s", VPR64, v2i32, uimm5,
1767                         OpNode> {
1768     let Inst{22-21} = 0b01;
1769   }
1770
1771   // 128-bit vector types.
1772   def _16B : N2VShift_RQ<0b1, u, opcode, asmop, "16b", VPR128, v16i8, uimm3,
1773                          OpNode> {
1774     let Inst{22-19} = 0b0001;
1775   }
1776
1777   def _8H : N2VShift_RQ<0b1, u, opcode, asmop, "8h", VPR128, v8i16, uimm4,
1778                         OpNode> {
1779     let Inst{22-20} = 0b001;
1780   }
1781
1782   def _4S : N2VShift_RQ<0b1, u, opcode, asmop, "4s", VPR128, v4i32, uimm5,
1783                         OpNode> {
1784     let Inst{22-21} = 0b01;
1785   }
1786
1787   def _2D : N2VShift_RQ<0b1, u, opcode, asmop, "2d", VPR128, v2i64, imm0_63,
1788                         OpNode> {
1789     let Inst{22} = 0b1;
1790   }
1791 }
1792
1793 // Rounding shift right
1794 defm SRSHRvvi : NeonI_N2VShR_RQ<0b0, 0b00100, "srshr",
1795                                 int_aarch64_neon_vsrshr>;
1796 defm URSHRvvi : NeonI_N2VShR_RQ<0b1, 0b00100, "urshr",
1797                                 int_aarch64_neon_vurshr>;
1798
1799 // Saturating shift left unsigned
1800 defm SQSHLUvvi : NeonI_N2VShL_Q<0b1, 0b01100, "sqshlu", int_aarch64_neon_vsqshlu>;
1801
1802 // Saturating shift left
1803 defm SQSHLvvi : NeonI_N2VShL_Q<0b0, 0b01110, "sqshl", Neon_sqrshlImm>;
1804 defm UQSHLvvi : NeonI_N2VShL_Q<0b1, 0b01110, "uqshl", Neon_uqrshlImm>;
1805
1806 class N2VShiftAdd<bit q, bit u, bits<5> opcode, string asmop, string T,
1807                   RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
1808                   SDNode OpNode>
1809   : NeonI_2VShiftImm<q, u, opcode,
1810            (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
1811            asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1812            [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
1813               (Ty (OpNode (Ty VPRC:$Rn),
1814                 (Ty (Neon_vdup (i32 imm:$Imm))))))))],
1815            NoItinerary> {
1816   let Constraints = "$src = $Rd";
1817 }
1818
1819 // Shift Right accumulate
1820 multiclass NeonI_N2VShRAdd<bit u, bits<5> opcode, string asmop, SDNode OpNode> {
1821   def _8B : N2VShiftAdd<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1822                         OpNode> {
1823     let Inst{22-19} = 0b0001;
1824   }
1825
1826   def _4H : N2VShiftAdd<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1827                         OpNode> {
1828     let Inst{22-20} = 0b001;
1829   }
1830
1831   def _2S : N2VShiftAdd<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1832                         OpNode> {
1833     let Inst{22-21} = 0b01;
1834   }
1835
1836   def _16B : N2VShiftAdd<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1837                          OpNode> {
1838     let Inst{22-19} = 0b0001;
1839   }
1840
1841   def _8H : N2VShiftAdd<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1842                         OpNode> {
1843     let Inst{22-20} = 0b001;
1844   }
1845
1846   def _4S : N2VShiftAdd<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1847                         OpNode> {
1848     let Inst{22-21} = 0b01;
1849   }
1850
1851   def _2D : N2VShiftAdd<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
1852                         OpNode> {
1853     let Inst{22} = 0b1;
1854   }
1855 }
1856
1857 // Shift right and accumulate
1858 defm SSRAvvi    : NeonI_N2VShRAdd<0, 0b00010, "ssra", sra>;
1859 defm USRAvvi    : NeonI_N2VShRAdd<1, 0b00010, "usra", srl>;
1860
1861 // Rounding shift accumulate
1862 class N2VShiftAdd_R<bit q, bit u, bits<5> opcode, string asmop, string T,
1863                     RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
1864                     SDPatternOperator OpNode>
1865   : NeonI_2VShiftImm<q, u, opcode,
1866                      (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
1867                      asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1868                      [(set (Ty VPRC:$Rd), (Ty (add (Ty VPRC:$src),
1869                         (Ty (OpNode (Ty VPRC:$Rn), (i32 imm:$Imm))))))],
1870                      NoItinerary> {
1871   let Constraints = "$src = $Rd";
1872 }
1873
1874 multiclass NeonI_N2VShRAdd_R<bit u, bits<5> opcode, string asmop,
1875                              SDPatternOperator OpNode> {
1876   def _8B : N2VShiftAdd_R<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1877                           OpNode> {
1878     let Inst{22-19} = 0b0001;
1879   }
1880
1881   def _4H : N2VShiftAdd_R<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1882                           OpNode> {
1883     let Inst{22-20} = 0b001;
1884   }
1885
1886   def _2S : N2VShiftAdd_R<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1887                           OpNode> {
1888     let Inst{22-21} = 0b01;
1889   }
1890
1891   def _16B : N2VShiftAdd_R<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1892                            OpNode> {
1893     let Inst{22-19} = 0b0001;
1894   }
1895
1896   def _8H : N2VShiftAdd_R<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1897                           OpNode> {
1898     let Inst{22-20} = 0b001;
1899   }
1900
1901   def _4S : N2VShiftAdd_R<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1902                           OpNode> {
1903     let Inst{22-21} = 0b01;
1904   }
1905
1906   def _2D : N2VShiftAdd_R<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
1907                           OpNode> {
1908     let Inst{22} = 0b1;
1909   }
1910 }
1911
1912 // Rounding shift right and accumulate
1913 defm SRSRAvvi : NeonI_N2VShRAdd_R<0, 0b00110, "srsra", int_aarch64_neon_vsrshr>;
1914 defm URSRAvvi : NeonI_N2VShRAdd_R<1, 0b00110, "ursra", int_aarch64_neon_vurshr>;
1915
1916 // Shift insert by immediate
1917 class N2VShiftIns<bit q, bit u, bits<5> opcode, string asmop, string T,
1918                   RegisterOperand VPRC, ValueType Ty, Operand ImmTy,
1919                   SDPatternOperator OpNode>
1920     : NeonI_2VShiftImm<q, u, opcode,
1921            (outs VPRC:$Rd), (ins VPRC:$src, VPRC:$Rn, ImmTy:$Imm),
1922            asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
1923            [(set (Ty VPRC:$Rd), (Ty (OpNode (Ty VPRC:$src), (Ty VPRC:$Rn),
1924              (i32 imm:$Imm))))],
1925            NoItinerary> {
1926   let Constraints = "$src = $Rd";
1927 }
1928
1929 // shift left insert (vector by immediate)
1930 multiclass NeonI_N2VShLIns<bit u, bits<5> opcode, string asmop> {
1931   def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, uimm3,
1932                         int_aarch64_neon_vsli> {
1933     let Inst{22-19} = 0b0001;
1934   }
1935
1936   def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, uimm4,
1937                         int_aarch64_neon_vsli> {
1938     let Inst{22-20} = 0b001;
1939   }
1940
1941   def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, uimm5,
1942                         int_aarch64_neon_vsli> {
1943     let Inst{22-21} = 0b01;
1944   }
1945
1946     // 128-bit vector types
1947   def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, uimm3,
1948                          int_aarch64_neon_vsli> {
1949     let Inst{22-19} = 0b0001;
1950   }
1951
1952   def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, uimm4,
1953                         int_aarch64_neon_vsli> {
1954     let Inst{22-20} = 0b001;
1955   }
1956
1957   def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, uimm5,
1958                         int_aarch64_neon_vsli> {
1959     let Inst{22-21} = 0b01;
1960   }
1961
1962   def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, imm0_63,
1963                         int_aarch64_neon_vsli> {
1964     let Inst{22} = 0b1;
1965   }
1966 }
1967
1968 // shift right insert (vector by immediate)
1969 multiclass NeonI_N2VShRIns<bit u, bits<5> opcode, string asmop> {
1970     // 64-bit vector types.
1971   def _8B : N2VShiftIns<0b0, u, opcode, asmop, "8b", VPR64, v8i8, shr_imm8,
1972                         int_aarch64_neon_vsri> {
1973     let Inst{22-19} = 0b0001;
1974   }
1975
1976   def _4H : N2VShiftIns<0b0, u, opcode, asmop, "4h", VPR64, v4i16, shr_imm16,
1977                         int_aarch64_neon_vsri> {
1978     let Inst{22-20} = 0b001;
1979   }
1980
1981   def _2S : N2VShiftIns<0b0, u, opcode, asmop, "2s", VPR64, v2i32, shr_imm32,
1982                         int_aarch64_neon_vsri> {
1983     let Inst{22-21} = 0b01;
1984   }
1985
1986     // 128-bit vector types
1987   def _16B : N2VShiftIns<0b1, u, opcode, asmop, "16b", VPR128, v16i8, shr_imm8,
1988                          int_aarch64_neon_vsri> {
1989     let Inst{22-19} = 0b0001;
1990   }
1991
1992   def _8H : N2VShiftIns<0b1, u, opcode, asmop, "8h", VPR128, v8i16, shr_imm16,
1993                         int_aarch64_neon_vsri> {
1994     let Inst{22-20} = 0b001;
1995   }
1996
1997   def _4S : N2VShiftIns<0b1, u, opcode, asmop, "4s", VPR128, v4i32, shr_imm32,
1998                         int_aarch64_neon_vsri> {
1999     let Inst{22-21} = 0b01;
2000   }
2001
2002   def _2D : N2VShiftIns<0b1, u, opcode, asmop, "2d", VPR128, v2i64, shr_imm64,
2003                         int_aarch64_neon_vsri> {
2004     let Inst{22} = 0b1;
2005   }
2006 }
2007
2008 // Shift left and insert
2009 defm SLIvvi   : NeonI_N2VShLIns<0b1, 0b01010, "sli">;
2010
2011 // Shift right and insert
2012 defm SRIvvi   : NeonI_N2VShRIns<0b1, 0b01000, "sri">;
2013
2014 class N2VShR_Narrow<bit q, bit u, bits<5> opcode, string asmop, string DestT,
2015                     string SrcT, Operand ImmTy>
2016   : NeonI_2VShiftImm<q, u, opcode,
2017                      (outs VPR64:$Rd), (ins VPR128:$Rn, ImmTy:$Imm),
2018                      asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
2019                      [], NoItinerary>;
2020
2021 class N2VShR_Narrow_Hi<bit q, bit u, bits<5> opcode, string asmop, string DestT,
2022                        string SrcT, Operand ImmTy>
2023   : NeonI_2VShiftImm<q, u, opcode, (outs VPR128:$Rd),
2024                      (ins VPR128:$src, VPR128:$Rn, ImmTy:$Imm),
2025                      asmop # "\t$Rd." # DestT # ", $Rn." # SrcT # ", $Imm",
2026                      [], NoItinerary> {
2027   let Constraints = "$src = $Rd";
2028 }
2029
2030 // shift right narrow by immediate
2031 multiclass NeonI_N2VShR_Narrow<bit u, bits<5> opcode, string asmop> {
2032   def _8B : N2VShR_Narrow<0b0, u, opcode, asmop, "8b", "8h", shr_imm8> {
2033     let Inst{22-19} = 0b0001;
2034   }
2035
2036   def _4H : N2VShR_Narrow<0b0, u, opcode, asmop, "4h", "4s", shr_imm16> {
2037     let Inst{22-20} = 0b001;
2038   }
2039
2040   def _2S : N2VShR_Narrow<0b0, u, opcode, asmop, "2s", "2d", shr_imm32> {
2041     let Inst{22-21} = 0b01;
2042   }
2043
2044   // Shift Narrow High
2045   def _16B : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "16b", "8h",
2046                               shr_imm8> {
2047     let Inst{22-19} = 0b0001;
2048   }
2049
2050   def _8H : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "8h", "4s",
2051                              shr_imm16> {
2052     let Inst{22-20} = 0b001;
2053   }
2054
2055   def _4S : N2VShR_Narrow_Hi<0b1, u, opcode, asmop # "2", "4s", "2d",
2056                              shr_imm32> {
2057     let Inst{22-21} = 0b01;
2058   }
2059 }
2060
2061 // Shift right narrow
2062 defm SHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10000, "shrn">;
2063
2064 // Shift right narrow (prefix Q is saturating, prefix R is rounding)
2065 defm QSHRUNvvi : NeonI_N2VShR_Narrow<0b1, 0b10000, "sqshrun">;
2066 defm RSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10001, "rshrn">;
2067 defm QRSHRUNvvi : NeonI_N2VShR_Narrow<0b1, 0b10001, "sqrshrun">;
2068 defm SQSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10010, "sqshrn">;
2069 defm UQSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10010, "uqshrn">;
2070 defm SQRSHRNvvi : NeonI_N2VShR_Narrow<0b0, 0b10011, "sqrshrn">;
2071 defm UQRSHRNvvi : NeonI_N2VShR_Narrow<0b1, 0b10011, "uqrshrn">;
2072
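// PatFrags combining two 64-bit halves into one 128-bit vector, used below to
// match the narrowing shifts that write their result into the high half of Vd.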
2073 def Neon_combine_2D : PatFrag<(ops node:$Rm, node:$Rn),
2074                               (v2i64 (concat_vectors (v1i64 node:$Rm),
2075                                                      (v1i64 node:$Rn)))>;
2076 def Neon_combine_8H : PatFrag<(ops node:$Rm, node:$Rn),
2077                               (v8i16 (concat_vectors (v4i16 node:$Rm),
2078                                                      (v4i16 node:$Rn)))>;
2079 def Neon_combine_4S : PatFrag<(ops node:$Rm, node:$Rn),
2080                               (v4i32 (concat_vectors (v2i32 node:$Rm),
2081                                                      (v2i32 node:$Rn)))>;
2082 def Neon_combine_4f : PatFrag<(ops node:$Rm, node:$Rn),
2083                               (v4f32 (concat_vectors (v2f32 node:$Rm),
2084                                                      (v2f32 node:$Rn)))>;
2085 def Neon_combine_2d : PatFrag<(ops node:$Rm, node:$Rn),
2086                               (v2f64 (concat_vectors (v1f64 node:$Rm),
2087                                                      (v1f64 node:$Rn)))>;
2088
2089 def Neon_lshrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
2090                              (v8i16 (srl (v8i16 node:$lhs),
2091                                (v8i16 (Neon_vdup (i32 node:$rhs)))))>;
2092 def Neon_lshrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
2093                              (v4i32 (srl (v4i32 node:$lhs),
2094                                (v4i32 (Neon_vdup (i32 node:$rhs)))))>;
2095 def Neon_lshrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
2096                              (v2i64 (srl (v2i64 node:$lhs),
2097                                (v2i64 (Neon_vdup (i32 node:$rhs)))))>;
2098 def Neon_ashrImm8H : PatFrag<(ops node:$lhs, node:$rhs),
2099                              (v8i16 (sra (v8i16 node:$lhs),
2100                                (v8i16 (Neon_vdup (i32 node:$rhs)))))>;
2101 def Neon_ashrImm4S : PatFrag<(ops node:$lhs, node:$rhs),
2102                              (v4i32 (sra (v4i32 node:$lhs),
2103                                (v4i32 (Neon_vdup (i32 node:$rhs)))))>;
2104 def Neon_ashrImm2D : PatFrag<(ops node:$lhs, node:$rhs),
2105                              (v2i64 (sra (v2i64 node:$lhs),
2106                                (v2i64 (Neon_vdup (i32 node:$rhs)))))>;
2107
2108 // Normal shift right narrow is matched by IR (srl/sra, trunc, concat_vectors)
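// For example, (v8i8 (trunc (srl (v8i16 $Rn), (Neon_vdup 3)))) is selected
// below as (SHRNvvi_8B $Rn, 3), i.e. "shrn Vd.8b, Vn.8h, #3".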
2109 multiclass Neon_shiftNarrow_patterns<string shr> {
2110   def : Pat<(v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H") VPR128:$Rn,
2111               (i32 imm:$Imm)))),
2112             (SHRNvvi_8B VPR128:$Rn, imm:$Imm)>;
2113   def : Pat<(v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S") VPR128:$Rn,
2114               (i32 imm:$Imm)))),
2115             (SHRNvvi_4H VPR128:$Rn, imm:$Imm)>;
2116   def : Pat<(v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D") VPR128:$Rn,
2117               (i32 imm:$Imm)))),
2118             (SHRNvvi_2S VPR128:$Rn, imm:$Imm)>;
2119
2120   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2121               (v8i8 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm8H")
2122                 VPR128:$Rn, (i32 imm:$Imm))))))),
2123             (SHRNvvi_16B (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
2124                          VPR128:$Rn, imm:$Imm)>;
2125   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2126               (v4i16 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm4S")
2127                 VPR128:$Rn, (i32 imm:$Imm))))))),
2128             (SHRNvvi_8H (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2129                         VPR128:$Rn, imm:$Imm)>;
2130   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src), (v1i64 (bitconvert
2131               (v2i32 (trunc (!cast<PatFrag>("Neon_" # shr # "Imm2D")
2132                 VPR128:$Rn, (i32 imm:$Imm))))))),
2133             (SHRNvvi_4S (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2134                         VPR128:$Rn, imm:$Imm)>;
2135 }
2136
2137 multiclass Neon_shiftNarrow_QR_patterns<SDPatternOperator op, string prefix> {
2138   def : Pat<(v8i8 (op (v8i16 VPR128:$Rn), imm:$Imm)),
2139             (!cast<Instruction>(prefix # "_8B") VPR128:$Rn, imm:$Imm)>;
2140   def : Pat<(v4i16 (op (v4i32 VPR128:$Rn), imm:$Imm)),
2141             (!cast<Instruction>(prefix # "_4H") VPR128:$Rn, imm:$Imm)>;
2142   def : Pat<(v2i32 (op (v2i64 VPR128:$Rn), imm:$Imm)),
2143             (!cast<Instruction>(prefix # "_2S") VPR128:$Rn, imm:$Imm)>;
2144
2145   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2146                 (v1i64 (bitconvert (v8i8 (op (v8i16 VPR128:$Rn), imm:$Imm))))),
2147             (!cast<Instruction>(prefix # "_16B")
2148                 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2149                 VPR128:$Rn, imm:$Imm)>;
2150   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2151                 (v1i64 (bitconvert (v4i16 (op (v4i32 VPR128:$Rn), imm:$Imm))))),
2152             (!cast<Instruction>(prefix # "_8H")
2153                 (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2154                 VPR128:$Rn, imm:$Imm)>;
2155   def : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2156                 (v1i64 (bitconvert (v2i32 (op (v2i64 VPR128:$Rn), imm:$Imm))))),
2157             (!cast<Instruction>(prefix # "_4S")
2158                   (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2159                   VPR128:$Rn, imm:$Imm)>;
2160 }
2161
2162 defm : Neon_shiftNarrow_patterns<"lshr">;
2163 defm : Neon_shiftNarrow_patterns<"ashr">;
2164
2165 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrun, "QSHRUNvvi">;
2166 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vrshrn, "RSHRNvvi">;
2167 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrun, "QRSHRUNvvi">;
2168 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqshrn, "SQSHRNvvi">;
2169 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqshrn, "UQSHRNvvi">;
2170 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vsqrshrn, "SQRSHRNvvi">;
2171 defm : Neon_shiftNarrow_QR_patterns<int_aarch64_neon_vuqrshrn, "UQRSHRNvvi">;
2172
2173 // Convert between fixed-point and floating-point
2174 class N2VCvt_Fx<bit q, bit u, bits<5> opcode, string asmop, string T,
2175                 RegisterOperand VPRC, ValueType DestTy, ValueType SrcTy,
2176                 Operand ImmTy, SDPatternOperator IntOp>
2177   : NeonI_2VShiftImm<q, u, opcode,
2178                      (outs VPRC:$Rd), (ins VPRC:$Rn, ImmTy:$Imm),
2179                      asmop # "\t$Rd." # T # ", $Rn." # T # ", $Imm",
2180                      [(set (DestTy VPRC:$Rd), (DestTy (IntOp (SrcTy VPRC:$Rn),
2181                        (i32 imm:$Imm))))],
2182                      NoItinerary>;
2183
2184 multiclass NeonI_N2VCvt_Fx2fp<bit u, bits<5> opcode, string asmop,
2185                               SDPatternOperator IntOp> {
2186   def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2f32, v2i32,
2187                       shr_imm32, IntOp> {
2188     let Inst{22-21} = 0b01;
2189   }
2190
2191   def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4f32, v4i32,
2192                       shr_imm32, IntOp> {
2193     let Inst{22-21} = 0b01;
2194   }
2195
2196   def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2f64, v2i64,
2197                       shr_imm64, IntOp> {
2198     let Inst{22} = 0b1;
2199   }
2200 }
2201
2202 multiclass NeonI_N2VCvt_Fp2fx<bit u, bits<5> opcode, string asmop,
2203                               SDPatternOperator IntOp> {
2204   def _2S : N2VCvt_Fx<0, u, opcode, asmop, "2s", VPR64, v2i32, v2f32,
2205                       shr_imm32, IntOp> {
2206     let Inst{22-21} = 0b01;
2207   }
2208
2209   def _4S : N2VCvt_Fx<1, u, opcode, asmop, "4s", VPR128, v4i32, v4f32,
2210                       shr_imm32, IntOp> {
2211     let Inst{22-21} = 0b01;
2212   }
2213
2214   def _2D : N2VCvt_Fx<1, u, opcode, asmop, "2d", VPR128, v2i64, v2f64,
2215                       shr_imm64, IntOp> {
2216     let Inst{22} = 0b1;
2217   }
2218 }
2219
2220 // Convert fixed-point to floating-point
2221 defm VCVTxs2f : NeonI_N2VCvt_Fx2fp<0, 0b11100, "scvtf",
2222                                    int_arm_neon_vcvtfxs2fp>;
2223 defm VCVTxu2f : NeonI_N2VCvt_Fx2fp<1, 0b11100, "ucvtf",
2224                                    int_arm_neon_vcvtfxu2fp>;
2225
2226 // Convert floating-point to fixed-point
2227 defm VCVTf2xs : NeonI_N2VCvt_Fp2fx<0, 0b11111, "fcvtzs",
2228                                    int_arm_neon_vcvtfp2fxs>;
2229 defm VCVTf2xu : NeonI_N2VCvt_Fp2fx<1, 0b11111, "fcvtzu",
2230                                    int_arm_neon_vcvtfp2fxu>;
2231
2232 multiclass Neon_sshll2_0<SDNode ext>
2233 {
2234   def _v8i8  : PatFrag<(ops node:$Rn),
2235                        (v8i16 (ext (v8i8 (Neon_High16B node:$Rn))))>;
2236   def _v4i16 : PatFrag<(ops node:$Rn),
2237                        (v4i32 (ext (v4i16 (Neon_High8H node:$Rn))))>;
2238   def _v2i32 : PatFrag<(ops node:$Rn),
2239                        (v2i64 (ext (v2i32 (Neon_High4S node:$Rn))))>;
2240 }
2241
2242 defm NI_sext_high : Neon_sshll2_0<sext>;
2243 defm NI_zext_high : Neon_sshll2_0<zext>;
2244
2245
2246 //===----------------------------------------------------------------------===//
2247 // Multiclasses for NeonI_Across
2248 //===----------------------------------------------------------------------===//
2249
2250 // Variant 1: the scalar result is wider than the source vector elements.
2251
2252 multiclass NeonI_2VAcross_1<bit u, bits<5> opcode,
2253                             string asmop, SDPatternOperator opnode>
2254 {
2255     def _1h8b:  NeonI_2VAcross<0b0, u, 0b00, opcode,
2256                 (outs FPR16:$Rd), (ins VPR64:$Rn),
2257                 asmop # "\t$Rd, $Rn.8b",
2258                 [(set (v1i16 FPR16:$Rd),
2259                     (v1i16 (opnode (v8i8 VPR64:$Rn))))],
2260                 NoItinerary>;
2261
2262     def _1h16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
2263                 (outs FPR16:$Rd), (ins VPR128:$Rn),
2264                 asmop # "\t$Rd, $Rn.16b",
2265                 [(set (v1i16 FPR16:$Rd),
2266                     (v1i16 (opnode (v16i8 VPR128:$Rn))))],
2267                 NoItinerary>;
2268
2269     def _1s4h:  NeonI_2VAcross<0b0, u, 0b01, opcode,
2270                 (outs FPR32:$Rd), (ins VPR64:$Rn),
2271                 asmop # "\t$Rd, $Rn.4h",
2272                 [(set (v1i32 FPR32:$Rd),
2273                     (v1i32 (opnode (v4i16 VPR64:$Rn))))],
2274                 NoItinerary>;
2275
2276     def _1s8h:  NeonI_2VAcross<0b1, u, 0b01, opcode,
2277                 (outs FPR32:$Rd), (ins VPR128:$Rn),
2278                 asmop # "\t$Rd, $Rn.8h",
2279                 [(set (v1i32 FPR32:$Rd),
2280                     (v1i32 (opnode (v8i16 VPR128:$Rn))))],
2281                 NoItinerary>;
2282
2283     // _1d2s doesn't exist!
2284
2285     def _1d4s:  NeonI_2VAcross<0b1, u, 0b10, opcode,
2286                 (outs FPR64:$Rd), (ins VPR128:$Rn),
2287                 asmop # "\t$Rd, $Rn.4s",
2288                 [(set (v1i64 FPR64:$Rd),
2289                     (v1i64 (opnode (v4i32 VPR128:$Rn))))],
2290                 NoItinerary>;
2291 }
2292
2293 defm SADDLV : NeonI_2VAcross_1<0b0, 0b00011, "saddlv", int_aarch64_neon_saddlv>;
2294 defm UADDLV : NeonI_2VAcross_1<0b1, 0b00011, "uaddlv", int_aarch64_neon_uaddlv>;
2295
2296 // Variant 2: the scalar result has the same width as the source vector elements.
2297
2298 multiclass NeonI_2VAcross_2<bit u, bits<5> opcode,
2299                             string asmop, SDPatternOperator opnode>
2300 {
2301     def _1b8b:  NeonI_2VAcross<0b0, u, 0b00, opcode,
2302                 (outs FPR8:$Rd), (ins VPR64:$Rn),
2303                 asmop # "\t$Rd, $Rn.8b",
2304                 [(set (v1i8 FPR8:$Rd),
2305                     (v1i8 (opnode (v8i8 VPR64:$Rn))))],
2306                 NoItinerary>;
2307
2308     def _1b16b: NeonI_2VAcross<0b1, u, 0b00, opcode,
2309                 (outs FPR8:$Rd), (ins VPR128:$Rn),
2310                 asmop # "\t$Rd, $Rn.16b",
2311                 [(set (v1i8 FPR8:$Rd),
2312                     (v1i8 (opnode (v16i8 VPR128:$Rn))))],
2313                 NoItinerary>;
2314
2315     def _1h4h:  NeonI_2VAcross<0b0, u, 0b01, opcode,
2316                 (outs FPR16:$Rd), (ins VPR64:$Rn),
2317                 asmop # "\t$Rd, $Rn.4h",
2318                 [(set (v1i16 FPR16:$Rd),
2319                     (v1i16 (opnode (v4i16 VPR64:$Rn))))],
2320                 NoItinerary>;
2321
2322     def _1h8h:  NeonI_2VAcross<0b1, u, 0b01, opcode,
2323                 (outs FPR16:$Rd), (ins VPR128:$Rn),
2324                 asmop # "\t$Rd, $Rn.8h",
2325                 [(set (v1i16 FPR16:$Rd),
2326                     (v1i16 (opnode (v8i16 VPR128:$Rn))))],
2327                 NoItinerary>;
2328
2329     // _1s2s doesn't exist!
2330
2331     def _1s4s:  NeonI_2VAcross<0b1, u, 0b10, opcode,
2332                 (outs FPR32:$Rd), (ins VPR128:$Rn),
2333                 asmop # "\t$Rd, $Rn.4s",
2334                 [(set (v1i32 FPR32:$Rd),
2335                     (v1i32 (opnode (v4i32 VPR128:$Rn))))],
2336                 NoItinerary>;
2337 }
2338
2339 defm SMAXV : NeonI_2VAcross_2<0b0, 0b01010, "smaxv", int_aarch64_neon_smaxv>;
2340 defm UMAXV : NeonI_2VAcross_2<0b1, 0b01010, "umaxv", int_aarch64_neon_umaxv>;
2341
2342 defm SMINV : NeonI_2VAcross_2<0b0, 0b11010, "sminv", int_aarch64_neon_sminv>;
2343 defm UMINV : NeonI_2VAcross_2<0b1, 0b11010, "uminv", int_aarch64_neon_uminv>;
2344
2345 defm ADDV : NeonI_2VAcross_2<0b0, 0b11011, "addv", int_aarch64_neon_vaddv>;
2346
2347 // Variant 3: floating-point across-lanes operations (4s source only).
2348
2349 multiclass NeonI_2VAcross_3<bit u, bits<5> opcode, bits<2> size,
2350                             string asmop, SDPatternOperator opnode> {
2351     def _1s4s:  NeonI_2VAcross<0b1, u, size, opcode,
2352                 (outs FPR32:$Rd), (ins VPR128:$Rn),
2353                 asmop # "\t$Rd, $Rn.4s",
2354                 [(set (v1f32 FPR32:$Rd),
2355                     (v1f32 (opnode (v4f32 VPR128:$Rn))))],
2356                 NoItinerary>;
2357 }
2358
2359 defm FMAXNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b00, "fmaxnmv",
2360                                 int_aarch64_neon_vmaxnmv>;
2361 defm FMINNMV : NeonI_2VAcross_3<0b1, 0b01100, 0b10, "fminnmv",
2362                                 int_aarch64_neon_vminnmv>;
2363
2364 defm FMAXV : NeonI_2VAcross_3<0b1, 0b01111, 0b00, "fmaxv",
2365                               int_aarch64_neon_vmaxv>;
2366 defm FMINV : NeonI_2VAcross_3<0b1, 0b01111, 0b10, "fminv",
2367                               int_aarch64_neon_vminv>;
2368
2369 // The following are for instruction class (Perm)
2370
2371 class NeonI_Permute<bit q, bits<2> size, bits<3> opcode,
2372                     string asmop, RegisterOperand OpVPR, string OpS>
2373   : NeonI_Perm<q, size, opcode,
2374                (outs OpVPR:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2375                asmop # "\t$Rd." # OpS # ", $Rn." # OpS # ", $Rm." # OpS,
2376                [], NoItinerary>;
2377
2378 multiclass NeonI_Perm_pat<bits<3> opcode, string asmop> {
2379    def _8b  : NeonI_Permute<0b0, 0b00, opcode, asmop, VPR64,  "8b">;
2380    def _16b : NeonI_Permute<0b1, 0b00, opcode, asmop, VPR128, "16b">;
2381    def _4h  : NeonI_Permute<0b0, 0b01, opcode, asmop, VPR64,  "4h">;
2382    def _8h  : NeonI_Permute<0b1, 0b01, opcode, asmop, VPR128, "8h">;
2383    def _2s  : NeonI_Permute<0b0, 0b10, opcode, asmop, VPR64,  "2s">;
2384    def _4s  : NeonI_Permute<0b1, 0b10, opcode, asmop, VPR128, "4s">;
2385    def _2d  : NeonI_Permute<0b1, 0b11, opcode, asmop, VPR128, "2d">;
2386 }
2387
2388 defm UZP1vvv : NeonI_Perm_pat<0b001, "uzp1">;
2389 defm TRN1vvv : NeonI_Perm_pat<0b010, "trn1">;
2390 defm ZIP1vvv : NeonI_Perm_pat<0b011, "zip1">;
2391 defm UZP2vvv : NeonI_Perm_pat<0b101, "uzp2">;
2392 defm TRN2vvv : NeonI_Perm_pat<0b110, "trn2">;
2393 defm ZIP2vvv : NeonI_Perm_pat<0b111, "zip2">;
2394
2395 // Extract and Insert
2396 def NI_ei_i32 : PatFrag<(ops node:$Rn, node:$Rm, node:$Ext, node:$Ins),
2397                         (vector_insert node:$Rn,
2398                           (i32 (vector_extract node:$Rm, node:$Ext)),
2399                           node:$Ins)>;
2400
2401 def NI_ei_f32 : PatFrag<(ops node:$Rn, node:$Rm, node:$Ext, node:$Ins),
2402                         (vector_insert node:$Rn,
2403                           (f32 (vector_extract node:$Rm, node:$Ext)),
2404                           node:$Ins)>;
2405
2406 // uzp1
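// UZP1 concatenates the even-numbered lanes of Rn and Rm; the pattern below
// matches that permutation written as a chain of vector_extract/vector_insert
// operations.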
2407 def : Pat<(v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2408           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2409           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2410           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2411           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2412           (v16i8 VPR128:$Rn),
2413           (v16i8 VPR128:$Rn), 2,  1)),
2414           (v16i8 VPR128:$Rn), 4,  2)),
2415           (v16i8 VPR128:$Rn), 6,  3)),
2416           (v16i8 VPR128:$Rn), 8,  4)),
2417           (v16i8 VPR128:$Rn), 10, 5)),
2418           (v16i8 VPR128:$Rn), 12, 6)),
2419           (v16i8 VPR128:$Rn), 14, 7)),
2420           (v16i8 VPR128:$Rm), 0,  8)),
2421           (v16i8 VPR128:$Rm), 2,  9)),
2422           (v16i8 VPR128:$Rm), 4,  10)),
2423           (v16i8 VPR128:$Rm), 6,  11)),
2424           (v16i8 VPR128:$Rm), 8,  12)),
2425           (v16i8 VPR128:$Rm), 10, 13)),
2426           (v16i8 VPR128:$Rm), 12, 14)),
2427           (v16i8 VPR128:$Rm), 14, 15)),
2428           (UZP1vvv_16b VPR128:$Rn, VPR128:$Rm)>;
2429
2430 class NI_Uzp1_v8<ValueType Ty, RegisterOperand VPR, Instruction INST>
2431   : Pat<(Ty (NI_ei_i32 (Ty (NI_ei_i32 (Ty (NI_ei_i32 (Ty (NI_ei_i32
2432         (Ty (NI_ei_i32 (Ty (NI_ei_i32 (Ty (NI_ei_i32
2433         (Ty VPR:$Rn),
2434         (Ty VPR:$Rn), 2, 1)),
2435         (Ty VPR:$Rn), 4, 2)),
2436         (Ty VPR:$Rn), 6, 3)),
2437         (Ty VPR:$Rm), 0, 4)),
2438         (Ty VPR:$Rm), 2, 5)),
2439         (Ty VPR:$Rm), 4, 6)),
2440         (Ty VPR:$Rm), 6, 7)),
2441         (INST VPR:$Rn, VPR:$Rm)>;
2442
2443 def : NI_Uzp1_v8<v8i8, VPR64, UZP1vvv_8b>;
2444 def : NI_Uzp1_v8<v8i16, VPR128, UZP1vvv_8h>;
2445
2446 class NI_Uzp1_v4<ValueType Ty, RegisterOperand VPR, Instruction INST,
2447                  PatFrag ei>
2448   : Pat<(Ty (ei (Ty (ei (Ty (ei
2449         (Ty VPR:$Rn),
2450         (Ty VPR:$Rn), 2, 1)),
2451         (Ty VPR:$Rm), 0, 2)),
2452         (Ty VPR:$Rm), 2, 3)),
2453         (INST VPR:$Rn, VPR:$Rm)>;
2454
2455 def : NI_Uzp1_v4<v4i16, VPR64, UZP1vvv_4h, NI_ei_i32>;
2456 def : NI_Uzp1_v4<v4i32, VPR128, UZP1vvv_4s, NI_ei_i32>;
2457 def : NI_Uzp1_v4<v4f32, VPR128, UZP1vvv_4s, NI_ei_f32>;
2458
2459 // uzp2
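// UZP2 is the odd-lane counterpart, e.g. for 16b:
//   Rd = {Rn[1], Rn[3], ..., Rn[15], Rm[1], Rm[3], ..., Rm[15]}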
2460 def : Pat<(v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2461           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2462           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2463           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2464           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 
2465           (v16i8 VPR128:$Rm),
2466           (v16i8 VPR128:$Rn), 1,  0)),
2467           (v16i8 VPR128:$Rn), 3,  1)),
2468           (v16i8 VPR128:$Rn), 5,  2)),
2469           (v16i8 VPR128:$Rn), 7,  3)),
2470           (v16i8 VPR128:$Rn), 9,  4)),
2471           (v16i8 VPR128:$Rn), 11, 5)),
2472           (v16i8 VPR128:$Rn), 13, 6)),
2473           (v16i8 VPR128:$Rn), 15, 7)),
2474           (v16i8 VPR128:$Rm), 1,  8)),
2475           (v16i8 VPR128:$Rm), 3,  9)),
2476           (v16i8 VPR128:$Rm), 5,  10)),
2477           (v16i8 VPR128:$Rm), 7,  11)),
2478           (v16i8 VPR128:$Rm), 9,  12)),
2479           (v16i8 VPR128:$Rm), 11, 13)),
2480           (v16i8 VPR128:$Rm), 13, 14)),
2481           (UZP2vvv_16b VPR128:$Rn, VPR128:$Rm)>;
2482
2483 class NI_Uzp2_v8<ValueType Ty, RegisterOperand VPR, Instruction INST>
2484   : Pat<(Ty (NI_ei_i32 (Ty (NI_ei_i32 (Ty (NI_ei_i32 (Ty (NI_ei_i32
2485         (Ty (NI_ei_i32 (Ty (NI_ei_i32 (Ty (NI_ei_i32
2486         (Ty VPR:$Rm),
2487         (Ty VPR:$Rn), 1, 0)),
2488         (Ty VPR:$Rn), 3, 1)),
2489         (Ty VPR:$Rn), 5, 2)),
2490         (Ty VPR:$Rn), 7, 3)),
2491         (Ty VPR:$Rm), 1, 4)),
2492         (Ty VPR:$Rm), 3, 5)),
2493         (Ty VPR:$Rm), 5, 6)),
2494         (INST VPR:$Rn, VPR:$Rm)>;
2495
2496 def : NI_Uzp2_v8<v8i8, VPR64, UZP2vvv_8b>;
2497 def : NI_Uzp2_v8<v8i16, VPR128, UZP2vvv_8h>;
2498
2499 class NI_Uzp2_v4<ValueType Ty, RegisterOperand VPR, Instruction INST,
2500                  PatFrag ei>
2501   : Pat<(Ty (ei (Ty (ei (Ty (ei
2502         (Ty VPR:$Rm),
2503         (Ty VPR:$Rn), 1, 0)),
2504         (Ty VPR:$Rn), 3, 1)),
2505         (Ty VPR:$Rm), 1, 2)),
2506         (INST VPR:$Rn, VPR:$Rm)>;
2507
2508 def : NI_Uzp2_v4<v4i16, VPR64, UZP2vvv_4h, NI_ei_i32>;
2509 def : NI_Uzp2_v4<v4i32, VPR128, UZP2vvv_4s, NI_ei_i32>;
2510 def : NI_Uzp2_v4<v4f32, VPR128, UZP2vvv_4s, NI_ei_f32>;
2511
2512 // zip1
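// ZIP1 interleaves the low halves of the two sources, e.g. for 16b:
//   Rd = {Rn[0], Rm[0], Rn[1], Rm[1], ..., Rn[7], Rm[7]}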
2513 def : Pat<(v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2514           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2515           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2516           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2517           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2518           (v16i8 VPR128:$Rn),
2519           (v16i8 VPR128:$Rm), 0, 1)),
2520           (v16i8 VPR128:$Rn), 1, 2)),
2521           (v16i8 VPR128:$Rm), 1, 3)),
2522           (v16i8 VPR128:$Rn), 2, 4)),
2523           (v16i8 VPR128:$Rm), 2, 5)),
2524           (v16i8 VPR128:$Rn), 3, 6)),
2525           (v16i8 VPR128:$Rm), 3, 7)),
2526           (v16i8 VPR128:$Rn), 4, 8)),
2527           (v16i8 VPR128:$Rm), 4, 9)),
2528           (v16i8 VPR128:$Rn), 5, 10)),
2529           (v16i8 VPR128:$Rm), 5, 11)),
2530           (v16i8 VPR128:$Rn), 6, 12)),
2531           (v16i8 VPR128:$Rm), 6, 13)),
2532           (v16i8 VPR128:$Rn), 7, 14)),
2533           (v16i8 VPR128:$Rm), 7, 15)),
2534           (ZIP1vvv_16b VPR128:$Rn, VPR128:$Rm)>;
2535
2536 class NI_Zip1_v8<ValueType Ty, RegisterOperand VPR, Instruction INST>
2537   : Pat<(Ty (NI_ei_i32 (Ty (NI_ei_i32 (Ty (NI_ei_i32 (Ty (NI_ei_i32
2538         (Ty (NI_ei_i32 (Ty (NI_ei_i32 (Ty (NI_ei_i32
2539         (Ty VPR:$Rn),
2540         (Ty VPR:$Rm), 0, 1)),
2541         (Ty VPR:$Rn), 1, 2)),
2542         (Ty VPR:$Rm), 1, 3)),
2543         (Ty VPR:$Rn), 2, 4)),
2544         (Ty VPR:$Rm), 2, 5)),
2545         (Ty VPR:$Rn), 3, 6)),
2546         (Ty VPR:$Rm), 3, 7)),
2547         (INST VPR:$Rn, VPR:$Rm)>;
2548
2549 def : NI_Zip1_v8<v8i8, VPR64, ZIP1vvv_8b>;
2550 def : NI_Zip1_v8<v8i16, VPR128, ZIP1vvv_8h>;
2551
2552 class NI_Zip1_v4<ValueType Ty, RegisterOperand VPR, Instruction INST,
2553                  PatFrag ei>
2554   : Pat<(Ty (ei (Ty (ei (Ty (ei
2555         (Ty VPR:$Rn),
2556         (Ty VPR:$Rm), 0, 1)),
2557         (Ty VPR:$Rn), 1, 2)),
2558         (Ty VPR:$Rm), 1, 3)),
2559         (INST VPR:$Rn, VPR:$Rm)>;
2560
2561 def : NI_Zip1_v4<v4i16, VPR64, ZIP1vvv_4h, NI_ei_i32>;
2562 def : NI_Zip1_v4<v4i32, VPR128, ZIP1vvv_4s, NI_ei_i32>;
2563 def : NI_Zip1_v4<v4f32, VPR128, ZIP1vvv_4s, NI_ei_f32>;
2564
2565 // zip2
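// ZIP2 interleaves the high halves, e.g. for 16b:
//   Rd = {Rn[8], Rm[8], Rn[9], Rm[9], ..., Rn[15], Rm[15]}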
2566 def : Pat<(v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2567           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2568           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2569           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2570           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2571           (v16i8 VPR128:$Rm),
2572           (v16i8 VPR128:$Rn), 8, 0)),
2573           (v16i8 VPR128:$Rm), 8, 1)),
2574           (v16i8 VPR128:$Rn), 9, 2)),
2575           (v16i8 VPR128:$Rm), 9, 3)),
2576           (v16i8 VPR128:$Rn), 10, 4)),
2577           (v16i8 VPR128:$Rm), 10, 5)),
2578           (v16i8 VPR128:$Rn), 11, 6)),
2579           (v16i8 VPR128:$Rm), 11, 7)),
2580           (v16i8 VPR128:$Rn), 12, 8)),
2581           (v16i8 VPR128:$Rm), 12, 9)),
2582           (v16i8 VPR128:$Rn), 13, 10)),
2583           (v16i8 VPR128:$Rm), 13, 11)),
2584           (v16i8 VPR128:$Rn), 14, 12)),
2585           (v16i8 VPR128:$Rm), 14, 13)),
2586           (v16i8 VPR128:$Rn), 15, 14)),
2587           (ZIP2vvv_16b VPR128:$Rn, VPR128:$Rm)>;
2588
2589 class NI_Zip2_v8<ValueType Ty, RegisterOperand VPR, Instruction INST>
2590   : Pat<(Ty (NI_ei_i32 (Ty (NI_ei_i32 (Ty (NI_ei_i32 (Ty (NI_ei_i32
2591         (Ty (NI_ei_i32 (Ty (NI_ei_i32 (Ty (NI_ei_i32
2592         (Ty VPR:$Rm),
2593         (Ty VPR:$Rn), 4, 0)),
2594         (Ty VPR:$Rm), 4, 1)),
2595         (Ty VPR:$Rn), 5, 2)),
2596         (Ty VPR:$Rm), 5, 3)),
2597         (Ty VPR:$Rn), 6, 4)),
2598         (Ty VPR:$Rm), 6, 5)),
2599         (Ty VPR:$Rn), 7, 6)),
2600         (INST VPR:$Rn, VPR:$Rm)>;
2601
2602 def : NI_Zip2_v8<v8i8, VPR64, ZIP2vvv_8b>;
2603 def : NI_Zip2_v8<v8i16, VPR128, ZIP2vvv_8h>;
2604
2605 class NI_Zip2_v4<ValueType Ty, RegisterOperand VPR, Instruction INST,
2606                  PatFrag ei>
2607   : Pat<(Ty (ei (Ty (ei (Ty (ei
2608         (Ty VPR:$Rm),
2609         (Ty VPR:$Rn), 2, 0)),
2610         (Ty VPR:$Rm), 2, 1)),
2611         (Ty VPR:$Rn), 3, 2)),
2612         (INST VPR:$Rn, VPR:$Rm)>;
2613
2614 def : NI_Zip2_v4<v4i16, VPR64, ZIP2vvv_4h, NI_ei_i32>;
2615 def : NI_Zip2_v4<v4i32, VPR128, ZIP2vvv_4s, NI_ei_i32>;
2616 def : NI_Zip2_v4<v4f32, VPR128, ZIP2vvv_4s, NI_ei_f32>;
2617
2618 // trn1
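// TRN1 transposes the even-numbered lanes, e.g. for 16b:
//   Rd = {Rn[0], Rm[0], Rn[2], Rm[2], ..., Rn[14], Rm[14]}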
2619 def : Pat<(v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2620           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2621           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2622           (v16i8 VPR128:$Rn),
2623           (v16i8 VPR128:$Rm), 0,  1)),
2624           (v16i8 VPR128:$Rm), 2,  3)),
2625           (v16i8 VPR128:$Rm), 4,  5)),
2626           (v16i8 VPR128:$Rm), 6,  7)),
2627           (v16i8 VPR128:$Rm), 8,  9)),
2628           (v16i8 VPR128:$Rm), 10, 11)),
2629           (v16i8 VPR128:$Rm), 12, 13)),
2630           (v16i8 VPR128:$Rm), 14, 15)),
2631           (TRN1vvv_16b VPR128:$Rn, VPR128:$Rm)>;
2632
2633 class NI_Trn1_v8<ValueType Ty, RegisterOperand VPR, Instruction INST>
2634   : Pat<(Ty (NI_ei_i32 (Ty (NI_ei_i32 (Ty (NI_ei_i32 (Ty (NI_ei_i32
2635         (Ty VPR:$Rn),
2636         (Ty VPR:$Rm), 0, 1)),
2637         (Ty VPR:$Rm), 2, 3)),
2638         (Ty VPR:$Rm), 4, 5)),
2639         (Ty VPR:$Rm), 6, 7)),
2640         (INST VPR:$Rn, VPR:$Rm)>;
2641
2642 def : NI_Trn1_v8<v8i8, VPR64, TRN1vvv_8b>;
2643 def : NI_Trn1_v8<v8i16, VPR128, TRN1vvv_8h>;
2644
2645 class NI_Trn1_v4<ValueType Ty, RegisterOperand VPR, Instruction INST,
2646                  PatFrag ei>
2647   : Pat<(Ty (ei (Ty (ei
2648         (Ty VPR:$Rn),
2649         (Ty VPR:$Rm), 0, 1)),
2650         (Ty VPR:$Rm), 2, 3)),
2651         (INST VPR:$Rn, VPR:$Rm)>;
2652
2653 def : NI_Trn1_v4<v4i16, VPR64, TRN1vvv_4h, NI_ei_i32>;
2654 def : NI_Trn1_v4<v4i32, VPR128, TRN1vvv_4s, NI_ei_i32>;
2655 def : NI_Trn1_v4<v4f32, VPR128, TRN1vvv_4s, NI_ei_f32>;
2656
2657 // trn2
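// TRN2 transposes the odd-numbered lanes, e.g. for 16b:
//   Rd = {Rn[1], Rm[1], Rn[3], Rm[3], ..., Rn[15], Rm[15]}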
2658 def : Pat<(v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2659           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2660           (v16i8 (NI_ei_i32 (v16i8 (NI_ei_i32
2661           (v16i8 VPR128:$Rm),
2662           (v16i8 VPR128:$Rn), 1,  0)),
2663           (v16i8 VPR128:$Rn), 3,  2)),
2664           (v16i8 VPR128:$Rn), 5,  4)),
2665           (v16i8 VPR128:$Rn), 7,  6)),
2666           (v16i8 VPR128:$Rn), 9,  8)),
2667           (v16i8 VPR128:$Rn), 11, 10)),
2668           (v16i8 VPR128:$Rn), 13, 12)),
2669           (v16i8 VPR128:$Rn), 15, 14)),
2670           (TRN2vvv_16b VPR128:$Rn, VPR128:$Rm)>;
2671
2672 class NI_Trn2_v8<ValueType Ty, RegisterOperand VPR, Instruction INST>
2673   : Pat<(Ty (NI_ei_i32 (Ty (NI_ei_i32 (Ty (NI_ei_i32 (Ty (NI_ei_i32
2674         (Ty VPR:$Rm),
2675         (Ty VPR:$Rn), 1, 0)),
2676         (Ty VPR:$Rn), 3, 2)),
2677         (Ty VPR:$Rn), 5, 4)),
2678         (Ty VPR:$Rn), 7, 6)),
2679         (INST VPR:$Rn, VPR:$Rm)>;
2680
2681 def : NI_Trn2_v8<v8i8, VPR64, TRN2vvv_8b>;
2682 def : NI_Trn2_v8<v8i16, VPR128, TRN2vvv_8h>;
2683
2684 class NI_Trn2_v4<ValueType Ty, RegisterOperand VPR, Instruction INST,
2685                  PatFrag ei>
2686   : Pat<(Ty (ei (Ty (ei
2687         (Ty VPR:$Rm),
2688         (Ty VPR:$Rn), 1, 0)),
2689         (Ty VPR:$Rn), 3, 2)),
2690         (INST VPR:$Rn, VPR:$Rm)>;
2691
2692 def : NI_Trn2_v4<v4i16, VPR64, TRN2vvv_4h, NI_ei_i32>;
2693 def : NI_Trn2_v4<v4i32, VPR128, TRN2vvv_4s, NI_ei_i32>;
2694 def : NI_Trn2_v4<v4f32, VPR128, TRN2vvv_4s, NI_ei_f32>;
2695
2696 // End of implementation for instruction class (Perm)
2697
2698 // The following are for the instruction class (3V Diff)
2699
2700 // normal long/long2 pattern
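// The result elements are twice as wide as the source elements; both sources
// are extended before the operation, e.g. saddl v0.8h, v1.8b, v2.8b computes
// sext(v1) + sext(v2) lane by lane.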
2701 class NeonI_3VDL<bit q, bit u, bits<2> size, bits<4> opcode,
2702                  string asmop, string ResS, string OpS,
2703                  SDPatternOperator opnode, SDPatternOperator ext,
2704                  RegisterOperand OpVPR,
2705                  ValueType ResTy, ValueType OpTy>
2706   : NeonI_3VDiff<q, u, size, opcode,
2707                  (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2708                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2709                  [(set (ResTy VPR128:$Rd),
2710                     (ResTy (opnode (ResTy (ext (OpTy OpVPR:$Rn))),
2711                                    (ResTy (ext (OpTy OpVPR:$Rm))))))],
2712                  NoItinerary>;
2713
2714 multiclass NeonI_3VDL_s<bit u, bits<4> opcode,
2715                         string asmop, SDPatternOperator opnode,
2716                         bit Commutable = 0> {
2717   let isCommutable = Commutable in {
2718     def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2719                            opnode, sext, VPR64, v8i16, v8i8>;
2720     def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2721                            opnode, sext, VPR64, v4i32, v4i16>;
2722     def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2723                            opnode, sext, VPR64, v2i64, v2i32>;
2724   }
2725 }
2726
2727 multiclass NeonI_3VDL2_s<bit u, bits<4> opcode, string asmop,
2728                          SDPatternOperator opnode, bit Commutable = 0> {
2729   let isCommutable = Commutable in {
2730     def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2731                             opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
2732     def _4s8h  : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2733                             opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
2734     def _2d4s  : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2735                             opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
2736   }
2737 }
2738
2739 multiclass NeonI_3VDL_u<bit u, bits<4> opcode, string asmop,
2740                         SDPatternOperator opnode, bit Commutable = 0> {
2741   let isCommutable = Commutable in {
2742     def _8h8b : NeonI_3VDL<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2743                            opnode, zext, VPR64, v8i16, v8i8>;
2744     def _4s4h : NeonI_3VDL<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2745                            opnode, zext, VPR64, v4i32, v4i16>;
2746     def _2d2s : NeonI_3VDL<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2747                            opnode, zext, VPR64, v2i64, v2i32>;
2748   }
2749 }
2750
2751 multiclass NeonI_3VDL2_u<bit u, bits<4> opcode, string asmop,
2752                          SDPatternOperator opnode, bit Commutable = 0> {
2753   let isCommutable = Commutable in {
2754     def _8h16b : NeonI_3VDL<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2755                             opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
2756     def _4s8h : NeonI_3VDL<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2757                            opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
2758     def _2d4s : NeonI_3VDL<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2759                            opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
2760   }
2761 }
2762
2763 defm SADDLvvv :  NeonI_3VDL_s<0b0, 0b0000, "saddl", add, 1>;
2764 defm UADDLvvv :  NeonI_3VDL_u<0b1, 0b0000, "uaddl", add, 1>;
2765
2766 defm SADDL2vvv :  NeonI_3VDL2_s<0b0, 0b0000, "saddl2", add, 1>;
2767 defm UADDL2vvv :  NeonI_3VDL2_u<0b1, 0b0000, "uaddl2", add, 1>;
2768
2769 defm SSUBLvvv :  NeonI_3VDL_s<0b0, 0b0010, "ssubl", sub, 0>;
2770 defm USUBLvvv :  NeonI_3VDL_u<0b1, 0b0010, "usubl", sub, 0>;
2771
2772 defm SSUBL2vvv :  NeonI_3VDL2_s<0b0, 0b0010, "ssubl2", sub, 0>;
2773 defm USUBL2vvv :  NeonI_3VDL2_u<0b1, 0b0010, "usubl2", sub, 0>;
2774
2775 // normal wide/wide2 pattern
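// Only the second source is extended, e.g. saddw v0.8h, v1.8h, v2.8b computes
// v1 + sext(v2) lane by lane.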
2776 class NeonI_3VDW<bit q, bit u, bits<2> size, bits<4> opcode,
2777                  string asmop, string ResS, string OpS,
2778                  SDPatternOperator opnode, SDPatternOperator ext,
2779                  RegisterOperand OpVPR,
2780                  ValueType ResTy, ValueType OpTy>
2781   : NeonI_3VDiff<q, u, size, opcode,
2782                  (outs VPR128:$Rd), (ins VPR128:$Rn, OpVPR:$Rm),
2783                  asmop # "\t$Rd." # ResS # ", $Rn." # ResS # ", $Rm." # OpS,
2784                  [(set (ResTy VPR128:$Rd),
2785                     (ResTy (opnode (ResTy VPR128:$Rn),
2786                                    (ResTy (ext (OpTy OpVPR:$Rm))))))],
2787                  NoItinerary>;
2788
2789 multiclass NeonI_3VDW_s<bit u, bits<4> opcode, string asmop,
2790                         SDPatternOperator opnode> {
2791   def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2792                          opnode, sext, VPR64, v8i16, v8i8>;
2793   def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2794                          opnode, sext, VPR64, v4i32, v4i16>;
2795   def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2796                          opnode, sext, VPR64, v2i64, v2i32>;
2797 }
2798
2799 defm SADDWvvv :  NeonI_3VDW_s<0b0, 0b0001, "saddw", add>;
2800 defm SSUBWvvv :  NeonI_3VDW_s<0b0, 0b0011, "ssubw", sub>;
2801
2802 multiclass NeonI_3VDW2_s<bit u, bits<4> opcode, string asmop,
2803                          SDPatternOperator opnode> {
2804   def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2805                           opnode, NI_sext_high_v8i8, VPR128, v8i16, v16i8>;
2806   def _4s8h  : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2807                           opnode, NI_sext_high_v4i16, VPR128, v4i32, v8i16>;
2808   def _2d4s  : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2809                           opnode, NI_sext_high_v2i32, VPR128, v2i64, v4i32>;
2810 }
2811
2812 defm SADDW2vvv :  NeonI_3VDW2_s<0b0, 0b0001, "saddw2", add>;
2813 defm SSUBW2vvv :  NeonI_3VDW2_s<0b0, 0b0011, "ssubw2", sub>;
2814
2815 multiclass NeonI_3VDW_u<bit u, bits<4> opcode, string asmop,
2816                         SDPatternOperator opnode> {
2817   def _8h8b : NeonI_3VDW<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2818                          opnode, zext, VPR64, v8i16, v8i8>;
2819   def _4s4h : NeonI_3VDW<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2820                          opnode, zext, VPR64, v4i32, v4i16>;
2821   def _2d2s : NeonI_3VDW<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2822                          opnode, zext, VPR64, v2i64, v2i32>;
2823 }
2824
2825 defm UADDWvvv :  NeonI_3VDW_u<0b1, 0b0001, "uaddw", add>;
2826 defm USUBWvvv :  NeonI_3VDW_u<0b1, 0b0011, "usubw", sub>;
2827
2828 multiclass NeonI_3VDW2_u<bit u, bits<4> opcode, string asmop,
2829                          SDPatternOperator opnode> {
2830   def _8h16b : NeonI_3VDW<0b1, u, 0b00, opcode, asmop, "8h", "16b",
2831                           opnode, NI_zext_high_v8i8, VPR128, v8i16, v16i8>;
2832   def _4s8h : NeonI_3VDW<0b1, u, 0b01, opcode, asmop, "4s", "8h",
2833                          opnode, NI_zext_high_v4i16, VPR128, v4i32, v8i16>;
2834   def _2d4s : NeonI_3VDW<0b1, u, 0b10, opcode, asmop, "2d", "4s",
2835                          opnode, NI_zext_high_v2i32, VPR128, v2i64, v4i32>;
2836 }
2837
2838 defm UADDW2vvv :  NeonI_3VDW2_u<0b1, 0b0001, "uaddw2", add>;
2839 defm USUBW2vvv :  NeonI_3VDW2_u<0b1, 0b0011, "usubw2", sub>;
2840
2841 // Get the high half of each vector element.
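// Each fragment shifts every lane right by half of its width and truncates to
// the narrower element type; this is the narrowing performed by addhn/subhn.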
2842 multiclass NeonI_get_high {
2843   def _8h : PatFrag<(ops node:$Rn),
2844                     (v8i8 (trunc (v8i16 (srl (v8i16 node:$Rn),
2845                                              (v8i16 (Neon_vdup (i32 8)))))))>;
2846   def _4s : PatFrag<(ops node:$Rn),
2847                     (v4i16 (trunc (v4i32 (srl (v4i32 node:$Rn),
2848                                               (v4i32 (Neon_vdup (i32 16)))))))>;
2849   def _2d : PatFrag<(ops node:$Rn),
2850                     (v2i32 (trunc (v2i64 (srl (v2i64 node:$Rn),
2851                                               (v2i64 (Neon_vdup (i32 32)))))))>;
2852 }
2853
2854 defm NI_get_hi : NeonI_get_high;
2855
2856 // pattern for addhn/subhn with 2 operands
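// e.g. addhn v0.8b, v1.8h, v2.8h keeps the high 8 bits of each 16-bit sum.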
2857 class NeonI_3VDN_addhn_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
2858                            string asmop, string ResS, string OpS,
2859                            SDPatternOperator opnode, SDPatternOperator get_hi,
2860                            ValueType ResTy, ValueType OpTy>
2861   : NeonI_3VDiff<q, u, size, opcode,
2862                  (outs VPR64:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
2863                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2864                  [(set (ResTy VPR64:$Rd),
2865                     (ResTy (get_hi
2866                       (OpTy (opnode (OpTy VPR128:$Rn),
2867                                     (OpTy VPR128:$Rm))))))],
2868                  NoItinerary>;
2869
2870 multiclass NeonI_3VDN_addhn_2Op<bit u, bits<4> opcode, string asmop,
2871                                 SDPatternOperator opnode, bit Commutable = 0> {
2872   let isCommutable = Commutable in {
2873     def _8b8h : NeonI_3VDN_addhn_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
2874                                      opnode, NI_get_hi_8h, v8i8, v8i16>;
2875     def _4h4s : NeonI_3VDN_addhn_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
2876                                      opnode, NI_get_hi_4s, v4i16, v4i32>;
2877     def _2s2d : NeonI_3VDN_addhn_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
2878                                      opnode, NI_get_hi_2d, v2i32, v2i64>;
2879   }
2880 }
2881
2882 defm ADDHNvvv  : NeonI_3VDN_addhn_2Op<0b0, 0b0100, "addhn", add, 1>;
2883 defm SUBHNvvv  : NeonI_3VDN_addhn_2Op<0b0, 0b0110, "subhn", sub, 0>;
2884
2885 // pattern for operation with 2 operands
2886 class NeonI_3VD_2Op<bit q, bit u, bits<2> size, bits<4> opcode,
2887                     string asmop, string ResS, string OpS,
2888                     SDPatternOperator opnode,
2889                     RegisterOperand ResVPR, RegisterOperand OpVPR,
2890                     ValueType ResTy, ValueType OpTy>
2891   : NeonI_3VDiff<q, u, size, opcode,
2892                  (outs ResVPR:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2893                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2894                  [(set (ResTy ResVPR:$Rd),
2895                     (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))],
2896                  NoItinerary>;
2897
2898 // normal narrow pattern
2899 multiclass NeonI_3VDN_2Op<bit u, bits<4> opcode, string asmop,
2900                           SDPatternOperator opnode, bit Commutable = 0> {
2901   let isCommutable = Commutable in {
2902     def _8b8h : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8b", "8h",
2903                               opnode, VPR64, VPR128, v8i8, v8i16>;
2904     def _4h4s : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4h", "4s",
2905                               opnode, VPR64, VPR128, v4i16, v4i32>;
2906     def _2s2d : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2s", "2d",
2907                               opnode, VPR64, VPR128, v2i32, v2i64>;
2908   }
2909 }
2910
2911 defm RADDHNvvv : NeonI_3VDN_2Op<0b1, 0b0100, "raddhn", int_arm_neon_vraddhn, 1>;
2912 defm RSUBHNvvv : NeonI_3VDN_2Op<0b1, 0b0110, "rsubhn", int_arm_neon_vrsubhn, 0>;
2913
2914 // pattern for ACLE intrinsics with 3 operands
2915 class NeonI_3VDN_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
2916                      string asmop, string ResS, string OpS>
2917   : NeonI_3VDiff<q, u, size, opcode,
2918                  (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn, VPR128:$Rm),
2919                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2920                  [], NoItinerary> {
2921   let Constraints = "$src = $Rd";
2922   let neverHasSideEffects = 1;
2923 }
2924
2925 multiclass NeonI_3VDN_3Op_v1<bit u, bits<4> opcode, string asmop> {
2926   def _16b8h : NeonI_3VDN_3Op<0b1, u, 0b00, opcode, asmop, "16b", "8h">;
2927   def _8h4s : NeonI_3VDN_3Op<0b1, u, 0b01, opcode, asmop, "8h", "4s">;
2928   def _4s2d : NeonI_3VDN_3Op<0b1, u, 0b10, opcode, asmop, "4s", "2d">;
2929 }
2930
2931 defm ADDHN2vvv  : NeonI_3VDN_3Op_v1<0b0, 0b0100, "addhn2">;
2932 defm SUBHN2vvv  : NeonI_3VDN_3Op_v1<0b0, 0b0110, "subhn2">;
2933
2934 defm RADDHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0100, "raddhn2">;
2935 defm RSUBHN2vvv : NeonI_3VDN_3Op_v1<0b1, 0b0110, "rsubhn2">;
2936
2937 // Patterns have to be separate because there's a SUBREG_TO_REG in the output
2938 // part.
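// The instruction writes the narrowed result into the high half of $Rd while
// the low half ($src) is preserved, hence the Neon_combine_2D/SUBREG_TO_REG
// form instead of a plain two-operand pattern.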
2939 class NarrowHighHalfPat<Instruction INST, ValueType DstTy, ValueType SrcTy,
2940                         SDPatternOperator coreop>
2941   : Pat<(Neon_combine_2D (v1i64 VPR64:$src),
2942                       (v1i64 (bitconvert (DstTy (coreop (SrcTy VPR128:$Rn),
2943                                                         (SrcTy VPR128:$Rm)))))),
2944         (INST (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64),
2945               VPR128:$Rn, VPR128:$Rm)>;
2946
2947 // addhn2 patterns
2948 def : NarrowHighHalfPat<ADDHN2vvv_16b8h, v8i8,  v8i16,
2949           BinOpFrag<(NI_get_hi_8h (add node:$LHS, node:$RHS))>>;
2950 def : NarrowHighHalfPat<ADDHN2vvv_8h4s,  v4i16, v4i32,
2951           BinOpFrag<(NI_get_hi_4s (add node:$LHS, node:$RHS))>>;
2952 def : NarrowHighHalfPat<ADDHN2vvv_4s2d,  v2i32, v2i64,
2953           BinOpFrag<(NI_get_hi_2d (add node:$LHS, node:$RHS))>>;
2954
2955 // subhn2 patterns
2956 def : NarrowHighHalfPat<SUBHN2vvv_16b8h, v8i8,  v8i16,
2957           BinOpFrag<(NI_get_hi_8h (sub node:$LHS, node:$RHS))>>;
2958 def : NarrowHighHalfPat<SUBHN2vvv_8h4s,  v4i16, v4i32,
2959           BinOpFrag<(NI_get_hi_4s (sub node:$LHS, node:$RHS))>>;
2960 def : NarrowHighHalfPat<SUBHN2vvv_4s2d,  v2i32, v2i64,
2961           BinOpFrag<(NI_get_hi_2d (sub node:$LHS, node:$RHS))>>;
2962
2963 // raddhn2 patterns
2964 def : NarrowHighHalfPat<RADDHN2vvv_16b8h, v8i8,  v8i16, int_arm_neon_vraddhn>;
2965 def : NarrowHighHalfPat<RADDHN2vvv_8h4s,  v4i16, v4i32, int_arm_neon_vraddhn>;
2966 def : NarrowHighHalfPat<RADDHN2vvv_4s2d,  v2i32, v2i64, int_arm_neon_vraddhn>;
2967
2968 // rsubhn2 patterns
2969 def : NarrowHighHalfPat<RSUBHN2vvv_16b8h, v8i8,  v8i16, int_arm_neon_vrsubhn>;
2970 def : NarrowHighHalfPat<RSUBHN2vvv_8h4s,  v4i16, v4i32, int_arm_neon_vrsubhn>;
2971 def : NarrowHighHalfPat<RSUBHN2vvv_4s2d,  v2i32, v2i64, int_arm_neon_vrsubhn>;
2972
2973 // pattern that needs to extend the result
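// e.g. sabdl v0.8h, v1.8b, v2.8b: the 8-bit absolute differences are
// zero-extended to 16 bits (the difference is already non-negative).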
2974 class NeonI_3VDL_Ext<bit q, bit u, bits<2> size, bits<4> opcode,
2975                      string asmop, string ResS, string OpS,
2976                      SDPatternOperator opnode,
2977                      RegisterOperand OpVPR,
2978                      ValueType ResTy, ValueType OpTy, ValueType OpSTy>
2979   : NeonI_3VDiff<q, u, size, opcode,
2980                  (outs VPR128:$Rd), (ins OpVPR:$Rn, OpVPR:$Rm),
2981                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
2982                  [(set (ResTy VPR128:$Rd),
2983                     (ResTy (zext (OpSTy (opnode (OpTy OpVPR:$Rn),
2984                                                 (OpTy OpVPR:$Rm))))))],
2985                  NoItinerary>;
2986
2987 multiclass NeonI_3VDL_zext<bit u, bits<4> opcode, string asmop,
2988                            SDPatternOperator opnode, bit Commutable = 0> {
2989   let isCommutable = Commutable in {
2990     def _8h8b : NeonI_3VDL_Ext<0b0, u, 0b00, opcode, asmop, "8h", "8b",
2991                                opnode, VPR64, v8i16, v8i8, v8i8>;
2992     def _4s4h : NeonI_3VDL_Ext<0b0, u, 0b01, opcode, asmop, "4s", "4h",
2993                                opnode, VPR64, v4i32, v4i16, v4i16>;
2994     def _2d2s : NeonI_3VDL_Ext<0b0, u, 0b10, opcode, asmop, "2d", "2s",
2995                                opnode, VPR64, v2i64, v2i32, v2i32>;
2996   }
2997 }
2998
2999 defm SABDLvvv : NeonI_3VDL_zext<0b0, 0b0111, "sabdl", int_arm_neon_vabds, 1>;
3000 defm UABDLvvv : NeonI_3VDL_zext<0b1, 0b0111, "uabdl", int_arm_neon_vabdu, 1>;
3001
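// Build "_hi" fragments that apply op to the high halves of both 128-bit
// operands; these drive the second-half (*2) variants below.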
3002 multiclass NeonI_Op_High<SDPatternOperator op> {
3003   def _16B : PatFrag<(ops node:$Rn, node:$Rm),
3004                      (op (v8i8 (Neon_High16B node:$Rn)),
3005                          (v8i8 (Neon_High16B node:$Rm)))>;
3006   def _8H  : PatFrag<(ops node:$Rn, node:$Rm),
3007                      (op (v4i16 (Neon_High8H node:$Rn)),
3008                          (v4i16 (Neon_High8H node:$Rm)))>;
3009   def _4S  : PatFrag<(ops node:$Rn, node:$Rm),
3010                      (op (v2i32 (Neon_High4S node:$Rn)),
3011                          (v2i32 (Neon_High4S node:$Rm)))>;
3012 }
3013
3014 defm NI_sabdl_hi : NeonI_Op_High<int_arm_neon_vabds>;
3015 defm NI_uabdl_hi : NeonI_Op_High<int_arm_neon_vabdu>;
3016 defm NI_smull_hi : NeonI_Op_High<int_arm_neon_vmulls>;
3017 defm NI_umull_hi : NeonI_Op_High<int_arm_neon_vmullu>;
3018 defm NI_qdmull_hi : NeonI_Op_High<int_arm_neon_vqdmull>;
3019 defm NI_pmull_hi : NeonI_Op_High<int_arm_neon_vmullp>;
3020
3021 multiclass NeonI_3VDL_Abd_u<bit u, bits<4> opcode, string asmop, string opnode,
3022                             bit Commutable = 0> {
3023   let isCommutable = Commutable in {
3024     def _8h8b  : NeonI_3VDL_Ext<0b1, u, 0b00, opcode, asmop, "8h", "16b",
3025                                 !cast<PatFrag>(opnode # "_16B"),
3026                                 VPR128, v8i16, v16i8, v8i8>;
3027     def _4s4h  : NeonI_3VDL_Ext<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3028                                 !cast<PatFrag>(opnode # "_8H"),
3029                                 VPR128, v4i32, v8i16, v4i16>;
3030     def _2d2s  : NeonI_3VDL_Ext<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3031                                 !cast<PatFrag>(opnode # "_4S"),
3032                                 VPR128, v2i64, v4i32, v2i32>;
3033   }
3034 }
3035
3036 defm SABDL2vvv : NeonI_3VDL_Abd_u<0b0, 0b0111, "sabdl2", "NI_sabdl_hi", 1>;
3037 defm UABDL2vvv : NeonI_3VDL_Abd_u<0b1, 0b0111, "uabdl2", "NI_uabdl_hi", 1>;
3038
3039 // For patterns that need two operators chained together.
3040 class NeonI_3VDL_Aba<bit q, bit u, bits<2> size, bits<4> opcode,
3041                      string asmop, string ResS, string OpS, 
3042                      SDPatternOperator opnode, SDPatternOperator subop,
3043                      RegisterOperand OpVPR,
3044                      ValueType ResTy, ValueType OpTy, ValueType OpSTy>
3045   : NeonI_3VDiff<q, u, size, opcode,
3046                  (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
3047                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS, 
3048                  [(set (ResTy VPR128:$Rd),
3049                     (ResTy (opnode
3050                       (ResTy VPR128:$src), 
3051                       (ResTy (zext (OpSTy (subop (OpTy OpVPR:$Rn),
3052                                                  (OpTy OpVPR:$Rm))))))))],
3053                  NoItinerary> {
3054   let Constraints = "$src = $Rd";
3055 }
3056
3057 multiclass NeonI_3VDL_Aba_v1<bit u, bits<4> opcode, string asmop,
3058                              SDPatternOperator opnode, SDPatternOperator subop>{
3059   def _8h8b : NeonI_3VDL_Aba<0b0, u, 0b00, opcode, asmop, "8h", "8b",
3060                              opnode, subop, VPR64, v8i16, v8i8, v8i8>;
3061   def _4s4h : NeonI_3VDL_Aba<0b0, u, 0b01, opcode, asmop, "4s", "4h",
3062                              opnode, subop, VPR64, v4i32, v4i16, v4i16>;
3063   def _2d2s : NeonI_3VDL_Aba<0b0, u, 0b10, opcode, asmop, "2d", "2s",
3064                              opnode, subop, VPR64, v2i64, v2i32, v2i32>;
3065 }
3066
3067 defm SABALvvv :  NeonI_3VDL_Aba_v1<0b0, 0b0101, "sabal",
3068                                    add, int_arm_neon_vabds>;
3069 defm UABALvvv :  NeonI_3VDL_Aba_v1<0b1, 0b0101, "uabal",
3070                                    add, int_arm_neon_vabdu>;
3071
3072 multiclass NeonI_3VDL2_Aba_v1<bit u, bits<4> opcode, string asmop,
3073                               SDPatternOperator opnode, string subop> {
3074   def _8h8b : NeonI_3VDL_Aba<0b1, u, 0b00, opcode, asmop, "8h", "16b",
3075                              opnode, !cast<PatFrag>(subop # "_16B"), 
3076                              VPR128, v8i16, v16i8, v8i8>;
3077   def _4s4h : NeonI_3VDL_Aba<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3078                              opnode, !cast<PatFrag>(subop # "_8H"), 
3079                              VPR128, v4i32, v8i16, v4i16>;
3080   def _2d2s : NeonI_3VDL_Aba<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3081                              opnode, !cast<PatFrag>(subop # "_4S"), 
3082                              VPR128, v2i64, v4i32, v2i32>;
3083 }
3084
3085 defm SABAL2vvv :  NeonI_3VDL2_Aba_v1<0b0, 0b0101, "sabal2", add,
3086                                      "NI_sabdl_hi">;
3087 defm UABAL2vvv :  NeonI_3VDL2_Aba_v1<0b1, 0b0101, "uabal2", add,
3088                                      "NI_uabdl_hi">;
3089
3090 // Long pattern with 2 operands
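// e.g. smull v0.8h, v1.8b, v2.8b: full-width product of the extended sources.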
3091 multiclass NeonI_3VDL_2Op<bit u, bits<4> opcode, string asmop,
3092                           SDPatternOperator opnode, bit Commutable = 0> {
3093   let isCommutable = Commutable in {
3094     def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
3095                               opnode, VPR128, VPR64, v8i16, v8i8>;
3096     def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
3097                               opnode, VPR128, VPR64, v4i32, v4i16>;
3098     def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
3099                               opnode, VPR128, VPR64, v2i64, v2i32>;
3100   }
3101 }
3102
3103 defm SMULLvvv :  NeonI_3VDL_2Op<0b0, 0b1100, "smull", int_arm_neon_vmulls, 1>;
3104 defm UMULLvvv :  NeonI_3VDL_2Op<0b1, 0b1100, "umull", int_arm_neon_vmullu, 1>;
3105
3106 class NeonI_3VDL2_2Op_mull<bit q, bit u, bits<2> size, bits<4> opcode,
3107                            string asmop, string ResS, string OpS,
3108                            SDPatternOperator opnode,
3109                            ValueType ResTy, ValueType OpTy>
3110   : NeonI_3VDiff<q, u, size, opcode,
3111                  (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
3112                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
3113                  [(set (ResTy VPR128:$Rd),
3114                     (ResTy (opnode (OpTy VPR128:$Rn), (OpTy VPR128:$Rm))))],
3115                  NoItinerary>;
3116
3117 multiclass NeonI_3VDL2_2Op_mull_v1<bit u, bits<4> opcode, string asmop,
3118                                    string opnode, bit Commutable = 0> {
3119   let isCommutable = Commutable in {
3120     def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
3121                                       !cast<PatFrag>(opnode # "_16B"),
3122                                       v8i16, v16i8>;
3123     def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3124                                      !cast<PatFrag>(opnode # "_8H"),
3125                                      v4i32, v8i16>;
3126     def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3127                                      !cast<PatFrag>(opnode # "_4S"),
3128                                      v2i64, v4i32>;
3129   }
3130 }
3131
3132 defm SMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b0, 0b1100, "smull2",
3133                                          "NI_smull_hi", 1>;
3134 defm UMULL2vvv : NeonI_3VDL2_2Op_mull_v1<0b1, 0b1100, "umull2",
3135                                          "NI_umull_hi", 1>;
3136
3137 // Long pattern with 3 operands
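// e.g. smlal v0.8h, v1.8b, v2.8b accumulates the widened products into $Rd,
// so the destination is also a source (see the "$src = $Rd" constraint).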
3138 class NeonI_3VDL_3Op<bit q, bit u, bits<2> size, bits<4> opcode,
3139                      string asmop, string ResS, string OpS,
3140                      SDPatternOperator opnode,
3141                      ValueType ResTy, ValueType OpTy>
3142   : NeonI_3VDiff<q, u, size, opcode,
3143                  (outs VPR128:$Rd), (ins VPR128:$src, VPR64:$Rn, VPR64:$Rm),
3144                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
3145                  [(set (ResTy VPR128:$Rd),
3146                     (ResTy (opnode
3147                       (ResTy VPR128:$src),
3148                       (OpTy VPR64:$Rn), (OpTy VPR64:$Rm))))],
3149                NoItinerary> {
3150   let Constraints = "$src = $Rd";
3151 }
3152
3153 multiclass NeonI_3VDL_3Op_v1<bit u, bits<4> opcode, string asmop,
3154                              SDPatternOperator opnode> {
3155   def _8h8b : NeonI_3VDL_3Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
3156                              opnode, v8i16, v8i8>;
3157   def _4s4h : NeonI_3VDL_3Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
3158                              opnode, v4i32, v4i16>;
3159   def _2d2s : NeonI_3VDL_3Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
3160                              opnode, v2i64, v2i32>;
3161 }
3162
3163 def Neon_smlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
3164                          (add node:$Rd,
3165                             (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
3166
3167 def Neon_umlal : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
3168                          (add node:$Rd,
3169                             (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
3170
3171 def Neon_smlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
3172                          (sub node:$Rd,
3173                             (int_arm_neon_vmulls node:$Rn, node:$Rm))>;
3174
3175 def Neon_umlsl : PatFrag<(ops node:$Rd, node:$Rn, node:$Rm),
3176                          (sub node:$Rd,
3177                             (int_arm_neon_vmullu node:$Rn, node:$Rm))>;
3178
3179 defm SMLALvvv :  NeonI_3VDL_3Op_v1<0b0, 0b1000, "smlal", Neon_smlal>;
3180 defm UMLALvvv :  NeonI_3VDL_3Op_v1<0b1, 0b1000, "umlal", Neon_umlal>;
3181
3182 defm SMLSLvvv :  NeonI_3VDL_3Op_v1<0b0, 0b1010, "smlsl", Neon_smlsl>;
3183 defm UMLSLvvv :  NeonI_3VDL_3Op_v1<0b1, 0b1010, "umlsl", Neon_umlsl>;
3184
3185 class NeonI_3VDL2_3Op_mlas<bit q, bit u, bits<2> size, bits<4> opcode,
3186                            string asmop, string ResS, string OpS,
3187                            SDPatternOperator subop, SDPatternOperator opnode,
3188                            RegisterOperand OpVPR,
3189                            ValueType ResTy, ValueType OpTy>
3190   : NeonI_3VDiff<q, u, size, opcode,
3191                (outs VPR128:$Rd), (ins VPR128:$src, OpVPR:$Rn, OpVPR:$Rm),
3192                asmop # "\t$Rd." # ResS # ", $Rn." # OpS # ", $Rm." # OpS,
3193                [(set (ResTy VPR128:$Rd),
3194                   (ResTy (subop
3195                     (ResTy VPR128:$src),
3196                     (ResTy (opnode (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm))))))],
3197                NoItinerary> {
3198   let Constraints = "$src = $Rd";
3199 }
3200
3201 multiclass NeonI_3VDL2_3Op_mlas_v1<bit u, bits<4> opcode, string asmop, 
3202                                    SDPatternOperator subop, string opnode> {
3203   def _8h16b : NeonI_3VDL2_3Op_mlas<0b1, u, 0b00, opcode, asmop, "8h", "16b",
3204                                     subop, !cast<PatFrag>(opnode # "_16B"),
3205                                     VPR128, v8i16, v16i8>;
3206   def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3207                                    subop, !cast<PatFrag>(opnode # "_8H"), 
3208                                    VPR128, v4i32, v8i16>;
3209   def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3210                                    subop, !cast<PatFrag>(opnode # "_4S"),
3211                                    VPR128, v2i64, v4i32>;
3212 }
3213
3214 defm SMLAL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1000, "smlal2",
3215                                           add, "NI_smull_hi">;
3216 defm UMLAL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1000, "umlal2",
3217                                           add, "NI_umull_hi">;
3218
3219 defm SMLSL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b0, 0b1010, "smlsl2",
3220                                           sub, "NI_smull_hi">;
3221 defm UMLSL2vvv :  NeonI_3VDL2_3Op_mlas_v1<0b1, 0b1010, "umlsl2",
3222                                           sub, "NI_umull_hi">;
3223
3224 multiclass NeonI_3VDL_qdmlal_3Op_v2<bit u, bits<4> opcode, string asmop,
3225                                     SDPatternOperator opnode> {
3226   def _4s4h : NeonI_3VDL2_3Op_mlas<0b0, u, 0b01, opcode, asmop, "4s", "4h",
3227                                    opnode, int_arm_neon_vqdmull,
3228                                    VPR64, v4i32, v4i16>;
3229   def _2d2s : NeonI_3VDL2_3Op_mlas<0b0, u, 0b10, opcode, asmop, "2d", "2s",
3230                                    opnode, int_arm_neon_vqdmull,
3231                                    VPR64, v2i64, v2i32>;
3232 }
3233
3234 defm SQDMLALvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1001, "sqdmlal",
3235                                            int_arm_neon_vqadds>;
3236 defm SQDMLSLvvv : NeonI_3VDL_qdmlal_3Op_v2<0b0, 0b1011, "sqdmlsl",
3237                                            int_arm_neon_vqsubs>;
3238
3239 multiclass NeonI_3VDL_v2<bit u, bits<4> opcode, string asmop,
3240                          SDPatternOperator opnode, bit Commutable = 0> {
3241   let isCommutable = Commutable in {
3242     def _4s4h : NeonI_3VD_2Op<0b0, u, 0b01, opcode, asmop, "4s", "4h",
3243                               opnode, VPR128, VPR64, v4i32, v4i16>;
3244     def _2d2s : NeonI_3VD_2Op<0b0, u, 0b10, opcode, asmop, "2d", "2s",
3245                               opnode, VPR128, VPR64, v2i64, v2i32>;
3246   }
3247 }
3248
3249 defm SQDMULLvvv : NeonI_3VDL_v2<0b0, 0b1101, "sqdmull",
3250                                 int_arm_neon_vqdmull, 1>;
3251
3252 multiclass NeonI_3VDL2_2Op_mull_v2<bit u, bits<4> opcode, string asmop, 
3253                                    string opnode, bit Commutable = 0> {
3254   let isCommutable = Commutable in {
3255     def _4s8h : NeonI_3VDL2_2Op_mull<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3256                                      !cast<PatFrag>(opnode # "_8H"),
3257                                      v4i32, v8i16>;
3258     def _2d4s : NeonI_3VDL2_2Op_mull<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3259                                      !cast<PatFrag>(opnode # "_4S"),
3260                                      v2i64, v4i32>;
3261   }
3262 }
3263
3264 defm SQDMULL2vvv : NeonI_3VDL2_2Op_mull_v2<0b0, 0b1101, "sqdmull2", 
3265                                            "NI_qdmull_hi", 1>;
3266
3267 multiclass NeonI_3VDL2_3Op_qdmlal_v2<bit u, bits<4> opcode, string asmop, 
3268                                      SDPatternOperator opnode> {
3269   def _4s8h : NeonI_3VDL2_3Op_mlas<0b1, u, 0b01, opcode, asmop, "4s", "8h",
3270                                    opnode, NI_qdmull_hi_8H,
3271                                    VPR128, v4i32, v8i16>;
3272   def _2d4s : NeonI_3VDL2_3Op_mlas<0b1, u, 0b10, opcode, asmop, "2d", "4s",
3273                                    opnode, NI_qdmull_hi_4S,
3274                                    VPR128, v2i64, v4i32>;
3275 }
3276
3277 defm SQDMLAL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1001, "sqdmlal2",
3278                                              int_arm_neon_vqadds>;
3279 defm SQDMLSL2vvv : NeonI_3VDL2_3Op_qdmlal_v2<0b0, 0b1011, "sqdmlsl2",
3280                                              int_arm_neon_vqsubs>;
3281
3282 multiclass NeonI_3VDL_v3<bit u, bits<4> opcode, string asmop,
3283                          SDPatternOperator opnode, bit Commutable = 0> {
3284   let isCommutable = Commutable in {
3285     def _8h8b : NeonI_3VD_2Op<0b0, u, 0b00, opcode, asmop, "8h", "8b",
3286                               opnode, VPR128, VPR64, v8i16, v8i8>;
3287     
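    // The .1q form produces a single 128-bit polynomial product.  It has no
    // selection pattern here, so it is assembly/disassembly (MC layer) only.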
3288     def _1q1d : NeonI_3VDiff<0b0, u, 0b11, opcode,
3289                              (outs VPR128:$Rd), (ins VPR64:$Rn, VPR64:$Rm),
3290                              asmop # "\t$Rd.1q, $Rn.1d, $Rm.1d",
3291                              [], NoItinerary>;
3292   }
3293 }
3294
3295 defm PMULLvvv : NeonI_3VDL_v3<0b0, 0b1110, "pmull", int_arm_neon_vmullp, 1>;
3296
3297 multiclass NeonI_3VDL2_2Op_mull_v3<bit u, bits<4> opcode, string asmop, 
3298                                    string opnode, bit Commutable = 0> {
3299   let isCommutable = Commutable in {
3300     def _8h16b : NeonI_3VDL2_2Op_mull<0b1, u, 0b00, opcode, asmop, "8h", "16b",
3301                                       !cast<PatFrag>(opnode # "_16B"),
3302                                       v8i16, v16i8>;
3303     
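    // Likewise MC-layer only: pmull2 with a .1q destination and .2d sources.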
3304     def _1q2d : NeonI_3VDiff<0b1, u, 0b11, opcode,
3305                              (outs VPR128:$Rd), (ins VPR128:$Rn, VPR128:$Rm),
3306                              asmop # "\t$Rd.1q, $Rn.2d, $Rm.2d",
3307                              [], NoItinerary>;
3308   }
3309 }
3310
3311 defm PMULL2vvv : NeonI_3VDL2_2Op_mull_v3<0b0, 0b1110, "pmull2", "NI_pmull_hi",
3312                                          1>;
3313
3314 // End of implementation for instruction class (3V Diff)
3315
3316 // The following are vector load/store multiple N-element structure
3317 // instructions (class SIMD lselem).
3318
3319 // ld1:         load multiple 1-element structures to 1/2/3/4 registers.
3320 // ld2/ld3/ld4: load multiple N-element structures to N registers (N = 2, 3, 4).
3321 //              The structure consists of a sequence of sets of N values.
3322 //              The first element of the structure is placed in the first lane
3323 //              of the first vector, the second element in the first lane
3324 //              of the second vector, and so on.
3325 // E.g. LD1_3V_2S will load 32-bit elements {A, B, C, D, E, F} sequentially into
3326 // the list of three 64-bit vectors {BA, DC, FE}.
3327 // E.g. LD3_2S will load 32-bit elements {A, B, C, D, E, F} into the list of
3328 // three 64-bit vectors {DA, EB, FC}.
3329 // Store instructions store multiple structures from N registers in the same way.
3330
3331
3332 class NeonI_LDVList<bit q, bits<4> opcode, bits<2> size,
3333                     RegisterOperand VecList, string asmop>
3334   : NeonI_LdStMult<q, 1, opcode, size,
3335                  (outs VecList:$Rt), (ins GPR64xsp:$Rn),
3336                  asmop # "\t$Rt, [$Rn]",
3337                  [],
3338                  NoItinerary> {
3339   let mayLoad = 1;
3340   let neverHasSideEffects = 1;
3341 }
3342
3343 multiclass LDVList_BHSD<bits<4> opcode, string List, string asmop> {
3344   def _8B : NeonI_LDVList<0, opcode, 0b00,
3345                           !cast<RegisterOperand>(List # "8B_operand"), asmop>;
3346
3347   def _4H : NeonI_LDVList<0, opcode, 0b01,
3348                           !cast<RegisterOperand>(List # "4H_operand"), asmop>;
3349
3350   def _2S : NeonI_LDVList<0, opcode, 0b10,
3351                           !cast<RegisterOperand>(List # "2S_operand"), asmop>;
3352
3353   def _16B : NeonI_LDVList<1, opcode, 0b00,
3354                            !cast<RegisterOperand>(List # "16B_operand"), asmop>;
3355
3356   def _8H : NeonI_LDVList<1, opcode, 0b01,
3357                           !cast<RegisterOperand>(List # "8H_operand"), asmop>;
3358
3359   def _4S : NeonI_LDVList<1, opcode, 0b10,
3360                           !cast<RegisterOperand>(List # "4S_operand"), asmop>;
3361
3362   def _2D : NeonI_LDVList<1, opcode, 0b11,
3363                           !cast<RegisterOperand>(List # "2D_operand"), asmop>;
3364 }
3365
3366 // Load multiple N-element structures to N consecutive registers (N = 1,2,3,4)
3367 defm LD1 : LDVList_BHSD<0b0111, "VOne", "ld1">;
3368 def LD1_1D : NeonI_LDVList<0, 0b0111, 0b11, VOne1D_operand, "ld1">;
3369
3370 defm LD2 : LDVList_BHSD<0b1000, "VPair", "ld2">;
3371
3372 defm LD3 : LDVList_BHSD<0b0100, "VTriple", "ld3">;
3373
3374 defm LD4 : LDVList_BHSD<0b0000, "VQuad", "ld4">;
3375
3376 // Load multiple 1-element structures to N consecutive registers (N = 2,3,4)
3377 defm LD1x2 : LDVList_BHSD<0b1010, "VPair", "ld1">;
3378 def LD1x2_1D : NeonI_LDVList<0, 0b1010, 0b11, VPair1D_operand, "ld1">;
3379
3380 defm LD1x3 : LDVList_BHSD<0b0110, "VTriple", "ld1">;
3381 def LD1x3_1D : NeonI_LDVList<0, 0b0110, 0b11, VTriple1D_operand, "ld1">;
3382
3383 defm LD1x4 : LDVList_BHSD<0b0010, "VQuad", "ld1">;
3384 def LD1x4_1D : NeonI_LDVList<0, 0b0010, 0b11, VQuad1D_operand, "ld1">;
3385
3386 class NeonI_STVList<bit q, bits<4> opcode, bits<2> size,
3387                     RegisterOperand VecList, string asmop>
3388   : NeonI_LdStMult<q, 0, opcode, size,
3389                  (outs), (ins GPR64xsp:$Rn, VecList:$Rt), 
3390                  asmop # "\t$Rt, [$Rn]",
3391                  [], 
3392                  NoItinerary> {
3393   let mayStore = 1;
3394   let neverHasSideEffects = 1;
3395 }
3396
3397 multiclass STVList_BHSD<bits<4> opcode, string List, string asmop> {
3398   def _8B : NeonI_STVList<0, opcode, 0b00,
3399                           !cast<RegisterOperand>(List # "8B_operand"), asmop>;
3400
3401   def _4H : NeonI_STVList<0, opcode, 0b01,
3402                           !cast<RegisterOperand>(List # "4H_operand"), asmop>;
3403
3404   def _2S : NeonI_STVList<0, opcode, 0b10,
3405                           !cast<RegisterOperand>(List # "2S_operand"), asmop>;
3406
3407   def _16B : NeonI_STVList<1, opcode, 0b00,
3408                            !cast<RegisterOperand>(List # "16B_operand"), asmop>;
3409
3410   def _8H : NeonI_STVList<1, opcode, 0b01,
3411                           !cast<RegisterOperand>(List # "8H_operand"), asmop>;
3412
3413   def _4S : NeonI_STVList<1, opcode, 0b10,
3414                           !cast<RegisterOperand>(List # "4S_operand"), asmop>;
3415
3416   def _2D : NeonI_STVList<1, opcode, 0b11,
3417                           !cast<RegisterOperand>(List # "2D_operand"), asmop>;
3418 }
3419
3420 // Store multiple N-element structures from N registers (N = 1,2,3,4)
3421 defm ST1 : STVList_BHSD<0b0111, "VOne", "st1">;
3422 def ST1_1D : NeonI_STVList<0, 0b0111, 0b11, VOne1D_operand, "st1">;
3423
3424 defm ST2 : STVList_BHSD<0b1000, "VPair", "st2">;
3425
3426 defm ST3 : STVList_BHSD<0b0100, "VTriple", "st3">;
3427
3428 defm ST4 : STVList_BHSD<0b0000, "VQuad", "st4">;
3429
3430 // Store multiple 1-element structures from N consecutive registers (N = 2,3,4)
3431 defm ST1x2 : STVList_BHSD<0b1010, "VPair", "st1">;
3432 def ST1x2_1D : NeonI_STVList<0, 0b1010, 0b11, VPair1D_operand, "st1">;
3433
3434 defm ST1x3 : STVList_BHSD<0b0110, "VTriple", "st1">;
3435 def ST1x3_1D : NeonI_STVList<0, 0b0110, 0b11, VTriple1D_operand, "st1">;
3436
3437 defm ST1x4 : STVList_BHSD<0b0010, "VQuad", "st1">;
3438 def ST1x4_1D : NeonI_STVList<0, 0b0010, 0b11, VQuad1D_operand, "st1">;
3439
3440 // End of vector load/store multiple N-element structure (class SIMD lselem)
3441
3442 // The following are post-index vector load/store multiple N-element
3443 // structure instructions (class SIMD lselem-post).
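// Post-index addressing writes back $Rn + <imm> (fixed form, e.g.
// ld1 {v0.8b}, [x0], #8) or $Rn + $Rm (register form) after the access.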
3444 def exact8_asmoperand : AsmOperandClass {
3445   let Name = "Exact8";
3446   let PredicateMethod = "isExactImm<8>";
3447   let RenderMethod = "addImmOperands";
3448 }
3449 def uimm_exact8 : Operand<i32>, ImmLeaf<i32, [{return Imm == 8;}]> {
3450   let ParserMatchClass = exact8_asmoperand;
3451 }
3452
3453 def exact16_asmoperand : AsmOperandClass {
3454   let Name = "Exact16";
3455   let PredicateMethod = "isExactImm<16>";
3456   let RenderMethod = "addImmOperands";
3457 }
3458 def uimm_exact16 : Operand<i32>, ImmLeaf<i32, [{return Imm == 16;}]> {
3459   let ParserMatchClass = exact16_asmoperand;
3460 }
3461
3462 def exact24_asmoperand : AsmOperandClass {
3463   let Name = "Exact24";
3464   let PredicateMethod = "isExactImm<24>";
3465   let RenderMethod = "addImmOperands";
3466 }
3467 def uimm_exact24 : Operand<i32>, ImmLeaf<i32, [{return Imm == 24;}]> {
3468   let ParserMatchClass = exact24_asmoperand;
3469 }
3470
3471 def exact32_asmoperand : AsmOperandClass {
3472   let Name = "Exact32";
3473   let PredicateMethod = "isExactImm<32>";
3474   let RenderMethod = "addImmOperands";
3475 }
3476 def uimm_exact32 : Operand<i32>, ImmLeaf<i32, [{return Imm == 32;}]> {
3477   let ParserMatchClass = exact32_asmoperand;
3478 }
3479
3480 def exact48_asmoperand : AsmOperandClass {
3481   let Name = "Exact48";
3482   let PredicateMethod = "isExactImm<48>";
3483   let RenderMethod = "addImmOperands";
3484 }
3485 def uimm_exact48 : Operand<i32>, ImmLeaf<i32, [{return Imm == 48;}]> {
3486   let ParserMatchClass = exact48_asmoperand;
3487 }
3488
3489 def exact64_asmoperand : AsmOperandClass {
3490   let Name = "Exact64";
3491   let PredicateMethod = "isExactImm<64>";
3492   let RenderMethod = "addImmOperands";
3493 }
3494 def uimm_exact64 : Operand<i32>, ImmLeaf<i32, [{return Imm == 64;}]> {
3495   let ParserMatchClass = exact64_asmoperand;
3496 }
3497
3498 multiclass NeonI_LDWB_VList<bit q, bits<4> opcode, bits<2> size,
3499                            RegisterOperand VecList, Operand ImmTy,
3500                            string asmop> {
3501   let Constraints = "$Rn = $wb", mayLoad = 1, neverHasSideEffects = 1, 
3502       DecoderMethod = "DecodeVLDSTPostInstruction" in {
3503     def _fixed : NeonI_LdStMult_Post<q, 1, opcode, size,
3504                      (outs VecList:$Rt, GPR64xsp:$wb),
3505                      (ins GPR64xsp:$Rn, ImmTy:$amt), 
3506                      asmop # "\t$Rt, [$Rn], $amt",
3507                      [],
3508                      NoItinerary> {
3509       let Rm = 0b11111;
3510     }
3511
3512     def _register : NeonI_LdStMult_Post<q, 1, opcode, size,
3513                         (outs VecList:$Rt, GPR64xsp:$wb),
3514                         (ins GPR64xsp:$Rn, GPR64noxzr:$Rm), 
3515                         asmop # "\t$Rt, [$Rn], $Rm",
3516                         [],
3517                         NoItinerary>;
3518   }
3519 }
3520
3521 multiclass LDWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
3522     Operand ImmTy2, string asmop> {
3523   defm _8B : NeonI_LDWB_VList<0, opcode, 0b00,
3524                               !cast<RegisterOperand>(List # "8B_operand"),
3525                               ImmTy, asmop>;
3526
3527   defm _4H : NeonI_LDWB_VList<0, opcode, 0b01,
3528                               !cast<RegisterOperand>(List # "4H_operand"),
3529                               ImmTy, asmop>;
3530
3531   defm _2S : NeonI_LDWB_VList<0, opcode, 0b10,
3532                               !cast<RegisterOperand>(List # "2S_operand"),
3533                               ImmTy, asmop>;
3534
3535   defm _16B : NeonI_LDWB_VList<1, opcode, 0b00,
3536                                !cast<RegisterOperand>(List # "16B_operand"),
3537                                ImmTy2, asmop>;
3538
3539   defm _8H : NeonI_LDWB_VList<1, opcode, 0b01,
3540                               !cast<RegisterOperand>(List # "8H_operand"),
3541                               ImmTy2, asmop>;
3542
3543   defm _4S : NeonI_LDWB_VList<1, opcode, 0b10,
3544                               !cast<RegisterOperand>(List # "4S_operand"),
3545                               ImmTy2, asmop>;
3546
3547   defm _2D : NeonI_LDWB_VList<1, opcode, 0b11,
3548                               !cast<RegisterOperand>(List # "2D_operand"),
3549                               ImmTy2, asmop>;
3550 }
3551
3552 // Post-index load multiple N-element structures to N registers (N = 1,2,3,4)
3553 defm LD1WB : LDWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "ld1">;
3554 defm LD1WB_1D : NeonI_LDWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
3555                                  "ld1">;
3556
3557 defm LD2WB : LDWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32, "ld2">;
3558
3559 defm LD3WB : LDWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
3560                              "ld3">;
3561
3562 defm LD4WB : LDWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "ld4">;
3563
3564 // Post-index load multiple 1-element structures to N consecutive registers
3565 // (N = 2,3,4)
3566 defm LD1x2WB : LDWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
3567                                "ld1">;
3568 defm LD1x2WB_1D : NeonI_LDWB_VList<0, 0b1010, 0b11, VPair1D_operand,
3569                                    uimm_exact16, "ld1">;
3570
3571 defm LD1x3WB : LDWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
3572                                "ld1">;
3573 defm LD1x3WB_1D : NeonI_LDWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
3574                                    uimm_exact24, "ld1">;
3575
3576 defm LD1x4WB : LDWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
3577                                 "ld1">;
3578 defm LD1x4WB_1D : NeonI_LDWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
3579                                    uimm_exact32, "ld1">;
3580
3581 multiclass NeonI_STWB_VList<bit q, bits<4> opcode, bits<2> size,
3582                             RegisterOperand VecList, Operand ImmTy,
3583                             string asmop> {
3584   let Constraints = "$Rn = $wb", mayStore = 1, neverHasSideEffects = 1,
3585       DecoderMethod = "DecodeVLDSTPostInstruction" in {
3586     def _fixed : NeonI_LdStMult_Post<q, 0, opcode, size,
3587                      (outs GPR64xsp:$wb),
3588                      (ins GPR64xsp:$Rn, ImmTy:$amt, VecList:$Rt),
3589                      asmop # "\t$Rt, [$Rn], $amt",
3590                      [],
3591                      NoItinerary> {
3592       let Rm = 0b11111;
3593     }
3594
3595     def _register : NeonI_LdStMult_Post<q, 0, opcode, size,
3596                       (outs GPR64xsp:$wb),
3597                       (ins GPR64xsp:$Rn, GPR64noxzr:$Rm, VecList:$Rt), 
3598                       asmop # "\t$Rt, [$Rn], $Rm",
3599                       [],
3600                       NoItinerary>;
3601   }
3602 }
3603
3604 multiclass STWB_VList_BHSD<bits<4> opcode, string List, Operand ImmTy,
3605                            Operand ImmTy2, string asmop> {
3606   defm _8B : NeonI_STWB_VList<0, opcode, 0b00,
3607                  !cast<RegisterOperand>(List # "8B_operand"), ImmTy, asmop>;
3608
3609   defm _4H : NeonI_STWB_VList<0, opcode, 0b01,
3610                               !cast<RegisterOperand>(List # "4H_operand"),
3611                               ImmTy, asmop>;
3612
3613   defm _2S : NeonI_STWB_VList<0, opcode, 0b10,
3614                               !cast<RegisterOperand>(List # "2S_operand"),
3615                               ImmTy, asmop>;
3616
3617   defm _16B : NeonI_STWB_VList<1, opcode, 0b00,
3618                                !cast<RegisterOperand>(List # "16B_operand"),
3619                                ImmTy2, asmop>;
3620
3621   defm _8H : NeonI_STWB_VList<1, opcode, 0b01,
3622                               !cast<RegisterOperand>(List # "8H_operand"),
3623                               ImmTy2, asmop>;
3624
3625   defm _4S : NeonI_STWB_VList<1, opcode, 0b10,
3626                               !cast<RegisterOperand>(List # "4S_operand"),
3627                               ImmTy2, asmop>;
3628
3629   defm _2D : NeonI_STWB_VList<1, opcode, 0b11,
3630                               !cast<RegisterOperand>(List # "2D_operand"),
3631                               ImmTy2, asmop>;
3632 }
3633
3634 // Post-index store multiple N-element structures from N registers (N = 1,2,3,4)
3635 defm ST1WB : STWB_VList_BHSD<0b0111, "VOne", uimm_exact8, uimm_exact16, "st1">;
3636 defm ST1WB_1D : NeonI_STWB_VList<0, 0b0111, 0b11, VOne1D_operand, uimm_exact8,
3637                                  "st1">;
3638
3639 defm ST2WB : STWB_VList_BHSD<0b1000, "VPair", uimm_exact16, uimm_exact32, "st2">;
3640
3641 defm ST3WB : STWB_VList_BHSD<0b0100, "VTriple", uimm_exact24, uimm_exact48,
3642                              "st3">;
3643
3644 defm ST4WB : STWB_VList_BHSD<0b0000, "VQuad", uimm_exact32, uimm_exact64, "st4">;
3645
3646 // Post-index store multiple 1-element structures from N consecutive registers
3647 // (N = 2,3,4)
3648 defm ST1x2WB : STWB_VList_BHSD<0b1010, "VPair", uimm_exact16, uimm_exact32,
3649                                "st1">;
3650 defm ST1x2WB_1D : NeonI_STWB_VList<0, 0b1010, 0b11, VPair1D_operand,
3651                                    uimm_exact16, "st1">;
3652
3653 defm ST1x3WB : STWB_VList_BHSD<0b0110, "VTriple", uimm_exact24, uimm_exact48,
3654                                "st1">;
3655 defm ST1x3WB_1D : NeonI_STWB_VList<0, 0b0110, 0b11, VTriple1D_operand,
3656                                    uimm_exact24, "st1">;
3657
3658 defm ST1x4WB : STWB_VList_BHSD<0b0010, "VQuad", uimm_exact32, uimm_exact64,
3659                                "st1">;
3660 defm ST1x4WB_1D : NeonI_STWB_VList<0, 0b0010, 0b11, VQuad1D_operand,
3661                                    uimm_exact32, "st1">;
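// For illustration: the widest form above, ST1x4WB_4S_fixed, assembles roughly
// as "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [x0], #64"; the matching _register
// form replaces the #64 with a GPR increment, e.g. "..., [x0], x1".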
3662
3663 // End of post-index vector load/store multiple N-element structure
3664 // (class SIMD lselem-post)
3665
3666
3667 // Neon Scalar instructions implementation
3668 // Scalar Three Same
3669
3670 class NeonI_Scalar3Same_size<bit u, bits<2> size, bits<5> opcode, string asmop,
3671                              RegisterClass FPRC>
3672   : NeonI_Scalar3Same<u, size, opcode,
3673                       (outs FPRC:$Rd), (ins FPRC:$Rn, FPRC:$Rm),
3674                       !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
3675                       [],
3676                       NoItinerary>;
3677
3678 class NeonI_Scalar3Same_D_size<bit u, bits<5> opcode, string asmop>
3679   : NeonI_Scalar3Same_size<u, 0b11, opcode, asmop, FPR64>;
3680
3681 multiclass NeonI_Scalar3Same_HS_sizes<bit u, bits<5> opcode, string asmop,
3682                                       bit Commutable = 0> {
3683   let isCommutable = Commutable in {
3684     def hhh : NeonI_Scalar3Same_size<u, 0b01, opcode, asmop, FPR16>;
3685     def sss : NeonI_Scalar3Same_size<u, 0b10, opcode, asmop, FPR32>;
3686   }
3687 }
3688
3689 multiclass NeonI_Scalar3Same_SD_sizes<bit u, bit size_high, bits<5> opcode,
3690                                       string asmop, bit Commutable = 0> {
3691   let isCommutable = Commutable in {
3692     def sss : NeonI_Scalar3Same_size<u, {size_high, 0b0}, opcode, asmop, FPR32>;
3693     def ddd : NeonI_Scalar3Same_size<u, {size_high, 0b1}, opcode, asmop, FPR64>;
3694   }
3695 }
3696
3697 multiclass NeonI_Scalar3Same_BHSD_sizes<bit u, bits<5> opcode,
3698                                         string asmop, bit Commutable = 0> {
3699   let isCommutable = Commutable in {
3700     def bbb : NeonI_Scalar3Same_size<u, 0b00, opcode, asmop, FPR8>;
3701     def hhh : NeonI_Scalar3Same_size<u, 0b01, opcode, asmop, FPR16>;
3702     def sss : NeonI_Scalar3Same_size<u, 0b10, opcode, asmop, FPR32>;
3703     def ddd : NeonI_Scalar3Same_size<u, 0b11, opcode, asmop, FPR64>;
3704   }
3705 }
3706
3707 multiclass Neon_Scalar3Same_D_size_patterns<SDPatternOperator opnode,
3708                                             Instruction INSTD> {
3709   def : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
3710             (INSTD FPR64:$Rn, FPR64:$Rm)>;        
3711 }
3712
3713 multiclass Neon_Scalar3Same_BHSD_size_patterns<SDPatternOperator opnode,
3714                                                Instruction INSTB,
3715                                                Instruction INSTH,
3716                                                Instruction INSTS,
3717                                                Instruction INSTD>
3718   : Neon_Scalar3Same_D_size_patterns<opnode, INSTD> {
3719   def: Pat<(v1i8 (opnode (v1i8 FPR8:$Rn), (v1i8 FPR8:$Rm))),
3720            (INSTB FPR8:$Rn, FPR8:$Rm)>;
3721
3722   def: Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
3723            (INSTH FPR16:$Rn, FPR16:$Rm)>;
3724
3725   def: Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
3726            (INSTS FPR32:$Rn, FPR32:$Rm)>;
3727 }
3728
3729 class Neon_Scalar3Same_cmp_D_size_patterns<SDPatternOperator opnode,
3730                                            Instruction INSTD>
3731   : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm))),
3732         (INSTD FPR64:$Rn, FPR64:$Rm)>;
3733
3734 multiclass Neon_Scalar3Same_HS_size_patterns<SDPatternOperator opnode,
3735                                              Instruction INSTH,
3736                                              Instruction INSTS> {
3737   def : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
3738             (INSTH FPR16:$Rn, FPR16:$Rm)>;
3739   def : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
3740             (INSTS FPR32:$Rn, FPR32:$Rm)>;
3741 }
3742
3743 multiclass Neon_Scalar3Same_SD_size_patterns<SDPatternOperator opnode,
3744                                              Instruction INSTS,
3745                                              Instruction INSTD> {
3746   def : Pat<(v1f32 (opnode (v1f32 FPR32:$Rn), (v1f32 FPR32:$Rm))),
3747             (INSTS FPR32:$Rn, FPR32:$Rm)>;
3748   def : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
3749             (INSTD FPR64:$Rn, FPR64:$Rm)>;
3750 }
3751
3752 multiclass Neon_Scalar3Same_cmp_SD_size_patterns<SDPatternOperator opnode,
3753                                                  Instruction INSTS,
3754                                                  Instruction INSTD> {
3755   def : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn), (v1f32 FPR32:$Rm))),
3756             (INSTS FPR32:$Rn, FPR32:$Rm)>;
3757   def : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
3758             (INSTD FPR64:$Rn, FPR64:$Rm)>;
3759 }
3760
3761 // Scalar Three Different
3762
3763 class NeonI_Scalar3Diff_size<bit u, bits<2> size, bits<4> opcode, string asmop,
3764                              RegisterClass FPRCD, RegisterClass FPRCS>
3765   : NeonI_Scalar3Diff<u, size, opcode,
3766                       (outs FPRCD:$Rd), (ins FPRCS:$Rn, FPRCS:$Rm),
3767                       !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
3768                       [],
3769                       NoItinerary>;
3770
3771 multiclass NeonI_Scalar3Diff_HS_size<bit u, bits<4> opcode, string asmop> {
3772   def shh : NeonI_Scalar3Diff_size<u, 0b01, opcode, asmop, FPR32, FPR16>;
3773   def dss : NeonI_Scalar3Diff_size<u, 0b10, opcode, asmop, FPR64, FPR32>;
3774 }
3775
3776 multiclass NeonI_Scalar3Diff_ml_HS_size<bit u, bits<4> opcode, string asmop> {
3777   let Constraints = "$Src = $Rd" in {
3778     def shh : NeonI_Scalar3Diff<u, 0b01, opcode,
3779                        (outs FPR32:$Rd), (ins FPR32:$Src, FPR16:$Rn, FPR16:$Rm),
3780                        !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
3781                        [],
3782                        NoItinerary>;
3783     def dss : NeonI_Scalar3Diff<u, 0b10, opcode,
3784                        (outs FPR64:$Rd), (ins FPR64:$Src, FPR32:$Rn, FPR32:$Rm),
3785                        !strconcat(asmop, "\t$Rd, $Rn, $Rm"),
3786                        [],
3787                        NoItinerary>;
3788   }
3789 }
3790
3791 multiclass Neon_Scalar3Diff_HS_size_patterns<SDPatternOperator opnode,
3792                                              Instruction INSTH,
3793                                              Instruction INSTS> {
3794   def : Pat<(v1i32 (opnode (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
3795             (INSTH FPR16:$Rn, FPR16:$Rm)>;
3796   def : Pat<(v1i64 (opnode (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
3797             (INSTS FPR32:$Rn, FPR32:$Rm)>;
3798 }
3799
3800 multiclass Neon_Scalar3Diff_ml_HS_size_patterns<SDPatternOperator opnode,
3801                                              Instruction INSTH,
3802                                              Instruction INSTS> {
3803   def : Pat<(v1i32 (opnode (v1i32 FPR32:$Src), (v1i16 FPR16:$Rn), (v1i16 FPR16:$Rm))),
3804             (INSTH FPR32:$Src, FPR16:$Rn, FPR16:$Rm)>;
3805   def : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i32 FPR32:$Rn), (v1i32 FPR32:$Rm))),
3806             (INSTS FPR64:$Src, FPR32:$Rn, FPR32:$Rm)>;
3807 }
3808
3809 // Scalar Two Registers Miscellaneous
3810
3811 class NeonI_Scalar2SameMisc_size<bit u, bits<2> size, bits<5> opcode, string asmop,
3812                              RegisterClass FPRCD, RegisterClass FPRCS>
3813   : NeonI_Scalar2SameMisc<u, size, opcode,
3814                           (outs FPRCD:$Rd), (ins FPRCS:$Rn),
3815                           !strconcat(asmop, "\t$Rd, $Rn"),
3816                           [],
3817                           NoItinerary>;
3818
3819 multiclass NeonI_Scalar2SameMisc_SD_size<bit u, bit size_high, bits<5> opcode,
3820                                          string asmop> {
3821   def ss : NeonI_Scalar2SameMisc_size<u, {size_high, 0b0}, opcode, asmop, FPR32,
3822                                       FPR32>;
3823   def dd : NeonI_Scalar2SameMisc_size<u, {size_high, 0b1}, opcode, asmop, FPR64,
3824                                       FPR64>;
3825 }
3826
3827 multiclass NeonI_Scalar2SameMisc_D_size<bit u, bits<5> opcode, string asmop> {
3828   def dd: NeonI_Scalar2SameMisc_size<u, 0b11, opcode, asmop, FPR64, FPR64>;
3829 }
3830
3831 multiclass NeonI_Scalar2SameMisc_BHSD_size<bit u, bits<5> opcode, string asmop>
3832   : NeonI_Scalar2SameMisc_D_size<u, opcode, asmop> {
3833   def bb : NeonI_Scalar2SameMisc_size<u, 0b00, opcode, asmop, FPR8, FPR8>;
3834   def hh : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR16, FPR16>;
3835   def ss : NeonI_Scalar2SameMisc_size<u, 0b10, opcode, asmop, FPR32, FPR32>;
3836 }
3837
3838 multiclass NeonI_Scalar2SameMisc_narrow_HSD_size<bit u, bits<5> opcode,
3839                                                  string asmop> {
3840   def bh : NeonI_Scalar2SameMisc_size<u, 0b00, opcode, asmop, FPR8, FPR16>;
3841   def hs : NeonI_Scalar2SameMisc_size<u, 0b01, opcode, asmop, FPR16, FPR32>;
3842   def sd : NeonI_Scalar2SameMisc_size<u, 0b10, opcode, asmop, FPR32, FPR64>;
3843 }
3844
3845 class NeonI_Scalar2SameMisc_accum_size<bit u, bits<2> size, bits<5> opcode,
3846                                        string asmop, RegisterClass FPRC>
3847   : NeonI_Scalar2SameMisc<u, size, opcode,
3848                           (outs FPRC:$Rd), (ins FPRC:$Src, FPRC:$Rn),
3849                           !strconcat(asmop, "\t$Rd, $Rn"),
3850                           [],
3851                           NoItinerary>;
3852
3853 multiclass NeonI_Scalar2SameMisc_accum_BHSD_size<bit u, bits<5> opcode,
3854                                                  string asmop> {
3855
3856   let Constraints = "$Src = $Rd" in {
3857     def bb : NeonI_Scalar2SameMisc_accum_size<u, 0b00, opcode, asmop, FPR8>;
3858     def hh : NeonI_Scalar2SameMisc_accum_size<u, 0b01, opcode, asmop, FPR16>;
3859     def ss : NeonI_Scalar2SameMisc_accum_size<u, 0b10, opcode, asmop, FPR32>;
3860     def dd : NeonI_Scalar2SameMisc_accum_size<u, 0b11, opcode, asmop, FPR64>;
3861   }
3862 }
3863
3864 multiclass Neon_Scalar2SameMisc_cvt_SD_size_patterns<SDPatternOperator Sopnode,
3865                                                      SDPatternOperator Dopnode,
3866                                                      Instruction INSTS,
3867                                                      Instruction INSTD> {
3868   def : Pat<(f32 (Sopnode (v1i32 FPR32:$Rn))),
3869             (INSTS FPR32:$Rn)>;
3870   def : Pat<(f64 (Dopnode (v1i64 FPR64:$Rn))),
3871             (INSTD FPR64:$Rn)>;
3872 }
3873
3874 multiclass Neon_Scalar2SameMisc_SD_size_patterns<SDPatternOperator opnode,
3875                                                  Instruction INSTS,
3876                                                  Instruction INSTD> {
3877   def : Pat<(v1f32 (opnode (v1f32 FPR32:$Rn))),
3878             (INSTS FPR32:$Rn)>;
3879   def : Pat<(v1f64 (opnode (v1f64 FPR64:$Rn))),
3880             (INSTD FPR64:$Rn)>;
3881 }
3882
3883 class NeonI_Scalar2SameMisc_cmpz_D_size<bit u, bits<5> opcode, string asmop>
3884   : NeonI_Scalar2SameMisc<u, 0b11, opcode,
3885                           (outs FPR64:$Rd), (ins FPR64:$Rn, neon_uimm0:$Imm),
3886                           !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
3887                           [],
3888                           NoItinerary>;
3889
3890 multiclass NeonI_Scalar2SameMisc_cmpz_SD_size<bit u, bits<5> opcode,
3891                                               string asmop> {
3892   def ssi : NeonI_Scalar2SameMisc<u, 0b10, opcode,
3893                            (outs FPR32:$Rd), (ins FPR32:$Rn, fpz32:$FPImm),
3894                            !strconcat(asmop, "\t$Rd, $Rn, $FPImm"),
3895                            [],
3896                            NoItinerary>;
3897   def ddi : NeonI_Scalar2SameMisc<u, 0b11, opcode,
3898                            (outs FPR64:$Rd), (ins FPR64:$Rn, fpz64movi:$FPImm),
3899                            !strconcat(asmop, "\t$Rd, $Rn, $FPImm"),
3900                            [],
3901                            NoItinerary>;
3902 }
3903
3904 class Neon_Scalar2SameMisc_cmpz_D_size_patterns<SDPatternOperator opnode,
3905                                                 Instruction INSTD>
3906   : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn),
3907                        (v1i64 (bitconvert (v8i8 Neon_AllZero))))),
3908         (INSTD FPR64:$Rn, 0)>;
3909
3910 multiclass Neon_Scalar2SameMisc_cmpz_SD_size_patterns<SDPatternOperator opnode,
3911                                                       Instruction INSTS,
3912                                                       Instruction INSTD> {
3913   def : Pat<(v1i32 (opnode (v1f32 FPR32:$Rn),
3914                            (v1f32 (scalar_to_vector (f32 fpimm:$FPImm))))),
3915             (INSTS FPR32:$Rn, fpimm:$FPImm)>;
3916   def : Pat<(v1i64 (opnode (v1f64 FPR64:$Rn),
3917                            (v1f64 (bitconvert (v8i8 Neon_AllZero))))),
3918             (INSTD FPR64:$Rn, 0)>;
3919 }
3920
3921 multiclass Neon_Scalar2SameMisc_D_size_patterns<SDPatternOperator opnode,
3922                                                 Instruction INSTD> {
3923   def : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn))),
3924             (INSTD FPR64:$Rn)>;
3925 }
3926
3927 multiclass Neon_Scalar2SameMisc_BHSD_size_patterns<SDPatternOperator opnode,
3928                                                    Instruction INSTB,
3929                                                    Instruction INSTH,
3930                                                    Instruction INSTS,
3931                                                    Instruction INSTD>
3932   : Neon_Scalar2SameMisc_D_size_patterns<opnode, INSTD> {
3933   def : Pat<(v1i8 (opnode (v1i8 FPR8:$Rn))),
3934             (INSTB FPR8:$Rn)>;
3935   def : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn))),
3936             (INSTH FPR16:$Rn)>;
3937   def : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn))),
3938             (INSTS FPR32:$Rn)>;
3939 }
3940
3941 multiclass Neon_Scalar2SameMisc_narrow_HSD_size_patterns<
3942                                                        SDPatternOperator opnode,
3943                                                        Instruction INSTH,
3944                                                        Instruction INSTS,
3945                                                        Instruction INSTD> {
3946   def : Pat<(v1i8 (opnode (v1i16 FPR16:$Rn))),
3947             (INSTH FPR16:$Rn)>;
3948   def : Pat<(v1i16 (opnode (v1i32 FPR32:$Rn))),
3949             (INSTS FPR32:$Rn)>;
3950   def : Pat<(v1i32 (opnode (v1i64 FPR64:$Rn))),
3951             (INSTD FPR64:$Rn)>;
3952
3953 }
3954
3955 multiclass Neon_Scalar2SameMisc_accum_BHSD_size_patterns<
3956                                                        SDPatternOperator opnode,
3957                                                        Instruction INSTB,
3958                                                        Instruction INSTH,
3959                                                        Instruction INSTS,
3960                                                        Instruction INSTD> {
3961   def : Pat<(v1i8 (opnode (v1i8 FPR8:$Src), (v1i8 FPR8:$Rn))),
3962             (INSTB FPR8:$Src, FPR8:$Rn)>;
3963   def : Pat<(v1i16 (opnode (v1i16 FPR16:$Src), (v1i16 FPR16:$Rn))),
3964             (INSTH FPR16:$Src, FPR16:$Rn)>;
3965   def : Pat<(v1i32 (opnode (v1i32 FPR32:$Src), (v1i32 FPR32:$Rn))),
3966             (INSTS FPR32:$Src, FPR32:$Rn)>;
3967   def : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn))),
3968             (INSTD FPR64:$Src, FPR64:$Rn)>;
3969 }
3970
3971 // Scalar Shift By Immediate
3972
3973 class NeonI_ScalarShiftImm_size<bit u, bits<5> opcode, string asmop,
3974                                 RegisterClass FPRC, Operand ImmTy>
3975   : NeonI_ScalarShiftImm<u, opcode,
3976                          (outs FPRC:$Rd), (ins FPRC:$Rn, ImmTy:$Imm),
3977                          !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
3978                          [], NoItinerary>;
3979
3980 multiclass NeonI_ScalarShiftRightImm_D_size<bit u, bits<5> opcode,
3981                                             string asmop> {
3982   def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shr_imm64> {
3983     bits<6> Imm;
3984     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
3985     let Inst{21-16} = Imm;
3986   }
3987 }
3988
3989 multiclass NeonI_ScalarShiftRightImm_BHSD_size<bit u, bits<5> opcode,
3990                                                string asmop>
3991   : NeonI_ScalarShiftRightImm_D_size<u, opcode, asmop> {
3992   def bbi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR8, shr_imm8> {
3993     bits<3> Imm;
3994     let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
3995     let Inst{18-16} = Imm;
3996   }
3997   def hhi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR16, shr_imm16> {
3998     bits<4> Imm;
3999     let Inst{22-20} = 0b001; // immh:immb = 001xxxx
4000     let Inst{19-16} = Imm;
4001   }
4002   def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shr_imm32> {
4003     bits<5> Imm;
4004     let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
4005     let Inst{20-16} = Imm;
4006   }
4007 }
4008
4009 multiclass NeonI_ScalarShiftLeftImm_D_size<bit u, bits<5> opcode,
4010                                             string asmop> {
4011   def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shl_imm64> {
4012     bits<6> Imm;
4013     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4014     let Inst{21-16} = Imm;
4015   }
4016 }
4017
4018 multiclass NeonI_ScalarShiftLeftImm_BHSD_size<bit u, bits<5> opcode,
4019                                               string asmop>
4020   : NeonI_ScalarShiftLeftImm_D_size<u, opcode, asmop> {
4021   def bbi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR8, shl_imm8> {
4022     bits<3> Imm;
4023     let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
4024     let Inst{18-16} = Imm;
4025   }
4026   def hhi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR16, shl_imm16> {
4027     bits<4> Imm;
4028     let Inst{22-20} = 0b001; // immh:immb = 001xxxx
4029     let Inst{19-16} = Imm;
4030   }
4031   def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shl_imm32> {
4032     bits<5> Imm;
4033     let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
4034     let Inst{20-16} = Imm;
4035   }
4036 }
4037
4038 class NeonI_ScalarShiftRightImm_accum_D_size<bit u, bits<5> opcode, string asmop>
4039   : NeonI_ScalarShiftImm<u, opcode,
4040                          (outs FPR64:$Rd), (ins FPR64:$Src, FPR64:$Rn, shr_imm64:$Imm),
4041                          !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4042                          [], NoItinerary> {
4043     bits<6> Imm;
4044     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4045     let Inst{21-16} = Imm;
4046     let Constraints = "$Src = $Rd";
4047 }
4048
4049 class NeonI_ScalarShiftLeftImm_accum_D_size<bit u, bits<5> opcode, string asmop>
4050   : NeonI_ScalarShiftImm<u, opcode,
4051                          (outs FPR64:$Rd), (ins FPR64:$Src, FPR64:$Rn, shl_imm64:$Imm),
4052                          !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4053                          [], NoItinerary> {
4054     bits<6> Imm;
4055     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4056     let Inst{21-16} = Imm;
4057     let Constraints = "$Src = $Rd";
4058 }
4059
4060 class NeonI_ScalarShiftImm_narrow_size<bit u, bits<5> opcode, string asmop,
4061                                        RegisterClass FPRCD, RegisterClass FPRCS,
4062                                        Operand ImmTy>
4063   : NeonI_ScalarShiftImm<u, opcode,
4064                          (outs FPRCD:$Rd), (ins FPRCS:$Rn, ImmTy:$Imm),
4065                          !strconcat(asmop, "\t$Rd, $Rn, $Imm"),
4066                          [], NoItinerary>;
4067
4068 multiclass NeonI_ScalarShiftImm_narrow_HSD_size<bit u, bits<5> opcode,
4069                                                 string asmop> {
4070   def bhi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR8, FPR16,
4071                                              shr_imm8> {
4072     bits<3> Imm;
4073     let Inst{22-19} = 0b0001; // immh:immb = 0001xxx
4074     let Inst{18-16} = Imm;
4075   }
4076   def hsi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR16, FPR32,
4077                                              shr_imm16> {
4078     bits<4> Imm;
4079     let Inst{22-20} = 0b001; // immh:immb = 001xxxx
4080     let Inst{19-16} = Imm;
4081   }
4082   def sdi : NeonI_ScalarShiftImm_narrow_size<u, opcode, asmop, FPR32, FPR64,
4083                                              shr_imm32> {
4084     bits<5> Imm;
4085     let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
4086     let Inst{20-16} = Imm;
4087   }
4088 }
4089
4090 multiclass NeonI_ScalarShiftImm_cvt_SD_size<bit u, bits<5> opcode, string asmop> {
4091   def ssi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR32, shr_imm32> {
4092     bits<5> Imm;
4093     let Inst{22-21} = 0b01; // immh:immb = 01xxxxx
4094     let Inst{20-16} = Imm;
4095   }
4096   def ddi : NeonI_ScalarShiftImm_size<u, opcode, asmop, FPR64, shr_imm64> {
4097     bits<6> Imm;
4098     let Inst{22} = 0b1; // immh:immb = 1xxxxxx
4099     let Inst{21-16} = Imm;
4100   }
4101 }
4102
4103 multiclass Neon_ScalarShiftImm_D_size_patterns<SDPatternOperator opnode,
4104                                                Instruction INSTD> {
4105   def ddi : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (i32 imm:$Imm))),
4106                 (INSTD FPR64:$Rn, imm:$Imm)>;
4107 }
4108
4109 class Neon_ScalarShiftImm_arm_D_size_patterns<SDPatternOperator opnode,
4110                                               Instruction INSTD>
4111   : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 (Neon_vdup (i32 imm:$Imm))))),
4112         (INSTD FPR64:$Rn, imm:$Imm)>;
4113
4114 multiclass Neon_ScalarShiftImm_BHSD_size_patterns<SDPatternOperator opnode,
4115                                                   Instruction INSTB,
4116                                                   Instruction INSTH,
4117                                                   Instruction INSTS,
4118                                                   Instruction INSTD>
4119   : Neon_ScalarShiftImm_D_size_patterns<opnode, INSTD> {
4120   def bbi : Pat<(v1i8 (opnode (v1i8 FPR8:$Rn), (i32 imm:$Imm))),
4121                 (INSTB FPR8:$Rn, imm:$Imm)>;
4122   def hhi : Pat<(v1i16 (opnode (v1i16 FPR16:$Rn), (i32 imm:$Imm))),
4123                 (INSTH FPR16:$Rn, imm:$Imm)>;
4124   def ssi : Pat<(v1i32 (opnode (v1i32 FPR32:$Rn), (i32 imm:$Imm))),
4125                 (INSTS FPR32:$Rn, imm:$Imm)>;
4126 }
4127
4128 class Neon_ScalarShiftImm_accum_D_size_patterns<SDPatternOperator opnode,
4129                                                 Instruction INSTD>
4130   : Pat<(v1i64 (opnode (v1i64 FPR64:$Src), (v1i64 FPR64:$Rn), (i32 imm:$Imm))),
4131         (INSTD FPR64:$Src, FPR64:$Rn, imm:$Imm)>;
4132
4133 multiclass Neon_ScalarShiftImm_narrow_HSD_size_patterns<
4134                                                        SDPatternOperator opnode,
4135                                                        Instruction INSTH,
4136                                                        Instruction INSTS,
4137                                                        Instruction INSTD> {
4138   def bhi : Pat<(v1i8 (opnode (v1i16 FPR16:$Rn), (i32 imm:$Imm))),
4139                 (INSTH FPR16:$Rn, imm:$Imm)>;
4140   def hsi : Pat<(v1i16 (opnode (v1i32 FPR32:$Rn), (i32 imm:$Imm))),
4141                 (INSTS FPR32:$Rn, imm:$Imm)>;
4142   def sdi : Pat<(v1i32 (opnode (v1i64 FPR64:$Rn), (i32 imm:$Imm))),
4143                 (INSTD FPR64:$Rn, imm:$Imm)>;
4144 }
4145
4146 multiclass Neon_ScalarShiftImm_scvtf_SD_size_patterns<SDPatternOperator Sopnode,
4147                                                       SDPatternOperator Dopnode,
4148                                                       Instruction INSTS,
4149                                                       Instruction INSTD> {
4150   def ssi : Pat<(f32 (Sopnode (v1i32 FPR32:$Rn), (i32 imm:$Imm))),
4151                 (INSTS FPR32:$Rn, imm:$Imm)>;
4152   def ddi : Pat<(f64 (Dopnode (v1i64 FPR64:$Rn), (i32 imm:$Imm))),
4153                 (INSTD FPR64:$Rn, imm:$Imm)>;
4154 }
4155
4156 multiclass Neon_ScalarShiftImm_fcvts_SD_size_patterns<SDPatternOperator Sopnode,
4157                                                       SDPatternOperator Dopnode,
4158                                                       Instruction INSTS,
4159                                                       Instruction INSTD> {
4160   def ssi : Pat<(v1i32 (Sopnode (v1f32 FPR32:$Rn), (i32 imm:$Imm))),
4161                 (INSTS FPR32:$Rn, imm:$Imm)>;
4162   def ddi : Pat<(v1i64 (Dopnode (v1f64 FPR64:$Rn), (i32 imm:$Imm))),
4163                 (INSTD FPR64:$Rn, imm:$Imm)>;
4164 }
4165
4166 // Scalar Signed Shift Right (Immediate)
4167 defm SSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00000, "sshr">;
4168 defm : Neon_ScalarShiftImm_D_size_patterns<int_aarch64_neon_vshrds_n, SSHRddi>;
4169 // Pattern to match llvm.arm.* intrinsic.
4170 def : Neon_ScalarShiftImm_arm_D_size_patterns<sra, SSHRddi>;
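// e.g. (sra (v1i64 $Rn), (Neon_vdup (i32 63))) is selected through the
// pattern above to "sshr d0, d1, #63" (register names illustrative).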
4171
4172 // Scalar Unsigned Shift Right (Immediate)
4173 defm USHR : NeonI_ScalarShiftRightImm_D_size<0b1, 0b00000, "ushr">;
4174 defm : Neon_ScalarShiftImm_D_size_patterns<int_aarch64_neon_vshrdu_n, USHRddi>;
4175 // Pattern to match llvm.arm.* intrinsic.
4176 def : Neon_ScalarShiftImm_arm_D_size_patterns<srl, USHRddi>;
4177
4178 // Scalar Signed Rounding Shift Right (Immediate)
4179 defm SRSHR : NeonI_ScalarShiftRightImm_D_size<0b0, 0b00100, "srshr">;
4180 defm : Neon_ScalarShiftImm_D_size_patterns<int_aarch64_neon_vsrshr, SRSHRddi>;
4181
4182 // Scalar Unsigned Rounding Shift Right (Immediate)
4183 defm URSHR : NeonI_ScalarShiftRightImm_D_size<0b1, 0b00100, "urshr">;
4184 defm : Neon_ScalarShiftImm_D_size_patterns<int_aarch64_neon_vurshr, URSHRddi>;
4185
4186 // Scalar Signed Shift Right and Accumulate (Immediate)
4187 def SSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b0, 0b00010, "ssra">;
4188 def : Neon_ScalarShiftImm_accum_D_size_patterns<int_aarch64_neon_vsrads_n, SSRA>;
4189
4190 // Scalar Unsigned Shift Right and Accumulate (Immediate)
4191 def USRA : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b00010, "usra">;
4192 def : Neon_ScalarShiftImm_accum_D_size_patterns<int_aarch64_neon_vsradu_n, USRA>;
4193
4194 // Scalar Signed Rounding Shift Right and Accumulate (Immediate)
4195 def SRSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b0, 0b00110, "srsra">;
4196 def : Neon_ScalarShiftImm_accum_D_size_patterns<int_aarch64_neon_vrsrads_n, SRSRA>;
4197
4198 // Scalar Unsigned Rounding Shift Right and Accumulate (Immediate)
4199 def URSRA : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b00110, "ursra">;
4200 def : Neon_ScalarShiftImm_accum_D_size_patterns<int_aarch64_neon_vrsradu_n, URSRA>;
4201
4202 // Scalar Shift Left (Immediate)
4203 defm SHL : NeonI_ScalarShiftLeftImm_D_size<0b0, 0b01010, "shl">;
4204 defm : Neon_ScalarShiftImm_D_size_patterns<int_aarch64_neon_vshld_n, SHLddi>;
4205 // Pattern to match llvm.arm.* intrinsic.
4206 def : Neon_ScalarShiftImm_arm_D_size_patterns<shl, SHLddi>;
4207
4208 // Signed Saturating Shift Left (Immediate)
4209 defm SQSHL : NeonI_ScalarShiftLeftImm_BHSD_size<0b0, 0b01110, "sqshl">;
4210 defm : Neon_ScalarShiftImm_BHSD_size_patterns<int_aarch64_neon_vqshls_n,
4211                                               SQSHLbbi, SQSHLhhi,
4212                                               SQSHLssi, SQSHLddi>;
4213 // Pattern to match llvm.arm.* intrinsic.
4214 defm : Neon_ScalarShiftImm_D_size_patterns<Neon_sqrshlImm, SQSHLddi>;
4215
4216 // Unsigned Saturating Shift Left (Immediate)
4217 defm UQSHL : NeonI_ScalarShiftLeftImm_BHSD_size<0b1, 0b01110, "uqshl">;
4218 defm : Neon_ScalarShiftImm_BHSD_size_patterns<int_aarch64_neon_vqshlu_n,
4219                                               UQSHLbbi, UQSHLhhi,
4220                                               UQSHLssi, UQSHLddi>;
4221 // Pattern to match llvm.arm.* intrinsic.
4222 defm : Neon_ScalarShiftImm_D_size_patterns<Neon_uqrshlImm, UQSHLddi>;
4223
4224 // Signed Saturating Shift Left Unsigned (Immediate)
4225 defm SQSHLU : NeonI_ScalarShiftLeftImm_BHSD_size<0b1, 0b01100, "sqshlu">;
4226 defm : Neon_ScalarShiftImm_BHSD_size_patterns<int_aarch64_neon_vsqshlu,
4227                                               SQSHLUbbi, SQSHLUhhi,
4228                                               SQSHLUssi, SQSHLUddi>;
4229
4230 // Shift Right And Insert (Immediate)
4231 def SRI : NeonI_ScalarShiftRightImm_accum_D_size<0b1, 0b01000, "sri">;
4232 def : Neon_ScalarShiftImm_accum_D_size_patterns<int_aarch64_neon_vsri, SRI>;
4233
4234 // Shift Left And Insert (Immediate)
4235 def SLI : NeonI_ScalarShiftLeftImm_accum_D_size<0b1, 0b01010, "sli">;
4236 def : Neon_ScalarShiftImm_accum_D_size_patterns<int_aarch64_neon_vsli, SLI>;
4237
4238 // Signed Saturating Shift Right Narrow (Immediate)
4239 defm SQSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b0, 0b10010, "sqshrn">;
4240 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqshrn,
4241                                                     SQSHRNbhi, SQSHRNhsi,
4242                                                     SQSHRNsdi>;
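// For illustration, the narrowing forms shift and saturate into a smaller
// scalar register, e.g. SQSHRNbhi ~ "sqshrn b0, h1, #4" and
// SQSHRNsdi ~ "sqshrn s0, d1, #16" (shift amounts illustrative).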
4243
4244 // Unsigned Saturating Shift Right Narrow (Immediate)
4245 defm UQSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10010, "uqshrn">;
4246 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vuqshrn,
4247                                                     UQSHRNbhi, UQSHRNhsi,
4248                                                     UQSHRNsdi>;
4249
4250 // Signed Saturating Rounded Shift Right Narrow (Immediate)
4251 defm SQRSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b0, 0b10011, "sqrshrn">;
4252 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqrshrn,
4253                                                     SQRSHRNbhi, SQRSHRNhsi,
4254                                                     SQRSHRNsdi>;
4255
4256 // Unsigned Saturating Rounded Shift Right Narrow (Immediate)
4257 defm UQRSHRN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10011, "uqrshrn">;
4258 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vuqrshrn,
4259                                                     UQRSHRNbhi, UQRSHRNhsi,
4260                                                     UQRSHRNsdi>;
4261
4262 // Signed Saturating Shift Right Unsigned Narrow (Immediate)
4263 defm SQSHRUN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10000, "sqshrun">;
4264 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqshrun,
4265                                                     SQSHRUNbhi, SQSHRUNhsi,
4266                                                     SQSHRUNsdi>;
4267
4268 // Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
4269 defm SQRSHRUN : NeonI_ScalarShiftImm_narrow_HSD_size<0b1, 0b10001, "sqrshrun">;
4270 defm : Neon_ScalarShiftImm_narrow_HSD_size_patterns<int_aarch64_neon_vsqrshrun,
4271                                                     SQRSHRUNbhi, SQRSHRUNhsi,
4272                                                     SQRSHRUNsdi>;
4273
4274 // Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
4275 defm SCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11100, "scvtf">;
4276 defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtf32_n_s32,
4277                                                   int_aarch64_neon_vcvtf64_n_s64,
4278                                                   SCVTF_Nssi, SCVTF_Nddi>;
4279
4280 // Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
4281 defm UCVTF_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11100, "ucvtf">;
4282 defm : Neon_ScalarShiftImm_scvtf_SD_size_patterns<int_aarch64_neon_vcvtf32_n_u32,
4283                                                   int_aarch64_neon_vcvtf64_n_u64,
4284                                                   UCVTF_Nssi, UCVTF_Nddi>;
4285
4286 // Scalar Floating-point Convert To Signed Fixed-point (Immediate)
4287 defm FCVTZS_N : NeonI_ScalarShiftImm_cvt_SD_size<0b0, 0b11111, "fcvtzs">;
4288 defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvts_n_s32_f32,
4289                                                   int_aarch64_neon_vcvtd_n_s64_f64,
4290                                                   FCVTZS_Nssi, FCVTZS_Nddi>;
4291
4292 // Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
4293 defm FCVTZU_N : NeonI_ScalarShiftImm_cvt_SD_size<0b1, 0b11111, "fcvtzu">;
4294 defm : Neon_ScalarShiftImm_fcvts_SD_size_patterns<int_aarch64_neon_vcvts_n_u32_f32,
4295                                                   int_aarch64_neon_vcvtd_n_u64_f64,
4296                                                   FCVTZU_Nssi, FCVTZU_Nddi>;
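// For illustration, these fixed-point conversions take a fractional-bits
// immediate, e.g. SCVTF_Nssi ~ "scvtf s0, s1, #16" and
// FCVTZU_Nddi ~ "fcvtzu d0, d1, #8" (values illustrative).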
4297
4298 // Scalar Integer Add
4299 let isCommutable = 1 in {
4300 def ADDddd : NeonI_Scalar3Same_D_size<0b0, 0b10000, "add">;
4301 }
4302
4303 // Scalar Integer Sub
4304 def SUBddd : NeonI_Scalar3Same_D_size<0b1, 0b10000, "sub">;
4305
4306 // Pattern for Scalar Integer Add and Sub with D register only
4307 defm : Neon_Scalar3Same_D_size_patterns<add, ADDddd>;
4308 defm : Neon_Scalar3Same_D_size_patterns<sub, SUBddd>;
4309
4310 // Patterns to match llvm.aarch64.* intrinsic for Scalar Add, Sub
4311 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vaddds, ADDddd>;
4312 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vadddu, ADDddd>;
4313 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vsubds, SUBddd>;
4314 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vsubdu, SUBddd>;
4315
4316 // Scalar Integer Saturating Add (Signed, Unsigned)
4317 defm SQADD : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00001, "sqadd", 1>;
4318 defm UQADD : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00001, "uqadd", 1>;
4319
4320 // Scalar Integer Saturating Sub (Signed, Unsigned)
4321 defm SQSUB : NeonI_Scalar3Same_BHSD_sizes<0b0, 0b00101, "sqsub", 0>;
4322 defm UQSUB : NeonI_Scalar3Same_BHSD_sizes<0b1, 0b00101, "uqsub", 0>;
4323
4324 // Patterns to match llvm.arm.* intrinsic for
4325 // Scalar Integer Saturating Add, Sub  (Signed, Unsigned)
4326 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqadds, SQADDddd>;
4327 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqaddu, UQADDddd>;
4328 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqsubs, SQSUBddd>;
4329 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqsubu, UQSUBddd>;
4330
4331 // Patterns to match llvm.aarch64.* intrinsic for
4332 // Scalar Integer Saturating Add, Sub  (Signed, Unsigned)
4333 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqadds, SQADDbbb,
4334                                            SQADDhhh, SQADDsss, SQADDddd>;
4335 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqaddu, UQADDbbb,
4336                                            UQADDhhh, UQADDsss, UQADDddd>;
4337 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqsubs, SQSUBbbb,
4338                                            SQSUBhhh, SQSUBsss, SQSUBddd>;
4339 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqsubu, UQSUBbbb,
4340                                            UQSUBhhh, UQSUBsss, UQSUBddd>;
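// For illustration, each BHSD pattern above selects the element-sized scalar
// form, e.g. (v1i8 (int_aarch64_neon_vqadds ...)) ~ "sqadd b0, b1, b2" and
// (v1i32 (int_aarch64_neon_vqsubu ...)) ~ "uqsub s0, s1, s2".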
4341
4342 // Scalar Integer Saturating Doubling Multiply Half High
4343 defm SQDMULH : NeonI_Scalar3Same_HS_sizes<0b0, 0b10110, "sqdmulh", 1>;
4344
4345 // Scalar Integer Saturating Rounding Doubling Multiply Half High
4346 defm SQRDMULH : NeonI_Scalar3Same_HS_sizes<0b1, 0b10110, "sqrdmulh", 1>;
4347
4348 // Patterns to match llvm.arm.* intrinsic for
4349 // Scalar Integer Saturating Doubling Multiply Half High and
4350 // Scalar Integer Saturating Rounding Doubling Multiply Half High
4351 defm : Neon_Scalar3Same_HS_size_patterns<int_arm_neon_vqdmulh, SQDMULHhhh,
4352                                                                SQDMULHsss>;
4353 defm : Neon_Scalar3Same_HS_size_patterns<int_arm_neon_vqrdmulh, SQRDMULHhhh,
4354                                                                 SQRDMULHsss>;
4355
4356 // Scalar Floating-point Multiply Extended
4357 defm FMULX : NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11011, "fmulx", 1>;
4358
4359 // Scalar Floating-point Reciprocal Step
4360 defm FRECPS : NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11111, "frecps", 0>;
4361
4362 // Scalar Floating-point Reciprocal Square Root Step
4363 defm FRSQRTS : NeonI_Scalar3Same_SD_sizes<0b0, 0b1, 0b11111, "frsqrts", 0>;
4364
4365 // Patterns to match llvm.arm.* intrinsic for
4366 // Scalar Floating-point Reciprocal Step and
4367 // Scalar Floating-point Reciprocal Square Root Step
4368 defm : Neon_Scalar3Same_SD_size_patterns<int_arm_neon_vrecps, FRECPSsss,
4369                                                               FRECPSddd>;
4370 defm : Neon_Scalar3Same_SD_size_patterns<int_arm_neon_vrsqrts, FRSQRTSsss,
4371                                                                FRSQRTSddd>;
4372
4373 // Patterns to match llvm.aarch64.* intrinsic for
4374 // Scalar Floating-point Multiply Extended,
4375 multiclass Neon_Scalar3Same_MULX_SD_size_patterns<SDPatternOperator opnode,
4376                                                   Instruction INSTS,
4377                                                   Instruction INSTD> {
4378   def : Pat<(f32 (opnode (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
4379             (INSTS FPR32:$Rn, FPR32:$Rm)>;
4380   def : Pat<(f64 (opnode (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
4381             (INSTD FPR64:$Rn, FPR64:$Rm)>;
4382 }
4383
4384 defm : Neon_Scalar3Same_MULX_SD_size_patterns<int_aarch64_neon_vmulx,
4385                                               FMULXsss,FMULXddd>;
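// For illustration, int_aarch64_neon_vmulx on plain f32/f64 operands selects
// the scalar form, e.g. "fmulx s0, s1, s2" or "fmulx d0, d1, d2".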
4386
4387 // Scalar Integer Shift Left (Signed, Unsigned)
4388 def SSHLddd : NeonI_Scalar3Same_D_size<0b0, 0b01000, "sshl">;
4389 def USHLddd : NeonI_Scalar3Same_D_size<0b1, 0b01000, "ushl">;
4390
4391 // Patterns to match llvm.arm.* intrinsic for
4392 // Scalar Integer Shift Left (Signed, Unsigned)
4393 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vshifts, SSHLddd>;
4394 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vshiftu, USHLddd>;
4395
4396 // Patterns to match llvm.aarch64.* intrinsic for
4397 // Scalar Integer Shift Left (Signed, Unsigned)
4398 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vshlds, SSHLddd>;
4399 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vshldu, USHLddd>;
4400
4401 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
4402 defm SQSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01001, "sqshl", 0>;
4403 defm UQSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01001, "uqshl", 0>;
4404
4405 // Patterns to match llvm.aarch64.* intrinsic for
4406 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
4407 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqshls, SQSHLbbb,
4408                                            SQSHLhhh, SQSHLsss, SQSHLddd>;
4409 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqshlu, UQSHLbbb,
4410                                            UQSHLhhh, UQSHLsss, UQSHLddd>;
4411
4412 // Patterns to match llvm.arm.* intrinsic for
4413 // Scalar Integer Saturating Shift Left (Signed, Unsigned)
4414 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqshifts, SQSHLddd>;
4415 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqshiftu, UQSHLddd>;
4416
4417 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
4418 def SRSHLddd: NeonI_Scalar3Same_D_size<0b0, 0b01010, "srshl">;
4419 def URSHLddd: NeonI_Scalar3Same_D_size<0b1, 0b01010, "urshl">;
4420
4421 // Patterns to match llvm.aarch64.* intrinsic for
4422 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
4423 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vrshlds, SRSHLddd>;
4424 defm : Neon_Scalar3Same_D_size_patterns<int_aarch64_neon_vrshldu, URSHLddd>;
4425
4426 // Patterns to match llvm.arm.* intrinsic for
4427 // Scalar Integer Rounding Shift Left (Signed, Unsigned)
4428 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vrshifts, SRSHLddd>;
4429 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vrshiftu, URSHLddd>;
4430
4431 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
4432 defm SQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b0, 0b01011, "sqrshl", 0>;
4433 defm UQRSHL: NeonI_Scalar3Same_BHSD_sizes<0b1, 0b01011, "uqrshl", 0>;
4434
4435 // Patterns to match llvm.aarch64.* intrinsic for
4436 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
4437 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqrshls, SQRSHLbbb,
4438                                            SQRSHLhhh, SQRSHLsss, SQRSHLddd>;
4439 defm : Neon_Scalar3Same_BHSD_size_patterns<int_aarch64_neon_vqrshlu, UQRSHLbbb,
4440                                            UQRSHLhhh, UQRSHLsss, UQRSHLddd>;
4441
4442 // Patterns to match llvm.arm.* intrinsic for
4443 // Scalar Integer Saturating Rounding Shift Left (Signed, Unsigned)
4444 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshifts, SQRSHLddd>;
4445 defm : Neon_Scalar3Same_D_size_patterns<int_arm_neon_vqrshiftu, UQRSHLddd>;
4446
4447 // Signed Saturating Doubling Multiply-Add Long
4448 defm SQDMLAL : NeonI_Scalar3Diff_ml_HS_size<0b0, 0b1001, "sqdmlal">;
4449 defm : Neon_Scalar3Diff_ml_HS_size_patterns<int_aarch64_neon_vqdmlal,
4450                                             SQDMLALshh, SQDMLALdss>;
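// For illustration, the multiply-accumulate forms widen and accumulate into
// the tied destination ($Src = $Rd), e.g. SQDMLALshh ~ "sqdmlal s0, h1, h2"
// and SQDMLALdss ~ "sqdmlal d0, s1, s2".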
4451
4452 // Signed Saturating Doubling Multiply-Subtract Long
4453 defm SQDMLSL : NeonI_Scalar3Diff_ml_HS_size<0b0, 0b1011, "sqdmlsl">;
4454 defm : Neon_Scalar3Diff_ml_HS_size_patterns<int_aarch64_neon_vqdmlsl,
4455                                             SQDMLSLshh, SQDMLSLdss>;
4456
4457 // Signed Saturating Doubling Multiply Long
4458 defm SQDMULL : NeonI_Scalar3Diff_HS_size<0b0, 0b1101, "sqdmull">;
4459 defm : Neon_Scalar3Diff_HS_size_patterns<int_aarch64_neon_vqdmull,
4460                                          SQDMULLshh, SQDMULLdss>;
4461
4462 // Scalar Signed Integer Convert To Floating-point
4463 defm SCVTF  : NeonI_Scalar2SameMisc_SD_size<0b0, 0b0, 0b11101, "scvtf">;
4464 defm : Neon_Scalar2SameMisc_cvt_SD_size_patterns<int_aarch64_neon_vcvtf32_s32,
4465                                                  int_aarch64_neon_vcvtf64_s64,
4466                                                  SCVTFss, SCVTFdd>;
4467
4468 // Scalar Unsigned Integer Convert To Floating-point
4469 defm UCVTF  : NeonI_Scalar2SameMisc_SD_size<0b1, 0b0, 0b11101, "ucvtf">;
4470 defm : Neon_Scalar2SameMisc_cvt_SD_size_patterns<int_aarch64_neon_vcvtf32_u32,
4471                                                  int_aarch64_neon_vcvtf64_u64,
4472                                                  UCVTFss, UCVTFdd>;
4473
4474 // Scalar Floating-point Reciprocal Estimate
4475 defm FRECPE : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11101, "frecpe">;
4476 defm : Neon_Scalar2SameMisc_SD_size_patterns<int_arm_neon_vrecpe,
4477                                              FRECPEss, FRECPEdd>;
4478
4479 // Scalar Floating-point Reciprocal Exponent
4480 defm FRECPX : NeonI_Scalar2SameMisc_SD_size<0b0, 0b1, 0b11111, "frecpx">;
4481 defm : Neon_Scalar2SameMisc_SD_size_patterns<int_aarch64_neon_vrecpx,
4482                                              FRECPXss, FRECPXdd>;
4483
4484 // Scalar Floating-point Reciprocal Square Root Estimate
4485 defm FRSQRTE: NeonI_Scalar2SameMisc_SD_size<0b1, 0b1, 0b11101, "frsqrte">;
4486 defm : Neon_Scalar2SameMisc_SD_size_patterns<int_arm_neon_vrsqrte,
4487                                              FRSQRTEss, FRSQRTEdd>;
4488
4489 // Scalar Integer Compare
4490
4491 // Scalar Compare Bitwise Equal
4492 def CMEQddd: NeonI_Scalar3Same_D_size<0b1, 0b10001, "cmeq">;
4493 def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vceq, CMEQddd>;
4494
4495 class Neon_Scalar3Same_cmp_D_size_v1_patterns<SDPatternOperator opnode,
4496                                               Instruction INSTD,
4497                                               CondCode CC>
4498   : Pat<(v1i64 (opnode (v1i64 FPR64:$Rn), (v1i64 FPR64:$Rm), CC)),
4499         (INSTD FPR64:$Rn, FPR64:$Rm)>;
4500
4501 def : Neon_Scalar3Same_cmp_D_size_v1_patterns<Neon_cmp, CMEQddd, SETEQ>;
4502
4503 // Scalar Compare Signed Greater Than Or Equal
4504 def CMGEddd: NeonI_Scalar3Same_D_size<0b0, 0b00111, "cmge">;
4505 def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vcge, CMGEddd>;
4506
4507 // Scalar Compare Unsigned Higher Or Same
4508 def CMHSddd: NeonI_Scalar3Same_D_size<0b1, 0b00111, "cmhs">;
4509 def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vchs, CMHSddd>;
4510
4511 // Scalar Compare Unsigned Higher
4512 def CMHIddd: NeonI_Scalar3Same_D_size<0b1, 0b00110, "cmhi">;
4513 def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vchi, CMHIddd>;
4514
4515 // Scalar Compare Signed Greater Than
4516 def CMGTddd: NeonI_Scalar3Same_D_size<0b0, 0b00110, "cmgt">;
4517 def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vcgt, CMGTddd>;
4518
4519 // Scalar Compare Bitwise Test Bits
4520 def CMTSTddd: NeonI_Scalar3Same_D_size<0b0, 0b10001, "cmtst">;
4521 def : Neon_Scalar3Same_cmp_D_size_patterns<int_aarch64_neon_vtstd, CMTSTddd>;
4522 def : Neon_Scalar3Same_cmp_D_size_patterns<Neon_tst, CMTSTddd>;
4523
4524 // Scalar Compare Bitwise Equal To Zero
4525 def CMEQddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01001, "cmeq">;
4526 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vceq,
4527                                                 CMEQddi>;
4528
4529 // Scalar Compare Signed Greater Than Or Equal To Zero
4530 def CMGEddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b1, 0b01000, "cmge">;
4531 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcge,
4532                                                 CMGEddi>;
4533
4534 // Scalar Compare Signed Greater Than Zero
4535 def CMGTddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01000, "cmgt">;
4536 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcgt,
4537                                                 CMGTddi>;
4538
4539 // Scalar Compare Signed Less Than Or Equal To Zero
4540 def CMLEddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b1, 0b01001, "cmle">;
4541 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vclez,
4542                                                 CMLEddi>;
4543
4544 // Scalar Compare Signed Less Than Zero
4545 def CMLTddi: NeonI_Scalar2SameMisc_cmpz_D_size<0b0, 0b01010, "cmlt">;
4546 def : Neon_Scalar2SameMisc_cmpz_D_size_patterns<int_aarch64_neon_vcltz,
4547                                                 CMLTddi>;
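// For illustration, the cmpz forms compare against an explicit zero operand,
// e.g. CMGTddi ~ "cmgt d0, d1, #0"; the patterns above match a comparison
// against an all-zero vector (Neon_AllZero) and emit the #0 form.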
4548
4549 // Scalar Floating-point Compare
4550
4551 // Scalar Floating-point Compare Mask Equal
4552 defm FCMEQ: NeonI_Scalar3Same_SD_sizes<0b0, 0b0, 0b11100, "fcmeq">;
4553 defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vceq,
4554                                              FCMEQsss, FCMEQddd>;
4555
4556 // Scalar Floating-point Compare Mask Equal To Zero
4557 defm FCMEQZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01101, "fcmeq">;
4558 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vceq,
4559                                                   FCMEQZssi, FCMEQZddi>;
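// For illustration, the floating-point zero-compare forms take a zero
// immediate operand, e.g. FCMEQZssi ~ "fcmeq s0, s1, #0.0".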
4560
4561 // Scalar Floating-point Compare Mask Greater Than Or Equal
4562 defm FCMGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11100, "fcmge">;
4563 defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcge,
4564                                              FCMGEsss, FCMGEddd>;
4565
4566 // Scalar Floating-point Compare Mask Greater Than Or Equal To Zero
4567 defm FCMGEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01100, "fcmge">;
4568 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vcge,
4569                                                   FCMGEZssi, FCMGEZddi>;
4570
4571 // Scalar Floating-point Compare Mask Greater Than
4572 defm FCMGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11100, "fcmgt">;
4573 defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcgt,
4574                                              FCMGTsss, FCMGTddd>;
4575
4576 // Scalar Floating-point Compare Mask Greater Than Zero
4577 defm FCMGTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01100, "fcmgt">;
4578 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vcgt,
4579                                                   FCMGTZssi, FCMGTZddi>;
4580
4581 // Scalar Floating-point Compare Mask Less Than Or Equal To Zero
4582 defm FCMLEZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b1, 0b01101, "fcmle">;
4583 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vclez,
4584                                                   FCMLEZssi, FCMLEZddi>;
4585
4586 // Scalar Floating-point Compare Mask Less Than Zero
4587 defm FCMLTZ: NeonI_Scalar2SameMisc_cmpz_SD_size<0b0, 0b01110, "fcmlt">;
4588 defm : Neon_Scalar2SameMisc_cmpz_SD_size_patterns<int_aarch64_neon_vcltz,
4589                                                   FCMLTZssi, FCMLTZddi>;
4590
4591 // Scalar Floating-point Absolute Compare Mask Greater Than Or Equal
4592 defm FACGE: NeonI_Scalar3Same_SD_sizes<0b1, 0b0, 0b11101, "facge">;
4593 defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcage,
4594                                              FACGEsss, FACGEddd>;
4595
4596 // Scalar Floating-point Absolute Compare Mask Greater Than
4597 defm FACGT: NeonI_Scalar3Same_SD_sizes<0b1, 0b1, 0b11101, "facgt">;
4598 defm : Neon_Scalar3Same_cmp_SD_size_patterns<int_aarch64_neon_vcagt,
4599                                              FACGTsss, FACGTddd>;
4600
4601 // Scalar Absolute Value
4602 defm ABS : NeonI_Scalar2SameMisc_D_size<0b0, 0b01011, "abs">;
4603 defm : Neon_Scalar2SameMisc_D_size_patterns<int_aarch64_neon_vabs, ABSdd>;
4604
4605 // Scalar Signed Saturating Absolute Value
4606 defm SQABS : NeonI_Scalar2SameMisc_BHSD_size<0b0, 0b00111, "sqabs">;
4607 defm : Neon_Scalar2SameMisc_BHSD_size_patterns<int_arm_neon_vqabs,
4608                                                SQABSbb, SQABShh, SQABSss, SQABSdd>;
4609
4610 // Scalar Negate
4611 defm NEG : NeonI_Scalar2SameMisc_D_size<0b1, 0b01011, "neg">;
4612 defm : Neon_Scalar2SameMisc_D_size_patterns<int_aarch64_neon_vneg, NEGdd>;
4613
4614 // Scalar Signed Saturating Negate
4615 defm SQNEG : NeonI_Scalar2SameMisc_BHSD_size<0b1, 0b00111, "sqneg">;
4616 defm : Neon_Scalar2SameMisc_BHSD_size_patterns<int_arm_neon_vqneg,
4617                                                SQNEGbb, SQNEGhh, SQNEGss, SQNEGdd>;
4618
4619 // Scalar Signed Saturating Accumulate of Unsigned Value
4620 defm SUQADD : NeonI_Scalar2SameMisc_accum_BHSD_size<0b0, 0b00011, "suqadd">;
4621 defm : Neon_Scalar2SameMisc_accum_BHSD_size_patterns<int_aarch64_neon_vuqadd,
4622                                                      SUQADDbb, SUQADDhh,
4623                                                      SUQADDss, SUQADDdd>;
4624
4625 // Scalar Unsigned Saturating Accumulate of Signed Value
4626 defm USQADD : NeonI_Scalar2SameMisc_accum_BHSD_size<0b1, 0b00011, "usqadd">;
4627 defm : Neon_Scalar2SameMisc_accum_BHSD_size_patterns<int_aarch64_neon_vsqadd,
4628                                                      USQADDbb, USQADDhh,
4629                                                      USQADDss, USQADDdd>;
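// For illustration, these accumulate into the tied destination ($Src = $Rd),
// e.g. SUQADDdd ~ "suqadd d0, d1" and USQADDbb ~ "usqadd b0, b1".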
4630
4631 // Scalar Signed Saturating Extract Unsigned Narrow
4632 defm SQXTUN : NeonI_Scalar2SameMisc_narrow_HSD_size<0b1, 0b10010, "sqxtun">;
4633 defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovnsu,
4634                                                      SQXTUNbh, SQXTUNhs,
4635                                                      SQXTUNsd>;
4636
4637 // Scalar Signed Saturating Extract Narrow
4638 defm SQXTN  : NeonI_Scalar2SameMisc_narrow_HSD_size<0b0, 0b10100, "sqxtn">;
4639 defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovns,
4640                                                      SQXTNbh, SQXTNhs,
4641                                                      SQXTNsd>;
4642
4643 // Scalar Unsigned Saturating Extract Narrow
4644 defm UQXTN  : NeonI_Scalar2SameMisc_narrow_HSD_size<0b1, 0b10100, "uqxtn">;
4645 defm : Neon_Scalar2SameMisc_narrow_HSD_size_patterns<int_arm_neon_vqmovnu,
4646                                                      UQXTNbh, UQXTNhs,
4647                                                      UQXTNsd>;
4648
4649 // Scalar Reduce Pairwise
4650
4651 multiclass NeonI_ScalarPair_D_sizes<bit u, bit size, bits<5> opcode,
4652                                      string asmop, bit Commutable = 0> {
4653   let isCommutable = Commutable in {
4654     def _D_2D : NeonI_ScalarPair<u, {size, 0b1}, opcode,
4655                                 (outs FPR64:$Rd), (ins VPR128:$Rn),
4656                                 !strconcat(asmop, "\t$Rd, $Rn.2d"),
4657                                 [],
4658                                 NoItinerary>;
4659   }
4660 }
4661
4662 multiclass NeonI_ScalarPair_SD_sizes<bit u, bit size, bits<5> opcode,
4663                                      string asmop, bit Commutable = 0>
4664   : NeonI_ScalarPair_D_sizes<u, size, opcode, asmop, Commutable> {
4665   let isCommutable = Commutable in {
4666     def _S_2S : NeonI_ScalarPair<u, {size, 0b0}, opcode,
4667                                 (outs FPR32:$Rd), (ins VPR64:$Rn),
4668                                 !strconcat(asmop, "\t$Rd, $Rn.2s"),
4669                                 [],
4670                                 NoItinerary>;
4671   }
4672 }
4673
4674 // Scalar Reduce Addition Pairwise (Integer) with
4675 // Pattern to match llvm.arm.* intrinsic
4676 defm ADDPvv : NeonI_ScalarPair_D_sizes<0b0, 0b1, 0b11011, "addp", 0>;
4677
4678 // Pattern to match llvm.aarch64.* intrinsic for
4679 // Scalar Reduce Addition Pairwise (Integer)
4680 def : Pat<(v1i64 (int_aarch64_neon_vpadd (v2i64 VPR128:$Rn))),
4681           (ADDPvv_D_2D VPR128:$Rn)>;
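
// Illustrative only: the pattern above selects the llvm.aarch64.neon.vpadd
// intrinsic applied to a v2i64 input into the scalar pairwise add, e.g.,
//   addp d0, v1.2d
// The exact mangled intrinsic name in IR is not spelled out here.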
4682
4683 // Scalar Reduce Addition Pairwise (Floating Point)
4684 defm FADDPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01101, "faddp", 0>;
4685
4686 // Scalar Reduce Maximum Pairwise (Floating Point)
4687 defm FMAXPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01111, "fmaxp", 0>;
4688
4689 // Scalar Reduce Minimum Pairwise (Floating Point)
4690 defm FMINPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01111, "fminp", 0>;
4691
4692 // Scalar Reduce maxNum Pairwise (Floating Point)
4693 defm FMAXNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b0, 0b01100, "fmaxnmp", 0>;
4694
4695 // Scalar Reduce minNum Pairwise (Floating Point)
4696 defm FMINNMPvv : NeonI_ScalarPair_SD_sizes<0b1, 0b1, 0b01100, "fminnmp", 0>;
4697
4698 multiclass Neon_ScalarPair_SD_size_patterns<SDPatternOperator opnodeS,
4699                                             SDPatternOperator opnodeD,
4700                                             Instruction INSTS,
4701                                             Instruction INSTD> {
4702   def : Pat<(v1f32 (opnodeS (v2f32 VPR64:$Rn))),
4703             (INSTS VPR64:$Rn)>;
4704   def : Pat<(v1f64 (opnodeD (v2f64 VPR128:$Rn))),
4705             (INSTD VPR128:$Rn)>;
4706 }
4707
4708 // Patterns to match llvm.aarch64.* intrinsic for
4709 // Scalar Reduce Add, Max, Min, MaxNum, MinNum Pairwise (Floating Point)
4710 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfadd,
4711   int_aarch64_neon_vpfaddq, FADDPvv_S_2S, FADDPvv_D_2D>;
4712
4713 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmax,
4714   int_aarch64_neon_vpmaxq, FMAXPvv_S_2S, FMAXPvv_D_2D>;
4715
4716 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpmin,
4717   int_aarch64_neon_vpminq, FMINPvv_S_2S, FMINPvv_D_2D>;
4718
4719 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfmaxnm,
4720   int_aarch64_neon_vpfmaxnmq, FMAXNMPvv_S_2S, FMAXNMPvv_D_2D>;
4721
4722 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfminnm, 
4723   int_aarch64_neon_vpfminnmq, FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
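
// Illustrative syntax only: the scalar floating-point pairwise reductions
// defined above assemble as, e.g.,
//   faddp   s0, v1.2s
//   fmaxp   d0, v1.2d
//   fminnmp s2, v3.2s
// (register numbers are arbitrary examples).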
4724
4725 def neon_uimm0_bare : Operand<i64>,
4726                         ImmLeaf<i64, [{return Imm == 0;}]> {
4727   let ParserMatchClass = neon_uimm0_asmoperand;
4728   let PrintMethod = "printUImmBareOperand";
4729 }
4730
4731 def neon_uimm1_bare : Operand<i64>,
4732                         ImmLeaf<i64, [{return Imm < 2;}]> {
4733   let ParserMatchClass = neon_uimm1_asmoperand;
4734   let PrintMethod = "printUImmBareOperand";
4735 }
4736
4737 def neon_uimm2_bare : Operand<i64>,
4738                         ImmLeaf<i64, [{return Imm < 4;}]> {
4739   let ParserMatchClass = neon_uimm2_asmoperand;
4740   let PrintMethod = "printUImmBareOperand";
4741 }
4742
4743 def neon_uimm3_bare : Operand<i64>,
4744                         ImmLeaf<i64, [{return Imm < 8;}]> {
4745   let ParserMatchClass = uimm3_asmoperand;
4746   let PrintMethod = "printUImmBareOperand";
4747 }
4748
4749 def neon_uimm4_bare : Operand<i64>,
4750                         ImmLeaf<i64, [{return Imm < 16;}]> {
4751   let ParserMatchClass = uimm4_asmoperand;
4752   let PrintMethod = "printUImmBareOperand";
4753 }
4754
4755
4756 // Scalar by element Arithmetic
4757
4758 class NeonI_ScalarXIndexedElemArith<string asmop, bits<4> opcode,
4759                                     string rmlane, bit u, bit szhi, bit szlo,
4760                                     RegisterClass ResFPR, RegisterClass OpFPR,
4761                                     RegisterOperand OpVPR, Operand OpImm>
4762   : NeonI_ScalarXIndexedElem<u, szhi, szlo, opcode,
4763                              (outs ResFPR:$Rd),
4764                              (ins OpFPR:$Rn, OpVPR:$MRm, OpImm:$Imm),
4765                              asmop # "\t$Rd, $Rn, $MRm" # rmlane # "[$Imm]",
4766                              [],
4767                              NoItinerary> {
4768   bits<3> Imm;
4769   bits<5> MRm;
4770 }
4771
4772 class NeonI_ScalarXIndexedElemArith_Constraint_Impl<string asmop, bits<4> opcode,
4773                                                     string rmlane,
4774                                                     bit u, bit szhi, bit szlo,
4775                                                     RegisterClass ResFPR,
4776                                                     RegisterClass OpFPR,
4777                                                     RegisterOperand OpVPR,
4778                                                     Operand OpImm>
4779   : NeonI_ScalarXIndexedElem<u, szhi, szlo, opcode,
4780                              (outs ResFPR:$Rd),
4781                              (ins ResFPR:$src, OpFPR:$Rn, OpVPR:$MRm, OpImm:$Imm),
4782                              asmop # "\t$Rd, $Rn, $MRm" # rmlane # "[$Imm]",
4783                              [],
4784                              NoItinerary> {
4785   let Constraints = "$src = $Rd";
4786   bits<3> Imm;
4787   bits<5> MRm;
4788 }
4789
4790 // Scalar Floating Point multiply (scalar, by element)
4791 def FMULssv_4S : NeonI_ScalarXIndexedElemArith<"fmul",
4792   0b1001, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
4793   let Inst{11} = Imm{1}; // h
4794   let Inst{21} = Imm{0}; // l
4795   let Inst{20-16} = MRm;
4796 }
4797 def FMULddv_2D : NeonI_ScalarXIndexedElemArith<"fmul",
4798   0b1001, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
4799   let Inst{11} = Imm{0}; // h
4800   let Inst{21} = 0b0;    // l
4801   let Inst{20-16} = MRm;
4802 }
4803
4804 // Scalar Floating Point multiply extended (scalar, by element)
4805 def FMULXssv_4S : NeonI_ScalarXIndexedElemArith<"fmulx",
4806   0b1001, ".s", 0b1, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
4807   let Inst{11} = Imm{1}; // h
4808   let Inst{21} = Imm{0}; // l
4809   let Inst{20-16} = MRm;
4810 }
4811 def FMULXddv_2D : NeonI_ScalarXIndexedElemArith<"fmulx",
4812   0b1001, ".d", 0b1, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
4813   let Inst{11} = Imm{0}; // h
4814   let Inst{21} = 0b0;    // l
4815   let Inst{20-16} = MRm;
4816 }
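
// Illustrative syntax only: the scalar by-element multiply forms above use a
// 128-bit vector for the element operand, e.g.,
//   fmul  s0, s1, v2.s[3]
//   fmulx d0, d1, v2.d[1]
// (register and lane numbers are arbitrary examples).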
4817
4818 multiclass Neon_ScalarXIndexedElem_MUL_MULX_Patterns<
4819   SDPatternOperator opnode,
4820   Instruction INST,
4821   ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
4822   ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
4823
4824   def  : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
4825                (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)))),
4826              (ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
4827
4828   def  : Pat<(ResTy (opnode (ResTy FPRC:$Rn),
4829                (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)))),
4830              (ResTy (INST (ResTy FPRC:$Rn),
4831                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
4832                OpNImm:$Imm))>;
4833
4834   // swapped operands
4835   def  : Pat<(ResTy (opnode
4836                (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
4837                (ResTy FPRC:$Rn))),
4838              (ResTy (INST (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
4839
4840   def  : Pat<(ResTy (opnode
4841                (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
4842                (ResTy FPRC:$Rn))),
4843              (ResTy (INST (ResTy FPRC:$Rn),
4844                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
4845                OpNImm:$Imm))>;
4846 }
4847
4848 // Patterns for Scalar Floating Point multiply (scalar, by element)
4849 defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULssv_4S,
4850   f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
4851 defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<fmul, FMULddv_2D,
4852   f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
4853
4854 // Patterns for Scalar Floating Point multiply extended (scalar, by element)
4855 defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
4856   FMULXssv_4S, f32, FPR32, v4f32, neon_uimm2_bare,
4857   v2f32, v4f32, neon_uimm1_bare>;
4858 defm : Neon_ScalarXIndexedElem_MUL_MULX_Patterns<int_aarch64_neon_vmulx,
4859   FMULXddv_2D, f64, FPR64, v2f64, neon_uimm1_bare,
4860   v1f64, v2f64, neon_uimm0_bare>;
4861
4862
4863 // Scalar Floating Point fused multiply-add (scalar, by element)
4864 def FMLAssv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmla",
4865   0b0001, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
4866   let Inst{11} = Imm{1}; // h
4867   let Inst{21} = Imm{0}; // l
4868   let Inst{20-16} = MRm;
4869 }
4870 def FMLAddv_2D : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmla",
4871   0b0001, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
4872   let Inst{11} = Imm{0}; // h
4873   let Inst{21} = 0b0;    // l
4874   let Inst{20-16} = MRm;
4875 }
4876
4877 // Scalar Floating Point fused multiply-subtract (scalar, by element)
4878 def FMLSssv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmls",
4879   0b0101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
4880   let Inst{11} = Imm{1}; // h
4881   let Inst{21} = Imm{0}; // l
4882   let Inst{20-16} = MRm;
4883 }
4884 def FMLSddv_2D : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"fmls",
4885   0b0101, ".d", 0b0, 0b1, 0b1, FPR64, FPR64, VPR128, neon_uimm1_bare> {
4886   let Inst{11} = Imm{0}; // h
4887   let Inst{21} = 0b0;    // l
4888   let Inst{20-16} = MRm;
4889 }
4890 // We are allowed to match the fma instruction regardless of compile options.
4891 multiclass Neon_ScalarXIndexedElem_FMA_Patterns<
4892   Instruction FMLAI, Instruction FMLSI,
4893   ValueType ResTy, RegisterClass FPRC, ValueType OpTy, Operand OpImm,
4894   ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
4895   // fmla
4896   def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
4897                (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
4898                (ResTy FPRC:$Ra))),
4899              (ResTy (FMLAI (ResTy FPRC:$Ra),
4900                (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
4901
4902   def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
4903                (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
4904                (ResTy FPRC:$Ra))),
4905              (ResTy (FMLAI (ResTy FPRC:$Ra),
4906                (ResTy FPRC:$Rn),
4907                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
4908                OpNImm:$Imm))>;
4909
4910   // swapped fmla operands
4911   def  : Pat<(ResTy (fma
4912                (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm)),
4913                (ResTy FPRC:$Rn),
4914                (ResTy FPRC:$Ra))),
4915              (ResTy (FMLAI (ResTy FPRC:$Ra),
4916                (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
4917
4918   def  : Pat<(ResTy (fma
4919                (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm)),
4920                (ResTy FPRC:$Rn),
4921                (ResTy FPRC:$Ra))),
4922              (ResTy (FMLAI (ResTy FPRC:$Ra),
4923                (ResTy FPRC:$Rn),
4924                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
4925                OpNImm:$Imm))>;
4926
4927   // fmls
4928   def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
4929                (fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
4930                (ResTy FPRC:$Ra))),
4931              (ResTy (FMLSI (ResTy FPRC:$Ra),
4932                (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
4933
4934   def  : Pat<(ResTy (fma (ResTy FPRC:$Rn),
4935                (fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
4936                (ResTy FPRC:$Ra))),
4937              (ResTy (FMLSI (ResTy FPRC:$Ra),
4938                (ResTy FPRC:$Rn),
4939                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
4940                OpNImm:$Imm))>;
4941
4942   // swapped fmls operands
4943   def  : Pat<(ResTy (fma
4944                (fneg (ResTy (vector_extract (OpTy VPR128:$MRm), OpImm:$Imm))),
4945                (ResTy FPRC:$Rn),
4946                (ResTy FPRC:$Ra))),
4947              (ResTy (FMLSI (ResTy FPRC:$Ra),
4948                (ResTy FPRC:$Rn), (OpTy VPR128:$MRm), OpImm:$Imm))>;
4949
4950   def  : Pat<(ResTy (fma
4951                (fneg (ResTy (vector_extract (OpNTy VPR64:$MRm), OpNImm:$Imm))),
4952                (ResTy FPRC:$Rn),
4953                (ResTy FPRC:$Ra))),
4954              (ResTy (FMLSI (ResTy FPRC:$Ra),
4955                (ResTy FPRC:$Rn),
4956                (ExTy (SUBREG_TO_REG (i64 0), VPR64:$MRm, sub_64)),
4957                OpNImm:$Imm))>;
4958 }
4959
4960 // Scalar Floating Point fused multiply-add and multiply-subtract (scalar, by element)
4961 defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAssv_4S, FMLSssv_4S,
4962   f32, FPR32, v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
4963 defm : Neon_ScalarXIndexedElem_FMA_Patterns<FMLAddv_2D, FMLSddv_2D,
4964   f64, FPR64, v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
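
// Illustrative only: these patterns match a scalar fma whose second operand
// is extracted from a vector lane, e.g. (assuming standard IR spellings)
//   %e = extractelement <4 x float> %v, i32 3
//   %r = call float @llvm.fma.f32(float %a, float %e, float %acc)
// which selects to "fmla s0, s1, v2.s[3]"; an fneg on the extracted element
// selects the fmls form instead.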
4967
4968 // Scalar Signed saturating doubling multiply-add long (scalar, by element)
4969 def SQDMLALshv_4H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
4970   0b0011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
4971   let Inst{11} = 0b0; // h
4972   let Inst{21} = Imm{1}; // l
4973   let Inst{20} = Imm{0}; // m
4974   let Inst{19-16} = MRm{3-0};
4975 }
4976 def SQDMLALshv_8H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
4977   0b0011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
4978   let Inst{11} = Imm{2}; // h
4979   let Inst{21} = Imm{1}; // l
4980   let Inst{20} = Imm{0}; // m
4981   let Inst{19-16} = MRm{3-0};
4982 }
4983 def SQDMLALdsv_2S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
4984   0b0011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
4985   let Inst{11} = 0b0;    // h
4986   let Inst{21} = Imm{0}; // l
4987   let Inst{20-16} = MRm;
4988 }
4989 def SQDMLALdsv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlal",
4990   0b0011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
4991   let Inst{11} = Imm{1};    // h
4992   let Inst{21} = Imm{0};    // l
4993   let Inst{20-16} = MRm;
4994 }
4995
4996 // Scalar Signed saturating doubling
4997 // multiply-subtract long (scalar, by element)
4998 def SQDMLSLshv_4H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
4999   0b0111, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
5000   let Inst{11} = 0b0; // h
5001   let Inst{21} = Imm{1}; // l
5002   let Inst{20} = Imm{0}; // m
5003   let Inst{19-16} = MRm{3-0};
5004 }
5005 def SQDMLSLshv_8H : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
5006   0b0111, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
5007   let Inst{11} = Imm{2}; // h
5008   let Inst{21} = Imm{1}; // l
5009   let Inst{20} = Imm{0}; // m
5010   let Inst{19-16} = MRm{3-0};
5011 }
5012 def SQDMLSLdsv_2S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
5013   0b0111, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
5014   let Inst{11} = 0b0;    // h
5015   let Inst{21} = Imm{0}; // l
5016   let Inst{20-16} = MRm;
5017 }
5018 def SQDMLSLdsv_4S : NeonI_ScalarXIndexedElemArith_Constraint_Impl<"sqdmlsl",
5019   0b0111, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
5020   let Inst{11} = Imm{1};    // h
5021   let Inst{21} = Imm{0};    // l
5022   let Inst{20-16} = MRm;
5023 }
5024
5025 // Scalar Signed saturating doubling multiply long (scalar, by element)
5026 def SQDMULLshv_4H : NeonI_ScalarXIndexedElemArith<"sqdmull",
5027   0b1011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR64Lo, neon_uimm2_bare> {
5028   let Inst{11} = 0b0; // h
5029   let Inst{21} = Imm{1}; // l
5030   let Inst{20} = Imm{0}; // m
5031   let Inst{19-16} = MRm{3-0};
5032 }
5033 def SQDMULLshv_8H : NeonI_ScalarXIndexedElemArith<"sqdmull",
5034   0b1011, ".h", 0b0, 0b0, 0b1, FPR32, FPR16, VPR128Lo, neon_uimm3_bare> {
5035   let Inst{11} = Imm{2}; // h
5036   let Inst{21} = Imm{1}; // l
5037   let Inst{20} = Imm{0}; // m
5038   let Inst{19-16} = MRm{3-0};
5039 }
5040 def SQDMULLdsv_2S : NeonI_ScalarXIndexedElemArith<"sqdmull",
5041   0b1011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR64, neon_uimm1_bare> {
5042   let Inst{11} = 0b0;    // h
5043   let Inst{21} = Imm{0}; // l
5044   let Inst{20-16} = MRm;
5045 }
5046 def SQDMULLdsv_4S : NeonI_ScalarXIndexedElemArith<"sqdmull",
5047   0b1011, ".s", 0b0, 0b1, 0b0, FPR64, FPR32, VPR128, neon_uimm2_bare> {
5048   let Inst{11} = Imm{1};    // h
5049   let Inst{21} = Imm{0};    // l
5050   let Inst{20-16} = MRm;
5051 }
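
// Illustrative syntax only: the saturating doubling multiply (accumulate)
// long by-element forms above widen the result, e.g.,
//   sqdmlal s0, h1, v2.h[7]
//   sqdmlsl d0, s1, v2.s[3]
//   sqdmull d4, s5, v6.s[1]
// (for h elements the vector holding the lane is restricted to v0-v15).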
5052
5053 // Scalar Signed saturating doubling multiply returning
5054 // high half (scalar, by element)
5055 def SQDMULHhhv_4H : NeonI_ScalarXIndexedElemArith<"sqdmulh",
5056   0b1100, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR64Lo, neon_uimm2_bare> {
5057   let Inst{11} = 0b0; // h
5058   let Inst{21} = Imm{1}; // l
5059   let Inst{20} = Imm{0}; // m
5060   let Inst{19-16} = MRm{3-0};
5061 }
5062 def SQDMULHhhv_8H : NeonI_ScalarXIndexedElemArith<"sqdmulh",
5063   0b1100, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR128Lo, neon_uimm3_bare> {
5064   let Inst{11} = Imm{2}; // h
5065   let Inst{21} = Imm{1}; // l
5066   let Inst{20} = Imm{0}; // m
5067   let Inst{19-16} = MRm{3-0};
5068 }
5069 def SQDMULHssv_2S : NeonI_ScalarXIndexedElemArith<"sqdmulh",
5070   0b1100, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR64, neon_uimm1_bare> {
5071   let Inst{11} = 0b0;    // h
5072   let Inst{21} = Imm{0}; // l
5073   let Inst{20-16} = MRm;
5074 }
5075 def SQDMULHssv_4S : NeonI_ScalarXIndexedElemArith<"sqdmulh",
5076   0b1100, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
5077   let Inst{11} = Imm{1};    // h
5078   let Inst{21} = Imm{0};    // l
5079   let Inst{20-16} = MRm;
5080 }
5081
5082 // Scalar Signed saturating rounding doubling multiply
5083 // returning high half (scalar, by element)
5084 def SQRDMULHhhv_4H : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
5085   0b1101, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR64Lo, neon_uimm2_bare> {
5086   let Inst{11} = 0b0; // h
5087   let Inst{21} = Imm{1}; // l
5088   let Inst{20} = Imm{0}; // m
5089   let Inst{19-16} = MRm{3-0};
5090 }
5091 def SQRDMULHhhv_8H : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
5092   0b1101, ".h", 0b0, 0b0, 0b1, FPR16, FPR16, VPR128Lo, neon_uimm3_bare> {
5093   let Inst{11} = Imm{2}; // h
5094   let Inst{21} = Imm{1}; // l
5095   let Inst{20} = Imm{0}; // m
5096   let Inst{19-16} = MRm{3-0};
5097 }
5098 def SQRDMULHssv_2S : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
5099   0b1101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR64, neon_uimm1_bare> {
5100   let Inst{11} = 0b0;    // h
5101   let Inst{21} = Imm{0}; // l
5102   let Inst{20-16} = MRm;
5103 }
5104 def SQRDMULHssv_4S : NeonI_ScalarXIndexedElemArith<"sqrdmulh",
5105   0b1101, ".s", 0b0, 0b1, 0b0, FPR32, FPR32, VPR128, neon_uimm2_bare> {
5106   let Inst{11} = Imm{1};    // h
5107   let Inst{21} = Imm{0};    // l
5108   let Inst{20-16} = MRm;
5109 }
5110
5111
5112 // Scalar Copy - DUP element to scalar
5113 class NeonI_Scalar_DUP<string asmop, string asmlane,
5114                        RegisterClass ResRC, RegisterOperand VPRC,
5115                        Operand OpImm>
5116   : NeonI_ScalarCopy<(outs ResRC:$Rd), (ins VPRC:$Rn, OpImm:$Imm),
5117                      asmop # "\t$Rd, $Rn." # asmlane # "[$Imm]",
5118                      [],
5119                      NoItinerary> {
5120   bits<4> Imm;
5121 }
5122
5123 def DUPbv_B : NeonI_Scalar_DUP<"dup", "b", FPR8, VPR128, neon_uimm4_bare> {
5124   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
5125 }
5126 def DUPhv_H : NeonI_Scalar_DUP<"dup", "h", FPR16, VPR128, neon_uimm3_bare> {
5127   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
5128 }
5129 def DUPsv_S : NeonI_Scalar_DUP<"dup", "s", FPR32, VPR128, neon_uimm2_bare> {
5130   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
5131 }
5132 def DUPdv_D : NeonI_Scalar_DUP<"dup", "d", FPR64, VPR128, neon_uimm1_bare> {
5133   let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
5134 }
5135
5136 multiclass NeonI_Scalar_DUP_Elt_pattern<Instruction DUPI, ValueType ResTy,
5137   ValueType OpTy, Operand OpImm,
5138   ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
5139
5140   def  : Pat<(ResTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)),
5141              (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
5142
5143   def : Pat<(ResTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)),
5144             (ResTy (DUPI
5145               (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
5146                 OpNImm:$Imm))>;
5147 }
5148
5149 // Patterns for vector extract of FP data using scalar DUP instructions
5150 defm : NeonI_Scalar_DUP_Elt_pattern<DUPsv_S, f32,
5151   v4f32, neon_uimm2_bare, v2f32, v4f32, neon_uimm1_bare>;
5152 defm : NeonI_Scalar_DUP_Elt_pattern<DUPdv_D, f64,
5153   v2f64, neon_uimm1_bare, v1f64, v2f64, neon_uimm0_bare>;
5154
5155 multiclass NeonI_Scalar_DUP_alias<string asmop, string asmlane,
5156                                   Instruction DUPI, Operand OpImm,
5157                                   RegisterClass ResRC> {
5158   def : NeonInstAlias<!strconcat(asmop, "$Rd, $Rn." # asmlane # "[$Imm]"),
5159           (DUPI ResRC:$Rd, VPR128:$Rn, OpImm:$Imm), 0b0>;
5160 }
5161
5162 // Aliases for Scalar copy - DUP element (scalar)
5163 // FIXME: This is actually the preferred syntax but TableGen can't deal with
5164 // custom printing of aliases.
5165 defm : NeonI_Scalar_DUP_alias<"mov", ".b", DUPbv_B, neon_uimm4_bare, FPR8>;
5166 defm : NeonI_Scalar_DUP_alias<"mov", ".h", DUPhv_H, neon_uimm3_bare, FPR16>;
5167 defm : NeonI_Scalar_DUP_alias<"mov", ".s", DUPsv_S, neon_uimm2_bare, FPR32>;
5168 defm : NeonI_Scalar_DUP_alias<"mov", ".d", DUPdv_D, neon_uimm1_bare, FPR64>;
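
// Illustrative syntax only: the scalar DUP forms and their mov aliases above
// read a single lane into a scalar register, e.g.,
//   dup b0, v1.b[15]
//   dup s0, v1.s[2]
//   mov d0, v1.d[1]      // alias for dup d0, v1.d[1]
// (register and lane numbers are arbitrary examples).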
5169
5170
5171 //===----------------------------------------------------------------------===//
5172 // Non-Instruction Patterns
5173 //===----------------------------------------------------------------------===//
5174
5175 // 64-bit vector bitcasts...
5176
5177 def : Pat<(v1i64 (bitconvert (v8i8  VPR64:$src))), (v1i64 VPR64:$src)>;
5178 def : Pat<(v2f32 (bitconvert (v8i8  VPR64:$src))), (v2f32 VPR64:$src)>;
5179 def : Pat<(v2i32 (bitconvert (v8i8  VPR64:$src))), (v2i32 VPR64:$src)>;
5180 def : Pat<(v4i16 (bitconvert (v8i8  VPR64:$src))), (v4i16 VPR64:$src)>;
5181
5182 def : Pat<(v1i64 (bitconvert (v4i16  VPR64:$src))), (v1i64 VPR64:$src)>;
5183 def : Pat<(v2i32 (bitconvert (v4i16  VPR64:$src))), (v2i32 VPR64:$src)>;
5184 def : Pat<(v2f32 (bitconvert (v4i16  VPR64:$src))), (v2f32 VPR64:$src)>;
5185 def : Pat<(v8i8  (bitconvert (v4i16  VPR64:$src))), (v8i8 VPR64:$src)>;
5186
5187 def : Pat<(v1i64 (bitconvert (v2i32  VPR64:$src))), (v1i64 VPR64:$src)>;
5188 def : Pat<(v2f32 (bitconvert (v2i32  VPR64:$src))), (v2f32 VPR64:$src)>;
5189 def : Pat<(v4i16 (bitconvert (v2i32  VPR64:$src))), (v4i16 VPR64:$src)>;
5190 def : Pat<(v8i8  (bitconvert (v2i32  VPR64:$src))), (v8i8 VPR64:$src)>;
5191
5192 def : Pat<(v1i64 (bitconvert (v2f32  VPR64:$src))), (v1i64 VPR64:$src)>;
5193 def : Pat<(v2i32 (bitconvert (v2f32  VPR64:$src))), (v2i32 VPR64:$src)>;
5194 def : Pat<(v4i16 (bitconvert (v2f32  VPR64:$src))), (v4i16 VPR64:$src)>;
5195 def : Pat<(v8i8  (bitconvert (v2f32  VPR64:$src))), (v8i8 VPR64:$src)>;
5196
5197 def : Pat<(v2f32 (bitconvert (v1i64  VPR64:$src))), (v2f32 VPR64:$src)>;
5198 def : Pat<(v2i32 (bitconvert (v1i64  VPR64:$src))), (v2i32 VPR64:$src)>;
5199 def : Pat<(v4i16 (bitconvert (v1i64  VPR64:$src))), (v4i16 VPR64:$src)>;
5200 def : Pat<(v8i8  (bitconvert (v1i64  VPR64:$src))), (v8i8 VPR64:$src)>;
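
// Illustrative only: these patterns make same-size vector bitcasts free by
// reusing the source register, e.g. (standard IR spelling assumed)
//   %r = bitcast <8 x i8> %v to <2 x i32>
// selects to no instruction at all; the 64-bit register is simply reused.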
5201
5202 // ...and 128-bit vector bitcasts...
5203
5204 def : Pat<(v2f64 (bitconvert (v16i8  VPR128:$src))), (v2f64 VPR128:$src)>;
5205 def : Pat<(v2i64 (bitconvert (v16i8  VPR128:$src))), (v2i64 VPR128:$src)>;
5206 def : Pat<(v4f32 (bitconvert (v16i8  VPR128:$src))), (v4f32 VPR128:$src)>;
5207 def : Pat<(v4i32 (bitconvert (v16i8  VPR128:$src))), (v4i32 VPR128:$src)>;
5208 def : Pat<(v8i16 (bitconvert (v16i8  VPR128:$src))), (v8i16 VPR128:$src)>;
5209
5210 def : Pat<(v2f64 (bitconvert (v8i16  VPR128:$src))), (v2f64 VPR128:$src)>;
5211 def : Pat<(v2i64 (bitconvert (v8i16  VPR128:$src))), (v2i64 VPR128:$src)>;
5212 def : Pat<(v4i32 (bitconvert (v8i16  VPR128:$src))), (v4i32 VPR128:$src)>;
5213 def : Pat<(v4f32 (bitconvert (v8i16  VPR128:$src))), (v4f32 VPR128:$src)>;
5214 def : Pat<(v16i8 (bitconvert (v8i16  VPR128:$src))), (v16i8 VPR128:$src)>;
5215
5216 def : Pat<(v2f64 (bitconvert (v4i32  VPR128:$src))), (v2f64 VPR128:$src)>;
5217 def : Pat<(v2i64 (bitconvert (v4i32  VPR128:$src))), (v2i64 VPR128:$src)>;
5218 def : Pat<(v4f32 (bitconvert (v4i32  VPR128:$src))), (v4f32 VPR128:$src)>;
5219 def : Pat<(v8i16 (bitconvert (v4i32  VPR128:$src))), (v8i16 VPR128:$src)>;
5220 def : Pat<(v16i8 (bitconvert (v4i32  VPR128:$src))), (v16i8 VPR128:$src)>;
5221
5222 def : Pat<(v2f64 (bitconvert (v4f32  VPR128:$src))), (v2f64 VPR128:$src)>;
5223 def : Pat<(v2i64 (bitconvert (v4f32  VPR128:$src))), (v2i64 VPR128:$src)>;
5224 def : Pat<(v4i32 (bitconvert (v4f32  VPR128:$src))), (v4i32 VPR128:$src)>;
5225 def : Pat<(v8i16 (bitconvert (v4f32  VPR128:$src))), (v8i16 VPR128:$src)>;
5226 def : Pat<(v16i8 (bitconvert (v4f32  VPR128:$src))), (v16i8 VPR128:$src)>;
5227
5228 def : Pat<(v2f64 (bitconvert (v2i64  VPR128:$src))), (v2f64 VPR128:$src)>;
5229 def : Pat<(v4f32 (bitconvert (v2i64  VPR128:$src))), (v4f32 VPR128:$src)>;
5230 def : Pat<(v4i32 (bitconvert (v2i64  VPR128:$src))), (v4i32 VPR128:$src)>;
5231 def : Pat<(v8i16 (bitconvert (v2i64  VPR128:$src))), (v8i16 VPR128:$src)>;
5232 def : Pat<(v16i8 (bitconvert (v2i64  VPR128:$src))), (v16i8 VPR128:$src)>;
5233
5234 def : Pat<(v2i64 (bitconvert (v2f64  VPR128:$src))), (v2i64 VPR128:$src)>;
5235 def : Pat<(v4f32 (bitconvert (v2f64  VPR128:$src))), (v4f32 VPR128:$src)>;
5236 def : Pat<(v4i32 (bitconvert (v2f64  VPR128:$src))), (v4i32 VPR128:$src)>;
5237 def : Pat<(v8i16 (bitconvert (v2f64  VPR128:$src))), (v8i16 VPR128:$src)>;
5238 def : Pat<(v16i8 (bitconvert (v2f64  VPR128:$src))), (v16i8 VPR128:$src)>;
5239
5240
5241 // ...and scalar bitcasts...
5242 def : Pat<(f16 (bitconvert (v1i16  FPR16:$src))), (f16 FPR16:$src)>;
5243 def : Pat<(f32 (bitconvert (v1i32  FPR32:$src))), (f32 FPR32:$src)>;
5244 def : Pat<(f64 (bitconvert (v1i64  FPR64:$src))), (f64 FPR64:$src)>;
5245 def : Pat<(f32 (bitconvert (v1f32  FPR32:$src))), (f32 FPR32:$src)>;
5246 def : Pat<(f64 (bitconvert (v1f64  FPR64:$src))), (f64 FPR64:$src)>;
5247
5248 def : Pat<(i64 (bitconvert (v1i64  FPR64:$src))), (FMOVxd $src)>;
5249 def : Pat<(i32 (bitconvert (v1i32  FPR32:$src))), (FMOVws $src)>;
5250
5251 def : Pat<(v8i8  (bitconvert (v1i64  VPR64:$src))), (v8i8 VPR64:$src)>;
5252 def : Pat<(v4i16 (bitconvert (v1i64  VPR64:$src))), (v4i16 VPR64:$src)>;
5253 def : Pat<(v2i32 (bitconvert (v1i64  VPR64:$src))), (v2i32 VPR64:$src)>;
5254
5255 def : Pat<(f64   (bitconvert (v8i8  VPR64:$src))), (f64 VPR64:$src)>;
5256 def : Pat<(f64   (bitconvert (v4i16  VPR64:$src))), (f64 VPR64:$src)>;
5257 def : Pat<(f64   (bitconvert (v2i32  VPR64:$src))), (f64 VPR64:$src)>;
5258 def : Pat<(f64   (bitconvert (v2f32  VPR64:$src))), (f64 VPR64:$src)>;
5259 def : Pat<(f64   (bitconvert (v1i64  VPR64:$src))), (f64 VPR64:$src)>;
5260
5261 def : Pat<(f128  (bitconvert (v16i8  VPR128:$src))), (f128 VPR128:$src)>;
5262 def : Pat<(f128  (bitconvert (v8i16  VPR128:$src))), (f128 VPR128:$src)>;
5263 def : Pat<(f128  (bitconvert (v4i32  VPR128:$src))), (f128 VPR128:$src)>;
5264 def : Pat<(f128  (bitconvert (v2i64  VPR128:$src))), (f128 VPR128:$src)>;
5265 def : Pat<(f128  (bitconvert (v4f32  VPR128:$src))), (f128 VPR128:$src)>;
5266 def : Pat<(f128  (bitconvert (v2f64  VPR128:$src))), (f128 VPR128:$src)>;
5267
5268 def : Pat<(v1i16 (bitconvert (f16  FPR16:$src))), (v1i16 FPR16:$src)>;
5269 def : Pat<(v1i32 (bitconvert (f32  FPR32:$src))), (v1i32 FPR32:$src)>;
5270 def : Pat<(v1i64 (bitconvert (f64  FPR64:$src))), (v1i64 FPR64:$src)>;
5271 def : Pat<(v1f32 (bitconvert (f32  FPR32:$src))), (v1f32 FPR32:$src)>;
5272 def : Pat<(v1f64 (bitconvert (f64  FPR64:$src))), (v1f64 FPR64:$src)>;
5273
5274 def : Pat<(v1i64 (bitconvert (i64  GPR64:$src))), (FMOVdx $src)>;
5275 def : Pat<(v1i32 (bitconvert (i32  GPR32:$src))), (FMOVsw $src)>;
5276
5277 def : Pat<(v8i8   (bitconvert (f64   FPR64:$src))), (v8i8 FPR64:$src)>;
5278 def : Pat<(v4i16  (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
5279 def : Pat<(v2i32  (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
5280 def : Pat<(v2f32  (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
5281 def : Pat<(v1i64  (bitconvert (f64   FPR64:$src))), (v1i64 FPR64:$src)>;
5282
5283 def : Pat<(v16i8  (bitconvert (f128   FPR128:$src))), (v16i8 FPR128:$src)>;
5284 def : Pat<(v8i16  (bitconvert (f128   FPR128:$src))), (v8i16 FPR128:$src)>;
5285 def : Pat<(v4i32  (bitconvert (f128   FPR128:$src))), (v4i32 FPR128:$src)>;
5286 def : Pat<(v2i64  (bitconvert (f128   FPR128:$src))), (v2i64 FPR128:$src)>;
5287 def : Pat<(v4f32  (bitconvert (f128   FPR128:$src))), (v4f32 FPR128:$src)>;
5288 def : Pat<(v2f64  (bitconvert (f128   FPR128:$src))), (v2f64 FPR128:$src)>;
5289
5290 def neon_uimm3 : Operand<i64>,
5291                    ImmLeaf<i64, [{return Imm < 8;}]> {
5292   let ParserMatchClass = uimm3_asmoperand;
5293   let PrintMethod = "printUImmHexOperand";
5294 }
5295
5296 def neon_uimm4 : Operand<i64>,
5297                    ImmLeaf<i64, [{return Imm < 16;}]> {
5298   let ParserMatchClass = uimm4_asmoperand;
5299   let PrintMethod = "printUImmHexOperand";
5300 }
5301
5302 // Bitwise Extract
5303 class NeonI_Extract<bit q, bits<2> op2, string asmop,
5304                     string OpS, RegisterOperand OpVPR, Operand OpImm>
5305   : NeonI_BitExtract<q, op2, (outs OpVPR:$Rd),
5306                      (ins OpVPR:$Rn, OpVPR:$Rm, OpImm:$Index),
5307                      asmop # "\t$Rd." # OpS # ", $Rn." # OpS # 
5308                      ", $Rm." # OpS # ", $Index",
5309                      [],
5310                      NoItinerary>{
5311   bits<4> Index;
5312 }
5313
5314 def EXTvvvi_8b : NeonI_Extract<0b0, 0b00, "ext", "8b",
5315                                VPR64, neon_uimm3> {
5316   let Inst{14-11} = {0b0, Index{2}, Index{1}, Index{0}};
5317 }
5318
5319 def EXTvvvi_16b: NeonI_Extract<0b1, 0b00, "ext", "16b",
5320                                VPR128, neon_uimm4> {
5321   let Inst{14-11} = Index;
5322 }
5323
5324 class NI_Extract<ValueType OpTy, RegisterOperand OpVPR, Instruction INST,
5325                  Operand OpImm> 
5326   : Pat<(OpTy (Neon_vextract (OpTy OpVPR:$Rn), (OpTy OpVPR:$Rm),
5327                                  (i64 OpImm:$Imm))),
5328               (INST OpVPR:$Rn, OpVPR:$Rm, OpImm:$Imm)>;
5329
5330 def : NI_Extract<v8i8,  VPR64,  EXTvvvi_8b,  neon_uimm3>;
5331 def : NI_Extract<v4i16, VPR64,  EXTvvvi_8b,  neon_uimm3>;
5332 def : NI_Extract<v2i32, VPR64,  EXTvvvi_8b,  neon_uimm3>;
5333 def : NI_Extract<v1i64, VPR64,  EXTvvvi_8b,  neon_uimm3>;
5334 def : NI_Extract<v2f32, VPR64,  EXTvvvi_8b,  neon_uimm3>;
5335 def : NI_Extract<v1f64, VPR64,  EXTvvvi_8b,  neon_uimm3>;
5336 def : NI_Extract<v16i8, VPR128, EXTvvvi_16b, neon_uimm4>;
5337 def : NI_Extract<v8i16, VPR128, EXTvvvi_16b, neon_uimm4>;
5338 def : NI_Extract<v4i32, VPR128, EXTvvvi_16b, neon_uimm4>;
5339 def : NI_Extract<v2i64, VPR128, EXTvvvi_16b, neon_uimm4>;
5340 def : NI_Extract<v4f32, VPR128, EXTvvvi_16b, neon_uimm4>;
5341 def : NI_Extract<v2f64, VPR128, EXTvvvi_16b, neon_uimm4>;
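
// Illustrative syntax only: EXT concatenates the two sources and extracts a
// byte-shifted window, e.g.,
//   ext v0.8b,  v1.8b,  v2.8b,  #3
//   ext v0.16b, v1.16b, v2.16b, #11
// (the immediate is a byte offset; it may be printed in hex by the operand's
// print method).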
5342
5343 // Table lookup
5344 class NI_TBL<bit q, bits<2> op2, bits<2> len, bit op,
5345              string asmop, string OpS, RegisterOperand OpVPR,
5346              RegisterOperand VecList>
5347   : NeonI_TBL<q, op2, len, op,
5348               (outs OpVPR:$Rd), (ins VecList:$Rn, OpVPR:$Rm),
5349               asmop # "\t$Rd." # OpS # ", $Rn, $Rm." # OpS,
5350               [],
5351               NoItinerary>;
5352
5353 // The vectors in the lookup table are always 16b
5354 multiclass NI_TBL_pat<bits<2> len, bit op, string asmop, string List> {
5355   def _8b  : NI_TBL<0, 0b00, len, op, asmop, "8b", VPR64,
5356                     !cast<RegisterOperand>(List # "16B_operand")>;
5357
5358   def _16b : NI_TBL<1, 0b00, len, op, asmop, "16b", VPR128,
5359                     !cast<RegisterOperand>(List # "16B_operand")>;
5360 }
5361
5362 defm TBL1 : NI_TBL_pat<0b00, 0b0, "tbl", "VOne">;
5363 defm TBL2 : NI_TBL_pat<0b01, 0b0, "tbl", "VPair">;
5364 defm TBL3 : NI_TBL_pat<0b10, 0b0, "tbl", "VTriple">;
5365 defm TBL4 : NI_TBL_pat<0b11, 0b0, "tbl", "VQuad">;
5366
5367 // Table lookup extension
5368 class NI_TBX<bit q, bits<2> op2, bits<2> len, bit op,
5369              string asmop, string OpS, RegisterOperand OpVPR,
5370              RegisterOperand VecList>
5371   : NeonI_TBL<q, op2, len, op,
5372               (outs OpVPR:$Rd), (ins OpVPR:$src, VecList:$Rn, OpVPR:$Rm),
5373               asmop # "\t$Rd." # OpS # ", $Rn, $Rm." # OpS,
5374               [],
5375               NoItinerary> {
5376   let Constraints = "$src = $Rd";
5377 }
5378
5379 // The vectors in the lookup table are always 16b
5380 multiclass NI_TBX_pat<bits<2> len, bit op, string asmop, string List> {
5381   def _8b  : NI_TBX<0, 0b00, len, op, asmop, "8b", VPR64,
5382                     !cast<RegisterOperand>(List # "16B_operand")>;
5383
5384   def _16b : NI_TBX<1, 0b00, len, op, asmop, "16b", VPR128,
5385                     !cast<RegisterOperand>(List # "16B_operand")>;
5386 }
5387
5388 defm TBX1 : NI_TBX_pat<0b00, 0b1, "tbx", "VOne">;
5389 defm TBX2 : NI_TBX_pat<0b01, 0b1, "tbx", "VPair">;
5390 defm TBX3 : NI_TBX_pat<0b10, 0b1, "tbx", "VTriple">;
5391 defm TBX4 : NI_TBX_pat<0b11, 0b1, "tbx", "VQuad">;
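
// Illustrative syntax only: the table operands are lists of one to four
// 16b vectors, e.g.,
//   tbl v0.8b,  { v1.16b }, v2.8b
//   tbx v0.16b, { v1.16b, v2.16b }, v3.16b
// TBX leaves destination bytes unchanged for out-of-range indices, which is
// why its destination is tied ($src = $Rd) above.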
5392
5393 // The following definitions are for the instruction class (3V Elem)
5394
5395 // Variant 1
5396
5397 class NI_2VE<bit q, bit u, bits<2> size, bits<4> opcode,
5398              string asmop, string ResS, string OpS, string EleOpS,
5399              Operand OpImm, RegisterOperand ResVPR,
5400              RegisterOperand OpVPR, RegisterOperand EleOpVPR>
5401   : NeonI_2VElem<q, u, size, opcode, 
5402                  (outs ResVPR:$Rd), (ins ResVPR:$src, OpVPR:$Rn,
5403                                          EleOpVPR:$Re, OpImm:$Index),
5404                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
5405                  ", $Re." # EleOpS # "[$Index]",
5406                  [],
5407                  NoItinerary> {
5408   bits<3> Index;
5409   bits<5> Re;
5410
5411   let Constraints = "$src = $Rd";
5412 }
5413
5414 multiclass NI_2VE_v1<bit u, bits<4> opcode, string asmop> {
5415   // vector register class for element is always 128-bit to cover the max index
5416   def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
5417                      neon_uimm2_bare, VPR64, VPR64, VPR128> {
5418     let Inst{11} = {Index{1}};
5419     let Inst{21} = {Index{0}};
5420     let Inst{20-16} = Re;
5421   }
5422
5423   def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
5424                      neon_uimm2_bare, VPR128, VPR128, VPR128> {
5425     let Inst{11} = {Index{1}};
5426     let Inst{21} = {Index{0}};
5427     let Inst{20-16} = Re;
5428   }
5429
5430   // Index operations on 16-bit(H) elements are restricted to using v0-v15.
5431   def _4h8h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
5432                      neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
5433     let Inst{11} = {Index{2}};
5434     let Inst{21} = {Index{1}};
5435     let Inst{20} = {Index{0}};
5436     let Inst{19-16} = Re{3-0};
5437   }
5438
5439   def _8h8h : NI_2VE<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
5440                      neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
5441     let Inst{11} = {Index{2}};
5442     let Inst{21} = {Index{1}};
5443     let Inst{20} = {Index{0}};
5444     let Inst{19-16} = Re{3-0};
5445   }
5446 }
5447
5448 defm MLAvve : NI_2VE_v1<0b1, 0b0000, "mla">;
5449 defm MLSvve : NI_2VE_v1<0b1, 0b0100, "mls">;
5450
5451 // Pattern for lane in 128-bit vector
5452 class NI_2VE_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
5453                    RegisterOperand ResVPR, RegisterOperand OpVPR,
5454                    RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
5455                    ValueType EleOpTy, SDPatternOperator coreop>
5456   : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
5457           (OpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
5458         (INST ResVPR:$src, OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
5459
5460 // Pattern for lane in 64-bit vector
5461 class NI_2VE_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
5462                   RegisterOperand ResVPR, RegisterOperand OpVPR,
5463                   RegisterOperand EleOpVPR, ValueType ResTy, ValueType OpTy,
5464                   ValueType EleOpTy, SDPatternOperator coreop>
5465   : Pat<(ResTy (op (ResTy ResVPR:$src), (OpTy OpVPR:$Rn),
5466           (OpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
5467         (INST ResVPR:$src, OpVPR:$Rn, 
5468           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
5469
5470 multiclass NI_2VE_v1_pat<string subop, SDPatternOperator op>
5471 {
5472   def : NI_2VE_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
5473                      op, VPR64, VPR64, VPR128, v2i32, v2i32, v4i32,
5474                      BinOpFrag<(Neon_vduplane
5475                                  (Neon_low4S node:$LHS), node:$RHS)>>;
5476
5477   def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
5478                      op, VPR128, VPR128, VPR128, v4i32, v4i32, v4i32,
5479                      BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
5480
5481   def : NI_2VE_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
5482                      op, VPR64, VPR64, VPR128Lo, v4i16, v4i16, v8i16,
5483                      BinOpFrag<(Neon_vduplane
5484                                  (Neon_low8H node:$LHS), node:$RHS)>>;
5485
5486   def : NI_2VE_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
5487                      op, VPR128, VPR128, VPR128Lo, v8i16, v8i16, v8i16,
5488                      BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
5489
5490   // Index can only be half of the max value for lane in 64-bit vector
5491
5492   def : NI_2VE_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
5493                     op, VPR64, VPR64, VPR64, v2i32, v2i32, v2i32,
5494                     BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
5495
5496   def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4s"), neon_uimm1_bare,
5497                     op, VPR128, VPR128, VPR64, v4i32, v4i32, v2i32,
5498                     BinOpFrag<(Neon_vduplane
5499                                 (Neon_combine_4S node:$LHS, undef),
5500                                  node:$RHS)>>;
5501
5502   def : NI_2VE_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
5503                     op, VPR64, VPR64, VPR64Lo, v4i16, v4i16, v4i16,
5504                     BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
5505
5506   def : NI_2VE_lane<!cast<Instruction>(subop # "_8h8h"), neon_uimm2_bare,
5507                     op, VPR128, VPR128, VPR64Lo, v8i16, v8i16, v4i16,
5508                     BinOpFrag<(Neon_vduplane
5509                                 (Neon_combine_8H node:$LHS, undef),
5510                                 node:$RHS)>>;
5511 }
5512
5513 defm MLA_lane_v1 : NI_2VE_v1_pat<"MLAvve", Neon_mla>;
5514 defm MLS_lane_v1 : NI_2VE_v1_pat<"MLSvve", Neon_mls>;
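
// Illustrative syntax only: the integer multiply-accumulate by-element forms
// covered by these patterns assemble as, e.g.,
//   mla v0.4s, v1.4s, v2.s[3]
//   mls v0.8h, v1.8h, v2.h[5]
// (for h elements the vector holding the lane must be v0-v15).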
5515
5516 class NI_2VE_2op<bit q, bit u, bits<2> size, bits<4> opcode,
5517                  string asmop, string ResS, string OpS, string EleOpS,
5518                  Operand OpImm, RegisterOperand ResVPR,
5519                  RegisterOperand OpVPR, RegisterOperand EleOpVPR>
5520   : NeonI_2VElem<q, u, size, opcode, 
5521                  (outs ResVPR:$Rd), (ins OpVPR:$Rn,
5522                                          EleOpVPR:$Re, OpImm:$Index),
5523                  asmop # "\t$Rd." # ResS # ", $Rn." # OpS #
5524                  ", $Re." # EleOpS # "[$Index]",
5525                  [],
5526                  NoItinerary> {
5527   bits<3> Index;
5528   bits<5> Re;
5529 }
5530
5531 multiclass NI_2VE_v1_2op<bit u, bits<4> opcode, string asmop> {
5532   // vector register class for element is always 128-bit to cover the max index
5533   def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
5534                          neon_uimm2_bare, VPR64, VPR64, VPR128> {
5535     let Inst{11} = {Index{1}};
5536     let Inst{21} = {Index{0}};
5537     let Inst{20-16} = Re;
5538   }
5539
5540   def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
5541                          neon_uimm2_bare, VPR128, VPR128, VPR128> {
5542     let Inst{11} = {Index{1}};
5543     let Inst{21} = {Index{0}};
5544     let Inst{20-16} = Re;
5545   }
5546
5547   // Index operations on 16-bit(H) elements are restricted to using v0-v15.
5548   def _4h8h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4h", "4h", "h",
5549                          neon_uimm3_bare, VPR64, VPR64, VPR128Lo> {
5550     let Inst{11} = {Index{2}};
5551     let Inst{21} = {Index{1}};
5552     let Inst{20} = {Index{0}};
5553     let Inst{19-16} = Re{3-0};
5554   }
5555
5556   def _8h8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop, "8h", "8h", "h",
5557                          neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
5558     let Inst{11} = {Index{2}};
5559     let Inst{21} = {Index{1}};
5560     let Inst{20} = {Index{0}};
5561     let Inst{19-16} = Re{3-0};
5562   }
5563 }
5564
5565 defm MULve : NI_2VE_v1_2op<0b0, 0b1000, "mul">;
5566 defm SQDMULHve : NI_2VE_v1_2op<0b0, 0b1100, "sqdmulh">;
5567 defm SQRDMULHve : NI_2VE_v1_2op<0b0, 0b1101, "sqrdmulh">;
5568
5569 // Pattern for lane in 128-bit vector
5570 class NI_2VE_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
5571                        RegisterOperand OpVPR, RegisterOperand EleOpVPR,
5572                        ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
5573                        SDPatternOperator coreop>
5574   : Pat<(ResTy (op (OpTy OpVPR:$Rn),
5575           (OpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
5576         (INST OpVPR:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
5577
5578 // Pattern for lane in 64-bit vector
5579 class NI_2VE_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
5580                       RegisterOperand OpVPR, RegisterOperand EleOpVPR,
5581                       ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
5582                       SDPatternOperator coreop>
5583   : Pat<(ResTy (op (OpTy OpVPR:$Rn),
5584           (OpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
5585         (INST OpVPR:$Rn, 
5586           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
5587
5588 multiclass NI_2VE_mul_v1_pat<string subop, SDPatternOperator op> {
5589   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
5590                          op, VPR64, VPR128, v2i32, v2i32, v4i32,
5591                          BinOpFrag<(Neon_vduplane
5592                                      (Neon_low4S node:$LHS), node:$RHS)>>;
5593
5594   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
5595                          op, VPR128, VPR128, v4i32, v4i32, v4i32,
5596                          BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
5597
5598   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4h8h"), neon_uimm3_bare,
5599                          op, VPR64, VPR128Lo, v4i16, v4i16, v8i16,
5600                          BinOpFrag<(Neon_vduplane
5601                                     (Neon_low8H node:$LHS), node:$RHS)>>;
5602
5603   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_8h8h"), neon_uimm3_bare,
5604                          op, VPR128, VPR128Lo, v8i16, v8i16, v8i16,
5605                          BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
5606
5607   // Index can only be half of the max value for lane in 64-bit vector
5608
5609   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
5610                         op, VPR64, VPR64, v2i32, v2i32, v2i32,
5611                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
5612
5613   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4s"), neon_uimm1_bare,
5614                         op, VPR128, VPR64, v4i32, v4i32, v2i32,
5615                         BinOpFrag<(Neon_vduplane
5616                                     (Neon_combine_4S node:$LHS, undef),
5617                                      node:$RHS)>>;
5618
5619   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4h8h"), neon_uimm2_bare,
5620                         op, VPR64, VPR64Lo, v4i16, v4i16, v4i16,
5621                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
5622
5623   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_8h8h"), neon_uimm2_bare,
5624                         op, VPR128, VPR64Lo, v8i16, v8i16, v4i16,
5625                         BinOpFrag<(Neon_vduplane
5626                                     (Neon_combine_8H node:$LHS, undef),
5627                                     node:$RHS)>>;
5628 }
5629
5630 defm MUL_lane_v1 : NI_2VE_mul_v1_pat<"MULve", mul>;
5631 defm SQDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQDMULHve", int_arm_neon_vqdmulh>;
5632 defm SQRDMULH_lane_v1 : NI_2VE_mul_v1_pat<"SQRDMULHve", int_arm_neon_vqrdmulh>;
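
// Illustrative syntax only: the two-operand by-element multiplies covered by
// these patterns assemble as, e.g.,
//   mul      v0.2s, v1.2s, v2.s[1]
//   sqdmulh  v0.4h, v1.4h, v2.h[2]
//   sqrdmulh v0.4s, v1.4s, v2.s[3]
// (register and lane numbers are arbitrary examples).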
5633
5634 // Variant 2
5635
5636 multiclass NI_2VE_v2_2op<bit u, bits<4> opcode, string asmop> {
5637   // vector register class for element is always 128-bit to cover the max index
5638   def _2s4s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
5639                          neon_uimm2_bare, VPR64, VPR64, VPR128> {
5640     let Inst{11} = {Index{1}};
5641     let Inst{21} = {Index{0}};
5642     let Inst{20-16} = Re;
5643   }
5644
5645   def _4s4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
5646                          neon_uimm2_bare, VPR128, VPR128, VPR128> {
5647     let Inst{11} = {Index{1}};
5648     let Inst{21} = {Index{0}};
5649     let Inst{20-16} = Re;
5650   }
5651
5652   // _1d2d doesn't exist!
5653
5654   def _2d2d : NI_2VE_2op<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
5655                          neon_uimm1_bare, VPR128, VPR128, VPR128> {
5656     let Inst{11} = {Index{0}};
5657     let Inst{21} = 0b0;
5658     let Inst{20-16} = Re;
5659   }
5660 }
5661
5662 defm FMULve : NI_2VE_v2_2op<0b0, 0b1001, "fmul">;
5663 defm FMULXve : NI_2VE_v2_2op<0b1, 0b1001, "fmulx">;
5664
5665 class NI_2VE_mul_lane_2d<Instruction INST, Operand OpImm, SDPatternOperator op,
5666                          RegisterOperand OpVPR, RegisterOperand EleOpVPR,
5667                          ValueType ResTy, ValueType OpTy, ValueType EleOpTy,
5668                          SDPatternOperator coreop>
5669   : Pat<(ResTy (op (OpTy OpVPR:$Rn),
5670           (OpTy (coreop (EleOpTy EleOpVPR:$Re), (EleOpTy EleOpVPR:$Re))))),
5671         (INST OpVPR:$Rn, 
5672           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), 0)>;
5673
5674 multiclass NI_2VE_mul_v2_pat<string subop, SDPatternOperator op> {
5675   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2s4s"), neon_uimm2_bare,
5676                          op, VPR64, VPR128, v2f32, v2f32, v4f32,
5677                          BinOpFrag<(Neon_vduplane
5678                                      (Neon_low4f node:$LHS), node:$RHS)>>;
5679
5680   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4s"), neon_uimm2_bare,
5681                          op, VPR128, VPR128, v4f32, v4f32, v4f32,
5682                          BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
5683
5684   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
5685                          op, VPR128, VPR128, v2f64, v2f64, v2f64,
5686                          BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
5687
5688   // Index can only be half of the max value for lane in 64-bit vector
5689
5690   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2s4s"), neon_uimm1_bare,
5691                         op, VPR64, VPR64, v2f32, v2f32, v2f32,
5692                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
5693
5694   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4s"), neon_uimm1_bare,
5695                         op, VPR128, VPR64, v4f32, v4f32, v2f32,
5696                         BinOpFrag<(Neon_vduplane
5697                                     (Neon_combine_4f node:$LHS, undef),
5698                                     node:$RHS)>>;
5699
5700   def : NI_2VE_mul_lane_2d<!cast<Instruction>(subop # "_2d2d"), neon_uimm1_bare,
5701                            op, VPR128, VPR64, v2f64, v2f64, v1f64,
5702                            BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
5703 }
5704
5705 defm FMUL_lane_v2 : NI_2VE_mul_v2_pat<"FMULve", fmul>;
5706 defm FMULX_lane_v2 : NI_2VE_mul_v2_pat<"FMULXve", int_aarch64_neon_vmulx>;
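
// Illustrative syntax only: the floating-point by-element multiplies covered
// by these patterns assemble as, e.g.,
//   fmul  v0.2d, v1.2d, v2.d[1]
//   fmulx v0.4s, v1.4s, v2.s[0]
// (register and lane numbers are arbitrary examples).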
5707
5708 // The following are patterns using fma
5709 // -ffp-contract=fast generates fma
5710
5711 multiclass NI_2VE_v2<bit u, bits<4> opcode, string asmop> {
5712   // vector register class for element is always 128-bit to cover the max index
5713   def _2s4s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2s", "2s", "s",
5714                      neon_uimm2_bare, VPR64, VPR64, VPR128> {
5715     let Inst{11} = {Index{1}};
5716     let Inst{21} = {Index{0}};
5717     let Inst{20-16} = Re;
5718   }
5719
5720   def _4s4s : NI_2VE<0b1, u, 0b10, opcode, asmop, "4s", "4s", "s",
5721                      neon_uimm2_bare, VPR128, VPR128, VPR128> {
5722     let Inst{11} = {Index{1}};
5723     let Inst{21} = {Index{0}};
5724     let Inst{20-16} = Re;
5725   }
5726
5727   // _1d2d doesn't exist!
5728   
5729   def _2d2d : NI_2VE<0b1, u, 0b11, opcode, asmop, "2d", "2d", "d",
5730                      neon_uimm1_bare, VPR128, VPR128, VPR128> {
5731     let Inst{11} = {Index{0}};
5732     let Inst{21} = 0b0;
5733     let Inst{20-16} = Re;
5734   }
5735 }
5736
5737 defm FMLAvve : NI_2VE_v2<0b0, 0b0001, "fmla">;
5738 defm FMLSvve : NI_2VE_v2<0b0, 0b0101, "fmls">;
5739
5740 // Pattern for lane in 128-bit vector
5741 class NI_2VEswap_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
5742                        RegisterOperand ResVPR, RegisterOperand OpVPR,
5743                        ValueType ResTy, ValueType OpTy,
5744                        SDPatternOperator coreop>
5745   : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
5746                    (ResTy ResVPR:$src), (ResTy ResVPR:$Rn))),
5747         (INST ResVPR:$src, ResVPR:$Rn, OpVPR:$Re, OpImm:$Index)>;
5748
5749 // Pattern for lane in 64-bit vector
5750 class NI_2VEswap_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
5751                       RegisterOperand ResVPR, RegisterOperand OpVPR,
5752                       ValueType ResTy, ValueType OpTy,
5753                       SDPatternOperator coreop>
5754   : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (i64 OpImm:$Index))),
5755                    (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
5756         (INST ResVPR:$src, ResVPR:$Rn, 
5757           (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), OpImm:$Index)>;
5758
5759 // Pattern for lane in 64-bit vector
5760 class NI_2VEswap_lane_2d2d<Instruction INST, Operand OpImm,
5761                            SDPatternOperator op,
5762                            RegisterOperand ResVPR, RegisterOperand OpVPR,
5763                            ValueType ResTy, ValueType OpTy,
5764                            SDPatternOperator coreop>
5765   : Pat<(ResTy (op (ResTy (coreop (OpTy OpVPR:$Re), (OpTy OpVPR:$Re))),
5766                    (ResTy ResVPR:$Rn), (ResTy ResVPR:$src))),
5767         (INST ResVPR:$src, ResVPR:$Rn, 
5768           (SUBREG_TO_REG (i64 0), OpVPR:$Re, sub_64), 0)>;
5769
5770
5771 multiclass NI_2VE_fma_v2_pat<string subop, SDPatternOperator op> {
5772   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
5773                          neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
5774                          BinOpFrag<(Neon_vduplane
5775                                      (Neon_low4f node:$LHS), node:$RHS)>>;
5776
5777   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
5778                          neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
5779                          BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
5780
5781   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
5782                          neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
5783                          BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
5784
5785   // Index can only be half of the max value for lane in 64-bit vector
5786
5787   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
5788                         neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
5789                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
5790
5791   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
5792                         neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
5793                         BinOpFrag<(Neon_vduplane
5794                                     (Neon_combine_4f node:$LHS, undef),
5795                                     node:$RHS)>>;
5796
5797   def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
5798                              neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
5799                              BinOpFrag<(Neon_combine_2d node:$LHS, node:$RHS)>>;
5800 }
5801
5802 defm FMLA_lane_v2_s : NI_2VE_fma_v2_pat<"FMLAvve", fma>;
5803
5804 multiclass NI_2VE_fms_v2_pat<string subop, SDPatternOperator op>
5805 {
5806   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
5807                          neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
5808                          BinOpFrag<(fneg (Neon_vduplane
5809                                      (Neon_low4f node:$LHS), node:$RHS))>>;
5810
5811   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2s4s"),
5812                          neon_uimm2_bare, op, VPR64, VPR128, v2f32, v4f32,
5813                          BinOpFrag<(Neon_vduplane
5814                                      (Neon_low4f (fneg node:$LHS)),
5815                                      node:$RHS)>>;
5816
5817   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
5818                          neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
5819                          BinOpFrag<(fneg (Neon_vduplane
5820                                      node:$LHS, node:$RHS))>>;
5821
5822   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_4s4s"),
5823                          neon_uimm2_bare, op, VPR128, VPR128, v4f32, v4f32,
5824                          BinOpFrag<(Neon_vduplane
5825                                      (fneg node:$LHS), node:$RHS)>>;
5826
5827   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
5828                          neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
5829                          BinOpFrag<(fneg (Neon_vduplane
5830                                      node:$LHS, node:$RHS))>>;
5831
5832   def : NI_2VEswap_laneq<!cast<Instruction>(subop # "_2d2d"),
5833                          neon_uimm1_bare, op, VPR128, VPR128, v2f64, v2f64,
5834                          BinOpFrag<(Neon_vduplane
5835                                      (fneg node:$LHS), node:$RHS)>>;
5836
5837   // Index can only be half of the max value for lane in 64-bit vector
5838
5839   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
5840                         neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
5841                         BinOpFrag<(fneg (Neon_vduplane
5842                                     node:$LHS, node:$RHS))>>;
5843
5844   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_2s4s"),
5845                         neon_uimm1_bare, op, VPR64, VPR64, v2f32, v2f32,
5846                         BinOpFrag<(Neon_vduplane
5847                                     (fneg node:$LHS), node:$RHS)>>;
5848
5849   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
5850                         neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
5851                         BinOpFrag<(fneg (Neon_vduplane
5852                                     (Neon_combine_4f node:$LHS, undef),
5853                                     node:$RHS))>>;
5854
5855   def : NI_2VEswap_lane<!cast<Instruction>(subop # "_4s4s"),
5856                         neon_uimm1_bare, op, VPR128, VPR64, v4f32, v2f32,
5857                         BinOpFrag<(Neon_vduplane
5858                                     (Neon_combine_4f (fneg node:$LHS), undef),
5859                                     node:$RHS)>>;
5860
5861   def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
5862                              neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
5863                              BinOpFrag<(fneg (Neon_combine_2d
5864                                          node:$LHS, node:$RHS))>>;
5865
5866   def : NI_2VEswap_lane_2d2d<!cast<Instruction>(subop # "_2d2d"),
5867                              neon_uimm1_bare, op, VPR128, VPR64, v2f64, v1f64,
5868                              BinOpFrag<(Neon_combine_2d
5869                                          (fneg node:$LHS), (fneg node:$RHS))>>;
5870 }
5871
5872 defm FMLS_lane_v2_s : NI_2VE_fms_v2_pat<"FMLSvve", fma>;
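// There is no separate fms node here; the FMLS patterns above match fma with
// an fneg folded into either the duplicated lane operand or the element
// vector itself. Illustrative assembly (register choices are examples):
//   fmls v0.2s, v1.2s, v2.s[0]
//   fmls v0.4s, v1.4s, v2.s[3]
//   fmls v0.2d, v1.2d, v2.d[1]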
5873
5874 // Variant 3: Long type
5875 // E.g. SMLAL : 4S/4H/H (v0-v15), 2D/2S/S
5876 //      SMLAL2: 4S/8H/H (v0-v15), 2D/4S/S
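// Illustrative assembly for the long by-element forms (standard A64 syntax;
// register choices are examples, not taken from this file):
//   smlal  v0.4s, v1.4h, v2.h[3]    // low half of the source elements
//   smlal2 v0.4s, v1.8h, v2.h[3]    // high half of the source elements
//   smlal  v0.2d, v1.2s, v2.s[1]
//   smlal2 v0.2d, v1.4s, v2.s[1]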
5877
5878 multiclass NI_2VE_v3<bit u, bits<4> opcode, string asmop> {
5879   // The vector register class for the element is always 128-bit to cover the max index
5880   def _2d2s : NI_2VE<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
5881                      neon_uimm2_bare, VPR128, VPR64, VPR128> {
5882     let Inst{11} = {Index{1}};
5883     let Inst{21} = {Index{0}};
5884     let Inst{20-16} = Re;
5885   }
5886   
5887   def _2d4s : NI_2VE<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
5888                      neon_uimm2_bare, VPR128, VPR128, VPR128> {
5889     let Inst{11} = {Index{1}};
5890     let Inst{21} = {Index{0}};
5891     let Inst{20-16} = Re;
5892   }
5893
5894   // Index operations on 16-bit(H) elements are restricted to using v0-v15.
5895   def _4s8h : NI_2VE<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
5896                      neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
5897     let Inst{11} = {Index{2}};
5898     let Inst{21} = {Index{1}};
5899     let Inst{20} = {Index{0}};
5900     let Inst{19-16} = Re{3-0};
5901   }
5902   
5903   def _4s4h : NI_2VE<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
5904                      neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
5905     let Inst{11} = {Index{2}};
5906     let Inst{21} = {Index{1}};
5907     let Inst{20} = {Index{0}};
5908     let Inst{19-16} = Re{3-0};
5909   }
5910 }
5911
5912 defm SMLALvve : NI_2VE_v3<0b0, 0b0010, "smlal">;
5913 defm UMLALvve : NI_2VE_v3<0b1, 0b0010, "umlal">;
5914 defm SMLSLvve : NI_2VE_v3<0b0, 0b0110, "smlsl">;
5915 defm UMLSLvve : NI_2VE_v3<0b1, 0b0110, "umlsl">;
5916 defm SQDMLALvve : NI_2VE_v3<0b0, 0b0011, "sqdmlal">;
5917 defm SQDMLSLvve : NI_2VE_v3<0b0, 0b0111, "sqdmlsl">;
5918
5919 multiclass NI_2VE_v3_2op<bit u, bits<4> opcode, string asmop> {
5920   // The vector register class for the element is always 128-bit to cover the max index
5921   def _2d2s : NI_2VE_2op<0b0, u, 0b10, opcode, asmop, "2d", "2s", "s",
5922                          neon_uimm2_bare, VPR128, VPR64, VPR128> {
5923     let Inst{11} = {Index{1}};
5924     let Inst{21} = {Index{0}};
5925     let Inst{20-16} = Re;
5926   }
5927   
5928   def _2d4s : NI_2VE_2op<0b1, u, 0b10, opcode, asmop # "2", "2d", "4s", "s",
5929                          neon_uimm2_bare, VPR128, VPR128, VPR128> {
5930     let Inst{11} = {Index{1}};
5931     let Inst{21} = {Index{0}};
5932     let Inst{20-16} = Re;
5933   }
5934
5935   // Index operations on 16-bit(H) elements are restricted to using v0-v15.
5936   def _4s8h : NI_2VE_2op<0b1, u, 0b01, opcode, asmop # "2", "4s", "8h", "h",
5937                          neon_uimm3_bare, VPR128, VPR128, VPR128Lo> {
5938     let Inst{11} = {Index{2}};
5939     let Inst{21} = {Index{1}};
5940     let Inst{20} = {Index{0}};
5941     let Inst{19-16} = Re{3-0};
5942   }
5943   
5944   def _4s4h : NI_2VE_2op<0b0, u, 0b01, opcode, asmop, "4s", "4h", "h",
5945                          neon_uimm3_bare, VPR128, VPR64, VPR128Lo> {
5946     let Inst{11} = {Index{2}};
5947     let Inst{21} = {Index{1}};
5948     let Inst{20} = {Index{0}};
5949     let Inst{19-16} = Re{3-0};
5950   }
5951 }
5952
5953 defm SMULLve : NI_2VE_v3_2op<0b0, 0b1010, "smull">;
5954 defm UMULLve : NI_2VE_v3_2op<0b1, 0b1010, "umull">;
5955 defm SQDMULLve : NI_2VE_v3_2op<0b0, 0b1011, "sqdmull">;
5956
5957 // Pattern for lane in 128-bit vector
5958 class NI_2VEL2_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
5959                      RegisterOperand EleOpVPR, ValueType ResTy,
5960                      ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
5961                      SDPatternOperator hiop, SDPatternOperator coreop>
5962   : Pat<(ResTy (op (ResTy VPR128:$src),
5963           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
5964           (HalfOpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
5965         (INST VPR128:$src, VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
5966
5967 // Pattern for lane in 64-bit vector
5968 class NI_2VEL2_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
5969                     RegisterOperand EleOpVPR, ValueType ResTy,
5970                     ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
5971                     SDPatternOperator hiop, SDPatternOperator coreop>
5972   : Pat<(ResTy (op (ResTy VPR128:$src),
5973           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
5974           (HalfOpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
5975         (INST VPR128:$src, VPR128:$Rn, 
5976           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
5977
5978 multiclass NI_2VEL_v3_pat<string subop, SDPatternOperator op> {
5979   def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
5980                      op, VPR128, VPR64, VPR128Lo, v4i32, v4i16, v8i16,
5981                      BinOpFrag<(Neon_vduplane
5982                                  (Neon_low8H node:$LHS), node:$RHS)>>;
5983   
5984   def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
5985                      op, VPR128, VPR64, VPR128, v2i64, v2i32, v4i32,
5986                      BinOpFrag<(Neon_vduplane
5987                                  (Neon_low4S node:$LHS), node:$RHS)>>;
5988   
5989   def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
5990                        op, VPR128Lo, v4i32, v8i16, v8i16, v4i16, Neon_High8H,
5991                        BinOpFrag<(Neon_vduplane
5992                                    (Neon_low8H node:$LHS), node:$RHS)>>;
5993   
5994   def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
5995                        op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S,
5996                        BinOpFrag<(Neon_vduplane
5997                                    (Neon_low4S node:$LHS), node:$RHS)>>;
5998   
5999   // When the lane comes from a 64-bit vector, the index can only be half of the max value
6000
6001   def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
6002                     op, VPR128, VPR64, VPR64Lo, v4i32, v4i16, v4i16,
6003                     BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
6004   
6005   def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
6006                     op, VPR128, VPR64, VPR64, v2i64, v2i32, v2i32,
6007                     BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
6008
6009   def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
6010                       op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H,
6011                       BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
6012   
6013   def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
6014                       op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S,
6015                       BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
6016 }
6017
6018 defm SMLAL_lane_v3 : NI_2VEL_v3_pat<"SMLALvve", Neon_smlal>;
6019 defm UMLAL_lane_v3 : NI_2VEL_v3_pat<"UMLALvve", Neon_umlal>;
6020 defm SMLSL_lane_v3 : NI_2VEL_v3_pat<"SMLSLvve", Neon_smlsl>;
6021 defm UMLSL_lane_v3 : NI_2VEL_v3_pat<"UMLSLvve", Neon_umlsl>;
6022
6023 // Pattern for lane in 128-bit vector
6024 class NI_2VEL2_mul_laneq<Instruction INST, Operand OpImm, SDPatternOperator op,
6025                          RegisterOperand EleOpVPR, ValueType ResTy,
6026                          ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
6027                          SDPatternOperator hiop, SDPatternOperator coreop>
6028   : Pat<(ResTy (op 
6029           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
6030           (HalfOpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
6031         (INST VPR128:$Rn, EleOpVPR:$Re, OpImm:$Index)>;
6032
6033 // Pattern for lane in 64-bit vector
6034 class NI_2VEL2_mul_lane<Instruction INST, Operand OpImm, SDPatternOperator op,
6035                         RegisterOperand EleOpVPR, ValueType ResTy,
6036                         ValueType OpTy, ValueType EleOpTy, ValueType HalfOpTy,
6037                         SDPatternOperator hiop, SDPatternOperator coreop>
6038   : Pat<(ResTy (op
6039           (HalfOpTy (hiop (OpTy VPR128:$Rn))),
6040           (HalfOpTy (coreop (EleOpTy EleOpVPR:$Re), (i64 OpImm:$Index))))),
6041         (INST VPR128:$Rn, 
6042           (SUBREG_TO_REG (i64 0), EleOpVPR:$Re, sub_64), OpImm:$Index)>;
6043
6044 multiclass NI_2VEL_mul_v3_pat<string subop, SDPatternOperator op> {
6045   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
6046                          op, VPR64, VPR128Lo, v4i32, v4i16, v8i16,
6047                          BinOpFrag<(Neon_vduplane
6048                                      (Neon_low8H node:$LHS), node:$RHS)>>;
6049
6050   def : NI_2VE_mul_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
6051                          op, VPR64, VPR128, v2i64, v2i32, v4i32,
6052                          BinOpFrag<(Neon_vduplane
6053                                      (Neon_low4S node:$LHS), node:$RHS)>>;
6054
6055   def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
6056                            op, VPR128Lo, v4i32, v8i16, v8i16, v4i16,
6057                            Neon_High8H,
6058                            BinOpFrag<(Neon_vduplane
6059                                        (Neon_low8H node:$LHS), node:$RHS)>>;
6060   
6061   def : NI_2VEL2_mul_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
6062                            op, VPR128, v2i64, v4i32, v4i32, v2i32, Neon_High4S,
6063                            BinOpFrag<(Neon_vduplane
6064                                        (Neon_low4S node:$LHS), node:$RHS)>>;
6065   
6066   // When the lane comes from a 64-bit vector, the index can only be half of the max value
6067
6068   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
6069                         op, VPR64, VPR64Lo, v4i32, v4i16, v4i16,
6070                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
6071
6072   def : NI_2VE_mul_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
6073                         op, VPR64, VPR64, v2i64, v2i32, v2i32,
6074                         BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
6075
6076   def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
6077                           op, VPR64Lo, v4i32, v8i16, v4i16, v4i16, Neon_High8H,
6078                           BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
6079   
6080   def : NI_2VEL2_mul_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
6081                           op, VPR64, v2i64, v4i32, v2i32, v2i32, Neon_High4S,
6082                           BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
6083 }
6084
6085 defm SMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SMULLve", int_arm_neon_vmulls>;
6086 defm UMULL_lane_v3 : NI_2VEL_mul_v3_pat<"UMULLve", int_arm_neon_vmullu>;
6087 defm SQDMULL_lane_v3 : NI_2VEL_mul_v3_pat<"SQDMULLve", int_arm_neon_vqdmull>;
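// Illustrative assembly for the long multiply by-element forms (register
// choices are examples, not taken from this file):
//   smull   v0.4s, v1.4h, v2.h[2]
//   umull2  v0.2d, v1.4s, v2.s[3]
//   sqdmull v0.4s, v1.4h, v2.h[1]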
6088
6089 multiclass NI_qdma<SDPatternOperator op> {
6090   def _4s : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
6091                     (op node:$Ra,
6092                       (v4i32 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
6093
6094   def _2d : PatFrag<(ops node:$Ra, node:$Rn, node:$Rm),
6095                     (op node:$Ra,
6096                       (v2i64 (int_arm_neon_vqdmull node:$Rn, node:$Rm)))>;
6097 }
6098
6099 defm Neon_qdmlal : NI_qdma<int_arm_neon_vqadds>;
6100 defm Neon_qdmlsl : NI_qdma<int_arm_neon_vqsubs>;
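// These PatFrags spell out the saturating multiply-accumulate semantics:
// Neon_qdmlal_4s, for example, matches
//   (int_arm_neon_vqadds $Ra, (v4i32 (int_arm_neon_vqdmull $Rn, $Rm)))
// i.e. a saturating add of the accumulator and the sqdmull result, which the
// by-element patterns below fold into a single SQDMLAL/SQDMLSL instruction.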
6101
6102 multiclass NI_2VEL_v3_qdma_pat<string subop, string op> {
6103   def : NI_2VE_laneq<!cast<Instruction>(subop # "_4s4h"), neon_uimm3_bare,
6104                      !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR128Lo,
6105                      v4i32, v4i16, v8i16,
6106                      BinOpFrag<(Neon_vduplane
6107                                  (Neon_low8H node:$LHS), node:$RHS)>>;
6108   
6109   def : NI_2VE_laneq<!cast<Instruction>(subop # "_2d2s"), neon_uimm2_bare,
6110                      !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR128,
6111                      v2i64, v2i32, v4i32,
6112                      BinOpFrag<(Neon_vduplane
6113                                  (Neon_low4S node:$LHS), node:$RHS)>>;
6114   
6115   def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_4s8h"), neon_uimm3_bare,
6116                        !cast<PatFrag>(op # "_4s"), VPR128Lo,
6117                        v4i32, v8i16, v8i16, v4i16, Neon_High8H,
6118                        BinOpFrag<(Neon_vduplane
6119                                    (Neon_low8H node:$LHS), node:$RHS)>>;
6120   
6121   def : NI_2VEL2_laneq<!cast<Instruction>(subop # "_2d4s"), neon_uimm2_bare,
6122                        !cast<PatFrag>(op # "_2d"), VPR128,
6123                        v2i64, v4i32, v4i32, v2i32, Neon_High4S,
6124                        BinOpFrag<(Neon_vduplane
6125                                    (Neon_low4S node:$LHS), node:$RHS)>>;
6126   
6127   // When the lane comes from a 64-bit vector, the index can only be half of the max value
6128
6129   def : NI_2VE_lane<!cast<Instruction>(subop # "_4s4h"), neon_uimm2_bare,
6130                     !cast<PatFrag>(op # "_4s"), VPR128, VPR64, VPR64Lo,
6131                     v4i32, v4i16, v4i16,
6132                     BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
6133   
6134   def : NI_2VE_lane<!cast<Instruction>(subop # "_2d2s"), neon_uimm1_bare,
6135                     !cast<PatFrag>(op # "_2d"), VPR128, VPR64, VPR64,
6136                     v2i64, v2i32, v2i32,
6137                     BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
6138
6139   def : NI_2VEL2_lane<!cast<Instruction>(subop # "_4s8h"), neon_uimm2_bare,
6140                       !cast<PatFrag>(op # "_4s"), VPR64Lo,
6141                       v4i32, v8i16, v4i16, v4i16, Neon_High8H,
6142                       BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
6143   
6144   def : NI_2VEL2_lane<!cast<Instruction>(subop # "_2d4s"), neon_uimm1_bare,
6145                       !cast<PatFrag>(op # "_2d"), VPR64,
6146                       v2i64, v4i32, v2i32, v2i32, Neon_High4S,
6147                       BinOpFrag<(Neon_vduplane node:$LHS, node:$RHS)>>;
6148 }
6149
6150 defm SQDMLAL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLALvve", "Neon_qdmlal">;
6151 defm SQDMLSL_lane_v3 : NI_2VEL_v3_qdma_pat<"SQDMLSLvve", "Neon_qdmlsl">;
6152
6153 // End of implementation for instruction class (3V Elem)
6154
6155 class NeonI_INS_main<string asmop, string Res, ValueType ResTy,
6156                      RegisterClass OpGPR, ValueType OpTy, Operand OpImm>
6157   : NeonI_copy<0b1, 0b0, 0b0011,
6158                (outs VPR128:$Rd), (ins VPR128:$src, OpGPR:$Rn, OpImm:$Imm),
6159                asmop # "\t$Rd." # Res # "[$Imm], $Rn",
6160                [(set (ResTy VPR128:$Rd),
6161                  (ResTy (vector_insert
6162                    (ResTy VPR128:$src),
6163                    (OpTy OpGPR:$Rn),
6164                    (OpImm:$Imm))))],
6165                NoItinerary> {
6166   bits<4> Imm;
6167   let Constraints = "$src = $Rd";
6168 }
6169
6170 // Insert element (vector, from main)
6171 def INSbw : NeonI_INS_main<"ins", "b", v16i8, GPR32, i32,
6172                            neon_uimm4_bare> {
6173   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
6174 }
6175 def INShw : NeonI_INS_main<"ins", "h", v8i16, GPR32, i32,
6176                            neon_uimm3_bare> {
6177   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
6178 }
6179 def INSsw : NeonI_INS_main<"ins", "s", v4i32, GPR32, i32,
6180                            neon_uimm2_bare> {
6181   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
6182 }
6183 def INSdx : NeonI_INS_main<"ins", "d", v2i64, GPR64, i64,
6184                            neon_uimm1_bare> {
6185   let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
6186 }
6187
6188 def : NeonInstAlias<"mov $Rd.b[$Imm], $Rn",
6189                     (INSbw VPR128:$Rd, GPR32:$Rn, neon_uimm4_bare:$Imm), 0>;
6190 def : NeonInstAlias<"mov $Rd.h[$Imm], $Rn",
6191                     (INShw VPR128:$Rd, GPR32:$Rn, neon_uimm3_bare:$Imm), 0>;
6192 def : NeonInstAlias<"mov $Rd.s[$Imm], $Rn",
6193                     (INSsw VPR128:$Rd, GPR32:$Rn, neon_uimm2_bare:$Imm), 0>;
6194 def : NeonInstAlias<"mov $Rd.d[$Imm], $Rn",
6195                     (INSdx VPR128:$Rd, GPR64:$Rn, neon_uimm1_bare:$Imm), 0>;
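// Illustrative assembly for the GPR-to-element forms and their "mov" aliases
// (register choices are examples, not taken from this file):
//   ins v0.b[15], w1
//   ins v0.h[3],  w2
//   ins v0.s[1],  w3
//   ins v0.d[0],  x4
//   mov v0.s[1],  w3        // alias of ins v0.s[1], w3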
6196
6197 class Neon_INS_main_pattern <ValueType ResTy,ValueType ExtResTy,
6198                              RegisterClass OpGPR, ValueType OpTy, 
6199                              Operand OpImm, Instruction INS> 
6200   : Pat<(ResTy (vector_insert
6201               (ResTy VPR64:$src),
6202               (OpTy OpGPR:$Rn),
6203               (OpImm:$Imm))),
6204         (ResTy (EXTRACT_SUBREG 
6205           (ExtResTy (INS (ExtResTy (SUBREG_TO_REG (i64 0), VPR64:$src, sub_64)),
6206             OpGPR:$Rn, OpImm:$Imm)), sub_64))>;
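// An insert into a 64-bit vector is handled by widening: the 64-bit source is
// placed in the low half of a Q register (SUBREG_TO_REG), the 128-bit INS is
// performed, and the low 64 bits are taken back with EXTRACT_SUBREG.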
6207
6208 def INSbw_pattern : Neon_INS_main_pattern<v8i8, v16i8, GPR32, i32,
6209                                           neon_uimm3_bare, INSbw>;
6210 def INShw_pattern : Neon_INS_main_pattern<v4i16, v8i16, GPR32, i32,
6211                                           neon_uimm2_bare, INShw>;
6212 def INSsw_pattern : Neon_INS_main_pattern<v2i32, v4i32, GPR32, i32,
6213                                           neon_uimm1_bare, INSsw>;
6214 def INSdx_pattern : Neon_INS_main_pattern<v1i64, v2i64, GPR64, i64,
6215                                           neon_uimm0_bare, INSdx>;
6216
6217 class NeonI_INS_element<string asmop, string Res, Operand ResImm>
6218   : NeonI_insert<0b1, 0b1,
6219                  (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn, 
6220                  ResImm:$Immd, ResImm:$Immn),
6221                  asmop # "\t$Rd." # Res # "[$Immd], $Rn." # Res # "[$Immn]",
6222                  [],
6223                  NoItinerary> {
6224   let Constraints = "$src = $Rd";
6225   bits<4> Immd;
6226   bits<4> Immn;
6227 }
6228
6229 // Insert element (vector, from element)
6230 def INSELb : NeonI_INS_element<"ins", "b", neon_uimm4_bare> {
6231   let Inst{20-16} = {Immd{3}, Immd{2}, Immd{1}, Immd{0}, 0b1};
6232   let Inst{14-11} = {Immn{3}, Immn{2}, Immn{1}, Immn{0}};
6233 }
6234 def INSELh : NeonI_INS_element<"ins", "h", neon_uimm3_bare> {
6235   let Inst{20-16} = {Immd{2}, Immd{1}, Immd{0}, 0b1, 0b0};
6236   let Inst{14-11} = {Immn{2}, Immn{1}, Immn{0}, 0b0};
6237   // bit 11 is unspecified, but should be set to zero.
6238 }
6239 def INSELs : NeonI_INS_element<"ins", "s", neon_uimm2_bare> {
6240   let Inst{20-16} = {Immd{1}, Immd{0}, 0b1, 0b0, 0b0};
6241   let Inst{14-11} = {Immn{1}, Immn{0}, 0b0, 0b0};
6242   // bits 11-12 are unspecified, but should be set to zero.
6243 }
6244 def INSELd : NeonI_INS_element<"ins", "d", neon_uimm1_bare> {
6245   let Inst{20-16} = {Immd, 0b1, 0b0, 0b0, 0b0};
6246   let Inst{14-11} = {Immn{0}, 0b0, 0b0, 0b0};
6247   // bits 11-13 are unspecified, but should be set to zero.
6248 }
6249
6250 def : NeonInstAlias<"mov $Rd.b[$Immd], $Rn.b[$Immn]",
6251                     (INSELb VPR128:$Rd, VPR128:$Rn,
6252                       neon_uimm4_bare:$Immd, neon_uimm4_bare:$Immn), 0>;
6253 def : NeonInstAlias<"mov $Rd.h[$Immd], $Rn.h[$Immn]",
6254                     (INSELh VPR128:$Rd, VPR128:$Rn,
6255                       neon_uimm3_bare:$Immd, neon_uimm3_bare:$Immn), 0>;
6256 def : NeonInstAlias<"mov $Rd.s[$Immd], $Rn.s[$Immn]",
6257                     (INSELs VPR128:$Rd, VPR128:$Rn,
6258                       neon_uimm2_bare:$Immd, neon_uimm2_bare:$Immn), 0>;
6259 def : NeonInstAlias<"mov $Rd.d[$Immd], $Rn.d[$Immn]",
6260                     (INSELd VPR128:$Rd, VPR128:$Rn,
6261                       neon_uimm1_bare:$Immd, neon_uimm1_bare:$Immn), 0>;
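// Illustrative assembly for the element-to-element forms and their "mov"
// aliases (register choices are examples, not taken from this file):
//   ins v0.b[5], v1.b[7]
//   ins v0.s[0], v1.s[3]
//   mov v0.s[0], v1.s[3]    // alias of ins v0.s[0], v1.s[3]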
6262
6263 multiclass Neon_INS_elt_pattern<ValueType ResTy, ValueType NaTy,
6264                                 ValueType MidTy, Operand StImm, Operand NaImm,
6265                                 Instruction INS> {
6266 def : Pat<(ResTy (vector_insert
6267             (ResTy VPR128:$src),
6268             (MidTy (vector_extract
6269               (ResTy VPR128:$Rn),
6270               (StImm:$Immn))),
6271             (StImm:$Immd))),
6272           (INS (ResTy VPR128:$src), (ResTy VPR128:$Rn),
6273               StImm:$Immd, StImm:$Immn)>;
6274
6275 def : Pat <(ResTy (vector_insert
6276              (ResTy VPR128:$src),
6277              (MidTy (vector_extract
6278                (NaTy VPR64:$Rn),
6279                (NaImm:$Immn))),
6280              (StImm:$Immd))),
6281            (INS (ResTy VPR128:$src),
6282              (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$Rn), sub_64)),
6283              StImm:$Immd, NaImm:$Immn)>;
6284
6285 def : Pat <(NaTy (vector_insert
6286              (NaTy VPR64:$src),
6287              (MidTy (vector_extract
6288                (ResTy VPR128:$Rn),
6289                (StImm:$Immn))),
6290              (NaImm:$Immd))),
6291            (NaTy (EXTRACT_SUBREG
6292              (ResTy (INS
6293                (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
6294                (ResTy VPR128:$Rn),
6295                NaImm:$Immd, StImm:$Immn)),
6296              sub_64))>;
6297
6298 def : Pat <(NaTy (vector_insert
6299              (NaTy VPR64:$src),
6300              (MidTy (vector_extract
6301                (NaTy VPR64:$Rn),
6302                (NaImm:$Immn))),
6303              (NaImm:$Immd))),
6304            (NaTy (EXTRACT_SUBREG
6305              (ResTy (INS
6306                (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
6307                (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$Rn), sub_64)),
6308                NaImm:$Immd, NaImm:$Immn)),
6309              sub_64))>;
6310 }
6311
6312 defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, neon_uimm2_bare,
6313                             neon_uimm1_bare, INSELs>;
6314 defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, neon_uimm1_bare,
6315                             neon_uimm0_bare, INSELd>;
6316 defm : Neon_INS_elt_pattern<v16i8, v8i8, i32, neon_uimm4_bare,
6317                             neon_uimm3_bare, INSELb>;
6318 defm : Neon_INS_elt_pattern<v8i16, v4i16, i32, neon_uimm3_bare,
6319                             neon_uimm2_bare, INSELh>;
6320 defm : Neon_INS_elt_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
6321                             neon_uimm1_bare, INSELs>;
6322 defm : Neon_INS_elt_pattern<v2i64, v1i64, i64, neon_uimm1_bare,
6323                             neon_uimm0_bare, INSELd>;
6324
6325 multiclass Neon_INS_elt_float_pattern<ValueType ResTy, ValueType NaTy,
6326                                       ValueType MidTy,
6327                                       RegisterClass OpFPR, Operand ResImm,
6328                                       SubRegIndex SubIndex, Instruction INS> {
6329 def : Pat <(ResTy (vector_insert
6330              (ResTy VPR128:$src),
6331              (MidTy OpFPR:$Rn),
6332              (ResImm:$Imm))),
6333            (INS (ResTy VPR128:$src),
6334              (ResTy (SUBREG_TO_REG (i64 0), OpFPR:$Rn, SubIndex)),
6335              ResImm:$Imm,
6336              (i64 0))>;
6337
6338 def : Pat <(NaTy (vector_insert
6339              (NaTy VPR64:$src),
6340              (MidTy OpFPR:$Rn),
6341              (ResImm:$Imm))),
6342            (NaTy (EXTRACT_SUBREG 
6343              (ResTy (INS 
6344                (ResTy (SUBREG_TO_REG (i64 0), (NaTy VPR64:$src), sub_64)),
6345                (ResTy (SUBREG_TO_REG (i64 0), (MidTy OpFPR:$Rn), SubIndex)),
6346                ResImm:$Imm,
6347                (i64 0))),
6348              sub_64))>;
6349 }
6350
6351 defm : Neon_INS_elt_float_pattern<v4f32, v2f32, f32, FPR32, neon_uimm2_bare,
6352                                   sub_32, INSELs>;
6353 defm : Neon_INS_elt_float_pattern<v2f64, v1f64, f64, FPR64, neon_uimm1_bare,
6354                                   sub_64, INSELd>;
6355
6356 class NeonI_SMOV<string asmop, string Res, bit Q,
6357                  ValueType OpTy, ValueType eleTy,
6358                  Operand OpImm, RegisterClass ResGPR, ValueType ResTy>
6359   : NeonI_copy<Q, 0b0, 0b0101,
6360                (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
6361                asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
6362                [(set (ResTy ResGPR:$Rd),
6363                  (ResTy (sext_inreg
6364                    (ResTy (vector_extract
6365                      (OpTy VPR128:$Rn), (OpImm:$Imm))),
6366                    eleTy)))],
6367                NoItinerary> {
6368   bits<4> Imm;
6369 }
6370
6371 // Signed integer move (main, from element)
6372 def SMOVwb : NeonI_SMOV<"smov", "b", 0b0, v16i8, i8, neon_uimm4_bare,
6373                         GPR32, i32> {
6374   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
6375 }
6376 def SMOVwh : NeonI_SMOV<"smov", "h", 0b0, v8i16, i16, neon_uimm3_bare,
6377                         GPR32, i32> {
6378   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
6379 }
6380 def SMOVxb : NeonI_SMOV<"smov", "b", 0b1, v16i8, i8, neon_uimm4_bare,
6381                         GPR64, i64> {
6382   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
6383 }
6384 def SMOVxh : NeonI_SMOV<"smov", "h", 0b1, v8i16, i16, neon_uimm3_bare,
6385                         GPR64, i64> {
6386   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
6387 }
6388 def SMOVxs : NeonI_SMOV<"smov", "s", 0b1, v4i32, i32, neon_uimm2_bare,
6389                         GPR64, i64> {
6390   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
6391 }
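// Illustrative assembly (register choices are examples): SMOV sign-extends
// the selected element into the destination GPR.
//   smov w0, v1.b[3]
//   smov w0, v1.h[2]
//   smov x0, v1.b[3]
//   smov x0, v1.h[7]
//   smov x0, v1.s[1]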
6392
6393 multiclass Neon_SMOVx_pattern <ValueType StTy, ValueType NaTy,
6394                                ValueType eleTy, Operand StImm,  Operand NaImm,
6395                                Instruction SMOVI> {
6396   def : Pat<(i64 (sext_inreg
6397               (i64 (anyext
6398                 (i32 (vector_extract
6399                   (StTy VPR128:$Rn), (StImm:$Imm))))),
6400               eleTy)),
6401             (SMOVI VPR128:$Rn, StImm:$Imm)>;
6402   
6403   def : Pat<(i64 (sext
6404               (i32 (vector_extract
6405                 (StTy VPR128:$Rn), (StImm:$Imm))))),
6406             (SMOVI VPR128:$Rn, StImm:$Imm)>;
6407   
6408   def : Pat<(i64 (sext_inreg
6409               (i64 (vector_extract
6410                 (NaTy VPR64:$Rn), (NaImm:$Imm))),
6411               eleTy)),
6412             (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6413               NaImm:$Imm)>;
6414   
6415   def : Pat<(i64 (sext_inreg
6416               (i64 (anyext
6417                 (i32 (vector_extract
6418                   (NaTy VPR64:$Rn), (NaImm:$Imm))))),
6419               eleTy)),
6420             (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6421               NaImm:$Imm)>;
6422   
6423   def : Pat<(i64 (sext
6424               (i32 (vector_extract
6425                 (NaTy VPR64:$Rn), (NaImm:$Imm))))),
6426             (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6427               NaImm:$Imm)>; 
6428 }
6429
6430 defm : Neon_SMOVx_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
6431                           neon_uimm3_bare, SMOVxb>;
6432 defm : Neon_SMOVx_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
6433                           neon_uimm2_bare, SMOVxh>;
6434 defm : Neon_SMOVx_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
6435                           neon_uimm1_bare, SMOVxs>;
6436
6437 class Neon_SMOVw_pattern <ValueType StTy, ValueType NaTy,
6438                           ValueType eleTy, Operand StImm,  Operand NaImm,
6439                           Instruction SMOVI>
6440   : Pat<(i32 (sext_inreg
6441           (i32 (vector_extract
6442             (NaTy VPR64:$Rn), (NaImm:$Imm))),
6443           eleTy)),
6444         (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6445           NaImm:$Imm)>;
6446
6447 def : Neon_SMOVw_pattern<v16i8, v8i8, i8, neon_uimm4_bare,
6448                          neon_uimm3_bare, SMOVwb>;
6449 def : Neon_SMOVw_pattern<v8i16, v4i16, i16, neon_uimm3_bare,
6450                          neon_uimm2_bare, SMOVwh>;
6451
6452 class NeonI_UMOV<string asmop, string Res, bit Q,
6453                  ValueType OpTy, Operand OpImm,
6454                  RegisterClass ResGPR, ValueType ResTy>
6455   : NeonI_copy<Q, 0b0, 0b0111,
6456                (outs ResGPR:$Rd), (ins VPR128:$Rn, OpImm:$Imm),
6457                asmop # "\t$Rd, $Rn." # Res # "[$Imm]",
6458                [(set (ResTy ResGPR:$Rd),
6459                   (ResTy (vector_extract
6460                     (OpTy VPR128:$Rn), (OpImm:$Imm))))],
6461                NoItinerary> {
6462   bits<4> Imm;
6463 }
6464
6465 // Unsigned integer move (main, from element)
6466 def UMOVwb : NeonI_UMOV<"umov", "b", 0b0, v16i8, neon_uimm4_bare,
6467                          GPR32, i32> {
6468   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
6469 }
6470 def UMOVwh : NeonI_UMOV<"umov", "h", 0b0, v8i16, neon_uimm3_bare,
6471                          GPR32, i32> {
6472   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
6473 }
6474 def UMOVws : NeonI_UMOV<"umov", "s", 0b0, v4i32, neon_uimm2_bare,
6475                          GPR32, i32> {
6476   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
6477 }
6478 def UMOVxd : NeonI_UMOV<"umov", "d", 0b1, v2i64, neon_uimm1_bare,
6479                          GPR64, i64> {
6480   let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
6481 }
6482
6483 def : NeonInstAlias<"mov $Rd, $Rn.s[$Imm]",
6484                     (UMOVws GPR32:$Rd, VPR128:$Rn, neon_uimm2_bare:$Imm), 0>;
6485 def : NeonInstAlias<"mov $Rd, $Rn.d[$Imm]",
6486                     (UMOVxd GPR64:$Rd, VPR128:$Rn, neon_uimm1_bare:$Imm), 0>;
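// Illustrative assembly (register choices are examples): UMOV zero-extends
// the selected element into the destination GPR.
//   umov w0, v1.b[0]
//   umov w0, v1.h[5]
//   umov w0, v1.s[3]
//   umov x0, v1.d[1]
//   mov  w0, v1.s[3]        // alias of umov w0, v1.s[3]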
6487
6488 class Neon_UMOV_pattern <ValueType StTy, ValueType NaTy, ValueType ResTy,
6489                          Operand StImm,  Operand NaImm,
6490                          Instruction SMOVI>
6491   : Pat<(ResTy (vector_extract
6492           (NaTy VPR64:$Rn), NaImm:$Imm)),
6493         (SMOVI (StTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6494           NaImm:$Imm)>;
6495
6496 def : Neon_UMOV_pattern<v16i8, v8i8, i32, neon_uimm4_bare,
6497                         neon_uimm3_bare, UMOVwb>;
6498 def : Neon_UMOV_pattern<v8i16, v4i16, i32, neon_uimm3_bare,
6499                         neon_uimm2_bare, UMOVwh>; 
6500 def : Neon_UMOV_pattern<v4i32, v2i32, i32, neon_uimm2_bare,
6501                         neon_uimm1_bare, UMOVws>;
6502
6503 def : Pat<(i32 (and
6504             (i32 (vector_extract
6505               (v16i8 VPR128:$Rn), (neon_uimm4_bare:$Imm))),
6506             255)),
6507           (UMOVwb VPR128:$Rn, neon_uimm4_bare:$Imm)>;
6508
6509 def : Pat<(i32 (and
6510             (i32 (vector_extract
6511               (v8i16 VPR128:$Rn), (neon_uimm3_bare:$Imm))),
6512             65535)),
6513           (UMOVwh VPR128:$Rn, neon_uimm3_bare:$Imm)>;
6514
6515 def : Pat<(i64 (zext
6516             (i32 (vector_extract
6517               (v2i64 VPR128:$Rn), (neon_uimm1_bare:$Imm))))),
6518           (UMOVxd VPR128:$Rn, neon_uimm1_bare:$Imm)>;
6519
6520 def : Pat<(i32 (and
6521             (i32 (vector_extract
6522               (v8i8 VPR64:$Rn), (neon_uimm3_bare:$Imm))),
6523             255)),
6524           (UMOVwb (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
6525             neon_uimm3_bare:$Imm)>;
6526
6527 def : Pat<(i32 (and
6528             (i32 (vector_extract
6529               (v4i16 VPR64:$Rn), (neon_uimm2_bare:$Imm))),
6530             65535)),
6531           (UMOVwh (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
6532             neon_uimm2_bare:$Imm)>;
6533
6534 def : Pat<(i64 (zext
6535             (i32 (vector_extract
6536               (v1i64 VPR64:$Rn), (neon_uimm0_bare:$Imm))))),
6537           (UMOVxd (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64),
6538             neon_uimm0_bare:$Imm)>;
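// The masking (and with 255 / 65535) and zext patterns above are redundant
// with UMOV's own zero-extension of the element, so they are selected to a
// plain UMOV with no extra masking instruction.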
6539
6540 // Additional copy patterns for scalar types
6541 def : Pat<(i32 (vector_extract (v1i8 FPR8:$Rn), (i64 0))),
6542           (UMOVwb (v16i8
6543             (SUBREG_TO_REG (i64 0), FPR8:$Rn, sub_8)), (i64 0))>;
6544
6545 def : Pat<(i32 (vector_extract (v1i16 FPR16:$Rn), (i64 0))),
6546           (UMOVwh (v8i16
6547             (SUBREG_TO_REG (i64 0), FPR16:$Rn, sub_16)), (i64 0))>;
6548
6549 def : Pat<(i32 (vector_extract (v1i32 FPR32:$Rn), (i64 0))),
6550           (FMOVws FPR32:$Rn)>;
6551
6552 def : Pat<(i64 (vector_extract (v1i64 FPR64:$Rn), (i64 0))),
6553           (FMOVxd FPR64:$Rn)>;
6554                
6555 def : Pat<(f64 (vector_extract (v1f64 FPR64:$Rn), (i64 0))),
6556           (f64 FPR64:$Rn)>;
6557
6558 def : Pat<(f32 (vector_extract (v1f32 FPR32:$Rn), (i64 0))),
6559           (f32 FPR32:$Rn)>;
6560
6561 def : Pat<(v1i8 (scalar_to_vector GPR32:$Rn)),
6562           (v1i8 (EXTRACT_SUBREG (v16i8
6563             (INSbw (v16i8 (IMPLICIT_DEF)), $Rn, (i64 0))),
6564             sub_8))>;
6565
6566 def : Pat<(v1i16 (scalar_to_vector GPR32:$Rn)),
6567           (v1i16 (EXTRACT_SUBREG (v8i16
6568             (INShw (v8i16 (IMPLICIT_DEF)), $Rn, (i64 0))),
6569             sub_16))>;
6570
6571 def : Pat<(v1i32 (scalar_to_vector GPR32:$src)),
6572           (FMOVsw $src)>;
6573
6574 def : Pat<(v1i64 (scalar_to_vector GPR64:$src)),
6575           (FMOVdx $src)>;
6576
6577 def : Pat<(v1f32 (scalar_to_vector (f32 FPR32:$Rn))),
6578           (v1f32 FPR32:$Rn)>;
6579 def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Rn))),
6580           (v1f64 FPR64:$Rn)>;
6581
6582 def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$src))),
6583           (FMOVdd $src)>;
6584
6585 def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$src))),
6586           (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)),
6587                          (f64 FPR64:$src), sub_64)>;
6588
6589 class NeonI_DUP_Elt<bit Q, string asmop, string rdlane,  string rnlane,
6590                     RegisterOperand ResVPR, Operand OpImm>
6591   : NeonI_copy<Q, 0b0, 0b0000, (outs ResVPR:$Rd),
6592                (ins VPR128:$Rn, OpImm:$Imm),
6593                asmop # "\t$Rd" # rdlane # ", $Rn" # rnlane # "[$Imm]",
6594                [],
6595                NoItinerary> {
6596   bits<4> Imm;
6597 }
6598
6599 def DUPELT16b : NeonI_DUP_Elt<0b1, "dup", ".16b", ".b", VPR128,
6600                               neon_uimm4_bare> {
6601   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
6602 }
6603
6604 def DUPELT8h : NeonI_DUP_Elt<0b1, "dup", ".8h", ".h", VPR128,
6605                               neon_uimm3_bare> {
6606   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
6607 }
6608
6609 def DUPELT4s : NeonI_DUP_Elt<0b1, "dup", ".4s", ".s", VPR128,
6610                               neon_uimm2_bare> {
6611   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
6612 }
6613
6614 def DUPELT2d : NeonI_DUP_Elt<0b1, "dup", ".2d", ".d", VPR128,
6615                               neon_uimm1_bare> {
6616   let Inst{20-16} = {Imm, 0b1, 0b0, 0b0, 0b0};
6617 }
6618
6619 def DUPELT8b : NeonI_DUP_Elt<0b0, "dup", ".8b", ".b", VPR64,
6620                               neon_uimm4_bare> {
6621   let Inst{20-16} = {Imm{3}, Imm{2}, Imm{1}, Imm{0}, 0b1};
6622 }
6623
6624 def DUPELT4h : NeonI_DUP_Elt<0b0, "dup", ".4h", ".h", VPR64,
6625                               neon_uimm3_bare> {
6626   let Inst{20-16} = {Imm{2}, Imm{1}, Imm{0}, 0b1, 0b0};
6627 }
6628
6629 def DUPELT2s : NeonI_DUP_Elt<0b0, "dup", ".2s", ".s", VPR64,
6630                               neon_uimm2_bare> {
6631   let Inst{20-16} = {Imm{1}, Imm{0}, 0b1, 0b0, 0b0};
6632 }
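// Illustrative assembly for the duplicate-from-element forms (register
// choices are examples, not taken from this file):
//   dup v0.16b, v1.b[10]
//   dup v0.8h,  v1.h[5]
//   dup v0.4s,  v1.s[2]
//   dup v0.2d,  v1.d[0]
//   dup v0.2s,  v1.s[1]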
6633
6634 multiclass NeonI_DUP_Elt_pattern<Instruction DUPELT, ValueType ResTy,
6635                                        ValueType OpTy,ValueType NaTy,
6636                                        ValueType ExTy, Operand OpLImm,
6637                                        Operand OpNImm> {
6638 def  : Pat<(ResTy (Neon_vduplane (OpTy VPR128:$Rn), OpLImm:$Imm)),
6639         (ResTy (DUPELT (OpTy VPR128:$Rn), OpLImm:$Imm))>;
6640
6641 def : Pat<(ResTy (Neon_vduplane
6642             (NaTy VPR64:$Rn), OpNImm:$Imm)),
6643           (ResTy (DUPELT
6644             (ExTy (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)), OpNImm:$Imm))>;
6645 }
6646 defm : NeonI_DUP_Elt_pattern<DUPELT16b, v16i8, v16i8, v8i8, v16i8,
6647                              neon_uimm4_bare, neon_uimm3_bare>;
6648 defm : NeonI_DUP_Elt_pattern<DUPELT8b, v8i8, v16i8, v8i8, v16i8,
6649                              neon_uimm4_bare, neon_uimm3_bare>;
6650 defm : NeonI_DUP_Elt_pattern<DUPELT8h, v8i16, v8i16, v4i16, v8i16,
6651                              neon_uimm3_bare, neon_uimm2_bare>;
6652 defm : NeonI_DUP_Elt_pattern<DUPELT4h, v4i16, v8i16, v4i16, v8i16,
6653                              neon_uimm3_bare, neon_uimm2_bare>;
6654 defm : NeonI_DUP_Elt_pattern<DUPELT4s, v4i32, v4i32, v2i32, v4i32,
6655                              neon_uimm2_bare, neon_uimm1_bare>;
6656 defm : NeonI_DUP_Elt_pattern<DUPELT2s, v2i32, v4i32, v2i32, v4i32,
6657                              neon_uimm2_bare, neon_uimm1_bare>;
6658 defm : NeonI_DUP_Elt_pattern<DUPELT2d, v2i64, v2i64, v1i64, v2i64,
6659                              neon_uimm1_bare, neon_uimm0_bare>;
6660 defm : NeonI_DUP_Elt_pattern<DUPELT4s, v4f32, v4f32, v2f32, v4f32,
6661                              neon_uimm2_bare, neon_uimm1_bare>;
6662 defm : NeonI_DUP_Elt_pattern<DUPELT2s, v2f32, v4f32, v2f32, v4f32,
6663                              neon_uimm2_bare, neon_uimm1_bare>;
6664 defm : NeonI_DUP_Elt_pattern<DUPELT2d, v2f64, v2f64, v1f64, v2f64,
6665                              neon_uimm1_bare, neon_uimm0_bare>;
6666
6667 def : Pat<(v2f32 (Neon_vdup (f32 FPR32:$Rn))),
6668           (v2f32 (DUPELT2s 
6669             (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
6670             (i64 0)))>;
6671 def : Pat<(v4f32 (Neon_vdup (f32 FPR32:$Rn))),
6672           (v4f32 (DUPELT4s 
6673             (SUBREG_TO_REG (i64 0), FPR32:$Rn, sub_32),
6674             (i64 0)))>;
6675 def : Pat<(v2f64 (Neon_vdup (f64 FPR64:$Rn))),
6676           (v2f64 (DUPELT2d 
6677             (SUBREG_TO_REG (i64 0), FPR64:$Rn, sub_64),
6678             (i64 0)))>;
6679
6680 class NeonI_DUP<bit Q, string asmop, string rdlane,
6681                 RegisterOperand ResVPR, ValueType ResTy,
6682                 RegisterClass OpGPR, ValueType OpTy>
6683   : NeonI_copy<Q, 0b0, 0b0001, (outs ResVPR:$Rd), (ins OpGPR:$Rn),
6684                asmop # "\t$Rd" # rdlane # ", $Rn",
6685                [(set (ResTy ResVPR:$Rd), 
6686                  (ResTy (Neon_vdup (OpTy OpGPR:$Rn))))],
6687                NoItinerary>;
6688
6689 def DUP16b : NeonI_DUP<0b1, "dup", ".16b", VPR128, v16i8, GPR32, i32> {
6690   let Inst{20-16} = 0b00001;
6691   // bits 17-20 are unspecified, but should be set to zero.
6692 }
6693
6694 def DUP8h : NeonI_DUP<0b1, "dup", ".8h", VPR128, v8i16, GPR32, i32> {
6695   let Inst{20-16} = 0b00010;
6696   // bits 18-20 are unspecified, but should be set to zero.
6697 }
6698
6699 def DUP4s : NeonI_DUP<0b1, "dup", ".4s", VPR128, v4i32, GPR32, i32> {
6700   let Inst{20-16} = 0b00100;
6701   // bits 19-20 are unspecified, but should be set to zero.
6702 }
6703
6704 def DUP2d : NeonI_DUP<0b1, "dup", ".2d", VPR128, v2i64, GPR64, i64> {
6705   let Inst{20-16} = 0b01000;
6706   // bit 20 is unspecified, but should be set to zero.
6707 }
6708
6709 def DUP8b : NeonI_DUP<0b0, "dup", ".8b", VPR64, v8i8, GPR32, i32> {
6710   let Inst{20-16} = 0b00001;
6711   // bits 17-20 are unspecified, but should be set to zero.
6712 }
6713
6714 def DUP4h : NeonI_DUP<0b0, "dup", ".4h", VPR64, v4i16, GPR32, i32> {
6715   let Inst{20-16} = 0b00010;
6716   // bits 18-20 are unspecified, but should be set to zero.
6717 }
6718
6719 def DUP2s : NeonI_DUP<0b0, "dup", ".2s", VPR64, v2i32, GPR32, i32> {
6720   let Inst{20-16} = 0b00100;
6721   // bits 19-20 are unspecified, but should be set to zero.
6722 }
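// Illustrative assembly for the duplicate-from-general-register forms
// (register choices are examples, not taken from this file):
//   dup v0.16b, w1
//   dup v0.4s,  w2
//   dup v0.2d,  x3
//   dup v0.8b,  w4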
6723
6724 // Patterns for CONCAT_VECTORS
6725 multiclass Concat_Vector_Pattern<ValueType ResTy, ValueType OpTy> {
6726 def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), undef)),
6727           (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)>;
6728 def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), (OpTy VPR64:$Rm))),
6729           (INSELd 
6730             (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6731             (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rm, sub_64)),
6732             (i64 1),
6733             (i64 0))>;
6734 def : Pat<(ResTy (concat_vectors (OpTy VPR64:$Rn), (OpTy VPR64:$Rn))),
6735           (DUPELT2d 
6736             (v2i64 (SUBREG_TO_REG (i64 0), VPR64:$Rn, sub_64)),
6737             (i64 0))>;
6738 }
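// The three patterns above map, roughly, to:
//   concat_vectors(Vn, undef) -> reuse Vn as the low half of a Q register
//                                (SUBREG_TO_REG only, no instruction)
//   concat_vectors(Vn, Vm)    -> ins Vd.d[1], Vm.d[0], with Vn in the low half
//   concat_vectors(Vn, Vn)    -> dup Vd.2d, Vn.d[0]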
6739
6740 defm : Concat_Vector_Pattern<v16i8, v8i8>;
6741 defm : Concat_Vector_Pattern<v8i16, v4i16>;
6742 defm : Concat_Vector_Pattern<v4i32, v2i32>;
6743 defm : Concat_Vector_Pattern<v2i64, v1i64>;
6744 defm : Concat_Vector_Pattern<v4f32, v2f32>;
6745 defm : Concat_Vector_Pattern<v2f64, v1f64>;
6746
6747 // Patterns for EXTRACT_SUBVECTOR
6748 def : Pat<(v8i8 (extract_subvector (v16i8 VPR128:$Rn), (i64 0))),
6749           (v8i8 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
6750 def : Pat<(v4i16 (extract_subvector (v8i16 VPR128:$Rn), (i64 0))),
6751           (v4i16 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
6752 def : Pat<(v2i32 (extract_subvector (v4i32 VPR128:$Rn), (i64 0))),
6753           (v2i32 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
6754 def : Pat<(v1i64 (extract_subvector (v2i64 VPR128:$Rn), (i64 0))),
6755           (v1i64 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
6756 def : Pat<(v2f32 (extract_subvector (v4f32 VPR128:$Rn), (i64 0))),
6757           (v2f32 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
6758 def : Pat<(v1f64 (extract_subvector (v2f64 VPR128:$Rn), (i64 0))),
6759           (v1f64 (EXTRACT_SUBREG VPR128:$Rn, sub_64))>;
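// Extracting the low half of a 128-bit vector needs no instruction; it is
// selected to a plain sub-register (sub_64) extraction.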
6760
6761 class NeonI_REV<string asmop, string Res, bits<2> size, bit Q, bit U,
6762                 bits<5> opcode, RegisterOperand ResVPR, ValueType ResTy,
6763                 SDPatternOperator Neon_Rev>
6764   : NeonI_2VMisc<Q, U, size, opcode,
6765                (outs ResVPR:$Rd), (ins ResVPR:$Rn),
6766                asmop # "\t$Rd." # Res # ", $Rn." # Res,
6767                [(set (ResTy ResVPR:$Rd),
6768                   (ResTy (Neon_Rev (ResTy ResVPR:$Rn))))],
6769                NoItinerary> ;
6770
6771 def REV64_16b : NeonI_REV<"rev64", "16b", 0b00, 0b1, 0b0, 0b00000, VPR128,
6772                           v16i8, Neon_rev64>;
6773 def REV64_8h : NeonI_REV<"rev64", "8h", 0b01, 0b1, 0b0, 0b00000, VPR128,
6774                          v8i16, Neon_rev64>;
6775 def REV64_4s : NeonI_REV<"rev64", "4s", 0b10, 0b1, 0b0, 0b00000, VPR128,
6776                          v4i32, Neon_rev64>;
6777 def REV64_8b : NeonI_REV<"rev64", "8b", 0b00, 0b0, 0b0, 0b00000, VPR64,
6778                          v8i8, Neon_rev64>;
6779 def REV64_4h : NeonI_REV<"rev64", "4h", 0b01, 0b0, 0b0, 0b00000, VPR64,
6780                          v4i16, Neon_rev64>;
6781 def REV64_2s : NeonI_REV<"rev64", "2s", 0b10, 0b0, 0b0, 0b00000, VPR64,
6782                          v2i32, Neon_rev64>;
6783
6784 def : Pat<(v4f32 (Neon_rev64 (v4f32 VPR128:$Rn))), (REV64_4s VPR128:$Rn)>;
6785 def : Pat<(v2f32 (Neon_rev64 (v2f32 VPR64:$Rn))), (REV64_2s VPR64:$Rn)>;
6786
6787 def REV32_16b : NeonI_REV<"rev32", "16b", 0b00, 0b1, 0b1, 0b00000, VPR128,
6788                           v16i8, Neon_rev32>;
6789 def REV32_8h : NeonI_REV<"rev32", "8h", 0b01, 0b1, 0b1, 0b00000, VPR128,
6790                           v8i16, Neon_rev32>;
6791 def REV32_8b : NeonI_REV<"rev32", "8b", 0b00, 0b0, 0b1, 0b00000, VPR64,
6792                          v8i8, Neon_rev32>;
6793 def REV32_4h : NeonI_REV<"rev32", "4h", 0b01, 0b0, 0b1, 0b00000, VPR64,
6794                          v4i16, Neon_rev32>;
6795
6796 def REV16_16b : NeonI_REV<"rev16", "16b", 0b00, 0b1, 0b0, 0b00001, VPR128,
6797                           v16i8, Neon_rev16>;
6798 def REV16_8b : NeonI_REV<"rev16", "8b", 0b00, 0b0, 0b0, 0b00001, VPR64,
6799                          v8i8, Neon_rev16>;
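// Illustrative assembly (register choices are examples): REV64/REV32/REV16
// reverse the element order within each 64-bit, 32-bit or 16-bit container.
//   rev64 v0.4s,  v1.4s
//   rev32 v0.8h,  v1.8h
//   rev16 v0.16b, v1.16b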
6800
6801 multiclass NeonI_PairwiseAdd<string asmop, bit U, bits<5> opcode,
6802                              SDPatternOperator Neon_Padd> {
6803   def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
6804                            (outs VPR128:$Rd), (ins VPR128:$Rn),
6805                            asmop # "\t$Rd.8h, $Rn.16b",
6806                            [(set (v8i16 VPR128:$Rd),
6807                               (v8i16 (Neon_Padd (v16i8 VPR128:$Rn))))],
6808                            NoItinerary>;
6809   
6810   def 8b4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
6811                           (outs VPR64:$Rd), (ins VPR64:$Rn),
6812                           asmop # "\t$Rd.4h, $Rn.8b",
6813                           [(set (v4i16 VPR64:$Rd),
6814                              (v4i16 (Neon_Padd (v8i8 VPR64:$Rn))))],
6815                           NoItinerary>;
6816   
6817   def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
6818                            (outs VPR128:$Rd), (ins VPR128:$Rn),
6819                            asmop # "\t$Rd.4s, $Rn.8h",
6820                            [(set (v4i32 VPR128:$Rd),
6821                               (v4i32 (Neon_Padd (v8i16 VPR128:$Rn))))],
6822                            NoItinerary>;
6823   
6824   def 4h2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
6825                           (outs VPR64:$Rd), (ins VPR64:$Rn),
6826                           asmop # "\t$Rd.2s, $Rn.4h",
6827                           [(set (v2i32 VPR64:$Rd),
6828                              (v2i32 (Neon_Padd (v4i16 VPR64:$Rn))))],
6829                           NoItinerary>;
6830   
6831   def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
6832                            (outs VPR128:$Rd), (ins VPR128:$Rn),
6833                            asmop # "\t$Rd.2d, $Rn.4s",
6834                            [(set (v2i64 VPR128:$Rd),
6835                               (v2i64 (Neon_Padd (v4i32 VPR128:$Rn))))],
6836                            NoItinerary>;
6837   
6838   def 2s1d : NeonI_2VMisc<0b0, U, 0b10, opcode,
6839                           (outs VPR64:$Rd), (ins VPR64:$Rn),
6840                           asmop # "\t$Rd.1d, $Rn.2s",
6841                           [(set (v1i64 VPR64:$Rd),
6842                              (v1i64 (Neon_Padd (v2i32 VPR64:$Rn))))],
6843                           NoItinerary>;
6844 }
6845
6846 defm SADDLP : NeonI_PairwiseAdd<"saddlp", 0b0, 0b00010,
6847                                 int_arm_neon_vpaddls>;
6848 defm UADDLP : NeonI_PairwiseAdd<"uaddlp", 0b1, 0b00010,
6849                                 int_arm_neon_vpaddlu>;
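// Illustrative assembly (register choices are examples): pairwise add-long,
// e.g. adjacent byte pairs are added into halfword results.
//   saddlp v0.8h, v1.16b
//   uaddlp v0.2d, v1.4s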
6850
6851 multiclass NeonI_PairwiseAddAcc<string asmop, bit U, bits<5> opcode,
6852                              SDPatternOperator Neon_Padd> {
6853   let Constraints = "$src = $Rd" in {
6854     def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
6855                              (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
6856                              asmop # "\t$Rd.8h, $Rn.16b",
6857                              [(set (v8i16 VPR128:$Rd),
6858                                 (v8i16 (Neon_Padd 
6859                                   (v8i16 VPR128:$src), (v16i8 VPR128:$Rn))))],
6860                              NoItinerary>;
6861     
6862     def 8b4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
6863                             (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
6864                             asmop # "\t$Rd.4h, $Rn.8b",
6865                             [(set (v4i16 VPR64:$Rd),
6866                                (v4i16 (Neon_Padd 
6867                                  (v4i16 VPR64:$src), (v8i8 VPR64:$Rn))))],
6868                             NoItinerary>;
6869     
6870     def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
6871                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
6872                             asmop # "\t$Rd.4s, $Rn.8h",
6873                             [(set (v4i32 VPR128:$Rd),
6874                                (v4i32 (Neon_Padd
6875                                  (v4i32 VPR128:$src), (v8i16 VPR128:$Rn))))],
6876                             NoItinerary>;
6877     
6878     def 4h2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
6879                             (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
6880                             asmop # "\t$Rd.2s, $Rn.4h",
6881                             [(set (v2i32 VPR64:$Rd),
6882                                (v2i32 (Neon_Padd
6883                                  (v2i32 VPR64:$src), (v4i16 VPR64:$Rn))))],
6884                             NoItinerary>;
6885     
6886     def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
6887                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
6888                             asmop # "\t$Rd.2d, $Rn.4s",
6889                             [(set (v2i64 VPR128:$Rd),
6890                                (v2i64 (Neon_Padd
6891                                  (v2i64 VPR128:$src), (v4i32 VPR128:$Rn))))],
6892                             NoItinerary>;
6893     
6894     def 2s1d : NeonI_2VMisc<0b0, U, 0b10, opcode,
6895                             (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
6896                             asmop # "\t$Rd.1d, $Rn.2s",
6897                             [(set (v1i64 VPR64:$Rd),
6898                                (v1i64 (Neon_Padd
6899                                  (v1i64 VPR64:$src), (v2i32 VPR64:$Rn))))],
6900                             NoItinerary>;
6901   }
6902 }
6903
6904 defm SADALP : NeonI_PairwiseAddAcc<"sadalp", 0b0, 0b00110,
6905                                    int_arm_neon_vpadals>;
6906 defm UADALP : NeonI_PairwiseAddAcc<"uadalp", 0b1, 0b00110,
6907                                    int_arm_neon_vpadalu>;
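// Illustrative assembly (register choices are examples): pairwise add-long
// and accumulate into the destination register.
//   sadalp v0.4s, v1.8h
//   uadalp v0.8h, v1.16b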
6908
6909 multiclass NeonI_2VMisc_BHSDsize_1Arg<string asmop, bit U, bits<5> opcode> {
6910   def 16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
6911                          (outs VPR128:$Rd), (ins VPR128:$Rn),
6912                          asmop # "\t$Rd.16b, $Rn.16b",
6913                          [], NoItinerary>;
6914   
6915   def 8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
6916                         (outs VPR128:$Rd), (ins VPR128:$Rn),
6917                         asmop # "\t$Rd.8h, $Rn.8h",
6918                         [], NoItinerary>;
6919   
6920   def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
6921                         (outs VPR128:$Rd), (ins VPR128:$Rn),
6922                         asmop # "\t$Rd.4s, $Rn.4s",
6923                         [], NoItinerary>;
6924   
6925   def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
6926                         (outs VPR128:$Rd), (ins VPR128:$Rn),
6927                         asmop # "\t$Rd.2d, $Rn.2d",
6928                         [], NoItinerary>;
6929   
6930   def 8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
6931                          (outs VPR64:$Rd), (ins VPR64:$Rn),
6932                          asmop # "\t$Rd.8b, $Rn.8b",
6933                          [], NoItinerary>;
6934   
6935   def 4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
6936                         (outs VPR64:$Rd), (ins VPR64:$Rn),
6937                         asmop # "\t$Rd.4h, $Rn.4h",
6938                         [], NoItinerary>;
6939   
6940   def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
6941                         (outs VPR64:$Rd), (ins VPR64:$Rn),
6942                         asmop # "\t$Rd.2s, $Rn.2s",
6943                         [], NoItinerary>;
6944 }
6945
6946 defm SQABS : NeonI_2VMisc_BHSDsize_1Arg<"sqabs", 0b0, 0b00111>;
6947 defm SQNEG : NeonI_2VMisc_BHSDsize_1Arg<"sqneg", 0b1, 0b00111>;
6948 defm ABS : NeonI_2VMisc_BHSDsize_1Arg<"abs", 0b0, 0b01011>;
6949 defm NEG : NeonI_2VMisc_BHSDsize_1Arg<"neg", 0b1, 0b01011>;
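// Illustrative assembly (register choices are examples):
//   abs   v0.4s,  v1.4s
//   neg   v0.2d,  v1.2d
//   sqabs v0.8h,  v1.8h
//   sqneg v0.16b, v1.16b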
6950
6951 multiclass NeonI_2VMisc_BHSD_1Arg_Pattern<string Prefix,
6952                                           SDPatternOperator Neon_Op> {
6953   def : Pat<(v16i8 (Neon_Op (v16i8 VPR128:$Rn))),
6954             (v16i8 (!cast<Instruction>(Prefix # 16b) (v16i8 VPR128:$Rn)))>;
6955
6956   def : Pat<(v8i16 (Neon_Op (v8i16 VPR128:$Rn))),
6957             (v8i16 (!cast<Instruction>(Prefix # 8h) (v8i16 VPR128:$Rn)))>;
6958
6959   def : Pat<(v4i32 (Neon_Op (v4i32 VPR128:$Rn))),
6960             (v4i32 (!cast<Instruction>(Prefix # 4s) (v4i32 VPR128:$Rn)))>;
6961
6962   def : Pat<(v2i64 (Neon_Op (v2i64 VPR128:$Rn))),
6963             (v2i64 (!cast<Instruction>(Prefix # 2d) (v2i64 VPR128:$Rn)))>;
6964
6965   def : Pat<(v8i8 (Neon_Op (v8i8 VPR64:$Rn))),
6966             (v8i8 (!cast<Instruction>(Prefix # 8b) (v8i8 VPR64:$Rn)))>;
6967
6968   def : Pat<(v4i16 (Neon_Op (v4i16 VPR64:$Rn))),
6969             (v4i16 (!cast<Instruction>(Prefix # 4h) (v4i16 VPR64:$Rn)))>;
6970
6971   def : Pat<(v2i32 (Neon_Op (v2i32 VPR64:$Rn))),
6972             (v2i32 (!cast<Instruction>(Prefix # 2s) (v2i32 VPR64:$Rn)))>;
6973 }
6974
6975 defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"SQABS", int_arm_neon_vqabs>;
6976 defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"SQNEG", int_arm_neon_vqneg>;
6977 defm : NeonI_2VMisc_BHSD_1Arg_Pattern<"ABS", int_arm_neon_vabs>;
6978
6979 def : Pat<(v16i8 (sub 
6980             (v16i8 Neon_AllZero),
6981             (v16i8 VPR128:$Rn))),
6982           (v16i8 (NEG16b (v16i8 VPR128:$Rn)))>;
6983 def : Pat<(v8i8 (sub 
6984             (v8i8 Neon_AllZero),
6985             (v8i8 VPR64:$Rn))),
6986           (v8i8 (NEG8b (v8i8 VPR64:$Rn)))>;
6987 def : Pat<(v8i16 (sub 
6988             (v8i16 (bitconvert (v16i8 Neon_AllZero))),
6989             (v8i16 VPR128:$Rn))),
6990           (v8i16 (NEG8h (v8i16 VPR128:$Rn)))>;
6991 def : Pat<(v4i16 (sub 
6992             (v4i16 (bitconvert (v8i8 Neon_AllZero))),
6993             (v4i16 VPR64:$Rn))),
6994           (v4i16 (NEG4h (v4i16 VPR64:$Rn)))>;
6995 def : Pat<(v4i32 (sub 
6996             (v4i32 (bitconvert (v16i8 Neon_AllZero))),
6997             (v4i32 VPR128:$Rn))),
6998           (v4i32 (NEG4s (v4i32 VPR128:$Rn)))>;
6999 def : Pat<(v2i32 (sub 
7000             (v2i32 (bitconvert (v8i8 Neon_AllZero))),
7001             (v2i32 VPR64:$Rn))),
7002           (v2i32 (NEG2s (v2i32 VPR64:$Rn)))>;
7003 def : Pat<(v2i64 (sub 
7004             (v2i64 (bitconvert (v16i8 Neon_AllZero))),
7005             (v2i64 VPR128:$Rn))),
7006           (v2i64 (NEG2d (v2i64 VPR128:$Rn)))>;
7007
multiclass NeonI_2VMisc_BHSDsize_2Args<string asmop, bit U, bits<5> opcode> {
  let Constraints = "$src = $Rd" in {
    def 16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
                           (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
                           asmop # "\t$Rd.16b, $Rn.16b",
                           [], NoItinerary>;

    def 8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
                          (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
                          asmop # "\t$Rd.8h, $Rn.8h",
                          [], NoItinerary>;

    def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
                          (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
                          asmop # "\t$Rd.4s, $Rn.4s",
                          [], NoItinerary>;

    def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
                          (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
                          asmop # "\t$Rd.2d, $Rn.2d",
                          [], NoItinerary>;

    def 8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
                          (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
                          asmop # "\t$Rd.8b, $Rn.8b",
                          [], NoItinerary>;

    def 4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
                          (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
                          asmop # "\t$Rd.4h, $Rn.4h",
                          [], NoItinerary>;

    def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
                          (outs VPR64:$Rd), (ins VPR64:$src, VPR64:$Rn),
                          asmop # "\t$Rd.2s, $Rn.2s",
                          [], NoItinerary>;
  }
}

defm SUQADD : NeonI_2VMisc_BHSDsize_2Args<"suqadd", 0b0, 0b00011>;
defm USQADD : NeonI_2VMisc_BHSDsize_2Args<"usqadd", 0b1, 0b00011>;

multiclass NeonI_2VMisc_BHSD_2Args_Pattern<string Prefix,
                                           SDPatternOperator Neon_Op> {
  def : Pat<(v16i8 (Neon_Op (v16i8 VPR128:$src), (v16i8 VPR128:$Rn))),
            (v16i8 (!cast<Instruction>(Prefix # 16b)
              (v16i8 VPR128:$src), (v16i8 VPR128:$Rn)))>;

  def : Pat<(v8i16 (Neon_Op (v8i16 VPR128:$src), (v8i16 VPR128:$Rn))),
            (v8i16 (!cast<Instruction>(Prefix # 8h)
              (v8i16 VPR128:$src), (v8i16 VPR128:$Rn)))>;

  def : Pat<(v4i32 (Neon_Op (v4i32 VPR128:$src), (v4i32 VPR128:$Rn))),
            (v4i32 (!cast<Instruction>(Prefix # 4s)
              (v4i32 VPR128:$src), (v4i32 VPR128:$Rn)))>;

  def : Pat<(v2i64 (Neon_Op (v2i64 VPR128:$src), (v2i64 VPR128:$Rn))),
            (v2i64 (!cast<Instruction>(Prefix # 2d)
              (v2i64 VPR128:$src), (v2i64 VPR128:$Rn)))>;

  def : Pat<(v8i8 (Neon_Op (v8i8 VPR64:$src), (v8i8 VPR64:$Rn))),
            (v8i8 (!cast<Instruction>(Prefix # 8b)
              (v8i8 VPR64:$src), (v8i8 VPR64:$Rn)))>;

  def : Pat<(v4i16 (Neon_Op (v4i16 VPR64:$src), (v4i16 VPR64:$Rn))),
            (v4i16 (!cast<Instruction>(Prefix # 4h)
              (v4i16 VPR64:$src), (v4i16 VPR64:$Rn)))>;

  def : Pat<(v2i32 (Neon_Op (v2i32 VPR64:$src), (v2i32 VPR64:$Rn))),
            (v2i32 (!cast<Instruction>(Prefix # 2s)
              (v2i32 VPR64:$src), (v2i32 VPR64:$Rn)))>;
}

defm : NeonI_2VMisc_BHSD_2Args_Pattern<"SUQADD", int_aarch64_neon_suqadd>;
defm : NeonI_2VMisc_BHSD_2Args_Pattern<"USQADD", int_aarch64_neon_usqadd>;

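// Unary operations that exist only for byte, halfword and word elements
// (cls, clz): there is no D-element form.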
multiclass NeonI_2VMisc_BHSsizes<string asmop, bit U,
                          SDPatternOperator Neon_Op> {
  def 16b : NeonI_2VMisc<0b1, U, 0b00, 0b00100,
                         (outs VPR128:$Rd), (ins VPR128:$Rn),
                         asmop # "\t$Rd.16b, $Rn.16b",
                         [(set (v16i8 VPR128:$Rd),
                            (v16i8 (Neon_Op (v16i8 VPR128:$Rn))))],
                         NoItinerary>;

  def 8h : NeonI_2VMisc<0b1, U, 0b01, 0b00100,
                        (outs VPR128:$Rd), (ins VPR128:$Rn),
                        asmop # "\t$Rd.8h, $Rn.8h",
                        [(set (v8i16 VPR128:$Rd),
                           (v8i16 (Neon_Op (v8i16 VPR128:$Rn))))],
                        NoItinerary>;

  def 4s : NeonI_2VMisc<0b1, U, 0b10, 0b00100,
                        (outs VPR128:$Rd), (ins VPR128:$Rn),
                        asmop # "\t$Rd.4s, $Rn.4s",
                        [(set (v4i32 VPR128:$Rd),
                           (v4i32 (Neon_Op (v4i32 VPR128:$Rn))))],
                        NoItinerary>;

  def 8b : NeonI_2VMisc<0b0, U, 0b00, 0b00100,
                        (outs VPR64:$Rd), (ins VPR64:$Rn),
                        asmop # "\t$Rd.8b, $Rn.8b",
                        [(set (v8i8 VPR64:$Rd),
                           (v8i8 (Neon_Op (v8i8 VPR64:$Rn))))],
                        NoItinerary>;

  def 4h : NeonI_2VMisc<0b0, U, 0b01, 0b00100,
                        (outs VPR64:$Rd), (ins VPR64:$Rn),
                        asmop # "\t$Rd.4h, $Rn.4h",
                        [(set (v4i16 VPR64:$Rd),
                           (v4i16 (Neon_Op (v4i16 VPR64:$Rn))))],
                        NoItinerary>;

  def 2s : NeonI_2VMisc<0b0, U, 0b10, 0b00100,
                        (outs VPR64:$Rd), (ins VPR64:$Rn),
                        asmop # "\t$Rd.2s, $Rn.2s",
                        [(set (v2i32 VPR64:$Rd),
                           (v2i32 (Neon_Op (v2i32 VPR64:$Rn))))],
                        NoItinerary>;
}

defm CLS : NeonI_2VMisc_BHSsizes<"cls", 0b0, int_arm_neon_vcls>;
defm CLZ : NeonI_2VMisc_BHSsizes<"clz", 0b1, ctlz>;

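// Byte-only unary operations. cnt, not and rbit share opcode 0b00101 and are
// distinguished by the U and size fields.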
multiclass NeonI_2VMisc_Bsize<string asmop, bit U, bits<2> size,
                              bits<5> Opcode> {
  def 16b : NeonI_2VMisc<0b1, U, size, Opcode,
                         (outs VPR128:$Rd), (ins VPR128:$Rn),
                         asmop # "\t$Rd.16b, $Rn.16b",
                         [], NoItinerary>;

  def 8b : NeonI_2VMisc<0b0, U, size, Opcode,
                        (outs VPR64:$Rd), (ins VPR64:$Rn),
                        asmop # "\t$Rd.8b, $Rn.8b",
                        [], NoItinerary>;
}

defm CNT : NeonI_2VMisc_Bsize<"cnt", 0b0, 0b00, 0b00101>;
defm NOT : NeonI_2VMisc_Bsize<"not", 0b1, 0b00, 0b00101>;
defm RBIT : NeonI_2VMisc_Bsize<"rbit", 0b1, 0b01, 0b00101>;

def : NeonInstAlias<"mvn $Rd.16b, $Rn.16b",
                    (NOT16b VPR128:$Rd, VPR128:$Rn), 0>;
def : NeonInstAlias<"mvn $Rd.8b, $Rn.8b",
                    (NOT8b VPR64:$Rd, VPR64:$Rn), 0>;

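// Population count (ctpop) selects directly to cnt.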
def : Pat<(v16i8 (ctpop (v16i8 VPR128:$Rn))),
          (v16i8 (CNT16b (v16i8 VPR128:$Rn)))>;
def : Pat<(v8i8 (ctpop (v8i8 VPR64:$Rn))),
          (v8i8 (CNT8b (v8i8 VPR64:$Rn)))>;

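// Bitwise NOT written as an xor with the all-ones vector, for every integer
// vector type; all of them lower to the byte-wide NOT instruction.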
def : Pat<(v16i8 (xor
            (v16i8 VPR128:$Rn),
            (v16i8 Neon_AllOne))),
          (v16i8 (NOT16b (v16i8 VPR128:$Rn)))>;
def : Pat<(v8i8 (xor
            (v8i8 VPR64:$Rn),
            (v8i8 Neon_AllOne))),
          (v8i8 (NOT8b (v8i8 VPR64:$Rn)))>;
def : Pat<(v8i16 (xor
            (v8i16 VPR128:$Rn),
            (v8i16 (bitconvert (v16i8 Neon_AllOne))))),
          (NOT16b VPR128:$Rn)>;
def : Pat<(v4i16 (xor
            (v4i16 VPR64:$Rn),
            (v4i16 (bitconvert (v8i8 Neon_AllOne))))),
          (NOT8b VPR64:$Rn)>;
def : Pat<(v4i32 (xor
            (v4i32 VPR128:$Rn),
            (v4i32 (bitconvert (v16i8 Neon_AllOne))))),
          (NOT16b VPR128:$Rn)>;
def : Pat<(v2i32 (xor
            (v2i32 VPR64:$Rn),
            (v2i32 (bitconvert (v8i8 Neon_AllOne))))),
          (NOT8b VPR64:$Rn)>;
def : Pat<(v2i64 (xor
            (v2i64 VPR128:$Rn),
            (v2i64 (bitconvert (v16i8 Neon_AllOne))))),
          (NOT16b VPR128:$Rn)>;

def : Pat<(v16i8 (int_aarch64_neon_rbit (v16i8 VPR128:$Rn))),
          (v16i8 (RBIT16b (v16i8 VPR128:$Rn)))>;
def : Pat<(v8i8 (int_aarch64_neon_rbit (v8i8 VPR64:$Rn))),
          (v8i8 (RBIT8b (v8i8 VPR64:$Rn)))>;

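// Floating-point unary operations over .2s, .4s and .2d vectors.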
multiclass NeonI_2VMisc_SDsizes<string asmop, bit U, bits<5> opcode,
                                SDPatternOperator Neon_Op> {
  def 4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
                        (outs VPR128:$Rd), (ins VPR128:$Rn),
                        asmop # "\t$Rd.4s, $Rn.4s",
                        [(set (v4f32 VPR128:$Rd),
                           (v4f32 (Neon_Op (v4f32 VPR128:$Rn))))],
                        NoItinerary>;

  def 2d : NeonI_2VMisc<0b1, U, 0b11, opcode,
                        (outs VPR128:$Rd), (ins VPR128:$Rn),
                        asmop # "\t$Rd.2d, $Rn.2d",
                        [(set (v2f64 VPR128:$Rd),
                           (v2f64 (Neon_Op (v2f64 VPR128:$Rn))))],
                        NoItinerary>;

  def 2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
                        (outs VPR64:$Rd), (ins VPR64:$Rn),
                        asmop # "\t$Rd.2s, $Rn.2s",
                        [(set (v2f32 VPR64:$Rd),
                           (v2f32 (Neon_Op (v2f32 VPR64:$Rn))))],
                        NoItinerary>;
}

defm FABS : NeonI_2VMisc_SDsizes<"fabs", 0b0, 0b01111, fabs>;
defm FNEG : NeonI_2VMisc_SDsizes<"fneg", 0b1, 0b01111, fneg>;

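// Narrowing moves: the plain forms produce a 64-bit result, while the "2"
// variants write the high half of $Rd and therefore tie $Rd to $src.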
multiclass NeonI_2VMisc_HSD_Narrow<string asmop, bit U, bits<5> opcode> {
  def 8h8b : NeonI_2VMisc<0b0, U, 0b00, opcode,
                          (outs VPR64:$Rd), (ins VPR128:$Rn),
                          asmop # "\t$Rd.8b, $Rn.8h",
                          [], NoItinerary>;

  def 4s4h : NeonI_2VMisc<0b0, U, 0b01, opcode,
                          (outs VPR64:$Rd), (ins VPR128:$Rn),
                          asmop # "\t$Rd.4h, $Rn.4s",
                          [], NoItinerary>;

  def 2d2s : NeonI_2VMisc<0b0, U, 0b10, opcode,
                          (outs VPR64:$Rd), (ins VPR128:$Rn),
                          asmop # "\t$Rd.2s, $Rn.2d",
                          [], NoItinerary>;

  let Constraints = "$Rd = $src" in {
    def 8h16b : NeonI_2VMisc<0b1, U, 0b00, opcode,
                             (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
                             asmop # "2\t$Rd.16b, $Rn.8h",
                             [], NoItinerary>;

    def 4s8h : NeonI_2VMisc<0b1, U, 0b01, opcode,
                            (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
                            asmop # "2\t$Rd.8h, $Rn.4s",
                            [], NoItinerary>;

    def 2d4s : NeonI_2VMisc<0b1, U, 0b10, opcode,
                            (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
                            asmop # "2\t$Rd.4s, $Rn.2d",
                            [], NoItinerary>;
  }
}

defm XTN : NeonI_2VMisc_HSD_Narrow<"xtn", 0b0, 0b10010>;
defm SQXTUN : NeonI_2VMisc_HSD_Narrow<"sqxtun", 0b1, 0b10010>;
defm SQXTN : NeonI_2VMisc_HSD_Narrow<"sqxtn", 0b0, 0b10100>;
defm UQXTN : NeonI_2VMisc_HSD_Narrow<"uqxtn", 0b1, 0b10100>;

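// Pattern-only multiclass for the narrowing moves. The high-half ("2") forms
// are matched from a concat_vectors of the existing low half and the narrowed
// value, with the low half placed into a 128-bit register via SUBREG_TO_REG.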
multiclass NeonI_2VMisc_Narrow_Patterns<string Prefix,
                                        SDPatternOperator Neon_Op> {
  def : Pat<(v8i8 (Neon_Op (v8i16 VPR128:$Rn))),
            (v8i8 (!cast<Instruction>(Prefix # 8h8b) (v8i16 VPR128:$Rn)))>;

  def : Pat<(v4i16 (Neon_Op (v4i32 VPR128:$Rn))),
            (v4i16 (!cast<Instruction>(Prefix # 4s4h) (v4i32 VPR128:$Rn)))>;

  def : Pat<(v2i32 (Neon_Op (v2i64 VPR128:$Rn))),
            (v2i32 (!cast<Instruction>(Prefix # 2d2s) (v2i64 VPR128:$Rn)))>;

  def : Pat<(v16i8 (concat_vectors
              (v8i8 VPR64:$src),
              (v8i8 (Neon_Op (v8i16 VPR128:$Rn))))),
            (!cast<Instruction>(Prefix # 8h16b)
              (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
              VPR128:$Rn)>;

  def : Pat<(v8i16 (concat_vectors
              (v4i16 VPR64:$src),
              (v4i16 (Neon_Op (v4i32 VPR128:$Rn))))),
            (!cast<Instruction>(Prefix # 4s8h)
              (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
              VPR128:$Rn)>;

  def : Pat<(v4i32 (concat_vectors
              (v2i32 VPR64:$src),
              (v2i32 (Neon_Op (v2i64 VPR128:$Rn))))),
            (!cast<Instruction>(Prefix # 2d4s)
              (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64),
              VPR128:$Rn)>;
}

defm : NeonI_2VMisc_Narrow_Patterns<"XTN", trunc>;
defm : NeonI_2VMisc_Narrow_Patterns<"SQXTUN", int_arm_neon_vqmovnsu>;
defm : NeonI_2VMisc_Narrow_Patterns<"SQXTN", int_arm_neon_vqmovns>;
defm : NeonI_2VMisc_Narrow_Patterns<"UQXTN", int_arm_neon_vqmovnu>;

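// shll/shll2: the shift amount is an immediate that must equal the source
// element width, hence the uimm_exact8/16/32 operands.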
multiclass NeonI_2VMisc_SHIFT<string asmop, bit U, bits<5> opcode> {
  def 8b8h : NeonI_2VMisc<0b0, U, 0b00, opcode,
                          (outs VPR128:$Rd),
                          (ins VPR64:$Rn, uimm_exact8:$Imm),
                          asmop # "\t$Rd.8h, $Rn.8b, $Imm",
                          [], NoItinerary>;

  def 4h4s : NeonI_2VMisc<0b0, U, 0b01, opcode,
                          (outs VPR128:$Rd),
                          (ins VPR64:$Rn, uimm_exact16:$Imm),
                          asmop # "\t$Rd.4s, $Rn.4h, $Imm",
                          [], NoItinerary>;

  def 2s2d : NeonI_2VMisc<0b0, U, 0b10, opcode,
                          (outs VPR128:$Rd),
                          (ins VPR64:$Rn, uimm_exact32:$Imm),
                          asmop # "\t$Rd.2d, $Rn.2s, $Imm",
                          [], NoItinerary>;

  def 16b8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
                          (outs VPR128:$Rd),
                          (ins VPR128:$Rn, uimm_exact8:$Imm),
                          asmop # "2\t$Rd.8h, $Rn.16b, $Imm",
                          [], NoItinerary>;

  def 8h4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
                          (outs VPR128:$Rd),
                          (ins VPR128:$Rn, uimm_exact16:$Imm),
                          asmop # "2\t$Rd.4s, $Rn.8h, $Imm",
                          [], NoItinerary>;

  def 4s2d : NeonI_2VMisc<0b1, U, 0b10, opcode,
                          (outs VPR128:$Rd),
                          (ins VPR128:$Rn, uimm_exact32:$Imm),
                          asmop # "2\t$Rd.2d, $Rn.4s, $Imm",
                          [], NoItinerary>;
}

defm SHLL : NeonI_2VMisc_SHIFT<"shll", 0b1, 0b10011>;

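// shll has no intrinsic: match an extend (sext or zext) followed by a shift
// left by a duplicated exact immediate, for the low and the high halves.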
class NeonI_SHLL_Patterns<ValueType OpTy, ValueType DesTy,
                          SDPatternOperator ExtOp, Operand Neon_Imm,
                          string suffix>
  : Pat<(DesTy (shl
          (DesTy (ExtOp (OpTy VPR64:$Rn))),
            (DesTy (Neon_vdup
              (i32 Neon_Imm:$Imm))))),
        (!cast<Instruction>("SHLL" # suffix) VPR64:$Rn, Neon_Imm:$Imm)>;

class NeonI_SHLL_High_Patterns<ValueType OpTy, ValueType DesTy,
                               SDPatternOperator ExtOp, Operand Neon_Imm,
                               string suffix, PatFrag GetHigh>
  : Pat<(DesTy (shl
          (DesTy (ExtOp
            (OpTy (GetHigh VPR128:$Rn)))),
              (DesTy (Neon_vdup
                (i32 Neon_Imm:$Imm))))),
        (!cast<Instruction>("SHLL" # suffix) VPR128:$Rn, Neon_Imm:$Imm)>;

def : NeonI_SHLL_Patterns<v8i8, v8i16, zext, uimm_exact8, "8b8h">;
def : NeonI_SHLL_Patterns<v8i8, v8i16, sext, uimm_exact8, "8b8h">;
def : NeonI_SHLL_Patterns<v4i16, v4i32, zext, uimm_exact16, "4h4s">;
def : NeonI_SHLL_Patterns<v4i16, v4i32, sext, uimm_exact16, "4h4s">;
def : NeonI_SHLL_Patterns<v2i32, v2i64, zext, uimm_exact32, "2s2d">;
def : NeonI_SHLL_Patterns<v2i32, v2i64, sext, uimm_exact32, "2s2d">;
def : NeonI_SHLL_High_Patterns<v8i8, v8i16, zext, uimm_exact8, "16b8h",
                               Neon_High16B>;
def : NeonI_SHLL_High_Patterns<v8i8, v8i16, sext, uimm_exact8, "16b8h",
                               Neon_High16B>;
def : NeonI_SHLL_High_Patterns<v4i16, v4i32, zext, uimm_exact16, "8h4s",
                               Neon_High8H>;
def : NeonI_SHLL_High_Patterns<v4i16, v4i32, sext, uimm_exact16, "8h4s",
                               Neon_High8H>;
def : NeonI_SHLL_High_Patterns<v2i32, v2i64, zext, uimm_exact32, "4s2d",
                               Neon_High4S>;
def : NeonI_SHLL_High_Patterns<v2i32, v2i64, sext, uimm_exact32, "4s2d",
                               Neon_High4S>;

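// Floating-point narrowing (fcvtn): .4s->.4h and .2d->.2s, plus the tied
// high-half "2" variants.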
multiclass NeonI_2VMisc_SD_Narrow<string asmop, bit U, bits<5> opcode> {
  def 4s4h : NeonI_2VMisc<0b0, U, 0b00, opcode,
                          (outs VPR64:$Rd), (ins VPR128:$Rn),
                          asmop # "\t$Rd.4h, $Rn.4s",
                          [], NoItinerary>;

  def 2d2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
                          (outs VPR64:$Rd), (ins VPR128:$Rn),
                          asmop # "\t$Rd.2s, $Rn.2d",
                          [], NoItinerary>;

  let Constraints = "$src = $Rd" in {
    def 4s8h : NeonI_2VMisc<0b1, U, 0b00, opcode,
                            (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
                            asmop # "2\t$Rd.8h, $Rn.4s",
                            [], NoItinerary>;

    def 2d4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
                            (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
                            asmop # "2\t$Rd.4s, $Rn.2d",
                            [], NoItinerary>;
  }
}

defm FCVTN : NeonI_2VMisc_SD_Narrow<"fcvtn", 0b0, 0b10110>;

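// fcvtn patterns: .4s->.4h maps the f32->f16 intrinsic, .2d->.2s maps fround,
// and the "2" forms are matched through concat_vectors as for the integer
// narrows.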
multiclass NeonI_2VMisc_Narrow_Pattern<string prefix,
                                       SDPatternOperator f32_to_f16_Op,
                                       SDPatternOperator f64_to_f32_Op> {

  def : Pat<(v4i16 (f32_to_f16_Op (v4f32 VPR128:$Rn))),
              (!cast<Instruction>(prefix # "4s4h") (v4f32 VPR128:$Rn))>;

  def : Pat<(v8i16 (concat_vectors
                (v4i16 VPR64:$src),
                (v4i16 (f32_to_f16_Op (v4f32 VPR128:$Rn))))),
                  (!cast<Instruction>(prefix # "4s8h")
                    (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
                    (v4f32 VPR128:$Rn))>;

  def : Pat<(v2f32 (f64_to_f32_Op (v2f64 VPR128:$Rn))),
            (!cast<Instruction>(prefix # "2d2s") (v2f64 VPR128:$Rn))>;

  def : Pat<(v4f32 (concat_vectors
              (v2f32 VPR64:$src),
              (v2f32 (f64_to_f32_Op (v2f64 VPR128:$Rn))))),
                (!cast<Instruction>(prefix # "2d4s")
                  (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
                  (v2f64 VPR128:$Rn))>;
}

defm : NeonI_2VMisc_Narrow_Pattern<"FCVTN", int_arm_neon_vcvtfp2hf, fround>;

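// fcvtxn only narrows from double, so it gets its own multiclass with the
// patterns defined inline.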
multiclass NeonI_2VMisc_D_Narrow<string asmop, string prefix, bit U,
                                 bits<5> opcode> {
  def 2d2s : NeonI_2VMisc<0b0, U, 0b01, opcode,
                          (outs VPR64:$Rd), (ins VPR128:$Rn),
                          asmop # "\t$Rd.2s, $Rn.2d",
                          [], NoItinerary>;

  def 2d4s : NeonI_2VMisc<0b1, U, 0b01, opcode,
                          (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
                          asmop # "2\t$Rd.4s, $Rn.2d",
                          [], NoItinerary> {
    let Constraints = "$src = $Rd";
  }

  def : Pat<(v2f32 (int_aarch64_neon_fcvtxn (v2f64 VPR128:$Rn))),
            (!cast<Instruction>(prefix # "2d2s") VPR128:$Rn)>;

  def : Pat<(v4f32 (concat_vectors
              (v2f32 VPR64:$src),
              (v2f32 (int_aarch64_neon_fcvtxn (v2f64 VPR128:$Rn))))),
            (!cast<Instruction>(prefix # "2d4s")
               (v4f32 (SUBREG_TO_REG (i32 0), VPR64:$src, sub_64)),
               VPR128:$Rn)>;
}

defm FCVTXN : NeonI_2VMisc_D_Narrow<"fcvtxn", "FCVTXN", 0b1, 0b10110>;

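// Neon_High4Float extracts the high pair of floats from a .4s vector and is
// used to match the high-half (fcvtl2) forms below. fcvtl widens .4h->.4s and
// .2s->.2d.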
def Neon_High4Float : PatFrag<(ops node:$in),
                              (extract_subvector (v4f32 node:$in), (iPTR 2))>;

multiclass NeonI_2VMisc_HS_Extend<string asmop, bit U, bits<5> opcode> {
  def 4h4s : NeonI_2VMisc<0b0, U, 0b00, opcode,
                          (outs VPR128:$Rd), (ins VPR64:$Rn),
                          asmop # "\t$Rd.4s, $Rn.4h",
                          [], NoItinerary>;

  def 2s2d : NeonI_2VMisc<0b0, U, 0b01, opcode,
                          (outs VPR128:$Rd), (ins VPR64:$Rn),
                          asmop # "\t$Rd.2d, $Rn.2s",
                          [], NoItinerary>;

  def 8h4s : NeonI_2VMisc<0b1, U, 0b00, opcode,
                          (outs VPR128:$Rd), (ins VPR128:$Rn),
                          asmop # "2\t$Rd.4s, $Rn.8h",
                          [], NoItinerary>;

  def 4s2d : NeonI_2VMisc<0b1, U, 0b01, opcode,
                          (outs VPR128:$Rd), (ins VPR128:$Rn),
                          asmop # "2\t$Rd.2d, $Rn.4s",
                          [], NoItinerary>;
}

defm FCVTL : NeonI_2VMisc_HS_Extend<"fcvtl", 0b0, 0b10111>;

multiclass NeonI_2VMisc_Extend_Pattern<string prefix> {
  def : Pat<(v4f32 (int_arm_neon_vcvthf2fp (v4i16 VPR64:$Rn))),
            (!cast<Instruction>(prefix # "4h4s") VPR64:$Rn)>;

  def : Pat<(v4f32 (int_arm_neon_vcvthf2fp
              (v4i16 (Neon_High8H
                (v8i16 VPR128:$Rn))))),
            (!cast<Instruction>(prefix # "8h4s") VPR128:$Rn)>;

  def : Pat<(v2f64 (fextend (v2f32 VPR64:$Rn))),
            (!cast<Instruction>(prefix # "2s2d") VPR64:$Rn)>;

  def : Pat<(v2f64 (fextend
              (v2f32 (Neon_High4Float
                (v4f32 VPR128:$Rn))))),
            (!cast<Instruction>(prefix # "4s2d") VPR128:$Rn)>;
}

defm : NeonI_2VMisc_Extend_Pattern<"FCVTL">;

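// Common shell for the .2s/.4s/.2d conversions; the fp-to-int, int-to-fp and
// fp-to-fp groups below only fix the value types and the operator.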
multiclass NeonI_2VMisc_SD_Conv<string asmop, bit Size, bit U, bits<5> opcode,
                                ValueType ResTy4s, ValueType OpTy4s,
                                ValueType ResTy2d, ValueType OpTy2d,
                                ValueType ResTy2s, ValueType OpTy2s,
                                SDPatternOperator Neon_Op> {

  def 4s : NeonI_2VMisc<0b1, U, {Size, 0b0}, opcode,
                        (outs VPR128:$Rd), (ins VPR128:$Rn),
                        asmop # "\t$Rd.4s, $Rn.4s",
                        [(set (ResTy4s VPR128:$Rd),
                           (ResTy4s (Neon_Op (OpTy4s VPR128:$Rn))))],
                        NoItinerary>;

  def 2d : NeonI_2VMisc<0b1, U, {Size, 0b1}, opcode,
                        (outs VPR128:$Rd), (ins VPR128:$Rn),
                        asmop # "\t$Rd.2d, $Rn.2d",
                        [(set (ResTy2d VPR128:$Rd),
                           (ResTy2d (Neon_Op (OpTy2d VPR128:$Rn))))],
                        NoItinerary>;

  def 2s : NeonI_2VMisc<0b0, U, {Size, 0b0}, opcode,
                        (outs VPR64:$Rd), (ins VPR64:$Rn),
                        asmop # "\t$Rd.2s, $Rn.2s",
                        [(set (ResTy2s VPR64:$Rd),
                           (ResTy2s (Neon_Op (OpTy2s VPR64:$Rn))))],
                        NoItinerary>;
}

multiclass NeonI_2VMisc_fp_to_int<string asmop, bit Size, bit U,
                                  bits<5> opcode, SDPatternOperator Neon_Op> {
  defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4i32, v4f32, v2i64,
                                v2f64, v2i32, v2f32, Neon_Op>;
}

defm FCVTNS : NeonI_2VMisc_fp_to_int<"fcvtns", 0b0, 0b0, 0b11010,
                                     int_aarch64_neon_fcvtns>;
defm FCVTNU : NeonI_2VMisc_fp_to_int<"fcvtnu", 0b0, 0b1, 0b11010,
                                     int_aarch64_neon_fcvtnu>;
defm FCVTPS : NeonI_2VMisc_fp_to_int<"fcvtps", 0b1, 0b0, 0b11010,
                                     int_aarch64_neon_fcvtps>;
defm FCVTPU : NeonI_2VMisc_fp_to_int<"fcvtpu", 0b1, 0b1, 0b11010,
                                     int_aarch64_neon_fcvtpu>;
defm FCVTMS : NeonI_2VMisc_fp_to_int<"fcvtms", 0b0, 0b0, 0b11011,
                                     int_aarch64_neon_fcvtms>;
defm FCVTMU : NeonI_2VMisc_fp_to_int<"fcvtmu", 0b0, 0b1, 0b11011,
                                     int_aarch64_neon_fcvtmu>;
defm FCVTZS : NeonI_2VMisc_fp_to_int<"fcvtzs", 0b1, 0b0, 0b11011, fp_to_sint>;
defm FCVTZU : NeonI_2VMisc_fp_to_int<"fcvtzu", 0b1, 0b1, 0b11011, fp_to_uint>;
defm FCVTAS : NeonI_2VMisc_fp_to_int<"fcvtas", 0b0, 0b0, 0b11100,
                                     int_aarch64_neon_fcvtas>;
defm FCVTAU : NeonI_2VMisc_fp_to_int<"fcvtau", 0b0, 0b1, 0b11100,
                                     int_aarch64_neon_fcvtau>;

multiclass NeonI_2VMisc_int_to_fp<string asmop, bit Size, bit U,
                                  bits<5> opcode, SDPatternOperator Neon_Op> {
  defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4f32, v4i32, v2f64,
                                v2i64, v2f32, v2i32, Neon_Op>;
}

defm SCVTF : NeonI_2VMisc_int_to_fp<"scvtf", 0b0, 0b0, 0b11101, sint_to_fp>;
defm UCVTF : NeonI_2VMisc_int_to_fp<"ucvtf", 0b0, 0b1, 0b11101, uint_to_fp>;

multiclass NeonI_2VMisc_fp_to_fp<string asmop, bit Size, bit U,
                                 bits<5> opcode, SDPatternOperator Neon_Op> {
  defm _ : NeonI_2VMisc_SD_Conv<asmop, Size, U, opcode, v4f32, v4f32, v2f64,
                                v2f64, v2f32, v2f32, Neon_Op>;
}

defm FRINTN : NeonI_2VMisc_fp_to_fp<"frintn", 0b0, 0b0, 0b11000,
                                     int_aarch64_neon_frintn>;
defm FRINTA : NeonI_2VMisc_fp_to_fp<"frinta", 0b0, 0b1, 0b11000, frnd>;
defm FRINTP : NeonI_2VMisc_fp_to_fp<"frintp", 0b1, 0b0, 0b11000, fceil>;
defm FRINTM : NeonI_2VMisc_fp_to_fp<"frintm", 0b0, 0b0, 0b11001, ffloor>;
defm FRINTX : NeonI_2VMisc_fp_to_fp<"frintx", 0b0, 0b1, 0b11001, frint>;
defm FRINTZ : NeonI_2VMisc_fp_to_fp<"frintz", 0b1, 0b0, 0b11001, ftrunc>;
defm FRINTI : NeonI_2VMisc_fp_to_fp<"frinti", 0b1, 0b1, 0b11001, fnearbyint>;
defm FRECPE : NeonI_2VMisc_fp_to_fp<"frecpe", 0b1, 0b0, 0b11101,
                                    int_arm_neon_vrecpe>;
defm FRSQRTE : NeonI_2VMisc_fp_to_fp<"frsqrte", 0b1, 0b1, 0b11101,
                                     int_arm_neon_vrsqrte>;
defm FSQRT : NeonI_2VMisc_fp_to_fp<"fsqrt", 0b1, 0b1, 0b11111,
                                   int_aarch64_neon_fsqrt>;

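// urecpe/ursqrte operate on 32-bit elements only.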
multiclass NeonI_2VMisc_S_Conv<string asmop, bit Size, bit U,
                               bits<5> opcode, SDPatternOperator Neon_Op> {
  def 4s : NeonI_2VMisc<0b1, U, {Size, 0b0}, opcode,
                        (outs VPR128:$Rd), (ins VPR128:$Rn),
                        asmop # "\t$Rd.4s, $Rn.4s",
                        [(set (v4i32 VPR128:$Rd),
                           (v4i32 (Neon_Op (v4i32 VPR128:$Rn))))],
                        NoItinerary>;

  def 2s : NeonI_2VMisc<0b0, U, {Size, 0b0}, opcode,
                        (outs VPR64:$Rd), (ins VPR64:$Rn),
                        asmop # "\t$Rd.2s, $Rn.2s",
                        [(set (v2i32 VPR64:$Rd),
                           (v2i32 (Neon_Op (v2i32 VPR64:$Rn))))],
                        NoItinerary>;
}

defm URECPE : NeonI_2VMisc_S_Conv<"urecpe", 0b1, 0b0, 0b11100,
                                  int_arm_neon_vrecpe>;
defm URSQRTE : NeonI_2VMisc_S_Conv<"ursqrte", 0b1, 0b1, 0b11100,
                                   int_arm_neon_vrsqrte>;

// Crypto Class
class NeonI_Cryptoaes_2v<bits<2> size, bits<5> opcode,
                         string asmop, SDPatternOperator opnode>
  : NeonI_Crypto_AES<size, opcode,
                     (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
                     asmop # "\t$Rd.16b, $Rn.16b",
                     [(set (v16i8 VPR128:$Rd),
                        (v16i8 (opnode (v16i8 VPR128:$src),
                                       (v16i8 VPR128:$Rn))))],
                     NoItinerary> {
  let Constraints = "$src = $Rd";
  let Predicates = [HasNEON, HasCrypto];
}

def AESE : NeonI_Cryptoaes_2v<0b00, 0b00100, "aese", int_arm_neon_aese>;
def AESD : NeonI_Cryptoaes_2v<0b00, 0b00101, "aesd", int_arm_neon_aesd>;

class NeonI_Cryptoaes<bits<2> size, bits<5> opcode,
                      string asmop, SDPatternOperator opnode>
  : NeonI_Crypto_AES<size, opcode,
                     (outs VPR128:$Rd), (ins VPR128:$Rn),
                     asmop # "\t$Rd.16b, $Rn.16b",
                     [(set (v16i8 VPR128:$Rd),
                        (v16i8 (opnode (v16i8 VPR128:$Rn))))],
                     NoItinerary>;

def AESMC : NeonI_Cryptoaes<0b00, 0b00110, "aesmc", int_arm_neon_aesmc>;
def AESIMC : NeonI_Cryptoaes<0b00, 0b00111, "aesimc", int_arm_neon_aesimc>;

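// SHA update instructions accumulate into $Rd (tied to $src) and, like the
// AES instructions, require both NEON and Crypto.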
class NeonI_Cryptosha_vv<bits<2> size, bits<5> opcode,
                         string asmop, SDPatternOperator opnode>
  : NeonI_Crypto_SHA<size, opcode,
                     (outs VPR128:$Rd), (ins VPR128:$src, VPR128:$Rn),
                     asmop # "\t$Rd.4s, $Rn.4s",
                     [(set (v4i32 VPR128:$Rd),
                        (v4i32 (opnode (v4i32 VPR128:$src),
                                       (v4i32 VPR128:$Rn))))],
                     NoItinerary> {
  let Constraints = "$src = $Rd";
  let Predicates = [HasNEON, HasCrypto];
}

def SHA1SU1 : NeonI_Cryptosha_vv<0b00, 0b00001, "sha1su1",
                                 int_arm_neon_sha1su1>;
def SHA256SU0 : NeonI_Cryptosha_vv<0b00, 0b00010, "sha256su0",
                                   int_arm_neon_sha256su0>;

class NeonI_Cryptosha_ss<bits<2> size, bits<5> opcode,
                         string asmop, SDPatternOperator opnode>
  : NeonI_Crypto_SHA<size, opcode,
                     (outs FPR32:$Rd), (ins FPR32:$Rn),
                     asmop # "\t$Rd, $Rn",
                     [(set (v1i32 FPR32:$Rd),
                        (v1i32 (opnode (v1i32 FPR32:$Rn))))],
                     NoItinerary> {
  let Predicates = [HasNEON, HasCrypto];
}

def SHA1H : NeonI_Cryptosha_ss<0b00, 0b00000, "sha1h", int_arm_neon_sha1h>;

class NeonI_Cryptosha3_vvv<bits<2> size, bits<3> opcode, string asmop,
                           SDPatternOperator opnode>
  : NeonI_Crypto_3VSHA<size, opcode,
                       (outs VPR128:$Rd),
                       (ins VPR128:$src, VPR128:$Rn, VPR128:$Rm),
                       asmop # "\t$Rd.4s, $Rn.4s, $Rm.4s",
                       [(set (v4i32 VPR128:$Rd),
                          (v4i32 (opnode (v4i32 VPR128:$src),
                                         (v4i32 VPR128:$Rn),
                                         (v4i32 VPR128:$Rm))))],
                       NoItinerary> {
  let Constraints = "$src = $Rd";
  let Predicates = [HasNEON, HasCrypto];
}

def SHA1SU0 : NeonI_Cryptosha3_vvv<0b00, 0b011, "sha1su0",
                                   int_arm_neon_sha1su0>;
def SHA256SU1 : NeonI_Cryptosha3_vvv<0b00, 0b110, "sha256su1",
                                     int_arm_neon_sha256su1>;

class NeonI_Cryptosha3_qqv<bits<2> size, bits<3> opcode, string asmop,
                           SDPatternOperator opnode>
  : NeonI_Crypto_3VSHA<size, opcode,
                       (outs FPR128:$Rd),
                       (ins FPR128:$src, FPR128:$Rn, VPR128:$Rm),
                       asmop # "\t$Rd, $Rn, $Rm.4s",
                       [(set (v4i32 FPR128:$Rd),
                          (v4i32 (opnode (v4i32 FPR128:$src),
                                         (v4i32 FPR128:$Rn),
                                         (v4i32 VPR128:$Rm))))],
                       NoItinerary> {
  let Constraints = "$src = $Rd";
  let Predicates = [HasNEON, HasCrypto];
}

def SHA256H : NeonI_Cryptosha3_qqv<0b00, 0b100, "sha256h",
                                   int_arm_neon_sha256h>;
def SHA256H2 : NeonI_Cryptosha3_qqv<0b00, 0b101, "sha256h2",
                                    int_arm_neon_sha256h2>;

class NeonI_Cryptosha3_qsv<bits<2> size, bits<3> opcode, string asmop,
                           SDPatternOperator opnode>
  : NeonI_Crypto_3VSHA<size, opcode,
                       (outs FPR128:$Rd),
                       (ins FPR128:$src, FPR32:$Rn, VPR128:$Rm),
                       asmop # "\t$Rd, $Rn, $Rm.4s",
                       [(set (v4i32 FPR128:$Rd),
                          (v4i32 (opnode (v4i32 FPR128:$src),
                                         (v1i32 FPR32:$Rn),
                                         (v4i32 VPR128:$Rm))))],
                       NoItinerary> {
  let Constraints = "$src = $Rd";
  let Predicates = [HasNEON, HasCrypto];
}

def SHA1C : NeonI_Cryptosha3_qsv<0b00, 0b000, "sha1c", int_aarch64_neon_sha1c>;
def SHA1P : NeonI_Cryptosha3_qsv<0b00, 0b001, "sha1p", int_aarch64_neon_sha1p>;
def SHA1M : NeonI_Cryptosha3_qsv<0b00, 0b010, "sha1m", int_aarch64_neon_sha1m>;
