//===- IntrinsicsAArch64.td - Defines AArch64 intrinsics ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the AArch64-specific intrinsics.
//
//===----------------------------------------------------------------------===//

let TargetPrefix = "aarch64" in {

def int_aarch64_thread_pointer : GCCBuiltin<"__builtin_thread_pointer">,
            Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;

def int_aarch64_ldxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
def int_aarch64_ldaxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
def int_aarch64_stxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;
def int_aarch64_stlxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;

def int_aarch64_ldxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty]>;
def int_aarch64_ldaxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty]>;
def int_aarch64_stxp : Intrinsic<[llvm_i32_ty],
                                 [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty]>;
def int_aarch64_stlxp : Intrinsic<[llvm_i32_ty],
                                  [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty]>;

def int_aarch64_clrex : Intrinsic<[]>;
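
// Illustrative use (not part of this file): a minimal sketch of an LDXR/STXR
// retry loop as it might appear in IR, assuming an i64 exchange through a
// p0i64 pointer. STXR returns 0 on success, nonzero if the exclusive monitor
// was lost:
//
//   loop:
//     %old  = call i64 @llvm.aarch64.ldxr.p0i64(i64* %addr)
//     %fail = call i32 @llvm.aarch64.stxr.p0i64(i64 %new, i64* %addr)
//     %retry = icmp ne i32 %fail, 0
//     br i1 %retry, label %loop, label %done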

def int_aarch64_sdiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
                                 LLVMMatchType<0>], [IntrNoMem]>;
def int_aarch64_udiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
                                 LLVMMatchType<0>], [IntrNoMem]>;

//===----------------------------------------------------------------------===//
// HINT

def int_aarch64_hint : Intrinsic<[], [llvm_i32_ty]>;
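
// The i32 operand is the HINT immediate: in the AArch64 ISA, 0 = NOP,
// 1 = YIELD, 2 = WFE, 3 = WFI, 4 = SEV, 5 = SEVL; other values execute as
// NOP unless allocated by a later architecture revision.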

//===----------------------------------------------------------------------===//
// RBIT

def int_aarch64_rbit : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>],
                                 [IntrNoMem]>;
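
// Frontends typically reach this via ACLE; a minimal C sketch, assuming a
// compiler that provides arm_acle.h with the __rbit family:
//
//   #include <arm_acle.h>
//   uint32_t rev_bits(uint32_t x) { return __rbit(x); }  // llvm.aarch64.rbit.i32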

//===----------------------------------------------------------------------===//
// Data Barrier Instructions

def int_aarch64_dmb : GCCBuiltin<"__builtin_arm_dmb">, Intrinsic<[], [llvm_i32_ty]>;
def int_aarch64_dsb : GCCBuiltin<"__builtin_arm_dsb">, Intrinsic<[], [llvm_i32_ty]>;
def int_aarch64_isb : GCCBuiltin<"__builtin_arm_isb">, Intrinsic<[], [llvm_i32_ty]>;
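
// The i32 operand is the barrier option encoding (e.g. 0xF = SY, full system).
// A minimal C sketch, assuming the ACLE barrier helpers from arm_acle.h:
//
//   #include <arm_acle.h>
//   void publish(int *p, int v) {
//     *p = v;
//     __dmb(0xF);   // lowers to llvm.aarch64.dmb(i32 15) -> "dmb sy"
//   }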

}

//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON)

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_2Scalar_Float_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;

  class AdvSIMD_FPToIntRounding_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem]>;

  class AdvSIMD_1IntArg_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1FloatArg_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Expand_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Long_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], [IntrNoMem]>;
  class AdvSIMD_1IntArg_Narrow_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyint_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Narrow_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [LLVMExtendedType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Int_Across_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyvector_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Float_Across_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;

  class AdvSIMD_2IntArg_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2FloatArg_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Compare_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;
  class AdvSIMD_2Arg_FloatCompare_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Long_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>, LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Wide_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Narrow_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMExtendedType<0>, LLVMExtendedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2Arg_Scalar_Narrow_Intrinsic
    : Intrinsic<[llvm_anyint_ty],
                [LLVMExtendedType<0>, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Expand_BySize_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_anyvector_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Tied_Narrow_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty],
                [IntrNoMem]>;

  class AdvSIMD_3VectorArg_Intrinsic
      : Intrinsic<[llvm_anyvector_ty],
               [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
               [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Scalar_Intrinsic
      : Intrinsic<[llvm_anyvector_ty],
               [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
               [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Tied_Narrow_Intrinsic
      : Intrinsic<[llvm_anyvector_ty],
               [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty,
                LLVMMatchType<1>], [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Scalar_Tied_Narrow_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_CvtFxToFP_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_CvtFPToFx_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty],
                [IntrNoMem]>;
}
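
// Each class above is a signature template: llvm_anyvector_ty and friends are
// overload placeholders that are resolved per instantiation. For example,
// int_aarch64_neon_smax below (an AdvSIMD_2VectorArg_Intrinsic) yields
// overloads such as:
//
//   declare <4 x i32> @llvm.aarch64.neon.smax.v4i32(<4 x i32>, <4 x i32>)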

// Arithmetic ops

let Properties = [IntrNoMem] in {
  // Vector Add Across Lanes
  def int_aarch64_neon_saddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uaddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_faddv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Vector Long Add Across Lanes
  def int_aarch64_neon_saddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uaddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;

  // Vector Halving Add
  def int_aarch64_neon_shadd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uhadd : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Rounding Halving Add
  def int_aarch64_neon_srhadd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_urhadd : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Saturating Add
  def int_aarch64_neon_sqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_suqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_usqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqadd : AdvSIMD_2IntArg_Intrinsic;

  // Vector Add High-Half
  // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
  // header is no longer supported.
  def int_aarch64_neon_addhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Rounding Add High-Half
  def int_aarch64_neon_raddhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Saturating Doubling Multiply High
  def int_aarch64_neon_sqdmulh : AdvSIMD_2IntArg_Intrinsic;

  // Vector Saturating Rounding Doubling Multiply High
  def int_aarch64_neon_sqrdmulh : AdvSIMD_2IntArg_Intrinsic;
  // Vector Polynomial Multiply
  def int_aarch64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Long Multiply
  def int_aarch64_neon_smull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_umull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_pmull : AdvSIMD_2VectorArg_Long_Intrinsic;

  // 64-bit polynomial multiply really returns an i128, which is not legal. Fake
  // it with a v16i8.
  def int_aarch64_neon_pmull64 :
        Intrinsic<[llvm_v16i8_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;
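
  // Not overloaded, so the declared IR signature is fixed:
  //   declare <16 x i8> @llvm.aarch64.neon.pmull64(i64, i64)
  // The 128-bit carry-less product is commonly used for GCM/GHASH-style
  // polynomial arithmetic.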

  // Vector Extending Multiply
  def int_aarch64_neon_fmulx : AdvSIMD_2FloatArg_Intrinsic {
    let Properties = [IntrNoMem, Commutative];
  }

  // Vector Saturating Doubling Long Multiply
  def int_aarch64_neon_sqdmull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_sqdmulls_scalar
    : Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;

  // Vector Halving Subtract
  def int_aarch64_neon_shsub : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uhsub : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Saturating Subtract
  def int_aarch64_neon_sqsub : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqsub : AdvSIMD_2IntArg_Intrinsic;

  // Vector Subtract High-Half
  // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
  // header is no longer supported.
  def int_aarch64_neon_subhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Rounding Subtract High-Half
  def int_aarch64_neon_rsubhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Compare Absolute Greater-than-or-equal
  def int_aarch64_neon_facge : AdvSIMD_2Arg_FloatCompare_Intrinsic;

  // Vector Compare Absolute Greater-than
  def int_aarch64_neon_facgt : AdvSIMD_2Arg_FloatCompare_Intrinsic;

  // Vector Absolute Difference
  def int_aarch64_neon_sabd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uabd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fabd : AdvSIMD_2VectorArg_Intrinsic;

  // Scalar Absolute Difference
  def int_aarch64_sisd_fabd : AdvSIMD_2Scalar_Float_Intrinsic;

  // Vector Max
  def int_aarch64_neon_smax : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umax : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmax : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmaxnmp : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Max Across Lanes
  def int_aarch64_neon_smaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_umaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_fmaxv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
  def int_aarch64_neon_fmaxnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Vector Min
  def int_aarch64_neon_smin : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umin : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmin : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fminnmp : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Min/Max Number
  def int_aarch64_neon_fminnm : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_fmaxnm : AdvSIMD_2FloatArg_Intrinsic;

  // Vector Min Across Lanes
  def int_aarch64_neon_sminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_fminv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
  def int_aarch64_neon_fminnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Pairwise Add
  def int_aarch64_neon_addp : AdvSIMD_2VectorArg_Intrinsic;

  // Long Pairwise Add
  // FIXME: In theory, we shouldn't need intrinsics for saddlp or
  // uaddlp, but tblgen's type inference currently can't handle the
  // pattern fragments this ends up generating.
  def int_aarch64_neon_saddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
  def int_aarch64_neon_uaddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;

  // Folding Maximum
  def int_aarch64_neon_smaxp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umaxp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmaxp : AdvSIMD_2VectorArg_Intrinsic;

  // Folding Minimum
  def int_aarch64_neon_sminp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uminp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fminp : AdvSIMD_2VectorArg_Intrinsic;

  // Reciprocal Estimate/Step
  def int_aarch64_neon_frecps : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_frsqrts : AdvSIMD_2FloatArg_Intrinsic;

  // Reciprocal Exponent
  def int_aarch64_neon_frecpx : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Saturating Shift Left
  def int_aarch64_neon_sqshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Rounding Shift Left
  def int_aarch64_neon_srshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_urshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Saturating Rounding Shift Left
  def int_aarch64_neon_sqrshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqrshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Signed->Unsigned Shift Left by Constant
  def int_aarch64_neon_sqshlu : AdvSIMD_2IntArg_Intrinsic;

  // Vector Signed->Unsigned Narrowing Saturating Shift Right by Constant
  def int_aarch64_neon_sqshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Signed->Unsigned Rounding Narrowing Saturating Shift Right by Const
  def int_aarch64_neon_sqrshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Narrowing Shift Right by Constant
  def int_aarch64_neon_sqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
  def int_aarch64_neon_uqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Rounding Narrowing Shift Right by Constant
  def int_aarch64_neon_rshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Rounding Narrowing Saturating Shift Right by Constant
  def int_aarch64_neon_sqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
  def int_aarch64_neon_uqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
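
  // For the "by Constant" shifts the trailing llvm_i32_ty operand carries the
  // immediate shift amount. A hypothetical front-end lowering sketch:
  //   vqrshrn_n_s32(a, 3)
  //     -> call <4 x i16> @llvm.aarch64.neon.sqrshrn.v4i16(<4 x i32> %a, i32 3)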

  // Vector Shift Left
  def int_aarch64_neon_sshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_ushl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Widening Shift Left by Constant
  def int_aarch64_neon_shll : AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic;
  def int_aarch64_neon_sshll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;
  def int_aarch64_neon_ushll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;

  // Vector Shift Right by Constant and Insert
  def int_aarch64_neon_vsri : AdvSIMD_3VectorArg_Scalar_Intrinsic;

  // Vector Shift Left by Constant and Insert
  def int_aarch64_neon_vsli : AdvSIMD_3VectorArg_Scalar_Intrinsic;

  // Vector Saturating Narrow
  def int_aarch64_neon_scalar_sqxtn : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_scalar_uqxtn : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_sqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;
  def int_aarch64_neon_uqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;

  // Vector Saturating Extract and Unsigned Narrow
  def int_aarch64_neon_scalar_sqxtun : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_sqxtun : AdvSIMD_1VectorArg_Narrow_Intrinsic;

  // Vector Absolute Value
  def int_aarch64_neon_abs : AdvSIMD_1IntArg_Intrinsic;

  // Vector Saturating Absolute Value
  def int_aarch64_neon_sqabs : AdvSIMD_1IntArg_Intrinsic;

  // Vector Saturating Negation
  def int_aarch64_neon_sqneg : AdvSIMD_1IntArg_Intrinsic;

  // Vector Count Leading Sign Bits
  def int_aarch64_neon_cls : AdvSIMD_1VectorArg_Intrinsic;

  // Vector Reciprocal Estimate
  def int_aarch64_neon_urecpe : AdvSIMD_1VectorArg_Intrinsic;
  def int_aarch64_neon_frecpe : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Square Root Estimate
  def int_aarch64_neon_ursqrte : AdvSIMD_1VectorArg_Intrinsic;
  def int_aarch64_neon_frsqrte : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Bitwise Reverse
  def int_aarch64_neon_rbit : AdvSIMD_1VectorArg_Intrinsic;

  // Vector Conversions Between Half-Precision and Single-Precision.
  def int_aarch64_neon_vcvtfp2hf
    : Intrinsic<[llvm_v4i16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_aarch64_neon_vcvthf2fp
    : Intrinsic<[llvm_v4f32_ty], [llvm_v4i16_ty], [IntrNoMem]>;

  // Vector Conversions Between Floating-point and Fixed-point.
  def int_aarch64_neon_vcvtfp2fxs : AdvSIMD_CvtFPToFx_Intrinsic;
  def int_aarch64_neon_vcvtfp2fxu : AdvSIMD_CvtFPToFx_Intrinsic;
  def int_aarch64_neon_vcvtfxs2fp : AdvSIMD_CvtFxToFP_Intrinsic;
  def int_aarch64_neon_vcvtfxu2fp : AdvSIMD_CvtFxToFP_Intrinsic;

  // Vector FP->Int Conversions
  def int_aarch64_neon_fcvtas : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtau : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtms : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtmu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtns : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtnu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtps : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtpu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtzs : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtzu : AdvSIMD_FPToIntRounding_Intrinsic;
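
  // Naming: the fifth letter selects the rounding mode (a = ties away from
  // zero, m = toward -inf, n = ties to even, p = toward +inf, z = toward zero)
  // and the last letter the destination (s = signed, u = unsigned).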

  // Vector FP Rounding: only round-to-nearest, ties to even, has no generic
  // IR intrinsic, so it gets a target-specific one.
  def int_aarch64_neon_frintn : AdvSIMD_1FloatArg_Intrinsic;

  // Scalar FP->Int conversions

  // Vector FP Inexact Narrowing
  def int_aarch64_neon_fcvtxn : AdvSIMD_1VectorArg_Expand_Intrinsic;

  // Scalar FP Inexact Narrowing
  def int_aarch64_sisd_fcvtxn : Intrinsic<[llvm_float_ty], [llvm_double_ty],
                                          [IntrNoMem]>;
}

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_2Vector2Index_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_anyvector_ty, llvm_i64_ty, LLVMMatchType<0>, llvm_i64_ty],
                [IntrNoMem]>;
}

// Vector element to element moves
def int_aarch64_neon_vcopy_lane : AdvSIMD_2Vector2Index_Intrinsic;

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_1Vec_Load_Intrinsic
      : Intrinsic<[llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>],
                  [IntrReadArgMem]>;
  class AdvSIMD_1Vec_Store_Lane_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadWriteArgMem, NoCapture<2>]>;

  class AdvSIMD_2Vec_Load_Intrinsic
    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                [LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadArgMem]>;
  class AdvSIMD_2Vec_Load_Lane_Intrinsic
    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadArgMem]>;
  class AdvSIMD_2Vec_Store_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadWriteArgMem, NoCapture<2>]>;
  class AdvSIMD_2Vec_Store_Lane_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadWriteArgMem, NoCapture<3>]>;

  class AdvSIMD_3Vec_Load_Intrinsic
    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadArgMem]>;
  class AdvSIMD_3Vec_Load_Lane_Intrinsic
    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadArgMem]>;
  class AdvSIMD_3Vec_Store_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMMatchType<0>, LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadWriteArgMem, NoCapture<3>]>;
  class AdvSIMD_3Vec_Store_Lane_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadWriteArgMem, NoCapture<4>]>;

  class AdvSIMD_4Vec_Load_Intrinsic
    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadArgMem]>;
  class AdvSIMD_4Vec_Load_Lane_Intrinsic
    : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadArgMem]>;
  class AdvSIMD_4Vec_Store_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadWriteArgMem, NoCapture<4>]>;
  class AdvSIMD_4Vec_Store_Lane_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadWriteArgMem, NoCapture<5>]>;
}

// Memory ops

def int_aarch64_neon_ld1x2 : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld1x3 : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld1x4 : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_st1x2 : AdvSIMD_2Vec_Store_Intrinsic;
def int_aarch64_neon_st1x3 : AdvSIMD_3Vec_Store_Intrinsic;
def int_aarch64_neon_st1x4 : AdvSIMD_4Vec_Store_Intrinsic;

def int_aarch64_neon_ld2 : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld3 : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld4 : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_ld2lane : AdvSIMD_2Vec_Load_Lane_Intrinsic;
def int_aarch64_neon_ld3lane : AdvSIMD_3Vec_Load_Lane_Intrinsic;
def int_aarch64_neon_ld4lane : AdvSIMD_4Vec_Load_Lane_Intrinsic;

def int_aarch64_neon_ld2r : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld3r : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld4r : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_st2  : AdvSIMD_2Vec_Store_Intrinsic;
def int_aarch64_neon_st3  : AdvSIMD_3Vec_Store_Intrinsic;
def int_aarch64_neon_st4  : AdvSIMD_4Vec_Store_Intrinsic;

def int_aarch64_neon_st2lane  : AdvSIMD_2Vec_Store_Lane_Intrinsic;
def int_aarch64_neon_st3lane  : AdvSIMD_3Vec_Store_Lane_Intrinsic;
def int_aarch64_neon_st4lane  : AdvSIMD_4Vec_Store_Lane_Intrinsic;
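
// As an illustration of the typed-pointer name mangling used here, ld2 on
// <4 x i32> is declared as:
//
//   declare { <4 x i32>, <4 x i32> }
//           @llvm.aarch64.neon.ld2.v4i32.p0v4i32(<4 x i32>*)
//
// while st2 takes the two data registers first, with the pointer last.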

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  class AdvSIMD_Tbl1_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbl2_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_Tbl3_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbl4_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  class AdvSIMD_Tbx1_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx2_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx3_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx4_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
}
def int_aarch64_neon_tbl1 : AdvSIMD_Tbl1_Intrinsic;
def int_aarch64_neon_tbl2 : AdvSIMD_Tbl2_Intrinsic;
def int_aarch64_neon_tbl3 : AdvSIMD_Tbl3_Intrinsic;
def int_aarch64_neon_tbl4 : AdvSIMD_Tbl4_Intrinsic;

def int_aarch64_neon_tbx1 : AdvSIMD_Tbx1_Intrinsic;
def int_aarch64_neon_tbx2 : AdvSIMD_Tbx2_Intrinsic;
def int_aarch64_neon_tbx3 : AdvSIMD_Tbx3_Intrinsic;
def int_aarch64_neon_tbx4 : AdvSIMD_Tbx4_Intrinsic;
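
// TBL zeroes result bytes whose table index is out of range; TBX instead
// leaves them unchanged, which is why the Tbx classes carry the destination
// vector (the leading LLVMMatchType<0>) as an extra tied operand.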

let TargetPrefix = "aarch64" in {
  class Crypto_AES_DataKey_Intrinsic
    : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;

  class Crypto_AES_Data_Intrinsic
    : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;

  // SHA intrinsic taking 5 words of the hash (v4i32, i32) and 4 of the schedule
  // (v4i32).
  class Crypto_SHA_5Hash4Schedule_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;
  // SHA intrinsic taking 1 word of the hash (i32).
  class Crypto_SHA_1Hash_Intrinsic
    : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;

  // SHA intrinsic taking 8 words of the schedule
  class Crypto_SHA_8Schedule_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;

  // SHA intrinsic taking 12 words of the schedule
  class Crypto_SHA_12Schedule_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;

  // SHA intrinsic taking 8 words of the hash and 4 of the schedule.
  class Crypto_SHA_8Hash4Schedule_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;
}

// AES
def int_aarch64_crypto_aese   : Crypto_AES_DataKey_Intrinsic;
def int_aarch64_crypto_aesd   : Crypto_AES_DataKey_Intrinsic;
def int_aarch64_crypto_aesmc  : Crypto_AES_Data_Intrinsic;
def int_aarch64_crypto_aesimc : Crypto_AES_Data_Intrinsic;

// SHA1
def int_aarch64_crypto_sha1c  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1p  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1m  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1h  : Crypto_SHA_1Hash_Intrinsic;

def int_aarch64_crypto_sha1su0 : Crypto_SHA_12Schedule_Intrinsic;
def int_aarch64_crypto_sha1su1 : Crypto_SHA_8Schedule_Intrinsic;

// SHA256
def int_aarch64_crypto_sha256h   : Crypto_SHA_8Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha256h2  : Crypto_SHA_8Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha256su0 : Crypto_SHA_8Schedule_Intrinsic;
def int_aarch64_crypto_sha256su1 : Crypto_SHA_12Schedule_Intrinsic;
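
// A minimal C sketch of how these surface through arm_neon.h (assumes a
// compiler targeting the crypto extension, e.g. -march=armv8-a+crypto):
//
//   #include <arm_neon.h>
//   uint32x4_t sha_round(uint32x4_t abcd, uint32x4_t efgh, uint32x4_t wk) {
//     return vsha256hq_u32(abcd, efgh, wk);  // -> llvm.aarch64.crypto.sha256h
//   }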

//===----------------------------------------------------------------------===//
// CRC32

let TargetPrefix = "aarch64" in {

def int_aarch64_crc32b  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cb : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32h  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32ch : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32w  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cw : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32x  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cx : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
    [IntrNoMem]>;
}
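
// A minimal C sketch of reaching the byte variant via ACLE (assumes arm_acle.h
// and a target with the CRC extension):
//
//   #include <arm_acle.h>
//   uint32_t crc_update(uint32_t crc, uint8_t byte) {
//     return __crc32b(crc, byte);  // -> llvm.aarch64.crc32b
//   }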