//===- IntrinsicsAArch64.td - Defines AArch64 intrinsics -----------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the AArch64-specific intrinsics.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON)
let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".

// Vector Absolute Compare (Floating Point)
// FACGE/FACGT on v2f64: |lhs| compared with |rhs|, producing an all-ones or
// all-zeros i64 lane per comparison result.
def int_aarch64_neon_vacgeq :
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
def int_aarch64_neon_vacgtq :
  Intrinsic<[llvm_v2i64_ty], [llvm_v2f64_ty, llvm_v2f64_ty], [IntrNoMem]>;
// Vector saturating accumulate (SUQADD: signed + unsigned, USQADD: unsigned + signed)
def int_aarch64_neon_suqadd : Neon_2Arg_Intrinsic;
def int_aarch64_neon_usqadd : Neon_2Arg_Intrinsic;

// Vector Bitwise reverse (RBIT: reverses the bits within each element)
def int_aarch64_neon_rbit : Neon_1Arg_Intrinsic;

// Vector extract and narrow (XTN: each result element is the low half of the
// corresponding wider source element)
def int_aarch64_neon_xtn :
  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
// Vector floating-point convert
def int_aarch64_neon_frintn : Neon_1Arg_Intrinsic;
def int_aarch64_neon_fsqrt : Neon_1Arg_Intrinsic;
// FCVTXN: convert to lower precision narrow, rounding to odd
def int_aarch64_neon_fcvtxn :
  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
// FCVTN[SU]: convert to integer, round to nearest with ties to even
def int_aarch64_neon_fcvtns :
  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
def int_aarch64_neon_fcvtnu :
  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
// FCVTP[SU]: convert to integer, round towards +infinity
def int_aarch64_neon_fcvtps :
  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
def int_aarch64_neon_fcvtpu :
  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
// FCVTM[SU]: convert to integer, round towards -infinity
def int_aarch64_neon_fcvtms :
  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
def int_aarch64_neon_fcvtmu :
  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
// FCVTA[SU]: convert to integer, round to nearest with ties away from zero
def int_aarch64_neon_fcvtas :
  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
def int_aarch64_neon_fcvtau :
  Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
// Vector maxNum (Floating Point)
def int_aarch64_neon_vmaxnm : Neon_2Arg_Intrinsic;

// Vector minNum (Floating Point)
def int_aarch64_neon_vminnm : Neon_2Arg_Intrinsic;

// Vector Pairwise maxNum (Floating Point)
def int_aarch64_neon_vpmaxnm : Neon_2Arg_Intrinsic;

// Vector Pairwise minNum (Floating Point)
def int_aarch64_neon_vpminnm : Neon_2Arg_Intrinsic;

// Vector Multiply Extended (Floating Point)
def int_aarch64_neon_vmulx : Neon_2Arg_Intrinsic;
// Shift-by-immediate intrinsic classes; the trailing i32 operand is the
// immediate shift amount.
class Neon_N2V_Intrinsic
  : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, llvm_i32_ty],
              [IntrNoMem]>;
class Neon_N3V_Intrinsic
  : Intrinsic<[llvm_anyvector_ty],
              [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
              [IntrNoMem]>;
// Narrowing variant: the source vector has elements twice as wide as the
// result's.
class Neon_N2V_Narrow_Intrinsic
  : Intrinsic<[llvm_anyvector_ty],
              [LLVMExtendedElementVectorType<0>, llvm_i32_ty],
              [IntrNoMem]>;
// Vector rounding shift right by immediate (Signed/Unsigned), and vector
// signed saturating shift left unsigned by immediate
def int_aarch64_neon_vsrshr : Neon_N2V_Intrinsic;
def int_aarch64_neon_vurshr : Neon_N2V_Intrinsic;
def int_aarch64_neon_vsqshlu : Neon_N2V_Intrinsic;

// Vector shift right/left and insert by immediate (SRI/SLI)
def int_aarch64_neon_vsri : Neon_N3V_Intrinsic;
def int_aarch64_neon_vsli : Neon_N3V_Intrinsic;

// Vector (saturating/rounding) shift right narrow by immediate
def int_aarch64_neon_vsqshrun : Neon_N2V_Narrow_Intrinsic;
def int_aarch64_neon_vrshrn : Neon_N2V_Narrow_Intrinsic;
def int_aarch64_neon_vsqrshrun : Neon_N2V_Narrow_Intrinsic;
def int_aarch64_neon_vsqshrn : Neon_N2V_Narrow_Intrinsic;
def int_aarch64_neon_vuqshrn : Neon_N2V_Narrow_Intrinsic;
def int_aarch64_neon_vsqrshrn : Neon_N2V_Narrow_Intrinsic;
def int_aarch64_neon_vuqrshrn : Neon_N2V_Narrow_Intrinsic;
// Across-vector (horizontal) reduction intrinsic classes.
class Neon_Across_Intrinsic
  : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;

class Neon_2Arg_Across_Float_Intrinsic
  : Intrinsic<[llvm_anyvector_ty], [llvm_v4f32_ty], [IntrNoMem]>;
// Vector reductions across all lanes: widening add, max/min, and the
// floating-point maxNum/minNum variants.
def int_aarch64_neon_saddlv : Neon_Across_Intrinsic;
def int_aarch64_neon_uaddlv : Neon_Across_Intrinsic;
def int_aarch64_neon_smaxv  : Neon_Across_Intrinsic;
def int_aarch64_neon_umaxv  : Neon_Across_Intrinsic;
def int_aarch64_neon_sminv  : Neon_Across_Intrinsic;
def int_aarch64_neon_uminv  : Neon_Across_Intrinsic;
def int_aarch64_neon_vaddv  : Neon_Across_Intrinsic;
def int_aarch64_neon_vmaxv  : Neon_Across_Intrinsic;
def int_aarch64_neon_vminv  : Neon_Across_Intrinsic;
def int_aarch64_neon_vmaxnmv : Neon_Across_Intrinsic;
def int_aarch64_neon_vminnmv : Neon_Across_Intrinsic;
// Vector Table Lookup.
// The first 1-4 arguments are the table; the last argument is the index
// vector.
def int_aarch64_neon_vtbl1 :
  Intrinsic<[llvm_anyvector_ty],
            [llvm_anyvector_ty, LLVMMatchType<0>], [IntrNoMem]>;

def int_aarch64_neon_vtbl2 :
  Intrinsic<[llvm_anyvector_ty],
            [llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<0>],
            [IntrNoMem]>;

def int_aarch64_neon_vtbl3 :
  Intrinsic<[llvm_anyvector_ty],
            [llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>,
             LLVMMatchType<0>], [IntrNoMem]>;

def int_aarch64_neon_vtbl4 :
  Intrinsic<[llvm_anyvector_ty],
            [llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>,
             LLVMMatchType<1>, LLVMMatchType<0>], [IntrNoMem]>;
// Vector Table Extension.
// Some elements of the destination vector may not be updated, so the original
// value of that vector is passed as the first argument. The next 1-4
// arguments after that are the table.
def int_aarch64_neon_vtbx1 :
  Intrinsic<[llvm_anyvector_ty],
            [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
            [IntrNoMem]>;

def int_aarch64_neon_vtbx2 :
  Intrinsic<[llvm_anyvector_ty],
            [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>,
             LLVMMatchType<0>], [IntrNoMem]>;

def int_aarch64_neon_vtbx3 :
  Intrinsic<[llvm_anyvector_ty],
            [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>,
             LLVMMatchType<1>, LLVMMatchType<0>], [IntrNoMem]>;

def int_aarch64_neon_vtbx4 :
  Intrinsic<[llvm_anyvector_ty],
            [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>,
             LLVMMatchType<1>, LLVMMatchType<1>, LLVMMatchType<0>],
            [IntrNoMem]>;
// Scalar Add
def int_aarch64_neon_vaddds :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
def int_aarch64_neon_vadddu :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;

// Scalar Saturating Add (Signed, Unsigned)
def int_aarch64_neon_vqadds : Neon_2Arg_Intrinsic;
def int_aarch64_neon_vqaddu : Neon_2Arg_Intrinsic;
// Scalar Sub
def int_aarch64_neon_vsubds :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
def int_aarch64_neon_vsubdu :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;

// Scalar Saturating Sub (Signed, Unsigned)
def int_aarch64_neon_vqsubs : Neon_2Arg_Intrinsic;
def int_aarch64_neon_vqsubu : Neon_2Arg_Intrinsic;
// Scalar Shift Left (Signed, Unsigned)
def int_aarch64_neon_vshlds :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
def int_aarch64_neon_vshldu :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;

// Scalar Saturating Shift Left
def int_aarch64_neon_vqshls : Neon_2Arg_Intrinsic;
def int_aarch64_neon_vqshlu : Neon_2Arg_Intrinsic;

// Scalar Shift Rounding Left
def int_aarch64_neon_vrshlds :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;
def int_aarch64_neon_vrshldu :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty], [IntrNoMem]>;

// Scalar Saturating Rounding Shift Left
def int_aarch64_neon_vqrshls : Neon_2Arg_Intrinsic;
def int_aarch64_neon_vqrshlu : Neon_2Arg_Intrinsic;
// Scalar Reduce Pairwise Add.
def int_aarch64_neon_vpadd :
  Intrinsic<[llvm_v1i64_ty], [llvm_v2i64_ty], [IntrNoMem]>;
def int_aarch64_neon_vpfadd :
  Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
def int_aarch64_neon_vpfaddq :
  Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
// Scalar Reduce Pairwise Floating Point Max/Min.
def int_aarch64_neon_vpmax :
  Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
def int_aarch64_neon_vpmaxq :
  Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
def int_aarch64_neon_vpmin :
  Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
def int_aarch64_neon_vpminq :
  Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
// Scalar Reduce Pairwise Floating Point Maxnm/Minnm.
def int_aarch64_neon_vpfmaxnm :
  Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
def int_aarch64_neon_vpfmaxnmq :
  Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
def int_aarch64_neon_vpfminnm :
  Intrinsic<[llvm_v1f32_ty], [llvm_v2f32_ty], [IntrNoMem]>;
def int_aarch64_neon_vpfminnmq :
  Intrinsic<[llvm_v1f64_ty], [llvm_v2f64_ty], [IntrNoMem]>;
// Scalar Signed Integer Convert To Floating-point
def int_aarch64_neon_vcvtf32_s32 :
  Intrinsic<[llvm_float_ty], [llvm_v1i32_ty], [IntrNoMem]>;
def int_aarch64_neon_vcvtf64_s64 :
  Intrinsic<[llvm_double_ty], [llvm_v1i64_ty], [IntrNoMem]>;

// Scalar Unsigned Integer Convert To Floating-point
def int_aarch64_neon_vcvtf32_u32 :
  Intrinsic<[llvm_float_ty], [llvm_v1i32_ty], [IntrNoMem]>;
def int_aarch64_neon_vcvtf64_u64 :
  Intrinsic<[llvm_double_ty], [llvm_v1i64_ty], [IntrNoMem]>;

// Scalar Floating-point Reciprocal Exponent
def int_aarch64_neon_vrecpx : Neon_1Arg_Intrinsic;
// Scalar comparison intrinsic class: compares the two source operands and
// produces an all-ones/all-zeros mask element.
class Neon_Cmp_Intrinsic
  : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, llvm_anyvector_ty],
              [IntrNoMem]>;

// Scalar Compare Equal
def int_aarch64_neon_vceq : Neon_Cmp_Intrinsic;

// Scalar Compare Greater-Than or Equal
def int_aarch64_neon_vcge : Neon_Cmp_Intrinsic;
def int_aarch64_neon_vchs : Neon_Cmp_Intrinsic;

// Scalar Compare Less-Than or Equal
def int_aarch64_neon_vclez : Neon_Cmp_Intrinsic;

// Scalar Compare Less-Than
def int_aarch64_neon_vcltz : Neon_Cmp_Intrinsic;

// Scalar Compare Greater-Than
def int_aarch64_neon_vcgt : Neon_Cmp_Intrinsic;
def int_aarch64_neon_vchi : Neon_Cmp_Intrinsic;

// Scalar Compare Bitwise Test Bits
def int_aarch64_neon_vtstd : Neon_Cmp_Intrinsic;

// Scalar Floating-point Absolute Compare Greater Than Or Equal
def int_aarch64_neon_vcage : Neon_Cmp_Intrinsic;

// Scalar Floating-point Absolute Compare Greater Than
def int_aarch64_neon_vcagt : Neon_Cmp_Intrinsic;
// Scalar Signed Saturating Accumulated of Unsigned Value
def int_aarch64_neon_vuqadd : Neon_2Arg_Intrinsic;

// Scalar Unsigned Saturating Accumulated of Signed Value
def int_aarch64_neon_vsqadd : Neon_2Arg_Intrinsic;

// Scalar Absolute Value
def int_aarch64_neon_vabs :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>;

// Scalar Negate Value
def int_aarch64_neon_vneg :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty], [IntrNoMem]>;

// Signed Saturating Doubling Multiply-Add Long
def int_aarch64_neon_vqdmlal : Neon_3Arg_Long_Intrinsic;

// Signed Saturating Doubling Multiply-Subtract Long
def int_aarch64_neon_vqdmlsl : Neon_3Arg_Long_Intrinsic;

// Signed Saturating Doubling Multiply Long
def int_aarch64_neon_vqdmull : Neon_2Arg_Long_Intrinsic;
// Scalar shift-by-immediate intrinsic classes (v1i64 operands; the trailing
// i32 is the immediate shift amount).
class Neon_2Arg_ShiftImm_Intrinsic
  : Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>;

class Neon_3Arg_ShiftImm_Intrinsic
  : Intrinsic<[llvm_v1i64_ty], [llvm_v1i64_ty, llvm_v1i64_ty, llvm_i32_ty],
              [IntrNoMem]>;

// Scalar Shift Right (Immediate)
def int_aarch64_neon_vshrds_n : Neon_2Arg_ShiftImm_Intrinsic;
def int_aarch64_neon_vshrdu_n : Neon_2Arg_ShiftImm_Intrinsic;

// Scalar Shift Right and Accumulate (Immediate)
def int_aarch64_neon_vsrads_n : Neon_3Arg_ShiftImm_Intrinsic;
def int_aarch64_neon_vsradu_n : Neon_3Arg_ShiftImm_Intrinsic;

// Scalar Rounding Shift Right and Accumulate (Immediate)
def int_aarch64_neon_vrsrads_n : Neon_3Arg_ShiftImm_Intrinsic;
def int_aarch64_neon_vrsradu_n : Neon_3Arg_ShiftImm_Intrinsic;

// Scalar Shift Left (Immediate)
def int_aarch64_neon_vshld_n : Neon_2Arg_ShiftImm_Intrinsic;

// Scalar Saturating Shift Left (Immediate)
def int_aarch64_neon_vqshls_n : Neon_N2V_Intrinsic;
def int_aarch64_neon_vqshlu_n : Neon_N2V_Intrinsic;

// Scalar Signed Saturating Shift Left Unsigned (Immediate)
def int_aarch64_neon_vqshlus_n : Neon_N2V_Intrinsic;
// Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
// The trailing i32 operand is the number of fractional bits.
def int_aarch64_neon_vcvtf32_n_s32 :
  Intrinsic<[llvm_float_ty], [llvm_v1i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_aarch64_neon_vcvtf64_n_s64 :
  Intrinsic<[llvm_double_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>;

// Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
def int_aarch64_neon_vcvtf32_n_u32 :
  Intrinsic<[llvm_float_ty], [llvm_v1i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_aarch64_neon_vcvtf64_n_u64 :
  Intrinsic<[llvm_double_ty], [llvm_v1i64_ty, llvm_i32_ty], [IntrNoMem]>;

// Scalar Floating-point Convert To Signed Fixed-point (Immediate)
def int_aarch64_neon_vcvts_n_s32_f32 :
  Intrinsic<[llvm_v1i32_ty], [llvm_v1f32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_aarch64_neon_vcvtd_n_s64_f64 :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1f64_ty, llvm_i32_ty], [IntrNoMem]>;

// Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
def int_aarch64_neon_vcvts_n_u32_f32 :
  Intrinsic<[llvm_v1i32_ty], [llvm_v1f32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_aarch64_neon_vcvtd_n_u64_f64 :
  Intrinsic<[llvm_v1i64_ty], [llvm_v1f64_ty, llvm_i32_ty], [IntrNoMem]>;
// SHA1 hash-update intrinsic class: (hash_abcd, hash_e, wk) -> hash_abcd.
class Neon_SHA_Intrinsic
  : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v1i32_ty, llvm_v4i32_ty],
              [IntrNoMem]>;

def int_aarch64_neon_sha1c : Neon_SHA_Intrinsic;
def int_aarch64_neon_sha1m : Neon_SHA_Intrinsic;
def int_aarch64_neon_sha1p : Neon_SHA_Intrinsic;