//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation -----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "aarch64-isel"

#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64TargetMachine.h"
#include "AArch64TargetObjectFile.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/CallingConv.h"

using namespace llvm;

static TargetLoweringObjectFile *createTLOF(AArch64TargetMachine &TM) {
  assert(TM.getSubtarget<AArch64Subtarget>().isTargetELF() &&
         "unknown subtarget type");
  return new AArch64ElfTargetObjectFile();
}

AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
  : TargetLowering(TM, createTLOF(TM)), Itins(TM.getInstrItineraryData()) {

  const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();

  // SIMD compares set the entire lane's bits to 1
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
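  // For example, a v4i32 SETCC yields all-zeros or all-ones in each 32-bit
  // lane, which is what allows a bitwise select (NEON BSL) to implement
  // vector selection.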

  // Scalar register <-> type mapping
  addRegisterClass(MVT::i32, &AArch64::GPR32RegClass);
  addRegisterClass(MVT::i64, &AArch64::GPR64RegClass);

  if (Subtarget->hasFPARMv8()) {
    addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
  }

  if (Subtarget->hasNEON()) {
    addRegisterClass(MVT::v1i8, &AArch64::FPR8RegClass);
    addRegisterClass(MVT::v1i16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::v1i32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v1f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v8i8, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v4i16, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v2i32, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v2f32, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::v16i8, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v8i16, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v4i32, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v2i64, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v4f32, &AArch64::FPR128RegClass);
    addRegisterClass(MVT::v2f64, &AArch64::FPR128RegClass);
  }

  computeRegisterProperties();

  // We combine OR nodes for bitfield and NEON BSL operations.
  setTargetDAGCombine(ISD::OR);

  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::SHL);

  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);

  // AArch64 does not have i1 loads, or much of anything for i1 really.
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
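
  // Promote here means an i1 extending load is legalized as a load of the
  // next larger legal type (i8), whose result is then truncated or extended
  // as required.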

  setStackPointerRegisterToSaveRestore(AArch64::XSP);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);

  // We'll lower globals to wrappers for selection.
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);

  // A64 instructions have the comparison predicate attached to the user of the
  // result, but having a separate comparison is valuable for matching.
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);

  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  setOperationAction(ISD::BRCOND, MVT::Other, Custom);

  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);

  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);

  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);

  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);

  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  // Legal floating-point operations.
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f64, Legal);

  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FCEIL, MVT::f64, Legal);

  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f64, Legal);

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

  setOperationAction(ISD::FNEG, MVT::f32, Legal);
  setOperationAction(ISD::FNEG, MVT::f64, Legal);

  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f64, Legal);

  setOperationAction(ISD::FSQRT, MVT::f32, Legal);
  setOperationAction(ISD::FSQRT, MVT::f64, Legal);

  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f64, Legal);

  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f128, Legal);

  // Illegal floating-point operations.
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);

  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);

  setOperationAction(ISD::FEXP, MVT::f32, Expand);
  setOperationAction(ISD::FEXP, MVT::f64, Expand);

  setOperationAction(ISD::FEXP2, MVT::f32, Expand);
  setOperationAction(ISD::FEXP2, MVT::f64, Expand);

  setOperationAction(ISD::FLOG, MVT::f32, Expand);
  setOperationAction(ISD::FLOG, MVT::f64, Expand);

  setOperationAction(ISD::FLOG2, MVT::f32, Expand);
  setOperationAction(ISD::FLOG2, MVT::f64, Expand);

  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
  setOperationAction(ISD::FLOG10, MVT::f64, Expand);

  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);

  setOperationAction(ISD::FPOWI, MVT::f32, Expand);
  setOperationAction(ISD::FPOWI, MVT::f64, Expand);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);

  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);

  // Virtually no operation on f128 is legal, but LLVM can't expand them when
  // there's a valid register class, so we need custom operations in most cases.
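  // The Custom arithmetic entries below are presumably lowered to the usual
  // soft-float library calls (e.g. __addtf3 for an f128 FADD); the Expand
  // entries fall back to whatever generic expansion or libcall the legalizer
  // picks.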
  setOperationAction(ISD::FABS, MVT::f128, Expand);
  setOperationAction(ISD::FADD, MVT::f128, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
  setOperationAction(ISD::FCOS, MVT::f128, Expand);
  setOperationAction(ISD::FDIV, MVT::f128, Custom);
  setOperationAction(ISD::FMA, MVT::f128, Expand);
  setOperationAction(ISD::FMUL, MVT::f128, Custom);
  setOperationAction(ISD::FNEG, MVT::f128, Expand);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
  setOperationAction(ISD::FP_ROUND, MVT::f128, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FRINT, MVT::f128, Expand);
  setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSQRT, MVT::f128, Expand);
  setOperationAction(ISD::FSUB, MVT::f128, Custom);
  setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);
  setOperationAction(ISD::SELECT, MVT::f128, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);

  // Lowering for many of the conversions is actually specified by the non-f128
  // type. The LowerXXX function will be trivial when f128 isn't involved.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);

  // This prevents LLVM from trying to compress double constants into a
  // floating-point constant-pool entry and trying to load from there. It's of
  // doubtful benefit for A64: we'd need LDR followed by FCVT, I believe.
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);

  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);

  setExceptionPointerRegister(AArch64::X0);
  setExceptionSelectorRegister(AArch64::X1);

  if (Subtarget->hasNEON()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v1i64, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v16i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Expand);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v1f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i16, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);

    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i32, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Legal);
    setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Legal);

    setOperationAction(ISD::SETCC, MVT::v8i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v2i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v1i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v2f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4f32, Custom);
    setOperationAction(ISD::SETCC, MVT::v1f64, Custom);
    setOperationAction(ISD::SETCC, MVT::v2f64, Custom);

    setOperationAction(ISD::FFLOOR, MVT::v2f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v1f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);

    setOperationAction(ISD::FCEIL, MVT::v2f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v1f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);

    setOperationAction(ISD::FTRUNC, MVT::v2f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v1f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);

    setOperationAction(ISD::FRINT, MVT::v2f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v1f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v1f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    setOperationAction(ISD::FROUND, MVT::v2f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v1f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v2f64, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v1i8, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v1i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v1i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Custom);

    setOperationAction(ISD::UINT_TO_FP, MVT::v1i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v1i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v1i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v1i8, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v1i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v1i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_UINT, MVT::v1i8, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v1i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v1i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Custom);

    // Neon does not support vector divide/remainder operations except
    // floating-point divide.
    setOperationAction(ISD::SDIV, MVT::v1i8, Expand);
    setOperationAction(ISD::SDIV, MVT::v8i8, Expand);
    setOperationAction(ISD::SDIV, MVT::v16i8, Expand);
    setOperationAction(ISD::SDIV, MVT::v1i16, Expand);
    setOperationAction(ISD::SDIV, MVT::v4i16, Expand);
    setOperationAction(ISD::SDIV, MVT::v8i16, Expand);
    setOperationAction(ISD::SDIV, MVT::v1i32, Expand);
    setOperationAction(ISD::SDIV, MVT::v2i32, Expand);
    setOperationAction(ISD::SDIV, MVT::v4i32, Expand);
    setOperationAction(ISD::SDIV, MVT::v1i64, Expand);
    setOperationAction(ISD::SDIV, MVT::v2i64, Expand);

    setOperationAction(ISD::UDIV, MVT::v1i8, Expand);
    setOperationAction(ISD::UDIV, MVT::v8i8, Expand);
    setOperationAction(ISD::UDIV, MVT::v16i8, Expand);
    setOperationAction(ISD::UDIV, MVT::v1i16, Expand);
    setOperationAction(ISD::UDIV, MVT::v4i16, Expand);
    setOperationAction(ISD::UDIV, MVT::v8i16, Expand);
    setOperationAction(ISD::UDIV, MVT::v1i32, Expand);
    setOperationAction(ISD::UDIV, MVT::v2i32, Expand);
    setOperationAction(ISD::UDIV, MVT::v4i32, Expand);
    setOperationAction(ISD::UDIV, MVT::v1i64, Expand);
    setOperationAction(ISD::UDIV, MVT::v2i64, Expand);

    setOperationAction(ISD::SREM, MVT::v1i8, Expand);
    setOperationAction(ISD::SREM, MVT::v8i8, Expand);
    setOperationAction(ISD::SREM, MVT::v16i8, Expand);
    setOperationAction(ISD::SREM, MVT::v1i16, Expand);
    setOperationAction(ISD::SREM, MVT::v4i16, Expand);
    setOperationAction(ISD::SREM, MVT::v8i16, Expand);
    setOperationAction(ISD::SREM, MVT::v1i32, Expand);
    setOperationAction(ISD::SREM, MVT::v2i32, Expand);
    setOperationAction(ISD::SREM, MVT::v4i32, Expand);
    setOperationAction(ISD::SREM, MVT::v1i64, Expand);
    setOperationAction(ISD::SREM, MVT::v2i64, Expand);

    setOperationAction(ISD::UREM, MVT::v1i8, Expand);
    setOperationAction(ISD::UREM, MVT::v8i8, Expand);
    setOperationAction(ISD::UREM, MVT::v16i8, Expand);
    setOperationAction(ISD::UREM, MVT::v1i16, Expand);
    setOperationAction(ISD::UREM, MVT::v4i16, Expand);
    setOperationAction(ISD::UREM, MVT::v8i16, Expand);
    setOperationAction(ISD::UREM, MVT::v1i32, Expand);
    setOperationAction(ISD::UREM, MVT::v2i32, Expand);
    setOperationAction(ISD::UREM, MVT::v4i32, Expand);
    setOperationAction(ISD::UREM, MVT::v1i64, Expand);
    setOperationAction(ISD::UREM, MVT::v2i64, Expand);

    setOperationAction(ISD::FREM, MVT::v2f32, Expand);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);
    setOperationAction(ISD::FREM, MVT::v1f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);

    // Vector ExtLoad and TruncStore are expanded.
    for (unsigned I = MVT::FIRST_VECTOR_VALUETYPE;
         I <= MVT::LAST_VECTOR_VALUETYPE; ++I) {
      MVT VT = (MVT::SimpleValueType) I;
      setLoadExtAction(ISD::SEXTLOAD, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, VT, Expand);
      for (unsigned II = MVT::FIRST_VECTOR_VALUETYPE;
           II <= MVT::LAST_VECTOR_VALUETYPE; ++II) {
        MVT VT1 = (MVT::SimpleValueType) II;
        // A TruncStore has two vector types of the same number of elements
        // and different element sizes.
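        // For example, storing a v4i32 value with a v4i16 memory type is a
        // truncating store, and must be marked Expand here.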
        if (VT.getVectorNumElements() == VT1.getVectorNumElements() &&
            VT.getVectorElementType().getSizeInBits()
                > VT1.getVectorElementType().getSizeInBits())
          setTruncStoreAction(VT, VT1, Expand);
      }
    }

    // There is no v1i64/v2i64 multiply; expand v1i64/v2i64 to GPR i64
    // multiply.
    // FIXME: For a v2i64 multiply, we copy VPR to GPR and do 2 i64 multiplies,
    // and then copy back to VPR. This solution may be optimized by the
    // following 3 NEON instructions:
    //     pmull  v2.1q, v0.1d, v1.1d
    //     pmull2 v3.1q, v0.2d, v1.2d
    //     ins    v2.d[1], v3.d[0]
    // As we currently can't verify the correctness of this assumption, such an
    // optimization is left for the future.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    setOperationAction(ISD::MUL, MVT::v2i64, Expand);
  }
}

EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  // It's reasonably important that this value matches the "natural" legal
  // promotion from i1 for scalar types. Otherwise LegalizeTypes can get itself
  // in a twist (e.g. inserting an any_extend which then becomes i64 -> i64).
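  // For example, a scalar f64 comparison produces an i32 result, while a
  // v2f64 comparison produces a v2i64 lane mask.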
  if (!VT.isVector()) return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord,
                                  unsigned &LdrOpc,
                                  unsigned &StrOpc) {
  static const unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword,
                                       AArch64::LDXR_word, AArch64::LDXR_dword};
  static const unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword,
                                      AArch64::LDAXR_word, AArch64::LDAXR_dword};
  static const unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword,
                                        AArch64::STXR_word, AArch64::STXR_dword};
  static const unsigned StoreRels[] = {AArch64::STLXR_byte, AArch64::STLXR_hword,
                                       AArch64::STLXR_word, AArch64::STLXR_dword};

  const unsigned *LoadOps, *StoreOps;
  if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    LoadOps = LoadAcqs;
  else
    LoadOps = LoadBares;

  if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    StoreOps = StoreRels;
  else
    StoreOps = StoreBares;

  assert(isPowerOf2_32(Size) && Size <= 8 &&
         "unsupported size for atomic binary op!");
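
  // Log2_32 maps the access size in bytes (1, 2, 4 or 8) to the table index
  // (0-3); e.g. a 4-byte access with acquire ordering selects LDAXR_word for
  // the load and, absent release semantics, plain STXR_word for the store.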
  LdrOpc = LoadOps[Log2_32(Size)];
  StrOpc = StoreOps[Log2_32(Size)];
}

// FIXME: AArch64::DTripleRegClass and AArch64::QTripleRegClass don't really
// have a value type mapped, and they are both being defined as MVT::untyped.
// Without knowing the MVT type, MachineLICM::getRegisterClassIDAndCost
// would fail to figure out the register pressure correctly.
std::pair<const TargetRegisterClass*, uint8_t>
AArch64TargetLowering::findRepresentativeClass(MVT VT) const {
  const TargetRegisterClass *RRC = 0;
  uint8_t Cost = 1;
  switch (VT.SimpleTy) {
  default:
    return TargetLowering::findRepresentativeClass(VT);
  case MVT::v4i64:
    RRC = &AArch64::QPairRegClass;
    Cost = 2;
    break;
  case MVT::v8i64:
    RRC = &AArch64::QQuadRegClass;
    Cost = 4;
    break;
  }
  return std::make_pair(RRC, Cost);
}

MachineBasicBlock *
AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const {
  // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *MF = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned incr = MI->getOperand(2).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  const TargetRegisterClass *TRC
    = Size == 8 ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
  unsigned scratch = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   ldxr dest, ptr
  //   <binop> scratch, dest, incr
  //   stxr stxr_status, scratch, ptr
  //   cbnz stxr_status, loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  if (BinOpcode) {
    // All arithmetic operations we'll be creating are designed to take an extra
    // shift or extend operand, which we can conveniently set to zero.

    // Operand order needs to go the other way for NAND.
    if (BinOpcode == AArch64::BICwww_lsl || BinOpcode == AArch64::BICxxx_lsl)
      BuildMI(BB, dl, TII->get(BinOpcode), scratch)
        .addReg(incr).addReg(dest).addImm(0);
    else
      BuildMI(BB, dl, TII->get(BinOpcode), scratch)
        .addReg(dest).addReg(incr).addImm(0);
  }

  // From the stxr, the register is GPR32; from the cmp it's GPR32wsp
  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(scratch).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loopMBB);

  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;

  MI->eraseFromParent(); // The instruction is gone now.

  return BB;
}

MachineBasicBlock *
AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI,
                                              MachineBasicBlock *BB,
                                              unsigned Size,
                                              unsigned CmpOp,
                                              A64CC::CondCodes Cond) const {
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction *MF = BB->getParent();
  MachineFunction::iterator It = BB;
  ++It;

  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned incr = MI->getOperand(2).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());

  unsigned oldval = dest;
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  const TargetRegisterClass *TRC, *TRCsp;
  if (Size == 8) {
    TRC = &AArch64::GPR64RegClass;
    TRCsp = &AArch64::GPR64xspRegClass;
  } else {
    TRC = &AArch64::GPR32RegClass;
    TRCsp = &AArch64::GPR32wspRegClass;
  }

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  unsigned scratch = MRI.createVirtualRegister(TRC);
  MRI.constrainRegClass(scratch, TRCsp);

  //  thisMBB:
  //   ...
  //   fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);

  //  loopMBB:
  //   ldxr dest, ptr
  //   cmp incr, dest (, sign extend if necessary)
  //   csel scratch, dest, incr, cond
  //   stxr stxr_status, scratch, ptr
  //   cbnz stxr_status, loopMBB
  //   fallthrough --> exitMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  // Build compare and cmov instructions.
  MRI.constrainRegClass(incr, TRCsp);
  BuildMI(BB, dl, TII->get(CmpOp))
    .addReg(incr).addReg(oldval).addImm(0);

  BuildMI(BB, dl, TII->get(Size == 8 ? AArch64::CSELxxxc : AArch64::CSELwwwc),
          scratch)
    .addReg(oldval).addReg(incr).addImm(Cond);
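
  // For example, ATOMIC_LOAD_MIN passes Cond == A64CC::GT: when incr > oldval
  // the CSEL keeps oldval, otherwise it selects incr, so the value written
  // back is min(oldval, incr).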
  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status)
    .addReg(scratch).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loopMBB);

  BB->addSuccessor(loopMBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;

  MI->eraseFromParent(); // The instruction is gone now.

  return BB;
}

MachineBasicBlock *
AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const {
  unsigned dest = MI->getOperand(0).getReg();
  unsigned ptr = MI->getOperand(1).getReg();
  unsigned oldval = MI->getOperand(2).getReg();
  unsigned newval = MI->getOperand(3).getReg();
  AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(4).getImm());
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();

  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  const TargetRegisterClass *TRCsp;
  TRCsp = Size == 8 ? &AArch64::GPR64xspRegClass : &AArch64::GPR32wspRegClass;

  unsigned ldrOpc, strOpc;
  getExclusiveOperation(Size, Ord, ldrOpc, strOpc);

  MachineFunction *MF = BB->getParent();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It; // insert the new blocks after the current block

  MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, loop1MBB);
  MF->insert(It, loop2MBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  //  thisMBB:
  //   ...
  //   fallthrough --> loop1MBB
  BB->addSuccessor(loop1MBB);

  //  loop1MBB:
  //   ldxr dest, [ptr]
  //   cmp dest, oldval
  //   b.ne exitMBB
  BB = loop1MBB;
  BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);

  unsigned CmpOp = Size == 8 ? AArch64::CMPxx_lsl : AArch64::CMPww_lsl;
  MRI.constrainRegClass(dest, TRCsp);
  BuildMI(BB, dl, TII->get(CmpOp))
    .addReg(dest).addReg(oldval).addImm(0);
  BuildMI(BB, dl, TII->get(AArch64::Bcc))
    .addImm(A64CC::NE).addMBB(exitMBB);
  BB->addSuccessor(loop2MBB);
  BB->addSuccessor(exitMBB);

  //  loop2MBB:
  //   strex stxr_status, newval, [ptr]
  //   cbnz stxr_status, loop1MBB
  BB = loop2MBB;
  unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
  MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);

  BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(newval).addReg(ptr);
  BuildMI(BB, dl, TII->get(AArch64::CBNZw))
    .addReg(stxr_status).addMBB(loop1MBB);
  BB->addSuccessor(loop1MBB);
  BB->addSuccessor(exitMBB);

  //  exitMBB:
  //   ...
  BB = exitMBB;

  MI->eraseFromParent(); // The instruction is gone now.

  return BB;
}

MachineBasicBlock *
AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
                                    MachineBasicBlock *MBB) const {
  // We materialise the F128CSEL pseudo-instruction using conditional branches
  // and loads, giving an instruction sequence like:
  //     str q0, [sp]
  //     b.ne IfTrue
  //     b Finish
  // IfTrue:
  //     str q1, [sp]
  // Finish:
  //     ldr q0, [sp]
  //
  // Using virtual registers would probably not be beneficial since COPY
  // instructions are expensive for f128 (there's no actual instruction to
  // implement them).
  //
  // An alternative would be to do an integer-CSEL on some address. E.g.:
  //     mov x0, sp
  //     add x1, sp, #16
  //     str q0, [x0]
  //     str q1, [x1]
  //     csel x0, x0, x1, ne
  //     ldr q0, [x0]
  //
  // It's unclear which approach is actually optimal.
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  MachineFunction *MF = MBB->getParent();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction::iterator It = MBB;
  ++It;

  unsigned DestReg = MI->getOperand(0).getReg();
  unsigned IfTrueReg = MI->getOperand(1).getReg();
  unsigned IfFalseReg = MI->getOperand(2).getReg();
  unsigned CondCode = MI->getOperand(3).getImm();
  bool NZCVKilled = MI->getOperand(4).isKill();

  MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, TrueBB);
  MF->insert(It, EndBB);

  // Transfer rest of current basic-block to EndBB
  EndBB->splice(EndBB->begin(), MBB,
                llvm::next(MachineBasicBlock::iterator(MI)),
                MBB->end());
  EndBB->transferSuccessorsAndUpdatePHIs(MBB);

  // We need somewhere to store the f128 value needed.
  int ScratchFI = MF->getFrameInfo()->CreateSpillStackObject(16, 16);

  // [... start of incoming MBB ...]
  //     str qIFFALSE, [sp]
  //     b.ne TrueBB
  //     b EndBB
  BuildMI(MBB, DL, TII->get(AArch64::LSFP128_STR))
    .addReg(IfFalseReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);
  BuildMI(MBB, DL, TII->get(AArch64::Bcc))
    .addImm(CondCode)
    .addMBB(TrueBB);
  BuildMI(MBB, DL, TII->get(AArch64::Bimm))
    .addMBB(EndBB);
  MBB->addSuccessor(TrueBB);
  MBB->addSuccessor(EndBB);

  if (!NZCVKilled) {
    // NZCV is live-through TrueBB.
    TrueBB->addLiveIn(AArch64::NZCV);
    EndBB->addLiveIn(AArch64::NZCV);
  }

  // TrueBB:
  //     str qIFTRUE, [sp]
  BuildMI(TrueBB, DL, TII->get(AArch64::LSFP128_STR))
    .addReg(IfTrueReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);

  // Note: fallthrough. We can rely on LLVM adding a branch if it reorders the
  // blocks.
  TrueBB->addSuccessor(EndBB);

  // EndBB:
  //     ldr qDEST, [sp]
  // [... rest of incoming MBB ...]
  MachineInstr *StartOfEnd = EndBB->begin();
  BuildMI(*EndBB, StartOfEnd, DL, TII->get(AArch64::LSFP128_LDR), DestReg)
    .addFrameIndex(ScratchFI)
    .addImm(0);

  MI->eraseFromParent();
  return EndBB;
}

MachineBasicBlock *
AArch64TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                   MachineBasicBlock *MBB) const {
  switch (MI->getOpcode()) {
  default: llvm_unreachable("Unhandled instruction with custom inserter");
  case AArch64::F128CSEL:
    return EmitF128CSEL(MI, MBB);
  case AArch64::ATOMIC_LOAD_ADD_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ADDwww_lsl);
  case AArch64::ATOMIC_LOAD_ADD_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ADDxxx_lsl);

  case AArch64::ATOMIC_LOAD_SUB_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::SUBwww_lsl);
  case AArch64::ATOMIC_LOAD_SUB_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::SUBxxx_lsl);

  case AArch64::ATOMIC_LOAD_AND_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ANDwww_lsl);
  case AArch64::ATOMIC_LOAD_AND_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ANDxxx_lsl);

  case AArch64::ATOMIC_LOAD_OR_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::ORRwww_lsl);
  case AArch64::ATOMIC_LOAD_OR_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::ORRxxx_lsl);

  case AArch64::ATOMIC_LOAD_XOR_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::EORwww_lsl);
  case AArch64::ATOMIC_LOAD_XOR_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::EORxxx_lsl);

  case AArch64::ATOMIC_LOAD_NAND_I8:
    return emitAtomicBinary(MI, MBB, 1, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I16:
    return emitAtomicBinary(MI, MBB, 2, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I32:
    return emitAtomicBinary(MI, MBB, 4, AArch64::BICwww_lsl);
  case AArch64::ATOMIC_LOAD_NAND_I64:
    return emitAtomicBinary(MI, MBB, 8, AArch64::BICxxx_lsl);

  case AArch64::ATOMIC_LOAD_MIN_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::GT);
  case AArch64::ATOMIC_LOAD_MIN_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::GT);

  case AArch64::ATOMIC_LOAD_MAX_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LT);
  case AArch64::ATOMIC_LOAD_MAX_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LT);

  case AArch64::ATOMIC_LOAD_UMIN_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::HI);
  case AArch64::ATOMIC_LOAD_UMIN_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::HI);

  case AArch64::ATOMIC_LOAD_UMAX_I8:
    return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I16:
    return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I32:
    return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LO);
  case AArch64::ATOMIC_LOAD_UMAX_I64:
    return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LO);
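
  // ATOMIC_SWAP passes BinOpcode == 0: emitAtomicBinary then emits no binary
  // operation and simply stores the incoming value (scratch is incr itself).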
  case AArch64::ATOMIC_SWAP_I8:
    return emitAtomicBinary(MI, MBB, 1, 0);
  case AArch64::ATOMIC_SWAP_I16:
    return emitAtomicBinary(MI, MBB, 2, 0);
  case AArch64::ATOMIC_SWAP_I32:
    return emitAtomicBinary(MI, MBB, 4, 0);
  case AArch64::ATOMIC_SWAP_I64:
    return emitAtomicBinary(MI, MBB, 8, 0);

  case AArch64::ATOMIC_CMP_SWAP_I8:
    return emitAtomicCmpSwap(MI, MBB, 1);
  case AArch64::ATOMIC_CMP_SWAP_I16:
    return emitAtomicCmpSwap(MI, MBB, 2);
  case AArch64::ATOMIC_CMP_SWAP_I32:
    return emitAtomicCmpSwap(MI, MBB, 4);
  case AArch64::ATOMIC_CMP_SWAP_I64:
    return emitAtomicCmpSwap(MI, MBB, 8);
  }
}

const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  case AArch64ISD::BR_CC: return "AArch64ISD::BR_CC";
  case AArch64ISD::Call: return "AArch64ISD::Call";
  case AArch64ISD::FPMOV: return "AArch64ISD::FPMOV";
  case AArch64ISD::GOTLoad: return "AArch64ISD::GOTLoad";
  case AArch64ISD::BFI: return "AArch64ISD::BFI";
  case AArch64ISD::EXTR: return "AArch64ISD::EXTR";
  case AArch64ISD::Ret: return "AArch64ISD::Ret";
  case AArch64ISD::SBFX: return "AArch64ISD::SBFX";
  case AArch64ISD::SELECT_CC: return "AArch64ISD::SELECT_CC";
  case AArch64ISD::SETCC: return "AArch64ISD::SETCC";
  case AArch64ISD::TC_RETURN: return "AArch64ISD::TC_RETURN";
  case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER";
  case AArch64ISD::TLSDESCCALL: return "AArch64ISD::TLSDESCCALL";
  case AArch64ISD::WrapperLarge: return "AArch64ISD::WrapperLarge";
  case AArch64ISD::WrapperSmall: return "AArch64ISD::WrapperSmall";

  case AArch64ISD::NEON_MOVIMM:
    return "AArch64ISD::NEON_MOVIMM";
  case AArch64ISD::NEON_MVNIMM:
    return "AArch64ISD::NEON_MVNIMM";
  case AArch64ISD::NEON_FMOVIMM:
    return "AArch64ISD::NEON_FMOVIMM";
  case AArch64ISD::NEON_CMP:
    return "AArch64ISD::NEON_CMP";
  case AArch64ISD::NEON_CMPZ:
    return "AArch64ISD::NEON_CMPZ";
  case AArch64ISD::NEON_TST:
    return "AArch64ISD::NEON_TST";
  case AArch64ISD::NEON_QSHLs:
    return "AArch64ISD::NEON_QSHLs";
  case AArch64ISD::NEON_QSHLu:
    return "AArch64ISD::NEON_QSHLu";
  case AArch64ISD::NEON_VDUP:
    return "AArch64ISD::NEON_VDUP";
  case AArch64ISD::NEON_VDUPLANE:
    return "AArch64ISD::NEON_VDUPLANE";
  case AArch64ISD::NEON_REV16:
    return "AArch64ISD::NEON_REV16";
  case AArch64ISD::NEON_REV32:
    return "AArch64ISD::NEON_REV32";
  case AArch64ISD::NEON_REV64:
    return "AArch64ISD::NEON_REV64";
  case AArch64ISD::NEON_UZP1:
    return "AArch64ISD::NEON_UZP1";
  case AArch64ISD::NEON_UZP2:
    return "AArch64ISD::NEON_UZP2";
  case AArch64ISD::NEON_ZIP1:
    return "AArch64ISD::NEON_ZIP1";
  case AArch64ISD::NEON_ZIP2:
    return "AArch64ISD::NEON_ZIP2";
  case AArch64ISD::NEON_TRN1:
    return "AArch64ISD::NEON_TRN1";
  case AArch64ISD::NEON_TRN2:
    return "AArch64ISD::NEON_TRN2";
  case AArch64ISD::NEON_LD1_UPD:
    return "AArch64ISD::NEON_LD1_UPD";
  case AArch64ISD::NEON_LD2_UPD:
    return "AArch64ISD::NEON_LD2_UPD";
  case AArch64ISD::NEON_LD3_UPD:
    return "AArch64ISD::NEON_LD3_UPD";
  case AArch64ISD::NEON_LD4_UPD:
    return "AArch64ISD::NEON_LD4_UPD";
  case AArch64ISD::NEON_ST1_UPD:
    return "AArch64ISD::NEON_ST1_UPD";
  case AArch64ISD::NEON_ST2_UPD:
    return "AArch64ISD::NEON_ST2_UPD";
  case AArch64ISD::NEON_ST3_UPD:
    return "AArch64ISD::NEON_ST3_UPD";
  case AArch64ISD::NEON_ST4_UPD:
    return "AArch64ISD::NEON_ST4_UPD";
  case AArch64ISD::NEON_LD1x2_UPD:
    return "AArch64ISD::NEON_LD1x2_UPD";
  case AArch64ISD::NEON_LD1x3_UPD:
    return "AArch64ISD::NEON_LD1x3_UPD";
  case AArch64ISD::NEON_LD1x4_UPD:
    return "AArch64ISD::NEON_LD1x4_UPD";
  case AArch64ISD::NEON_ST1x2_UPD:
    return "AArch64ISD::NEON_ST1x2_UPD";
  case AArch64ISD::NEON_ST1x3_UPD:
    return "AArch64ISD::NEON_ST1x3_UPD";
  case AArch64ISD::NEON_ST1x4_UPD:
    return "AArch64ISD::NEON_ST1x4_UPD";
  case AArch64ISD::NEON_LD2DUP:
    return "AArch64ISD::NEON_LD2DUP";
  case AArch64ISD::NEON_LD3DUP:
    return "AArch64ISD::NEON_LD3DUP";
  case AArch64ISD::NEON_LD4DUP:
    return "AArch64ISD::NEON_LD4DUP";
  case AArch64ISD::NEON_LD2DUP_UPD:
    return "AArch64ISD::NEON_LD2DUP_UPD";
  case AArch64ISD::NEON_LD3DUP_UPD:
    return "AArch64ISD::NEON_LD3DUP_UPD";
  case AArch64ISD::NEON_LD4DUP_UPD:
    return "AArch64ISD::NEON_LD4DUP_UPD";
  case AArch64ISD::NEON_LD2LN_UPD:
    return "AArch64ISD::NEON_LD2LN_UPD";
  case AArch64ISD::NEON_LD3LN_UPD:
    return "AArch64ISD::NEON_LD3LN_UPD";
  case AArch64ISD::NEON_LD4LN_UPD:
    return "AArch64ISD::NEON_LD4LN_UPD";
  case AArch64ISD::NEON_ST2LN_UPD:
    return "AArch64ISD::NEON_ST2LN_UPD";
  case AArch64ISD::NEON_ST3LN_UPD:
    return "AArch64ISD::NEON_ST3LN_UPD";
  case AArch64ISD::NEON_ST4LN_UPD:
    return "AArch64ISD::NEON_ST4LN_UPD";
  case AArch64ISD::NEON_VEXTRACT:
    return "AArch64ISD::NEON_VEXTRACT";

  default:
    return NULL;
  }
}

static const uint16_t AArch64FPRArgRegs[] = {
  AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
  AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7
};
static const unsigned NumFPRArgRegs = llvm::array_lengthof(AArch64FPRArgRegs);

static const uint16_t AArch64ArgRegs[] = {
  AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3,
  AArch64::X4, AArch64::X5, AArch64::X6, AArch64::X7
};
static const unsigned NumArgRegs = llvm::array_lengthof(AArch64ArgRegs);

static bool CC_AArch64NoMoreRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
                                 CCValAssign::LocInfo LocInfo,
                                 ISD::ArgFlagsTy ArgFlags, CCState &State) {
  // Mark all remaining general purpose registers as allocated. We don't
  // backtrack: if (for example) an i128 gets put on the stack, no subsequent
  // i64 will go in registers (C.11).
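  // Illustration: if only one GPR remains and an i128 argument (which needs a
  // register pair) is pushed to the stack, the leftover register is marked
  // allocated here so that a later i64 argument also goes on the stack.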
  for (unsigned i = 0; i < NumArgRegs; ++i)
    State.AllocateReg(AArch64ArgRegs[i]);

  return false;
}

#include "AArch64GenCallingConv.inc"

CCAssignFn *AArch64TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {
  switch (CC) {
  default: llvm_unreachable("Unsupported calling convention");
  case CallingConv::Fast:
  case CallingConv::C:
    return CC_A64_APCS;
  }
}

void
AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
                                           SDLoc DL, SDValue &Chain) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();

  SmallVector<SDValue, 8> MemOps;

  unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(AArch64ArgRegs,
                                                         NumArgRegs);
  unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(AArch64FPRArgRegs,
                                                         NumFPRArgRegs);

  unsigned GPRSaveSize = 8 * (NumArgRegs - FirstVariadicGPR);
  int GPRIdx = 0;
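  // For example, a variadic function with two named integer parameters has
  // FirstVariadicGPR == 2, so x2-x7 are saved into a 48-byte area for va_arg
  // to find.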
  if (GPRSaveSize != 0) {
    GPRIdx = MFI->CreateStackObject(GPRSaveSize, 8, false);

    SDValue FIN = DAG.getFrameIndex(GPRIdx, getPointerTy());

    for (unsigned i = FirstVariadicGPR; i < NumArgRegs; ++i) {
      unsigned VReg = MF.addLiveIn(AArch64ArgRegs[i], &AArch64::GPR64RegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
      SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
                                   MachinePointerInfo::getStack(i * 8),
                                   false, false, 0);
      MemOps.push_back(Store);
      FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
                        DAG.getConstant(8, getPointerTy()));
    }
  }

  if (getSubtarget()->hasFPARMv8()) {
    unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
    int FPRIdx = 0;
    // According to the AArch64 Procedure Call Standard, section B.1/B.3, we
    // can omit a register save area if we know we'll never use registers of
    // the appropriate type.
    if (FPRSaveSize != 0) {
      FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false);

      SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy());

      for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
        unsigned VReg = MF.addLiveIn(AArch64FPRArgRegs[i],
                                     &AArch64::FPR128RegClass);
        SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
        SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
                                     MachinePointerInfo::getStack(i * 16),
                                     false, false, 0);
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
                          DAG.getConstant(16, getPointerTy()));
      }
    }
    FuncInfo->setVariadicFPRIdx(FPRIdx);
    FuncInfo->setVariadicFPRSize(FPRSaveSize);
  }

  int StackIdx = MFI->CreateFixedObject(8, CCInfo.getNextStackOffset(), true);

  FuncInfo->setVariadicStackIdx(StackIdx);
  FuncInfo->setVariadicGPRIdx(GPRIdx);
  FuncInfo->setVariadicGPRSize(GPRSaveSize);

  if (!MemOps.empty()) {
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
                        MemOps.size());
  }
}

SDValue
AArch64TargetLowering::LowerFormalArguments(SDValue Chain,
                                            CallingConv::ID CallConv, bool isVarArg,
                                            const SmallVectorImpl<ISD::InputArg> &Ins,
                                            SDLoc dl, SelectionDAG &DAG,
                                            SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv));

  SmallVector<SDValue, 16> ArgValues;

  SDValue ArgValue;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Ins[i].Flags;

    if (Flags.isByVal()) {
      // Byval is used for small structs and HFAs in the PCS, but the system
      // should work in a non-compliant manner for larger structs.
      EVT PtrTy = getPointerTy();
      int Size = Flags.getByValSize();
      unsigned NumRegs = (Size + 7) / 8;
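      // e.g. a 12-byte struct rounds up to NumRegs == 2, i.e. a 16-byte slot.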
      unsigned FrameIdx = MFI->CreateFixedObject(8 * NumRegs,
                                                 VA.getLocMemOffset(),
                                                 false);
      SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrTy);
      InVals.push_back(FrameIdxN);

      continue;
    } else if (VA.isRegLoc()) {
      MVT RegVT = VA.getLocVT();
      const TargetRegisterClass *RC = getRegClassFor(RegVT);
      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);

      ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
    } else { // VA.isRegLoc()
      assert(VA.isMemLoc());

      int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
                                      VA.getLocMemOffset(), true);

      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
      ArgValue = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                             MachinePointerInfo::getFixedStack(FI),
                             false, false, false, 0);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
      break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt:
    case CCValAssign::FPExt: {
      unsigned DestSize = VA.getValVT().getSizeInBits();
      unsigned DestSubReg;

      switch (DestSize) {
      case 8: DestSubReg = AArch64::sub_8; break;
      case 16: DestSubReg = AArch64::sub_16; break;
      case 32: DestSubReg = AArch64::sub_32; break;
      case 64: DestSubReg = AArch64::sub_64; break;
      default: llvm_unreachable("Unexpected argument promotion");
      }

      ArgValue = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
                                            VA.getValVT(), ArgValue,
                                            DAG.getTargetConstant(DestSubReg,
                                                                  MVT::i32)),
                         0);
      break;
    }
    }

    InVals.push_back(ArgValue);
  }

  if (isVarArg)
    SaveVarArgRegisters(CCInfo, DAG, dl, Chain);

  unsigned StackArgSize = CCInfo.getNextStackOffset();
  if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
    // This is a non-standard ABI so by fiat I say we're allowed to make full
    // use of the stack area to be popped, which must be aligned to 16 bytes in
    // any case:
    StackArgSize = RoundUpToAlignment(StackArgSize, 16);

    // If we're expected to restore the stack (e.g. fastcc) then we'll be adding
    // a multiple of 16.
    FuncInfo->setArgumentStackToRestore(StackArgSize);

    // This realignment carries over to the available bytes below. Our own
    // callers will guarantee the space is free by giving an aligned value to
    // CALLSEQ_START.
  }

  // Even if we're not expected to free up the space, it's useful to know how
  // much is there while considering tail calls (because we can reuse it).
  FuncInfo->setBytesInStackArgArea(StackArgSize);

  return Chain;
}

SDValue
AArch64TargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc dl, SelectionDAG &DAG) const {
  // CCValAssign - represent the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze outgoing return values.
  CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv));

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    // PCS: "If the type, T, of the result of a function is such that
    // void func(T arg) would require that arg be passed as a value in a
    // register (or set of registers) according to the rules in 5.4, then the
    // result is returned in the same registers as would be used for such an
    // argument.
    //
    // Otherwise, the caller shall reserve a block of memory of sufficient
    // size and alignment to hold the result. The address of the memory block
    // shall be passed as an additional argument to the function in x8."
    //
    // This is implemented in two places. The register-return values are dealt
    // with here, more complex returns are passed as an sret parameter, which
    // means we don't have to worry about it during actual return.
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Only register-returns should be created by PCS");

    SDValue Arg = OutVals[i];

    // There's no convenient note in the ABI about this as there is for normal
    // arguments, but it says return values are passed in the same registers as
    // an argument would be. I believe that includes the comments about
    // unspecified higher bits, putting the burden of widening on the *caller*
    // for return values.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt:
      // Floating-point values should only be extended when they're going into
      // memory, which can't happen here so an integer extend is acceptable.
      Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(AArch64ISD::Ret, dl, MVT::Other,
                     &RetOps[0], RetOps.size());
}

unsigned AArch64TargetLowering::getByValTypeAlignment(Type *Ty) const {
  // This is a new backend. For anything more precise than this a FE should
  // set an explicit alignment.
  return 4;
}

SDValue
AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();
  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
  bool IsStructRet = !Outs.empty() && Outs[0].Flags.isSRet();
  bool IsSibCall = false;

  if (IsTailCall) {
    IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                   IsVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
                   Outs, OutVals, Ins, DAG);

    // A sibling call is one where we're under the usual C ABI and not planning
    // to change that but can still do a tail call:
    if (!TailCallOpt && IsTailCall)
      IsSibCall = true;
  }

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());
  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv));

  // On AArch64 (and all other architectures I'm aware of) the most this has to
  // do is adjust the stack pointer.
  unsigned NumBytes = RoundUpToAlignment(CCInfo.getNextStackOffset(), 16);

  // Since we're not changing the ABI to make this a tail call, the memory
  // operands are already available in the caller's incoming argument space.
  if (IsSibCall)
    NumBytes = 0;

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0. Completely unused for non-tail calls.
  int FPDiff = 0;

  if (IsTailCall && !IsSibCall) {
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // can actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;
1508 // The stack pointer must be 16-byte aligned at all times it's used for a
1509 // memory operation, which in practice means at *all* times and in
1510 // particular across call boundaries. Therefore our own arguments started at
1511 // a 16-byte aligned SP and the delta applied for the tail call should
1512 // satisfy the same constraint.
1513 assert(FPDiff % 16 == 0 && "unaligned stack on tail call");

  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
                                 dl);

  SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP,
                                        getPointerTy());

  SmallVector<SDValue, 8> MemOpChains;
  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;
    SDValue Arg = OutVals[i];

    // Callee does the actual widening, so all extensions just use an implicit
    // definition of the rest of the Loc. Aesthetically, this would be nicer as
    // an ANY_EXTEND, but that isn't valid for floating-point types and this
    // alternative works on integer types too.
    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::SExt:
    case CCValAssign::ZExt:
    case CCValAssign::AExt:
    case CCValAssign::FPExt: {
      unsigned SrcSize = VA.getValVT().getSizeInBits();
      unsigned SrcSubReg;

      switch (SrcSize) {
      case 8: SrcSubReg = AArch64::sub_8; break;
      case 16: SrcSubReg = AArch64::sub_16; break;
      case 32: SrcSubReg = AArch64::sub_32; break;
      case 64: SrcSubReg = AArch64::sub_64; break;
      default: llvm_unreachable("Unexpected argument promotion");
      }

      Arg = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
                                       VA.getLocVT(),
                                       DAG.getUNDEF(VA.getLocVT()),
                                       Arg,
                                       DAG.getTargetConstant(SrcSubReg, MVT::i32)),
                    0);
      break;
    }
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      // A normal register (sub-) argument. For now we just note it down because
      // we want to copy things into registers as late as possible to avoid
      // register-pressure (and possibly worse).
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
      continue;
    }

    assert(VA.isMemLoc() && "unexpected argument location");

    SDValue DstAddr;
    MachinePointerInfo DstInfo;
    if (IsTailCall) {
      uint32_t OpSize = Flags.isByVal() ? Flags.getByValSize() :
                                          VA.getLocVT().getSizeInBits();
      OpSize = (OpSize + 7) / 8;
      int32_t Offset = VA.getLocMemOffset() + FPDiff;
      int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);

      DstAddr = DAG.getFrameIndex(FI, getPointerTy());
      DstInfo = MachinePointerInfo::getFixedStack(FI);

      // Make sure any stack arguments overlapping with where we're storing are
      // loaded before this eventual operation. Otherwise they'll be clobbered.
      Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
    } else {
      SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset());

      DstAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
      DstInfo = MachinePointerInfo::getStack(VA.getLocMemOffset());
    }

    if (Flags.isByVal()) {
      SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i64);
      SDValue Cpy = DAG.getMemcpy(Chain, dl, DstAddr, Arg, SizeNode,
                                  Flags.getByValAlign(),
                                  /*isVolatile = */ false,
                                  /*alwaysInline = */ false,
                                  DstInfo, MachinePointerInfo(0));
      MemOpChains.push_back(Cpy);
    } else {
      // Normal stack argument, put it where it's needed.
      SDValue Store = DAG.getStore(Chain, dl, Arg, DstAddr, DstInfo,
                                   false, false, 0);
      MemOpChains.push_back(Store);
    }
  }

  // The loads and stores generated above shouldn't clash with each
  // other. Combining them with this TokenFactor notes that fact for the rest of
  // the backend.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Most of the rest of the instructions need to be glued together; we don't
  // want assignments to actual registers used by a call to be rearranged by a
  // well-meaning scheduler.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // The linker is responsible for inserting veneers when necessary to put a
  // function call destination in range, so we don't need to bother with a
  // wrapper here.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const char *Sym = S->getSymbol();
    Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
  }

  // We don't usually want to end the call-sequence here because we would tidy
  // the frame up *after* the call, however in the ABI-changing tail-call case
  // we've carefully laid out the parameters so that when sp is reset they'll be
  // in the correct location.
  if (IsTailCall && !IsSibCall) {
    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                               DAG.getIntPtrConstant(0, true), InFlag, dl);
    InFlag = Chain.getValue(1);
  }

  // We produce the following DAG scheme for the actual call instruction:
  //     (AArch64Call Chain, Callee, reg1, ..., regn, preserveMask, inflag?)
  //
  // Most arguments aren't going to be used and just keep the values live as
  // far as LLVM is concerned. It's expected to be selected as simply "bl
  // callee" (for a direct, non-tail call).
  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  if (IsTailCall) {
    // Each tail call may have to adjust the stack by a different amount, so
    // this information must travel along with the operation for eventual
    // consumption by emitEpilogue.
    Ops.push_back(DAG.getTargetConstant(FPDiff, MVT::i32));
  }

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // Add a register mask operand representing the call-preserved registers. This
  // is used later in codegen to constrain register-allocation.
  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // If we needed glue, put it in as the last argument.
  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  if (IsTailCall)
    return DAG.getNode(AArch64ISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());

  Chain = DAG.getNode(AArch64ISD::Call, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Now we can reclaim the stack, just as well do it before working out where
  // our return value is.
  if (!IsSibCall) {
    uint64_t CalleePopBytes
      = DoesCalleeRestoreStack(CallConv, TailCallOpt) ? NumBytes : 0;

    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                               DAG.getIntPtrConstant(CalleePopBytes, true),
                               InFlag, dl);
    InFlag = Chain.getValue(1);
  }

  return LowerCallResult(Chain, InFlag, CallConv,
                         IsVarArg, Ins, dl, DAG, InVals);
}
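
// For orientation, a hedged sketch of the difference between the two call
// flavours built above (illustrative assembly, not emitted verbatim by this
// function):
//     bl  callee    ; AArch64ISD::Call - LR clobbered, our frame survives
//     b   callee    ; AArch64ISD::TC_RETURN - the callee takes over our frame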

SDValue
AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                       CallingConv::ID CallConv, bool IsVarArg,
                                       const SmallVectorImpl<ISD::InputArg> &Ins,
                                       SDLoc dl, SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals) const {
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, CCAssignFnForNode(CallConv));

  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    // Return values that are too big to fit into registers should use an sret
    // pointer, so this can be a lot simpler than the main argument code.
    assert(VA.isRegLoc() && "Memory locations not expected for call return");

    SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                                     InFlag);
    Chain = Val.getValue(1);
    InFlag = Val.getValue(2);

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
    case CCValAssign::SExt:
    case CCValAssign::AExt:
      // Floating-point arguments only get extended/truncated if they're going
      // in memory, so using the integer operation is acceptable here.
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

bool
AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                    CallingConv::ID CalleeCC,
                                    bool IsVarArg,
                                    bool IsCalleeStructRet,
                                    bool IsCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SelectionDAG& DAG) const {
  // For CallingConv::C this function knows whether the ABI needs
  // changing. That's not true for other conventions so they will have to opt in
  // manually.
  if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C)
    return false;

  const MachineFunction &MF = DAG.getMachineFunction();
  const Function *CallerF = MF.getFunction();
  CallingConv::ID CallerCC = CallerF->getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call. Working around this *is* possible (see
  // X86) but less efficient and uglier in LowerCall.
  for (Function::const_arg_iterator i = CallerF->arg_begin(),
         e = CallerF->arg_end(); i != e; ++i)
    if (i->hasByValAttr())
      return false;

  if (getTargetMachine().Options.GuaranteedTailCallOpt) {
    if (IsTailCallConvention(CalleeCC) && CCMatch)
      return true;
    return false;
  }

  // Now we search for cases where we can use a tail call without changing the
  // ABI. Sibcall is used in some places (particularly gcc) to refer to this
  // kind of call.

  // I want anyone implementing a new calling convention to think long and hard
  // about this assert.
  assert((!IsVarArg || CalleeCC == CallingConv::C)
         && "Unexpected variadic calling convention");

  if (IsVarArg && !Outs.empty()) {
    // At least two cases here: if caller is fastcc then we can't have any
    // memory arguments (we'd be expected to clean up the stack afterwards). If
    // caller is C then we could potentially use its argument area.
    //
    // FIXME: for now we take the most conservative of these in both cases:
    // disallow all variadic memory operands.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
                   getTargetMachine(), ArgLocs, *DAG.getContext());

    CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
      if (!ArgLocs[i].isRegLoc())
        return false;
  }

  // If the calling conventions do not match, then we'd better make sure the
  // results are returned in the same way as what the caller expects.
  if (!CCMatch) {
    SmallVector<CCValAssign, 16> RVLocs1;
    CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs1, *DAG.getContext());
    CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC));

    SmallVector<CCValAssign, 16> RVLocs2;
    CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs2, *DAG.getContext());
    CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC));

    if (RVLocs1.size() != RVLocs2.size())
      return false;
    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
        return false;
      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
        return false;
      if (RVLocs1[i].isRegLoc()) {
        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
          return false;
      } else {
        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
          return false;
      }
    }
  }

  // Nothing more to check if the callee is taking no arguments.
  if (Outs.empty())
    return true;

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));

  const AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();

  // If the stack arguments for this call would fit into our own save area then
  // the call can be made tail.
  return CCInfo.getNextStackOffset() <= FuncInfo->getBytesInStackArgArea();
}

bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
                                                   bool TailCallOpt) const {
  return CallCC == CallingConv::Fast && TailCallOpt;
}

bool AArch64TargetLowering::IsTailCallConvention(CallingConv::ID CallCC) const {
  return CallCC == CallingConv::Fast;
}

SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
                                                   SelectionDAG &DAG,
                                                   MachineFrameInfo *MFI,
                                                   int ClobberedFI) const {
  SmallVector<SDValue, 8> ArgChains;
  int64_t FirstByte = MFI->getObjectOffset(ClobberedFI);
  int64_t LastByte = FirstByte + MFI->getObjectSize(ClobberedFI) - 1;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument whose load overlaps the bytes
  // being clobbered.
  for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
         UE = DAG.getEntryNode().getNode()->use_end(); U != UE; ++U)
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
        if (FI->getIndex() < 0) {
          int64_t InFirstByte = MFI->getObjectOffset(FI->getIndex());
          int64_t InLastByte = InFirstByte;
          InLastByte += MFI->getObjectSize(FI->getIndex()) - 1;

          if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
              (FirstByte <= InFirstByte && InFirstByte <= LastByte))
            ArgChains.push_back(SDValue(L, 1));
        }

  // Build a tokenfactor for all the chains.
  return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
                     &ArgChains[0], ArgChains.size());
}
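
// A worked example of the overlap test above, for illustration: if the
// clobbered slot covers bytes [16, 23] and an incoming argument load covers
// [20, 27], the first predicate (InFirstByte <= FirstByte <= InLastByte) is
// false but the second (FirstByte <= InFirstByte <= LastByte) holds, so the
// load is chained before the store that would otherwise clobber it.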

static A64CC::CondCodes IntCCToA64CC(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETEQ:  return A64CC::EQ;
  case ISD::SETGT:  return A64CC::GT;
  case ISD::SETGE:  return A64CC::GE;
  case ISD::SETLT:  return A64CC::LT;
  case ISD::SETLE:  return A64CC::LE;
  case ISD::SETNE:  return A64CC::NE;
  case ISD::SETUGT: return A64CC::HI;
  case ISD::SETUGE: return A64CC::HS;
  case ISD::SETULT: return A64CC::LO;
  case ISD::SETULE: return A64CC::LS;
  default: llvm_unreachable("Unexpected condition code");
  }
}

bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Val) const {
  // icmp is implemented using adds/subs immediate, which take an unsigned
  // 12-bit immediate, optionally shifted left by 12 bits.

  // Symmetric by using adds/subs.
  if (Val < 0)
    Val = -Val;

  return (Val & ~0xfff) == 0 || (Val & ~0xfff000) == 0;
}
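
// Two illustrative examples of the predicate above: 0xfff (4095) fits the
// plain 12-bit form ("cmp x0, #4095"), and 0x1000 (4096) fits the shifted form
// ("cmp x0, #1, lsl #12"), while 0x1001 fits neither and would have to be
// materialized into a register before the compare.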

SDValue AArch64TargetLowering::getSelectableIntSetCC(SDValue LHS, SDValue RHS,
                                          ISD::CondCode CC, SDValue &A64cc,
                                          SelectionDAG &DAG, SDLoc &dl) const {
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
    int64_t C = 0;
    EVT VT = RHSC->getValueType(0);
    bool knownInvalid = false;

    // I'm not convinced the rest of LLVM handles these edge cases properly, but
    // we can at least get it right.
    if (isSignedIntSetCC(CC)) {
      C = RHSC->getSExtValue();
    } else if (RHSC->getZExtValue() > INT64_MAX) {
      // A 64-bit constant not representable by a signed 64-bit integer is far
      // too big to fit into a SUBS immediate anyway.
      knownInvalid = true;
    } else {
      C = RHSC->getZExtValue();
    }

    if (!knownInvalid && !isLegalICmpImmediate(C)) {
      // Constant does not fit, try adjusting it by one.
      switch (CC) {
      default: break;
      case ISD::SETLT:
      case ISD::SETGE:
        if (isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
          RHS = DAG.getConstant(C-1, VT);
        }
        break;
      case ISD::SETULT:
      case ISD::SETUGE:
        if (isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
          RHS = DAG.getConstant(C-1, VT);
        }
        break;
      case ISD::SETLE:
      case ISD::SETGT:
        if (isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
          RHS = DAG.getConstant(C+1, VT);
        }
        break;
      case ISD::SETULE:
      case ISD::SETUGT:
        if (isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
          RHS = DAG.getConstant(C+1, VT);
        }
        break;
      }
    }
  }

  A64CC::CondCodes CondCode = IntCCToA64CC(CC);
  A64cc = DAG.getConstant(CondCode, MVT::i32);
  return DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
                     DAG.getCondCode(CC));
}

static A64CC::CondCodes FPCCToA64CC(ISD::CondCode CC,
                                    A64CC::CondCodes &Alternative) {
  A64CC::CondCodes CondCode = A64CC::Invalid;
  Alternative = A64CC::Invalid;

  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ: CondCode = A64CC::EQ; break;
  case ISD::SETGT:
  case ISD::SETOGT: CondCode = A64CC::GT; break;
  case ISD::SETGE:
  case ISD::SETOGE: CondCode = A64CC::GE; break;
  case ISD::SETOLT: CondCode = A64CC::MI; break;
  case ISD::SETOLE: CondCode = A64CC::LS; break;
  case ISD::SETONE: CondCode = A64CC::MI; Alternative = A64CC::GT; break;
  case ISD::SETO:   CondCode = A64CC::VC; break;
  case ISD::SETUO:  CondCode = A64CC::VS; break;
  case ISD::SETUEQ: CondCode = A64CC::EQ; Alternative = A64CC::VS; break;
  case ISD::SETUGT: CondCode = A64CC::HI; break;
  case ISD::SETUGE: CondCode = A64CC::PL; break;
  case ISD::SETLT:
  case ISD::SETULT: CondCode = A64CC::LT; break;
  case ISD::SETLE:
  case ISD::SETULE: CondCode = A64CC::LE; break;
  case ISD::SETNE:
  case ISD::SETUNE: CondCode = A64CC::NE; break;
  }
  return CondCode;
}
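
// A hedged example of why the Alternative output exists: SETONE (ordered and
// not equal) has no single A64 condition, so consumers test both MI and GT on
// the same flags, along the lines of:
//     fcmp  s0, s1
//     b.mi  dest       ; taken when LHS < RHS
//     b.gt  dest       ; taken when LHS > RHS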

SDValue
AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

  switch(getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
    // The most efficient code is PC-relative anyway for the small memory model,
    // so we don't need to worry about relocation model.
    return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                       DAG.getTargetBlockAddress(BA, PtrVT, 0,
                                                 AArch64II::MO_NO_FLAG),
                       DAG.getTargetBlockAddress(BA, PtrVT, 0,
                                                 AArch64II::MO_LO12),
                       DAG.getConstant(/*Alignment=*/ 4, MVT::i32));
  case CodeModel::Large:
    return DAG.getNode(
        AArch64ISD::WrapperLarge, DL, PtrVT,
        DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G3),
        DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
        DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
        DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G0_NC));
  default:
    llvm_unreachable("Only small and large code models supported now");
  }
}

// (BRCOND chain, val, dest)
SDValue
AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Chain = Op.getOperand(0);
  SDValue TheBit = Op.getOperand(1);
  SDValue DestBB = Op.getOperand(2);

  // AArch64 BooleanContents is the default UndefinedBooleanContent, which means
  // that as the consumer we are responsible for ignoring rubbish in higher
  // bits.
  TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
                       DAG.getConstant(1, MVT::i32));

  SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
                               DAG.getConstant(0, TheBit.getValueType()),
                               DAG.getCondCode(ISD::SETNE));

  return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, Chain,
                     A64CMP, DAG.getConstant(A64CC::NE, MVT::i32),
                     DestBB);
}
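
// A hedged sketch of the expected selection for the nodes built above; the
// AND/compare pair often folds into a single test of the low bit:
//     tst  w0, #0x1
//     b.ne dest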

// (BR_CC chain, condcode, lhs, rhs, dest)
SDValue
AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue DestBB = Op.getOperand(4);

  if (LHS.getValueType() == MVT::f128) {
    // f128 comparisons are lowered to runtime calls by a routine which sets
    // LHS, RHS and CC appropriately for the rest of this function to continue.
    softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);

    // If softenSetCCOperands returned a scalar, we need to compare the result
    // against zero to select between true and false values.
    if (RHS.getNode() == 0) {
      RHS = DAG.getConstant(0, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }

  if (LHS.getValueType().isInteger()) {
    SDValue A64cc;

    // Integers are handled in a separate function because the combinations of
    // immediates and tests can get hairy and we may want to fiddle things.
    SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);

    return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
                       Chain, CmpOp, A64cc, DestBB);
  }

  // Note that some LLVM floating-point CondCodes can't be lowered to a single
  // conditional branch, hence FPCCToA64CC can set a second test, where either
  // passing is sufficient.
  A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
  CondCode = FPCCToA64CC(CC, Alternative);
  SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
  SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
                              DAG.getCondCode(CC));
  SDValue A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
                                 Chain, SetCC, A64cc, DestBB);

  if (Alternative != A64CC::Invalid) {
    A64cc = DAG.getConstant(Alternative, MVT::i32);
    A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
                           A64BR_CC, SetCC, A64cc, DestBB);
  }

  return A64BR_CC;
}

SDValue
AArch64TargetLowering::LowerF128ToCall(SDValue Op, SelectionDAG &DAG,
                                       RTLIB::Libcall Call) const {
  ArgListTy Args;
  ArgListEntry Entry;
  for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
    EVT ArgVT = Op.getOperand(i).getValueType();
    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Node = Op.getOperand(i); Entry.Ty = ArgTy;
    Entry.isSExt = false;
    Entry.isZExt = false;
    Args.push_back(Entry);
  }
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(Call), getPointerTy());

  Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());

  // By default, the input chain to this libcall is the entry node of the
  // function. If the libcall is going to be emitted as a tail call then
  // isUsedByReturnOnly will change it to the right chain if the return
  // node which is being folded has a non-entry input chain.
  SDValue InChain = DAG.getEntryNode();

  // isTailCall may be true since the callee does not reference caller stack
  // frame. Check if it's in the right position.
  SDValue TCChain = InChain;
  bool isTailCall = isInTailCallPosition(DAG, Op.getNode(), TCChain);
  if (isTailCall)
    InChain = TCChain;

  CallLoweringInfo CLI(InChain, RetTy, false, false, false, false,
                       0, getLibcallCallingConv(Call), isTailCall,
                       /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
                       Callee, Args, DAG, SDLoc(Op));
  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);

  if (!CallInfo.second.getNode())
    // It's a tailcall, return the chain (which is the DAG root).
    return DAG.getRoot();

  return CallInfo.first;
}

SDValue
AArch64TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
  if (Op.getOperand(0).getValueType() != MVT::f128) {
    // It's legal except when f128 is involved.
    return Op;
  }

  RTLIB::Libcall LC;
  LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType());

  SDValue SrcVal = Op.getOperand(0);
  return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1,
                     /*isSigned*/ false, SDLoc(Op)).first;
}

SDValue
AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
  assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");

  RTLIB::Libcall LC;
  LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType());

  return LowerF128ToCall(Op, DAG, LC);
}

static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                                    bool IsSigned) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  SDValue Vec = Op.getOperand(0);
  EVT OpVT = Vec.getValueType();
  unsigned Opc = IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;

  if (VT.getVectorNumElements() == 1) {
    assert(OpVT == MVT::v1f64 && "Unexpected vector type!");
    if (VT.getSizeInBits() == OpVT.getSizeInBits())
      return Op;
    return DAG.UnrollVectorOp(Op.getNode());
  }

  if (VT.getSizeInBits() > OpVT.getSizeInBits()) {
    assert(Vec.getValueType() == MVT::v2f32 && VT == MVT::v2i64 &&
           "Unexpected vector type!");
    Vec = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v2f64, Vec);
    return DAG.getNode(Opc, dl, VT, Vec);
  } else if (VT.getSizeInBits() < OpVT.getSizeInBits()) {
    EVT CastVT = EVT::getIntegerVT(*DAG.getContext(),
                                   OpVT.getVectorElementType().getSizeInBits());
    CastVT =
        EVT::getVectorVT(*DAG.getContext(), CastVT, VT.getVectorNumElements());
    Vec = DAG.getNode(Opc, dl, CastVT, Vec);
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Vec);
  }
  return DAG.getNode(Opc, dl, VT, Vec);
}

SDValue
AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                                      bool IsSigned) const {
  if (Op.getValueType().isVector())
    return LowerVectorFP_TO_INT(Op, DAG, IsSigned);
  if (Op.getOperand(0).getValueType() != MVT::f128) {
    // It's legal except when f128 is involved.
    return Op;
  }

  RTLIB::Libcall LC;
  if (IsSigned)
    LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType());
  else
    LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType());

  return LowerF128ToCall(Op, DAG, LC);
}

SDValue
AArch64TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(8, MVT::i64);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Return X30, which contains the return address. Mark it an implicit live-in.
  unsigned Reg = MF.addLiveIn(AArch64::X30, getRegClassFor(MVT::i64));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, MVT::i64);
}

SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned FrameReg = AArch64::X29;
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo(),
                            false, false, false, 0);
  return FrameAddr;
}

SDValue
AArch64TargetLowering::LowerGlobalAddressELFLarge(SDValue Op,
                                                  SelectionDAG &DAG) const {
  assert(getTargetMachine().getCodeModel() == CodeModel::Large);
  assert(getTargetMachine().getRelocationModel() == Reloc::Static);

  EVT PtrVT = getPointerTy();
  SDLoc dl(Op);
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();

  SDValue GlobalAddr = DAG.getNode(
      AArch64ISD::WrapperLarge, dl, PtrVT,
      DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G3),
      DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
      DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
      DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G0_NC));

  if (GN->getOffset() != 0)
    return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
                       DAG.getConstant(GN->getOffset(), PtrVT));

  return GlobalAddr;
}

SDValue
AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op,
                                                  SelectionDAG &DAG) const {
  assert(getTargetMachine().getCodeModel() == CodeModel::Small);

  EVT PtrVT = getPointerTy();
  SDLoc dl(Op);
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  unsigned Alignment = GV->getAlignment();
  Reloc::Model RelocM = getTargetMachine().getRelocationModel();
  if (GV->isWeakForLinker() && GV->isDeclaration() && RelocM == Reloc::Static) {
    // Weak undefined symbols can't use ADRP/ADD pair since they should evaluate
    // to zero when they remain undefined. In PIC mode the GOT can take care of
    // this, but in absolute mode we use a constant pool load.
    SDValue PoolAddr;
    PoolAddr = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
                           DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
                                                     AArch64II::MO_NO_FLAG),
                           DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
                                                     AArch64II::MO_LO12),
                           DAG.getConstant(8, MVT::i32));
    SDValue GlobalAddr = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), PoolAddr,
                                     MachinePointerInfo::getConstantPool(),
                                     /*isVolatile=*/ false,
                                     /*isNonTemporal=*/ true,
                                     /*isInvariant=*/ true, 8);
    if (GN->getOffset() != 0)
      return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
                         DAG.getConstant(GN->getOffset(), PtrVT));

    return GlobalAddr;
  }

  if (Alignment == 0) {
    const PointerType *GVPtrTy = cast<PointerType>(GV->getType());
    if (GVPtrTy->getElementType()->isSized()) {
      Alignment
        = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType());
    } else {
      // Be conservative if we can't guess, not that it really matters:
      // functions and labels aren't valid for loads, and the methods used to
      // actually calculate an address work with any alignment.
      Alignment = 1;
    }
  }

  unsigned char HiFixup, LoFixup;
  bool UseGOT = getSubtarget()->GVIsIndirectSymbol(GV, RelocM);

  if (UseGOT) {
    HiFixup = AArch64II::MO_GOT;
    LoFixup = AArch64II::MO_GOT_LO12;
    Alignment = 8;
  } else {
    HiFixup = AArch64II::MO_NO_FLAG;
    LoFixup = AArch64II::MO_LO12;
  }

  // AArch64's small model demands the following sequence:
  //     ADRP x0, somewhere
  //     ADD x0, x0, #:lo12:somewhere ; (or LDR directly).
  SDValue GlobalRef = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
                                  DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                                             HiFixup),
                                  DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                                             LoFixup),
                                  DAG.getConstant(Alignment, MVT::i32));

  if (UseGOT)
    GlobalRef = DAG.getNode(AArch64ISD::GOTLoad, dl, PtrVT, DAG.getEntryNode(),
                            GlobalRef);

  if (GN->getOffset() != 0)
    return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalRef,
                       DAG.getConstant(GN->getOffset(), PtrVT));

  return GlobalRef;
}
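
// A hedged sketch of the two sequences selected from the wrapper above, for a
// symbol "var" (exact directives depend on the assembler):
//
//   Direct (UseGOT == false):          Via GOT (UseGOT == true):
//     adrp x0, var                       adrp x0, :got:var
//     add  x0, x0, #:lo12:var            ldr  x0, [x0, #:got_lo12:var]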

SDValue
AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op,
                                             SelectionDAG &DAG) const {
  // TableGen doesn't have easy access to the CodeModel or RelocationModel, so
  // we make those distinctions here.

  switch (getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
    return LowerGlobalAddressELFSmall(Op, DAG);
  case CodeModel::Large:
    return LowerGlobalAddressELFLarge(Op, DAG);
  default:
    llvm_unreachable("Only small and large code models supported now");
  }
}

SDValue
AArch64TargetLowering::LowerConstantPool(SDValue Op,
                                         SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy();
  ConstantPoolSDNode *CN = cast<ConstantPoolSDNode>(Op);
  const Constant *C = CN->getConstVal();

  switch(getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
    // The most efficient code is PC-relative anyway for the small memory model,
    // so we don't need to worry about relocation model.
    return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                       DAG.getTargetConstantPool(C, PtrVT, 0, 0,
                                                 AArch64II::MO_NO_FLAG),
                       DAG.getTargetConstantPool(C, PtrVT, 0, 0,
                                                 AArch64II::MO_LO12),
                       DAG.getConstant(CN->getAlignment(), MVT::i32));
  case CodeModel::Large:
    return DAG.getNode(
        AArch64ISD::WrapperLarge, DL, PtrVT,
        DAG.getTargetConstantPool(C, PtrVT, 0, 0, AArch64II::MO_ABS_G3),
        DAG.getTargetConstantPool(C, PtrVT, 0, 0, AArch64II::MO_ABS_G2_NC),
        DAG.getTargetConstantPool(C, PtrVT, 0, 0, AArch64II::MO_ABS_G1_NC),
        DAG.getTargetConstantPool(C, PtrVT, 0, 0, AArch64II::MO_ABS_G0_NC));
  default:
    llvm_unreachable("Only small and large code models supported now");
  }
}

SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr,
                                                SDValue DescAddr,
                                                SDLoc DL,
                                                SelectionDAG &DAG) const {
  EVT PtrVT = getPointerTy();

  // The function we need to call is simply the first entry in the GOT for this
  // descriptor, load it in preparation.
  SDValue Func, Chain;
  Func = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
                     DescAddr);

  // The function takes only one argument: the address of the descriptor itself
  // in X0.
  SDValue Glue;
  Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::X0, DescAddr, Glue);
  Glue = Chain.getValue(1);

  // Finally, there's a special calling-convention which means that the lookup
  // must preserve all registers (except X0, obviously).
  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
  const AArch64RegisterInfo *A64RI
    = static_cast<const AArch64RegisterInfo *>(TRI);
  const uint32_t *Mask = A64RI->getTLSDescCallPreservedMask();

  // We're now ready to populate the argument list, as with a normal call:
  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Func);
  Ops.push_back(SymAddr);
  Ops.push_back(DAG.getRegister(AArch64::X0, PtrVT));
  Ops.push_back(DAG.getRegisterMask(Mask));
  Ops.push_back(Glue);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0],
                      Ops.size());
  Glue = Chain.getValue(1);

  // After the call, the offset from TPIDR_EL0 is in X0, copy it out and pass it
  // back to the generic handling code.
  return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
}
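
// As a hedged illustration, the machinery built above is expected to end up
// as the standard TLSDESC sequence (modulo scheduling), e.g. for "var":
//     adrp x0, :tlsdesc:var
//     ldr  x1, [x0, #:tlsdesc_lo12:var]   ; Func, first GOT entry
//     add  x0, x0, #:tlsdesc_lo12:var     ; DescAddr passed in X0
//     blr  x1                             ; TLSDESCCALL, offset returned in X0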

SDValue
AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                             SelectionDAG &DAG) const {
  assert(getSubtarget()->isTargetELF() &&
         "TLS not implemented for non-ELF targets");
  assert(getTargetMachine().getCodeModel() == CodeModel::Small
         && "TLS only supported in small memory model");
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);

  TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());

  SDLoc DL(Op);
  EVT PtrVT = getPointerTy();
  const GlobalValue *GV = GA->getGlobal();

  SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);

  SDValue TPOff;
  if (Model == TLSModel::InitialExec) {
    TPOff = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                                   AArch64II::MO_GOTTPREL),
                        DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                                   AArch64II::MO_GOTTPREL_LO12),
                        DAG.getConstant(8, MVT::i32));
    TPOff = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
                        TPOff);
  } else if (Model == TLSModel::LocalExec) {
    SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
                                               AArch64II::MO_TPREL_G1);
    SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
                                               AArch64II::MO_TPREL_G0_NC);

    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
                                       DAG.getTargetConstant(1, MVT::i32)), 0);
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
                                       TPOff, LoVar,
                                       DAG.getTargetConstant(0, MVT::i32)), 0);
  } else if (Model == TLSModel::GeneralDynamic) {
    // Accesses used in this sequence go via the TLS descriptor which lives in
    // the GOT. Prepare an address we can use to handle this.
    SDValue HiDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                                AArch64II::MO_TLSDESC);
    SDValue LoDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
                                                AArch64II::MO_TLSDESC_LO12);
    SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                                   HiDesc, LoDesc,
                                   DAG.getConstant(8, MVT::i32));
    SDValue SymAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0);

    TPOff = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
  } else if (Model == TLSModel::LocalDynamic) {
    // Local-dynamic accesses proceed in two phases. A general-dynamic TLS
    // descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate
    // the beginning of the module's TLS region, followed by a DTPREL offset
    // calculation.

    // These accesses will need deduplicating if there's more than one.
    AArch64MachineFunctionInfo* MFI = DAG.getMachineFunction()
      .getInfo<AArch64MachineFunctionInfo>();
    MFI->incNumLocalDynamicTLSAccesses();

    // Get the location of _TLS_MODULE_BASE_:
    SDValue HiDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
                                                 AArch64II::MO_TLSDESC);
    SDValue LoDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
                                                 AArch64II::MO_TLSDESC_LO12);
    SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                                   HiDesc, LoDesc,
                                   DAG.getConstant(8, MVT::i32));
    SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT);

    ThreadBase = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);

    // Get the variable's offset from _TLS_MODULE_BASE_:
    SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
                                               AArch64II::MO_DTPREL_G1);
    SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
                                               AArch64II::MO_DTPREL_G0_NC);

    // The G1 relocation supplies bits [31:16] of the offset, so the MOVZ needs
    // a 16-bit shift (shift operand 1), matching the local-exec sequence.
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
                                       DAG.getTargetConstant(1, MVT::i32)), 0);
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
                                       TPOff, LoVar,
                                       DAG.getTargetConstant(0, MVT::i32)), 0);
  } else
    llvm_unreachable("Unsupported TLS access model");

  return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
}
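
// For reference, a hedged sketch of the code expected from the local-exec
// path above (the other models differ only in how TPOff is produced):
//     mrs  x8, tpidr_el0                 ; THREAD_POINTER
//     movz x9, #:tprel_g1:var            ; MOVZxii, 16-bit shift
//     movk x9, #:tprel_g0_nc:var         ; MOVKxii, no shift
//     add  x8, x8, x9                    ; ThreadBase + TPOff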

static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                                    bool IsSigned) {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  SDValue Vec = Op.getOperand(0);
  unsigned Opc = IsSigned ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;

  if (VT.getVectorNumElements() == 1) {
    assert(VT == MVT::v1f64 && "Unexpected vector type!");
    if (VT.getSizeInBits() == Vec.getValueSizeInBits())
      return Op;
    return DAG.UnrollVectorOp(Op.getNode());
  }

  if (VT.getSizeInBits() < Vec.getValueSizeInBits()) {
    assert(Vec.getValueType() == MVT::v2i64 && VT == MVT::v2f32 &&
           "Unexpected vector type!");
    Vec = DAG.getNode(Opc, dl, MVT::v2f64, Vec);
    return DAG.getNode(ISD::FP_ROUND, dl, VT, Vec, DAG.getIntPtrConstant(0));
  } else if (VT.getSizeInBits() > Vec.getValueSizeInBits()) {
    unsigned CastOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    EVT CastVT = EVT::getIntegerVT(*DAG.getContext(),
                                   VT.getVectorElementType().getSizeInBits());
    CastVT =
        EVT::getVectorVT(*DAG.getContext(), CastVT, VT.getVectorNumElements());
    Vec = DAG.getNode(CastOpc, dl, CastVT, Vec);
  }

  return DAG.getNode(Opc, dl, VT, Vec);
}

SDValue
AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
                                      bool IsSigned) const {
  if (Op.getValueType().isVector())
    return LowerVectorINT_TO_FP(Op, DAG, IsSigned);
  if (Op.getValueType() != MVT::f128) {
    // Legal for everything except f128.
    return Op;
  }

  RTLIB::Libcall LC;
  if (IsSigned)
    LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());
  else
    LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());

  return LowerF128ToCall(Op, DAG, LC);
}

SDValue
AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy();

  // When compiling PIC, jump tables get put in the code section so a static
  // relocation-style is acceptable for both cases.
  switch (getTargetMachine().getCodeModel()) {
  case CodeModel::Small:
    return DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
                       DAG.getTargetJumpTable(JT->getIndex(), PtrVT),
                       DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
                                              AArch64II::MO_LO12),
                       DAG.getConstant(1, MVT::i32));
  case CodeModel::Large:
    return DAG.getNode(
        AArch64ISD::WrapperLarge, dl, PtrVT,
        DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G3),
        DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G2_NC),
        DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G1_NC),
        DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G0_NC));
  default:
    llvm_unreachable("Only small and large code models supported now");
  }
}

// (SELECT_CC lhs, rhs, iftrue, iffalse, condcode)
SDValue
AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue IfTrue = Op.getOperand(2);
  SDValue IfFalse = Op.getOperand(3);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();

  if (LHS.getValueType() == MVT::f128) {
    // f128 comparisons are lowered to libcalls, but slot in nicely here
    // afterwards.
    softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);

    // If softenSetCCOperands returned a scalar, we need to compare the result
    // against zero to select between true and false values.
    if (RHS.getNode() == 0) {
      RHS = DAG.getConstant(0, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }

  if (LHS.getValueType().isInteger()) {
    SDValue A64cc;

    // Integers are handled in a separate function because the combinations of
    // immediates and tests can get hairy and we may want to fiddle things.
    SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);

    return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
                       CmpOp, IfTrue, IfFalse, A64cc);
  }

  // Note that some LLVM floating-point CondCodes can't be lowered to a single
  // conditional branch, hence FPCCToA64CC can set a second test, where either
  // passing is sufficient.
  A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
  CondCode = FPCCToA64CC(CC, Alternative);
  SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
  SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
                              DAG.getCondCode(CC));
  SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl,
                                     Op.getValueType(),
                                     SetCC, IfTrue, IfFalse, A64cc);

  if (Alternative != A64CC::Invalid) {
    A64cc = DAG.getConstant(Alternative, MVT::i32);
    A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
                               SetCC, IfTrue, A64SELECT_CC, A64cc);
  }

  return A64SELECT_CC;
}

// (SELECT testbit, iftrue, iffalse)
SDValue
AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue TheBit = Op.getOperand(0);
  SDValue IfTrue = Op.getOperand(1);
  SDValue IfFalse = Op.getOperand(2);

  // AArch64 BooleanContents is the default UndefinedBooleanContent, which means
  // that as the consumer we are responsible for ignoring rubbish in higher
  // bits.
  TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
                       DAG.getConstant(1, MVT::i32));
  SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
                               DAG.getConstant(0, TheBit.getValueType()),
                               DAG.getCondCode(ISD::SETNE));

  return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
                     A64CMP, IfTrue, IfFalse,
                     DAG.getConstant(A64CC::NE, MVT::i32));
}

static SDValue LowerVectorSETCC(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  EVT VT = Op.getValueType();
  bool Invert = false;
  SDValue Op0, Op1;
  unsigned Opcode;

  if (LHS.getValueType().isInteger()) {

    // Attempt to use Vector Integer Compare Mask Test instruction.
    // TST = icmp ne (and (op0, op1), zero).
    if (CC == ISD::SETNE) {
      if (((LHS.getOpcode() == ISD::AND) &&
           ISD::isBuildVectorAllZeros(RHS.getNode())) ||
          ((RHS.getOpcode() == ISD::AND) &&
           ISD::isBuildVectorAllZeros(LHS.getNode()))) {

        SDValue AndOp = (LHS.getOpcode() == ISD::AND) ? LHS : RHS;
        SDValue NewLHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(0));
        SDValue NewRHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(1));
        return DAG.getNode(AArch64ISD::NEON_TST, DL, VT, NewLHS, NewRHS);
      }
    }

    // Attempt to use Vector Integer Compare Mask against Zero instr (Signed).
    // Note: Compare against Zero does not support unsigned predicates.
    if ((ISD::isBuildVectorAllZeros(RHS.getNode()) ||
         ISD::isBuildVectorAllZeros(LHS.getNode())) &&
        !isUnsignedIntSetCC(CC)) {

      // If LHS is the zero value, swap operands and CondCode.
      if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
        CC = getSetCCSwappedOperands(CC);
        Op0 = RHS;
      } else
        Op0 = LHS;

      // Ensure valid CondCode for Compare Mask against Zero instruction:
      // EQ, GE, GT, LE, LT.
      if (ISD::SETNE == CC) {
        Invert = true;
        CC = ISD::SETEQ;
      }

      // Using constant type to differentiate integer and FP compares with zero.
      Op1 = DAG.getConstant(0, MVT::i32);
      Opcode = AArch64ISD::NEON_CMPZ;

    } else {
      // Attempt to use Vector Integer Compare Mask instr (Signed/Unsigned).
      // Ensure valid CondCode for Compare Mask instr: EQ, GE, GT, UGE, UGT.
      bool Swap = false;
      switch (CC) {
      default:
        llvm_unreachable("Illegal integer comparison.");
      case ISD::SETEQ:
      case ISD::SETGT:
      case ISD::SETGE:
      case ISD::SETUGT:
      case ISD::SETUGE:
        break;
      case ISD::SETNE:
        Invert = true;
        CC = ISD::SETEQ;
        break;
      case ISD::SETULT:
      case ISD::SETULE:
      case ISD::SETLT:
      case ISD::SETLE:
        Swap = true;
        CC = getSetCCSwappedOperands(CC);
      }

      if (Swap)
        std::swap(LHS, RHS);

      Opcode = AArch64ISD::NEON_CMP;
      Op0 = LHS;
      Op1 = RHS;
    }

    // Generate Compare Mask instr or Compare Mask against Zero instr.
    SDValue NeonCmp =
        DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));

    if (Invert)
      NeonCmp = DAG.getNOT(DL, NeonCmp, VT);

    return NeonCmp;
  }

  // Now handle Floating Point cases.
  // Attempt to use Vector Floating Point Compare Mask against Zero instruction.
  if (ISD::isBuildVectorAllZeros(RHS.getNode()) ||
      ISD::isBuildVectorAllZeros(LHS.getNode())) {

    // If LHS is the zero value, swap operands and CondCode.
    if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
      CC = getSetCCSwappedOperands(CC);
      Op0 = RHS;
    } else
      Op0 = LHS;

    // Using constant type to differentiate integer and FP compares with zero.
    Op1 = DAG.getConstantFP(0, MVT::f32);
    Opcode = AArch64ISD::NEON_CMPZ;
  } else {
    // Attempt to use Vector Floating Point Compare Mask instruction.
    Op0 = LHS;
    Op1 = RHS;
    Opcode = AArch64ISD::NEON_CMP;
  }

  SDValue NeonCmpAlt;
  // Some register compares have to be implemented with swapped CC and operands,
  // e.g.: OLT implemented as OGT with swapped operands.
  bool SwapIfRegArgs = false;

  // Ensure valid CondCode for FP Compare Mask against Zero instruction:
  // EQ, GE, GT, LE, LT.
  // And ensure valid CondCode for FP Compare Mask instruction: EQ, GE, GT.
  switch (CC) {
  default:
    llvm_unreachable("Illegal FP comparison");
  case ISD::SETUNE:
  case ISD::SETNE:
    Invert = true; // Fallthrough
  case ISD::SETOEQ:
  case ISD::SETEQ:
    CC = ISD::SETEQ;
    break;
  case ISD::SETOLT:
  case ISD::SETLT:
    CC = ISD::SETGT;
    SwapIfRegArgs = true;
    break;
  case ISD::SETOGT:
  case ISD::SETGT:
    CC = ISD::SETGT;
    break;
  case ISD::SETOLE:
  case ISD::SETLE:
    CC = ISD::SETGE;
    SwapIfRegArgs = true;
    break;
  case ISD::SETOGE:
  case ISD::SETGE:
    CC = ISD::SETGE;
    break;
  case ISD::SETUGE:
    Invert = true;
    CC = ISD::SETGT;
    SwapIfRegArgs = true;
    break;
  case ISD::SETULE:
    Invert = true;
    CC = ISD::SETGT;
    break;
  case ISD::SETUGT:
    Invert = true;
    CC = ISD::SETGE;
    SwapIfRegArgs = true;
    break;
  case ISD::SETULT:
    Invert = true;
    CC = ISD::SETGE;
    break;
  case ISD::SETUEQ:
    Invert = true; // Fallthrough
  case ISD::SETONE:
    // Expand this to (OGT | OLT).
    NeonCmpAlt =
        DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGT));
    CC = ISD::SETLT;
    SwapIfRegArgs = true;
    break;
  case ISD::SETUO:
    Invert = true; // Fallthrough
  case ISD::SETO:
    // Expand this to (OGE | OLT).
    NeonCmpAlt =
        DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGE));
    CC = ISD::SETLT;
    SwapIfRegArgs = true;
    break;
  }

  if (Opcode == AArch64ISD::NEON_CMP && SwapIfRegArgs) {
    CC = getSetCCSwappedOperands(CC);
    std::swap(Op0, Op1);
  }

  // Generate FP Compare Mask instr or FP Compare Mask against Zero instr.
  SDValue NeonCmp = DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));

  if (NeonCmpAlt.getNode())
    NeonCmp = DAG.getNode(ISD::OR, DL, VT, NeonCmp, NeonCmpAlt);

  if (Invert)
    NeonCmp = DAG.getNOT(DL, NeonCmp, VT);

  return NeonCmp;
}

// (SETCC lhs, rhs, condcode)
SDValue
AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  EVT VT = Op.getValueType();

  if (VT.isVector())
    return LowerVectorSETCC(Op, DAG);

  if (LHS.getValueType() == MVT::f128) {
    // f128 comparisons will be lowered to libcalls giving a valid LHS and RHS
    // for the rest of the function (some i32 or i64 values).
    softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);

    // If softenSetCCOperands returned a scalar, use it.
    if (RHS.getNode() == 0) {
      assert(LHS.getValueType() == Op.getValueType() &&
             "Unexpected setcc expansion!");
      return LHS;
    }
  }

  if (LHS.getValueType().isInteger()) {
    SDValue A64cc;

    // Integers are handled in a separate function because the combinations of
    // immediates and tests can get hairy and we may want to fiddle things.
    SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);

    return DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
                       CmpOp, DAG.getConstant(1, VT), DAG.getConstant(0, VT),
                       A64cc);
  }

  // Note that some LLVM floating-point CondCodes can't be lowered to a single
  // conditional branch, hence FPCCToA64CC can set a second test, where either
  // passing is sufficient.
  A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
  CondCode = FPCCToA64CC(CC, Alternative);
  SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
  SDValue CmpOp = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
                              DAG.getCondCode(CC));
  SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
                                     CmpOp, DAG.getConstant(1, VT),
                                     DAG.getConstant(0, VT), A64cc);

  if (Alternative != A64CC::Invalid) {
    A64cc = DAG.getConstant(Alternative, MVT::i32);
    A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
                               DAG.getConstant(1, VT), A64SELECT_CC, A64cc);
  }

  return A64SELECT_CC;
}

SDValue
AArch64TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
  const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // We have to make sure we copy the entire structure: 8+8+8+4+4 = 32 bytes
  // rather than just 8.
  return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op),
                       Op.getOperand(1), Op.getOperand(2),
                       DAG.getConstant(32, MVT::i32), 8, false, false,
                       MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
}

SDValue
AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  // The layout of the va_list struct is specified in the AArch64 Procedure Call
  // Standard, section B.3.
  MachineFunction &MF = DAG.getMachineFunction();
  AArch64MachineFunctionInfo *FuncInfo
    = MF.getInfo<AArch64MachineFunctionInfo>();

  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  SDValue VAList = Op.getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SmallVector<SDValue, 4> MemOps;

  // void *__stack at offset 0
  SDValue Stack = DAG.getFrameIndex(FuncInfo->getVariadicStackIdx(),
                                    getPointerTy());
  MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList,
                                MachinePointerInfo(SV), false, false, 0));

  // void *__gr_top at offset 8
  int GPRSize = FuncInfo->getVariadicGPRSize();
  if (GPRSize > 0) {
    SDValue GRTop, GRTopAddr;

    GRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
                            DAG.getConstant(8, getPointerTy()));

    GRTop = DAG.getFrameIndex(FuncInfo->getVariadicGPRIdx(), getPointerTy());
    GRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), GRTop,
                        DAG.getConstant(GPRSize, getPointerTy()));

    MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr,
                                  MachinePointerInfo(SV, 8),
                                  false, false, 0));
  }

  // void *__vr_top at offset 16
  int FPRSize = FuncInfo->getVariadicFPRSize();
  if (FPRSize > 0) {
    SDValue VRTop, VRTopAddr;
    VRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
                            DAG.getConstant(16, getPointerTy()));

    VRTop = DAG.getFrameIndex(FuncInfo->getVariadicFPRIdx(), getPointerTy());
    VRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), VRTop,
                        DAG.getConstant(FPRSize, getPointerTy()));

    MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
                                  MachinePointerInfo(SV, 16),
                                  false, false, 0));
  }

  // int __gr_offs at offset 24
  SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
                                   DAG.getConstant(24, getPointerTy()));
  MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, MVT::i32),
                                GROffsAddr, MachinePointerInfo(SV, 24),
                                false, false, 0));

  // int __vr_offs at offset 28
  SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
                                   DAG.getConstant(28, getPointerTy()));
  MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, MVT::i32),
                                VROffsAddr, MachinePointerInfo(SV, 28),
                                false, false, 0));

  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
                     MemOps.size());
}
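
// For reference, the structure being initialized above matches this sketch of
// the PCS va_list (field names from AAPCS64 B.3; the sizes account for the 32
// bytes copied by LowerVACOPY):
//
//     struct va_list {
//       void *__stack;   // offset 0:  next stacked argument
//       void *__gr_top;  // offset 8:  end of GP register save area
//       void *__vr_top;  // offset 16: end of FP/SIMD register save area
//       int __gr_offs;   // offset 24: negative offset to next GP reg arg
//       int __vr_offs;   // offset 28: negative offset to next FP reg arg
//     };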

SDValue
AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Don't know how to custom lower this!");
  case ISD::FADD: return LowerF128ToCall(Op, DAG, RTLIB::ADD_F128);
  case ISD::FSUB: return LowerF128ToCall(Op, DAG, RTLIB::SUB_F128);
  case ISD::FMUL: return LowerF128ToCall(Op, DAG, RTLIB::MUL_F128);
  case ISD::FDIV: return LowerF128ToCall(Op, DAG, RTLIB::DIV_F128);
  case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, true);
  case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG, false);
  case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG, true);
  case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG, false);
  case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
  case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);

  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::BR_CC: return LowerBR_CC(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddressELF(Op, DAG);
  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::JumpTable: return LowerJumpTable(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
  case ISD::SETCC: return LowerSETCC(Op, DAG);
  case ISD::VACOPY: return LowerVACOPY(Op, DAG);
  case ISD::VASTART: return LowerVASTART(Op, DAG);
  case ISD::BUILD_VECTOR:
    return LowerBUILD_VECTOR(Op, DAG, getSubtarget());
  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
  }

  return SDValue();
}
3128 /// Check if the specified splat value corresponds to a valid vector constant
3129 /// for a Neon instruction with a "modified immediate" operand (e.g., MOVI). If
3130 /// so, return the encoded 8-bit immediate and the OpCmode instruction fields
3132 static bool isNeonModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
3133 unsigned SplatBitSize, SelectionDAG &DAG,
3134 bool is128Bits, NeonModImmType type, EVT &VT,
3135 unsigned &Imm, unsigned &OpCmode) {
3136 switch (SplatBitSize) {
3138 llvm_unreachable("unexpected size for isNeonModifiedImm");
3140 if (type != Neon_Mov_Imm)
3142 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
3143 // Neon movi per byte: Op=0, Cmode=1110.
3146 VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
3150 // Neon move inst per halfword
3151 VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
3152 if ((SplatBits & ~0xff) == 0) {
3153 // Value = 0x00nn is 0x00nn LSL 0
3154 // movi: Op=0, Cmode=1000; mvni: Op=1, Cmode=1000
3155 // bic: Op=1, Cmode=1001; orr: Op=0, Cmode=1001
3161 if ((SplatBits & ~0xff00) == 0) {
3162 // Value = 0xnn00 is 0x00nn LSL 8
3163 // movi: Op=0, Cmode=1010; mvni: Op=1, Cmode=1010
3164 // bic: Op=1, Cmode=1011; orr: Op=0, Cmode=1011
3166 Imm = SplatBits >> 8;
3170 // can't handle any other
3175 // First the LSL variants (MSL is unusable by some interested instructions).
3177 // Neon move instr per word, shift zeros
3178 VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
3179 if ((SplatBits & ~0xff) == 0) {
3180 // Value = 0x000000nn is 0x000000nn LSL 0
3181 // movi: Op=0, Cmode= 0000; mvni: Op=1, Cmode= 0000
3182 // bic: Op=1, Cmode= 0001; orr: Op=0, Cmode= 0001
3188 if ((SplatBits & ~0xff00) == 0) {
3189 // Value = 0x0000nn00 is 0x000000nn LSL 8
3190 // movi: Op=0, Cmode= 0010; mvni: Op=1, Cmode= 0010
3191 // bic: Op=1, Cmode= 0011; orr : Op=0, Cmode= 0011
3193 Imm = SplatBits >> 8;
3197 if ((SplatBits & ~0xff0000) == 0) {
3198 // Value = 0x00nn0000 is 0x000000nn LSL 16
3199 // movi: Op=0, Cmode= 0100; mvni: Op=1, Cmode= 0100
3200 // bic: Op=1, Cmode= 0101; orr: Op=0, Cmode= 0101
3202 Imm = SplatBits >> 16;
3206 if ((SplatBits & ~0xff000000) == 0) {
3207 // Value = 0xnn000000 is 0x000000nn LSL 24
3208 // movi: Op=0, Cmode= 0110; mvni: Op=1, Cmode= 0110
3209 // bic: Op=1, Cmode= 0111; orr: Op=0, Cmode= 0111
3211 Imm = SplatBits >> 24;
3216 // Now the MSL immediates.
3218 // Neon move instr per word, shift ones
3219 if ((SplatBits & ~0xffff) == 0 &&
3220 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
3221 // Value = 0x0000nnff is 0x000000nn MSL 8
3222 // movi: Op=0, Cmode= 1100; mvni: Op=1, Cmode= 1100
3224 Imm = SplatBits >> 8;
3228 if ((SplatBits & ~0xffffff) == 0 &&
3229 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
3230 // Value = 0x00nnffff is 0x000000nn MSL 16
// movi: Op=0, Cmode= 1101; mvni: Op=1, Cmode= 1101
3233 Imm = SplatBits >> 16;
3237 // can't handle any other
3242 if (type != Neon_Mov_Imm)
3244 // Neon move instr bytemask, where each byte is either 0x00 or 0xff.
3245 // movi Op=1, Cmode=1110.
3247 uint64_t BitMask = 0xff;
3249 unsigned ImmMask = 1;
3251 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
3252 if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
3255 } else if ((SplatBits & BitMask) != 0) {
3262 VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
3270 static SDValue PerformANDCombine(SDNode *N,
3271 TargetLowering::DAGCombinerInfo &DCI) {
3273 SelectionDAG &DAG = DCI.DAG;
3275 EVT VT = N->getValueType(0);
// We're looking for (and (srl x, #imm), mask) pairs, which form a UBFX.
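// For example (illustrative): (and (srl x, #3), #0x1f) on i32 extracts the
// 5-bit field starting at bit 3 and should become "ubfx w0, w0, #3, #5".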
3279 if (VT != MVT::i32 && VT != MVT::i64)
3282 if (!isa<ConstantSDNode>(N->getOperand(1)))
3285 uint64_t TruncMask = N->getConstantOperandVal(1);
3286 if (!isMask_64(TruncMask))
3289 uint64_t Width = CountPopulation_64(TruncMask);
3290 SDValue Shift = N->getOperand(0);
3292 if (Shift.getOpcode() != ISD::SRL)
3295 if (!isa<ConstantSDNode>(Shift->getOperand(1)))
3297 uint64_t LSB = Shift->getConstantOperandVal(1);
3299 if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
3302 return DAG.getNode(AArch64ISD::UBFX, DL, VT, Shift.getOperand(0),
3303 DAG.getConstant(LSB, MVT::i64),
3304 DAG.getConstant(LSB + Width - 1, MVT::i64));
3307 /// For a true bitfield insert, the bits getting into that contiguous mask
3308 /// should come from the low part of an existing value: they must be formed from
3309 /// a compatible SHL operation (unless they're already low). This function
3310 /// checks that condition and returns the least-significant bit that's
/// intended. If the operation is not a field preparation, -1 is returned.
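/// For example (illustrative): with Mask == 0x00000ff0 and MaskedVal ==
/// (shl x, #4), the existing SHL exactly cancels the required right-shift,
/// so MaskedVal becomes x and the LSB returned is 4.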
3312 static int32_t getLSBForBFI(SelectionDAG &DAG, SDLoc DL, EVT VT,
3313 SDValue &MaskedVal, uint64_t Mask) {
3314 if (!isShiftedMask_64(Mask))
3317 // Now we need to alter MaskedVal so that it is an appropriate input for a BFI
3318 // instruction. BFI will do a left-shift by LSB before applying the mask we've
3319 // spotted, so in general we should pre-emptively "undo" that by making sure
3320 // the incoming bits have had a right-shift applied to them.
3322 // This right shift, however, will combine with existing left/right shifts. In
3323 // the simplest case of a completely straight bitfield operation, it will be
3324 // expected to completely cancel out with an existing SHL. More complicated
3325 // cases (e.g. bitfield to bitfield copy) may still need a real shift before
3328 uint64_t LSB = countTrailingZeros(Mask);
3329 int64_t ShiftRightRequired = LSB;
3330 if (MaskedVal.getOpcode() == ISD::SHL &&
3331 isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
3332 ShiftRightRequired -= MaskedVal.getConstantOperandVal(1);
3333 MaskedVal = MaskedVal.getOperand(0);
3334 } else if (MaskedVal.getOpcode() == ISD::SRL &&
3335 isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
3336 ShiftRightRequired += MaskedVal.getConstantOperandVal(1);
3337 MaskedVal = MaskedVal.getOperand(0);
3340 if (ShiftRightRequired > 0)
3341 MaskedVal = DAG.getNode(ISD::SRL, DL, VT, MaskedVal,
3342 DAG.getConstant(ShiftRightRequired, MVT::i64));
3343 else if (ShiftRightRequired < 0) {
3344 // We could actually end up with a residual left shift, for example with
3345 // "struc.bitfield = val << 1".
3346 MaskedVal = DAG.getNode(ISD::SHL, DL, VT, MaskedVal,
3347 DAG.getConstant(-ShiftRightRequired, MVT::i64));
3353 /// Searches from N for an existing AArch64ISD::BFI node, possibly surrounded by
3354 /// a mask and an extension. Returns true if a BFI was found and provides
3355 /// information on its surroundings.
3356 static bool findMaskedBFI(SDValue N, SDValue &BFI, uint64_t &Mask,
3359 if (N.getOpcode() == ISD::ZERO_EXTEND) {
3361 N = N.getOperand(0);
3364 if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
3365 Mask = N->getConstantOperandVal(1);
3366 N = N.getOperand(0);
3368 // Mask is the whole width.
3369 Mask = -1ULL >> (64 - N.getValueType().getSizeInBits());
3372 if (N.getOpcode() == AArch64ISD::BFI) {
3380 /// Try to combine a subtree (rooted at an OR) into a "masked BFI" node, which
3381 /// is roughly equivalent to (and (BFI ...), mask). This form is used because it
3382 /// can often be further combined with a larger mask. Ultimately, we want mask
3383 /// to be 2^32-1 or 2^64-1 so the AND can be skipped.
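/// For example (illustrative IR):
///   %l = and i32 %a, 0xffff0000
///   %r = and i32 %b, 0x0000ffff
///   %v = or i32 %l, %r
/// The RHS mask is contiguous from bit 0, so this becomes
/// (BFI %a, %b, #0, #16); the two masks cover all 32 bits, so no AND remains.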
3384 static SDValue tryCombineToBFI(SDNode *N,
3385 TargetLowering::DAGCombinerInfo &DCI,
3386 const AArch64Subtarget *Subtarget) {
3387 SelectionDAG &DAG = DCI.DAG;
3389 EVT VT = N->getValueType(0);
3391 assert(N->getOpcode() == ISD::OR && "Unexpected root");
3393 // We need the LHS to be (and SOMETHING, MASK). Find out what that mask is or
3394 // abandon the effort.
3395 SDValue LHS = N->getOperand(0);
3396 if (LHS.getOpcode() != ISD::AND)
3400 if (isa<ConstantSDNode>(LHS.getOperand(1)))
3401 LHSMask = LHS->getConstantOperandVal(1);
3405 // We also need the RHS to be (and SOMETHING, MASK). Find out what that mask
3406 // is or abandon the effort.
3407 SDValue RHS = N->getOperand(1);
3408 if (RHS.getOpcode() != ISD::AND)
3412 if (isa<ConstantSDNode>(RHS.getOperand(1)))
3413 RHSMask = RHS->getConstantOperandVal(1);
3417 // Can't do anything if the masks are incompatible.
3418 if (LHSMask & RHSMask)
3421 // Now we need one of the masks to be a contiguous field. Without loss of
3422 // generality that should be the RHS one.
3423 SDValue Bitfield = LHS.getOperand(0);
3424 if (getLSBForBFI(DAG, DL, VT, Bitfield, LHSMask) != -1) {
// We know that LHS is a candidate new value, and RHS isn't already a better
// one.
3427 std::swap(LHS, RHS);
3428 std::swap(LHSMask, RHSMask);
3431 // We've done our best to put the right operands in the right places, all we
3432 // can do now is check whether a BFI exists.
3433 Bitfield = RHS.getOperand(0);
3434 int32_t LSB = getLSBForBFI(DAG, DL, VT, Bitfield, RHSMask);
3438 uint32_t Width = CountPopulation_64(RHSMask);
3439 assert(Width && "Expected non-zero bitfield width");
3441 SDValue BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
3442 LHS.getOperand(0), Bitfield,
3443 DAG.getConstant(LSB, MVT::i64),
3444 DAG.getConstant(Width, MVT::i64));
3447 if ((LHSMask | RHSMask) == (-1ULL >> (64 - VT.getSizeInBits())))
3450 return DAG.getNode(ISD::AND, DL, VT, BFI,
3451 DAG.getConstant(LHSMask | RHSMask, VT));
3454 /// Search for the bitwise combining (with careful masks) of a MaskedBFI and its
3455 /// original input. This is surprisingly common because SROA splits things up
3456 /// into i8 chunks, so the originally detected MaskedBFI may actually only act
// on the low (say) byte of a word. This is then ORed into the rest of the
3458 /// word afterwards.
3460 /// Basic input: (or (and OLDFIELD, MASK1), (MaskedBFI MASK2, OLDFIELD, ...)).
3462 /// If MASK1 and MASK2 are compatible, we can fold the whole thing into the
/// MaskedBFI. We can also deal with a certain amount of extend/truncate being
/// swallowed in the process.
3465 static SDValue tryCombineToLargerBFI(SDNode *N,
3466 TargetLowering::DAGCombinerInfo &DCI,
3467 const AArch64Subtarget *Subtarget) {
3468 SelectionDAG &DAG = DCI.DAG;
3470 EVT VT = N->getValueType(0);
3472 // First job is to hunt for a MaskedBFI on either the left or right. Swap
3473 // operands if it's actually on the right.
3475 SDValue PossExtraMask;
3476 uint64_t ExistingMask = 0;
3477 bool Extended = false;
3478 if (findMaskedBFI(N->getOperand(0), BFI, ExistingMask, Extended))
3479 PossExtraMask = N->getOperand(1);
3480 else if (findMaskedBFI(N->getOperand(1), BFI, ExistingMask, Extended))
3481 PossExtraMask = N->getOperand(0);
3485 // We can only combine a BFI with another compatible mask.
3486 if (PossExtraMask.getOpcode() != ISD::AND ||
3487 !isa<ConstantSDNode>(PossExtraMask.getOperand(1)))
3490 uint64_t ExtraMask = PossExtraMask->getConstantOperandVal(1);
3492 // Masks must be compatible.
3493 if (ExtraMask & ExistingMask)
3496 SDValue OldBFIVal = BFI.getOperand(0);
3497 SDValue NewBFIVal = BFI.getOperand(1);
3499 // We skipped a ZERO_EXTEND above, so the input to the MaskedBFIs should be
3500 // 32-bit and we'll be forming a 64-bit MaskedBFI. The MaskedBFI arguments
3501 // need to be made compatible.
3502 assert(VT == MVT::i64 && BFI.getValueType() == MVT::i32
3503 && "Invalid types for BFI");
3504 OldBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, OldBFIVal);
3505 NewBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, NewBFIVal);
3508 // We need the MaskedBFI to be combined with a mask of the *same* value.
3509 if (PossExtraMask.getOperand(0) != OldBFIVal)
3512 BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
3513 OldBFIVal, NewBFIVal,
3514 BFI.getOperand(2), BFI.getOperand(3));
3516 // If the masking is trivial, we don't need to create it.
3517 if ((ExtraMask | ExistingMask) == (-1ULL >> (64 - VT.getSizeInBits())))
3520 return DAG.getNode(ISD::AND, DL, VT, BFI,
3521 DAG.getConstant(ExtraMask | ExistingMask, VT));
3524 /// An EXTR instruction is made up of two shifts, ORed together. This helper
3525 /// searches for and classifies those shifts.
3526 static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
3528 if (N.getOpcode() == ISD::SHL)
3530 else if (N.getOpcode() == ISD::SRL)
3535 if (!isa<ConstantSDNode>(N.getOperand(1)))
3538 ShiftAmount = N->getConstantOperandVal(1);
3539 Src = N->getOperand(0);
/// An EXTR instruction extracts a contiguous chunk of bits from two existing
/// registers viewed as a high/low pair. This function looks for the pattern:
/// (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) and replaces it with an
/// EXTR. This can't quite be done in TableGen because the two immediates
/// aren't independent.
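/// For example (illustrative): on i32,
///   (or (shl x, #24), (srl y, #8))  ==>  EXTR(x, y, #8)
/// which should select to "extr w0, w0, w1, #8".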
3548 static SDValue tryCombineToEXTR(SDNode *N,
3549 TargetLowering::DAGCombinerInfo &DCI) {
3550 SelectionDAG &DAG = DCI.DAG;
3552 EVT VT = N->getValueType(0);
3554 assert(N->getOpcode() == ISD::OR && "Unexpected root");
3556 if (VT != MVT::i32 && VT != MVT::i64)
3560 uint32_t ShiftLHS = 0;
3562 if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi))
3566 uint32_t ShiftRHS = 0;
3568 if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi))
3571 // If they're both trying to come from the high part of the register, they're
3572 // not really an EXTR.
3573 if (LHSFromHi == RHSFromHi)
3576 if (ShiftLHS + ShiftRHS != VT.getSizeInBits())
3580 std::swap(LHS, RHS);
3581 std::swap(ShiftLHS, ShiftRHS);
3584 return DAG.getNode(AArch64ISD::EXTR, DL, VT,
3586 DAG.getConstant(ShiftRHS, MVT::i64));
3589 /// Target-specific dag combine xforms for ISD::OR
3590 static SDValue PerformORCombine(SDNode *N,
3591 TargetLowering::DAGCombinerInfo &DCI,
3592 const AArch64Subtarget *Subtarget) {
3594 SelectionDAG &DAG = DCI.DAG;
3596 EVT VT = N->getValueType(0);
if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
3601 // Attempt to recognise bitfield-insert operations.
3602 SDValue Res = tryCombineToBFI(N, DCI, Subtarget);
// Attempt to combine an existing MaskedBFI operation into one with a larger
// mask.
3608 Res = tryCombineToLargerBFI(N, DCI, Subtarget);
3612 Res = tryCombineToEXTR(N, DCI);
3616 if (!Subtarget->hasNEON())
3619 // Attempt to use vector immediate-form BSL
3620 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
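// For example (illustrative): if A is a constant splat of 0x00ff in each
// 16-bit lane, the low byte of every lane comes from B and the high byte
// from C, which is exactly a bitwise select with A as the mask.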
3622 SDValue N0 = N->getOperand(0);
3623 if (N0.getOpcode() != ISD::AND)
3626 SDValue N1 = N->getOperand(1);
3627 if (N1.getOpcode() != ISD::AND)
3630 if (VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
3632 unsigned SplatBitSize;
3634 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
3636 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
3639 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
3641 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
3642 HasAnyUndefs) && !HasAnyUndefs &&
3643 SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
3644 SplatBits0 == ~SplatBits1) {
3646 return DAG.getNode(ISD::VSELECT, DL, VT, N0->getOperand(1),
3647 N0->getOperand(0), N1->getOperand(0));
3655 /// Target-specific dag combine xforms for ISD::SRA
3656 static SDValue PerformSRACombine(SDNode *N,
3657 TargetLowering::DAGCombinerInfo &DCI) {
3659 SelectionDAG &DAG = DCI.DAG;
3661 EVT VT = N->getValueType(0);
// We're looking for an SRA/SHL pair which forms an SBFX.
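// For example (illustrative): on i32, (sra (shl x, #24), #27) extracts the
// 5-bit field starting at bit 3 and sign-extends it, which should select to
// "sbfx w0, w0, #3, #5".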
3665 if (VT != MVT::i32 && VT != MVT::i64)
3668 if (!isa<ConstantSDNode>(N->getOperand(1)))
3671 uint64_t ExtraSignBits = N->getConstantOperandVal(1);
3672 SDValue Shift = N->getOperand(0);
3674 if (Shift.getOpcode() != ISD::SHL)
3677 if (!isa<ConstantSDNode>(Shift->getOperand(1)))
3680 uint64_t BitsOnLeft = Shift->getConstantOperandVal(1);
3681 uint64_t Width = VT.getSizeInBits() - ExtraSignBits;
3682 uint64_t LSB = VT.getSizeInBits() - Width - BitsOnLeft;
3684 if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
3687 return DAG.getNode(AArch64ISD::SBFX, DL, VT, Shift.getOperand(0),
3688 DAG.getConstant(LSB, MVT::i64),
3689 DAG.getConstant(LSB + Width - 1, MVT::i64));
3692 /// Check if this is a valid build_vector for the immediate operand of
3693 /// a vector shift operation, where all the elements of the build_vector
3694 /// must have the same constant integer value.
3695 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
3696 // Ignore bit_converts.
3697 while (Op.getOpcode() == ISD::BITCAST)
3698 Op = Op.getOperand(0);
3699 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
3700 APInt SplatBits, SplatUndef;
3701 unsigned SplatBitSize;
3703 if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
3704 HasAnyUndefs, ElementBits) ||
3705 SplatBitSize > ElementBits)
3707 Cnt = SplatBits.getSExtValue();
3711 /// Check if this is a valid build_vector for the immediate operand of
3712 /// a vector shift left operation. That value must be in the range:
3713 /// 0 <= Value < ElementBits
3714 static bool isVShiftLImm(SDValue Op, EVT VT, int64_t &Cnt) {
3715 assert(VT.isVector() && "vector shift count is not a vector type");
3716 unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
3717 if (!getVShiftImm(Op, ElementBits, Cnt))
3719 return (Cnt >= 0 && Cnt < ElementBits);
3722 /// Check if this is a valid build_vector for the immediate operand of a
3723 /// vector shift right operation. The value must be in the range:
3724 /// 1 <= Value <= ElementBits
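/// For example (illustrative): on v4i16 a splat shift amount of 16 is a valid
/// right-shift immediate (1 <= 16 <= 16), but it would not be a valid
/// left-shift immediate, since SHL only permits 0 <= Value < 16.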
3725 static bool isVShiftRImm(SDValue Op, EVT VT, int64_t &Cnt) {
3726 assert(VT.isVector() && "vector shift count is not a vector type");
3727 unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
3728 if (!getVShiftImm(Op, ElementBits, Cnt))
3730 return (Cnt >= 1 && Cnt <= ElementBits);
3733 static SDValue GenForSextInreg(SDNode *N,
3734 TargetLowering::DAGCombinerInfo &DCI,
3735 EVT SrcVT, EVT DestVT, EVT SubRegVT,
3736 const int *Mask, SDValue Src) {
3737 SelectionDAG &DAG = DCI.DAG;
3739 = DAG.getNode(ISD::BITCAST, SDLoc(N), SrcVT, Src);
3741 = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), DestVT, Bitcast);
3743 = DAG.getVectorShuffle(DestVT, SDLoc(N), Sext, DAG.getUNDEF(DestVT), Mask);
3744 SDValue ExtractSubreg
3745 = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, SDLoc(N),
3746 SubRegVT, ShuffleVec,
3747 DAG.getTargetConstant(AArch64::sub_64, MVT::i32)), 0);
3748 return ExtractSubreg;
3751 /// Checks for vector shifts and lowers them.
3752 static SDValue PerformShiftCombine(SDNode *N,
3753 TargetLowering::DAGCombinerInfo &DCI,
3754 const AArch64Subtarget *ST) {
3755 SelectionDAG &DAG = DCI.DAG;
3756 EVT VT = N->getValueType(0);
3757 if (N->getOpcode() == ISD::SRA && (VT == MVT::i32 || VT == MVT::i64))
3758 return PerformSRACombine(N, DCI);
// We're looking for an SRA/SHL pair to help generate the instruction
3761 // sshll v0.8h, v0.8b, #0
// The instruction SXTL is an alias of this instruction.
3764 // For example, for DAG like below,
3765 // v2i32 = sra (v2i32 (shl v2i32, 16)), 16
3766 // we can transform it into
// v2i32 = EXTRACT_SUBREG
//   (v4i32 (shuffle_vector
//     (v4i32 (sext (v4i16 (bitcast v2i32)))),
//     undef, (0, 2, u, u)),
//   sub_64)
3773 // With this transformation we expect to generate "SSHLL + UZIP1"
3774 // Sometimes UZIP1 can be optimized away by combining with other context.
3775 int64_t ShrCnt, ShlCnt;
3776 if (N->getOpcode() == ISD::SRA
3777 && (VT == MVT::v2i32 || VT == MVT::v4i16)
3778 && isVShiftRImm(N->getOperand(1), VT, ShrCnt)
3779 && N->getOperand(0).getOpcode() == ISD::SHL
3780 && isVShiftRImm(N->getOperand(0).getOperand(1), VT, ShlCnt)) {
3781 SDValue Src = N->getOperand(0).getOperand(0);
3782 if (VT == MVT::v2i32 && ShrCnt == 16 && ShlCnt == 16) {
3783 // sext_inreg(v2i32, v2i16)
// We essentially only care about the Mask {0, 2, u, u}
3785 int Mask[4] = {0, 2, 4, 6};
3786 return GenForSextInreg(N, DCI, MVT::v4i16, MVT::v4i32, MVT::v2i32,
3789 else if (VT == MVT::v2i32 && ShrCnt == 24 && ShlCnt == 24) {
// sext_inreg(v2i32, v2i8)
// We essentially only care about the Mask {0, u, 4, u, u, u, u, u}
3792 int Mask[8] = {0, 2, 4, 6, 8, 10, 12, 14};
3793 return GenForSextInreg(N, DCI, MVT::v8i8, MVT::v8i16, MVT::v2i32,
3796 else if (VT == MVT::v4i16 && ShrCnt == 8 && ShlCnt == 8) {
3797 // sext_inreg(v4i16, v4i8)
// We essentially only care about the Mask {0, 2, 4, 6, u, u, u, u}
3799 int Mask[8] = {0, 2, 4, 6, 8, 10, 12, 14};
3800 return GenForSextInreg(N, DCI, MVT::v8i8, MVT::v8i16, MVT::v4i16,
3805 // Nothing to be done for scalar shifts.
3806 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3807 if (!VT.isVector() || !TLI.isTypeLegal(VT))
3810 assert(ST->hasNEON() && "unexpected vector shift");
3813 switch (N->getOpcode()) {
3815 llvm_unreachable("unexpected shift opcode");
3818 if (isVShiftLImm(N->getOperand(1), VT, Cnt)) {
3820 DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
3821 DAG.getConstant(Cnt, MVT::i32));
3822 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0), RHS);
3828 if (isVShiftRImm(N->getOperand(1), VT, Cnt)) {
3830 DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
3831 DAG.getConstant(Cnt, MVT::i32));
3832 return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N->getOperand(0), RHS);
/// Target-specific DAG combining for intrinsics.
3841 static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
3842 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3846 // Don't do anything for most intrinsics.
3849 case Intrinsic::arm_neon_vqshifts:
3850 case Intrinsic::arm_neon_vqshiftu:
3851 EVT VT = N->getOperand(1).getValueType();
3853 if (!isVShiftLImm(N->getOperand(2), VT, Cnt))
3855 unsigned VShiftOpc = (IntNo == Intrinsic::arm_neon_vqshifts)
3856 ? AArch64ISD::NEON_QSHLs
3857 : AArch64ISD::NEON_QSHLu;
3858 return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0),
3859 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
3865 /// Target-specific DAG combine function for NEON load/store intrinsics
3866 /// to merge base address updates.
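/// For example (illustrative IR, roughly):
///   %v = call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %p, i32 4)
///   %p.next = getelementptr i8* %p, i64 16
/// The 16-byte increment matches the memory footprint of the vld1, so the
/// add folds into a single NEON_LD1_UPD node that also produces the
/// post-incremented address.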
3867 static SDValue CombineBaseUpdate(SDNode *N,
3868 TargetLowering::DAGCombinerInfo &DCI) {
3869 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
3872 SelectionDAG &DAG = DCI.DAG;
3873 bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
3874 N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
3875 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1);
3876 SDValue Addr = N->getOperand(AddrOpIdx);
3878 // Search for a use of the address operand that is an increment.
3879 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
3880 UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
3882 if (User->getOpcode() != ISD::ADD ||
3883 UI.getUse().getResNo() != Addr.getResNo())
3886 // Check that the add is independent of the load/store. Otherwise, folding
3887 // it would create a cycle.
3888 if (User->isPredecessorOf(N) || N->isPredecessorOf(User))
3891 // Find the new opcode for the updating load/store.
3893 bool isLaneOp = false;
3894 unsigned NewOpc = 0;
3895 unsigned NumVecs = 0;
3897 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
3899 default: llvm_unreachable("unexpected intrinsic for Neon base update");
3900 case Intrinsic::arm_neon_vld1: NewOpc = AArch64ISD::NEON_LD1_UPD;
3902 case Intrinsic::arm_neon_vld2: NewOpc = AArch64ISD::NEON_LD2_UPD;
3904 case Intrinsic::arm_neon_vld3: NewOpc = AArch64ISD::NEON_LD3_UPD;
3906 case Intrinsic::arm_neon_vld4: NewOpc = AArch64ISD::NEON_LD4_UPD;
3908 case Intrinsic::arm_neon_vst1: NewOpc = AArch64ISD::NEON_ST1_UPD;
3909 NumVecs = 1; isLoad = false; break;
3910 case Intrinsic::arm_neon_vst2: NewOpc = AArch64ISD::NEON_ST2_UPD;
3911 NumVecs = 2; isLoad = false; break;
3912 case Intrinsic::arm_neon_vst3: NewOpc = AArch64ISD::NEON_ST3_UPD;
3913 NumVecs = 3; isLoad = false; break;
3914 case Intrinsic::arm_neon_vst4: NewOpc = AArch64ISD::NEON_ST4_UPD;
3915 NumVecs = 4; isLoad = false; break;
3916 case Intrinsic::aarch64_neon_vld1x2: NewOpc = AArch64ISD::NEON_LD1x2_UPD;
3918 case Intrinsic::aarch64_neon_vld1x3: NewOpc = AArch64ISD::NEON_LD1x3_UPD;
3920 case Intrinsic::aarch64_neon_vld1x4: NewOpc = AArch64ISD::NEON_LD1x4_UPD;
3922 case Intrinsic::aarch64_neon_vst1x2: NewOpc = AArch64ISD::NEON_ST1x2_UPD;
3923 NumVecs = 2; isLoad = false; break;
3924 case Intrinsic::aarch64_neon_vst1x3: NewOpc = AArch64ISD::NEON_ST1x3_UPD;
3925 NumVecs = 3; isLoad = false; break;
3926 case Intrinsic::aarch64_neon_vst1x4: NewOpc = AArch64ISD::NEON_ST1x4_UPD;
3927 NumVecs = 4; isLoad = false; break;
3928 case Intrinsic::arm_neon_vld2lane: NewOpc = AArch64ISD::NEON_LD2LN_UPD;
3929 NumVecs = 2; isLaneOp = true; break;
3930 case Intrinsic::arm_neon_vld3lane: NewOpc = AArch64ISD::NEON_LD3LN_UPD;
3931 NumVecs = 3; isLaneOp = true; break;
3932 case Intrinsic::arm_neon_vld4lane: NewOpc = AArch64ISD::NEON_LD4LN_UPD;
3933 NumVecs = 4; isLaneOp = true; break;
3934 case Intrinsic::arm_neon_vst2lane: NewOpc = AArch64ISD::NEON_ST2LN_UPD;
3935 NumVecs = 2; isLoad = false; isLaneOp = true; break;
3936 case Intrinsic::arm_neon_vst3lane: NewOpc = AArch64ISD::NEON_ST3LN_UPD;
3937 NumVecs = 3; isLoad = false; isLaneOp = true; break;
3938 case Intrinsic::arm_neon_vst4lane: NewOpc = AArch64ISD::NEON_ST4LN_UPD;
3939 NumVecs = 4; isLoad = false; isLaneOp = true; break;
3943 switch (N->getOpcode()) {
3944 default: llvm_unreachable("unexpected opcode for Neon base update");
3945 case AArch64ISD::NEON_LD2DUP: NewOpc = AArch64ISD::NEON_LD2DUP_UPD;
3947 case AArch64ISD::NEON_LD3DUP: NewOpc = AArch64ISD::NEON_LD3DUP_UPD;
3949 case AArch64ISD::NEON_LD4DUP: NewOpc = AArch64ISD::NEON_LD4DUP_UPD;
3954 // Find the size of memory referenced by the load/store.
3957 VecTy = N->getValueType(0);
3959 VecTy = N->getOperand(AddrOpIdx + 1).getValueType();
3960 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
3962 NumBytes /= VecTy.getVectorNumElements();
3964 // If the increment is a constant, it must match the memory ref size.
3965 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
3966 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
3967 uint32_t IncVal = CInc->getZExtValue();
3968 if (IncVal != NumBytes)
3970 Inc = DAG.getTargetConstant(IncVal, MVT::i32);
3973 // Create the new updating load/store node.
3975 unsigned NumResultVecs = (isLoad ? NumVecs : 0);
3977 for (n = 0; n < NumResultVecs; ++n)
3979 Tys[n++] = MVT::i64;
3980 Tys[n] = MVT::Other;
3981 SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs + 2);
3982 SmallVector<SDValue, 8> Ops;
3983 Ops.push_back(N->getOperand(0)); // incoming chain
3984 Ops.push_back(N->getOperand(AddrOpIdx));
3986 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) {
3987 Ops.push_back(N->getOperand(i));
3989 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
3990 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys,
3991 Ops.data(), Ops.size(),
3992 MemInt->getMemoryVT(),
3993 MemInt->getMemOperand());
3996 std::vector<SDValue> NewResults;
3997 for (unsigned i = 0; i < NumResultVecs; ++i) {
3998 NewResults.push_back(SDValue(UpdN.getNode(), i));
4000 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain
4001 DCI.CombineTo(N, NewResults);
4002 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
4009 /// For a VDUPLANE node N, check if its source operand is a vldN-lane (N > 1)
4010 /// intrinsic, and if all the other uses of that intrinsic are also VDUPLANEs.
4011 /// If so, combine them to a vldN-dup operation and return true.
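/// For example (illustrative): if every use of a vld2lane of lane 1 is a
/// NEON_VDUPLANE of that same lane, the load only ever feeds splats, so it
/// can be replaced by a NEON_LD2DUP that loads one element per register and
/// replicates it across all lanes.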
4012 static SDValue CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
4013 SelectionDAG &DAG = DCI.DAG;
4014 EVT VT = N->getValueType(0);
4016 // Check if the VDUPLANE operand is a vldN-dup intrinsic.
4017 SDNode *VLD = N->getOperand(0).getNode();
4018 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
4020 unsigned NumVecs = 0;
4021 unsigned NewOpc = 0;
4022 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
4023 if (IntNo == Intrinsic::arm_neon_vld2lane) {
4025 NewOpc = AArch64ISD::NEON_LD2DUP;
4026 } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
4028 NewOpc = AArch64ISD::NEON_LD3DUP;
4029 } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
4031 NewOpc = AArch64ISD::NEON_LD4DUP;
4036 // First check that all the vldN-lane uses are VDUPLANEs and that the lane
4037 // numbers match the load.
4038 unsigned VLDLaneNo =
4039 cast<ConstantSDNode>(VLD->getOperand(NumVecs + 3))->getZExtValue();
4040 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
4042 // Ignore uses of the chain result.
4043 if (UI.getUse().getResNo() == NumVecs)
4046 if (User->getOpcode() != AArch64ISD::NEON_VDUPLANE ||
4047 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
4051 // Create the vldN-dup node.
4054 for (n = 0; n < NumVecs; ++n)
4056 Tys[n] = MVT::Other;
4057 SDVTList SDTys = DAG.getVTList(Tys, NumVecs + 1);
4058 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
4059 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
4060 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, Ops, 2,
4061 VLDMemInt->getMemoryVT(),
4062 VLDMemInt->getMemOperand());
4065 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
4067 unsigned ResNo = UI.getUse().getResNo();
4068 // Ignore uses of the chain result.
4069 if (ResNo == NumVecs)
4072 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
4075 // Now the vldN-lane intrinsic is dead except for its chain result.
4076 // Update uses of the chain.
4077 std::vector<SDValue> VLDDupResults;
4078 for (unsigned n = 0; n < NumVecs; ++n)
4079 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
4080 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
4081 DCI.CombineTo(VLD, VLDDupResults);
4083 return SDValue(N, 0);
4087 AArch64TargetLowering::PerformDAGCombine(SDNode *N,
4088 DAGCombinerInfo &DCI) const {
4089 switch (N->getOpcode()) {
4091 case ISD::AND: return PerformANDCombine(N, DCI);
4092 case ISD::OR: return PerformORCombine(N, DCI, getSubtarget());
4096 return PerformShiftCombine(N, DCI, getSubtarget());
4097 case ISD::INTRINSIC_WO_CHAIN:
4098 return PerformIntrinsicCombine(N, DCI.DAG);
4099 case AArch64ISD::NEON_VDUPLANE:
4100 return CombineVLDDUP(N, DCI);
4101 case AArch64ISD::NEON_LD2DUP:
4102 case AArch64ISD::NEON_LD3DUP:
4103 case AArch64ISD::NEON_LD4DUP:
4104 return CombineBaseUpdate(N, DCI);
4105 case ISD::INTRINSIC_VOID:
4106 case ISD::INTRINSIC_W_CHAIN:
4107 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
4108 case Intrinsic::arm_neon_vld1:
4109 case Intrinsic::arm_neon_vld2:
4110 case Intrinsic::arm_neon_vld3:
4111 case Intrinsic::arm_neon_vld4:
4112 case Intrinsic::arm_neon_vst1:
4113 case Intrinsic::arm_neon_vst2:
4114 case Intrinsic::arm_neon_vst3:
4115 case Intrinsic::arm_neon_vst4:
4116 case Intrinsic::arm_neon_vld2lane:
4117 case Intrinsic::arm_neon_vld3lane:
4118 case Intrinsic::arm_neon_vld4lane:
4119 case Intrinsic::aarch64_neon_vld1x2:
4120 case Intrinsic::aarch64_neon_vld1x3:
4121 case Intrinsic::aarch64_neon_vld1x4:
4122 case Intrinsic::aarch64_neon_vst1x2:
4123 case Intrinsic::aarch64_neon_vst1x3:
4124 case Intrinsic::aarch64_neon_vst1x4:
4125 case Intrinsic::arm_neon_vst2lane:
4126 case Intrinsic::arm_neon_vst3lane:
4127 case Intrinsic::arm_neon_vst4lane:
4128 return CombineBaseUpdate(N, DCI);
4137 AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
4138 VT = VT.getScalarType();
4143 switch (VT.getSimpleVT().SimpleTy) {
// Check whether a shuffle_vector can be represented as a concat_vector.
bool AArch64TargetLowering::isConcatVector(SDValue Op, SelectionDAG &DAG,
4159 SDValue V0, SDValue V1,
4161 SDValue &Res) const {
4163 EVT VT = Op.getValueType();
4164 unsigned NumElts = VT.getVectorNumElements();
4165 unsigned V0NumElts = V0.getValueType().getVectorNumElements();
bool isConcat = true;
4167 bool splitV0 = false;
for (int I = 0, E = NumElts; I != E; I++) {
4170 if (Mask[I] != I + offset) {
if (I && !splitV0 && Mask[I] == I + (int)V0NumElts / 2) {
4173 offset = V0NumElts / 2;
isConcat = false;
if (isConcat) {
4181 EVT CastVT = EVT::getVectorVT(*DAG.getContext(),
4182 VT.getVectorElementType(), NumElts / 2);
if (CastVT.getSizeInBits() < 64)
4187 assert(V0NumElts >= NumElts / 2 &&
4188 "invalid operand for extract_subvector!");
4189 V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0,
4190 DAG.getConstant(0, MVT::i64));
4192 if (NumElts != V1.getValueType().getVectorNumElements() * 2) {
4193 assert(V1.getValueType().getVectorNumElements() >= NumElts / 2 &&
4194 "invalid operand for extract_subvector!");
4195 V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1,
4196 DAG.getConstant(0, MVT::i64));
4198 Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, V0, V1);
// Check whether a BUILD_VECTOR can be represented as a shuffle vector.
// This shuffle vector may not be legalized yet, so the lengths of its
// operands and of its result may differ.
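// For example (illustrative): a v4i16 BUILD_VECTOR whose operands are
// extract_element(%a, 0), extract_element(%a, 1), extract_element(%b, 0) and
// extract_element(%b, 1) yields V0 = %a, V1 = %b and Mask = {0, 1, 4, 5},
// with V1's lanes offset by V0's element count.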
4207 bool AArch64TargetLowering::isKnownShuffleVector(SDValue Op, SelectionDAG &DAG,
4208 SDValue &V0, SDValue &V1,
4211 EVT VT = Op.getValueType();
4212 unsigned NumElts = VT.getVectorNumElements();
4213 unsigned V0NumElts = 0;
// Check that all elements are extracted from at most two vectors.
4216 for (unsigned i = 0; i < NumElts; ++i) {
4217 SDValue Elt = Op.getOperand(i);
4218 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4219 Elt.getOperand(0).getValueType().getVectorElementType() !=
4220 VT.getVectorElementType())
4223 if (V0.getNode() == 0) {
4224 V0 = Elt.getOperand(0);
4225 V0NumElts = V0.getValueType().getVectorNumElements();
4227 if (Elt.getOperand(0) == V0) {
4228 Mask[i] = (cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue());
4230 } else if (V1.getNode() == 0) {
4231 V1 = Elt.getOperand(0);
4233 if (Elt.getOperand(0) == V1) {
4234 unsigned Lane = cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue();
4235 Mask[i] = (Lane + V0NumElts);
4244 // If this is a case we can't handle, return null and let the default
4245 // expansion code take care of it.
4247 AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
4248 const AArch64Subtarget *ST) const {
4250 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
4252 EVT VT = Op.getValueType();
4254 APInt SplatBits, SplatUndef;
4255 unsigned SplatBitSize;
bool UseNeonMov = VT.getSizeInBits() >= 64;
4260 // Note we favor lowering MOVI over MVNI.
4261 // This has implications on the definition of patterns in TableGen to select
4262 // BIC immediate instructions but not ORR immediate instructions.
4263 // If this lowering order is changed, TableGen patterns for BIC immediate and
4264 // ORR immediate instructions have to be updated.
4266 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
4267 if (SplatBitSize <= 64) {
4268 // First attempt to use vector immediate-form MOVI
4271 unsigned OpCmode = 0;
4273 if (isNeonModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
4274 SplatBitSize, DAG, VT.is128BitVector(),
4275 Neon_Mov_Imm, NeonMovVT, Imm, OpCmode)) {
4276 SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
4277 SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);
4279 if (ImmVal.getNode() && OpCmodeVal.getNode()) {
4280 SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MOVIMM, DL, NeonMovVT,
4281 ImmVal, OpCmodeVal);
4282 return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
4286 // Then attempt to use vector immediate-form MVNI
4287 uint64_t NegatedImm = (~SplatBits).getZExtValue();
4288 if (isNeonModifiedImm(NegatedImm, SplatUndef.getZExtValue(), SplatBitSize,
4289 DAG, VT.is128BitVector(), Neon_Mvn_Imm, NeonMovVT,
4291 SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
4292 SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);
4293 if (ImmVal.getNode() && OpCmodeVal.getNode()) {
4294 SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MVNIMM, DL, NeonMovVT,
4295 ImmVal, OpCmodeVal);
4296 return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
4300 // Attempt to use vector immediate-form FMOV
4301 if (((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) ||
4302 (VT == MVT::v2f64 && SplatBitSize == 64)) {
4304 SplatBitSize == 32 ? APFloat::IEEEsingle : APFloat::IEEEdouble,
4307 if (A64Imms::isFPImm(RealVal, ImmVal)) {
4308 SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32);
4309 return DAG.getNode(AArch64ISD::NEON_FMOVIMM, DL, VT, Val);
4315 unsigned NumElts = VT.getVectorNumElements();
4316 bool isOnlyLowElement = true;
4317 bool usesOnlyOneValue = true;
4318 bool hasDominantValue = false;
4319 bool isConstant = true;
// Map of the number of times a particular SDValue appears in the
// element list.
4323 DenseMap<SDValue, unsigned> ValueCounts;
4325 for (unsigned i = 0; i < NumElts; ++i) {
4326 SDValue V = Op.getOperand(i);
4327 if (V.getOpcode() == ISD::UNDEF)
4330 isOnlyLowElement = false;
4331 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
4334 ValueCounts.insert(std::make_pair(V, 0));
4335 unsigned &Count = ValueCounts[V];
4337 // Is this value dominant? (takes up more than half of the lanes)
4338 if (++Count > (NumElts / 2)) {
4339 hasDominantValue = true;
4343 if (ValueCounts.size() != 1)
4344 usesOnlyOneValue = false;
4345 if (!Value.getNode() && ValueCounts.size() > 0)
4346 Value = ValueCounts.begin()->first;
4348 if (ValueCounts.size() == 0)
4349 return DAG.getUNDEF(VT);
4351 if (isOnlyLowElement)
4352 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
4354 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4355 if (hasDominantValue && EltSize <= 64) {
4356 // Use VDUP for non-constant splats.
4360 // If we are DUPing a value that comes directly from a vector, we could
4361 // just use DUPLANE. We can only do this if the lane being extracted
4362 // is at a constant index, as the DUP from lane instructions only have
4363 // constant-index forms.
4365 // If there is a TRUNCATE between EXTRACT_VECTOR_ELT and DUP, we can
// remove the TRUNCATE for DUPLANE by adjusting the source vector to an
// appropriate vector type and lane index.
// FIXME: v1i8, v1i16 and v1i32 are currently legal vector types; if they
// are no longer legal, there is no need to check that the source type is
// at least 64 bits wide.
4373 if (Value->getOpcode() == ISD::TRUNCATE)
4374 V = Value->getOperand(0);
4375 if (V->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
4376 isa<ConstantSDNode>(V->getOperand(1)) &&
4377 V->getOperand(0).getValueType().getSizeInBits() >= 64) {
// If the element size of the source vector is larger than the DUPLANE
// element size, we can do the transformation by:
// 1) bitcasting the source register to a vector with smaller elements
// 2) multiplying the lane index by SrcEltSize/ResEltSize
4383 // For example, we can lower
4384 // "v8i16 vdup_lane(v4i32, 1)"
4386 // "v8i16 vdup_lane(v8i16 bitcast(v4i32), 2)".
4387 SDValue SrcVec = V->getOperand(0);
4388 unsigned SrcEltSize =
4389 SrcVec.getValueType().getVectorElementType().getSizeInBits();
4390 unsigned ResEltSize = VT.getVectorElementType().getSizeInBits();
4391 if (SrcEltSize > ResEltSize) {
4392 assert((SrcEltSize % ResEltSize == 0) && "Invalid element size");
4394 unsigned SrcSize = SrcVec.getValueType().getSizeInBits();
4395 unsigned ResSize = VT.getSizeInBits();
4397 if (SrcSize > ResSize) {
4398 assert((SrcSize % ResSize == 0) && "Invalid vector size");
4400 EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(),
4401 SrcSize / ResEltSize);
4402 BitCast = DAG.getNode(ISD::BITCAST, DL, CastVT, SrcVec);
4404 assert((SrcSize == ResSize) && "Invalid vector size of source vec");
4405 BitCast = DAG.getNode(ISD::BITCAST, DL, VT, SrcVec);
4408 unsigned LaneIdx = V->getConstantOperandVal(1);
4410 DAG.getConstant((SrcEltSize / ResEltSize) * LaneIdx, MVT::i64);
4411 N = DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT, BitCast, Lane);
4413 assert((SrcEltSize == ResEltSize) &&
4414 "Invalid element size of source vec");
4415 N = DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT, V->getOperand(0),
4419 N = DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value);
4421 if (!usesOnlyOneValue) {
4422 // The dominant value was splatted as 'N', but we now have to insert
4423 // all differing elements.
4424 for (unsigned I = 0; I < NumElts; ++I) {
4425 if (Op.getOperand(I) == Value)
4427 SmallVector<SDValue, 3> Ops;
4429 Ops.push_back(Op.getOperand(I));
4430 Ops.push_back(DAG.getConstant(I, MVT::i64));
4431 N = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, &Ops[0], 3);
4436 if (usesOnlyOneValue && isConstant) {
4437 return DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value);
4440 // If all elements are constants and the case above didn't get hit, fall back
// to the default expansion, which will generate a load from the constant
// pool.
// Try to lower this through the VECTOR_SHUFFLE lowering path.
4449 if (isKnownShuffleVector(Op, DAG, V0, V1, Mask)) {
4450 unsigned V0NumElts = V0.getValueType().getVectorNumElements();
4451 if (!V1.getNode() && V0NumElts == NumElts * 2) {
4452 V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0,
4453 DAG.getConstant(NumElts, MVT::i64));
4454 V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0,
4455 DAG.getConstant(0, MVT::i64));
4456 V0NumElts = V0.getValueType().getVectorNumElements();
4459 if (V1.getNode() && NumElts == V0NumElts &&
4460 V0NumElts == V1.getValueType().getVectorNumElements()) {
4461 SDValue Shuffle = DAG.getVectorShuffle(VT, DL, V0, V1, Mask);
if (Shuffle.getOpcode() != ISD::VECTOR_SHUFFLE)
4465 return LowerVECTOR_SHUFFLE(Shuffle, DAG);
if (isConcatVector(Op, DAG, V0, V1, Mask, Res))
4473 // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
4474 // know the default expansion would otherwise fall back on something even
4475 // worse. For a vector with one or two non-undef values, that's
4476 // scalar_to_vector for the elements followed by a shuffle (provided the
4477 // shuffle is valid for the target) and materialization element by element
4478 // on the stack followed by a load for everything else.
4479 if (!isConstant && !usesOnlyOneValue) {
4480 SDValue Vec = DAG.getUNDEF(VT);
4481 for (unsigned i = 0 ; i < NumElts; ++i) {
4482 SDValue V = Op.getOperand(i);
4483 if (V.getOpcode() == ISD::UNDEF)
4485 SDValue LaneIdx = DAG.getConstant(i, MVT::i64);
4486 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V, LaneIdx);
4493 /// isREVMask - Check if a vector shuffle corresponds to a REV
4494 /// instruction with the specified blocksize. (The order of the elements
4495 /// within each block of the vector is reversed.)
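/// For example (illustrative): on v8i16, REV32 reverses the two 16-bit
/// elements inside each 32-bit block, so the matching shuffle mask is
/// <1, 0, 3, 2, 5, 4, 7, 6>.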
4496 static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
4497 assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
4498 "Only possible block sizes for REV are: 16, 32, 64");
4500 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
4504 unsigned NumElts = VT.getVectorNumElements();
4505 unsigned BlockElts = M[0] + 1;
4506 // If the first shuffle index is UNDEF, be optimistic.
4508 BlockElts = BlockSize / EltSz;
4510 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
4513 for (unsigned i = 0; i < NumElts; ++i) {
4515 continue; // ignore UNDEF indices
4516 if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
// isPermuteMask - Check whether the vector shuffle matches UZP, ZIP or
// TRN instructions.
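// For example (illustrative): on v4i16, UZP1 keeps the even lanes of the
// concatenation <V1, V2>, matching the mask <0, 2, 4, 6>, while ZIP1
// interleaves the low halves, matching <0, 4, 1, 5>.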
4525 static unsigned isPermuteMask(ArrayRef<int> M, EVT VT, bool isV2undef) {
4526 unsigned NumElts = VT.getVectorNumElements();
4530 bool ismatch = true;
4533 for (unsigned i = 0; i < NumElts; ++i) {
4534 unsigned answer = i * 2;
4535 if (isV2undef && answer >= NumElts)
4537 if (M[i] != -1 && (unsigned)M[i] != answer) {
4543 return AArch64ISD::NEON_UZP1;
4547 for (unsigned i = 0; i < NumElts; ++i) {
4548 unsigned answer = i * 2 + 1;
4549 if (isV2undef && answer >= NumElts)
4551 if (M[i] != -1 && (unsigned)M[i] != answer) {
4557 return AArch64ISD::NEON_UZP2;
4561 for (unsigned i = 0; i < NumElts; ++i) {
4562 unsigned answer = i / 2 + NumElts * (i % 2);
4563 if (isV2undef && answer >= NumElts)
4565 if (M[i] != -1 && (unsigned)M[i] != answer) {
4571 return AArch64ISD::NEON_ZIP1;
4575 for (unsigned i = 0; i < NumElts; ++i) {
4576 unsigned answer = (NumElts + i) / 2 + NumElts * (i % 2);
4577 if (isV2undef && answer >= NumElts)
4579 if (M[i] != -1 && (unsigned)M[i] != answer) {
4585 return AArch64ISD::NEON_ZIP2;
4589 for (unsigned i = 0; i < NumElts; ++i) {
4590 unsigned answer = i + (NumElts - 1) * (i % 2);
4591 if (isV2undef && answer >= NumElts)
4593 if (M[i] != -1 && (unsigned)M[i] != answer) {
4599 return AArch64ISD::NEON_TRN1;
4603 for (unsigned i = 0; i < NumElts; ++i) {
4604 unsigned answer = 1 + i + (NumElts - 1) * (i % 2);
4605 if (isV2undef && answer >= NumElts)
4607 if (M[i] != -1 && (unsigned)M[i] != answer) {
4613 return AArch64ISD::NEON_TRN2;
4619 AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
4620 SelectionDAG &DAG) const {
4621 SDValue V1 = Op.getOperand(0);
4622 SDValue V2 = Op.getOperand(1);
4624 EVT VT = Op.getValueType();
4625 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
4627 // Convert shuffles that are directly supported on NEON to target-specific
4628 // DAG nodes, instead of keeping them as shuffles and matching them again
4629 // during code selection. This is more efficient and avoids the possibility
4630 // of inconsistencies between legalization and selection.
4631 ArrayRef<int> ShuffleMask = SVN->getMask();
4633 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4637 if (isREVMask(ShuffleMask, VT, 64))
4638 return DAG.getNode(AArch64ISD::NEON_REV64, dl, VT, V1);
4639 if (isREVMask(ShuffleMask, VT, 32))
4640 return DAG.getNode(AArch64ISD::NEON_REV32, dl, VT, V1);
4641 if (isREVMask(ShuffleMask, VT, 16))
4642 return DAG.getNode(AArch64ISD::NEON_REV16, dl, VT, V1);
4645 if (V2.getOpcode() == ISD::UNDEF)
4646 ISDNo = isPermuteMask(ShuffleMask, VT, true);
4648 ISDNo = isPermuteMask(ShuffleMask, VT, false);
4651 if (V2.getOpcode() == ISD::UNDEF)
4652 return DAG.getNode(ISDNo, dl, VT, V1, V1);
4654 return DAG.getNode(ISDNo, dl, VT, V1, V2);
if (isConcatVector(Op, DAG, V1, V2, &ShuffleMask[0], Res))
// If the elements of the shuffle mask are all the same constant, we can
// transform it into either NEON_VDUP or NEON_VDUPLANE.
4663 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
4664 int Lane = SVN->getSplatIndex();
// If this is an undef splat, generate it via "just" VDUP, if possible.
4666 if (Lane == -1) Lane = 0;
4668 // Test if V1 is a SCALAR_TO_VECTOR.
4669 if (V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
4670 return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT, V1.getOperand(0));
4672 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR.
4673 if (V1.getOpcode() == ISD::BUILD_VECTOR) {
4674 bool IsScalarToVector = true;
4675 for (unsigned i = 0, e = V1.getNumOperands(); i != e; ++i)
4676 if (V1.getOperand(i).getOpcode() != ISD::UNDEF &&
4677 i != (unsigned)Lane) {
4678 IsScalarToVector = false;
4681 if (IsScalarToVector)
4682 return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT,
4683 V1.getOperand(Lane));
// Test if V1 is an EXTRACT_SUBVECTOR.
4687 if (V1.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
4688 int ExtLane = cast<ConstantSDNode>(V1.getOperand(1))->getZExtValue();
4689 return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1.getOperand(0),
4690 DAG.getConstant(Lane + ExtLane, MVT::i64));
4692 // Test if V1 is a CONCAT_VECTORS.
4693 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
4694 V1.getOperand(1).getOpcode() == ISD::UNDEF) {
4695 SDValue Op0 = V1.getOperand(0);
4696 assert((unsigned)Lane < Op0.getValueType().getVectorNumElements() &&
4697 "Invalid vector lane access");
4698 return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, Op0,
4699 DAG.getConstant(Lane, MVT::i64));
4702 return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1,
4703 DAG.getConstant(Lane, MVT::i64));
4706 int Length = ShuffleMask.size();
4707 int V1EltNum = V1.getValueType().getVectorNumElements();
// If the number of V1 elements is the same as the number of shuffle mask
// elements and the shuffle mask values are sequential, we can transform
// it into NEON_VEXTRACT.
4712 if (V1EltNum == Length) {
4713 // Check if the shuffle mask is sequential.
4715 while (ShuffleMask[SkipUndef] == -1) {
4718 int CurMask = ShuffleMask[SkipUndef];
4719 if (CurMask >= SkipUndef) {
4720 bool IsSequential = true;
4721 for (int I = SkipUndef; I < Length; ++I) {
4722 if (ShuffleMask[I] != -1 && ShuffleMask[I] != CurMask) {
4723 IsSequential = false;
4729 assert((EltSize % 8 == 0) && "Bitsize of vector element is incorrect");
4730 unsigned VecSize = EltSize * V1EltNum;
4731 unsigned Index = (EltSize / 8) * (ShuffleMask[SkipUndef] - SkipUndef);
4732 if (VecSize == 64 || VecSize == 128)
4733 return DAG.getNode(AArch64ISD::NEON_VEXTRACT, dl, VT, V1, V2,
4734 DAG.getConstant(Index, MVT::i64));
// For a shuffle mask like "0, 1, 2, 3, 4, 5, 13, 7", try to generate an
// insert by element from V2 into V1.
// If the shuffle mask is like "0, 1, 10, 11, 12, 13, 14, 15", V2 would be
// a better base than V1, since fewer inserts would be needed; so we count
// the elements to be inserted for both V1 and V2 and select the one that
// needs fewer inserts as the target.
4746 // Collect elements need to be inserted and their index.
4747 SmallVector<int, 8> NV1Elt;
4748 SmallVector<int, 8> N1Index;
4749 SmallVector<int, 8> NV2Elt;
4750 SmallVector<int, 8> N2Index;
4751 for (int I = 0; I != Length; ++I) {
4752 if (ShuffleMask[I] != I) {
4753 NV1Elt.push_back(ShuffleMask[I]);
4754 N1Index.push_back(I);
4757 for (int I = 0; I != Length; ++I) {
4758 if (ShuffleMask[I] != (I + V1EltNum)) {
4759 NV2Elt.push_back(ShuffleMask[I]);
4760 N2Index.push_back(I);
// Decide which vector to insert into. If all lanes mismatch, neither V1 nor
// V2 can serve as the base.
4767 SmallVector<int, 8> InsMasks = NV1Elt;
4768 SmallVector<int, 8> InsIndex = N1Index;
4769 if ((int)NV1Elt.size() != Length || (int)NV2Elt.size() != Length) {
4770 if (NV1Elt.size() > NV2Elt.size()) {
4776 InsV = DAG.getNode(ISD::UNDEF, dl, VT);
4779 for (int I = 0, E = InsMasks.size(); I != E; ++I) {
4781 int Mask = InsMasks[I];
4782 if (Mask >= V1EltNum) {
// Any value type smaller than i32 is illegal in AArch64, and this lowering
// function is called after the legalize pass, so we need to legalize the
// extracted element type here.
4790 if (VT.getVectorElementType().isFloatingPoint())
4791 EltVT = (EltSize == 64) ? MVT::f64 : MVT::f32;
4793 EltVT = (EltSize == 64) ? MVT::i64 : MVT::i32;
4796 ExtV = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, ExtV,
4797 DAG.getConstant(Mask, MVT::i64));
4798 InsV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, InsV, ExtV,
4799 DAG.getConstant(InsIndex[I], MVT::i64));
4805 AArch64TargetLowering::ConstraintType
4806 AArch64TargetLowering::getConstraintType(const std::string &Constraint) const {
4807 if (Constraint.size() == 1) {
4808 switch (Constraint[0]) {
4810 case 'w': // An FP/SIMD vector register
4811 return C_RegisterClass;
4812 case 'I': // Constant that can be used with an ADD instruction
4813 case 'J': // Constant that can be used with a SUB instruction
4814 case 'K': // Constant that can be used with a 32-bit logical instruction
4815 case 'L': // Constant that can be used with a 64-bit logical instruction
4816 case 'M': // Constant that can be used as a 32-bit MOV immediate
4817 case 'N': // Constant that can be used as a 64-bit MOV immediate
4818 case 'Y': // Floating point constant zero
4819 case 'Z': // Integer constant zero
4821 case 'Q': // A memory reference with base register and no offset
4823 case 'S': // A symbolic address
4828 // FIXME: Ump, Utf, Usa, Ush
4829 // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes,
4830 // whatever they may be
4831 // Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be
4832 // Usa: An absolute symbolic address
4833 // Ush: The high part (bits 32:12) of a pc-relative symbolic address
4834 assert(Constraint != "Ump" && Constraint != "Utf" && Constraint != "Usa"
4835 && Constraint != "Ush" && "Unimplemented constraints");
4837 return TargetLowering::getConstraintType(Constraint);
4840 TargetLowering::ConstraintWeight
4841 AArch64TargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &Info,
4842 const char *Constraint) const {
4844 llvm_unreachable("Constraint weight unimplemented");
4848 AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
4849 std::string &Constraint,
4850 std::vector<SDValue> &Ops,
4851 SelectionDAG &DAG) const {
4852 SDValue Result(0, 0);
4854 // Only length 1 constraints are C_Other.
4855 if (Constraint.size() != 1) return;
// Only C_Other constraints get lowered like this. That means constants for us,
4858 // so return early if there's no hope the constraint can be lowered.
switch (Constraint[0]) {
4862 case 'I': case 'J': case 'K': case 'L':
4863 case 'M': case 'N': case 'Z': {
4864 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4868 uint64_t CVal = C->getZExtValue();
4871 switch (Constraint[0]) {
4873 // FIXME: 'M' and 'N' are MOV pseudo-insts -- unsupported in assembly. 'J'
4874 // is a peculiarly useless SUB constraint.
4875 llvm_unreachable("Unimplemented C_Other constraint");
4881 if (A64Imms::isLogicalImm(32, CVal, Bits))
4885 if (A64Imms::isLogicalImm(64, CVal, Bits))
4894 Result = DAG.getTargetConstant(CVal, Op.getValueType());
4898 // An absolute symbolic address or label reference.
4899 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
4900 Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
4901 GA->getValueType(0));
4902 } else if (const BlockAddressSDNode *BA
4903 = dyn_cast<BlockAddressSDNode>(Op)) {
4904 Result = DAG.getTargetBlockAddress(BA->getBlockAddress(),
4905 BA->getValueType(0));
4906 } else if (const ExternalSymbolSDNode *ES
4907 = dyn_cast<ExternalSymbolSDNode>(Op)) {
4908 Result = DAG.getTargetExternalSymbol(ES->getSymbol(),
4909 ES->getValueType(0));
4915 if (const ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
4916 if (CFP->isExactlyValue(0.0)) {
4917 Result = DAG.getTargetConstantFP(0.0, CFP->getValueType(0));
4924 if (Result.getNode()) {
4925 Ops.push_back(Result);
4929 // It's an unknown constraint for us. Let generic code have a go.
4930 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
4933 std::pair<unsigned, const TargetRegisterClass*>
4934 AArch64TargetLowering::getRegForInlineAsmConstraint(
4935 const std::string &Constraint,
4937 if (Constraint.size() == 1) {
4938 switch (Constraint[0]) {
4940 if (VT.getSizeInBits() <= 32)
4941 return std::make_pair(0U, &AArch64::GPR32RegClass);
4942 else if (VT == MVT::i64)
4943 return std::make_pair(0U, &AArch64::GPR64RegClass);
4947 return std::make_pair(0U, &AArch64::FPR16RegClass);
4948 else if (VT == MVT::f32)
4949 return std::make_pair(0U, &AArch64::FPR32RegClass);
4950 else if (VT.getSizeInBits() == 64)
4951 return std::make_pair(0U, &AArch64::FPR64RegClass);
4952 else if (VT.getSizeInBits() == 128)
4953 return std::make_pair(0U, &AArch64::FPR128RegClass);
4958 // Use the default implementation in TargetLowering to convert the register
4959 // constraint into a member of a register class.
4960 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
4963 /// Represent NEON load and store intrinsics as MemIntrinsicNodes.
4964 /// The associated MachineMemOperands record the alignment specified
4965 /// in the intrinsic calls.
4966 bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
4968 unsigned Intrinsic) const {
4969 switch (Intrinsic) {
4970 case Intrinsic::arm_neon_vld1:
4971 case Intrinsic::arm_neon_vld2:
4972 case Intrinsic::arm_neon_vld3:
4973 case Intrinsic::arm_neon_vld4:
4974 case Intrinsic::aarch64_neon_vld1x2:
4975 case Intrinsic::aarch64_neon_vld1x3:
4976 case Intrinsic::aarch64_neon_vld1x4:
4977 case Intrinsic::arm_neon_vld2lane:
4978 case Intrinsic::arm_neon_vld3lane:
4979 case Intrinsic::arm_neon_vld4lane: {
4980 Info.opc = ISD::INTRINSIC_W_CHAIN;
4981 // Conservatively set memVT to the entire set of vectors loaded.
4982 uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
4983 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
4984 Info.ptrVal = I.getArgOperand(0);
4986 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
4987 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
4988 Info.vol = false; // volatile loads with NEON intrinsics not supported
4989 Info.readMem = true;
4990 Info.writeMem = false;
4993 case Intrinsic::arm_neon_vst1:
4994 case Intrinsic::arm_neon_vst2:
4995 case Intrinsic::arm_neon_vst3:
4996 case Intrinsic::arm_neon_vst4:
4997 case Intrinsic::aarch64_neon_vst1x2:
4998 case Intrinsic::aarch64_neon_vst1x3:
4999 case Intrinsic::aarch64_neon_vst1x4:
5000 case Intrinsic::arm_neon_vst2lane:
5001 case Intrinsic::arm_neon_vst3lane:
5002 case Intrinsic::arm_neon_vst4lane: {
5003 Info.opc = ISD::INTRINSIC_VOID;
5004 // Conservatively set memVT to the entire set of vectors stored.
5005 unsigned NumElts = 0;
5006 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
5007 Type *ArgTy = I.getArgOperand(ArgI)->getType();
5008 if (!ArgTy->isVectorTy())
5010 NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8;
5012 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
5013 Info.ptrVal = I.getArgOperand(0);
5015 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
5016 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
5017 Info.vol = false; // volatile stores with NEON intrinsics not supported
5018 Info.readMem = false;
5019 Info.writeMem = true;