//===-- AArch64ISelDAGToDAG.cpp - A dag to dag inst selector for AArch64 --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the AArch64 target.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "aarch64-isel"
#include "AArch64.h"
#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "AArch64TargetMachine.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

//===--------------------------------------------------------------------===//
/// AArch64 specific code to select AArch64 machine instructions for
/// SelectionDAG operations.
///
namespace {

class AArch64DAGToDAGISel : public SelectionDAGISel {
  AArch64TargetMachine &TM;
  const AArch64InstrInfo *TII;

  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

public:
  explicit AArch64DAGToDAGISel(AArch64TargetMachine &tm,
                               CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(tm, OptLevel), TM(tm),
      TII(static_cast<const AArch64InstrInfo*>(TM.getInstrInfo())),
      Subtarget(&TM.getSubtarget<AArch64Subtarget>()) {
  }

  virtual const char *getPassName() const {
    return "AArch64 Instruction Selection";
  }

  // Include the pieces autogenerated from the target description.
#include "AArch64GenDAGISel.inc"

  template<unsigned MemSize>
  bool SelectOffsetUImm12(SDValue N, SDValue &UImm12) {
    const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
    if (!CN || CN->getZExtValue() % MemSize != 0
        || CN->getZExtValue() / MemSize > 0xfff)
      return false;

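    // For example, with MemSize == 8 an offset of 32760 (0xfff * 8) passes the
    // checks above and becomes the scaled immediate 0xfff below, whereas 32761
    // is rejected because it is not a multiple of 8.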
    UImm12 = CurDAG->getTargetConstant(CN->getZExtValue() / MemSize, MVT::i64);
    return true;
  }

  template<unsigned RegWidth>
  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos) {
    return SelectCVTFixedPosOperand(N, FixedPos, RegWidth);
  }

  bool SelectFPZeroOperand(SDValue N, SDValue &Dummy);

  bool SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                unsigned RegWidth);

  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    char ConstraintCode,
                                    std::vector<SDValue> &OutOps);

  bool SelectLogicalImm(SDValue N, SDValue &Imm);

  template<unsigned RegWidth>
  bool SelectTSTBOperand(SDValue N, SDValue &FixedPos) {
    return SelectTSTBOperand(N, FixedPos, RegWidth);
  }

  bool SelectTSTBOperand(SDValue N, SDValue &FixedPos, unsigned RegWidth);

  SDNode *SelectAtomic(SDNode *N, unsigned Op8, unsigned Op16, unsigned Op32,
                       unsigned Op64);

  SDNode *TrySelectToMoveImm(SDNode *N);
  SDNode *LowerToFPLitPool(SDNode *Node);
  SDNode *SelectToLitPool(SDNode *N);

  SDNode* Select(SDNode*);
};
}

bool
AArch64DAGToDAGISel::SelectCVTFixedPosOperand(SDValue N, SDValue &FixedPos,
                                              unsigned RegWidth) {
  const ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(N);
  if (!CN) return false;

  // An FCVT[SU] instruction performs: convertToInt(Val * 2^fbits) where fbits
  // is between 1 and 32 for a destination w-register, or 1 and 64 for an
  // x-register.
  //
  // By this stage, we've detected (fp_to_[su]int (fmul Val, THIS_NODE)) so we
  // want THIS_NODE to be 2^fbits. This is much easier to deal with using
  // integers.
  bool IsExact;

  // fbits is between 1 and 64 in the worst-case, which means the fmul
  // could have 2^64 as an actual operand. Need 65 bits of precision.
  APSInt IntVal(65, true);
  CN->getValueAPF().convertToInteger(IntVal, APFloat::rmTowardZero, &IsExact);

  // N.b. isPowerOf2 also checks for > 0.
  if (!IsExact || !IntVal.isPowerOf2()) return false;
  unsigned FBits = IntVal.logBase2();

  // Checks above should have guaranteed that we haven't lost information in
  // finding FBits, but it must still be in range.
  if (FBits == 0 || FBits > RegWidth) return false;

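  // For example, matching (fp_to_sint (fmul Val, 256.0)) for a 32-bit result
  // gives an exact IntVal of 256, so FBits == 8 and the operand produced below
  // is the constant 56 (i.e. 64 - FBits).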
  FixedPos = CurDAG->getTargetConstant(64 - FBits, MVT::i32);
  return true;
}

bool
AArch64DAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op,
                                                  char ConstraintCode,
                                                  std::vector<SDValue> &OutOps) {
  switch (ConstraintCode) {
  default: llvm_unreachable("Unrecognised AArch64 memory constraint");
  case 'm':
    // FIXME: more freedom is actually permitted for 'm'. We can go
    // hunting for a base and an offset if we want. Of course, since
    // we don't really know how the operand is going to be used we're
    // probably restricted to the load/store pair's simm7 as an offset
    // range anyway.
  case 'Q':
    OutOps.push_back(Op);
  }

  return false;
}

bool
AArch64DAGToDAGISel::SelectFPZeroOperand(SDValue N, SDValue &Dummy) {
  ConstantFPSDNode *Imm = dyn_cast<ConstantFPSDNode>(N);
  if (!Imm || !Imm->getValueAPF().isPosZero())
    return false;

  // Doesn't actually carry any information, but keeps TableGen quiet.
  Dummy = CurDAG->getTargetConstant(0, MVT::i32);
  return true;
}

bool AArch64DAGToDAGISel::SelectLogicalImm(SDValue N, SDValue &Imm) {
  uint32_t Bits;
  uint32_t RegWidth = N.getValueType().getSizeInBits();

  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) return false;

  if (!A64Imms::isLogicalImm(RegWidth, CN->getZExtValue(), Bits))
    return false;

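  // For example, 0x00ff00ff00ff00ff is a valid 64-bit logical immediate (a
  // repeating 16-bit element containing a contiguous run of set bits), whereas
  // an arbitrary value such as 0x12345678 is not and fails the check above.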
  Imm = CurDAG->getTargetConstant(Bits, MVT::i32);
  return true;
}

SDNode *AArch64DAGToDAGISel::TrySelectToMoveImm(SDNode *Node) {
  SDNode *ResNode;
  DebugLoc dl = Node->getDebugLoc();
  EVT DestType = Node->getValueType(0);
  unsigned DestWidth = DestType.getSizeInBits();

  unsigned MOVOpcode;
  EVT MOVType;
  int UImm16, Shift;
  uint32_t LogicalBits;

  uint64_t BitPat = cast<ConstantSDNode>(Node)->getZExtValue();
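  // For example, 0x5000 can be built with a single MOVZ, 0xffffffffffffaaaa
  // with a single MOVN (its bitwise NOT, 0x5555, fits in 16 bits), and
  // 0x00ff00ff00ff00ff with an ORR of the zero register and a logical
  // immediate. Anything needing more than one instruction makes us give up,
  // and the caller falls back to a literal-pool load.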
  if (A64Imms::isMOVZImm(DestWidth, BitPat, UImm16, Shift)) {
    MOVType = DestType;
    MOVOpcode = DestWidth == 64 ? AArch64::MOVZxii : AArch64::MOVZwii;
  } else if (A64Imms::isMOVNImm(DestWidth, BitPat, UImm16, Shift)) {
    MOVType = DestType;
    MOVOpcode = DestWidth == 64 ? AArch64::MOVNxii : AArch64::MOVNwii;
  } else if (DestWidth == 64 && A64Imms::isMOVNImm(32, BitPat, UImm16, Shift)) {
    // To get something like 0x0000_0000_ffff_1234 into a 64-bit register we
    // can use a 32-bit instruction: "movn w0, 0xedcb".
    MOVType = MVT::i32;
    MOVOpcode = AArch64::MOVNwii;
  } else if (A64Imms::isLogicalImm(DestWidth, BitPat, LogicalBits)) {
    MOVOpcode = DestWidth == 64 ? AArch64::ORRxxi : AArch64::ORRwwi;
    uint16_t ZR = DestWidth == 64 ? AArch64::XZR : AArch64::WZR;

    return CurDAG->getMachineNode(MOVOpcode, dl, DestType,
                              CurDAG->getRegister(ZR, DestType),
                              CurDAG->getTargetConstant(LogicalBits, MVT::i32));
  } else {
    // Can't handle it in one instruction. There's scope for permitting two (or
    // more) instructions, but that'll need more thought.
    return NULL;
  }

  ResNode = CurDAG->getMachineNode(MOVOpcode, dl, MOVType,
                                   CurDAG->getTargetConstant(UImm16, MVT::i32),
                                   CurDAG->getTargetConstant(Shift, MVT::i32));

  if (MOVType != DestType) {
    ResNode = CurDAG->getMachineNode(TargetOpcode::SUBREG_TO_REG, dl,
                          MVT::i64, MVT::i32, MVT::Other,
                          CurDAG->getTargetConstant(0, MVT::i64),
                          SDValue(ResNode, 0),
                          CurDAG->getTargetConstant(AArch64::sub_32, MVT::i32));
  }

  return ResNode;
}

SDNode *AArch64DAGToDAGISel::SelectToLitPool(SDNode *Node) {
  DebugLoc DL = Node->getDebugLoc();
  uint64_t UnsignedVal = cast<ConstantSDNode>(Node)->getZExtValue();
  int64_t SignedVal = cast<ConstantSDNode>(Node)->getSExtValue();
  EVT DestType = Node->getValueType(0);
  EVT PtrVT = TLI.getPointerTy();

  // Since we may end up loading a 64-bit constant from a 32-bit entry the
  // constant in the pool may have a different type to the eventual node.
  ISD::LoadExtType Extension;
  EVT MemType;

  assert((DestType == MVT::i64 || DestType == MVT::i32)
         && "Only expect integer constants at the moment");

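  // For example (with a 64-bit destination): 0x80000000 fits an unsigned
  // 32-bit pool entry and is zero-extended, -5 fits a signed 32-bit entry and
  // is sign-extended, while 0x100000000 needs a full 64-bit entry with no
  // extension.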
  if (DestType == MVT::i32) {
    Extension = ISD::NON_EXTLOAD;
    MemType = MVT::i32;
  } else if (UnsignedVal <= UINT32_MAX) {
    Extension = ISD::ZEXTLOAD;
    MemType = MVT::i32;
  } else if (SignedVal >= INT32_MIN && SignedVal <= INT32_MAX) {
    Extension = ISD::SEXTLOAD;
    MemType = MVT::i32;
  } else {
    Extension = ISD::NON_EXTLOAD;
    MemType = MVT::i64;
  }

  Constant *CV = ConstantInt::get(Type::getIntNTy(*CurDAG->getContext(),
                                                  MemType.getSizeInBits()),
                                  UnsignedVal);
  SDValue PoolAddr;
  unsigned Alignment = TLI.getDataLayout()->getABITypeAlignment(CV->getType());
  PoolAddr = CurDAG->getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                             CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0,
                                                           AArch64II::MO_NO_FLAG),
                             CurDAG->getTargetConstantPool(CV, PtrVT, 0, 0,
                                                           AArch64II::MO_LO12),
                             CurDAG->getConstant(Alignment, MVT::i32));

  return CurDAG->getExtLoad(Extension, DL, DestType, CurDAG->getEntryNode(),
                            PoolAddr,
                            MachinePointerInfo::getConstantPool(), MemType,
                            /* isVolatile = */ false,
                            /* isNonTemporal = */ false,
                            Alignment).getNode();
}

SDNode *AArch64DAGToDAGISel::LowerToFPLitPool(SDNode *Node) {
  DebugLoc DL = Node->getDebugLoc();
  const ConstantFP *FV = cast<ConstantFPSDNode>(Node)->getConstantFPValue();
  EVT PtrVT = TLI.getPointerTy();
  EVT DestType = Node->getValueType(0);

  unsigned Alignment = TLI.getDataLayout()->getABITypeAlignment(FV->getType());
  SDValue PoolAddr;

  assert(TM.getCodeModel() == CodeModel::Small &&
         "Only small code model supported");
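  // N.b. the two TargetConstantPool operands below refer to the same pool
  // entry twice: WrapperSmall pairs a page-relative reference with a
  // low-12-bit reference, which the small code model is expected to
  // materialise as an ADRP followed by a load using a :lo12: offset.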
  PoolAddr = CurDAG->getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                             CurDAG->getTargetConstantPool(FV, PtrVT, 0, 0,
                                                           AArch64II::MO_NO_FLAG),
                             CurDAG->getTargetConstantPool(FV, PtrVT, 0, 0,
                                                           AArch64II::MO_LO12),
                             CurDAG->getConstant(Alignment, MVT::i32));

  return CurDAG->getLoad(DestType, DL, CurDAG->getEntryNode(), PoolAddr,
                         MachinePointerInfo::getConstantPool(),
                         /* isVolatile = */ false,
                         /* isNonTemporal = */ false,
                         /* isInvariant = */ true,
                         Alignment).getNode();
}

bool
AArch64DAGToDAGISel::SelectTSTBOperand(SDValue N, SDValue &FixedPos,
                                       unsigned RegWidth) {
  const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (!CN) return false;

  uint64_t Val = CN->getZExtValue();

  if (!isPowerOf2_64(Val)) return false;

  unsigned TestedBit = Log2_64(Val);
  // Checks above should have guaranteed that we haven't lost information in
  // finding TestedBit, but it must still be in range.
  if (TestedBit >= RegWidth) return false;

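  // For example, Val == 0x80 gives TestedBit == 7 and is accepted for either
  // register width; Val == 0x180 is rejected above as it is not a power of 2.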
  FixedPos = CurDAG->getTargetConstant(TestedBit, MVT::i64);
  return true;
}

SDNode *AArch64DAGToDAGISel::SelectAtomic(SDNode *Node, unsigned Op8,
                                          unsigned Op16, unsigned Op32,
                                          unsigned Op64) {
  // Mostly direct translation to the given operations, except that we preserve
  // the AtomicOrdering for use later on.
  AtomicSDNode *AN = cast<AtomicSDNode>(Node);
  EVT VT = AN->getMemoryVT();

  unsigned Op;
  if (VT == MVT::i8)
    Op = Op8;
  else if (VT == MVT::i16)
    Op = Op16;
  else if (VT == MVT::i32)
    Op = Op32;
  else if (VT == MVT::i64)
    Op = Op64;
  else
    llvm_unreachable("Unexpected atomic operation");

  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 1; i < AN->getNumOperands(); ++i)
    Ops.push_back(AN->getOperand(i));

  Ops.push_back(CurDAG->getTargetConstant(AN->getOrdering(), MVT::i32));
  Ops.push_back(AN->getOperand(0)); // Chain moves to the end
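
  // For a typical RMW node (e.g. an atomic add) Ops now holds
  // {address, value, ordering, chain}: the original non-chain operands first,
  // then the ordering as an immediate, with the chain last as machine nodes
  // conventionally expect.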
  return CurDAG->SelectNodeTo(Node, Op,
                              AN->getValueType(0), MVT::Other,
                              &Ops[0], Ops.size());
}

SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) {
  // Dump information about the Node being selected
  DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << "\n");

  if (Node->isMachineOpcode()) {
    DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
    return NULL;
  }

  switch (Node->getOpcode()) {
  case ISD::ATOMIC_LOAD_ADD:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_ADD_I8,
                        AArch64::ATOMIC_LOAD_ADD_I16,
                        AArch64::ATOMIC_LOAD_ADD_I32,
                        AArch64::ATOMIC_LOAD_ADD_I64);
  case ISD::ATOMIC_LOAD_SUB:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_SUB_I8,
                        AArch64::ATOMIC_LOAD_SUB_I16,
                        AArch64::ATOMIC_LOAD_SUB_I32,
                        AArch64::ATOMIC_LOAD_SUB_I64);
  case ISD::ATOMIC_LOAD_AND:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_AND_I8,
                        AArch64::ATOMIC_LOAD_AND_I16,
                        AArch64::ATOMIC_LOAD_AND_I32,
                        AArch64::ATOMIC_LOAD_AND_I64);
  case ISD::ATOMIC_LOAD_OR:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_OR_I8,
                        AArch64::ATOMIC_LOAD_OR_I16,
                        AArch64::ATOMIC_LOAD_OR_I32,
                        AArch64::ATOMIC_LOAD_OR_I64);
  case ISD::ATOMIC_LOAD_XOR:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_XOR_I8,
                        AArch64::ATOMIC_LOAD_XOR_I16,
                        AArch64::ATOMIC_LOAD_XOR_I32,
                        AArch64::ATOMIC_LOAD_XOR_I64);
  case ISD::ATOMIC_LOAD_NAND:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_NAND_I8,
                        AArch64::ATOMIC_LOAD_NAND_I16,
                        AArch64::ATOMIC_LOAD_NAND_I32,
                        AArch64::ATOMIC_LOAD_NAND_I64);
  case ISD::ATOMIC_LOAD_MIN:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_MIN_I8,
                        AArch64::ATOMIC_LOAD_MIN_I16,
                        AArch64::ATOMIC_LOAD_MIN_I32,
                        AArch64::ATOMIC_LOAD_MIN_I64);
  case ISD::ATOMIC_LOAD_MAX:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_MAX_I8,
                        AArch64::ATOMIC_LOAD_MAX_I16,
                        AArch64::ATOMIC_LOAD_MAX_I32,
                        AArch64::ATOMIC_LOAD_MAX_I64);
  case ISD::ATOMIC_LOAD_UMIN:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_UMIN_I8,
                        AArch64::ATOMIC_LOAD_UMIN_I16,
                        AArch64::ATOMIC_LOAD_UMIN_I32,
                        AArch64::ATOMIC_LOAD_UMIN_I64);
  case ISD::ATOMIC_LOAD_UMAX:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_LOAD_UMAX_I8,
                        AArch64::ATOMIC_LOAD_UMAX_I16,
                        AArch64::ATOMIC_LOAD_UMAX_I32,
                        AArch64::ATOMIC_LOAD_UMAX_I64);
  case ISD::ATOMIC_SWAP:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_SWAP_I8,
                        AArch64::ATOMIC_SWAP_I16,
                        AArch64::ATOMIC_SWAP_I32,
                        AArch64::ATOMIC_SWAP_I64);
  case ISD::ATOMIC_CMP_SWAP:
    return SelectAtomic(Node,
                        AArch64::ATOMIC_CMP_SWAP_I8,
                        AArch64::ATOMIC_CMP_SWAP_I16,
                        AArch64::ATOMIC_CMP_SWAP_I32,
                        AArch64::ATOMIC_CMP_SWAP_I64);
  case ISD::FrameIndex: {
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    EVT PtrTy = TLI.getPointerTy();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI, PtrTy);
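    // The zero immediate is a placeholder: the target frame index is rewritten
    // into a real base register and offset later, when frame indices are
    // eliminated during prologue/epilogue insertion.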
    return CurDAG->SelectNodeTo(Node, AArch64::ADDxxi_lsl0_s, PtrTy,
                                TFI, CurDAG->getTargetConstant(0, PtrTy));
  }
  case ISD::ConstantPool: {
    // Constant pools are fine, just create a Target entry.
    ConstantPoolSDNode *CN = cast<ConstantPoolSDNode>(Node);
    const Constant *C = CN->getConstVal();
    SDValue CP = CurDAG->getTargetConstantPool(C, CN->getValueType(0));

    ReplaceUses(SDValue(Node, 0), CP);
    return NULL;
  }
  case ISD::Constant: {
    SDNode *ResNode = 0;
    if (cast<ConstantSDNode>(Node)->getZExtValue() == 0) {
      // XZR and WZR are probably even better than an actual move: most of the
      // time they can be folded into another instruction with *no* cost.

      EVT Ty = Node->getValueType(0);
      assert((Ty == MVT::i32 || Ty == MVT::i64) && "unexpected type");
      uint16_t Register = Ty == MVT::i32 ? AArch64::WZR : AArch64::XZR;
      ResNode = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                       Node->getDebugLoc(),
                                       Register, Ty).getNode();
    }

    // Next best option is a move-immediate, see if we can do that.
    if (!ResNode)
      ResNode = TrySelectToMoveImm(Node);

    if (ResNode)
      return ResNode;

    // If even that fails we fall back to a lit-pool entry at the moment. Future
    // tuning may change this to a sequence of MOVZ/MOVN/MOVK instructions.
    ResNode = SelectToLitPool(Node);
    assert(ResNode && "We need *some* way to materialise a constant");

    // We want to continue selection at this point since the litpool access
    // we've just generated uses generic nodes for simplicity.
    ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
    Node = ResNode;
    break;
  }
  case ISD::ConstantFP: {
    if (A64Imms::isFPImm(cast<ConstantFPSDNode>(Node)->getValueAPF())) {
      // FMOV will take care of it from TableGen
      break;
    }

    SDNode *ResNode = LowerToFPLitPool(Node);
    ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));

    // We want to continue selection at this point since the litpool access
    // we've just generated uses generic nodes for simplicity.
    Node = ResNode;
    break;
  }
  default:
    break; // Let generic code handle it
  }

  SDNode *ResNode = SelectCode(Node);

  DEBUG(dbgs() << "=> ";
        if (ResNode == NULL || ResNode == Node)
          Node->dump(CurDAG);
        else
          ResNode->dump(CurDAG);
        dbgs() << "\n");

  return ResNode;
}

/// This pass converts a legalized DAG into an AArch64-specific DAG, ready for
/// instruction scheduling.
FunctionPass *llvm::createAArch64ISelDAG(AArch64TargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new AArch64DAGToDAGISel(TM, OptLevel);
}
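
// A sketch of how this factory is typically wired up (the hook name
// AArch64PassConfig::addInstSelector is an assumption about the accompanying
// AArch64TargetMachine.cpp, not something defined in this file):
//
//   bool AArch64PassConfig::addInstSelector() {
//     addPass(createAArch64ISelDAG(getAArch64TargetMachine(), getOptLevel()));
//     return false;
//   }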