//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {

namespace AArch64ISD {

enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.
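  // e.g. "csinc Wd, Wn, Wm, cc" computes "cc ? Wn : Wm + 1"; with both
  // sources WZR it yields the CSET idiom.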

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Floating point max and min instructions.
  FMAX,
  FMIN,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,
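  // e.g. BSL(mask, a, b) computes (mask & a) | (~mask & b) bit by bit, so a
  // single element can take bits from both inputs.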

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector saturating/rounding shift by immediate
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,
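  // e.g. UADDV of a v4i32 input leaves in[0]+in[1]+in[2]+in[3] in lane 0.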

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,
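  // These match scvtf/ucvtf with an FPR source (e.g. "scvtf s0, s0") and
  // avoid a GPR->FPR transfer when the integer is already in a SIMD register.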

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t. vectors, which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,
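  /// For example, (v4i32 (NVCAST (v2i64 V))) re-labels V's register contents
  /// in place, where a big-endian ISD::BITCAST must behave as if the value
  /// had been stored and reloaded.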

  SMULL,
  UMULL,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost
};

} // end namespace AArch64ISD

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
  bool RequireStrictAlign;

public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// computeKnownBitsForTargetNode - Determine which of the bits specified in
  /// Mask are known to be either zero or one and return them in the
  /// KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
                                     APInt &KnownOne, const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// allowsMisalignedMemoryAccesses - Returns true if the target allows
  /// unaligned memory accesses of the specified type.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
                                      unsigned Align = 1,
                                      bool *Fast = nullptr) const override {
    if (RequireStrictAlign)
      return false;
    // FIXME: True for Cyclone, but not necessarily others.
    if (Fast)
      *Fast = true;
    return true;
  }
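
  // Note: RequireStrictAlign is set when strict alignment is requested (e.g.
  // via the strict-align subtarget feature); the legalizer then expands any
  // misaligned access instead.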

  /// LowerOperation - Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// getFunctionAlignment - Return the Log2 alignment of this function.
  unsigned getFunctionAlignment(const Function *F) const;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// createFastISel - This method returns a target specific FastISel object,
  /// or null if the target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  /// isShuffleMaskLegal - Return true if the given shuffle mask can be
  /// codegen'd directly, or if it should be stack expanded.
  bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;

  /// getSetCCResultType - Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr *MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr *MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          unsigned Intrinsic) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool hasPairedLoad(Type *LoadedType,
                     unsigned &RequiredAlignment) const override;
  bool hasPairedLoad(EVT LoadedType,
                     unsigned &RequiredAlignment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
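  // NEON's ld2/ld3/ld4 and st2/st3/st4 structured memory instructions cover
  // interleave factors 2 to 4, hence the limit above.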

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          MachineFunction &MF) const override;

  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS) const override;

  /// \brief Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
  /// expanded to FMAs when this method returns true, otherwise fmuladd is
  /// expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
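  // e.g. when this returns true, @llvm.fmuladd.f64(a, b, c) is selected as a
  // single fused "fmadd" rather than separate fmul and fadd instructions.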

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N) const override;

  /// \brief Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  bool hasLoadLinkedStoreConditional() const override;
  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;
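  // These emit the exclusive-access pairs used to expand atomics: ldxr/stxr,
  // or the ldaxr/stlxr variants when Ord requires acquire/release semantics.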

  bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicRMWExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const override;

private:
  bool isExtFreeImpl(const Instruction *Ext) const override;

  /// Subtarget - Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  void addTypeForNEON(EVT VT, EVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                       SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                          SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
                          bool isThisReturn, SDValue ThisVal) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      bool isCalleeStructRet, bool isCallerStructRet,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo *MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  bool IsTailCallConvention(CallingConv::ID CallCC) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
                      SelectionDAG &DAG) const override;

  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, SDLoc DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, SDLoc dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        std::vector<SDNode *> *Created) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  unsigned getRegisterByName(const char* RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    // followed by llvm_unreachable so we'll leave them unimplemented in
    // the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif