//==-- ARM64ISelLowering.h - ARM64 DAG Lowering Interface --------*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_ARM64_ISELLOWERING_H
#define LLVM_TARGET_ARM64_ISELLOWERING_H

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {

namespace ARM64ISD {

enum {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
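  // For illustration only, a sketch of the sequence WrapperLarge denotes,
  // assuming a symbol `sym` and ELF-style relocation spellings:
  //   movz x0, #:abs_g3:sym
  //   movk x0, #:abs_g2_nc:sym
  //   movk x0, #:abs_g1_nc:sym
  //   movk x0, #:abs_g0_nc:sym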
  CALL,         // Function call.

  // Almost the same as a normal call node, except that a TLSDesc relocation is
  // needed so the linker can relax it correctly if possible.
  TLSDESC_CALL,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
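  // For illustration only, a small-code-model sketch assuming a symbol `sym`:
  // ADRP forms the 4KB page address and ADDlow adds the page offset:
  //   adrp x0, sym
  //   add  x0, x0, :lo12:sym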
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.
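  // For reference, the AArch64 ISA semantics these nodes correspond to:
  //   csinv Xd, Xn, Xm, cc   ; Xd = cc ? Xn : ~Xm
  //   csneg Xd, Xn, Xm, cc   ; Xd = cc ? Xn : -Xm
  //   csinc Xd, Xn, Xm, cc   ; Xd = cc ? Xn : Xm + 1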

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // Darwin.
  THREAD_POINTER,
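  // For illustration only: reading the thread pointer is a single system
  // register move, e.g.
  //   mrs x0, TPIDR_EL0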
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Floating point comparison
  FCMP,

  // Floating point max and min instructions.
  FMAX,
  FMIN,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
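  // For reference, bsl v0.16b, v1.16b, v2.16b computes
  //   v0 = (v0 & v1) | (~v0 & v2)
  // so each bit of the mask register picks between the two inputs.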
  BSL,

  // Vector arithmetic negation
  NEG,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector shift by immediate (saturating and rounding forms)
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
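  // e.g. cbz/cbnz (compare against zero and branch) and tbz/tbnz (test a
  // single bit and branch), which fuse the compare into the branch.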
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Custom prefetch handling
  PREFETCH,

  // {s|u}int to FP within a FP register.
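  //   e.g. scvtf s0, s0 (signed) or ucvtf d0, d0 (unsigned), avoiding a
  //   GPR-to-FPR transfer when the value is already in a vector register.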
  SITOF,
  UITOF
};

} // end namespace ARM64ISD

class ARM64Subtarget;
class ARM64TargetMachine;

class ARM64TargetLowering : public TargetLowering {
  bool RequireStrictAlign;

public:
  explicit ARM64TargetLowering(ARM64TargetMachine &TM);

  /// Selects the correct CCAssignFn for a given CallingConvention
  /// value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// computeMaskedBitsForTargetNode - Determine which of the bits specified in
  /// Mask are known to be either zero or one and return them in the
  /// KnownZero/KnownOne bitsets.
  void computeMaskedBitsForTargetNode(const SDValue Op, APInt &KnownZero,
                                      APInt &KnownOne, const SelectionDAG &DAG,
                                      unsigned Depth = 0) const override;

  MVT getScalarShiftAmountTy(EVT LHSTy) const override;

  /// allowsUnalignedMemoryAccesses - Returns true if the target allows
  /// unaligned memory accesses of the specified type.
  bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
                                     bool *Fast = nullptr) const override {
    if (RequireStrictAlign)
      return false;
    // FIXME: True for Cyclone, but not necessarily others.
    if (Fast)
      *Fast = true;
    return true;
  }

  /// LowerOperation - Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// getFunctionAlignment - Return the Log2 alignment of this function.
  unsigned getFunctionAlignment(const Function *F) const;

  /// getMaximalGlobalOffset - Returns the maximal possible offset which can
  /// be used for loads / stores from the global.
  unsigned getMaximalGlobalOffset() const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// createFastISel - This method returns a target specific FastISel object,
  /// or null if the target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  /// isShuffleMaskLegal - Return true if the given shuffle mask can be
  /// codegen'd directly, or if it should be stack expanded.
  bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const override;

  /// getSetCCResultType - Return the ISD::SETCC ValueType
  EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr *MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr *MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          unsigned Intrinsic) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool hasPairedLoad(Type *LoadedType,
                     unsigned &RequiredAlignment) const override;
  bool hasPairedLoad(EVT LoadedType,
                     unsigned &RequiredAlignment) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;
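  // For reference, stated informally: ARM64 add/sub and cmp immediates are
  // 12 bits wide, optionally shifted left by 12, so a legal immediate
  // roughly satisfies imm == (imm & 0xfff) || imm == (imm & 0xfff000).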

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          MachineFunction &MF) const override;

  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
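  // For illustration, typical legal modes include [reg], [reg, #scaled-imm12],
  // [reg, #signed-imm9], and the register-offset forms
  // [reg, reg, {lsl|sxtw|uxtw} #shift]; this is a sketch, not the full rule.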

  /// \brief Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const AddrMode &AM, Type *Ty) const override;

  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
  /// expanded to FMAs when this method returns true, otherwise fmuladd is
  /// expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
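  // For illustration: when this returns true, (fadd (fmul a, b), c) can be
  // emitted as one fused instruction, e.g. fmadd d0, d1, d2, d0.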

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// \brief Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N) const override;

  /// \brief Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;
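  // For reference: these emit the ARM64 exclusive pair, e.g. ldxr/ldaxr for
  // the load-linked and stxr/stlxr for the store-conditional, with the
  // acquire/release variants chosen from the AtomicOrdering.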

  bool shouldExpandAtomicInIR(Instruction *Inst) const override;

private:
  /// Subtarget - Keep a pointer to the ARM64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARM64Subtarget *Subtarget;

  void addTypeForNEON(EVT VT, EVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                       SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc DL,
                          SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
                          bool isThisReturn, SDValue ThisVal) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      bool isCalleeStructRet, bool isCallerStructRet,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, SDLoc DL,
                      SelectionDAG &DAG) const override;

  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCall(SDValue SymAddr, SDValue DescAddr, SDLoc DL,
                              SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;

  ConstraintType
  getConstraintType(const std::string &Constraint) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const std::string &Constraint,
                               MVT VT) const override;
  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;
};

namespace ARM64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace ARM64

} // end namespace llvm

#endif // LLVM_TARGET_ARM64_ISELLOWERING_H