//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_AARCH64_ISELLOWERING_H
#define LLVM_TARGET_AARCH64_ISELLOWERING_H
#include "Utils/AArch64BaseInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Target/TargetLowering.h"
namespace llvm {
namespace AArch64ISD {
  enum NodeType {
    // Start the numbering from where ISD NodeType finishes.
    FIRST_NUMBER = ISD::BUILTIN_OP_END,
    // This is a conditional branch which also notes the flag needed
    // (eq/sgt/...). A64 puts this information on the branches rather than on
    // the compares, as LLVM does.
    BR_CC,
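    // For illustration only (the instruction choice is an assumption, not
    // taken from this header): a BR_CC with condition EQ would typically
    // select to something like
    //   subs wzr, w0, w1    // compare, setting NZCV
    //   b.eq .Ltarget       // branch on the recorded condition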
    // A node to be selected to an actual call operation: either BL or BLR in
    // the absence of tail calls.
    Call,
    // Indicates a floating-point immediate which fits into the format required
    // by the FMOV instructions. First (and only) operand is the 8-bit encoded
    // value of that immediate.
    FPMOV,
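    // Illustrative example (the encoding below is standard AArch64, not
    // something declared here): the imm8 format packs a sign bit, 3 exponent
    // bits and 4 fraction bits, so +1.0 fits and encodes as 0x70:
    //   fmov d0, #1.0    // this node would carry the operand 0x70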
    // Corresponds directly to an EXTR instruction. Operands are an LHS, an
    // RHS, and an LSB.
    EXTR,
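    // For illustration: EXTR extracts a register-width field from the
    // concatenation LHS:RHS, starting at the given LSB, e.g.
    //   extr x0, x1, x2, #8    // x0 = bits [71:8] of x1:x2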
    // Wraps a load from the GOT, which should always be performed with a
    // 64-bit load instruction. This prevents the DAG combiner from folding a
    // truncate to form a smaller memory access.
    GOTLoad,
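    // The GOT access being wrapped looks roughly like this (for illustration;
    // the register choice is arbitrary):
    //   adrp x0, :got:var
    //   ldr  x0, [x0, #:got_lo12:var]    // always a 64-bit LDR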
    // Performs a bitfield insert. Arguments are: the value being inserted
    // into; the value being inserted; the least significant bit changed; the
    // width of the field.
    BFI,
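    // For illustration, a BFI with LSB 8 and width 4 corresponds to:
    //   bfi w0, w1, #8, #4    // copy bits [3:0] of w1 into bits [11:8] of w0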
    // Simply a convenient node inserted during ISelLowering to represent
    // procedure return. Will almost certainly be selected to "RET".
    Ret,
    /// Extracts a field of contiguous bits from the source and sign extends
    /// them into a single register. Arguments are: source; immr; imms. Note
    /// these are pre-encoded since DAG matching can't cope with combining LSB
    /// and Width into these values itself.
    SBFX,
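    /// For illustration of the pre-encoding: a field at LSB 4 of width 8 is
    /// carried as immr = 4, imms = 4 + 8 - 1 = 11, i.e. the assembler alias
    ///   sbfx x0, x1, #4, #8
    /// of SBFM x0, x1, #4, #11.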
    /// This is an A64-ification of the standard LLVM SELECT_CC operation. The
    /// main difference is that it only has the values and an A64 condition,
    /// which will be produced by a setcc instruction.
    SELECT_CC,
    /// This serves most of the functions of the LLVM SETCC instruction, for
    /// two purposes. First, it prevents optimisations from fiddling with the
    /// compare after we've moved the CondCode information onto the SELECT_CC
    /// or BR_CC instructions. Second, it gives a legal instruction for the
    /// actual comparison.
    ///
    /// It keeps a record of the condition flags asked for because certain
    /// instructions are only valid for a subset of condition codes.
    SETCC,
    // Designates a node which is a tail call: both a call and a return
    // instruction as far as selection is concerned. It should be selected to
    // an unconditional branch. Has the usual plethora of call operands, but:
    // 1st is callee, 2nd is stack adjustment required immediately before
    // branch.
    TC_RETURN,
    // Designates a call used to support the TLS descriptor ABI. The call
    // itself will be indirect ("BLR xN") but a relocation-specifier
    // (".tlsdesccall var") must be attached somehow during code generation.
    // It takes two operands: the callee and the symbol to be relocated
    // against.
    TLSDESCCALL,
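    // The canonical TLS descriptor sequence this participates in looks
    // roughly like the following (shown for illustration):
    //   adrp x0, :tlsdesc:var
    //   ldr  x1, [x0, #:tlsdesc_lo12:var]
    //   add  x0, x0, #:tlsdesc_lo12:var
    //   .tlsdesccall var
    //   blr  x1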
    // Leaf node which will be lowered to an appropriate MRS to obtain the
    // thread pointer: TPIDR_EL0.
    THREAD_POINTER,
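    // i.e. (for illustration):
    //   mrs x0, tpidr_el0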
    /// Extracts a field of contiguous bits from the source and zero extends
    /// them into a single register. Arguments are: source; immr; imms. Note
    /// these are pre-encoded since DAG matching can't cope with combining LSB
    /// and Width into these values itself.
    UBFX,
    // Wraps an address which the ISelLowering phase has decided should be
    // created using the large memory model style: i.e. a sequence of four
    // movz/movk instructions.
    WrapperLarge,
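    // For illustration, the large-model materialization of an address is
    // roughly:
    //   movz x0, #:abs_g3:sym
    //   movk x0, #:abs_g2_nc:sym
    //   movk x0, #:abs_g1_nc:sym
    //   movk x0, #:abs_g0_nc:sym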
    // Wraps an address which the ISelLowering phase has decided should be
    // created using the small memory model style: i.e. adrp/add or
    // adrp/mem-op. This exists to prevent bare TargetAddresses which may
    // never be selected.
    WrapperSmall,
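    // For illustration, the two small-model forms are roughly:
    //   adrp x0, sym
    //   add  x0, x0, #:lo12:sym      // adrp/add
    // or
    //   adrp x0, sym
    //   ldr  x0, [x0, #:lo12:sym]    // adrp/mem-op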
    // Vector bitwise select
    NEON_BSL,

    // Vector move immediate
    NEON_MOVIMM,

    // Vector move inverted immediate
    NEON_MVNIMM,

    // Vector FP move immediate
    NEON_FMOVIMM,
    // Vector element reverse
    NEON_REV64,
    NEON_REV32,
    NEON_REV16,
    // Vector compare zero
    NEON_CMPZ,

    // Vector compare bitwise test
    NEON_TST,

    // Vector saturating shift (signed/unsigned)
    NEON_QSHLs,
    NEON_QSHLu,

    // Vector dup by lane
    NEON_VDUPLANE,
    // NEON loads with post-increment base updates:
    NEON_LD1_UPD = ISD::FIRST_TARGET_MEMORY_OPCODE,
    NEON_LD2_UPD,
    NEON_LD3_UPD,
    NEON_LD4_UPD,
    NEON_LD1x2_UPD,
    NEON_LD1x3_UPD,
    NEON_LD1x4_UPD,
    // NEON stores with post-increment base updates:
    NEON_ST1_UPD,
    NEON_ST2_UPD,
    NEON_ST3_UPD,
    NEON_ST4_UPD,
    NEON_ST1x2_UPD,
    NEON_ST1x3_UPD,
    NEON_ST1x4_UPD
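    // These match post-indexed addressing forms such as (for illustration):
    //   ld1 { v0.4s }, [x0], #16
    //   st1 { v0.4s }, [x1], x2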
  };
}

class AArch64Subtarget;
class AArch64TargetMachine;
class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(AArch64TargetMachine &TM);

  const char *getTargetNodeName(unsigned Opcode) const;

  CCAssignFn *CCAssignFnForNode(CallingConv::ID CC) const;
  SDValue LowerFormalArguments(SDValue Chain,
                               CallingConv::ID CallConv, bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               SDLoc dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const;

  SDValue LowerReturn(SDValue Chain,
                      CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals,
                      SDLoc dl, SelectionDAG &DAG) const;

  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool IsVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          SDLoc dl, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals) const;

  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                            const AArch64Subtarget *ST) const;

  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;

  void SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, SDLoc DL,
                           SDValue &Chain) const;
  /// IsEligibleForTailCallOptimization - Check whether the call is eligible
  /// for tail call optimization. Targets which want to do tail call
  /// optimization should implement this function.
  bool IsEligibleForTailCallOptimization(SDValue Callee,
                                    CallingConv::ID CalleeCC,
                                    bool IsVarArg,
                                    bool IsCalleeStructRet,
                                    bool IsCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SelectionDAG &DAG) const;
  // Finds the incoming stack arguments which overlap the given fixed stack
  // object and incorporates their load into the current chain. This prevents
  // an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo *MFI, int ClobberedFI) const;
  EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  bool IsTailCallConvention(CallingConv::ID CallCC) const;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
  bool isLegalICmpImmediate(int64_t Val) const;
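  // For illustration (this reflects the A64 arithmetic-immediate rule, not
  // anything declared here): CMP/CMN accept a 12-bit unsigned immediate,
  // optionally shifted left by 12, so 0..4095 and multiples of 4096 up to
  // (4095 << 12) are legal comparison immediates.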
  SDValue getSelectableIntSetCC(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                                SDValue &A64cc, SelectionDAG &DAG,
                                SDLoc &dl) const;
  virtual MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
  MachineBasicBlock *
  emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *MBB,
                   unsigned Size, unsigned Opcode) const;
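  // The emitted expansion is a load/store-exclusive loop, roughly (for
  // illustration; the registers and the ADD are placeholders):
  //   .Lloop:
  //     ldxr  x0, [x2]        // load-exclusive the old value
  //     add   x1, x0, x3      // apply the binary operation
  //     stxr  w4, x1, [x2]    // store-exclusive; w4 == 0 on success
  //     cbnz  w4, .Lloop      // retry if the exclusive store failed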
  MachineBasicBlock *
  emitAtomicBinaryMinMax(MachineInstr *MI, MachineBasicBlock *BB,
                         unsigned Size, unsigned CmpOp,
                         A64CC::CondCodes Cond) const;
  MachineBasicBlock *
  emitAtomicCmpSwap(MachineInstr *MI, MachineBasicBlock *BB,
                    unsigned Size) const;
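  // Roughly the following loop (for illustration; the register choice is
  // arbitrary):
  //   .Lloop:
  //     ldxr  x0, [x2]        // current value
  //     cmp   x0, x3          // compare against the expected value
  //     b.ne  .Ldone
  //     stxr  w4, x5, [x2]    // try to store the new value
  //     cbnz  w4, .Lloop
  //   .Ldone: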
  MachineBasicBlock *
  EmitF128CSEL(MachineInstr *MI, MachineBasicBlock *MBB) const;
  SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128ToCall(SDValue Op, SelectionDAG &DAG,
                          RTLIB::Libcall Call) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG, bool IsSigned) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerGlobalAddressELFSmall(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressELFLarge(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerTLSDescCall(SDValue SymAddr, SDValue DescAddr, SDLoc DL,
                           SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG, bool IsSigned) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
  /// expanded to FMAs when this method returns true; otherwise fmuladd is
  /// expanded to fmul + fadd.
  virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
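  // e.g. when this returns true, @llvm.fmuladd.f64(a, b, c) may become a
  // single instruction (for illustration):
  //   fmadd d0, d0, d1, d2    // d0 = d0 * d1 + d2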
  ConstraintType getConstraintType(const std::string &Constraint) const;

  ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &Info,
                                                  const char *Constraint) const;
  void LowerAsmOperandForConstraint(SDValue Op,
                                    std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const;
  std::pair<unsigned, const TargetRegisterClass*>
  getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const;
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                                  unsigned Intrinsic) const LLVM_OVERRIDE;
private:
  const InstrItineraryData *Itins;
  const AArch64Subtarget *getSubtarget() const {
    return &getTargetMachine().getSubtarget<AArch64Subtarget>();
  }
};

enum NeonModImmType {
  Neon_Mov_Imm,
  Neon_Mvn_Imm
};
extern SDValue ScanBUILD_VECTOR(SDValue Op, bool &isOnlyLowElement,
                                bool &usesOnlyOneValue, bool &hasDominantValue,
                                bool &isConstant, bool &isUNDEF);
} // namespace llvm

#endif // LLVM_TARGET_AARCH64_ISELLOWERING_H