//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "ARMSubtarget.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      RET_FLAG,     // Return with a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.
      CMOV,         // ARM conditional move instructions.
      CNEG,         // ARM conditional negate instructions.

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

      FMRRD,        // double to two gprs.
      FMDRR,        // Two gprs to double.

      EH_SJLJ_SETJMP,    // SjLj exception handling setjmp
      EH_SJLJ_LONGJMP,   // SjLj exception handling longjmp

      THREAD_POINTER,    // Read the TLS thread pointer.

      VCEQ,         // Vector compare equal.
      VCGE,         // Vector compare greater than or equal.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector duplicate lane (128-bit result only; 64-bit is a shuffle)
      VDUPLANEQ     // splat a lane from a 64-bit vector to a 128-bit vector
    };
  }

  /// Define some predicates that are used for node matching.
  namespace ARM {
    /// getVMOVImm - If this is a build_vector of constants which can be
    /// formed by using a VMOV instruction of the specified element size,
    /// return the constant being splatted. The ByteSize field indicates the
    /// number of bytes of each element [1248].
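    /// For example, a build_vector splatting the i16 value 0xAB (ByteSize ==
    /// 2) can be materialized with "vmov.i16 dN, #0xAB".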
    SDValue getVMOVImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);

    /// isVREVMask - Check if a vector shuffle corresponds to a VREV
    /// instruction with the specified blocksize. (The order of the elements
    /// within each block of the vector is reversed.)
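    /// For example, the <4 x i16> mask <1, 0, 3, 2> swaps the two 16-bit
    /// elements within each 32-bit block and so corresponds to VREV32.16.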
    bool isVREVMask(ShuffleVectorSDNode *N, unsigned blocksize);
  }

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
    int VarArgsFrameIndex;            // FrameIndex for start of varargs area.
  public:
    explicit ARMTargetLowering(TargetMachine &TM);
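
    /// LowerOperation - Provide custom lowering hooks for operations the
    /// target has requested custom handling for.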
    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG);
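
    /// PerformDAGCombine - Run ARM-specific DAG combines on N, returning the
    /// replacement value, or a null SDValue if no change was made.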
    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
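
    /// getTargetNodeName - Return the name of the given ARMISD node, for use
    /// in debug output.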
    virtual const char *getTargetNodeName(unsigned Opcode) const;
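
    /// EmitInstrWithCustomInserter - Expand pseudo instructions that have
    /// been marked as requiring custom insertion into the given basic block.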
    virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
                                                           MachineBasicBlock *MBB) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// getPostIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;
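
    /// computeMaskedBitsForTargetNode - Determine which of the bits specified
    /// in Mask are known to be either zero or one for ARM-specific nodes.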
    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   MVT VT) const;
    std::vector<unsigned>
      getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                        MVT VT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              char ConstraintLetter,
                                              bool hasMemory,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    virtual const ARMSubtarget* getSubtarget() {
      return Subtarget;
    }

    /// getFunctionAlignment - Return the Log2 alignment of this function.
    virtual unsigned getFunctionAlignment(const Function *F) const;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    unsigned ARMPCLabelIndex;
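
    /// addTypeForNEON - Register VT as a legal NEON vector type, with loads,
    /// stores, and bitwise operations promoted to the given types; the DR and
    /// QR variants handle 64-bit and 128-bit vectors respectively.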
    void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
    void addDRTypeForNEON(MVT VT);
    void addQRTypeForNEON(MVT VT);

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
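
    /// PassF64ArgInRegs - An f64 argument assigned to a pair of core
    /// registers is split into two i32 halves (ARMISD::FMRRD); each half is
    /// either appended to RegsToPass or, when the second register of the
    /// pair is not available, stored to the stack via MemOpChains.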
    void PassF64ArgInRegs(CallSDNode *TheCall, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVector<SDValue, 8> &MemOpChains,
                          ISD::ArgFlagsTy Flags);
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG, DebugLoc dl);
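
    /// CCAssignFnForNode - Select the CCAssignFn for the given calling
    /// convention, choosing between the argument and return-value rules.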
    CCAssignFn *CCAssignFnForNode(unsigned CC, bool Return) const;
    SDValue LowerMemOpCallTo(CallSDNode *TheCall, SelectionDAG &DAG,
                             const SDValue &StackPtr, const CCValAssign &VA,
                             SDValue Chain, SDValue Arg, ISD::ArgFlagsTy Flags);
    SDNode *LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
                            unsigned CallingConv, SelectionDAG &DAG);
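
    // Lowering routines for the individual operations dispatched from
    // LowerOperation.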
    SDValue LowerCALL(SDValue Op, SelectionDAG &DAG);
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG);
    SDValue LowerRET(SDValue Op, SelectionDAG &DAG);
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG);
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG);
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG);
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG);
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG);
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG);
    SDValue LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG);
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG);
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG);
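
    /// EmitTargetCodeForMemcpy - Emit an inline ARM memcpy sequence when the
    /// size and alignment allow it, or return a null SDValue to fall back to
    /// the generic expansion.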
    SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl,
                                    SDValue Chain,
                                    SDValue Dst, SDValue Src,
                                    SDValue Size, unsigned Align,
                                    bool AlwaysInline,
                                    const Value *DstSV, uint64_t DstSVOff,
                                    const Value *SrcSV, uint64_t SrcSVOff);
  };
}

#endif // ARMISELLOWERING_H