//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "ARMSubtarget.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.
      CMOV,         // ARM conditional move instructions.
      CNEG,         // ARM conditional negate instructions.

      RBIT,         // ARM bitreverse instruction.

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.
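      // (Illustrative note, not an enumerator: RRX is a 1-bit rotate through
      // the carry flag, i.e. roughly
      //   result = (carry_in << 31) | (x >> 1);  carry_out = x & 1;
      // so it consumes the carry produced by SRL_FLAG / SRA_FLAG above, as in
      // a 64-bit right shift lowered to a flag-setting shift plus an RRX.)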

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,   // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,  // SjLj exception handling longjmp.

      TC_RETURN,    // Tail call return pseudo.

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER,   // Memory barrier.
      SYNCBARRIER,  // Memory sync barrier.

      VCEQ,         // Vector compare equal.
      VCGE,         // Vector compare greater than or equal.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN,         // transpose

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Floating-point max and min:
      FMAX,
      FMIN
    };
  }

  /// Define some predicates that are used for node matching.
  namespace ARM {
    /// getNEONModImm - If this is a valid vector constant for a NEON
    /// instruction with a "modified immediate" operand (e.g., VMOV) of the
    /// specified element size, return the encoded value for that immediate.
    /// The ByteSize field indicates the number of bytes of each element
    /// (1, 2, 4, or 8).
    SDValue getNEONModImm(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
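    //
    // Illustrative usage sketch (not part of this interface): given a
    // constant BUILD_VECTOR node N with 32-bit elements, a caller might
    // check whether it can be selected as a single VMOV-immediate:
    //
    //   SDValue ModImm = getNEONModImm(N, /*ByteSize=*/4, DAG);
    //   if (ModImm.getNode()) {
    //     // Use the encoded immediate instead of a constant-pool load.
    //   }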

    /// getVFPf32Imm / getVFPf64Imm - If the given fp immediate can be
    /// materialized with a VMOV.f32 / VMOV.f64 (i.e. fconsts / fconstd)
    /// instruction, returns its 8-bit integer representation. Otherwise,
    /// returns -1.
    int getVFPf32Imm(const APFloat &FPImm);
    int getVFPf64Imm(const APFloat &FPImm);
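    //
    // Illustrative usage sketch (example only): checking whether 1.0f can be
    // materialized with a single fconsts instruction:
    //
    //   APFloat FPImm(1.0f);
    //   int Enc = getVFPf32Imm(FPImm);
    //   if (Enc != -1) {
    //     // Enc is the 8-bit VFP immediate encoding; no constant-pool load
    //     // is needed for this value.
    //   }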
  }

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface
  //===--------------------------------------------------------------------===//

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                                    SelectionDAG &DAG) const;

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    /// FIXME: Add getOptimalMemOpType to implement memcpy with NEON?
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
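    //
    // Illustrative usage sketch (example only, "TLI" and "Ty" assumed in
    // scope): the AddrMode fields describe a candidate address of the form
    // BaseGV + BaseOffs + BaseReg + Scale*ScaleReg, so an ARM-style
    // "[Rn, Rm, lsl #2]" address would be queried as:
    //
    //   TargetLowering::AddrMode AM;
    //   AM.HasBaseReg = true;  // Rn
    //   AM.Scale = 4;          // Rm scaled by 4 (lsl #2)
    //   bool Legal = TLI.isLegalAddressingMode(AM, Ty);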

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;
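    //
    // Illustrative note (example only): in ARM mode an icmp immediate is
    // typically legal when it, or its negation (compared via cmn), fits the
    // rotated 8-bit "modified immediate" form, e.g.:
    //
    //   cmp r0, #255        @ legal: fits the modified-immediate encoding
    //   cmn r0, #1          @ legal: compare against -1 via negation
    //   cmp r0, #0x12345    @ not encodable; must be materialized first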

    /// getPreIndexedAddressParts - Returns true by value and, by reference,
    /// the base pointer, the offset, and the addressing mode, if the node's
    /// address can be legally represented as a pre-indexed load / store
    /// address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// getPostIndexedAddressParts - Returns true by value and, by reference,
    /// the base pointer, the offset, and the addressing mode, if this node
    /// can be combined with a load / store to form a post-indexed load /
    /// store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;
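    //
    // Illustrative note: these hooks let the DAG combiner fold an address
    // update into the memory access itself; in ARM assembly the two forms
    // look like (example only):
    //
    //   ldr r0, [r1, #4]!   @ pre-indexed:  r1 += 4, then load from r1
    //   ldr r0, [r1], #4    @ post-indexed: load from r1, then r1 += 4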

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;
    std::vector<unsigned>
      getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                        EVT VT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              char ConstraintLetter,
                                              bool hasMemory,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    const ARMSubtarget *getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    virtual TargetRegisterClass *getRegClassFor(EVT VT) const;

    /// getFunctionAlignment - Return the Log2 alignment of this function.
    virtual unsigned getFunctionAlignment(const Function *F) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const;

    bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
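    //
    // Illustrative sketch (a plausible shape only, ignoring subtarget
    // feature checks): for f32/f64 this is expected to line up with the
    // ARM::getVFPf32Imm / ARM::getVFPf64Imm predicates above, roughly:
    //
    //   return VT == MVT::f32 ? ARM::getVFPf32Imm(Imm) != -1
    //                         : ARM::getVFPf64Imm(Imm) != -1;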

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(EVT VT, EVT PromotedLdStVT, EVT PromotedBitwiseVT);
    void addDRTypeForNEON(EVT VT);
    void addQRTypeForNEON(EVT VT);

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVector<SDValue, 8> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 DebugLoc dl) const;

    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg,
                bool &isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals) const;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                           const SmallVectorImpl<ISD::OutputArg> &Outs,
                                           const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG &DAG) const;

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  DebugLoc dl, SelectionDAG &DAG) const;

    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMCC, SelectionDAG &DAG, DebugLoc dl) const;

    MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const;
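    //
    // Illustrative note: for a word-sized atomic binary op, the custom
    // inserter expands the pseudo into a load-exclusive / store-exclusive
    // retry loop along these lines (ARM assembly, example only):
    //
    //   loop:
    //     ldrex   r2, [r0]        @ load-exclusive the old value
    //     add     r2, r2, r1      @ apply the binary op (BinOpcode)
    //     strex   r3, r2, [r0]    @ try to store the result exclusively
    //     cmp     r3, #0
    //     bne     loop            @ retry if the exclusive monitor was lost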
  };
} // end namespace llvm

#endif  // ARMISELLOWERING_H