//===-- ARMISelLowering.h - ARM DAG Lowering Interface ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef ARMISELLOWERING_H
#define ARMISELLOWERING_H

#include "ARMSubtarget.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include <vector>

namespace llvm {
  class ARMConstantPoolValue;

  namespace ARMISD {
    // ARM Specific DAG Nodes
    enum NodeType {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                    // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

      CALL,         // Function call.
      CALL_PRED,    // Function call that's predicable.
      CALL_NOLINK,  // Function call with branch not branch-and-link.
      tCALL,        // Thumb function call.
      BRCOND,       // Conditional branch.
      BR_JT,        // Jumptable branch.
      BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,     // Return with a flag operand.

      PIC_ADD,      // Add with a PC operand and a PIC label.

      CMP,          // ARM compare instructions.
      CMPZ,         // ARM compare that sets only Z flag.
      CMPFP,        // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,       // ARM fmstat instruction.
      CMOV,         // ARM conditional move instructions.
      CNEG,         // ARM conditional negate instructions.

      RBIT,         // ARM bitreverse instruction

      FTOSI,        // FP to sint within a FP register.
      FTOUI,        // FP to uint within a FP register.
      SITOF,        // sint to FP within a FP register.
      UITOF,        // uint to FP within a FP register.

      SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.
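
      // Illustrative sketch (not part of the original node list): together
      // these nodes let a 64-bit shift-right-by-one be expanded as
      //   Hi', Flag = SRL_FLAG Hi    ; carry out receives the old Hi[0]
      //   Lo'       = RRX Lo, Flag   ; carry is shifted into Lo[31]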

      VMOVRRD,      // double to two gprs.
      VMOVDRR,      // Two gprs to double.

      EH_SJLJ_SETJMP,   // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,  // SjLj exception handling longjmp.

      TC_RETURN,    // Tail call return pseudo.

      THREAD_POINTER,

      DYN_ALLOC,    // Dynamic allocation on the stack.

      MEMBARRIER,   // Memory barrier
      SYNCBARRIER,  // Memory sync barrier

      VCEQ,         // Vector compare equal.
      VCGE,         // Vector compare greater than or equal.
      VCGEU,        // Vector compare unsigned greater than or equal.
      VCGT,         // Vector compare greater than.
      VCGTU,        // Vector compare unsigned greater than.
      VTST,         // Vector test bits.

      // Vector shift by immediate:
      VSHL,         // ...left
      VSHRs,        // ...right (signed)
      VSHRu,        // ...right (unsigned)
      VSHLLs,       // ...left long (signed)
      VSHLLu,       // ...left long (unsigned)
      VSHLLi,       // ...left long (with maximum shift count)
      VSHRN,        // ...right narrow

      // Vector rounding shift by immediate:
      VRSHRs,       // ...right (signed)
      VRSHRu,       // ...right (unsigned)
      VRSHRN,       // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLs,       // ...left (signed)
      VQSHLu,       // ...left (unsigned)
      VQSHLsu,      // ...left (signed to unsigned)
      VQSHRNs,      // ...right narrow (signed)
      VQSHRNu,      // ...right narrow (unsigned)
      VQSHRNsu,     // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNs,     // ...right narrow (signed)
      VQRSHRNu,     // ...right narrow (unsigned)
      VQRSHRNsu,    // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLI,         // ...left
      VSRI,         // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu,    // zero-extend vector extract element
      VGETLANEs,    // sign-extend vector extract element

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      // Vector shuffles:
      VEXT,         // extract
      VREV64,       // reverse elements within 64-bit doublewords
      VREV32,       // reverse elements within 32-bit words
      VREV16,       // reverse elements within 16-bit halfwords
      VZIP,         // zip (interleave)
      VUZP,         // unzip (deinterleave)
      VTRN,         // transpose

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Floating-point max and min:
      FMAX,
      FMIN
    };
  }

  /// Define some predicates that are used for node matching.
  namespace ARM {
    /// getNEONModImm - If this is a valid vector constant for a NEON
    /// instruction with a "modified immediate" operand (e.g., VMOV) of the
    /// specified element size, return the encoded value for that immediate.
    /// The ByteSize field indicates the number of bytes of each element [1248].
    SDValue getNEONModImm(SDNode *N, unsigned ByteSize, bool isVMOV,
                          SelectionDAG &DAG);
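
    /// (Illustrative sketch, not an exhaustive rule: a v4i32 splat of
    /// 0x000000ff fits the modified-immediate format and can be emitted as
    /// "vmov.i32 d0, #0xff", while a splat of 0x00ff00ff would need a
    /// different element size or a constant-pool load.)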

    /// getVFPf32Imm / getVFPf64Imm - If the given fp immediate can be
    /// materialized with a VMOV.f32 / VMOV.f64 (i.e. fconsts / fconstd)
    /// instruction, returns its 8-bit integer representation. Otherwise,
    /// returns -1.
    int getVFPf32Imm(const APFloat &FPImm);
    int getVFPf64Imm(const APFloat &FPImm);
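
    /// (Sketch of the encodable set; see the ARM ARM for the exact rule:
    /// values of the form +/- n * 2^-r with 16 <= n <= 31 and 0 <= r <= 7,
    /// so 1.0 (16 * 2^-4) and -0.5 (-16 * 2^-5) encode, while 0.1 does not.)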
  }

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface
  //===--------------------------------------------------------------------===//

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(TargetMachine &TM);

    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
                                    SelectionDAG &DAG) const;

    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;

    virtual const char *getTargetNodeName(unsigned Opcode) const;

    virtual MachineBasicBlock *
      EmitInstrWithCustomInserter(MachineInstr *MI,
                                  MachineBasicBlock *MBB) const;

    /// allowsUnalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type.
    /// FIXME: Add getOptimalMemOpType to implement memcpy with NEON?
    virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;
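
    /// (Sketch of the intended policy; the exact rule lives in the .cpp
    /// implementation: subtargets that permit unaligned accesses would
    /// typically answer true for scalar i8/i16/i32 loads and stores, while
    /// strict-alignment cores answer false.)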

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    virtual bool isLegalAddressingMode(const AddrMode &AM,
                                       const Type *Ty) const;
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;
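
    /// (For instance, as an illustrative sketch: an ARM-mode i32 load
    /// supports reg+imm offsets in [-4095, 4095] and reg +/- (optionally
    /// shifted) reg, so "base + 4092" is a legal address while "base + 8192"
    /// must be computed into a register first.)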

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    virtual bool isLegalICmpImmediate(int64_t Imm) const;
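
    /// (E.g., as a sketch: ARM-mode compare immediates are an 8-bit value
    /// rotated right by an even amount, so 255 and 0xff00 are legal while
    /// 0x101 is not; immediates whose negation fits can often be handled
    /// with CMN instead.)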

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    virtual bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                           SDValue &Offset,
                                           ISD::MemIndexedMode &AM,
                                           SelectionDAG &DAG) const;

    /// getPostIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    virtual bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                            SDValue &Base, SDValue &Offset,
                                            ISD::MemIndexedMode &AM,
                                            SelectionDAG &DAG) const;

    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                                const APInt &Mask,
                                                APInt &KnownZero,
                                                APInt &KnownOne,
                                                const SelectionDAG &DAG,
                                                unsigned Depth) const;

    ConstraintType getConstraintType(const std::string &Constraint) const;
    std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
                                   EVT VT) const;
    std::vector<unsigned>
      getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                        EVT VT) const;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops.
    virtual void LowerAsmOperandForConstraint(SDValue Op,
                                              char ConstraintLetter,
                                              std::vector<SDValue> &Ops,
                                              SelectionDAG &DAG) const;

    const ARMSubtarget* getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    virtual TargetRegisterClass *getRegClassFor(EVT VT) const;

    /// getFunctionAlignment - Return the Log2 alignment of this function.
    virtual unsigned getFunctionAlignment(const Function *F) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const;

    bool isShuffleMaskLegal(const SmallVectorImpl<int> &M, EVT VT) const;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    unsigned ARMPCLabelIndex;

    void addTypeForNEON(EVT VT, EVT PromotedLdStVT, EVT PromotedBitwiseVT);
    void addDRTypeForNEON(EVT VT);
    void addQRTypeForNEON(EVT VT);

    typedef SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPassVector;
    void PassF64ArgInRegs(DebugLoc dl, SelectionDAG &DAG,
                          SDValue Chain, SDValue &Arg,
                          RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVector<SDValue, 8> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 DebugLoc dl) const;

    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             DebugLoc dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            DebugLoc dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerFormalArguments(SDValue Chain,
                           CallingConv::ID CallConv, bool isVarArg,
                           const SmallVectorImpl<ISD::InputArg> &Ins,
                           DebugLoc dl, SelectionDAG &DAG,
                           SmallVectorImpl<SDValue> &InVals) const;

    virtual SDValue
      LowerCall(SDValue Chain, SDValue Callee,
                CallingConv::ID CallConv, bool isVarArg,
                bool &isTailCall,
                const SmallVectorImpl<ISD::OutputArg> &Outs,
                const SmallVectorImpl<SDValue> &OutVals,
                const SmallVectorImpl<ISD::InputArg> &Ins,
                DebugLoc dl, SelectionDAG &DAG,
                SmallVectorImpl<SDValue> &InVals) const;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
                                           CallingConv::ID CalleeCC,
                                           bool isVarArg,
                                           bool isCalleeStructRet,
                                           bool isCallerStructRet,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                           SelectionDAG& DAG) const;

    virtual SDValue
      LowerReturn(SDValue Chain,
                  CallingConv::ID CallConv, bool isVarArg,
                  const SmallVectorImpl<ISD::OutputArg> &Outs,
                  const SmallVectorImpl<SDValue> &OutVals,
                  DebugLoc dl, SelectionDAG &DAG) const;

    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, DebugLoc dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS,
                      SelectionDAG &DAG, DebugLoc dl) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI,
                                         MachineBasicBlock *BB,
                                         unsigned Size) const;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI,
                                        MachineBasicBlock *BB,
                                        unsigned Size,
                                        unsigned BinOpcode) const;
  };
}

#endif // ARMISELLOWERING_H