diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index f6bd676277e..4515b90b2ba 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -2,8 +2,8 @@
 //
 //                     The LLVM Compiler Infrastructure
 //
-// This file was developed by the LLVM research group and is distributed under
-// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 //
@@ -22,9 +22,12 @@
 #ifndef LLVM_TARGET_TARGETLOWERING_H
 #define LLVM_TARGET_TARGETLOWERING_H
 
-#include "llvm/DerivedTypes.h"
 #include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/STLExtras.h"
 #include <map>
+#include <vector>
 
 namespace llvm {
   class Value;
@@ -37,93 +40,8 @@ namespace llvm {
   class SelectionDAG;
   class MachineBasicBlock;
   class MachineInstr;
-
-namespace RTLIB {
-  /// RTLIB::Libcall enum - This enum defines all of the runtime library calls
-  /// the backend can emit.
-  ///
-  enum Libcall {
-    // Integer
-    SHL_I32,
-    SHL_I64,
-    SRL_I32,
-    SRL_I64,
-    SRA_I32,
-    SRA_I64,
-    MUL_I32,
-    MUL_I64,
-    SDIV_I32,
-    SDIV_I64,
-    UDIV_I32,
-    UDIV_I64,
-    SREM_I32,
-    SREM_I64,
-    UREM_I32,
-    UREM_I64,
-    NEG_I32,
-    NEG_I64,
-
-    // FLOATING POINT
-    ADD_F32,
-    ADD_F64,
-    SUB_F32,
-    SUB_F64,
-    MUL_F32,
-    MUL_F64,
-    DIV_F32,
-    DIV_F64,
-    REM_F32,
-    REM_F64,
-    NEG_F32,
-    NEG_F64,
-    POWI_F32,
-    POWI_F64,
-    SQRT_F32,
-    SQRT_F64,
-    SIN_F32,
-    SIN_F64,
-    COS_F32,
-    COS_F64,
-
-    // CONVERSION
-    FPEXT_F32_F64,
-    FPROUND_F64_F32,
-    FPTOSINT_F32_I32,
-    FPTOSINT_F32_I64,
-    FPTOSINT_F64_I32,
-    FPTOSINT_F64_I64,
-    FPTOUINT_F32_I32,
-    FPTOUINT_F32_I64,
-    FPTOUINT_F64_I32,
-    FPTOUINT_F64_I64,
-    SINTTOFP_I32_F32,
-    SINTTOFP_I32_F64,
-    SINTTOFP_I64_F32,
-    SINTTOFP_I64_F64,
-    UINTTOFP_I32_F32,
-    UINTTOFP_I32_F64,
-    UINTTOFP_I64_F32,
-    UINTTOFP_I64_F64,
-
-    // COMPARISON
-    OEQ_F32,
-    OEQ_F64,
-    UNE_F32,
-    UNE_F64,
-    OGE_F32,
-    OGE_F64,
-    OLT_F32,
-    OLT_F64,
-    OLE_F32,
-    OLE_F64,
-    OGT_F32,
-    OGT_F64,
-    UO_F32,
-    UO_F64,
-
-    UNKNOWN_LIBCALL
-  };
-}
+  class VectorType;
+  class TargetSubtarget;
 
 //===----------------------------------------------------------------------===//
 /// TargetLowering - This class defines information used to lower LLVM code to
@@ -161,12 +79,13 @@ public:
     SchedulingForRegPressure   // Scheduling for lowest register pressure.
   };
 
-  TargetLowering(TargetMachine &TM);
+  explicit TargetLowering(TargetMachine &TM);
   virtual ~TargetLowering();
 
   TargetMachine &getTargetMachine() const { return TM; }
   const TargetData *getTargetData() const { return TD; }
 
+  bool isBigEndian() const { return !IsLittleEndian; }
   bool isLittleEndian() const { return IsLittleEndian; }
   MVT::ValueType getPointerTy() const { return PointerTy; }
   MVT::ValueType getShiftAmountTy() const { return ShiftAmountTy; }
@@ -175,7 +94,7 @@ public:
 
   /// usesGlobalOffsetTable - Return true if this target uses a GOT for PIC
   /// codegen.
   bool usesGlobalOffsetTable() const { return UsesGlobalOffsetTable; }
-  
+
   /// isSelectExpensive - Return true if the select operation is expensive for
   /// this target.
   bool isSelectExpensive() const { return SelectIsExpensive; }
@@ -187,7 +106,7 @@ public:
   /// isPow2DivCheap() - Return true if pow2 div is cheaper than a chain of
   /// srl/add/sra.
   bool isPow2DivCheap() const { return Pow2DivIsCheap; }
-  
+
   /// getSetCCResultTy - Return the ValueType of the result of setcc operations.
   ///
   MVT::ValueType getSetCCResultTy() const { return SetCCResultTy; }
@@ -205,6 +124,7 @@ public:
   /// getRegClassFor - Return the register class that should be used for the
   /// specified value type.  This may only be called on legal types.
   TargetRegisterClass *getRegClassFor(MVT::ValueType VT) const {
+    assert(VT < array_lengthof(RegClassForVT));
     TargetRegisterClass *RC = RegClassForVT[VT];
     assert(RC && "This value type is not natively supported!");
     return RC;
@@ -214,7 +134,8 @@ public:
   /// specified value type.  This means that it has a register that directly
   /// holds it without promotions or expansions.
   bool isTypeLegal(MVT::ValueType VT) const {
-    return RegClassForVT[VT] != 0;
+    assert(MVT::isExtendedVT(VT) || VT < array_lengthof(RegClassForVT));
+    return !MVT::isExtendedVT(VT) && RegClassForVT[VT] != 0;
   }
 
   class ValueTypeActionImpl {
@@ -232,11 +153,18 @@ public:
     }
     LegalizeAction getTypeAction(MVT::ValueType VT) const {
+      if (MVT::isExtendedVT(VT)) {
+        if (MVT::isVector(VT)) return Expand;
+        if (MVT::isInteger(VT))
+          // First promote to a power-of-two size, then expand if necessary.
+          return VT == MVT::RoundIntegerType(VT) ? Expand : Promote;
+        assert(0 && "Unsupported extended type!");
+      }
+      assert(VT<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
       return (LegalizeAction)((ValueTypeActions[VT>>4] >> ((2*VT) & 31)) & 3);
     }
     void setTypeAction(MVT::ValueType VT, LegalizeAction Action) {
-      assert(unsigned(VT >> 4) <
-             sizeof(ValueTypeActions)/sizeof(ValueTypeActions[0]));
+      assert(VT<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
      ValueTypeActions[VT>>4] |= Action << ((VT*2) & 31);
    }
  };
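// Illustrative example (editorial, not part of the original patch): how the
// legalization queries above behave, assuming a hypothetical 32-bit target
// whose only native integer register type is i32 ("TLI" is a TargetLowering
// reference):
//
//   TLI.getTypeAction(MVT::i16);  // Promote - widened into an i32 register
//   TLI.getTypeAction(MVT::i64);  // Expand  - split into two i32 halves
//   TLI.getTypeAction(MVT::i36);  // Promote - per the extended-type code
//                                 //   above, first rounded up to i64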
@@ -260,20 +188,41 @@ public:
   /// to get to the smaller register. For illegal floating point types, this
   /// returns the integer type to transform to.
   MVT::ValueType getTypeToTransformTo(MVT::ValueType VT) const {
-    return TransformToType[VT];
+    if (!MVT::isExtendedVT(VT)) {
+      assert(VT < array_lengthof(TransformToType));
+      MVT::ValueType NVT = TransformToType[VT];
+      assert(getTypeAction(NVT) != Promote &&
+             "Promote may not follow Expand or Promote");
+      return NVT;
+    }
+
+    if (MVT::isVector(VT))
+      return MVT::getVectorType(MVT::getVectorElementType(VT),
+                                MVT::getVectorNumElements(VT) / 2);
+    if (MVT::isInteger(VT)) {
+      MVT::ValueType NVT = MVT::RoundIntegerType(VT);
+      if (NVT == VT)
+        // Size is a power of two - expand to half the size.
+        return MVT::getIntegerType(MVT::getSizeInBits(VT) / 2);
+      else
+        // Promote to a power of two size, avoiding multi-step promotion.
+        return getTypeAction(NVT) == Promote ? getTypeToTransformTo(NVT) : NVT;
+    }
+    assert(0 && "Unsupported extended type!");
   }
-  
+
   /// getTypeToExpandTo - For types supported by the target, this is an
   /// identity function.  For types that must be expanded (i.e. integer types
   /// that are larger than the largest integer register or illegal floating
   /// point types), this returns the largest legal type it will be expanded to.
   MVT::ValueType getTypeToExpandTo(MVT::ValueType VT) const {
+    assert(!MVT::isVector(VT));
     while (true) {
       switch (getTypeAction(VT)) {
       case Legal:
         return VT;
       case Expand:
-        VT = TransformToType[VT];
+        VT = getTypeToTransformTo(VT);
         break;
       default:
         assert(false && "Type is not legal nor is it to be expanded!");
@@ -283,20 +232,21 @@ public:
     return VT;
   }
 
-  /// getPackedTypeBreakdown - Packed types are broken down into some number of
-  /// legal first class types.  For example, <8 x float> maps to 2 MVT::v4f32
+  /// getVectorTypeBreakdown - Vector types are broken down into some number of
+  /// legal first class types.  For example, MVT::v8f32 maps to 2 MVT::v4f32
   /// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
-  /// Similarly, <2 x long> turns into 4 MVT::i32 values with both PPC and X86.
+  /// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
   ///
   /// This method returns the number of registers needed, and the VT for each
-  /// register.  It also returns the VT of the PackedType elements before they
-  /// are promoted/expanded.
+  /// register.  It also returns the VT and quantity of the intermediate values
+  /// before they are promoted/expanded.
   ///
-  unsigned getPackedTypeBreakdown(const PackedType *PTy, 
-                                  MVT::ValueType &PTyElementVT,
-                                  MVT::ValueType &PTyLegalElementVT) const;
+  unsigned getVectorTypeBreakdown(MVT::ValueType VT, 
+                                  MVT::ValueType &IntermediateVT,
+                                  unsigned &NumIntermediates,
+                                  MVT::ValueType &RegisterVT) const;
 
-  typedef std::vector<double>::const_iterator legal_fpimm_iterator;
+  typedef std::vector<APFloat>::const_iterator legal_fpimm_iterator;
   legal_fpimm_iterator legal_fpimm_begin() const {
     return LegalFPImmediates.begin();
   }
@@ -327,6 +277,9 @@ public:
   /// expanded to some other code sequence, or the target has a custom expander
   /// for it.
   LegalizeAction getOperationAction(unsigned Op, MVT::ValueType VT) const {
+    if (MVT::isExtendedVT(VT)) return Expand;
+    assert(Op < array_lengthof(OpActions) &&
+           VT < sizeof(OpActions[0])*4 && "Table isn't big enough!");
     return (LegalizeAction)((OpActions[Op] >> (2*VT)) & 3);
   }
 
@@ -342,28 +295,36 @@ public:
   /// expanded to some other code sequence, or the target has a custom expander
   /// for it.
   LegalizeAction getLoadXAction(unsigned LType, MVT::ValueType VT) const {
+    assert(LType < array_lengthof(LoadXActions) &&
+           VT < sizeof(LoadXActions[0])*4 && "Table isn't big enough!");
     return (LegalizeAction)((LoadXActions[LType] >> (2*VT)) & 3);
   }
 
   /// isLoadXLegal - Return true if the specified load with extension is legal
   /// on this target.
   bool isLoadXLegal(unsigned LType, MVT::ValueType VT) const {
-    return getLoadXAction(LType, VT) == Legal ||
-           getLoadXAction(LType, VT) == Custom;
+    return !MVT::isExtendedVT(VT) &&
+      (getLoadXAction(LType, VT) == Legal ||
+       getLoadXAction(LType, VT) == Custom);
   }
 
-  /// getStoreXAction - Return how this store with truncation should be treated:
-  /// either it is legal, needs to be promoted to a larger size, needs to be
-  /// expanded to some other code sequence, or the target has a custom expander
-  /// for it.
-  LegalizeAction getStoreXAction(MVT::ValueType VT) const {
-    return (LegalizeAction)((StoreXActions >> (2*VT)) & 3);
+  /// getTruncStoreAction - Return how this store with truncation should be
+  /// treated: either it is legal, needs to be promoted to a larger size, needs
+  /// to be expanded to some other code sequence, or the target has a custom
+  /// expander for it.
+  LegalizeAction getTruncStoreAction(MVT::ValueType ValVT, 
+                                     MVT::ValueType MemVT) const {
+    assert(ValVT < array_lengthof(TruncStoreActions) &&
+           MemVT < sizeof(TruncStoreActions[0])*4 && "Table isn't big enough!");
+    return (LegalizeAction)((TruncStoreActions[ValVT] >> (2*MemVT)) & 3);
   }
 
-  /// isStoreXLegal - Return true if the specified store with truncation is
+  /// isTruncStoreLegal - Return true if the specified store with truncation is
   /// legal on this target.
-  bool isStoreXLegal(MVT::ValueType VT) const {
-    return getStoreXAction(VT) == Legal || getStoreXAction(VT) == Custom;
+  bool isTruncStoreLegal(MVT::ValueType ValVT, MVT::ValueType MemVT) const {
+    return !MVT::isExtendedVT(MemVT) &&
+      (getTruncStoreAction(ValVT, MemVT) == Legal ||
+       getTruncStoreAction(ValVT, MemVT) == Custom);
  }
 
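// Illustrative example (editorial, not part of the original patch): how a DAG
// combine might consult these tables before forming an extending load or a
// truncating store; the types are illustrative, not mandated by any target:
//
//   if (TLI.isLoadXLegal(ISD::SEXTLOAD, MVT::i16))
//     ...;  // fold (sext (load i16)) into one extending load node
//   if (TLI.isTruncStoreLegal(MVT::i32, MVT::i8))
//     ...;  // store the low 8 bits of an i32 without a separate truncate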
   /// getIndexedLoadAction - Return how the indexed load should be treated:
@@ -372,6 +333,9 @@ public:
   /// for it.
   LegalizeAction getIndexedLoadAction(unsigned IdxMode,
                                       MVT::ValueType VT) const {
+    assert(IdxMode < array_lengthof(IndexedModeActions[0]) &&
+           VT < sizeof(IndexedModeActions[0][0])*4 &&
+           "Table isn't big enough!");
     return (LegalizeAction)((IndexedModeActions[0][IdxMode] >> (2*VT)) & 3);
   }
 
   /// getIndexedStoreAction - Return how the indexed store should be treated:
@@ -388,6 +352,9 @@ public:
   /// for it.
   LegalizeAction getIndexedStoreAction(unsigned IdxMode,
                                        MVT::ValueType VT) const {
+    assert(IdxMode < array_lengthof(IndexedModeActions[1]) &&
+           VT < sizeof(IndexedModeActions[1][0])*4 &&
+           "Table isn't big enough!");
     return (LegalizeAction)((IndexedModeActions[1][IdxMode] >> (2*VT)) & 3);
   }
 
@@ -398,6 +365,24 @@ public:
            getIndexedStoreAction(IdxMode, VT) == Custom;
   }
 
+  /// getConvertAction - Return how the conversion should be treated:
+  /// either it is legal, needs to be promoted to a larger size, needs to be
+  /// expanded to some other code sequence, or the target has a custom expander
+  /// for it.
+  LegalizeAction
+  getConvertAction(MVT::ValueType FromVT, MVT::ValueType ToVT) const {
+    assert(FromVT < array_lengthof(ConvertActions) &&
+           ToVT < sizeof(ConvertActions[0])*4 && "Table isn't big enough!");
+    return (LegalizeAction)((ConvertActions[FromVT] >> (2*ToVT)) & 3);
+  }
+
+  /// isConvertLegal - Return true if the specified conversion is legal
+  /// on this target.
+  bool isConvertLegal(MVT::ValueType FromVT, MVT::ValueType ToVT) const {
+    return getConvertAction(FromVT, ToVT) == Legal ||
+           getConvertAction(FromVT, ToVT) == Custom;
+  }
+
   /// getTypeToPromoteTo - If the action for this operation is to promote, this
   /// method returns the ValueType to promote to.
   MVT::ValueType getTypeToPromoteTo(unsigned Op, MVT::ValueType VT) const {
@@ -424,40 +409,65 @@ public:
   }
 
   /// getValueType - Return the MVT::ValueType corresponding to this LLVM type.
-  /// This is fixed by the LLVM operations except for the pointer size.
-  MVT::ValueType getValueType(const Type *Ty) const {
-    switch (Ty->getTypeID()) {
-    default: assert(0 && "Unknown type!");
-    case Type::VoidTyID:    return MVT::isVoid;
-    case Type::IntegerTyID:
-      switch (cast<IntegerType>(Ty)->getBitWidth()) {
-      default: assert(0 && "Invalid width for value type");
-      case 1:    return MVT::i1;
-      case 8:    return MVT::i8;
-      case 16:   return MVT::i16;
-      case 32:   return MVT::i32;
-      case 64:   return MVT::i64;
-      }
-      break;
-    case Type::FloatTyID:   return MVT::f32;
-    case Type::DoubleTyID:  return MVT::f64;
-    case Type::PointerTyID: return PointerTy;
-    case Type::PackedTyID:  return MVT::Vector;
+  /// This is fixed by the LLVM operations except for the pointer size.  If
+  /// AllowUnknown is true, this will return MVT::Other for types with no MVT
+  /// counterpart (e.g. structs), otherwise it will assert.
+  MVT::ValueType getValueType(const Type *Ty, bool AllowUnknown = false) const {
+    MVT::ValueType VT = MVT::getValueType(Ty, AllowUnknown);
+    return VT == MVT::iPTR ? PointerTy : VT;
+  }
+
+  /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
+  /// function arguments in the caller parameter area.
+  virtual unsigned getByValTypeAlignment(const Type *Ty) const;
+
+  /// getRegisterType - Return the type of registers that this ValueType will
+  /// eventually require.
+  MVT::ValueType getRegisterType(MVT::ValueType VT) const {
+    if (!MVT::isExtendedVT(VT)) {
+      assert(VT < array_lengthof(RegisterTypeForVT));
+      return RegisterTypeForVT[VT];
+    }
+    if (MVT::isVector(VT)) {
+      MVT::ValueType VT1, RegisterVT;
+      unsigned NumIntermediates;
+      (void)getVectorTypeBreakdown(VT, VT1, NumIntermediates, RegisterVT);
+      return RegisterVT;
     }
-    return MVT::isVoid;      // Silence a compiler warning.
+    if (MVT::isInteger(VT)) {
+      return getRegisterType(getTypeToTransformTo(VT));
+    }
+    assert(0 && "Unsupported extended type!");
   }
 
-  /// getNumElements - Return the number of registers that this ValueType will
+  /// getNumRegisters - Return the number of registers that this ValueType will
   /// eventually require.  This is one for any types promoted to live in larger
   /// registers, but may be more than one for types (like i64) that are split
-  /// into pieces.
-  unsigned getNumElements(MVT::ValueType VT) const {
-    return NumElementsForVT[VT];
+  /// into pieces.  For types like i140, which are first promoted then expanded,
+  /// it is the number of registers needed to hold all the bits of the original
+  /// type.  For an i140 on a 32 bit machine this means 5 registers.
+  unsigned getNumRegisters(MVT::ValueType VT) const {
+    if (!MVT::isExtendedVT(VT)) {
+      assert(VT < array_lengthof(NumRegistersForVT));
+      return NumRegistersForVT[VT];
+    }
+    if (MVT::isVector(VT)) {
+      MVT::ValueType VT1, VT2;
+      unsigned NumIntermediates;
+      return getVectorTypeBreakdown(VT, VT1, NumIntermediates, VT2);
+    }
+    if (MVT::isInteger(VT)) {
+      unsigned BitWidth = MVT::getSizeInBits(VT);
+      unsigned RegWidth = MVT::getSizeInBits(getRegisterType(VT));
+      return (BitWidth + RegWidth - 1) / RegWidth;
+    }
+    assert(0 && "Unsupported extended type!");
  }
-  
+
   /// hasTargetDAGCombine - If true, the target has custom DAG combine
   /// transformations that it can perform for the specified node.
   bool hasTargetDAGCombine(ISD::NodeType NT) const {
+    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
     return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
   }
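// Illustrative example (editorial, not part of the original patch): the i140
// case from the comment above, worked through on a 32-bit target whose
// largest integer register type is i32:
//
//   getRegisterType(MVT::i140)  ->  MVT::i32   // legalized to the native word
//   getNumRegisters(MVT::i140)  ->  5          // (140 + 32 - 1) / 32 == 5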
@@ -508,6 +518,20 @@ public:
     return StackPointerRegisterToSaveRestore;
   }
 
+  /// getExceptionAddressRegister - If a physical register, this returns
+  /// the register that receives the exception address on entry to a landing
+  /// pad.
+  unsigned getExceptionAddressRegister() const {
+    return ExceptionPointerRegister;
+  }
+
+  /// getExceptionSelectorRegister - If a physical register, this returns
+  /// the register that receives the exception typeid on entry to a landing
+  /// pad.
+  unsigned getExceptionSelectorRegister() const {
+    return ExceptionSelectorRegister;
+  }
+
   /// getJumpBufSize - returns the target's jmp_buf size in bytes (if never
   /// set, the default is 200)
   unsigned getJumpBufSize() const {
@@ -520,6 +544,19 @@ public:
     return JumpBufAlignment;
   }
 
+  /// getIfCvtBlockSizeLimit - returns the target specific if-conversion block
+  /// size limit.  Any block whose size is greater should not be predicated.
+  virtual unsigned getIfCvtBlockSizeLimit() const {
+    return IfCvtBlockSizeLimit;
+  }
+
+  /// getIfCvtDupBlockSizeLimit - returns the target specific size limit for a
+  /// block to be considered for duplication.  Any block whose size is greater
+  /// should not be duplicated to facilitate its predication.
+  virtual unsigned getIfCvtDupBlockSizeLimit() const {
+    return IfCvtDupBlockSizeLimit;
+  }
+
   /// getPreIndexedAddressParts - returns true by value, base pointer and
   /// offset pointer and addressing mode by reference if the node's address
   /// can be legally represented as pre-indexed load / store address.
@@ -540,6 +577,11 @@ public:
     return false;
   }
 
+  /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
+  /// jumptable.
+  virtual SDOperand getPICJumpTableRelocBase(SDOperand Table,
+                                             SelectionDAG &DAG) const;
+
   //===--------------------------------------------------------------------===//
   // TargetLowering Optimization Methods
   //
@@ -549,10 +591,12 @@ public:
   /// that want to combine
   struct TargetLoweringOpt {
     SelectionDAG &DAG;
+    bool AfterLegalize;
     SDOperand Old;
    SDOperand New;

-    TargetLoweringOpt(SelectionDAG &InDAG) : DAG(InDAG) {}
+    explicit TargetLoweringOpt(SelectionDAG &InDAG, bool afterLegalize)
+      : DAG(InDAG), AfterLegalize(afterLegalize) {}

    bool CombineTo(SDOperand O, SDOperand N) {
      Old = O;
@@ -561,26 +605,12 @@ public:
    }
 
     /// ShrinkDemandedConstant - Check to see if the specified operand of the
-    /// specified instruction is a constant integer.  If so, check to see if there
-    /// are any bits set in the constant that are not demanded.  If so, shrink the
-    /// constant and return true.
+    /// specified instruction is a constant integer.  If so, check to see if
+    /// there are any bits set in the constant that are not demanded.  If so,
+    /// shrink the constant and return true.
     bool ShrinkDemandedConstant(SDOperand Op, uint64_t Demanded);
   };
 
-  /// MaskedValueIsZero - Return true if 'Op & Mask' is known to be zero.  We
-  /// use this predicate to simplify operations downstream.  Op and Mask are
-  /// known to be the same type.
-  bool MaskedValueIsZero(SDOperand Op, uint64_t Mask, unsigned Depth = 0)
-    const;
-
-  /// ComputeMaskedBits - Determine which of the bits specified in Mask are
-  /// known to be either zero or one and return them in the KnownZero/KnownOne
-  /// bitsets.  This code only analyzes bits in Mask, in order to short-circuit
-  /// processing.  Targets can implement the computeMaskedBitsForTargetNode
-  /// method, to allow target nodes to be understood.
-  void ComputeMaskedBits(SDOperand Op, uint64_t Mask, uint64_t &KnownZero,
-                         uint64_t &KnownOne, unsigned Depth = 0) const;
-
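// Illustrative example (editorial, not part of the original patch): what
// ShrinkDemandedConstant does, on a made-up case.  If only the low 4 bits of
// an AND's result are demanded downstream, a mask like 0xFF has set bits that
// can never be observed, so the constant can be narrowed:
//
//   (and X, 0xFF), Demanded = 0xF   -->   TLO.CombineTo(Op, (and X, 0xF))
//
// CombineTo records the old and new nodes in TLO.Old/TLO.New and returns true
// so the caller knows a replacement was made.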
   /// SimplifyDemandedBits - Look at Op.  At this point, we know that only the
   /// DemandedMask bits of the result of Op are ever used downstream.  If we can
   /// use this information to simplify Op, create a new simplified DAG node and
@@ -597,18 +627,12 @@ public:
   /// Mask are known to be either zero or one and return them in the
   /// KnownZero/KnownOne bitsets.
   virtual void computeMaskedBitsForTargetNode(const SDOperand Op,
-                                              uint64_t Mask,
-                                              uint64_t &KnownZero,
-                                              uint64_t &KnownOne,
+                                              const APInt &Mask,
+                                              APInt &KnownZero,
+                                              APInt &KnownOne,
+                                              const SelectionDAG &DAG,
                                               unsigned Depth = 0) const;
 
-  /// ComputeNumSignBits - Return the number of times the sign bit of the
-  /// register is replicated into the other bits.  We know that at least 1 bit
-  /// is always equal to the sign bit (itself), but other cases can give us
-  /// information.  For example, immediately after an "SRA X, 2", we know that
-  /// the top 3 bits are all equal to each other, so we return 3.
-  unsigned ComputeNumSignBits(SDOperand Op, unsigned Depth = 0) const;
-
   /// ComputeNumSignBitsForTargetNode - This method can be implemented by
   /// targets that want to expose additional information about sign bits to the
   /// DAG Combiner.
@@ -618,13 +642,15 @@ public:
   struct DAGCombinerInfo {
     void *DC;  // The DAG Combiner object.
     bool BeforeLegalize;
+    bool CalledByLegalizer;
   public:
     SelectionDAG &DAG;
-    
-    DAGCombinerInfo(SelectionDAG &dag, bool bl, void *dc)
-      : DC(dc), BeforeLegalize(bl), DAG(dag) {}
+
+    DAGCombinerInfo(SelectionDAG &dag, bool bl, bool cl, void *dc)
+      : DC(dc), BeforeLegalize(bl), CalledByLegalizer(cl), DAG(dag) {}
 
     bool isBeforeLegalize() const { return BeforeLegalize; }
+    bool isCalledByLegalizer() const { return CalledByLegalizer; }
 
     void AddToWorklist(SDNode *N);
     SDOperand CombineTo(SDNode *N, const std::vector<SDOperand> &To);
@@ -632,6 +658,12 @@ public:
     SDOperand CombineTo(SDNode *N, SDOperand Res0, SDOperand Res1);
   };
 
+  /// SimplifySetCC - Try to simplify a setcc built with the specified operands
+  /// and cc.  If it is unable to simplify it, return a null SDOperand.
+  SDOperand SimplifySetCC(MVT::ValueType VT, SDOperand N0, SDOperand N1,
+                          ISD::CondCode Cond, bool foldBooleans,
+                          DAGCombinerInfo &DCI) const;
+
   /// PerformDAGCombine - This method will be invoked for all target nodes and
   /// for any target-independent nodes that the target has registered with
   /// invoke it for.
@@ -701,6 +733,20 @@ protected:
     StackPointerRegisterToSaveRestore = R;
   }
 
+  /// setExceptionPointerRegister - If set to a physical register, this sets
+  /// the register that receives the exception address on entry to a landing
+  /// pad.
+  void setExceptionPointerRegister(unsigned R) {
+    ExceptionPointerRegister = R;
+  }
+
+  /// setExceptionSelectorRegister - If set to a physical register, this sets
+  /// the register that receives the exception typeid on entry to a landing
+  /// pad.
+  void setExceptionSelectorRegister(unsigned R) {
+    ExceptionSelectorRegister = R;
+  }
+
   /// SelectIsExpensive - Tells the code generator not to expand operations
   /// into sequences that use the select operations if possible.
   void setSelectIsExpensive() { SelectIsExpensive = true; }
@@ -719,6 +765,7 @@ protected:
   /// regclass for the specified value type.  This indicates the selector can
   /// handle values of that class natively.
   void addRegisterClass(MVT::ValueType VT, TargetRegisterClass *RC) {
+    assert(VT < array_lengthof(RegClassForVT));
    AvailableRegClasses.push_back(std::make_pair(VT, RC));
    RegClassForVT[VT] = RC;
  }
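// Illustrative example (editorial, not part of the original patch): a minimal
// sketch of how a backend typically drives these protected setters from its
// TargetLowering subclass constructor.  The target name, register class, and
// specific choices are hypothetical:
//
//   FooTargetLowering::FooTargetLowering(TargetMachine &TM)
//     : TargetLowering(TM) {
//     addRegisterClass(MVT::i32, Foo::GPRRegisterClass);    // hypothetical RC
//     setOperationAction(ISD::SELECT, MVT::i32, Expand);    // no native select
//     setOperationAction(ISD::ROTR,   MVT::i32, Custom);    // LowerOperation()
//     setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
//     computeRegisterProperties();
//   }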
@@ -731,7 +778,7 @@ protected:
   /// with the specified type and indicate what to do about it.
   void setOperationAction(unsigned Op, MVT::ValueType VT,
                           LegalizeAction Action) {
-    assert(VT < 32 && Op < sizeof(OpActions)/sizeof(OpActions[0]) &&
+    assert(VT < sizeof(OpActions[0])*4 && Op < array_lengthof(OpActions) &&
            "Table isn't big enough!");
     OpActions[Op] &= ~(uint64_t(3UL) << VT*2);
     OpActions[Op] |= (uint64_t)Action << VT*2;
@@ -741,18 +788,21 @@ protected:
   /// work with the specified type and indicate what to do about it.
   void setLoadXAction(unsigned ExtType, MVT::ValueType VT,
                       LegalizeAction Action) {
-    assert(VT < 32 && ExtType < sizeof(LoadXActions)/sizeof(LoadXActions[0]) &&
+    assert(VT < sizeof(LoadXActions[0])*4 &&
+           ExtType < array_lengthof(LoadXActions) &&
            "Table isn't big enough!");
     LoadXActions[ExtType] &= ~(uint64_t(3UL) << VT*2);
     LoadXActions[ExtType] |= (uint64_t)Action << VT*2;
   }
-  
-  /// setStoreXAction - Indicate that the specified store with truncation does
+
+  /// setTruncStoreAction - Indicate that the specified truncating store does
   /// not work with the specified type and indicate what to do about it.
-  void setStoreXAction(MVT::ValueType VT, LegalizeAction Action) {
-    assert(VT < 32 && "Table isn't big enough!");
-    StoreXActions &= ~(uint64_t(3UL) << VT*2);
-    StoreXActions |= (uint64_t)Action << VT*2;
+  void setTruncStoreAction(MVT::ValueType ValVT, MVT::ValueType MemVT,
+                           LegalizeAction Action) {
+    assert(ValVT < array_lengthof(TruncStoreActions) &&
+           MemVT < sizeof(TruncStoreActions[0])*4 && "Table isn't big enough!");
+    TruncStoreActions[ValVT] &= ~(uint64_t(3UL) << MemVT*2);
+    TruncStoreActions[ValVT] |= (uint64_t)Action << MemVT*2;
   }
 
   /// setIndexedLoadAction - Indicate that the specified indexed load does or
@@ -761,8 +811,8 @@ protected:
   /// TargetLowering.cpp
   void setIndexedLoadAction(unsigned IdxMode, MVT::ValueType VT,
                             LegalizeAction Action) {
-    assert(VT < 32 && IdxMode <
-           sizeof(IndexedModeActions[0]) / sizeof(IndexedModeActions[0][0]) &&
+    assert(VT < sizeof(IndexedModeActions[0])*4 && IdxMode <
+           array_lengthof(IndexedModeActions[0]) &&
            "Table isn't big enough!");
     IndexedModeActions[0][IdxMode] &= ~(uint64_t(3UL) << VT*2);
     IndexedModeActions[0][IdxMode] |= (uint64_t)Action << VT*2;
@@ -774,13 +824,23 @@ protected:
   /// TargetLowering.cpp
   void setIndexedStoreAction(unsigned IdxMode, MVT::ValueType VT,
                              LegalizeAction Action) {
-    assert(VT < 32 && IdxMode <
-           sizeof(IndexedModeActions[1]) / sizeof(IndexedModeActions[1][0]) &&
+    assert(VT < sizeof(IndexedModeActions[1][0])*4 &&
+           IdxMode < array_lengthof(IndexedModeActions[1]) &&
            "Table isn't big enough!");
     IndexedModeActions[1][IdxMode] &= ~(uint64_t(3UL) << VT*2);
     IndexedModeActions[1][IdxMode] |= (uint64_t)Action << VT*2;
   }
 
+  /// setConvertAction - Indicate that the specified conversion does or does
+  /// not work with the specified type and indicate what to do about it.
+  void setConvertAction(MVT::ValueType FromVT, MVT::ValueType ToVT,
+                        LegalizeAction Action) {
+    assert(FromVT < array_lengthof(ConvertActions) &&
+           ToVT < sizeof(ConvertActions[0])*4 && "Table isn't big enough!");
+    ConvertActions[FromVT] &= ~(uint64_t(3UL) << ToVT*2);
+    ConvertActions[FromVT] |= (uint64_t)Action << ToVT*2;
+  }
+
   /// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the
   /// promotion code defaults to trying a larger integer/fp until it can find
   /// one that works.  If that default is insufficient, this method can be used
@@ -792,7 +852,7 @@ protected:
   /// addLegalFPImmediate - Indicate that this target can instruction select
   /// the specified FP immediate natively.
-  void addLegalFPImmediate(double Imm) {
+  void addLegalFPImmediate(const APFloat& Imm) {
     LegalFPImmediates.push_back(Imm);
   }
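// Illustrative example (editorial, not part of the original patch): with the
// switch from double to APFloat, a target that can materialize certain FP
// constants directly now registers them like this (the values are examples):
//
//   addLegalFPImmediate(APFloat(+0.0));   // e.g. materialized by an xor idiom
//   addLegalFPImmediate(APFloat(+1.0));   // e.g. a load-immediate form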
@@ -800,6 +860,7 @@ protected:
   /// independent node that they want to provide a custom DAG combiner for by
   /// implementing the PerformDAGCombine virtual method.
   void setTargetDAGCombine(ISD::NodeType NT) {
+    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
     TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
   }
 
@@ -814,9 +875,26 @@ protected:
   void setJumpBufAlignment(unsigned Align) {
     JumpBufAlignment = Align;
   }
+
+  /// setIfCvtBlockSizeLimit - Set the target's if-conversion block size
+  /// limit (in number of instructions); default is 2.
+  void setIfCvtBlockSizeLimit(unsigned Limit) {
+    IfCvtBlockSizeLimit = Limit;
+  }
+
+  /// setIfCvtDupBlockSizeLimit - Set the target's block size limit (in number
+  /// of instructions) to be considered for code duplication during
+  /// if-conversion; default is 2.
+  void setIfCvtDupBlockSizeLimit(unsigned Limit) {
+    IfCvtDupBlockSizeLimit = Limit;
+  }
 
 public:
+  virtual const TargetSubtarget *getSubtarget() {
+    assert(0 && "Not Implemented");
+    return NULL;    // this is here to silence compiler errors
+  }
 
   //===--------------------------------------------------------------------===//
   // Lowering methods - These methods must be implemented by targets so that
   // the SelectionDAGLowering code knows how to lower these.
@@ -834,21 +912,34 @@ public:
   struct ArgListEntry {
     SDOperand Node;
     const Type* Ty;
-    bool isSigned;
+    bool isSExt;
+    bool isZExt;
+    bool isInReg;
+    bool isSRet;
+    bool isNest;
+    bool isByVal;
+
+    ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
+      isSRet(false), isNest(false), isByVal(false) { }
   };
   typedef std::vector<ArgListEntry> ArgListTy;
   virtual std::pair<SDOperand, SDOperand>
-  LowerCallTo(SDOperand Chain, const Type *RetTy, bool RetTyIsSigned,
-              bool isVarArg, unsigned CallingConv, bool isTailCall,
+  LowerCallTo(SDOperand Chain, const Type *RetTy, bool RetSExt, bool RetZExt,
+              bool isVarArg, unsigned CallingConv, bool isTailCall,
               SDOperand Callee, ArgListTy &Args, SelectionDAG &DAG);
 
-  /// LowerFrameReturnAddress - This hook lowers a call to llvm.returnaddress or
-  /// llvm.frameaddress (depending on the value of the first argument).  The
-  /// return values are the result pointer and the resultant token chain.  If
-  /// not implemented, both of these intrinsics will return null.
-  virtual std::pair<SDOperand, SDOperand>
-  LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain, unsigned Depth,
-                          SelectionDAG &DAG);
+
+  virtual SDOperand LowerMEMCPY(SDOperand Op, SelectionDAG &DAG);
+  virtual SDOperand LowerMEMCPYCall(SDOperand Chain, SDOperand Dest,
+                                    SDOperand Source, SDOperand Count,
+                                    SelectionDAG &DAG);
+  virtual SDOperand LowerMEMCPYInline(SDOperand Chain, SDOperand Dest,
+                                      SDOperand Source, unsigned Size,
+                                      unsigned Align, SelectionDAG &DAG) {
+    assert(0 && "Not Implemented");
+    return SDOperand();   // this is here to silence compiler errors
+  }
+
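// Illustrative example (editorial, not part of the original patch): a sketch
// of how a target's lowering code typically builds the argument list for
// LowerCallTo when falling back to a runtime routine.  Chain, Callee, Ptr,
// TD, and DAG are assumed to be in scope in the lowering routine:
//
//   TargetLowering::ArgListTy Args;
//   TargetLowering::ArgListEntry Entry;
//   Entry.Node = Ptr;                    // an SDOperand already in hand
//   Entry.Ty   = TD->getIntPtrType();    // its IR-level type
//   Args.push_back(Entry);               // flags default to false
//   std::pair<SDOperand, SDOperand> Result =
//     LowerCallTo(Chain, Type::VoidTy, false, false, false, CallingConv::C,
//                 false, Callee, Args, DAG);
//   // Result.first is the return value, Result.second the token chain.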
 
   /// LowerOperation - This callback is invoked for operations that are
   /// unsupported by the target, which are registered to use 'custom' lowering,
@@ -857,6 +948,28 @@ public:
   /// implement this.  The default implementation of this aborts.
   virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);
 
+  /// ExpandOperationResult - This callback is invoked for operations that are
+  /// unsupported by the target, which are registered to use 'custom' lowering,
+  /// and whose result type needs to be expanded.  This must return a node whose
+  /// results precisely match the results of the input node.  This typically
+  /// involves a MERGE_VALUES node and/or BUILD_PAIR.
+  ///
+  /// If the target has no operations that require custom lowering, it need not
+  /// implement this.  The default implementation of this aborts.
+  virtual SDNode *ExpandOperationResult(SDNode *N, SelectionDAG &DAG) {
+    assert(0 && "ExpandOperationResult not implemented for this target!");
+    return 0;
+  }
+
+  /// IsEligibleForTailCallOptimization - Check whether the call is eligible for
+  /// tail call optimization.  Targets which want to do tail call optimization
+  /// should override this function.
+  virtual bool IsEligibleForTailCallOptimization(SDOperand Call,
+                                                 SDOperand Ret,
+                                                 SelectionDAG &DAG) const {
+    return false;
+  }
+
   /// CustomPromoteOperation - This callback is invoked for operations that are
   /// unsupported by the target, are registered to use 'custom' lowering, and
   /// whose type needs to be promoted.
@@ -878,9 +991,9 @@ public:
     C_Unknown              // Unsupported constraint.
   };
 
-  /// getConstraintType - Given a constraint letter, return the type of
-  /// constraint it is for this target.
-  virtual ConstraintType getConstraintType(char ConstraintLetter) const;
+  /// getConstraintType - Given a constraint, return the type of constraint it
+  /// is for this target.
+  virtual ConstraintType getConstraintType(const std::string &Constraint) const;
 
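// Illustrative example (editorial, not part of the original patch): the
// signature change above means a target now sees the whole constraint string.
// A typical override, with a made-up target-specific constraint handled
// before deferring to the default:
//
//   TargetLowering::ConstraintType
//   FooTargetLowering::getConstraintType(const std::string &Constraint) const {
//     if (Constraint.size() == 1 && Constraint[0] == 'b')   // hypothetical
//       return C_RegisterClass;
//     return TargetLowering::getConstraintType(Constraint);
//   }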
 
   /// getRegClassForInlineAsmConstraint - Given a constraint letter (e.g. "r"),
@@ -904,50 +1017,73 @@ public:
   getRegForInlineAsmConstraint(const std::string &Constraint,
                                MVT::ValueType VT) const;
 
+  /// lowerXConstraint - try to replace an X constraint, which matches anything,
+  /// with another that has more specific requirements based on the type of the
+  /// corresponding operand.
+  virtual void lowerXConstraint(MVT::ValueType ConstraintVT,
+                                std::string&) const;
 
-  /// isOperandValidForConstraint - Return the specified operand (possibly
-  /// modified) if the specified SDOperand is valid for the specified target
-  /// constraint letter, otherwise return null.
-  virtual SDOperand
-    isOperandValidForConstraint(SDOperand Op, char ConstraintLetter,
-                                SelectionDAG &DAG);
+  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
+  /// vector.  If it is invalid, don't add anything to Ops.
+  virtual void LowerAsmOperandForConstraint(SDOperand Op, char ConstraintLetter,
+                                            std::vector<SDOperand> &Ops,
+                                            SelectionDAG &DAG);
 
   //===--------------------------------------------------------------------===//
   // Scheduler hooks
   //
 
-  // InsertAtEndOfBasicBlock - This method should be implemented by targets that
-  // mark instructions with the 'usesCustomDAGSchedInserter' flag.  These
+  // EmitInstrWithCustomInserter - This method should be implemented by targets
+  // that mark instructions with the 'usesCustomDAGSchedInserter' flag.  These
   // instructions are special in various ways, which require special support to
   // insert.  The specified MachineInstr is created but not inserted into any
   // basic blocks, and the scheduler passes ownership of it to this method.
-  virtual MachineBasicBlock *InsertAtEndOfBasicBlock(MachineInstr *MI,
-                                                     MachineBasicBlock *MBB);
+  virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
+                                                         MachineBasicBlock *MBB);
 
   //===--------------------------------------------------------------------===//
-  // Loop Strength Reduction hooks
+  // Addressing mode description hooks (used by LSR etc).
   //
-  
-  /// isLegalAddressImmediate - Return true if the integer value or GlobalValue
-  /// can be used as the offset of the target addressing mode.
-  virtual bool isLegalAddressImmediate(int64_t V) const;
-  virtual bool isLegalAddressImmediate(GlobalValue *GV) const;
 
-  typedef std::vector<unsigned>::const_iterator legal_am_scale_iterator;
-  legal_am_scale_iterator legal_am_scale_begin() const {
-    return LegalAddressScales.begin();
-  }
-  legal_am_scale_iterator legal_am_scale_end() const {
-    return LegalAddressScales.end();
+  /// AddrMode - This represents an addressing mode of:
+  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
+  /// If BaseGV is null,  there is no BaseGV.
+  /// If BaseOffs is zero, there is no base offset.
+  /// If HasBaseReg is false, there is no base register.
+  /// If Scale is zero, there is no ScaleReg.  Scale of 1 indicates a reg with
+  /// no scale.
+  ///
+  struct AddrMode {
+    GlobalValue *BaseGV;
+    int64_t      BaseOffs;
+    bool         HasBaseReg;
+    int64_t      Scale;
+    AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
+  };
+
+  /// isLegalAddressingMode - Return true if the addressing mode represented by
+  /// AM is legal for this target, for a load/store of the specified type.
+  /// TODO: Handle pre/postinc as well.
+  virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const;
+
+  /// isTruncateFree - Return true if it's free to truncate a value of
+  /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
+  /// register EAX to i16 by referencing its sub-register AX.
+  virtual bool isTruncateFree(const Type *Ty1, const Type *Ty2) const {
+    return false;
   }
+
+  virtual bool isTruncateFree(MVT::ValueType VT1, MVT::ValueType VT2) const {
+    return false;
+  }
 
   //===--------------------------------------------------------------------===//
   // Div utility functions
   //
   SDOperand BuildSDIV(SDNode *N, SelectionDAG &DAG, 
-                      std::vector<SDNode*>* Created) const;
+                      std::vector<SDNode*>* Created) const;
   SDOperand BuildUDIV(SDNode *N, SelectionDAG &DAG, 
-                      std::vector<SDNode*>* Created) const;
+                      std::vector<SDNode*>* Created) const;
 
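// Illustrative example (editorial, not part of the original patch): how LSR
// is expected to query the AddrMode hook introduced above.  For a target with
// x86-like [BaseReg + Scale*ScaleReg + BaseOffs] addressing:
//
//   TargetLowering::AddrMode AM;
//   AM.BaseOffs   = 4;       // constant displacement
//   AM.HasBaseReg = true;    // a base register is present
//   AM.Scale      = 2;       // base + 2*index
//   bool OK = TLI.isLegalAddressingMode(AM, Type::Int32Ty);
//
// would return true, while Scale = 3 would be rejected on such a target.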
 
   //===--------------------------------------------------------------------===//
@@ -956,27 +1092,29 @@ public:
 
   /// setLibcallName - Rename the default libcall routine name for the specified
   /// libcall.
-  void setLibcallName(RTLIB::Libcall Call, std::string Name) {
+  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
     LibcallRoutineNames[Call] = Name;
   }
 
   /// getLibcallName - Get the libcall routine name for the specified libcall.
   ///
   const char *getLibcallName(RTLIB::Libcall Call) const {
-    return LibcallRoutineNames[Call].c_str();
+    return LibcallRoutineNames[Call];
   }
 
-protected:
-  /// addLegalAddressScale - Add a integer (> 1) value which can be used as
-  /// scale in the target addressing mode. Note: the ordering matters so the
-  /// least efficient ones should be entered first.
-  void addLegalAddressScale(unsigned Scale) {
-    LegalAddressScales.push_back(Scale);
+  /// setCmpLibcallCC - Override the default CondCode to be used to test the
+  /// result of the comparison libcall against zero.
+  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
+    CmpLibcallCCs[Call] = CC;
+  }
+
+  /// getCmpLibcallCC - Get the CondCode that's to be used to test the result of
+  /// the comparison libcall against zero.
+  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
+    return CmpLibcallCCs[Call];
   }
 
 private:
-  std::vector<unsigned> LegalAddressScales;
-
   TargetMachine &TM;
   const TargetData *TD;
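// Illustrative example (editorial, not part of the original patch): a sketch
// of a target constructor overriding a libcall and its comparison condition
// via the hooks above; the routine name shown is illustrative of a soft-float
// ABI setup, not prescribed here:
//
//   setLibcallName(RTLIB::ADD_F32, "__aeabi_fadd");   // rename the routine
//   setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETEQ);      // cmp libcall result == 0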
@@ -1039,16 +1177,35 @@ private:
   /// JumpBufAlignment - The alignment, in bytes, of the target's jmp_buf
   /// buffers
   unsigned JumpBufAlignment;
+
+  /// IfCvtBlockSizeLimit - The maximum allowed size for a block to be
+  /// if-converted.
+  unsigned IfCvtBlockSizeLimit;
 
+  /// IfCvtDupBlockSizeLimit - The maximum allowed size for a block to be
+  /// duplicated during if-conversion.
+  unsigned IfCvtDupBlockSizeLimit;
+
   /// StackPointerRegisterToSaveRestore - If set to a physical register, this
   /// specifies the register that llvm.savestack/llvm.restorestack should save
   /// and restore.
   unsigned StackPointerRegisterToSaveRestore;
 
+  /// ExceptionPointerRegister - If set to a physical register, this specifies
+  /// the register that receives the exception address on entry to a landing
+  /// pad.
+  unsigned ExceptionPointerRegister;
+
+  /// ExceptionSelectorRegister - If set to a physical register, this specifies
+  /// the register that receives the exception typeid on entry to a landing
+  /// pad.
+  unsigned ExceptionSelectorRegister;
+
   /// RegClassForVT - This indicates the default register class to use for
   /// each ValueType the target supports natively.
   TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
-  unsigned char NumElementsForVT[MVT::LAST_VALUETYPE];
+  unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
+  MVT::ValueType RegisterTypeForVT[MVT::LAST_VALUETYPE];
 
   /// TransformToType - For any value types we are promoting or expanding, this
   /// contains the value type that we are changing to.  For Expanded types, this
@@ -1069,19 +1226,25 @@ private:
   /// with the load.
   uint64_t LoadXActions[ISD::LAST_LOADX_TYPE];
 
-  /// StoreXActions - For each store with truncation of each value type, keep a
-  /// LegalizeAction that indicates how instruction selection should deal with
-  /// the store.
-  uint64_t StoreXActions;
+  /// TruncStoreActions - For each truncating store, keep a LegalizeAction that
+  /// indicates how instruction selection should deal with the store.
+  uint64_t TruncStoreActions[MVT::LAST_VALUETYPE];
 
   /// IndexedModeActions - For each indexed mode and each value type, keep a
   /// pair of LegalizeAction that indicates how instruction selection should
   /// deal with the load / store.
   uint64_t IndexedModeActions[2][ISD::LAST_INDEXED_MODE];
 
+  /// ConvertActions - For each conversion from source type to destination type,
+  /// keep a LegalizeAction that indicates how instruction selection should
+  /// deal with the conversion.
+  /// Currently, this is used only for floating->floating conversions
+  /// (FP_EXTEND and FP_ROUND).
+  uint64_t ConvertActions[MVT::LAST_VALUETYPE];
+
   ValueTypeActionImpl ValueTypeActions;
 
-  std::vector<double> LegalFPImmediates;
+  std::vector<APFloat> LegalFPImmediates;
 
   std::vector<std::pair<MVT::ValueType,
                         TargetRegisterClass*> > AvailableRegClasses;
 
@@ -1089,7 +1252,7 @@ private:
   /// TargetDAGCombineArray - Targets can specify ISD nodes that they would
   /// like PerformDAGCombine callbacks for by calling setTargetDAGCombine(),
   /// which sets a bit in this array.
-  unsigned char TargetDAGCombineArray[156/(sizeof(unsigned char)*8)];
+  unsigned char TargetDAGCombineArray[160/(sizeof(unsigned char)*8)];
 
   /// PromoteToType - For operations that must be promoted to a specific type,
   /// this holds the destination type.  This map should be sparse, so don't hold
@@ -1101,7 +1264,11 @@ private:
   /// LibcallRoutineNames - Stores the name of each libcall.
   ///
-  std::string LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];
+  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];
+
+  /// CmpLibcallCCs - The ISD::CondCode that should be used to test the result
+  /// of each of the comparison libcalls against zero.
+  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
 
 protected:
   /// When lowering %llvm.memset this field specifies the maximum number of