X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FTargetLoweringBase.cpp;h=e119eae511441d285c80fe5a1ee67ea4d78fe45b;hb=de0129ac0821e693b08df7269f956f5418b2b5f7;hp=a8c7a28a4e831d4ba5cce75ff0e6b312e9649114;hpb=9f85dccfc64b5f0b0c63ddfa0a42d8615aa1fcb3;p=oota-llvm.git

diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp
index a8c7a28a4e8..e119eae5114 100644
--- a/lib/CodeGen/TargetLoweringBase.cpp
+++ b/lib/CodeGen/TargetLoweringBase.cpp
@@ -38,6 +38,11 @@
 #include <cctype>
 using namespace llvm;
 
+static cl::opt<bool> JumpIsExpensiveOverride(
+    "jump-is-expensive", cl::init(false),
+    cl::desc("Do not create extra branches to split comparison logic."),
+    cl::Hidden);
+
 /// InitLibcallNames - Set default libcall names.
 ///
 static void InitLibcallNames(const char **Names, const Triple &TT) {
@@ -206,6 +211,16 @@ static void InitLibcallNames(const char **Names, const Triple &TT) {
   Names[RTLIB::FLOOR_F80] = "floorl";
   Names[RTLIB::FLOOR_F128] = "floorl";
   Names[RTLIB::FLOOR_PPCF128] = "floorl";
+  Names[RTLIB::FMIN_F32] = "fminf";
+  Names[RTLIB::FMIN_F64] = "fmin";
+  Names[RTLIB::FMIN_F80] = "fminl";
+  Names[RTLIB::FMIN_F128] = "fminl";
+  Names[RTLIB::FMIN_PPCF128] = "fminl";
+  Names[RTLIB::FMAX_F32] = "fmaxf";
+  Names[RTLIB::FMAX_F64] = "fmax";
+  Names[RTLIB::FMAX_F80] = "fmaxl";
+  Names[RTLIB::FMAX_F128] = "fmaxl";
+  Names[RTLIB::FMAX_PPCF128] = "fmaxl";
   Names[RTLIB::ROUND_F32] = "roundf";
   Names[RTLIB::ROUND_F64] = "round";
   Names[RTLIB::ROUND_F80] = "roundl";
@@ -404,12 +419,20 @@ static void InitLibcallNames(const char **Names, const Triple &TT) {
     Names[RTLIB::SINCOS_PPCF128] = nullptr;
   }
 
-  if (TT.getOS() != Triple::OpenBSD) {
+  if (!TT.isOSOpenBSD()) {
     Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = "__stack_chk_fail";
   } else {
     // These are generally not available.
     Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = nullptr;
   }
+
+  // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
+  // of the gnueabi-style __gnu_*_ieee.
+  // FIXME: What about other targets?
+  if (TT.isOSDarwin()) {
+    Names[RTLIB::FPEXT_F16_F32] = "__extendhfsf2";
+    Names[RTLIB::FPROUND_F32_F16] = "__truncsfhf2";
+  }
 }
 
 /// InitLibcallCallingConvs - Set default libcall CallingConvs.
@@ -654,6 +677,44 @@ RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
   return UNKNOWN_LIBCALL;
 }
 
+RTLIB::Libcall RTLIB::getATOMIC(unsigned Opc, MVT VT) {
+#define OP_TO_LIBCALL(Name, Enum)         \
+  case Name:                              \
+    switch (VT.SimpleTy) {                \
+    default:                              \
+      return UNKNOWN_LIBCALL;             \
+    case MVT::i8:                         \
+      return Enum##_1;                    \
+    case MVT::i16:                        \
+      return Enum##_2;                    \
+    case MVT::i32:                        \
+      return Enum##_4;                    \
+    case MVT::i64:                        \
+      return Enum##_8;                    \
+    case MVT::i128:                       \
+      return Enum##_16;                   \
+    }
+
+  switch (Opc) {
+    OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
+    OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
+    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
+    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
+    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
+    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
+    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
+    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
+    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
+    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
+    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
+    OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
+  }
+
+#undef OP_TO_LIBCALL
+
+  return UNKNOWN_LIBCALL;
+}
+
 /// InitCmpLibcallCCs - Set default comparison libcall CC.
 ///
 static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
@@ -684,14 +745,11 @@ static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
   CCs[RTLIB::O_F128] = ISD::SETEQ;
 }
 
-/// NOTE: The constructor takes ownership of TLOF.
-TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
-                                       const TargetLoweringObjectFile *tlof)
-    : TM(tm), DL(TM.getSubtargetImpl()->getDataLayout()), TLOF(*tlof) {
+/// NOTE: The TargetMachine owns TLOF.
+TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
   initActions();
 
   // Perform these initializations only once.
-  IsLittleEndian = DL->isLittleEndian();
   MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove = 8;
   MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
     MaxStoresPerMemmoveOptSize = 4;
@@ -701,10 +759,13 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
   HasMultipleConditionRegisters = false;
   HasExtractBitsInsn = false;
   IntDivIsCheap = false;
-  Pow2DivIsCheap = false;
-  JumpIsExpensive = false;
+  FsqrtIsCheap = false;
+  Pow2SDivIsCheap = false;
+  JumpIsExpensive = JumpIsExpensiveOverride;
   PredictableSelectIsExpensive = false;
   MaskAndBranchFoldingIsLegal = false;
+  EnableExtLdPromotion = false;
+  HasFloatingPointExceptions = true;
   StackPointerRegisterToSaveRestore = 0;
   ExceptionPointerRegister = 0;
   ExceptionSelectorRegister = 0;
@@ -719,18 +780,13 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
   PrefLoopAlignment = 0;
   MinStackArgumentAlignment = 1;
   InsertFencesForAtomic = false;
-  SupportJumpTables = true;
   MinimumJumpTableEntries = 4;
 
-  InitLibcallNames(LibcallRoutineNames, Triple(TM.getTargetTriple()));
+  InitLibcallNames(LibcallRoutineNames, TM.getTargetTriple());
   InitCmpLibcallCCs(CmpLibcallCCs);
   InitLibcallCallingConvs(LibcallCallingConvs);
 }
 
-TargetLoweringBase::~TargetLoweringBase() {
-  delete &TLOF;
-}
-
 void TargetLoweringBase::initActions() {
   // All operations default to being supported.
   memset(OpActions, 0, sizeof(OpActions));
@@ -742,35 +798,49 @@ void TargetLoweringBase::initActions() {
   memset(LoadExtActions, 0, sizeof(LoadExtActions));
   memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
   memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
   memset(CondCodeActions, 0, sizeof(CondCodeActions));
   memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
 
   // Set default actions for various operations.
-  for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
+  for (MVT VT : MVT::all_valuetypes()) {
     // Default all indexed load / store to expand.
     for (unsigned IM = (unsigned)ISD::PRE_INC;
          IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
-      setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand);
-      setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand);
+      setIndexedLoadAction(IM, VT, Expand);
+      setIndexedStoreAction(IM, VT, Expand);
     }
 
     // Most backends expect to see the node which just returns the value loaded.
-    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
-                       (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);
 
     // These operations default to expand.
-    setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand);
-    setOperationAction(ISD::CONCAT_VECTORS, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FGETSIGN, VT, Expand);
+    setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
+    setOperationAction(ISD::FMINNUM, VT, Expand);
+    setOperationAction(ISD::FMAXNUM, VT, Expand);
+    setOperationAction(ISD::FMINNAN, VT, Expand);
+    setOperationAction(ISD::FMAXNAN, VT, Expand);
+    setOperationAction(ISD::FMAD, VT, Expand);
+    setOperationAction(ISD::SMIN, VT, Expand);
+    setOperationAction(ISD::SMAX, VT, Expand);
+    setOperationAction(ISD::UMIN, VT, Expand);
+    setOperationAction(ISD::UMAX, VT, Expand);
+
+    // Overflow operations default to expand
+    setOperationAction(ISD::SADDO, VT, Expand);
+    setOperationAction(ISD::SSUBO, VT, Expand);
+    setOperationAction(ISD::UADDO, VT, Expand);
+    setOperationAction(ISD::USUBO, VT, Expand);
+    setOperationAction(ISD::SMULO, VT, Expand);
+    setOperationAction(ISD::UMULO, VT, Expand);
+    setOperationAction(ISD::UABSDIFF, VT, Expand);
+    setOperationAction(ISD::SABSDIFF, VT, Expand);
 
     // These library functions default to expand.
-    setOperationAction(ISD::FROUND, (MVT::SimpleValueType)VT, Expand);
+    setOperationAction(ISD::FROUND, VT, Expand);
 
     // These operations default to expand for vector types.
-    if (VT >= MVT::FIRST_VECTOR_VALUETYPE &&
-        VT <= MVT::LAST_VECTOR_VALUETYPE) {
-      setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand);
-      setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG,
-                         (MVT::SimpleValueType)VT, Expand);
-      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG,
-                         (MVT::SimpleValueType)VT, Expand);
-      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG,
-                         (MVT::SimpleValueType)VT, Expand);
+    if (VT.isVector()) {
+      setOperationAction(ISD::FCOPYSIGN, VT, Expand);
+      setOperationAction(ISD::ANY_EXTEND_VECTOR_INREG, VT, Expand);
+      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Expand);
+      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Expand);
     }
   }
 
@@ -787,50 +857,21 @@ void TargetLoweringBase::initActions() {
   setOperationAction(ISD::ConstantFP, MVT::f128, Expand);
 
   // These library functions default to expand.
-  setOperationAction(ISD::FLOG , MVT::f16, Expand);
-  setOperationAction(ISD::FLOG2, MVT::f16, Expand);
-  setOperationAction(ISD::FLOG10, MVT::f16, Expand);
-  setOperationAction(ISD::FEXP , MVT::f16, Expand);
-  setOperationAction(ISD::FEXP2, MVT::f16, Expand);
-  setOperationAction(ISD::FFLOOR, MVT::f16, Expand);
-  setOperationAction(ISD::FNEARBYINT, MVT::f16, Expand);
-  setOperationAction(ISD::FCEIL, MVT::f16, Expand);
-  setOperationAction(ISD::FRINT, MVT::f16, Expand);
-  setOperationAction(ISD::FTRUNC, MVT::f16, Expand);
-  setOperationAction(ISD::FROUND, MVT::f16, Expand);
-  setOperationAction(ISD::FLOG , MVT::f32, Expand);
-  setOperationAction(ISD::FLOG2, MVT::f32, Expand);
-  setOperationAction(ISD::FLOG10, MVT::f32, Expand);
-  setOperationAction(ISD::FEXP , MVT::f32, Expand);
-  setOperationAction(ISD::FEXP2, MVT::f32, Expand);
-  setOperationAction(ISD::FFLOOR, MVT::f32, Expand);
-  setOperationAction(ISD::FNEARBYINT, MVT::f32, Expand);
-  setOperationAction(ISD::FCEIL, MVT::f32, Expand);
-  setOperationAction(ISD::FRINT, MVT::f32, Expand);
-  setOperationAction(ISD::FTRUNC, MVT::f32, Expand);
-  setOperationAction(ISD::FROUND, MVT::f32, Expand);
-  setOperationAction(ISD::FLOG , MVT::f64, Expand);
-  setOperationAction(ISD::FLOG2, MVT::f64, Expand);
-  setOperationAction(ISD::FLOG10, MVT::f64, Expand);
-  setOperationAction(ISD::FEXP , MVT::f64, Expand);
-  setOperationAction(ISD::FEXP2, MVT::f64, Expand);
-  setOperationAction(ISD::FFLOOR, MVT::f64, Expand);
-  setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
-  setOperationAction(ISD::FCEIL, MVT::f64, Expand);
-  setOperationAction(ISD::FRINT, MVT::f64, Expand);
-  setOperationAction(ISD::FTRUNC, MVT::f64, Expand);
-  setOperationAction(ISD::FROUND, MVT::f64, Expand);
-  setOperationAction(ISD::FLOG , MVT::f128, Expand);
-  setOperationAction(ISD::FLOG2, MVT::f128, Expand);
-  setOperationAction(ISD::FLOG10, MVT::f128, Expand);
-  setOperationAction(ISD::FEXP , MVT::f128, Expand);
-  setOperationAction(ISD::FEXP2, MVT::f128, Expand);
-  setOperationAction(ISD::FFLOOR, MVT::f128, Expand);
-  setOperationAction(ISD::FNEARBYINT, MVT::f128, Expand);
-  setOperationAction(ISD::FCEIL, MVT::f128, Expand);
-  setOperationAction(ISD::FRINT, MVT::f128, Expand);
-  setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
-  setOperationAction(ISD::FROUND, MVT::f128, Expand);
+  for (MVT VT : {MVT::f32, MVT::f64, MVT::f128}) {
+    setOperationAction(ISD::FLOG , VT, Expand);
+    setOperationAction(ISD::FLOG2, VT, Expand);
+    setOperationAction(ISD::FLOG10, VT, Expand);
+    setOperationAction(ISD::FEXP , VT, Expand);
+    setOperationAction(ISD::FEXP2, VT, Expand);
+    setOperationAction(ISD::FFLOOR, VT, Expand);
+    setOperationAction(ISD::FMINNUM, VT, Expand);
+    setOperationAction(ISD::FMAXNUM, VT, Expand);
+    setOperationAction(ISD::FNEARBYINT, VT, Expand);
+    setOperationAction(ISD::FCEIL, VT, Expand);
+    setOperationAction(ISD::FRINT, VT, Expand);
+    setOperationAction(ISD::FTRUNC, VT, Expand);
+    setOperationAction(ISD::FROUND, VT, Expand);
+  }
 
   // Default ISD::TRAP to expand (which turns it into abort).
   setOperationAction(ISD::TRAP, MVT::Other, Expand);
 
@@ -841,28 +882,17 @@ void TargetLoweringBase::initActions() {
   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
 }
 
-MVT TargetLoweringBase::getPointerTy(uint32_t AS) const {
-  return MVT::getIntegerVT(getPointerSizeInBits(AS));
-}
-
-unsigned TargetLoweringBase::getPointerSizeInBits(uint32_t AS) const {
-  return DL->getPointerSizeInBits(AS);
-}
-
-unsigned TargetLoweringBase::getPointerTypeSizeInBits(Type *Ty) const {
-  assert(Ty->isPointerTy());
-  return getPointerSizeInBits(Ty->getPointerAddressSpace());
-}
-
-MVT TargetLoweringBase::getScalarShiftAmountTy(EVT LHSTy) const {
-  return MVT::getIntegerVT(8*DL->getPointerSize(0));
+MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
+                                               EVT) const {
+  return MVT::getIntegerVT(8 * DL.getPointerSize(0));
 }
 
-EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy) const {
+EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
+                                         const DataLayout &DL) const {
   assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
   if (LHSTy.isVector())
     return LHSTy;
-  return getScalarShiftAmountTy(LHSTy);
+  return getScalarShiftAmountTy(DL, LHSTy);
 }
 
 /// canOpTrap - Returns true if the operation can trap for the value type.
@@ -882,6 +912,144 @@ bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
   }
 }
 
+void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
+  // If the command-line option was specified, ignore this request.
+  if (!JumpIsExpensiveOverride.getNumOccurrences())
+    JumpIsExpensive = isExpensive;
+}
+
+TargetLoweringBase::LegalizeKind
+TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
+  // If this is a simple type, use the ComputeRegisterProp mechanism.
+  if (VT.isSimple()) {
+    MVT SVT = VT.getSimpleVT();
+    assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
+    MVT NVT = TransformToType[SVT.SimpleTy];
+    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
+
+    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
+            ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger) &&
+           "Promote may not follow Expand or Promote");
+
+    if (LA == TypeSplitVector)
+      return LegalizeKind(LA,
+                          EVT::getVectorVT(Context, SVT.getVectorElementType(),
+                                           SVT.getVectorNumElements() / 2));
+    if (LA == TypeScalarizeVector)
+      return LegalizeKind(LA, SVT.getVectorElementType());
+    return LegalizeKind(LA, NVT);
+  }
+
+  // Handle Extended Scalar Types.
+  if (!VT.isVector()) {
+    assert(VT.isInteger() && "Float types must be simple");
+    unsigned BitSize = VT.getSizeInBits();
+    // First promote to a power-of-two size, then expand if necessary.
+    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
+      EVT NVT = VT.getRoundIntegerType(Context);
+      assert(NVT != VT && "Unable to round integer VT");
+      LegalizeKind NextStep = getTypeConversion(Context, NVT);
+      // Avoid multi-step promotion.
+      if (NextStep.first == TypePromoteInteger)
+        return NextStep;
+      // Return rounded integer type.
+      return LegalizeKind(TypePromoteInteger, NVT);
+    }
+
+    return LegalizeKind(TypeExpandInteger,
+                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
+  }
+
+  // Handle vector types.
+  unsigned NumElts = VT.getVectorNumElements();
+  EVT EltVT = VT.getVectorElementType();
+
+  // Vectors with only one element are always scalarized.
+  if (NumElts == 1)
+    return LegalizeKind(TypeScalarizeVector, EltVT);
+
+  // Try to widen vector elements until the element type is a power of two and
+  // promote it to a legal type later on, for example:
+  // <3 x i8> -> <4 x i8> -> <4 x i32>
+  if (EltVT.isInteger()) {
+    // Vectors with a number of elements that is not a power of two are always
+    // widened, for example <3 x i8> -> <4 x i8>.
+    if (!VT.isPow2VectorType()) {
+      NumElts = (unsigned)NextPowerOf2(NumElts);
+      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
+      return LegalizeKind(TypeWidenVector, NVT);
+    }
+
+    // Examine the element type.
+    LegalizeKind LK = getTypeConversion(Context, EltVT);
+
+    // If type is to be expanded, split the vector.
+    //  <4 x i140> -> <2 x i140>
+    if (LK.first == TypeExpandInteger)
+      return LegalizeKind(TypeSplitVector,
+                          EVT::getVectorVT(Context, EltVT, NumElts / 2));
+
+    // Promote the integer element types until a legal vector type is found
+    // or until the element integer type is too big. If a legal type was not
+    // found, fallback to the usual mechanism of widening/splitting the
+    // vector.
+    EVT OldEltVT = EltVT;
+    while (1) {
+      // Increase the bitwidth of the element to the next pow-of-two
+      // (which is greater than 8 bits).
+      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
+                  .getRoundIntegerType(Context);
+
+      // Stop trying when getting a non-simple element type.
+      // Note that vector elements may be greater than legal vector element
+      // types. Example: X86 XMM registers hold 64bit element on 32bit
+      // systems.
+      if (!EltVT.isSimple())
+        break;
+
+      // Build a new vector type and check if it is legal.
+      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
+      // Found a legal promoted vector type.
+      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
+        return LegalizeKind(TypePromoteInteger,
+                            EVT::getVectorVT(Context, EltVT, NumElts));
+    }
+
+    // Reset the type to the unexpanded type if we did not find a legal vector
+    // type with a promoted vector element type.
+    EltVT = OldEltVT;
+  }
+
+  // Try to widen the vector until a legal type is found.
+  // If there is no wider legal type, split the vector.
+  while (1) {
+    // Round up to the next power of 2.
+    NumElts = (unsigned)NextPowerOf2(NumElts);
+
+    // If there is no simple vector type with this many elements then there
+    // cannot be a larger legal vector type.  Note that this assumes that
+    // there are no skipped intermediate vector types in the simple types.
+    if (!EltVT.isSimple())
+      break;
+    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
+    if (LargerVector == MVT())
+      break;
+
+    // If this type is legal then widen the vector.
+    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
+      return LegalizeKind(TypeWidenVector, LargerVector);
+  }
+
+  // Widen odd vectors to next power of two.
+  if (!VT.isPow2VectorType()) {
+    EVT NVT = VT.getPow2VectorType(Context);
+    return LegalizeKind(TypeWidenVector, NVT);
+  }
+
+  // Vectors with illegal element types are expanded.
+  EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
+  return LegalizeKind(TypeSplitVector, NVT);
+}
 
 static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                           unsigned &NumIntermediates,
@@ -977,10 +1145,15 @@ TargetLoweringBase::emitPatchPoint(MachineInstr *MI,
   // Add a new memory operand for this FI.
   const MachineFrameInfo &MFI = *MF.getFrameInfo();
   assert(MFI.getObjectOffset(FI) != -1);
+
+  unsigned Flags = MachineMemOperand::MOLoad;
+  if (MI->getOpcode() == TargetOpcode::STATEPOINT) {
+    Flags |= MachineMemOperand::MOStore;
+    Flags |= MachineMemOperand::MOVolatile;
+  }
   MachineMemOperand *MMO = MF.getMachineMemOperand(
-      MachinePointerInfo::getFixedStack(FI), MachineMemOperand::MOLoad,
-      TM.getSubtargetImpl()->getDataLayout()->getPointerSize(),
-      MFI.getObjectAlignment(FI));
+      MachinePointerInfo::getFixedStack(MF, FI), Flags,
+      MF.getDataLayout().getPointerSize(), MFI.getObjectAlignment(FI));
   MIB->addMemOperand(MF, MMO);
 
   // Replace the instruction and update the operand index.
@@ -994,10 +1167,13 @@ TargetLoweringBase::emitPatchPoint(MachineInstr *MI,
 
 /// findRepresentativeClass - Return the largest legal super-reg register class
 /// of the register class for the specified type and its associated "cost".
-std::pair<const TargetRegisterClass *, uint8_t>
-TargetLoweringBase::findRepresentativeClass(MVT VT) const {
-  const TargetRegisterInfo *TRI =
-      getTargetMachine().getSubtargetImpl()->getRegisterInfo();
+// This function is in TargetLowering because it uses RegClassForVT which would
+// need to be moved to TargetRegisterInfo and would necessitate moving
+// isTypeLegal over as well - a massive change that would just require
+// TargetLowering having a TargetRegisterInfo class member that it would use.
+std::pair<const TargetRegisterClass *, uint8_t>
+TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
+                                            MVT VT) const {
   const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
   if (!RC)
     return std::make_pair(RC, 0);
@@ -1023,9 +1199,10 @@ TargetLoweringBase::findRepresentativeClass(MVT VT) const {
 
 /// computeRegisterProperties - Once all of the register classes are added,
 /// this allows us to compute derived properties we expose.
-void TargetLoweringBase::computeRegisterProperties() {
-  assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE &&
-         "Too many value types for ValueTypeActions to hold!");
+void TargetLoweringBase::computeRegisterProperties(
+    const TargetRegisterInfo *TRI) {
+  static_assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE,
+                "Too many value types for ValueTypeActions to hold!");
 
   // Everything defaults to needing one register.
   for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
@@ -1092,27 +1269,29 @@ void TargetLoweringBase::computeRegisterProperties() {
     ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
   }
 
-  // Decide how to handle f32. If the target does not have native support for
-  // f32, promote it to f64 if it is legal. Otherwise, expand it to i32.
+  // Decide how to handle f32. If the target does not have native f32 support,
+  // expand it to i32 and we will be generating soft float library calls.
   if (!isTypeLegal(MVT::f32)) {
-    if (isTypeLegal(MVT::f64)) {
-      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::f64];
-      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::f64];
-      TransformToType[MVT::f32] = MVT::f64;
-      ValueTypeActions.setTypeAction(MVT::f32, TypePromoteInteger);
-    } else {
-      NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
-      RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
-      TransformToType[MVT::f32] = MVT::i32;
-      ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
-    }
+    NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
+    RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
+    TransformToType[MVT::f32] = MVT::i32;
+    ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
   }
 
   if (!isTypeLegal(MVT::f16)) {
-    NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
-    RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
-    TransformToType[MVT::f16] = MVT::i16;
-    ValueTypeActions.setTypeAction(MVT::f16, TypeSoftenFloat);
+    // If the target has native f32 support, promote f16 operations to f32. If
+    // f32 is not supported, generate soft float library calls.
+    if (isTypeLegal(MVT::f32)) {
+      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
+      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
+      TransformToType[MVT::f16] = MVT::f32;
+      ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
+    } else {
+      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
+      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
+      TransformToType[MVT::f16] = MVT::i16;
+      ValueTypeActions.setTypeAction(MVT::f16, TypeSoftenFloat);
+    }
   }
 
   // Loop over all of the vector value types to see which need transformations.
@@ -1180,8 +1359,12 @@ void TargetLoweringBase::computeRegisterProperties() {
         TransformToType[i] = MVT::Other;
         if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
-        else
+        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
+        else
+          // Set type action according to the number of elements.
+          ValueTypeActions.setTypeAction(VT, NElts == 1 ? TypeScalarizeVector
+                                                        : TypeSplitVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
@@ -1201,15 +1384,16 @@ void TargetLoweringBase::computeRegisterProperties() {
   for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
     const TargetRegisterClass* RRC;
     uint8_t Cost;
-    std::tie(RRC, Cost) = findRepresentativeClass((MVT::SimpleValueType)i);
+    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
     RepRegClassForVT[i] = RRC;
     RepRegClassCostForVT[i] = Cost;
   }
 }
 
-EVT TargetLoweringBase::getSetCCResultType(LLVMContext &, EVT VT) const {
+EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
+                                           EVT VT) const {
   assert(!VT.isVector() && "No default SetCC type for vectors!");
-  return getPointerTy(0).SimpleTy;
+  return getPointerTy(DL).SimpleTy;
 }
 
 MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
@@ -1294,11 +1478,11 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
 /// type of the given function.  This does not require a DAG or a return value,
 /// and is suitable for use before any DAGs for the function are constructed.
 /// TODO: Move this out of TargetLowering.cpp.
-void llvm::GetReturnInfo(Type* ReturnType, AttributeSet attr,
+void llvm::GetReturnInfo(Type *ReturnType, AttributeSet attr,
                          SmallVectorImpl<ISD::OutputArg> &Outs,
-                         const TargetLowering &TLI) {
+                         const TargetLowering &TLI, const DataLayout &DL) {
   SmallVector<EVT, 4> ValueVTs;
-  ComputeValueVTs(TLI, ReturnType, ValueVTs);
+  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
   unsigned NumValues = ValueVTs.size();
   if (NumValues == 0) return;
 
@@ -1343,10 +1527,34 @@ void llvm::GetReturnInfo(Type* ReturnType, AttributeSet attr,
 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
 /// function arguments in the caller parameter area.  This is the actual
 /// alignment, not its logarithm.
-unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty) const {
-  return DL->getABITypeAlignment(Ty);
+unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
+                                                   const DataLayout &DL) const {
+  return DL.getABITypeAlignment(Ty);
 }
 
+bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
+                                            const DataLayout &DL, EVT VT,
+                                            unsigned AddrSpace,
+                                            unsigned Alignment,
+                                            bool *Fast) const {
+  // Check if the specified alignment is sufficient based on the data layout.
+  // TODO: While using the data layout works in practice, a better solution
+  // would be to implement this check directly (make this a virtual function).
+  // For example, the ABI alignment may change based on software platform while
+  // this function should only be affected by hardware implementation.
+  Type *Ty = VT.getTypeForEVT(Context);
+  if (Alignment >= DL.getABITypeAlignment(Ty)) {
+    // Assume that an access that meets the ABI-specified alignment is fast.
+    if (Fast != nullptr)
+      *Fast = true;
+    return true;
+  }
+
+  // This is a misaligned access.
+  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
+}
+
 //===----------------------------------------------------------------------===//
 //  TargetTransformInfo Helpers
 //===----------------------------------------------------------------------===//
@@ -1365,6 +1573,12 @@ int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
   case Invoke:         return 0;
   case Resume:         return 0;
   case Unreachable:    return 0;
+  case CleanupRet:     return 0;
+  case CatchEndPad:    return 0;
+  case CatchRet:       return 0;
+  case CatchPad:       return 0;
+  case TerminatePad:   return 0;
+  case CleanupPad:     return 0;
   case Add:            return ISD::ADD;
   case FAdd:           return ISD::FADD;
   case Sub:            return ISD::SUB;
@@ -1422,12 +1636,13 @@ int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
   llvm_unreachable("Unknown instruction type encountered!");
 }
 
-std::pair<unsigned, MVT>
-TargetLoweringBase::getTypeLegalizationCost(Type *Ty) const {
+std::pair<int, MVT>
+TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
+                                            Type *Ty) const {
   LLVMContext &C = Ty->getContext();
-  EVT MTy = getValueType(Ty);
+  EVT MTy = getValueType(DL, Ty);
 
-  unsigned Cost = 1;
+  int Cost = 1;
   // We keep legalizing the type until we find a legal kind. We assume that
   // the only operation that costs anything is the split. After splitting
   // we need to handle two types.
@@ -1451,8 +1666,9 @@ TargetLoweringBase::getTypeLegalizationCost(Type *Ty) const {
 
 /// isLegalAddressingMode - Return true if the addressing mode represented
 /// by AM is legal for this target, for a load/store of the specified type.
-bool TargetLoweringBase::isLegalAddressingMode(const AddrMode &AM,
-                                               Type *Ty) const {
+bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
+                                               const AddrMode &AM, Type *Ty,
+                                               unsigned AS) const {
   // The default implementation of this implements a conservative RISCy, r+r and
   // r+i addr mode.