X-Git-Url: http://plrg.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FTargetLoweringBase.cpp;h=7250fdc9385a3064aa48d2b5c240e6ea3effe445;hb=226889eb73e83dea20c050047bcff71e1552a90f;hp=2162a513b43d1e4b59e3a4ad9544c10f4de62755;hpb=44c1c890e459459fdd039f3e9c200ce35d1ebc54;p=oota-llvm.git

diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp
index 2162a513b43..7250fdc9385 100644
--- a/lib/CodeGen/TargetLoweringBase.cpp
+++ b/lib/CodeGen/TargetLoweringBase.cpp
@@ -38,6 +38,11 @@
 #include <cctype>
 using namespace llvm;
 
+static cl::opt<bool> JumpIsExpensiveOverride(
+    "jump-is-expensive", cl::init(false),
+    cl::desc("Do not create extra branches to split comparison logic."),
+    cl::Hidden);
+
 /// InitLibcallNames - Set default libcall names.
 ///
 static void InitLibcallNames(const char **Names, const Triple &TT) {
@@ -420,6 +425,14 @@ static void InitLibcallNames(const char **Names, const Triple &TT) {
     // These are generally not available.
     Names[RTLIB::STACKPROTECTOR_CHECK_FAIL] = nullptr;
   }
+
+  // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
+  // of the gnueabi-style __gnu_*_ieee.
+  // FIXME: What about other targets?
+  if (TT.isOSDarwin()) {
+    Names[RTLIB::FPEXT_F16_F32] = "__extendhfsf2";
+    Names[RTLIB::FPROUND_F32_F16] = "__truncsfhf2";
+  }
 }
 
 /// InitLibcallCallingConvs - Set default libcall CallingConvs.
@@ -737,7 +750,6 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
   initActions();
 
   // Perform these initializations only once.
-  IsLittleEndian = getDataLayout()->isLittleEndian();
   MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove = 8;
   MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
     MaxStoresPerMemmoveOptSize = 4;
@@ -746,10 +758,8 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
   SelectIsExpensive = false;
   HasMultipleConditionRegisters = false;
   HasExtractBitsInsn = false;
-  IntDivIsCheap = false;
   FsqrtIsCheap = false;
-  Pow2SDivIsCheap = false;
-  JumpIsExpensive = false;
+  JumpIsExpensive = JumpIsExpensiveOverride;
   PredictableSelectIsExpensive = false;
   MaskAndBranchFoldingIsLegal = false;
   EnableExtLdPromotion = false;
@@ -770,7 +780,7 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
   InsertFencesForAtomic = false;
   MinimumJumpTableEntries = 4;
 
-  InitLibcallNames(LibcallRoutineNames, Triple(TM.getTargetTriple()));
+  InitLibcallNames(LibcallRoutineNames, TM.getTargetTriple());
   InitCmpLibcallCCs(CmpLibcallCCs);
   InitLibcallCallingConvs(LibcallCallingConvs);
 }
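The new "jump-is-expensive" option works together with the setJumpIsExpensive()
hunk later in this patch: a target may still request expensive-jump lowering,
but an explicit command-line value wins. A minimal sketch of the target-side
usage (MyTargetLowering is hypothetical, not part of this patch):

    // In a target's TargetLowering constructor. If the user passed
    // -jump-is-expensive=<bool> to llc, setJumpIsExpensive() sees that the
    // option occurred on the command line and ignores this request, so the
    // command-line value stays in effect.
    MyTargetLowering::MyTargetLowering(const TargetMachine &TM)
        : TargetLowering(TM) {
      setJumpIsExpensive(true); // Prefer combined compares over extra branches.
    }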
@@ -802,7 +812,23 @@ void TargetLoweringBase::initActions() {
     setOperationAction(ISD::CONCAT_VECTORS, VT, Expand);
     setOperationAction(ISD::FMINNUM, VT, Expand);
     setOperationAction(ISD::FMAXNUM, VT, Expand);
+    setOperationAction(ISD::FMINNAN, VT, Expand);
+    setOperationAction(ISD::FMAXNAN, VT, Expand);
     setOperationAction(ISD::FMAD, VT, Expand);
+    setOperationAction(ISD::SMIN, VT, Expand);
+    setOperationAction(ISD::SMAX, VT, Expand);
+    setOperationAction(ISD::UMIN, VT, Expand);
+    setOperationAction(ISD::UMAX, VT, Expand);
+
+    // Overflow operations default to expand
+    setOperationAction(ISD::SADDO, VT, Expand);
+    setOperationAction(ISD::SSUBO, VT, Expand);
+    setOperationAction(ISD::UADDO, VT, Expand);
+    setOperationAction(ISD::USUBO, VT, Expand);
+    setOperationAction(ISD::SMULO, VT, Expand);
+    setOperationAction(ISD::UMULO, VT, Expand);
+    setOperationAction(ISD::UABSDIFF, VT, Expand);
+    setOperationAction(ISD::SABSDIFF, VT, Expand);
 
     // These library functions default to expand.
     setOperationAction(ISD::FROUND, VT, Expand);
@@ -819,6 +845,9 @@ void TargetLoweringBase::initActions() {
   // Most targets ignore the @llvm.prefetch intrinsic.
   setOperationAction(ISD::PREFETCH, MVT::Other, Expand);
 
+  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
+  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);
+
   // ConstantFP nodes default to expand.  Targets can either change this to
   // Legal, in which case all fp constants are legal, or use isFPImmLegal()
   // to optimize expansions for certain constants.
@@ -854,28 +883,17 @@ void TargetLoweringBase::initActions() {
   setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);
 }
 
-MVT TargetLoweringBase::getPointerTy(uint32_t AS) const {
-  return MVT::getIntegerVT(getPointerSizeInBits(AS));
-}
-
-unsigned TargetLoweringBase::getPointerSizeInBits(uint32_t AS) const {
-  return getDataLayout()->getPointerSizeInBits(AS);
-}
-
-unsigned TargetLoweringBase::getPointerTypeSizeInBits(Type *Ty) const {
-  assert(Ty->isPointerTy());
-  return getPointerSizeInBits(Ty->getPointerAddressSpace());
-}
-
-MVT TargetLoweringBase::getScalarShiftAmountTy(EVT LHSTy) const {
-  return MVT::getIntegerVT(8 * getDataLayout()->getPointerSize(0));
+MVT TargetLoweringBase::getScalarShiftAmountTy(const DataLayout &DL,
+                                               EVT) const {
+  return MVT::getIntegerVT(8 * DL.getPointerSize(0));
 }
 
-EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy) const {
+EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy,
+                                         const DataLayout &DL) const {
   assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
   if (LHSTy.isVector())
     return LHSTy;
-  return getScalarShiftAmountTy(LHSTy);
+  return getScalarShiftAmountTy(DL, LHSTy);
 }
 
 /// canOpTrap - Returns true if the operation can trap for the value type.
@@ -895,6 +913,12 @@ bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
   }
 }
 
+void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
+  // If the command-line option was specified, ignore this request.
+  if (!JumpIsExpensiveOverride.getNumOccurrences())
+    JumpIsExpensive = isExpensive;
+}
+
 TargetLoweringBase::LegalizeKind
 TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
   // If this is a simple type, use the ComputeRegisterProp mechanism.
@@ -1129,8 +1153,8 @@ TargetLoweringBase::emitPatchPoint(MachineInstr *MI,
       Flags |= MachineMemOperand::MOVolatile;
     }
     MachineMemOperand *MMO = MF.getMachineMemOperand(
-        MachinePointerInfo::getFixedStack(FI), Flags,
-        TM.getDataLayout()->getPointerSize(), MFI.getObjectAlignment(FI));
+        MachinePointerInfo::getFixedStack(MF, FI), Flags,
+        MF.getDataLayout().getPointerSize(), MFI.getObjectAlignment(FI));
     MIB->addMemOperand(MF, MMO);
 
     // Replace the instruction and update the operand index.
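A recurring theme in this patch: the DataLayout is now threaded through as an
explicit parameter instead of being fetched from the TargetMachine (note the
removal of TargetLoweringBase::getPointerTy(uint32_t) and friends above). A
sketch of the resulting call pattern at a typical SelectionDAG call site
(illustrative only, assuming a SelectionDAG DAG and a TargetLowering TLI in
scope):

    // Before: EVT ShVT = TLI.getShiftAmountTy(LHS.getValueType());
    // After: the caller supplies the DataLayout it is working with.
    EVT ShVT = TLI.getShiftAmountTy(LHS.getValueType(), DAG.getDataLayout());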
@@ -1256,10 +1280,19 @@ void TargetLoweringBase::computeRegisterProperties(
   }
 
   if (!isTypeLegal(MVT::f16)) {
-    NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
-    RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
-    TransformToType[MVT::f16] = MVT::i16;
-    ValueTypeActions.setTypeAction(MVT::f16, TypeSoftenFloat);
+    // If the target has native f32 support, promote f16 operations to f32.  If
+    // f32 is not supported, generate soft float library calls.
+    if (isTypeLegal(MVT::f32)) {
+      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
+      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
+      TransformToType[MVT::f16] = MVT::f32;
+      ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
+    } else {
+      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
+      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
+      TransformToType[MVT::f16] = MVT::i16;
+      ValueTypeActions.setTypeAction(MVT::f16, TypeSoftenFloat);
+    }
   }
 
   // Loop over all of the vector value types to see which need transformations.
@@ -1358,9 +1391,10 @@ void TargetLoweringBase::computeRegisterProperties(
   }
 }
 
-EVT TargetLoweringBase::getSetCCResultType(LLVMContext &, EVT VT) const {
+EVT TargetLoweringBase::getSetCCResultType(const DataLayout &DL, LLVMContext &,
+                                           EVT VT) const {
   assert(!VT.isVector() && "No default SetCC type for vectors!");
-  return getPointerTy(0).SimpleTy;
+  return getPointerTy(DL).SimpleTy;
 }
 
 MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
@@ -1445,11 +1479,11 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
 /// type of the given function.  This does not require a DAG or a return value,
 /// and is suitable for use before any DAGs for the function are constructed.
 /// TODO: Move this out of TargetLowering.cpp.
-void llvm::GetReturnInfo(Type* ReturnType, AttributeSet attr,
+void llvm::GetReturnInfo(Type *ReturnType, AttributeSet attr,
                          SmallVectorImpl<ISD::OutputArg> &Outs,
-                         const TargetLowering &TLI) {
+                         const TargetLowering &TLI, const DataLayout &DL) {
   SmallVector<EVT, 4> ValueVTs;
-  ComputeValueVTs(TLI, ReturnType, ValueVTs);
+  ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
   unsigned NumValues = ValueVTs.size();
   if (NumValues == 0) return;
 
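The f16 change above distinguishes two strategies: with TypePromoteFloat the
value is widened and the arithmetic happens in f32, only the storage format
staying half precision, while TypeSoftenFloat keeps the bits in an i16
register and turns every operation into a library call. A rough sketch of
what the promoted path amounts to, written in terms of the compiler-rt
conversion routines whose names this patch registers for Darwin (the
unsigned short signatures are illustrative; the actual ABI for the half
argument varies by platform, and targets with native conversion instructions
never emit these calls):

    extern "C" float __extendhfsf2(unsigned short h);  // f16 -> f32
    extern "C" unsigned short __truncsfhf2(float f);   // f32 -> f16

    // Conceptually, an f16 add under TypePromoteFloat:
    unsigned short f16_add(unsigned short a, unsigned short b) {
      float fa = __extendhfsf2(a);   // extend both operands to f32
      float fb = __extendhfsf2(b);
      return __truncsfhf2(fa + fb);  // compute in f32, round back to f16
    }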
@@ -1494,10 +1528,34 @@ void llvm::GetReturnInfo(Type* ReturnType, AttributeSet attr,
 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
 /// function arguments in the caller parameter area.  This is the actual
 /// alignment, not its logarithm.
-unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty) const {
-  return getDataLayout()->getABITypeAlignment(Ty);
+unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
+                                                   const DataLayout &DL) const {
+  return DL.getABITypeAlignment(Ty);
+}
+
+bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
+                                            const DataLayout &DL, EVT VT,
+                                            unsigned AddrSpace,
+                                            unsigned Alignment,
+                                            bool *Fast) const {
+  // Check if the specified alignment is sufficient based on the data layout.
+  // TODO: While using the data layout works in practice, a better solution
+  // would be to implement this check directly (make this a virtual function).
+  // For example, the ABI alignment may change based on software platform while
+  // this function should only be affected by hardware implementation.
+  Type *Ty = VT.getTypeForEVT(Context);
+  if (Alignment >= DL.getABITypeAlignment(Ty)) {
+    // Assume that an access that meets the ABI-specified alignment is fast.
+    if (Fast != nullptr)
+      *Fast = true;
+    return true;
+  }
+
+  // This is a misaligned access.
+  return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
 }
+
 //===----------------------------------------------------------------------===//
 //  TargetTransformInfo Helpers
 //===----------------------------------------------------------------------===//
 
@@ -1516,6 +1574,13 @@ int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
   case Invoke:         return 0;
   case Resume:         return 0;
   case Unreachable:    return 0;
+  case CleanupEndPad:  return 0;
+  case CleanupRet:     return 0;
+  case CatchEndPad:    return 0;
+  case CatchRet:       return 0;
+  case CatchPad:       return 0;
+  case TerminatePad:   return 0;
+  case CleanupPad:     return 0;
   case Add:            return ISD::ADD;
   case FAdd:           return ISD::FADD;
   case Sub:            return ISD::SUB;
@@ -1573,12 +1638,13 @@ int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
   llvm_unreachable("Unknown instruction type encountered!");
 }
 
-std::pair<unsigned, MVT>
-TargetLoweringBase::getTypeLegalizationCost(Type *Ty) const {
+std::pair<int, MVT>
+TargetLoweringBase::getTypeLegalizationCost(const DataLayout &DL,
+                                            Type *Ty) const {
   LLVMContext &C = Ty->getContext();
-  EVT MTy = getValueType(Ty);
+  EVT MTy = getValueType(DL, Ty);
 
-  unsigned Cost = 1;
+  int Cost = 1;
   // We keep legalizing the type until we find a legal kind. We assume that
   // the only operation that costs anything is the split. After splitting
   // we need to handle two types.
@@ -1602,8 +1668,9 @@ TargetLoweringBase::getTypeLegalizationCost(Type *Ty) const {
 
 /// isLegalAddressingMode - Return true if the addressing mode represented
 /// by AM is legal for this target, for a load/store of the specified type.
-bool TargetLoweringBase::isLegalAddressingMode(const AddrMode &AM,
-                                               Type *Ty) const {
+bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
+                                               const AddrMode &AM, Type *Ty,
+                                               unsigned AS) const {
   // The default implementation of this implements a conservative RISCy, r+r and
   // r+i addr mode.
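The new allowsMemoryAccess() helper above treats any access meeting the ABI
alignment as both legal and fast, and delegates everything else to the
pre-existing virtual hook allowsMisalignedMemoryAccesses(), passing along the
same VT, address space, alignment, and Fast out-parameter. A sketch of how a
target might implement that hook (MyTargetLowering is hypothetical, and the
hardware behavior described is invented for illustration):

    // A target where misaligned 32-bit loads/stores are supported by the
    // hardware but microcoded and slow, and every other misaligned access
    // faults and must be expanded by legalization.
    bool MyTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                          unsigned AddrSpace,
                                                          unsigned Align,
                                                          bool *Fast) const {
      if (VT != MVT::i32)
        return false;     // Unsupported: the access must be split up.
      if (Fast)
        *Fast = false;    // Supported, but not fast.
      return true;
    }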