From: Bill Wendling Date: Sun, 30 Dec 2012 10:32:01 +0000 (+0000) Subject: Remove the Function::getFnAttributes method in favor of using the AttributeSet X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=commitdiff_plain;h=831737d329a727f53a1fb0572f7b7a8127208881 Remove the Function::getFnAttributes method in favor of using the AttributeSet directly. This is in preparation for removing the use of the 'Attribute' class as a collection of attributes. That will shift to the AttributeSet class instead. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@171253 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/include/llvm/Attributes.h b/include/llvm/Attributes.h index 7b621249d72..8ce7651ae94 100644 --- a/include/llvm/Attributes.h +++ b/include/llvm/Attributes.h @@ -341,6 +341,20 @@ public: return getAttributes(Idx).getAlignment(); } + /// \brief Return true if the attribute exists at the given index. + bool hasAttribute(unsigned Index, Attribute::AttrKind Kind) const; + + /// \brief Return true if attribute exists at the given index. + bool hasAttributes(unsigned Index) const; + + /// \brief Get the stack alignment. + unsigned getStackAlignment(unsigned Index) const; + + /// \brief Return the attributes at the index as a string. + std::string getAsString(unsigned Index) const; + + uint64_t getBitMask(unsigned Index) const; + /// \brief Return true if the specified attribute is set for at least one /// parameter or for the return value. bool hasAttrSomewhere(Attribute::AttrKind Attr) const; diff --git a/include/llvm/Function.h b/include/llvm/Function.h index 98be4a93fa5..ba04098dfe7 100644 --- a/include/llvm/Function.h +++ b/include/llvm/Function.h @@ -168,12 +168,6 @@ public: /// void setAttributes(const AttributeSet &attrs) { AttributeList = attrs; } - /// getFnAttributes - Return the function attributes for querying. - /// - Attribute getFnAttributes() const { - return AttributeList.getFnAttributes(); - } - /// addFnAttr - Add function attributes to this function. /// void addFnAttr(Attribute::AttrKind N) { @@ -219,7 +213,8 @@ public: /// @brief Determine if the function does not access memory. bool doesNotAccessMemory() const { - return getFnAttributes().hasAttribute(Attribute::ReadNone); + return AttributeList.hasAttribute(AttributeSet::FunctionIndex, + Attribute::ReadNone); } void setDoesNotAccessMemory() { addFnAttr(Attribute::ReadNone); @@ -228,7 +223,8 @@ public: /// @brief Determine if the function does not access or only reads memory. bool onlyReadsMemory() const { return doesNotAccessMemory() || - getFnAttributes().hasAttribute(Attribute::ReadOnly); + AttributeList.hasAttribute(AttributeSet::FunctionIndex, + Attribute::ReadOnly); } void setOnlyReadsMemory() { addFnAttr(Attribute::ReadOnly); @@ -236,7 +232,8 @@ public: /// @brief Determine if the function cannot return. bool doesNotReturn() const { - return getFnAttributes().hasAttribute(Attribute::NoReturn); + return AttributeList.hasAttribute(AttributeSet::FunctionIndex, + Attribute::NoReturn); } void setDoesNotReturn() { addFnAttr(Attribute::NoReturn); @@ -244,7 +241,8 @@ public: /// @brief Determine if the function cannot unwind. bool doesNotThrow() const { - return getFnAttributes().hasAttribute(Attribute::NoUnwind); + return AttributeList.hasAttribute(AttributeSet::FunctionIndex, + Attribute::NoUnwind); } void setDoesNotThrow() { addFnAttr(Attribute::NoUnwind); @@ -252,7 +250,8 @@ public: /// @brief Determine if the call cannot be duplicated. 
bool cannotDuplicate() const { - return getFnAttributes().hasAttribute(Attribute::NoDuplicate); + return AttributeList.hasAttribute(AttributeSet::FunctionIndex, + Attribute::NoDuplicate); } void setCannotDuplicate() { addFnAttr(Attribute::NoDuplicate); @@ -261,7 +260,8 @@ public: /// @brief True if the ABI mandates (or the user requested) that this /// function be in an unwind table. bool hasUWTable() const { - return getFnAttributes().hasAttribute(Attribute::UWTable); + return AttributeList.hasAttribute(AttributeSet::FunctionIndex, + Attribute::UWTable); } void setHasUWTable() { addFnAttr(Attribute::UWTable); diff --git a/lib/Analysis/CodeMetrics.cpp b/lib/Analysis/CodeMetrics.cpp index 401c1beaa42..b5fb93244b2 100644 --- a/lib/Analysis/CodeMetrics.cpp +++ b/lib/Analysis/CodeMetrics.cpp @@ -203,7 +203,8 @@ void CodeMetrics::analyzeFunction(Function *F, const DataLayout *TD) { // as volatile if they are live across a setjmp call, and they probably // won't do this in callers. exposesReturnsTwice = F->callsFunctionThatReturnsTwice() && - !F->getFnAttributes().hasAttribute(Attribute::ReturnsTwice); + !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::ReturnsTwice); // Look at the size of the callee. for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB) diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp index 835b8343b28..ba8a5dd8e73 100644 --- a/lib/Analysis/InlineCost.cpp +++ b/lib/Analysis/InlineCost.cpp @@ -694,7 +694,8 @@ bool CallAnalyzer::simplifyCallSite(Function *F, CallSite CS) { bool CallAnalyzer::visitCallSite(CallSite CS) { if (CS.isCall() && cast<CallInst>(CS.getInstruction())->canReturnTwice() && - !F.getFnAttributes().hasAttribute(Attribute::ReturnsTwice)) { + !F.getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::ReturnsTwice)) { // This aborts the entire analysis. ExposesReturnsTwice = true; return false; @@ -1143,7 +1144,8 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee, // Calls to functions with always-inline attributes should be inlined // whenever possible. - if (Callee->getFnAttributes().hasAttribute(Attribute::AlwaysInline)) { + if (Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::AlwaysInline)) { if (isInlineViable(*Callee)) return llvm::InlineCost::getAlways(); return llvm::InlineCost::getNever(); @@ -1153,7 +1155,8 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee, // something else. Don't inline functions marked noinline or call sites // marked noinline. if (Callee->mayBeOverridden() || - Callee->getFnAttributes().hasAttribute(Attribute::NoInline) || + Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::NoInline) || CS.isNoInline()) return llvm::InlineCost::getNever(); @@ -1175,7 +1178,8 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, Function *Callee, } bool InlineCostAnalyzer::isInlineViable(Function &F) { - bool ReturnsTwice =F.getFnAttributes().hasAttribute(Attribute::ReturnsTwice); + bool ReturnsTwice =F.getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::ReturnsTwice); for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) { // Disallow inlining of functions which contain an indirect branch. 
if (isa<IndirectBrInst>(BI->getTerminator())) diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp index 766f5d0e0a6..f3320fb987a 100644 --- a/lib/Analysis/MemoryDependenceAnalysis.cpp +++ b/lib/Analysis/MemoryDependenceAnalysis.cpp @@ -327,8 +327,8 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs, return 0; if (LIOffs+NewLoadByteSize > MemLocEnd && - LI->getParent()->getParent()->getFnAttributes(). - hasAttribute(Attribute::AddressSafety)) + LI->getParent()->getParent()->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, Attribute::AddressSafety)) // We will be reading past the location accessed by the original program. // While this is safe in a regular build, Address Safety analysis tools // may start reporting false warnings. So, don't do widening. diff --git a/lib/CodeGen/BranchFolding.cpp b/lib/CodeGen/BranchFolding.cpp index d349e0249b8..1c7209a92ee 100644 --- a/lib/CodeGen/BranchFolding.cpp +++ b/lib/CodeGen/BranchFolding.cpp @@ -570,8 +570,8 @@ static bool ProfitableToMerge(MachineBasicBlock *MBB1, // instructions that would be deleted in the merge. MachineFunction *MF = MBB1->getParent(); if (EffectiveTailLen >= 2 && - MF->getFunction()->getFnAttributes(). - hasAttribute(Attribute::OptimizeForSize) && + MF->getFunction()->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) && (I1 == MBB1->begin() || I2 == MBB2->begin())) return true; diff --git a/lib/CodeGen/CodePlacementOpt.cpp b/lib/CodeGen/CodePlacementOpt.cpp index 75dbf72dd0c..24518443a76 100644 --- a/lib/CodeGen/CodePlacementOpt.cpp +++ b/lib/CodeGen/CodePlacementOpt.cpp @@ -373,7 +373,8 @@ bool CodePlacementOpt::OptimizeIntraLoopEdges(MachineFunction &MF) { /// bool CodePlacementOpt::AlignLoops(MachineFunction &MF) { const Function *F = MF.getFunction(); - if (F->getFnAttributes().hasAttribute(Attribute::OptimizeForSize)) + if (F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::OptimizeForSize)) return false; unsigned Align = TLI->getPrefLoopAlignment(); diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp index 044d9edf8af..07a3e039ed6 100644 --- a/lib/CodeGen/MachineBlockPlacement.cpp +++ b/lib/CodeGen/MachineBlockPlacement.cpp @@ -1013,8 +1013,8 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) { // exclusively on the loop info here so that we can align backedges in // unnatural CFGs and backedges that were introduced purely because of the // loop rotations done during this layout pass. - if (F.getFunction()->getFnAttributes(). - hasAttribute(Attribute::OptimizeForSize)) + if (F.getFunction()->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize)) return; unsigned Align = TLI->getPrefLoopAlignment(); if (!Align) diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp index 0bbacd02f0d..84e47affd80 100644 --- a/lib/CodeGen/MachineFunction.cpp +++ b/lib/CodeGen/MachineFunction.cpp @@ -60,13 +60,15 @@ MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM, MFInfo = 0; FrameInfo = new (Allocator) MachineFrameInfo(*TM.getFrameLowering(), TM.Options.RealignStack); - if (Fn->getFnAttributes().hasAttribute(Attribute::StackAlignment)) + if (Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::StackAlignment)) FrameInfo->ensureMaxAlignment(Fn->getAttributes(). 
- getFnAttributes().getStackAlignment()); + getStackAlignment(AttributeSet::FunctionIndex)); ConstantPool = new (Allocator) MachineConstantPool(TM.getDataLayout()); Alignment = TM.getTargetLowering()->getMinFunctionAlignment(); // FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn. - if (!Fn->getFnAttributes().hasAttribute(Attribute::OptimizeForSize)) + if (!Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::OptimizeForSize)) Alignment = std::max(Alignment, TM.getTargetLowering()->getPrefFunctionAlignment()); FunctionNumber = FunctionNum; diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp index 4b1e979a62c..4e9456935b2 100644 --- a/lib/CodeGen/PrologEpilogInserter.cpp +++ b/lib/CodeGen/PrologEpilogInserter.cpp @@ -96,7 +96,8 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) { placeCSRSpillsAndRestores(Fn); // Add the code to save and restore the callee saved registers - if (!F->getFnAttributes().hasAttribute(Attribute::Naked)) + if (!F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::Naked)) insertCSRSpillsAndRestores(Fn); // Allow the target machine to make final modifications to the function @@ -111,7 +112,8 @@ bool PEI::runOnMachineFunction(MachineFunction &Fn) { // called functions. Because of this, calculateCalleeSavedRegisters() // must be called before this function in order to set the AdjustsStack // and MaxCallFrameSize variables. - if (!F->getFnAttributes().hasAttribute(Attribute::Naked)) + if (!F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::Naked)) insertPrologEpilogCode(Fn); // Replace all MO_FrameIndex operands with physical register references @@ -191,13 +193,13 @@ void PEI::calculateCallsInformation(MachineFunction &Fn) { /// calculateCalleeSavedRegisters - Scan the function for modified callee saved /// registers. -void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) { - const TargetRegisterInfo *RegInfo = Fn.getTarget().getRegisterInfo(); - const TargetFrameLowering *TFI = Fn.getTarget().getFrameLowering(); - MachineFrameInfo *MFI = Fn.getFrameInfo(); +void PEI::calculateCalleeSavedRegisters(MachineFunction &F) { + const TargetRegisterInfo *RegInfo = F.getTarget().getRegisterInfo(); + const TargetFrameLowering *TFI = F.getTarget().getFrameLowering(); + MachineFrameInfo *MFI = F.getFrameInfo(); // Get the callee saved register list... - const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs(&Fn); + const uint16_t *CSRegs = RegInfo->getCalleeSavedRegs(&F); // These are used to keep track of the callee-save area. Initialize them. MinCSFrameIndex = INT_MAX; @@ -208,13 +210,14 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) { return; // In Naked functions we aren't going to save any registers. - if (Fn.getFunction()->getFnAttributes().hasAttribute(Attribute::Naked)) + if (F.getFunction()->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::Naked)) return; std::vector<CalleeSavedInfo> CSI; for (unsigned i = 0; CSRegs[i]; ++i) { unsigned Reg = CSRegs[i]; - if (Fn.getRegInfo().isPhysRegUsed(Reg)) { + if (F.getRegInfo().isPhysRegUsed(Reg)) { // If the reg is modified, save it! 
CSI.push_back(CalleeSavedInfo(Reg)); } @@ -235,7 +238,7 @@ void PEI::calculateCalleeSavedRegisters(MachineFunction &Fn) { const TargetRegisterClass *RC = RegInfo->getMinimalPhysRegClass(Reg); int FrameIdx; - if (RegInfo->hasReservedSpillSlot(Fn, Reg, FrameIdx)) { + if (RegInfo->hasReservedSpillSlot(F, Reg, FrameIdx)) { I->setFrameIdx(FrameIdx); continue; } diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index ee61c3748d2..7e95f33fdf1 100644 --- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -7751,8 +7751,9 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) { // We only use vectors if the constant is known to be zero and the // function is not marked with the noimplicitfloat attribute. - if (NonZero || (DAG.getMachineFunction().getFunction()->getFnAttributes(). - hasAttribute(Attribute::NoImplicitFloat))) + if (NonZero || (DAG.getMachineFunction().getFunction()->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, + Attribute::NoImplicitFloat))) LastLegalVectorType = 0; // Check if we found a legal integer type to store. diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp index 74127fe02c6..a38b3a5896f 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp @@ -3545,8 +3545,8 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl, MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); bool OptSize = - MF.getFunction()->getFnAttributes(). - hasAttribute(Attribute::OptimizeForSize); + MF.getFunction()->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize); FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); if (FI && !MFI->isFixedObjectIndex(FI->getIndex())) DstAlignCanChange = true; @@ -3651,8 +3651,8 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl, bool DstAlignCanChange = false; MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); - bool OptSize = MF.getFunction()->getFnAttributes(). - hasAttribute(Attribute::OptimizeForSize); + bool OptSize = MF.getFunction()->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize); FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); if (FI && !MFI->isFixedObjectIndex(FI->getIndex())) DstAlignCanChange = true; @@ -3730,8 +3730,8 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl, bool DstAlignCanChange = false; MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo *MFI = MF.getFrameInfo(); - bool OptSize = MF.getFunction()->getFnAttributes(). - hasAttribute(Attribute::OptimizeForSize); + bool OptSize = MF.getFunction()->getAttributes(). 
+ hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize); FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst); if (FI && !MFI->isFixedObjectIndex(FI->getIndex())) DstAlignCanChange = true; diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index cfd77ea9c93..23f277ae80e 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -4296,7 +4296,8 @@ static SDValue ExpandPowI(DebugLoc DL, SDValue LHS, SDValue RHS, return DAG.getConstantFP(1.0, LHS.getValueType()); const Function *F = DAG.getMachineFunction().getFunction(); - if (!F->getFnAttributes().hasAttribute(Attribute::OptimizeForSize) || + if (!F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::OptimizeForSize) || // If optimizing for size, don't insert too many multiplies. This // inserts up to 5 multiplies. CountPopulation_32(Val)+Log2_32(Val) < 7) { diff --git a/lib/CodeGen/StackProtector.cpp b/lib/CodeGen/StackProtector.cpp index 1b3866749c9..ccf18e13018 100644 --- a/lib/CodeGen/StackProtector.cpp +++ b/lib/CodeGen/StackProtector.cpp @@ -137,10 +137,12 @@ bool StackProtector::ContainsProtectableArray(Type *Ty, bool InStruct) const { /// add a guard variable to functions that call alloca, and functions with /// buffers larger than SSPBufferSize bytes. bool StackProtector::RequiresStackProtector() const { - if (F->getFnAttributes().hasAttribute(Attribute::StackProtectReq)) + if (F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::StackProtectReq)) return true; - if (!F->getFnAttributes().hasAttribute(Attribute::StackProtect)) + if (!F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::StackProtect)) return false; for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) { diff --git a/lib/CodeGen/TailDuplication.cpp b/lib/CodeGen/TailDuplication.cpp index eb44478720e..3cf79347809 100644 --- a/lib/CodeGen/TailDuplication.cpp +++ b/lib/CodeGen/TailDuplication.cpp @@ -551,8 +551,8 @@ TailDuplicatePass::shouldTailDuplicate(const MachineFunction &MF, // compensate for the duplication. unsigned MaxDuplicateCount; if (TailDuplicateSize.getNumOccurrences() == 0 && - MF.getFunction()->getFnAttributes(). - hasAttribute(Attribute::OptimizeForSize)) + MF.getFunction()->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize)) MaxDuplicateCount = 1; else MaxDuplicateCount = TailDuplicateSize; diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp index a12e333fdcc..309dfded697 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -3329,8 +3329,9 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData, // instructions). if (Latency > 0 && Subtarget.isThumb2()) { const MachineFunction *MF = DefMI->getParent()->getParent(); - if (MF->getFunction()->getFnAttributes(). - hasAttribute(Attribute::OptimizeForSize)) + if (MF->getFunction()->getAttributes(). 
+ hasAttribute(AttributeSet::FunctionIndex, + Attribute::OptimizeForSize)) --Latency; } return Latency; diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp index 42742bf3b66..7d712bec3a0 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -326,7 +326,8 @@ needsStackRealignment(const MachineFunction &MF) const { unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment(); bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) || - F->getFnAttributes().hasAttribute(Attribute::StackAlignment)); + F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::StackAlignment)); return requiresRealignment && canRealignStack(MF); } diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp index a6a32f28454..82598f1d0f3 100644 --- a/lib/Target/ARM/ARMFrameLowering.cpp +++ b/lib/Target/ARM/ARMFrameLowering.cpp @@ -1152,7 +1152,8 @@ static void checkNumAlignedDPRCS2Regs(MachineFunction &MF) { return; // Naked functions don't spill callee-saved registers. - if (MF.getFunction()->getFnAttributes().hasAttribute(Attribute::Naked)) + if (MF.getFunction()->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::Naked)) return; // We are planning to use NEON instructions vst1 / vld1. diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index 8034ce16043..790bb385f2d 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -1616,8 +1616,8 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, // FIXME: handle tail calls differently. unsigned CallOpc; - bool HasMinSizeAttr = MF.getFunction()->getFnAttributes(). - hasAttribute(Attribute::MinSize); + bool HasMinSizeAttr = MF.getFunction()->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize); if (Subtarget->isThumb()) { if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps()) CallOpc = ARMISD::CALL_NOLINK; @@ -6689,8 +6689,9 @@ EmitStructByval(MachineInstr *MI, MachineBasicBlock *BB) const { UnitSize = 2; } else { // Check whether we can use NEON instructions. - if (!MF->getFunction()->getFnAttributes(). - hasAttribute(Attribute::NoImplicitFloat) && + if (!MF->getFunction()->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, + Attribute::NoImplicitFloat) && Subtarget->hasNEON()) { if ((Align % 16 == 0) && SizeVal >= 16) { ldrOpc = ARM::VLD1q32wb_fixed; @@ -9458,7 +9459,8 @@ EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size, // See if we can use NEON instructions for this... if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() && - !F->getFnAttributes().hasAttribute(Attribute::NoImplicitFloat)) { + !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::NoImplicitFloat)) { bool Fast; if (Size >= 16 && (memOpAlign(SrcAlign, DstAlign, 16) || diff --git a/lib/Target/ARM/Thumb2SizeReduction.cpp b/lib/Target/ARM/Thumb2SizeReduction.cpp index a4f08474509..4a7eb4010b3 100644 --- a/lib/Target/ARM/Thumb2SizeReduction.cpp +++ b/lib/Target/ARM/Thumb2SizeReduction.cpp @@ -961,9 +961,11 @@ bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) { STI = &TM.getSubtarget<ARMSubtarget>(); // Optimizing / minimizing size? 
- Attribute FnAttrs = MF.getFunction()->getFnAttributes(); - OptimizeSize = FnAttrs.hasAttribute(Attribute::OptimizeForSize); - MinimizeSize = FnAttrs.hasAttribute(Attribute::MinSize); + AttributeSet FnAttrs = MF.getFunction()->getAttributes(); + OptimizeSize = FnAttrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::OptimizeForSize); + MinimizeSize = FnAttrs.hasAttribute(AttributeSet::FunctionIndex, + Attribute::MinSize); bool Modified = false; for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp index 1f54fcad3ac..3d1917c5572 100644 --- a/lib/Target/PowerPC/PPCFrameLowering.cpp +++ b/lib/Target/PowerPC/PPCFrameLowering.cpp @@ -198,8 +198,8 @@ void PPCFrameLowering::determineFrameLayout(MachineFunction &MF) const { // to adjust the stack pointer (we fit in the Red Zone). For 64-bit // SVR4, we also require a stack frame if we need to spill the CR, // since this spill area is addressed relative to the stack pointer. - bool DisableRedZone = MF.getFunction()->getFnAttributes(). - hasAttribute(Attribute::NoRedZone); + bool DisableRedZone = MF.getFunction()->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, Attribute::NoRedZone); // FIXME SVR4 The 32-bit SVR4 ABI has no red zone. However, it can // still generate stackless code if all local vars are reg-allocated. // Try: (FrameSize <= 224 @@ -261,7 +261,8 @@ bool PPCFrameLowering::needsFP(const MachineFunction &MF) const { // Naked functions have no stack frame pushed, so we don't have a frame // pointer. - if (MF.getFunction()->getFnAttributes().hasAttribute(Attribute::Naked)) + if (MF.getFunction()->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::Naked)) return false; return MF.getTarget().Options.DisableFramePointerElim(MF) || diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index b14c1812492..0a7e5633c8c 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -6822,8 +6822,8 @@ SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op, bool is31 = (getTargetMachine().Options.DisableFramePointerElim(MF) || MFI->hasVarSizedObjects()) && MFI->getStackSize() && - !MF.getFunction()->getFnAttributes(). - hasAttribute(Attribute::Naked); + !MF.getFunction()->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, Attribute::Naked); unsigned FrameReg = isPPC64 ? (is31 ? PPC::X31 : PPC::X1) : (is31 ? PPC::R31 : PPC::R1); SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, diff --git a/lib/Target/PowerPC/PPCRegisterInfo.cpp b/lib/Target/PowerPC/PPCRegisterInfo.cpp index 6c8b1edf4b2..a95d04994d5 100644 --- a/lib/Target/PowerPC/PPCRegisterInfo.cpp +++ b/lib/Target/PowerPC/PPCRegisterInfo.cpp @@ -597,7 +597,8 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, // to Offset to get the correct offset. // Naked functions have stack size 0, although getStackSize may not reflect that // because we didn't call all the pieces that compute it for naked functions. - if (!MF.getFunction()->getFnAttributes().hasAttribute(Attribute::Naked)) + if (!MF.getFunction()->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, Attribute::Naked)) Offset += MFI->getStackSize(); // If we can, encode the offset directly into the instruction. 
If this is a diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp index 63454877514..f70be1d2c19 100644 --- a/lib/Target/X86/X86FrameLowering.cpp +++ b/lib/Target/X86/X86FrameLowering.cpp @@ -691,7 +691,8 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF) const { // pointer, calls, or dynamic alloca then we do not need to adjust the // stack pointer (we fit in the Red Zone). We also check that we don't // push and pop from the stack. - if (Is64Bit && !Fn->getFnAttributes().hasAttribute(Attribute::NoRedZone) && + if (Is64Bit && !Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::NoRedZone) && !RegInfo->needsStackRealignment(MF) && !MFI->hasVarSizedObjects() && // No dynamic alloca. !MFI->adjustsStack() && // No calls. diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp index acb89c11fca..c9918b900de 100644 --- a/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -431,8 +431,8 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) { void X86DAGToDAGISel::PreprocessISelDAG() { // OptForSize is used in pattern predicates that isel is matching. - OptForSize = MF->getFunction()->getFnAttributes(). - hasAttribute(Attribute::OptimizeForSize); + OptForSize = MF->getFunction()->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize); for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(), E = CurDAG->allnodes_end(); I != E; ) { diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index d279d2da527..09006030927 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -1389,7 +1389,8 @@ X86TargetLowering::getOptimalMemOpType(uint64_t Size, MachineFunction &MF) const { const Function *F = MF.getFunction(); if ((!IsMemset || ZeroMemset) && - !F->getFnAttributes().hasAttribute(Attribute::NoImplicitFloat)) { + !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::NoImplicitFloat)) { if (Size >= 16 && (Subtarget->isUnalignedMemAccessFast() || ((DstAlign == 0 || DstAlign >= 16) && @@ -2068,8 +2069,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, unsigned NumIntRegs = CCInfo.getFirstUnallocated(GPR64ArgRegs, TotalNumIntRegs); - bool NoImplicitFloatOps = Fn->getFnAttributes(). - hasAttribute(Attribute::NoImplicitFloat); + bool NoImplicitFloatOps = Fn->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat); assert(!(NumXMMRegs && !Subtarget->hasSSE1()) && "SSE register cannot be used when SSE is disabled!"); assert(!(NumXMMRegs && MF.getTarget().Options.UseSoftFloat && @@ -2547,8 +2548,9 @@ X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, OpFlags = X86II::MO_DARWIN_STUB; } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) && - cast<Function>(GV)->getFnAttributes(). - hasAttribute(Attribute::NonLazyBind)) { + cast<Function>(GV)->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, + Attribute::NonLazyBind)) { // If the function is marked as non-lazy, generate an indirect call // which loads from the GOT directly. This avoids runtime overhead // at the cost of eager binding (and one extra byte of encoding). 
@@ -6734,8 +6736,8 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { bool HasFp256 = Subtarget->hasFp256(); bool HasInt256 = Subtarget->hasInt256(); MachineFunction &MF = DAG.getMachineFunction(); - bool OptForSize = MF.getFunction()->getFnAttributes(). - hasAttribute(Attribute::OptimizeForSize); + bool OptForSize = MF.getFunction()->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize); assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles"); @@ -10103,8 +10105,9 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { // Sanity Check: Make sure using fp_offset makes sense. assert(!getTargetMachine().Options.UseSoftFloat && !(DAG.getMachineFunction() - .getFunction()->getFnAttributes() - .hasAttribute(Attribute::NoImplicitFloat)) && + .getFunction()->getAttributes() + .hasAttribute(AttributeSet::FunctionIndex, + Attribute::NoImplicitFloat)) && Subtarget->hasSSE1()); } @@ -16574,8 +16577,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG, return SDValue(); const Function *F = DAG.getMachineFunction().getFunction(); - bool NoImplicitFloatOps = F->getFnAttributes(). - hasAttribute(Attribute::NoImplicitFloat); + bool NoImplicitFloatOps = F->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat); bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps && Subtarget->hasSSE2(); if ((VT.isVector() || diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index a7424096a8c..16c6b3d73cc 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -3864,8 +3864,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, // Unless optimizing for size, don't fold to avoid partial // register update stalls - if (!MF.getFunction()->getFnAttributes(). - hasAttribute(Attribute::OptimizeForSize) && + if (!MF.getFunction()->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) && hasPartialRegUpdate(MI->getOpcode())) return 0; @@ -3906,8 +3906,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, // Unless optimizing for size, don't fold to avoid partial // register update stalls - if (!MF.getFunction()->getFnAttributes(). - hasAttribute(Attribute::OptimizeForSize) && + if (!MF.getFunction()->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize) && hasPartialRegUpdate(MI->getOpcode())) return 0; diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp index 6b36101a12e..07c0d148aa9 100644 --- a/lib/Target/X86/X86RegisterInfo.cpp +++ b/lib/Target/X86/X86RegisterInfo.cpp @@ -419,7 +419,8 @@ bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const { unsigned StackAlign = TM.getFrameLowering()->getStackAlignment(); bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) || - F->getFnAttributes().hasAttribute(Attribute::StackAlignment)); + F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::StackAlignment)); // If we've requested that we force align the stack do so now. if (ForceStackAlign) diff --git a/lib/Transforms/IPO/InlineAlways.cpp b/lib/Transforms/IPO/InlineAlways.cpp index 2ae402e497e..52d81764df8 100644 --- a/lib/Transforms/IPO/InlineAlways.cpp +++ b/lib/Transforms/IPO/InlineAlways.cpp @@ -87,7 +87,8 @@ InlineCost AlwaysInliner::getInlineCost(CallSite CS) { // that are viable for inlining. 
FIXME: We shouldn't even get here for // declarations. if (Callee && !Callee->isDeclaration() && - Callee->getFnAttributes().hasAttribute(Attribute::AlwaysInline) && + Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::AlwaysInline) && CA.isInlineViable(*Callee)) return InlineCost::getAlways(); diff --git a/lib/Transforms/IPO/Inliner.cpp b/lib/Transforms/IPO/Inliner.cpp index 0624e746bc9..6ca50525f3c 100644 --- a/lib/Transforms/IPO/Inliner.cpp +++ b/lib/Transforms/IPO/Inliner.cpp @@ -93,10 +93,13 @@ static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI, // If the inlined function had a higher stack protection level than the // calling function, then bump up the caller's stack protection level. - if (Callee->getFnAttributes().hasAttribute(Attribute::StackProtectReq)) + if (Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::StackProtectReq)) Caller->addFnAttr(Attribute::StackProtectReq); - else if (Callee->getFnAttributes().hasAttribute(Attribute::StackProtect) && - !Caller->getFnAttributes().hasAttribute(Attribute::StackProtectReq)) + else if (Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::StackProtect) && + !Caller->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::StackProtectReq)) Caller->addFnAttr(Attribute::StackProtect); // Look at all of the allocas that we inlined through this call site. If we @@ -209,7 +212,8 @@ unsigned Inliner::getInlineThreshold(CallSite CS) const { // would decrease the threshold. Function *Caller = CS.getCaller(); bool OptSize = Caller && !Caller->isDeclaration() && - Caller->getFnAttributes().hasAttribute(Attribute::OptimizeForSize); + Caller->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::OptimizeForSize); if (!(InlineLimit.getNumOccurrences() > 0) && OptSize && OptSizeThreshold < thres) thres = OptSizeThreshold; @@ -218,9 +222,11 @@ unsigned Inliner::getInlineThreshold(CallSite CS) const { // and the caller does not need to minimize its size. Function *Callee = CS.getCalledFunction(); bool InlineHint = Callee && !Callee->isDeclaration() && - Callee->getFnAttributes().hasAttribute(Attribute::InlineHint); + Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::InlineHint); if (InlineHint && HintThreshold > thres - && !Caller->getFnAttributes().hasAttribute(Attribute::MinSize)) + && !Caller->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::MinSize)) thres = HintThreshold; return thres; @@ -536,7 +542,8 @@ bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) { // about always-inline functions. This is a bit of a hack to share code // between here and the InlineAlways pass. if (AlwaysInlineOnly && - !F->getFnAttributes().hasAttribute(Attribute::AlwaysInline)) + !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::AlwaysInline)) continue; // If the only remaining users of the function are dead constants, remove diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp index dab2571d968..1c123919487 100644 --- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -1018,7 +1018,8 @@ bool AddressSanitizer::runOnFunction(Function &F) { // If needed, insert __asan_init before checking for AddressSafety attr. 
maybeInsertAsanInitAtFunctionEntry(F); - if (!F.getFnAttributes().hasAttribute(Attribute::AddressSafety)) + if (!F.getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::AddressSafety)) return false; if (!ClDebugFunc.empty() && ClDebugFunc != F.getName()) diff --git a/lib/Transforms/Scalar/CodeGenPrepare.cpp b/lib/Transforms/Scalar/CodeGenPrepare.cpp index b8900ac3d77..74b8447c64b 100644 --- a/lib/Transforms/Scalar/CodeGenPrepare.cpp +++ b/lib/Transforms/Scalar/CodeGenPrepare.cpp @@ -150,7 +150,8 @@ bool CodeGenPrepare::runOnFunction(Function &F) { TLInfo = &getAnalysis<TargetLibraryInfo>(); DT = getAnalysisIfAvailable<DominatorTree>(); PFI = getAnalysisIfAvailable<ProfileInfo>(); - OptSize = F.getFnAttributes().hasAttribute(Attribute::OptimizeForSize); + OptSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::OptimizeForSize); /// This optimization identifies DIV instructions that can be /// profitably bypassed and carried out with a shorter, faster divide. diff --git a/lib/Transforms/Scalar/LoopUnrollPass.cpp b/lib/Transforms/Scalar/LoopUnrollPass.cpp index 2a04af33d99..6f8269bc08b 100644 --- a/lib/Transforms/Scalar/LoopUnrollPass.cpp +++ b/lib/Transforms/Scalar/LoopUnrollPass.cpp @@ -146,8 +146,9 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) { // not user specified. unsigned Threshold = CurrentThreshold; if (!UserThreshold && - Header->getParent()->getFnAttributes(). - hasAttribute(Attribute::OptimizeForSize)) + Header->getParent()->getAttributes(). + hasAttribute(AttributeSet::FunctionIndex, + Attribute::OptimizeForSize)) Threshold = OptSizeUnrollThreshold; // Find trip count and trip multiple if count is not available diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp index e2e42e5054f..d8cd6538bf4 100644 --- a/lib/Transforms/Scalar/LoopUnswitch.cpp +++ b/lib/Transforms/Scalar/LoopUnswitch.cpp @@ -646,7 +646,8 @@ bool LoopUnswitch::UnswitchIfProfitable(Value *LoopCond, Constant *Val) { // Do not do non-trivial unswitch while optimizing for size. if (OptimizeForSize || - F->getFnAttributes().hasAttribute(Attribute::OptimizeForSize)) + F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, + Attribute::OptimizeForSize)) return false; UnswitchNontrivialCondition(LoopCond, Val, currentLoop); diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp index 653c111182c..adf90818aab 100644 --- a/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -95,7 +95,8 @@ struct LoopVectorize : public LoopPass { // optimized for size. 
Function *F = L->getHeader()->getParent(); Attribute::AttrKind SzAttr= Attribute::OptimizeForSize; - bool OptForSize = F->getFnAttributes().hasAttribute(SzAttr); + bool OptForSize = + F->getAttributes().hasAttribute(AttributeSet::FunctionIndex, SzAttr); unsigned VF = CM.selectVectorizationFactor(OptForSize, VectorizationFactor); diff --git a/lib/VMCore/AsmWriter.cpp b/lib/VMCore/AsmWriter.cpp index da92c853817..08aa73cfa9f 100644 --- a/lib/VMCore/AsmWriter.cpp +++ b/lib/VMCore/AsmWriter.cpp @@ -1601,9 +1601,8 @@ void AssemblyWriter::printFunction(const Function *F) { Out << ')'; if (F->hasUnnamedAddr()) Out << " unnamed_addr"; - Attribute FnAttrs = Attrs.getFnAttributes(); - if (FnAttrs.hasAttributes()) - Out << ' ' << Attrs.getFnAttributes().getAsString(); + if (Attrs.hasAttributes(AttributeSet::FunctionIndex)) + Out << ' ' << Attrs.getAsString(AttributeSet::FunctionIndex); if (F->hasSection()) { Out << " section \""; PrintEscapedString(F->getSection(), Out); @@ -1875,8 +1874,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) { writeParamOperand(CI->getArgOperand(op), PAL.getParamAttributes(op + 1)); } Out << ')'; - if (PAL.getFnAttributes().hasAttributes()) - Out << ' ' << PAL.getFnAttributes().getAsString(); + if (PAL.hasAttributes(AttributeSet::FunctionIndex)) + Out << ' ' << PAL.getAsString(AttributeSet::FunctionIndex); } else if (const InvokeInst *II = dyn_cast<InvokeInst>(&I)) { Operand = II->getCalledValue(); PointerType *PTy = cast<PointerType>(Operand->getType()); @@ -1915,8 +1914,8 @@ void AssemblyWriter::printInstruction(const Instruction &I) { } Out << ')'; - if (PAL.getFnAttributes().hasAttributes()) - Out << ' ' << PAL.getFnAttributes().getAsString(); + if (PAL.hasAttributes(AttributeSet::FunctionIndex)) + Out << ' ' << PAL.getAsString(AttributeSet::FunctionIndex); Out << "\n to "; writeOperand(II->getNormalDest(), true); diff --git a/lib/VMCore/AttributeImpl.h b/lib/VMCore/AttributeImpl.h index cab1c94255d..2726e0edf64 100644 --- a/lib/VMCore/AttributeImpl.h +++ b/lib/VMCore/AttributeImpl.h @@ -87,7 +87,7 @@ public: /// \class /// \brief This class represents a set of attributes. class AttributeSetImpl : public FoldingSetNode { - // AttributesList is uniqued, these should not be publicly available. + // AttributesSet is uniqued, these should not be publicly available. void operator=(const AttributeSetImpl &) LLVM_DELETED_FUNCTION; AttributeSetImpl(const AttributeSetImpl &) LLVM_DELETED_FUNCTION; public: diff --git a/lib/VMCore/Attributes.cpp b/lib/VMCore/Attributes.cpp index b32358708d8..de83deb2e9a 100644 --- a/lib/VMCore/Attributes.cpp +++ b/lib/VMCore/Attributes.cpp @@ -451,6 +451,27 @@ const AttributeWithIndex &AttributeSet::getSlot(unsigned Slot) const { return AttrList->Attrs[Slot]; } +bool AttributeSet::hasAttribute(unsigned Index, Attribute::AttrKind Kind) const{ + return getAttributes(Index).hasAttribute(Kind); +} + +bool AttributeSet::hasAttributes(unsigned Index) const { + return getAttributes(Index).hasAttributes(); +} + +std::string AttributeSet::getAsString(unsigned Index) const { + return getAttributes(Index).getAsString(); +} + +unsigned AttributeSet::getStackAlignment(unsigned Index) const { + return getAttributes(Index).getStackAlignment(); +} + +uint64_t AttributeSet::getBitMask(unsigned Index) const { + // FIXME: Remove this. + return getAttributes(Index).getBitMask(); +} + /// getAttributes - The attributes for the specified index are returned. /// Attributes for the result are denoted with Idx = 0. Function notes are /// denoted with idx = ~0. 
diff --git a/lib/VMCore/Core.cpp b/lib/VMCore/Core.cpp index 1fb85285683..60a8483aba5 100644 --- a/lib/VMCore/Core.cpp +++ b/lib/VMCore/Core.cpp @@ -1401,8 +1401,7 @@ void LLVMRemoveFunctionAttr(LLVMValueRef Fn, LLVMAttribute PA) { LLVMAttribute LLVMGetFunctionAttr(LLVMValueRef Fn) { Function *Func = unwrap<Function>(Fn); const AttributeSet PAL = Func->getAttributes(); - Attribute attr = PAL.getFnAttributes(); - return (LLVMAttribute)attr.getBitMask(); + return (LLVMAttribute)PAL.getBitMask(AttributeSet::FunctionIndex); } /*--.. Operations on parameters ............................................--*/
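For out-of-tree code that needs the same mechanical rewrite, the pattern throughout this patch is uniform: every query of the form F->getFnAttributes().hasAttribute(Kind) becomes a direct query on the AttributeSet, with AttributeSet::FunctionIndex naming the function-level slot. A minimal sketch against the headers this patch modifies; the helper name hasNoRedZone is hypothetical, not part of the commit:

#include "llvm/Attributes.h"
#include "llvm/Function.h"

using namespace llvm;

// Before this commit, callers went through the removed accessor:
//   bool NoRedZone = F->getFnAttributes().hasAttribute(Attribute::NoRedZone);
// After it, the AttributeSet is queried directly, with FunctionIndex
// selecting the function attributes rather than a parameter or return slot.
static bool hasNoRedZone(const Function *F) {
  return F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                         Attribute::NoRedZone);
}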