//===- Inliner.cpp - Code common to all inliners --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the mechanics required to implement inlining without
// missing any calls and updating the call graph.  The decisions of which calls
// are profitable to inline are implemented elsewhere.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/InlinerPass.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "inline"

STATISTIC(NumInlined, "Number of functions inlined");
STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");

// This weirdly named statistic tracks the number of times that, when attempting
// to inline a function A into B, we analyze the callers of B to see whether
// those inlines would be more profitable and would therefore block this
// inlining step.
STATISTIC(NumCallerCallersAnalyzed, "Number of caller-callers analyzed");

static cl::opt<int>
InlineLimit("inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
        cl::desc("Control the amount of inlining to perform (default = 225)"));

static cl::opt<int>
HintThreshold("inlinehint-threshold", cl::Hidden, cl::init(325),
              cl::desc("Threshold for inlining functions with inline hint"));

// We introduce this threshold to help performance of instrumentation-based
// PGO before we actually hook up the inliner with analysis passes such as
// BPI and BFI.
static cl::opt<int>
ColdThreshold("inlinecold-threshold", cl::Hidden, cl::init(225),
              cl::desc("Threshold for inlining functions with cold attribute"));

// Threshold to use when optsize is specified (and there is no -inline-limit).
const int OptSizeThreshold = 75;

Inliner::Inliner(char &ID)
    : CallGraphSCCPass(ID), InlineThreshold(InlineLimit), InsertLifetime(true) {}

Inliner::Inliner(char &ID, int Threshold, bool InsertLifetime)
    : CallGraphSCCPass(ID),
      InlineThreshold(InlineLimit.getNumOccurrences() > 0 ? InlineLimit
                                                          : Threshold),
      InsertLifetime(InsertLifetime) {}
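
// For example (values are only illustrative): a derived pass constructed as
// Inliner(ID, 275, true) uses a threshold of 275, but if the user also passes
// -inline-threshold=100 on the command line, the flag wins and the effective
// threshold becomes 100.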

/// getAnalysisUsage - For this class, we declare that we require and preserve
/// the call graph.  If the derived class implements this method, it should
/// always explicitly call the implementation here.
void Inliner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addRequired<AssumptionCacheTracker>();
  CallGraphSCCPass::getAnalysisUsage(AU);
}

typedef DenseMap<ArrayType*, std::vector<AllocaInst*> >
InlinedArrayAllocasTy;

/// \brief If the inlined function had a higher stack protection level than the
/// calling function, then bump up the caller's stack protection level.
static void AdjustCallerSSPLevel(Function *Caller, Function *Callee) {
  // If upgrading the SSP attribute, clear out the old SSP attributes first.
  // Having multiple SSP attributes doesn't actually hurt, but it adds useless
  // clutter to the function attributes.
  AttrBuilder B;
  B.addAttribute(Attribute::StackProtect)
    .addAttribute(Attribute::StackProtectStrong);
  AttributeSet OldSSPAttr = AttributeSet::get(Caller->getContext(),
                                              AttributeSet::FunctionIndex,
                                              B);

  if (Callee->hasFnAttribute(Attribute::StackProtectReq)) {
    Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
    Caller->addFnAttr(Attribute::StackProtectReq);
  } else if (Callee->hasFnAttribute(Attribute::StackProtectStrong) &&
             !Caller->hasFnAttribute(Attribute::StackProtectReq)) {
    Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
    Caller->addFnAttr(Attribute::StackProtectStrong);
  } else if (Callee->hasFnAttribute(Attribute::StackProtect) &&
             !Caller->hasFnAttribute(Attribute::StackProtectReq) &&
             !Caller->hasFnAttribute(Attribute::StackProtectStrong))
    Caller->addFnAttr(Attribute::StackProtect);
}
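
// For example, if the callee was built with sspstrong and the caller only has
// ssp, the caller is bumped up to sspstrong after inlining.  The level is only
// ever raised, never lowered: sspreq takes precedence over sspstrong, which
// takes precedence over ssp.  (Illustrative summary of the logic above.)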

/// InlineCallIfPossible - If it is possible to inline the specified call site,
/// do so and update the CallGraph for this operation.
///
/// This function also does some basic book-keeping to update the IR.  The
/// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other functions inlined into the caller.  If we are able to
/// inline this call site we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible.
static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
                                 InlinedArrayAllocasTy &InlinedArrayAllocas,
                                 int InlineHistory, bool InsertLifetime,
                                 const DataLayout *DL) {
  Function *Callee = CS.getCalledFunction();
  Function *Caller = CS.getCaller();

  // Try to inline the function.  Get the list of static allocas that were
  // inlined.
  if (!InlineFunction(CS, IFI, InsertLifetime))
    return false;

  AdjustCallerSSPLevel(Caller, Callee);

  // Look at all of the allocas that we inlined through this call site.  If we
  // have already inlined other allocas through other calls into this function,
  // then we know that they have disjoint lifetimes and that we can merge them.
  //
  // There are many heuristics possible for merging these allocas, and the
  // different options have different tradeoffs.  One thing that we *really*
  // don't want to hurt is SRoA: once inlining happens, often allocas are no
  // longer address taken and so they can be promoted.
  //
  // Our "solution" for that is to only merge allocas whose outermost type is an
  // array type.  These are usually not promoted because someone is using a
  // variable index into them.  These are also often the most important ones to
  // merge.
  //
  // A better solution would be to have real memory lifetime markers in the IR
  // and not have the inliner do any merging of allocas at all.  This would
  // allow the backend to do proper stack slot coloring of all allocas that
  // *actually make it to the backend*, which is really what we want.
  //
  // Because we don't have this information, we do this simple and useful hack.
  //
  SmallPtrSet<AllocaInst*, 16> UsedAllocas;

  // When processing our SCC, check to see if CS was inlined from some other
  // call site.  For example, if we're processing "A" in this code:
  //   A() { B() }
  //   B() { x = alloca ... C() }
  //   C() { y = alloca ... }
  // Assume that C was not inlined into B initially, and so we're processing A
  // and decide to inline B into A.  Doing this makes an alloca available for
  // reuse and makes a callsite (C) available for inlining.  When we process
  // the C call site we don't want to do any alloca merging between X and Y
  // because their scopes are not disjoint.  We could make this smarter by
  // keeping track of the inline history for each alloca in the
  // InlinedArrayAllocas but this isn't likely to be a significant win.
  if (InlineHistory != -1)  // Only do merging for top-level call sites in SCC.
    return true;

  // Loop over all the allocas we have so far and see if they can be merged with
  // a previously inlined alloca.  If not, remember that we had it.
  for (unsigned AllocaNo = 0, e = IFI.StaticAllocas.size();
       AllocaNo != e; ++AllocaNo) {
    AllocaInst *AI = IFI.StaticAllocas[AllocaNo];

    // Don't bother trying to merge array allocations (they will usually be
    // canonicalized to be an allocation *of* an array), or allocations whose
    // type is not itself an array (because we're afraid of pessimizing SRoA).
    ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
    if (!ATy || AI->isArrayAllocation())
      continue;

    // Get the list of all available allocas for this array type.
    std::vector<AllocaInst*> &AllocasForType = InlinedArrayAllocas[ATy];

    // Loop over the allocas in AllocasForType to see if we can reuse one.  Note
    // that we have to be careful not to reuse the same "available" alloca for
    // multiple different allocas that we just inlined; we use the 'UsedAllocas'
    // set to keep track of which "available" allocas are being used by this
    // function.  Also, AllocasForType can be empty of course!
    bool MergedAwayAlloca = false;
    for (unsigned i = 0, e = AllocasForType.size(); i != e; ++i) {
      AllocaInst *AvailableAlloca = AllocasForType[i];

      unsigned Align1 = AI->getAlignment(),
               Align2 = AvailableAlloca->getAlignment();
      // If we don't have data layout information, and only one alloca is using
      // the target default, then we can't safely merge them because we can't
      // pick the greater alignment.
      if (!DL && (!Align1 || !Align2) && Align1 != Align2)
        continue;

      // The available alloca has to be in the right function, not in some other
      // function in this SCC.
      if (AvailableAlloca->getParent() != AI->getParent())
        continue;

      // If the inlined function already uses this alloca then we can't reuse
      // it.
      if (!UsedAllocas.insert(AvailableAlloca).second)
        continue;

      // Otherwise, we *can* reuse it.  RAUW AI into AvailableAlloca and mark
      // the available alloca as used.
      DEBUG(dbgs() << "    ***MERGED ALLOCA: " << *AI << "\n\t\tINTO: "
                   << *AvailableAlloca << '\n');

      AI->replaceAllUsesWith(AvailableAlloca);

      if (Align1 != Align2) {
        if (!Align1 || !Align2) {
          assert(DL && "DataLayout required to compare default alignments");
          unsigned TypeAlign = DL->getABITypeAlignment(AI->getAllocatedType());

          Align1 = Align1 ? Align1 : TypeAlign;
          Align2 = Align2 ? Align2 : TypeAlign;
        }

        if (Align1 > Align2)
          AvailableAlloca->setAlignment(AI->getAlignment());
      }

      AI->eraseFromParent();
      MergedAwayAlloca = true;
      ++NumMergedAllocas;
      IFI.StaticAllocas[AllocaNo] = nullptr;
      break;
    }

    // If we already nuked the alloca, we're done with it.
    if (MergedAwayAlloca)
      continue;

    // If we were unable to merge away the alloca either because there are no
    // allocas of the right type available or because we reused them all
    // already, remember that this alloca came from an inlined function and mark
    // it used so we don't reuse it for other allocas from this inline
    // operation.
    AllocasForType.push_back(AI);
    UsedAllocas.insert(AI);
  }

  return true;
}
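
// Rough illustration of the merging above (names and sizes are made up): if
// two different callees each contained "%buf = alloca [64 x i8]" and both are
// inlined into the same caller at the top level of this SCC walk, the second
// %buf is RAUW'd onto the first and deleted, so the caller's frame ends up
// with only one 64-byte array instead of two.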

unsigned Inliner::getInlineThreshold(CallSite CS) const {
  int thres = InlineThreshold; // -inline-threshold or else selected by
                               // overall opt level

  // If -inline-threshold is not given, listen to the optsize attribute when it
  // would decrease the threshold.
  Function *Caller = CS.getCaller();
  bool OptSize = Caller && !Caller->isDeclaration() &&
                 Caller->hasFnAttribute(Attribute::OptimizeForSize);
  if (!(InlineLimit.getNumOccurrences() > 0) && OptSize &&
      OptSizeThreshold < thres)
    thres = OptSizeThreshold;

  // Listen to the inlinehint attribute when it would increase the threshold
  // and the caller does not need to minimize its size.
  Function *Callee = CS.getCalledFunction();
  bool InlineHint = Callee && !Callee->isDeclaration() &&
                    Callee->hasFnAttribute(Attribute::InlineHint);
  if (InlineHint && HintThreshold > thres &&
      !Caller->hasFnAttribute(Attribute::MinSize))
    thres = HintThreshold;

  // Listen to the cold attribute when it would decrease the threshold.
  bool ColdCallee = Callee && !Callee->isDeclaration() &&
                    Callee->hasFnAttribute(Attribute::Cold);
  // Command line argument for InlineLimit will override the default
  // ColdThreshold. If we have -inline-threshold but no -inlinecold-threshold,
  // do not use the default cold threshold even if it is smaller.
  if ((InlineLimit.getNumOccurrences() == 0 ||
       ColdThreshold.getNumOccurrences() > 0) && ColdCallee &&
      ColdThreshold < thres)
    thres = ColdThreshold;

  return thres;
}
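
// Worked example with the default values above (purely illustrative): a call
// from a caller marked optsize to a callee marked inlinehint starts at 225,
// drops to 75 because of optsize, then rises to 325 because of inlinehint
// (optsize is not minsize, so the hint still applies).  Passing
// -inline-threshold on the command line disables the optsize adjustment.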

static void emitAnalysis(CallSite CS, const Twine &Msg) {
  Function *Caller = CS.getCaller();
  LLVMContext &Ctx = Caller->getContext();
  DebugLoc DLoc = CS.getInstruction()->getDebugLoc();
  emitOptimizationRemarkAnalysis(Ctx, DEBUG_TYPE, *Caller, DLoc, Msg);
}

/// shouldInline - Return true if the inliner should attempt to inline
/// at the given CallSite.
bool Inliner::shouldInline(CallSite CS) {
  InlineCost IC = getInlineCost(CS);

  if (IC.isAlways()) {
    DEBUG(dbgs() << "    Inlining: cost=always"
          << ", Call: " << *CS.getInstruction() << "\n");
    emitAnalysis(CS, Twine(CS.getCalledFunction()->getName()) +
                     " should always be inlined (cost=always)");
    return true;
  }

  if (IC.isNever()) {
    DEBUG(dbgs() << "    NOT Inlining: cost=never"
          << ", Call: " << *CS.getInstruction() << "\n");
    emitAnalysis(CS, Twine(CS.getCalledFunction()->getName() +
                           " should never be inlined (cost=never)"));
    return false;
  }

  Function *Caller = CS.getCaller();
  if (!IC) {
    DEBUG(dbgs() << "    NOT Inlining: cost=" << IC.getCost()
          << ", thres=" << (IC.getCostDelta() + IC.getCost())
          << ", Call: " << *CS.getInstruction() << "\n");
    emitAnalysis(CS, Twine(CS.getCalledFunction()->getName() +
                           " too costly to inline (cost=") +
                         Twine(IC.getCost()) + ", threshold=" +
                         Twine(IC.getCostDelta() + IC.getCost()) + ")");
    return false;
  }

  // Try to detect the case where the current inlining candidate caller (call
  // it B) is a static or linkonce-ODR function and is an inlining candidate
  // elsewhere, and the current candidate callee (call it C) is large enough
  // that inlining it into B would make B too big to inline later.  In these
  // circumstances it may be best not to inline C into B, but to inline B into
  // its callers.
  //
  // This only applies to static and linkonce-ODR functions because those are
  // expected to be available for inlining in the translation units where they
  // are used.  Thus we will always have the opportunity to make local inlining
  // decisions.  Importantly the linkonce-ODR linkage covers inline functions
  // and templates in C++.
  //
  // FIXME: All of this logic should be sunk into getInlineCost.  It relies on
  // the internal implementation of the inline cost metrics rather than
  // treating them as truly abstract units etc.
  if (Caller->hasLocalLinkage() || Caller->hasLinkOnceODRLinkage()) {
    int TotalSecondaryCost = 0;
    // The candidate cost to be imposed upon the current function.
    int CandidateCost = IC.getCost() - (InlineConstants::CallPenalty + 1);
    // This bool tracks what happens if we do NOT inline C into B.
    bool callerWillBeRemoved = Caller->hasLocalLinkage();
    // This bool tracks what happens if we DO inline C into B.
    bool inliningPreventsSomeOuterInline = false;
    for (User *U : Caller->users()) {
      CallSite CS2(U);

      // If this isn't a call to Caller (it could be some other sort
      // of reference) skip it.  Such references will prevent the caller
      // from being removed.
      if (!CS2 || CS2.getCalledFunction() != Caller) {
        callerWillBeRemoved = false;
        continue;
      }

      InlineCost IC2 = getInlineCost(CS2);
      ++NumCallerCallersAnalyzed;
      if (!IC2) {
        callerWillBeRemoved = false;
        continue;
      }
      if (IC2.isAlways())
        continue;

      // See if inlining of the original callsite would erase the cost delta of
      // this callsite.  We subtract off the penalty for the call instruction,
      // which we would be deleting.
      if (IC2.getCostDelta() <= CandidateCost) {
        inliningPreventsSomeOuterInline = true;
        TotalSecondaryCost += IC2.getCost();
      }
    }
    // If all outer calls to Caller would get inlined, the cost for the last
    // one is set very low by getInlineCost, in anticipation that Caller will
    // be removed entirely.  We did not account for this above unless there
    // is only one caller of Caller.
    if (callerWillBeRemoved && !Caller->use_empty())
      TotalSecondaryCost += InlineConstants::LastCallToStaticBonus;

    if (inliningPreventsSomeOuterInline && TotalSecondaryCost < IC.getCost()) {
      DEBUG(dbgs() << "    NOT Inlining: " << *CS.getInstruction() <<
           " Cost = " << IC.getCost() <<
           ", outer Cost = " << TotalSecondaryCost << '\n');
      emitAnalysis(
          CS, Twine("Not inlining. Cost of inlining " +
                    CS.getCalledFunction()->getName() +
                    " increases the cost of inlining " +
                    CS.getCaller()->getName() + " in other contexts"));
      return false;
    }
  }
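
  // Illustrative numbers (made up): if B is static and inlining C into B would
  // cost 200, while two of B's callers could no longer absorb the enlarged B
  // (secondary costs of 80 and 90, i.e. 170 total, which is less than 200),
  // the check above refuses to inline C so that B itself stays small enough to
  // be inlined upward into its callers instead.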

  DEBUG(dbgs() << "    Inlining: cost=" << IC.getCost()
        << ", thres=" << (IC.getCostDelta() + IC.getCost())
        << ", Call: " << *CS.getInstruction() << '\n');
  emitAnalysis(
      CS, CS.getCalledFunction()->getName() + Twine(" can be inlined into ") +
              CS.getCaller()->getName() + " with cost=" + Twine(IC.getCost()) +
              " (threshold=" + Twine(IC.getCostDelta() + IC.getCost()) + ")");
  return true;
}

/// InlineHistoryIncludes - Return true if the specified inline history ID
/// indicates an inline history that includes the specified function.
static bool InlineHistoryIncludes(Function *F, int InlineHistoryID,
            const SmallVectorImpl<std::pair<Function*, int> > &InlineHistory) {
  while (InlineHistoryID != -1) {
    assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
           "Invalid inline history ID");
    if (InlineHistory[InlineHistoryID].first == F)
      return true;
    InlineHistoryID = InlineHistory[InlineHistoryID].second;
  }
  return false;
}
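
// For example (hypothetical history): if InlineHistory holds {B, -1} at index
// 0 and {C, 0} at index 1, then a call site tagged with history ID 1 was
// exposed by inlining C, which itself appeared by inlining B.  Walking the
// chain 1 -> 0 -> -1, InlineHistoryIncludes returns true for both C and B, so
// neither function is inlined into that site again.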

bool Inliner::runOnSCC(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  AssumptionCacheTracker *ACT = &getAnalysis<AssumptionCacheTracker>();
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
  auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  const TargetLibraryInfo *TLI = TLIP ? &TLIP->getTLI() : nullptr;
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();

  SmallPtrSet<Function*, 8> SCCFunctions;
  DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (F) SCCFunctions.insert(F);
    DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallSite, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee.  This allows us to avoid
  // infinite inlining in some obscure cases.  To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function*, int>, 8> InlineHistory;

  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (!F) continue;

    for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
        CallSite CS(cast<Value>(I));
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CS || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never inline
        // it.  If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (CS.getCalledFunction() && CS.getCalledFunction()->isDeclaration())
          continue;

        CallSites.push_back(std::make_pair(CS, -1));
      }
  }

  DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this function, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned i = 0; i < FirstCallInSCC; ++i)
    if (Function *F = CallSites[i].first.getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);
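
  // For instance (illustrative ordering only): if the SCC is {f, g} and the
  // collected call sites are [f->g, f->h, g->f] with h outside the SCC, the
  // two calls into the SCC (f->g and g->f) end up after the call to h, so
  // call sites leaving the SCC are visited first.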

  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, DL, AA, ACT);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Iterate over the outer loop because inlining functions can cause indirect
    // calls to become direct calls.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      CallSite CS = CallSites[CSi].first;

      Function *Caller = CS.getCaller();
      Function *Callee = CS.getCalledFunction();

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size.  This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (isInstructionTriviallyDead(CS.getInstruction(), TLI)) {
        DEBUG(dbgs() << "    -> Deleting dead call: "
                     << *CS.getInstruction() << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        CG[Caller]->removeCallEdgeFor(CS);
        CS.getInstruction()->eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // We can only inline direct calls to non-declarations.
        if (!Callee || Callee->isDeclaration()) continue;

        // If this call site was obtained by inlining another function, verify
        // that the include path for the function did not include the callee
        // itself.  If so, we'd be recursively inlining the same function,
        // which would provide the same callsites, which would cause us to
        // infinitely inline.
        int InlineHistoryID = CallSites[CSi].second;
        if (InlineHistoryID != -1 &&
            InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory))
          continue;

        LLVMContext &CallerCtx = Caller->getContext();

        // Get DebugLoc to report.  CS will be invalid after Inliner.
        DebugLoc DLoc = CS.getInstruction()->getDebugLoc();

        // If the policy determines that we should inline this function,
        // try to do so.
        if (!shouldInline(CS)) {
          emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
                                       Twine(Callee->getName() +
                                             " will not be inlined into " +
                                             Caller->getName()));
          continue;
        }

        // Attempt to inline the function.
        if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
                                  InlineHistoryID, InsertLifetime, DL)) {
          emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
                                       Twine(Callee->getName() +
                                             " will not be inlined into " +
                                             Caller->getName()));
          continue;
        }
        ++NumInlined;

        // Report the inline decision.
        emitOptimizationRemark(
            CallerCtx, DEBUG_TYPE, *Caller, DLoc,
            Twine(Callee->getName() + " inlined into " + Caller->getName()));

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process.  They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

          for (unsigned i = 0, e = InlineInfo.InlinedCalls.size();
               i != e; ++i) {
            Value *Ptr = InlineInfo.InlinedCalls[i];
            CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
          }
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&

          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet; doing
          // so could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        DEBUG(dbgs() << "    -> Deleting dead function: "
                     << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the node for the callee from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list.  If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin()+CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}

// doFinalization - Remove now-dead linkonce functions at the end of
// processing to avoid breaking the SCC traversal.
bool Inliner::doFinalization(CallGraph &CG) {
  return removeDeadFunctions(CG);
}

/// removeDeadFunctions - Remove dead functions that are not included in
/// DNR (Do Not Remove) list.
bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode*, 16> FunctionsToRemove;

  // Scan for all of the functions, looking for ones that should now be removed
  // from the program.  Insert the dead ones in the FunctionsToRemove set.
  for (CallGraph::iterator I = CG.begin(), E = CG.end(); I != E; ++I) {
    CallGraphNode *CGN = I->second;
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions.  This is a bit of a hack to share code
    // between here and the InlineAlways pass.
    if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // It is unsafe to drop a function with discardable linkage from a COMDAT
    // without also dropping the other members of the COMDAT.
    // The inliner doesn't visit non-function entities which are in COMDAT
    // groups so it is unsafe to do so *unless* the linkage is local.
    if (!F->hasLocalLinkage() && F->hasComdat())
      continue;

    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node.  These edges might have been made irrelevant due to
    // optimization of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Remember the node so that it can be removed from the call graph and
    // deleted below.
    FunctionsToRemove.push_back(CGN);
  }

  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so.  We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that it doesn't matter that we are iterating over a non-stable order
  // here; the order in which the functions are deleted is irrelevant.
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(std::unique(FunctionsToRemove.begin(),
                                      FunctionsToRemove.end()),
                          FunctionsToRemove.end());
  for (SmallVectorImpl<CallGraphNode *>::iterator I = FunctionsToRemove.begin(),
                                                  E = FunctionsToRemove.end();
       I != E; ++I)
    delete CG.removeFunctionFromModule(*I);
  return true;
}