1 //===- Inliner.cpp - Code common to all inliners --------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the mechanics required to implement inlining without
11 // missing any calls and updating the call graph. The decisions of which calls
12 // are profitable to inline are implemented elsewhere.
14 //===----------------------------------------------------------------------===//
16 #define DEBUG_TYPE "inline"
17 #include "llvm/Module.h"
18 #include "llvm/Instructions.h"
19 #include "llvm/IntrinsicInst.h"
20 #include "llvm/Analysis/CallGraph.h"
21 #include "llvm/Support/CallSite.h"
22 #include "llvm/Target/TargetData.h"
23 #include "llvm/Transforms/IPO/InlinerPass.h"
24 #include "llvm/Transforms/Utils/InlineCost.h"
25 #include "llvm/Transforms/Utils/Cloning.h"
26 #include "llvm/Support/CommandLine.h"
27 #include "llvm/Support/Debug.h"
28 #include "llvm/Support/raw_ostream.h"
29 #include "llvm/ADT/SmallPtrSet.h"
30 #include "llvm/ADT/Statistic.h"
// Pass-wide statistics reported with -stats.
34 STATISTIC(NumInlined, "Number of functions inlined");
35 STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
36 STATISTIC(NumMergedAllocas, "Number of allocas merged together");
// Command-line knob for inlining aggressiveness; used as the default
// InlineThreshold in the no-threshold Inliner constructor below.
// NOTE(review): the declaration head (presumably "static cl::opt<int>") is
// elided in this excerpt -- the original numbering jumps from 36 to 39;
// verify against the upstream file before editing.
39 InlineLimit("inline-threshold", cl::Hidden, cl::init(200), cl::ZeroOrMore,
40 cl::desc("Control the amount of inlining to perform (default = 200)"));
// Construct an inliner that takes its threshold from the -inline-threshold
// command-line option (default 200).
42 Inliner::Inliner(void *ID)
43 : CallGraphSCCPass(ID), InlineThreshold(InlineLimit) {}
// Construct an inliner with an explicit threshold supplied by the derived
// pass, bypassing the -inline-threshold command-line default.
45 Inliner::Inliner(void *ID, int Threshold)
46 : CallGraphSCCPass(ID), InlineThreshold(Threshold) {}
48 /// getAnalysisUsage - For this class, we declare that we require and preserve
49 /// the call graph. If the derived class implements this method, it should
50 /// always explicitly call the implementation here.
51 void Inliner::getAnalysisUsage(AnalysisUsage &Info) const {
// Delegate to the base class, which registers the CallGraph requirement.
52 CallGraphSCCPass::getAnalysisUsage(Info);
// NOTE(review): the closing brace (original line 53) is elided in this
// excerpt.
// Map from an array type to all of the inlined allocas of that type that are
// available for reuse in the current caller (see InlineCallIfPossible's
// alloca-merging logic below).
56 typedef DenseMap<const ArrayType*, std::vector<AllocaInst*> >
57 InlinedArrayAllocasTy;
59 /// InlineCallIfPossible - If it is possible to inline the specified call site,
60 /// do so and update the CallGraph for this operation.
62 /// This function also does some basic book-keeping to update the IR. The
63 /// InlinedArrayAllocas map keeps track of any
// allocas inlined through earlier call sites into this caller, so that the
// merging logic below can reuse them for allocas inlined here.
//
// NOTE(review): gaps in the original line numbering throughout this function
// (e.g. 64->66, 73->76, 118->121, 135->138, 149->154) indicate elided
// statements -- most plausibly 'return false;', 'continue;', 'break;' and
// closing braces. Verify against the upstream LLVM source before editing.
64 static bool InlineCallIfPossible(CallSite CS, CallGraph &CG,
// NOTE(review): a 'const TargetData *TD' parameter appears to be elided here
// (numbering gap 64->66) -- TD is passed to InlineFunction below and supplied
// by runOnSCC.
66 InlinedArrayAllocasTy &InlinedArrayAllocas) {
67 Function *Callee = CS.getCalledFunction();
68 Function *Caller = CS.getCaller();
70 // Try to inline the function. Get the list of static allocas that were
// inlined (filled in by InlineFunction).
72 SmallVector<AllocaInst*, 16> StaticAllocas;
// If the inline fails, report failure to the caller. NOTE(review): the
// 'return false;' for this branch (original lines 74-75) is elided here.
73 if (!InlineFunction(CS, &CG, TD, &StaticAllocas))
76 // If the inlined function had a higher stack protection level than the
77 // calling function, then bump up the caller's stack protection level.
78 if (Callee->hasFnAttr(Attribute::StackProtectReq))
79 Caller->addFnAttr(Attribute::StackProtectReq);
80 else if (Callee->hasFnAttr(Attribute::StackProtect) &&
81 !Caller->hasFnAttr(Attribute::StackProtectReq))
82 Caller->addFnAttr(Attribute::StackProtect);
85 // Look at all of the allocas that we inlined through this call site. If we
86 // have already inlined other allocas through other calls into this function,
87 // then we know that they have disjoint lifetimes and that we can merge them.
89 // There are many heuristics possible for merging these allocas, and the
90 // different options have different tradeoffs. One thing that we *really*
91 // don't want to hurt is SRoA: once inlining happens, often allocas are no
92 // longer address taken and so they can be promoted.
94 // Our "solution" for that is to only merge allocas whose outermost type is an
95 // array type. These are usually not promoted because someone is using a
96 // variable index into them. These are also often the most important ones to
// merge. NOTE(review): the tail of this sentence (original lines 97-98) is
// elided here.
99 // A better solution would be to have real memory lifetime markers in the IR
100 // and not have the inliner do any merging of allocas at all. This would
101 // allow the backend to do proper stack slot coloring of all allocas that
102 // *actually make it to the backend*, which is really what we want.
104 // Because we don't have this information, we do this simple and useful hack.
// Allocas already claimed by one of this call's inlined allocas; prevents
// reusing the same "available" alloca twice within a single inline.
106 SmallPtrSet<AllocaInst*, 16> UsedAllocas;
108 // Loop over all the allocas we have so far and see if they can be merged with
109 // a previously inlined alloca. If not, remember that we had it.
110 for (unsigned AllocaNo = 0, e = StaticAllocas.size();
111 AllocaNo != e; ++AllocaNo) {
112 AllocaInst *AI = StaticAllocas[AllocaNo];
114 // Don't bother trying to merge array allocations (they will usually be
115 // canonicalized to be an allocation *of* an array), or allocations whose
116 // type is not itself an array (because we're afraid of pessimizing SRoA).
117 const ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
// NOTE(review): the 'continue;' for this guard (original line 119) is elided.
118 if (ATy == 0 || AI->isArrayAllocation())
121 // Get the list of all available allocas for this array type.
122 std::vector<AllocaInst*> &AllocasForType = InlinedArrayAllocas[ATy];
124 // Loop over the allocas in AllocasForType to see if we can reuse one. Note
125 // that we have to be careful not to reuse the same "available" alloca for
126 // multiple different allocas that we just inlined, we use the 'UsedAllocas'
127 // set to keep track of which "available" allocas are being used by this
128 // function. Also, AllocasForType can be empty of course!
129 bool MergedAwayAlloca = false;
130 for (unsigned i = 0, e = AllocasForType.size(); i != e; ++i) {
131 AllocaInst *AvailableAlloca = AllocasForType[i];
133 // The available alloca has to be in the right function, not in some other
134 // function in this SCC.
// NOTE(review): the 'continue;' for this guard (original lines 136-137) is
// elided. Also note getParent() on an AllocaInst yields its BasicBlock, so
// this compares enclosing blocks; the entry blocks of SCC functions differ,
// which is presumably what makes this a per-function check -- confirm.
135 if (AvailableAlloca->getParent() != AI->getParent())
138 // If the inlined function already uses this alloca then we can't reuse
// it. insert() returns false when the element was already present.
// NOTE(review): the 'continue;' for this guard (original lines 141-142) is
// elided.
140 if (!UsedAllocas.insert(AvailableAlloca))
143 // Otherwise, we *can* reuse it, RAUW AI into AvailableAlloca and declare
// the alloca merged away. NOTE(review): the tail of this comment (original
// line 144) is elided.
145 DEBUG(errs() << " ***MERGED ALLOCA: " << *AI);
147 AI->replaceAllUsesWith(AvailableAlloca);
148 AI->eraseFromParent();
149 MergedAwayAlloca = true;
// NOTE(review): statistics bump / 'break;' and the inner loop's closing brace
// (original lines 150-153) are elided here.
154 // If we already nuked the alloca, we're done with it.
// NOTE(review): the 'continue;' body (original lines 156-157) is elided.
155 if (MergedAwayAlloca)
158 // If we were unable to merge away the alloca either because there are no
159 // allocas of the right type available or because we reused them all
160 // already, remember that this alloca came from an inlined function and mark
161 // it used so we don't reuse it for other allocas from this inline
// operation.
163 AllocasForType.push_back(AI);
164 UsedAllocas.insert(AI);
// NOTE(review): the loop's closing brace and the final 'return true;'
// (original lines 165-168) are elided here.
170 /// shouldInline - Return true if the inliner should attempt to inline
171 /// at the given CallSite.
// Combines the cost model (getInlineCost / getInlineFudgeFactor, implemented
// by the derived pass) with the pass's InlineThreshold to make the decision.
//
// NOTE(review): gaps in the original numbering (173->176, 177->182, 183->187,
// 198->202) indicate elided statements -- presumably the IC.isAlways() /
// IC.isNever() branch heads and their 'return true;' / 'return false;'
// bodies, plus the final 'return true;'. Verify against upstream.
172 bool Inliner::shouldInline(CallSite CS) {
173 InlineCost IC = getInlineCost(CS);
// Unconditional-inline case (branch head elided above this debug output).
176 DEBUG(errs() << " Inlining: cost=always"
177 << ", Call: " << *CS.getInstruction() << "\n");
// Never-inline case (branch head elided above this debug output).
182 DEBUG(errs() << " NOT Inlining: cost=never"
183 << ", Call: " << *CS.getInstruction() << "\n");
187 int Cost = IC.getValue();
188 int CurrentThreshold = InlineThreshold;
189 Function *Fn = CS.getCaller();
// Callers marked optsize get a tighter threshold of 50 -- unless the user
// set -inline-threshold to exactly 50 themselves, in which case the value is
// already what we'd pick. NOTE(review): this '!= 50' guard looks like a
// proxy for "user did not override the threshold"; confirm intent upstream.
190 if (Fn && !Fn->isDeclaration() &&
191 Fn->hasFnAttr(Attribute::OptimizeForSize) &&
192 InlineThreshold != 50)
193 CurrentThreshold = 50;
// Scale the threshold by a per-call-site fudge factor before comparing.
195 float FudgeFactor = getInlineFudgeFactor(CS);
196 if (Cost >= (int)(CurrentThreshold * FudgeFactor)) {
197 DEBUG(errs() << " NOT Inlining: cost=" << Cost
198 << ", Call: " << *CS.getInstruction() << "\n");
// NOTE(review): 'return false;' and the closing brace (original lines
// 199-200) are elided here.
202 DEBUG(errs() << " Inlining: cost=" << Cost
203 << ", Call: " << *CS.getInstruction() << "\n");
// NOTE(review): the final 'return true;' and closing brace (original lines
// 204-205) are elided here.
// runOnSCC - Main driver: collect the call sites in this SCC, then repeatedly
// inline the profitable ones (updating the call graph and deleting callees
// that become dead) until no more local changes occur.
//
// NOTE(review): gaps in the original numbering throughout (216->219, 225->228,
// 231->234, 255->259 where the 'do { bool LocalChange = ...' loop head must
// live given the '} while (LocalChange);' at the end, 266->268, 283->286,
// 288->291, etc.) indicate elided statements -- 'continue;'s, braces, and the
// outer do/while head. Verify against the upstream LLVM source before editing.
207 bool Inliner::runOnSCC(const std::vector<CallGraphNode*> &SCC) {
208 CallGraph &CG = getAnalysis<CallGraph>();
// TargetData is optional; InlineCallIfPossible tolerates a null TD.
209 const TargetData *TD = getAnalysisIfAvailable<TargetData>();
// Functions in this SCC -- used to detect intra-SCC calls and to avoid
// deleting a callee that still participates in the SCC.
211 SmallPtrSet<Function*, 8> SCCFunctions;
212 DEBUG(errs() << "Inliner visiting SCC:");
213 for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
214 Function *F = SCC[i]->getFunction();
215 if (F) SCCFunctions.insert(F);
216 DEBUG(errs() << " " << (F ? F->getName() : "INDIRECTNODE"));
219 // Scan through and identify all call sites ahead of time so that we only
220 // inline call sites in the original functions, not call sites that result
221 // from inlining other functions.
222 SmallVector<CallSite, 16> CallSites;
224 for (unsigned i = 0, e = SCC.size(); i != e; ++i) {
225 Function *F = SCC[i]->getFunction();
// NOTE(review): a null-F guard (original lines 226-227) appears to be elided.
228 for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
229 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
230 CallSite CS = CallSite::get(I);
// Skip non-call instructions and debug intrinsics. NOTE(review): the
// 'continue;' (original lines 232-233) is elided.
231 if (CS.getInstruction() == 0 || isa<DbgInfoIntrinsic>(I))
// Keep indirect calls (they may become direct after inlining) and direct
// calls to functions with bodies; calls to pure declarations are useless.
234 if (CS.getCalledFunction() == 0 ||
235 !CS.getCalledFunction()->isDeclaration())
236 CallSites.push_back(CS);
240 DEBUG(errs() << ": " << CallSites.size() << " call sites.\n");
242 // Now that we have all of the call sites, move the ones to functions in the
243 // current SCC to the end of the list.
244 unsigned FirstCallInSCC = CallSites.size();
245 for (unsigned i = 0; i < FirstCallInSCC; ++i)
246 if (Function *F = CallSites[i].getCalledFunction())
247 if (SCCFunctions.count(F))
248 std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);
// Shared across iterations so allocas from different inlines into the same
// caller can be merged (see InlineCallIfPossible).
251 InlinedArrayAllocasTy InlinedArrayAllocas;
253 // Now that we have all of the call sites, loop over them and inline them if
254 // it looks profitable to do so.
255 bool Changed = false;
// NOTE(review): the 'do {' and 'bool LocalChange = false;' loop head
// (original lines ~256-258) are elided here; the matching
// '} while (LocalChange);' appears at the bottom of this function.
259 // Iterate over the outer loop because inlining functions can cause indirect
260 // calls to become direct calls.
261 for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
262 // We can only inline direct calls.
263 CallSite CS = CallSites[CSi];
265 Function *Callee = CS.getCalledFunction();
266 if (!Callee) continue;
268 // Calls to external functions are never inlinable.
// Remove the call site from the work list before continuing.
269 if (Callee->isDeclaration()) {
270 if (SCC.size() == 1) {
271 std::swap(CallSites[CSi], CallSites.back());
272 CallSites.pop_back();
// NOTE(review): the '} else {' joining these two removal strategies (original
// line 273) is elided.
274 // Keep the 'in SCC / not in SCC' boundary correct.
275 CallSites.erase(CallSites.begin()+CSi);
// NOTE(review): closing braces and the '--CSi; continue;' adjustment
// (original lines 276-280) are elided here.
281 // If the policy determines that we should inline this function,
// really do so. NOTE(review): the 'continue;' for the negative case
// (original lines 284-285) is elided.
283 if (!shouldInline(CS))
286 Function *Caller = CS.getCaller();
287 // Attempt to inline the function...
// NOTE(review): the 'continue;' for the failure case (original lines
// 289-290) is elided.
288 if (!InlineCallIfPossible(CS, CG, TD, InlinedArrayAllocas))
291 // If we inlined the last possible call site to the function, delete the
292 // function body now.
293 if (Callee->use_empty() &&
294 (Callee->hasLocalLinkage() ||
295 Callee->hasAvailableExternallyLinkage()) &&
296 !SCCFunctions.count(Callee)) {
297 DEBUG(errs() << " -> Deleting dead function: "
298 << Callee->getName() << "\n");
299 CallGraphNode *CalleeNode = CG[Callee];
301 // Remove any call graph edges from the callee to its callees.
302 CalleeNode->removeAllCalledFunctions();
// Drop any cached inline-cost data for the function we're deleting.
304 resetCachedCostInfo(Callee);
306 // Removing the node for callee from the call graph and delete it.
307 delete CG.removeFunctionFromModule(CalleeNode);
// NOTE(review): '++NumDeleted;' and the closing brace (original lines
// 308-310) are elided here.
311 // Remove any cached cost info for this caller, as inlining the
312 // callee has increased the size of the caller (which may be the
313 // same as the callee).
314 resetCachedCostInfo(Caller);
316 // Remove this call site from the list. If possible, use
317 // swap/pop_back for efficiency, but do not use it if doing so would
318 // move a call site to a function in this SCC before the
319 // 'FirstCallInSCC' barrier.
320 if (SCC.size() == 1) {
321 std::swap(CallSites[CSi], CallSites.back());
322 CallSites.pop_back();
// NOTE(review): the '} else {' (original line 323) is elided.
324 CallSites.erase(CallSites.begin()+CSi);
// NOTE(review): the remaining loop bookkeeping -- '--CSi;', statistics,
// 'Changed = true; LocalChange = true;' and closing braces (original lines
// 325-331) -- is elided here.
332 } while (LocalChange);
// NOTE(review): the final 'return Changed;' and closing brace (original
// lines 333-335) are elided here.
337 // doFinalization - Remove now-dead linkonce functions at the end of
338 // processing to avoid breaking the SCC traversal.
339 bool Inliner::doFinalization(CallGraph &CG) {
// Delegate to removeDeadFunctions with no DNR (Do Not Remove) list, so every
// eligible dead function is removed. Returns true if the module changed.
340 return removeDeadFunctions(CG);
// NOTE(review): the closing brace (original line 341) is elided in this
// excerpt.
343 /// removeDeadFunctions - Remove dead functions that are not included in
344 /// DNR (Do Not Remove) list.
// Two phases: (1) scan the call graph collecting dead, discardable functions
// into FunctionsToRemove; (2) delete them. Deletion is deferred so the
// CallGraph::iterator used in phase 1 is never invalidated.
//
// NOTE(review): gaps in the original numbering (353->356, 360->362, 364->369,
// 378->381) indicate elided guards/'continue;'s, and this excerpt ends at
// line 392 -- the tail of the function (statistics bump, closing braces, and
// the final 'return Changed;') is cut off. Verify against upstream.
345 bool Inliner::removeDeadFunctions(CallGraph &CG,
346 SmallPtrSet<const Function *, 16> *DNR) {
347 SmallPtrSet<CallGraphNode*, 16> FunctionsToRemove;
349 // Scan for all of the functions, looking for ones that should now be removed
350 // from the program. Insert the dead ones in the FunctionsToRemove set.
351 for (CallGraph::iterator I = CG.begin(), E = CG.end(); I != E; ++I) {
352 CallGraphNode *CGN = I->second;
// Skip the external node and indirect-call nodes, which have no function.
// NOTE(review): the 'continue;' (original lines 354-355) is elided.
353 if (CGN == 0 || CGN->getFunction() == 0)
356 Function *F = CGN->getFunction();
358 // If the only remaining users of the function are dead constants, remove
// them now so the use_empty-style checks below see the real liveness.
360 F->removeDeadConstantUsers();
// Honor the caller-supplied Do-Not-Remove list. NOTE(review): the
// 'continue;' (original line 363) is elided.
362 if (DNR && DNR->count(F))
// Only linkonce/local functions are safely discardable; anything with
// stronger linkage may be referenced from outside this module.
// NOTE(review): this guard's 'continue;' and a use_empty() check (original
// lines 365-368) appear to be elided.
364 if (!F->hasLinkOnceLinkage() && !F->hasLocalLinkage())
369 // Remove any call graph edges from the function to its callees.
370 CGN->removeAllCalledFunctions();
372 // Remove any edges from the external node to the function's call graph
373 // node. These edges might have been made irrelevant due to
374 // optimization of the program.
375 CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);
377 // Removing the node for callee from the call graph and delete it.
378 FunctionsToRemove.insert(CGN);
381 // Now that we know which functions to delete, do so. We didn't want to do
382 // this inline, because that would invalidate our CallGraph::iterator
// objects. NOTE(review): original lines 383-384 are elided here.
385 // Note that it doesn't matter that we are iterating over a non-stable set
386 // here to do this, it doesn't matter which order the functions are deleted
// in. NOTE(review): original line 387 is elided here.
388 bool Changed = false;
389 for (SmallPtrSet<CallGraphNode*, 16>::iterator I = FunctionsToRemove.begin(),
390 E = FunctionsToRemove.end(); I != E; ++I) {
// Drop cached cost info before the function object is destroyed.
391 resetCachedCostInfo((*I)->getFunction());
// removeFunctionFromModule unlinks and returns the Function; we own it now
// and delete it.
392 delete CG.removeFunctionFromModule(*I);
// NOTE(review): the remainder of this function (statistics, closing braces,
// 'return Changed;') is cut off at the end of this excerpt.