//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/DebugInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, InsertLifetime);
}

bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, InsertLifetime);
}
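
// Illustrative only: a minimal sketch of how a client pass might drive the
// entry points above. The surrounding state (a CallInst *CI being visited and
// an optional call graph / DataLayout) is assumed for the example and is not
// defined in this file.
//
//   InlineFunctionInfo IFI(/*CG=*/0, /*TD=*/0);
//   if (InlineFunction(CI, IFI, /*InsertLifetime=*/true)) {
//     // CI has been folded into its caller and erased; IFI.InlinedCalls and
//     // IFI.StaticAllocas describe what the inliner introduced.
//   }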
namespace {
  /// A class for recording information about inlining through an invoke.
  class InvokeInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    InvokeInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(0),
        CallerLPad(0), InnerEHValuesPHI(0) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// getOuterResumeDest - The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// forwardResume - Forward the 'resume' instruction to the caller's landing
    /// pad block. When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSet<LandingPadInst*, 16> &InlinedLPads);

    /// addIncomingPHIValuesFor - Add incoming-PHI values to the unwind
    /// destination block for the given basic block, using the values for the
    /// original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
} // end anonymous namespace
/// getInnerResumeDest - Get or create a target for the branch from ResumeInsts.
BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = CallerLPad; ++SplitPoint;
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}
/// forwardResume - Forward the 'resume' instruction to the caller's landing pad
/// block. When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void InvokeInliningInfo::forwardResume(ResumeInst *RI,
                               SmallPtrSet<LandingPadInst*, 16> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  LandingPadInst *OuterLPad = getLandingPadInst();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // corresponds to the order in which the PHI values are stored in the vector.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  for (SmallPtrSet<LandingPadInst*, 16>::iterator I = InlinedLPads.begin(),
         E = InlinedLPads.end(); I != E; ++I) {
    LandingPadInst *InlinedLPad = *I;
    for (unsigned OuterIdx = 0, OuterNum = OuterLPad->getNumClauses();
         OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
  }
}
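
// Illustrative only: a hand-written IR sketch (block and value names invented)
// of the rewrite forwardResume performs. An inlined block that ended in
//
//   src:
//     resume { i8*, i32 } %exn
//
// is redirected into the caller's landing pad, which getInnerResumeDest has
// split after its landingpad instruction:
//
//   lpad:
//     %lp = landingpad { i8*, i32 } ... cleanup
//     br label %lpad.body
//   lpad.body:
//     %eh.lpad-body = phi { i8*, i32 } [ %lp, %lpad ], [ %exn, %src ]
//     ...original landing pad code...
//
// and 'src' now ends in 'br label %lpad.body' instead of the resume.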
/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes. This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
///
/// Returns true to indicate that the next block should be skipped.
static bool HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  LandingPadInst *LPI = Invoke.getLandingPadInst();

  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    if (LandingPadInst *L = dyn_cast<LandingPadInst>(I)) {
      unsigned NumClauses = LPI->getNumClauses();
      L->reserveClauses(NumClauses);
      for (unsigned i = 0; i != NumClauses; ++i)
        L->addClause(LPI->getClause(i));
    }

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    // Inline asm calls cannot throw.
    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // Convert this function call into an invoke instruction. First, split the
    // basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock.
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
                                        Invoke.getOuterResumeDest(),
                                        InvokeArgs, CI->getName(), BB);
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call.
    Split->getInstList().pop_front();

    // Update any PHI nodes in the exceptional block to indicate that there is
    // now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return false;
  }

  return false;
}
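
// Illustrative only: a hand-written IR sketch (names invented) of the
// call-to-invoke conversion above. Inside code inlined through an invoke, a
// block containing a potentially-throwing call
//
//   bb:
//     %r = call i32 @may_throw(i32 %x)
//     ...rest of bb...
//
// is split at the call and rewritten as
//
//   bb:
//     %r = invoke i32 @may_throw(i32 %x)
//             to label %r.noexc unwind label %caller.lpad
//   r.noexc:
//     ...rest of bb...
//
// where %caller.lpad is Invoke.getOuterResumeDest() and any PHI nodes in it
// gain an incoming value for the new edge out of 'bb'.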
/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  InvokeInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock, E = Caller->end(); I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){
    if (InlinedCodeInfo.ContainsCalls)
      if (HandleCallsInBlockInlinedThroughInvoke(BB, Invoke)) {
        // Honor a request to skip the next block.
        ++BB;
        continue;
      }

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}
/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made.  Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == 0)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add.  Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (NewCall == 0) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer.  If this
    // happens, set the callee of the new call site to a more precise
    // destination.  This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (I->second->getFunction() == 0)
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller.  We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}
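
// Illustrative only: the edge bookkeeping above on a tiny example. If the call
// graph contains the edges A->B and B->C and the call site A->B is inlined,
// the cloned call to C inside A contributes a new A->C edge, and the edge for
// the now-deleted A->B call site is removed:
//
//   before:  A -> B,  B -> C          after:  A -> C,  B -> C
//
// The CallCache copy exists because when A == B (a self-recursive call site)
// the loop would otherwise iterate over the same edge list it is appending to.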
/// HandleByValArgument - When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *PassedValue,
                                  const Argument *ArgumentSignature,
                                  Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  Type *AggTy = cast<PointerType>(PassedValue->getType())->getElementType();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory.  In this case, it is safe to elide the copy and
  // pass the pointer through directly.
  if (CalledFunc->onlyReadsMemory() || ArgumentSignature->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return PassedValue;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(PassedValue, ByValAlignment,
                                   IFI.TD) >= ByValAlignment)
      return PassedValue;

    // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  LLVMContext &Context = PassedValue->getContext();

  Type *VoidPtrTy = Type::getInt8PtrTy(Context);

  // Create the alloca.  If we have DataLayout, use nice alignment.
  unsigned Align = 1;
  if (IFI.TD)
    Align = IFI.TD->getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Function *Caller = TheCall->getParent()->getParent();

  Value *NewAlloca = new AllocaInst(AggTy, 0, Align, PassedValue->getName(),
                                    &*Caller->begin()->begin());

  Type *Tys[3] = { VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context) };
  Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                 Intrinsic::memcpy, Tys);

  Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
  Value *SrcCast = new BitCastInst(PassedValue, VoidPtrTy, "tmp", TheCall);

  Value *Size;
  if (IFI.TD == 0)
    Size = ConstantExpr::getSizeOf(AggTy);
  else
    Size = ConstantInt::get(Type::getInt64Ty(Context),
                            IFI.TD->getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer.  Other optimizations can infer
  // better alignment.
  Value *CallArgs[] = {
    DestCast, SrcCast, Size,
    ConstantInt::get(Type::getInt32Ty(Context), 1),
    ConstantInt::getFalse(Context) // isVolatile
  };
  IRBuilder<>(TheCall).CreateCall(MemCpyFn, CallArgs);

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}
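
// Illustrative only: a hand-written IR sketch (names invented) of the copy
// that HandleByValArgument makes explicit. Given a call site
//
//   call void @callee(%struct.S* byval align 4 %p)
//
// whose callee may write to its byval copy, the caller's entry block gets a
// fresh alloca that is initialized from %p just before the soon-to-be-inlined
// call:
//
//   %p.copy = alloca %struct.S, align 4
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size,
//                                        i32 1, i1 false)
//
// where %dst/%src are i8* bitcasts of %p.copy/%p, and the inlined body then
// uses %p.copy in place of the byval argument.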
// isUsedByLifetimeMarker - Check whether this Value is used by a lifetime
// intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (Value::use_iterator UI = V->use_begin(), UE = V->use_end(); UI != UE;
       ++UI) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// hasLifetimeMarkers - Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Int8PtrTy = Type::getInt8PtrTy(AI->getType()->getContext());
  if (AI->getType() == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end(); I != E;
       ++I) {
    if (I->getType() != Int8PtrTy) continue;
    if (I->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(*I))
      return true;
  }
  return false;
}
/// updateInlinedAtInfo - Helper function used by fixupLineNumbers to
/// recursively update InlinedAtEntry of a DebugLoc.
static DebugLoc updateInlinedAtInfo(const DebugLoc &DL,
                                    const DebugLoc &InlinedAtDL,
                                    LLVMContext &Ctx) {
  if (MDNode *IA = DL.getInlinedAt(Ctx)) {
    DebugLoc NewInlinedAtDL
      = updateInlinedAtInfo(DebugLoc::getFromDILocation(IA), InlinedAtDL, Ctx);
    return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                         NewInlinedAtDL.getAsMDNode(Ctx));
  }

  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                       InlinedAtDL.getAsMDNode(Ctx));
}
/// fixupLineNumbers - Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (TheCallDL.isUnknown())
    return;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (!DL.isUnknown()) {
        BI->setDebugLoc(updateInlinedAtInfo(DL, TheCallDL, BI->getContext()));
        if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
          LLVMContext &Ctx = BI->getContext();
          MDNode *InlinedAt = BI->getDebugLoc().getInlinedAt(Ctx);
          DVI->setOperand(2, createInlinedVariable(DVI->getVariable(),
                                                   InlinedAt, Ctx));
        }
      }
    }
  }
}
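
// Illustrative only: the effect of fixupLineNumbers, with invented locations.
// An instruction cloned from the callee that carried the debug location
//
//   callee.c:7
//
// ends up carrying
//
//   callee.c:7, inlined at caller.c:42
//
// i.e. it keeps its own line/column/scope but its InlinedAt chain is extended
// so the innermost entry ultimately points at TheCall's location, and
// llvm.dbg.value intrinsics are switched to "inlined variable" entries so the
// debugger can distinguish the inlined frame from the physical frame.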
/// InlineFunction - This function inlines the called function into the basic
/// block of the caller.  This returns false if it is not possible to inline
/// this call.  The program is still in a well defined state if this occurs
/// though.
///
/// Note that this only does one level of inlining.  For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream.  Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||          // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;
  // If the call to the callee is not a tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Value *CalleePersonality = 0;
  for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
       I != E; ++I)
    if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
      const BasicBlock *BB = II->getUnwindDest();
      const LandingPadInst *LP = BB->getLandingPadInst();
      CalleePersonality = LP->getPersonalityFn();
      break;
    }

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  if (CalleePersonality) {
    for (Function::const_iterator I = Caller->begin(), E = Caller->end();
         I != E; ++I)
      if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
        const BasicBlock *BB = II->getUnwindDest();
        const LandingPadInst *LP = BB->getLandingPadInst();

        // If the personality functions match, then we can perform the
        // inlining. Otherwise, we can't inline.
        // TODO: This isn't 100% true. Some personality functions are proper
        //       supersets of others and can be used in place of the other.
        if (LP->getPersonalityFn() != CalleePersonality)
          return false;

        break;
      }
  }
  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;
      const Argument *Arg = I;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, Arg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));

        // Calls that we inline may use the new alloca, so we need to clear
        // their 'tail' flags if HandleByValArgument introduced a new alloca and
        // the callee has calls.
        MustClearTailCallFlags |= ActualArg != *AI;
      }

      VMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, IFI.TD, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);
  }
  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller.  First
  // calculate which instruction they should be inserted before.  We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (AI == 0) continue;

      // If the alloca is now dead, remove it.  This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block.  Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }
  // Leave lifetime markers for the static alloca's, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = 0;
      if (ConstantInt *AIArraySize =
          dyn_cast<ConstantInt>(AI->getArraySize())) {
        if (IFI.TD) {
          Type *AllocaType = AI->getAllocatedType();
          uint64_t AllocaTypeSize = IFI.TD->getTypeAllocSize(AllocaType);
          uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
          assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
          // Check that array size doesn't saturate uint64_t and doesn't
          // overflow when it's multiplied by type size.
          if (AllocaArraySize != ~0ULL &&
              UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
            AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                          AllocaArraySize * AllocaTypeSize);
          }
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (unsigned ri = 0, re = Returns.size(); ri != re; ++ri) {
        IRBuilder<> builder(Returns[ri]);
        builder.CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }
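
  // Illustrative only: a hand-written IR sketch (names invented) of the
  // markers emitted above for an inlined static alloca whose size is known:
  //
  //   %buf.i = alloca [64 x i8], align 1
  //   call void @llvm.lifetime.start(i64 64, i8* %buf.i.cast) ; first new block
  //   ...inlined body...
  //   call void @llvm.lifetime.end(i64 64, i8* %buf.i.cast)   ; before each ret
  //
  // (%buf.i.cast is the i8* bitcast the IRBuilder inserts.) When the size
  // cannot be computed, AllocaSize stays null and the builder falls back to a
  // size of -1, meaning "unknown size".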
  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
      Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
      .CreateCall(StackSave, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      IRBuilder<>(Returns[i]).CreateCall(StackRestore, SavedPtr);
    }
  }
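
  // Illustrative only: when the callee contained dynamic allocas, the inlined
  // body is bracketed so the caller's stack is released again on the way out
  // (names invented):
  //
  //   %savedstack = call i8* @llvm.stacksave()
  //   ...inlined code containing 'alloca i8, i32 %n'...
  //   call void @llvm.stackrestore(i8* %savedstack)  ; before each inlined ret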
  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code.  Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }
  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }

    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }
  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.
  //
  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = NULL;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(CreatedBranchToNormalDest,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());
  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = 0;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction now and empty ReturnBB now.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();
  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the callee's entry block into the calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef).  If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    if (Value *V = SimplifyInstruction(PHI, IFI.TD)) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}