1 //===- InlineFunction.cpp - Code to perform function inlining -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements inlining of a function into a call site, resolving
11 // parameters and the return value as appropriate.
13 // The code in this file for handling inlines through invoke
14 // instructions preserves semantics only under some assumptions about
15 // the behavior of unwinders which correspond to gcc-style libUnwind
16 // exception personality functions. Eventually the IR will be
17 // improved to make this unnecessary, but until then, this code is
18 // marked [LIBUNWIND].
20 //===----------------------------------------------------------------------===//
22 #include "llvm/Transforms/Utils/Cloning.h"
23 #include "llvm/Constants.h"
24 #include "llvm/DerivedTypes.h"
25 #include "llvm/Module.h"
26 #include "llvm/Instructions.h"
27 #include "llvm/IntrinsicInst.h"
28 #include "llvm/Intrinsics.h"
29 #include "llvm/Attributes.h"
30 #include "llvm/Analysis/CallGraph.h"
31 #include "llvm/Analysis/DebugInfo.h"
32 #include "llvm/Analysis/InstructionSimplify.h"
33 #include "llvm/Target/TargetData.h"
34 #include "llvm/Transforms/Utils/Local.h"
35 #include "llvm/ADT/SmallVector.h"
36 #include "llvm/ADT/StringExtras.h"
37 #include "llvm/Support/CallSite.h"
38 #include "llvm/Support/IRBuilder.h"
41 bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI) {
42 return InlineFunction(CallSite(CI), IFI);
43 }
44 bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI) {
45 return InlineFunction(CallSite(II), IFI);
46 }
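// Illustrative usage (not part of the original file): a transform that wants
// to inline one specific call site would typically do something like the
// following, assuming InlineFunctionInfo can be built from an optional
// CallGraph and TargetData:
//
//   InlineFunctionInfo IFI(/*CG=*/0, /*TD=*/0);
//   if (InlineFunction(CI, IFI)) {
//     // CI has been erased; IFI.StaticAllocas and IFI.InlinedCalls describe
//     // what the inliner introduced into the caller.
//   }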
48 // FIXME: New EH - Remove the functions marked [LIBUNWIND] when new EH is
49 // turned on.
51 /// [LIBUNWIND] Look for an llvm.eh.exception call in the given block.
52 static EHExceptionInst *findExceptionInBlock(BasicBlock *bb) {
53 for (BasicBlock::iterator i = bb->begin(), e = bb->end(); i != e; i++) {
54 EHExceptionInst *exn = dyn_cast<EHExceptionInst>(i);
55 if (exn) return exn;
56 }
58 return 0;
59 }
61 /// [LIBUNWIND] Look for the 'best' llvm.eh.selector instruction for
62 /// the given llvm.eh.exception call.
63 static EHSelectorInst *findSelectorForException(EHExceptionInst *exn) {
64 BasicBlock *exnBlock = exn->getParent();
66 EHSelectorInst *outOfBlockSelector = 0;
67 for (Instruction::use_iterator
68 ui = exn->use_begin(), ue = exn->use_end(); ui != ue; ++ui) {
69 EHSelectorInst *sel = dyn_cast<EHSelectorInst>(*ui);
70 if (!sel) continue;
72 // Immediately accept an eh.selector in the same block as the
73 // eh.exception call.
74 if (sel->getParent() == exnBlock) return sel;
76 // Otherwise, use the first selector we see.
77 if (!outOfBlockSelector) outOfBlockSelector = sel;
78 }
80 return outOfBlockSelector;
81 }
83 /// [LIBUNWIND] Find the (possibly absent) call to @llvm.eh.selector
84 /// in the given landing pad. In principle, llvm.eh.exception is
85 /// required to be in the landing pad; in practice, SplitCriticalEdge
86 /// can break that invariant, and then inlining can break it further.
87 /// There's a real need for a reliable solution here, but until that
88 /// happens, we have some fragile workarounds here.
89 static EHSelectorInst *findSelectorForLandingPad(BasicBlock *lpad) {
90 // Look for an exception call in the actual landing pad.
91 EHExceptionInst *exn = findExceptionInBlock(lpad);
92 if (exn) return findSelectorForException(exn);
94 // Okay, if that failed, look for one in an obvious successor. If
95 // we find one, we'll fix the IR by moving things back to the
96 // landing pad.
98 bool dominates = true; // does the lpad dominate the exn call
99 BasicBlock *nonDominated = 0; // if not, the first non-dominated block
100 BasicBlock *lastDominated = 0; // and the block which branched to it
102 BasicBlock *exnBlock = lpad;
104 // We need to protect against lpads that lead into infinite loops.
105 SmallPtrSet<BasicBlock*,4> visited;
106 visited.insert(exnBlock);
108 for (;;) {
109 // We're not going to apply this hack to anything more complicated
110 // than a series of unconditional branches, so if the block
111 // doesn't terminate in an unconditional branch, just fail. More
112 // complicated cases can arise when, say, sinking a call into a
113 // split unwind edge and then inlining it; but that can do almost
114 // *anything* to the CFG, including leaving the selector
115 // completely unreachable. The only way to fix that properly is
116 // to (1) prohibit transforms which move the exception or selector
117 // values away from the landing pad, e.g. by producing them with
118 // instructions that are pinned to an edge like a phi, or
119 // producing them with not-really-instructions, and (2) making
120 // transforms which split edges deal with that.
121 BranchInst *branch = dyn_cast<BranchInst>(&exnBlock->back());
122 if (!branch || branch->isConditional()) return 0;
124 BasicBlock *successor = branch->getSuccessor(0);
126 // Fail if we found an infinite loop.
127 if (!visited.insert(successor)) return 0;
129 // If the successor isn't dominated by exnBlock:
130 if (!successor->getSinglePredecessor()) {
131 // We don't want to have to deal with threading the exception
132 // through multiple levels of phi, so give up if we've already
133 // followed a non-dominating edge.
134 if (!dominates) return 0;
136 // Otherwise, remember this as a non-dominating edge.
137 dominates = false;
138 nonDominated = successor;
139 lastDominated = exnBlock;
140 }
142 exnBlock = successor;
145 exn = findExceptionInBlock(exnBlock);
146 if (exn) break;
147 }
148 // Look for a selector call for the exception we found.
149 EHSelectorInst *selector = findSelectorForException(exn);
150 if (!selector) return 0;
152 // The easy case is when the landing pad still dominates the
153 // exception call, in which case we can just move both calls back to
154 // the landing pad.
155 if (dominates) {
156 selector->moveBefore(lpad->getFirstNonPHI());
157 exn->moveBefore(selector);
158 return selector;
159 }
161 // Otherwise, we have to split at the first non-dominating block.
162 // The CFG looks basically like this:
163 //      lpad:
164 //        phis_0
165 //        insnsAndBranches_1
166 //        br label %nonDominated
167 //      nonDominated:
168 //        phis_2
169 //        insns_3
170 //        %exn = call i8* @llvm.eh.exception()
171 //        insnsAndBranches_4
172 //        %selector = call @llvm.eh.selector(i8* %exn, ...
173 // We need to turn this into:
174 //      lpad:
175 //        phis_0
176 //        %exn0 = call i8* @llvm.eh.exception()
177 //        %selector0 = call @llvm.eh.selector(i8* %exn0, ...
178 //        insnsAndBranches_1
179 //        br label %split  // from lastDominated
180 //      nonDominated:
181 //        phis_2 (without edge from lastDominated)
182 //        %exn1 = call i8* @llvm.eh.exception()
183 //        %selector1 = call i8* @llvm.eh.selector(i8* %exn1, ...
184 //        br label %split
185 //      split:
186 //        phis_2 (edge from lastDominated, edge from split)
187 //        %exn = phi ...
188 //        %selector = phi ...
189 //        insns_3
190 //        insnsAndBranches_4
192 assert(nonDominated);
193 assert(lastDominated);
195 // First, make clones of the intrinsics to go in lpad.
196 EHExceptionInst *lpadExn = cast<EHExceptionInst>(exn->clone());
197 EHSelectorInst *lpadSelector = cast<EHSelectorInst>(selector->clone());
198 lpadSelector->setArgOperand(0, lpadExn);
199 lpadSelector->insertBefore(lpad->getFirstNonPHI());
200 lpadExn->insertBefore(lpadSelector);
202 // Split the non-dominated block.
203 BasicBlock *split =
204 nonDominated->splitBasicBlock(nonDominated->getFirstNonPHI(),
205 nonDominated->getName() + ".lpad-fix");
207 // Redirect the last dominated branch there.
208 cast<BranchInst>(lastDominated->back()).setSuccessor(0, split);
210 // Move the existing intrinsics to the end of the old block.
211 selector->moveBefore(&nonDominated->back());
212 exn->moveBefore(selector);
214 Instruction *splitIP = &split->front();
216 // For all the phis in nonDominated, make a new phi in split to join
217 // that phi with the edge from lastDominated.
218 for (BasicBlock::iterator
219 i = nonDominated->begin(), e = nonDominated->end(); i != e; ++i) {
220 PHINode *phi = dyn_cast<PHINode>(i);
221 if (!phi) break;
223 PHINode *splitPhi = PHINode::Create(phi->getType(), 2, phi->getName(),
224 splitIP);
225 phi->replaceAllUsesWith(splitPhi);
226 splitPhi->addIncoming(phi, nonDominated);
227 splitPhi->addIncoming(phi->removeIncomingValue(lastDominated),
228 lastDominated);
229 }
231 // Make new phis for the exception and selector.
232 PHINode *exnPhi = PHINode::Create(exn->getType(), 2, "", splitIP);
233 exn->replaceAllUsesWith(exnPhi);
234 selector->setArgOperand(0, exn); // except for this use
235 exnPhi->addIncoming(exn, nonDominated);
236 exnPhi->addIncoming(lpadExn, lastDominated);
238 PHINode *selectorPhi = PHINode::Create(selector->getType(), 2, "", splitIP);
239 selector->replaceAllUsesWith(selectorPhi);
240 selectorPhi->addIncoming(selector, nonDominated);
241 selectorPhi->addIncoming(lpadSelector, lastDominated);
243 return lpadSelector;
244 }
247 /// A class for recording information about inlining through an invoke.
248 class InvokeInliningInfo {
249 BasicBlock *OuterUnwindDest;
250 EHSelectorInst *OuterSelector;
251 BasicBlock *InnerUnwindDest;
252 PHINode *InnerExceptionPHI;
253 PHINode *InnerSelectorPHI;
254 SmallVector<Value*, 8> UnwindDestPHIValues;
256 // FIXME: New EH - These will replace the analogous ones above.
257 BasicBlock *OuterResumeDest; //< Destination of the invoke's unwind.
258 BasicBlock *InnerResumeDest; //< Destination for the callee's resume.
259 LandingPadInst *CallerLPad; //< LandingPadInst associated with the invoke.
260 PHINode *InnerEHValuesPHI; //< PHI for EH values from landingpad insts.
262 public:
263 InvokeInliningInfo(InvokeInst *II)
264 : OuterUnwindDest(II->getUnwindDest()), OuterSelector(0),
265 InnerUnwindDest(0), InnerExceptionPHI(0), InnerSelectorPHI(0),
266 OuterResumeDest(II->getUnwindDest()), InnerResumeDest(0),
267 CallerLPad(0), InnerEHValuesPHI(0) {
268 // If there are PHI nodes in the unwind destination block, we need to keep
269 // track of which values came into them from the invoke before removing
270 // the edge from this block.
271 llvm::BasicBlock *InvokeBB = II->getParent();
272 BasicBlock::iterator I = OuterUnwindDest->begin();
273 for (; isa<PHINode>(I); ++I) {
274 // Save the value to use for this edge.
275 PHINode *PHI = cast<PHINode>(I);
276 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
277 }
279 CallerLPad = cast<LandingPadInst>(I);
280 }
282 /// The outer unwind destination is the target of unwind edges
283 /// introduced for calls within the inlined function.
284 BasicBlock *getOuterUnwindDest() const {
285 return OuterUnwindDest;
286 }
288 EHSelectorInst *getOuterSelector() {
289 if (!OuterSelector)
290 OuterSelector = findSelectorForLandingPad(OuterUnwindDest);
291 return OuterSelector;
292 }
294 // FIXME: New EH - Rename when new EH is turned on.
295 BasicBlock *getInnerUnwindDestNewEH();
297 LandingPadInst *getLandingPadInst() const { return CallerLPad; }
299 /// forwardResume - Forward the 'resume' instruction to the caller's landing
300 /// pad block. When the landing pad block has only one predecessor, this is
301 /// a simple branch. When there is more than one predecessor, we need to
302 /// split the landing pad block after the landingpad instruction and jump
303 /// to there.
304 void forwardResume(ResumeInst *RI);
306 /// addIncomingPHIValuesFor - Add incoming-PHI values to the unwind
307 /// destination block for the given basic block, using the values for the
308 /// original invoke's source block.
309 void addIncomingPHIValuesFor(BasicBlock *BB) const {
310 addIncomingPHIValuesForInto(BB, OuterUnwindDest);
311 }
313 void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
314 BasicBlock::iterator I = dest->begin();
315 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
316 PHINode *phi = cast<PHINode>(I);
317 phi->addIncoming(UnwindDestPHIValues[i], src);
318 }
319 }
320 };
323 /// getInnerUnwindDest - Get or create a target for the branch from ResumeInsts.
324 BasicBlock *InvokeInliningInfo::getInnerUnwindDestNewEH() {
325 if (InnerResumeDest) return InnerResumeDest;
327 // Split the landing pad.
328 BasicBlock::iterator SplitPoint = CallerLPad; ++SplitPoint;
329 InnerResumeDest =
330 OuterResumeDest->splitBasicBlock(SplitPoint,
331 OuterResumeDest->getName() + ".body");
333 // The number of incoming edges we expect to the inner landing pad.
334 const unsigned PHICapacity = 2;
336 // Create corresponding new PHIs for all the PHIs in the outer landing pad.
337 BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
338 BasicBlock::iterator I = OuterResumeDest->begin();
339 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
340 PHINode *OuterPHI = cast<PHINode>(I);
341 PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
342 OuterPHI->getName() + ".lpad-body",
343 InsertPoint);
344 OuterPHI->replaceAllUsesWith(InnerPHI);
345 InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
346 }
348 // Create a PHI for the exception values.
349 InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
350 "eh.lpad-body", InsertPoint);
351 CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
352 InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
355 return InnerResumeDest;
356 }
358 /// forwardResume - Forward the 'resume' instruction to the caller's landing pad
359 /// block. When the landing pad block has only one predecessor, this is a simple
360 /// branch. When there is more than one predecessor, we need to split the
361 /// landing pad block after the landingpad instruction and jump to there.
362 void InvokeInliningInfo::forwardResume(ResumeInst *RI) {
363 BasicBlock *Dest = getInnerUnwindDestNewEH();
364 BasicBlock *Src = RI->getParent();
366 BranchInst::Create(Dest, Src);
368 // Update the PHIs in the destination. They were inserted in an order which
369 // matches the saved UnwindDestPHIValues, so the two lists are walked in step.
370 addIncomingPHIValuesForInto(Src, Dest);
372 InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
373 RI->eraseFromParent();
374 }
376 /// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
377 /// an invoke, we have to turn all of the calls that can throw into
378 /// invokes. This function analyzes BB to see if there are any calls, and if so,
379 /// it rewrites them to be invokes that jump to the caller's unwind destination
380 /// and fills in the PHI nodes of that block with the appropriate values.
382 /// Returns true to indicate that the next block should be skipped.
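///
/// For illustration only (this example is not in the original comment), a
/// block such as:
///
///   %r = call i32 @may_throw(i32 %x)
///   ...rest of block...
///
/// is rewritten so the call can unwind to the caller's landing pad:
///
///   %r = invoke i32 @may_throw(i32 %x)
///           to label %r.noexc unwind label %outer.lpad
///   r.noexc:
///   ...rest of block...
///
/// where %outer.lpad stands for the unwind destination of the invoke being
/// inlined through; the value and block names here are made up.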
383 static bool HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
384 InvokeInliningInfo &Invoke) {
385 LandingPadInst *LPI = Invoke.getLandingPadInst();
387 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
388 Instruction *I = BBI++;
390 if (LandingPadInst *L = dyn_cast<LandingPadInst>(I)) {
391 unsigned NumClauses = LPI->getNumClauses();
392 L->reserveClauses(NumClauses);
393 for (unsigned i = 0; i != NumClauses; ++i)
394 L->addClause(LPI->getClause(i));
395 }
397 // We only need to check for function calls: inlined invoke
398 // instructions require no special handling.
399 CallInst *CI = dyn_cast<CallInst>(I);
401 // If this call cannot unwind, don't convert it to an invoke.
402 if (!CI || CI->doesNotThrow())
403 continue;
405 // Convert this function call into an invoke instruction. First, split the
406 // basic block.
407 BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");
409 // Delete the unconditional branch inserted by splitBasicBlock
410 BB->getInstList().pop_back();
412 // Create the new invoke instruction.
413 ImmutableCallSite CS(CI);
414 SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
415 InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
416 Invoke.getOuterUnwindDest(),
417 InvokeArgs, CI->getName(), BB);
418 II->setCallingConv(CI->getCallingConv());
419 II->setAttributes(CI->getAttributes());
421 // Make sure that anything using the call now uses the invoke! This also
422 // updates the CallGraph if present, because it uses a WeakVH.
423 CI->replaceAllUsesWith(II);
425 // Delete the original call
426 Split->getInstList().pop_front();
428 // Update any PHI nodes in the exceptional block to indicate that there is
429 // now a new entry in them.
430 Invoke.addIncomingPHIValuesFor(BB);
431 return false;
432 }
434 return false;
435 }
437 /// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
438 /// in the body of the inlined function into invokes and turn unwind
439 /// instructions into branches to the invoke unwind dest.
441 /// II is the invoke instruction being inlined. FirstNewBlock is the first
442 /// block of the inlined code (the last block is the end of the function),
443 /// and InlineCodeInfo is information about the code that got inlined.
444 static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
445 ClonedCodeInfo &InlinedCodeInfo) {
446 BasicBlock *InvokeDest = II->getUnwindDest();
448 Function *Caller = FirstNewBlock->getParent();
450 // The inlined code is currently at the end of the function, scan from the
451 // start of the inlined code to its end, checking for stuff we need to
452 // rewrite. If the code doesn't have calls or unwinds, we know there is
453 // nothing to rewrite.
454 if (!InlinedCodeInfo.ContainsCalls && !InlinedCodeInfo.ContainsUnwinds) {
455 // Now that everything is happy, we have one final detail. The PHI nodes in
456 // the exception destination block still have entries due to the original
457 // invoke instruction. Eliminate these entries (which might even delete the
458 // PHI node itself).
459 InvokeDest->removePredecessor(II->getParent());
460 return;
461 }
463 InvokeInliningInfo Invoke(II);
465 for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){
466 if (InlinedCodeInfo.ContainsCalls)
467 if (HandleCallsInBlockInlinedThroughInvoke(BB, Invoke)) {
468 // Honor a request to skip the next block. We don't need to
469 // consider UnwindInsts in this case either.
470 ++BB;
471 continue;
472 }
474 if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
475 // An UnwindInst requires special handling when it gets inlined into an
476 // invoke site. Once this happens, we know that the unwind would cause
477 // a control transfer to the invoke exception destination, so we can
478 // transform it into a direct branch to the exception destination.
479 BranchInst::Create(InvokeDest, UI);
481 // Delete the unwind instruction!
482 UI->eraseFromParent();
484 // Update any PHI nodes in the exceptional block to indicate that
485 // there is now a new entry in them.
486 Invoke.addIncomingPHIValuesFor(BB);
487 }
489 if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
490 Invoke.forwardResume(RI);
491 }
493 // Now that everything is happy, we have one final detail. The PHI nodes in
494 // the exception destination block still have entries due to the original
495 // invoke instruction. Eliminate these entries (which might even delete the
496 // PHI node itself).
497 InvokeDest->removePredecessor(II->getParent());
498 }
500 /// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
501 /// into the caller, update the specified callgraph to reflect the changes we
502 /// made. Note that it's possible that not all code was copied over, so only
503 /// some edges of the callgraph may remain.
504 static void UpdateCallGraphAfterInlining(CallSite CS,
505 Function::iterator FirstNewBlock,
506 ValueToValueMapTy &VMap,
507 InlineFunctionInfo &IFI) {
508 CallGraph &CG = *IFI.CG;
509 const Function *Caller = CS.getInstruction()->getParent()->getParent();
510 const Function *Callee = CS.getCalledFunction();
511 CallGraphNode *CalleeNode = CG[Callee];
512 CallGraphNode *CallerNode = CG[Caller];
514 // Since we inlined some uninlined call sites in the callee into the caller,
515 // add edges from the caller to all of the callees of the callee.
516 CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();
518 // Consider the case where CalleeNode == CallerNode.
519 CallGraphNode::CalledFunctionsVector CallCache;
520 if (CalleeNode == CallerNode) {
521 CallCache.assign(I, E);
522 I = CallCache.begin();
523 E = CallCache.end();
524 }
526 for (; I != E; ++I) {
527 const Value *OrigCall = I->first;
529 ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
530 // Only copy the edge if the call was inlined!
531 if (VMI == VMap.end() || VMI->second == 0)
532 continue;
534 // If the call was inlined, but then constant folded, there is no edge to
535 // add. Check for this case.
536 Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
537 if (NewCall == 0) continue;
539 // Remember that this call site got inlined for the client of
540 // InlineFunction.
541 IFI.InlinedCalls.push_back(NewCall);
543 // It's possible that inlining the callsite will cause it to go from an
544 // indirect to a direct call by resolving a function pointer. If this
545 // happens, set the callee of the new call site to a more precise
546 // destination. This can also happen if the call graph node of the caller
547 // was just unnecessarily imprecise.
548 if (I->second->getFunction() == 0)
549 if (Function *F = CallSite(NewCall).getCalledFunction()) {
550 // Indirect call site resolved to direct call.
551 CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);
553 continue;
554 }
556 CallerNode->addCalledFunction(CallSite(NewCall), I->second);
557 }
559 // Update the call graph by deleting the edge from Callee to Caller. We must
560 // do this after the loop above in case Caller and Callee are the same.
561 CallerNode->removeCallEdgeFor(CS);
562 }
564 /// HandleByValArgument - When inlining a call site that has a byval argument,
565 /// we have to make the implicit memcpy explicit by adding it.
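///
/// As a sketch (not taken from the original source), inlining a call like
///   call void @f(%struct.S* byval %p)
/// conceptually introduces into the caller something along the lines of
///   %p.copy = alloca %struct.S
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %size,
///                                        i32 1, i1 false)
/// (with the appropriate bitcasts of %p.copy and %p to i8*), and the inlined
/// body then uses %p.copy instead of %p. The names here are illustrative only.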
566 static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
567 const Function *CalledFunc,
568 InlineFunctionInfo &IFI,
569 unsigned ByValAlignment) {
570 Type *AggTy = cast<PointerType>(Arg->getType())->getElementType();
572 // If the called function is readonly, then it could not mutate the caller's
573 // copy of the byval'd memory. In this case, it is safe to elide the copy and
574 // temporary.
575 if (CalledFunc->onlyReadsMemory()) {
576 // If the byval argument has a specified alignment that is greater than the
577 // passed in pointer, then we either have to round up the input pointer or
578 // give up on this transformation.
579 if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment.
580 return Arg;
582 // If the pointer is already known to be sufficiently aligned, or if we can
583 // round it up to a larger alignment, then we don't need a temporary.
584 if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
585 IFI.TD) >= ByValAlignment)
586 return Arg;
588 // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
589 // for code quality, but rarely happens and is required for correctness.
590 }
592 LLVMContext &Context = Arg->getContext();
594 Type *VoidPtrTy = Type::getInt8PtrTy(Context);
596 // Create the alloca. If we have TargetData, use nice alignment.
597 unsigned Align = 1;
598 if (IFI.TD)
599 Align = IFI.TD->getPrefTypeAlignment(AggTy);
601 // If the byval had an alignment specified, we *must* use at least that
602 // alignment, as it is required by the byval argument (and uses of the
603 // pointer inside the callee).
604 Align = std::max(Align, ByValAlignment);
606 Function *Caller = TheCall->getParent()->getParent();
608 Value *NewAlloca = new AllocaInst(AggTy, 0, Align, Arg->getName(),
609 &*Caller->begin()->begin());
611 Type *Tys[3] = {VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context)};
612 Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
613 Intrinsic::memcpy, Tys);
615 Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
616 Value *SrcCast = new BitCastInst(Arg, VoidPtrTy, "tmp", TheCall);
618 Value *Size;
619 if (IFI.TD == 0)
620 Size = ConstantExpr::getSizeOf(AggTy);
621 else
622 Size = ConstantInt::get(Type::getInt64Ty(Context),
623 IFI.TD->getTypeStoreSize(AggTy));
625 // Always generate a memcpy of alignment 1 here because we don't know
626 // the alignment of the src pointer. Other optimizations can infer
627 // better alignment.
628 Value *CallArgs[] = {
629 DestCast, SrcCast, Size,
630 ConstantInt::get(Type::getInt32Ty(Context), 1),
631 ConstantInt::getFalse(Context) // isVolatile
632 };
633 IRBuilder<>(TheCall).CreateCall(MemCpyFn, CallArgs);
635 // Uses of the argument in the function should use our new alloca
636 // instead.
637 return NewAlloca;
638 }
640 // isUsedByLifetimeMarker - Check whether this Value is used by a lifetime
641 // intrinsic.
642 static bool isUsedByLifetimeMarker(Value *V) {
643 for (Value::use_iterator UI = V->use_begin(), UE = V->use_end(); UI != UE;
644 ++UI) {
645 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI)) {
646 switch (II->getIntrinsicID()) {
647 default: break;
648 case Intrinsic::lifetime_start:
649 case Intrinsic::lifetime_end:
650 return true;
651 }
652 }
653 }
654 return false;
655 }
657 // hasLifetimeMarkers - Check whether the given alloca already has
658 // lifetime.start or lifetime.end intrinsics.
659 static bool hasLifetimeMarkers(AllocaInst *AI) {
660 Type *Int8PtrTy = Type::getInt8PtrTy(AI->getType()->getContext());
661 if (AI->getType() == Int8PtrTy)
662 return isUsedByLifetimeMarker(AI);
664 // Do a scan to find all the casts to i8*.
665 for (Value::use_iterator I = AI->use_begin(), E = AI->use_end(); I != E;
666 ++I) {
667 if (I->getType() != Int8PtrTy) continue;
668 if (I->stripPointerCasts() != AI) continue;
669 if (isUsedByLifetimeMarker(*I))
670 return true;
671 }
672 return false;
673 }
675 /// updateInlinedAtInfo - Helper function used by fixupLineNumbers to recursively
676 /// update InlinedAtEntry of a DebugLoc.
677 static DebugLoc updateInlinedAtInfo(const DebugLoc &DL,
678 const DebugLoc &InlinedAtDL,
679 LLVMContext &Ctx) {
680 if (MDNode *IA = DL.getInlinedAt(Ctx)) {
681 DebugLoc NewInlinedAtDL
682 = updateInlinedAtInfo(DebugLoc::getFromDILocation(IA), InlinedAtDL, Ctx);
683 return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
684 NewInlinedAtDL.getAsMDNode(Ctx));
685 }
687 return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
688 InlinedAtDL.getAsMDNode(Ctx));
689 }
691 /// fixupLineNumbers - Update inlined instructions' line numbers to
692 /// encode the location where these instructions are inlined.
693 static void fixupLineNumbers(Function *Fn, Function::iterator FI,
694 Instruction *TheCall) {
695 DebugLoc TheCallDL = TheCall->getDebugLoc();
696 if (TheCallDL.isUnknown())
697 return;
699 for (; FI != Fn->end(); ++FI) {
700 for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
701 BI != BE; ++BI) {
702 DebugLoc DL = BI->getDebugLoc();
703 if (!DL.isUnknown()) {
704 BI->setDebugLoc(updateInlinedAtInfo(DL, TheCallDL, BI->getContext()));
705 if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
706 LLVMContext &Ctx = BI->getContext();
707 MDNode *InlinedAt = BI->getDebugLoc().getInlinedAt(Ctx);
708 DVI->setOperand(2, createInlinedVariable(DVI->getVariable(),
709 InlinedAt, Ctx));
710 }
711 }
712 }
713 }
714 }
716 /// InlineFunction - This function inlines the called function into the basic
717 /// block of the caller. This returns false if it is not possible to inline
718 /// this call. The program is still in a well defined state if this occurs
719 /// though.
721 /// Note that this only does one level of inlining. For example, if the
722 /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
723 /// exists in the instruction stream. Similarly this will inline a recursive
724 /// function by one level.
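///
/// For illustration only (this example is not part of the original comment):
/// if the body of B contains "call void @C()", then inlining "call void @B()"
/// into A leaves A with a direct "call void @C()"; C itself is not inlined by
/// this invocation and would need a separate call to InlineFunction.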
725 bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
726 Instruction *TheCall = CS.getInstruction();
727 LLVMContext &Context = TheCall->getContext();
728 assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
729 "Instruction not in function!");
731 // If IFI has any state in it, zap it before we fill it in.
732 IFI.reset();
734 const Function *CalledFunc = CS.getCalledFunction();
735 if (CalledFunc == 0 || // Can't inline external function or indirect
736 CalledFunc->isDeclaration() || // call, or call to a vararg function!
737 CalledFunc->getFunctionType()->isVarArg()) return false;
739 // If the call to the callee is not a tail call, we must clear the 'tail'
740 // flags on any calls that we inline.
741 bool MustClearTailCallFlags =
742 !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());
744 // If the call to the callee cannot throw, set the 'nounwind' flag on any
745 // calls that we inline.
746 bool MarkNoUnwind = CS.doesNotThrow();
748 BasicBlock *OrigBB = TheCall->getParent();
749 Function *Caller = OrigBB->getParent();
751 // GC poses two hazards to inlining, which only occur when the callee has GC:
752 // 1. If the caller has no GC, then the callee's GC must be propagated to the
753 // caller.
754 // 2. If the caller has a differing GC, it is invalid to inline.
755 if (CalledFunc->hasGC()) {
756 if (!Caller->hasGC())
757 Caller->setGC(CalledFunc->getGC());
758 else if (CalledFunc->getGC() != Caller->getGC())
759 return false;
760 }
762 // Get the personality function from the callee if it contains a landing pad.
763 Value *CalleePersonality = 0;
764 for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
765 I != E; ++I)
766 if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
767 const BasicBlock *BB = II->getUnwindDest();
768 const LandingPadInst *LP = BB->getLandingPadInst();
769 CalleePersonality = LP->getPersonalityFn();
770 break;
771 }
773 // Find the personality function used by the landing pads of the caller. If it
774 // exists, then check to see that it matches the personality function used in
775 // the callee.
776 if (CalleePersonality) {
777 for (Function::const_iterator I = Caller->begin(), E = Caller->end();
778 I != E; ++I)
779 if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
780 const BasicBlock *BB = II->getUnwindDest();
781 const LandingPadInst *LP = BB->getLandingPadInst();
783 // If the personality functions match, then we can perform the
784 // inlining. Otherwise, we can't inline.
785 // TODO: This isn't 100% true. Some personality functions are proper
786 // supersets of others and can be used in place of the other.
787 if (LP->getPersonalityFn() != CalleePersonality)
788 return false;
789 }
790 }
794 // Get an iterator to the last basic block in the function, which will have
795 // the new function inlined after it.
796 Function::iterator LastBlock = &Caller->back();
798 // Make sure to capture all of the return instructions from the cloned
799 // function.
800 SmallVector<ReturnInst*, 8> Returns;
801 ClonedCodeInfo InlinedFunctionInfo;
802 Function::iterator FirstNewBlock;
804 { // Scope to destroy VMap after cloning.
805 ValueToValueMapTy VMap;
807 assert(CalledFunc->arg_size() == CS.arg_size() &&
808 "No varargs calls can be inlined!");
810 // Calculate the vector of arguments to pass into the function cloner, which
811 // matches up the formal to the actual argument values.
812 CallSite::arg_iterator AI = CS.arg_begin();
813 unsigned ArgNo = 0;
814 for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
815 E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
816 Value *ActualArg = *AI;
818 // When byval arguments are actually inlined, we need to make the copy implied
819 // by them explicit. However, we don't do this if the callee is readonly
820 // or readnone, because the copy would be unneeded: the callee doesn't
821 // modify the struct.
822 if (CS.isByValArgument(ArgNo)) {
823 ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
824 CalledFunc->getParamAlignment(ArgNo+1));
826 // Calls that we inline may use the new alloca, so we need to clear
827 // their 'tail' flags if HandleByValArgument introduced a new alloca and
828 // the callee has calls.
829 MustClearTailCallFlags |= ActualArg != *AI;
830 }
832 VMap[I] = ActualArg;
833 }
835 // We want the inliner to prune the code as it copies. We would LOVE to
836 // have no dead or constant instructions leftover after inlining occurs
837 // (which can happen, e.g., because an argument was constant), but we'll be
838 // happy with whatever the cloner can do.
839 CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
840 /*ModuleLevelChanges=*/false, Returns, ".i",
841 &InlinedFunctionInfo, IFI.TD, TheCall);
843 // Remember the first block that is newly cloned over.
844 FirstNewBlock = LastBlock; ++FirstNewBlock;
846 // Update the callgraph if requested.
847 if (IFI.CG)
848 UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
850 // Update inlined instructions' line number information.
851 fixupLineNumbers(Caller, FirstNewBlock, TheCall);
852 }
854 // If there are any alloca instructions in the block that used to be the entry
855 // block for the callee, move them to the entry block of the caller. First
856 // calculate which instruction they should be inserted before. We insert the
857 // instructions at the end of the current alloca list.
858 {
859 BasicBlock::iterator InsertPoint = Caller->begin()->begin();
860 for (BasicBlock::iterator I = FirstNewBlock->begin(),
861 E = FirstNewBlock->end(); I != E; ) {
862 AllocaInst *AI = dyn_cast<AllocaInst>(I++);
863 if (AI == 0) continue;
865 // If the alloca is now dead, remove it. This often occurs due to code
866 // specialization.
867 if (AI->use_empty()) {
868 AI->eraseFromParent();
869 continue;
870 }
872 if (!isa<Constant>(AI->getArraySize()))
873 continue;
875 // Keep track of the static allocas that we inline into the caller.
876 IFI.StaticAllocas.push_back(AI);
878 // Scan for the block of allocas that we can move over, and move them
879 // all at once.
880 while (isa<AllocaInst>(I) &&
881 isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
882 IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
883 ++I;
884 }
886 // Transfer all of the allocas over in a block. Using splice means
887 // that the instructions aren't removed from the symbol table, then
888 // reinserted.
889 Caller->getEntryBlock().getInstList().splice(InsertPoint,
890 FirstNewBlock->getInstList(),
891 AI, I);
892 }
893 }
895 // Leave lifetime markers for the static alloca's, scoping them to the
896 // function we just inlined.
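// For illustration (not part of the original code): the effect is to bracket
// each such alloca with intrinsic calls roughly like
//   call void @llvm.lifetime.start(i64 -1, i8* %alloca.i8)
// after the inlined entry and
//   call void @llvm.lifetime.end(i64 -1, i8* %alloca.i8)
// before each return, so later passes can reuse the stack slot. The exact
// operands are whatever IRBuilder's CreateLifetimeStart/End emit.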
897 if (!IFI.StaticAllocas.empty()) {
898 IRBuilder<> builder(FirstNewBlock->begin());
899 for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
900 AllocaInst *AI = IFI.StaticAllocas[ai];
902 // If the alloca is already scoped to something smaller than the whole
903 // function then there's no need to add redundant, less accurate markers.
904 if (hasLifetimeMarkers(AI))
905 continue;
907 builder.CreateLifetimeStart(AI);
908 for (unsigned ri = 0, re = Returns.size(); ri != re; ++ri) {
909 IRBuilder<> builder(Returns[ri]);
910 builder.CreateLifetimeEnd(AI);
911 }
912 }
913 }
915 // If the inlined code contained dynamic alloca instructions, wrap the inlined
916 // code with llvm.stacksave/llvm.stackrestore intrinsics.
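// For illustration (not part of the original code): the inlined body ends up
// bracketed roughly like
//   %savedstack = call i8* @llvm.stacksave()
//   ...inlined code containing dynamic allocas...
//   call void @llvm.stackrestore(i8* %savedstack)
// so stack space allocated by the callee's dynamic allocas is released on
// every path back to the caller.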
917 if (InlinedFunctionInfo.ContainsDynamicAllocas) {
918 Module *M = Caller->getParent();
919 // Get the two intrinsics we care about.
920 Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
921 Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);
923 // Insert the llvm.stacksave.
924 CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
925 .CreateCall(StackSave, "savedstack");
927 // Insert a call to llvm.stackrestore before any return instructions in the
928 // inlined function.
929 for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
930 IRBuilder<>(Returns[i]).CreateCall(StackRestore, SavedPtr);
931 }
933 // Count the number of StackRestore calls we insert.
934 unsigned NumStackRestores = Returns.size();
936 // If we are inlining an invoke instruction, insert restores before each
937 // unwind. These unwinds will be rewritten into branches later.
938 if (InlinedFunctionInfo.ContainsUnwinds && isa<InvokeInst>(TheCall)) {
939 for (Function::iterator BB = FirstNewBlock, E = Caller->end();
940 BB != E; ++BB)
941 if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
942 IRBuilder<>(UI).CreateCall(StackRestore, SavedPtr);
943 ++NumStackRestores;
944 }
945 }
946 }
948 // If we are inlining a tail call instruction through a call site that isn't
949 // marked 'tail', we must remove the tail marker for any calls in the inlined
950 // code. Also, calls inlined through a 'nounwind' call site should be marked
951 // 'nounwind'.
952 if (InlinedFunctionInfo.ContainsCalls &&
953 (MustClearTailCallFlags || MarkNoUnwind)) {
954 for (Function::iterator BB = FirstNewBlock, E = Caller->end();
955 BB != E; ++BB)
956 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
957 if (CallInst *CI = dyn_cast<CallInst>(I)) {
958 if (MustClearTailCallFlags)
959 CI->setTailCall(false);
960 if (MarkNoUnwind)
961 CI->setDoesNotThrow();
962 }
963 }
965 // If we are inlining through a 'nounwind' call site then any inlined 'unwind'
966 // instructions are unreachable.
967 if (InlinedFunctionInfo.ContainsUnwinds && MarkNoUnwind)
968 for (Function::iterator BB = FirstNewBlock, E = Caller->end();
969 BB != E; ++BB) {
970 TerminatorInst *Term = BB->getTerminator();
971 if (isa<UnwindInst>(Term)) {
972 new UnreachableInst(Context, Term);
973 BB->getInstList().erase(Term);
974 }
975 }
977 // If we are inlining for an invoke instruction, we must make sure to rewrite
978 // any inlined 'unwind' instructions into branches to the invoke exception
979 // destination, and call instructions into invoke instructions.
980 if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
981 HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);
983 // If we cloned in _exactly one_ basic block, and if that block ends in a
984 // return instruction, we splice the body of the inlined callee directly into
985 // the calling basic block.
986 if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
987 // Move all of the instructions right before the call.
988 OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
989 FirstNewBlock->begin(), FirstNewBlock->end());
990 // Remove the cloned basic block.
991 Caller->getBasicBlockList().pop_back();
993 // If the call site was an invoke instruction, add a branch to the normal
994 // destination.
995 if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
996 BranchInst::Create(II->getNormalDest(), TheCall);
998 // If the return instruction returned a value, replace uses of the call with
999 // uses of the returned value.
1000 if (!TheCall->use_empty()) {
1001 ReturnInst *R = Returns[0];
1002 if (TheCall == R->getReturnValue())
1003 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
1004 else
1005 TheCall->replaceAllUsesWith(R->getReturnValue());
1006 }
1007 // Since we are now done with the Call/Invoke, we can delete it.
1008 TheCall->eraseFromParent();
1010 // Since we are now done with the return instruction, delete it also.
1011 Returns[0]->eraseFromParent();
1013 // We are now done with the inlining.
1014 return true;
1015 }
1017 // Otherwise, we have the normal case, of more than one block to inline or
1018 // multiple return sites.
1020 // We want to clone the entire callee function into the hole between the
1021 // "starter" and "ender" blocks. How we accomplish this depends on whether
1022 // this is an invoke instruction or a call instruction.
1023 BasicBlock *AfterCallBB;
1024 if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
1026 // Add an unconditional branch to make this look like the CallInst case...
1027 BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
1029 // Split the basic block. This guarantees that no PHI nodes will have to be
1030 // updated due to new incoming edges, and make the invoke case more
1031 // symmetric to the call case.
1032 AfterCallBB = OrigBB->splitBasicBlock(NewBr,
1033 CalledFunc->getName()+".exit");
1035 } else { // It's a call
1036 // If this is a call instruction, we need to split the basic block that
1037 // the call lives in.
1039 AfterCallBB = OrigBB->splitBasicBlock(TheCall,
1040 CalledFunc->getName()+".exit");
1043 // Change the branch that used to go to AfterCallBB to branch to the first
1044 // basic block of the inlined function.
1046 TerminatorInst *Br = OrigBB->getTerminator();
1047 assert(Br && Br->getOpcode() == Instruction::Br &&
1048 "splitBasicBlock broken!");
1049 Br->setOperand(0, FirstNewBlock);
1052 // Now that the function is correct, make it a little bit nicer. In
1053 // particular, move the basic blocks inserted from the end of the function
1054 // into the space made by splitting the source basic block.
1055 Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
1056 FirstNewBlock, Caller->end());
1058 // Handle all of the return instructions that we just cloned in, and eliminate
1059 // any users of the original call/invoke instruction.
1060 Type *RTy = CalledFunc->getReturnType();
1062 PHINode *PHI = 0;
1063 if (Returns.size() > 1) {
1064 // The PHI node should go at the front of the new basic block to merge all
1065 // possible incoming values.
1066 if (!TheCall->use_empty()) {
1067 PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
1068 AfterCallBB->begin());
1069 // Anything that used the result of the function call should now use the
1070 // PHI node as their operand.
1071 TheCall->replaceAllUsesWith(PHI);
1072 }
1074 // Loop over all of the return instructions adding entries to the PHI node
1075 // as appropriate.
1076 if (PHI) {
1077 for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
1078 ReturnInst *RI = Returns[i];
1079 assert(RI->getReturnValue()->getType() == PHI->getType() &&
1080 "Ret value not consistent in function!");
1081 PHI->addIncoming(RI->getReturnValue(), RI->getParent());
1082 }
1083 }
1086 // Add a branch to the merge points and remove return instructions.
1087 for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
1088 ReturnInst *RI = Returns[i];
1089 BranchInst::Create(AfterCallBB, RI);
1090 RI->eraseFromParent();
1091 }
1092 } else if (!Returns.empty()) {
1093 // Otherwise, if there is exactly one return value, just replace anything
1094 // using the return value of the call with the computed value.
1095 if (!TheCall->use_empty()) {
1096 if (TheCall == Returns[0]->getReturnValue())
1097 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
1098 else
1099 TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
1100 }
1102 // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
1103 BasicBlock *ReturnBB = Returns[0]->getParent();
1104 ReturnBB->replaceAllUsesWith(AfterCallBB);
1106 // Splice the code from the return block into the block that it will return
1107 // to, which contains the code that was after the call.
1108 AfterCallBB->getInstList().splice(AfterCallBB->begin(),
1109 ReturnBB->getInstList());
1111 // Delete the return instruction now and empty ReturnBB now.
1112 Returns[0]->eraseFromParent();
1113 ReturnBB->eraseFromParent();
1114 } else if (!TheCall->use_empty()) {
1115 // No returns, but something is using the return value of the call. Just
1116 // give it undef.
1117 TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
1118 }
1120 // Since we are now done with the Call/Invoke, we can delete it.
1121 TheCall->eraseFromParent();
1123 // We should always be able to fold the entry block of the function into the
1124 // single predecessor of the block...
1125 assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
1126 BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
1128 // Splice the code entry block into calling block, right before the
1129 // unconditional branch.
1130 CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
1131 OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());
1133 // Remove the unconditional branch.
1134 OrigBB->getInstList().erase(Br);
1136 // Now we can remove the CalleeEntry block, which is now empty.
1137 Caller->getBasicBlockList().erase(CalleeEntry);
1139 // If we inserted a phi node, check to see if it has a single value (e.g. all
1140 // the entries are the same or undef). If so, remove the PHI so it doesn't
1141 // block other optimizations.
1142 if (PHI) {
1143 if (Value *V = SimplifyInstruction(PHI, IFI.TD)) {
1144 PHI->replaceAllUsesWith(V);
1145 PHI->eraseFromParent();
1146 }
1147 }
1149 return true;
1150 }