//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
// The code in this file for handling inlines through invoke
// instructions preserves semantics only under some assumptions about
// the behavior of unwinders which correspond to gcc-style libUnwind
// exception personality functions.  Eventually the IR will be
// improved to make this unnecessary, but until then, this code is
// marked [LIBUNWIND].
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Intrinsics.h"
#include "llvm/Attributes.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/IRBuilder.h"
using namespace llvm;
bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(CI), IFI);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(II), IFI);
}
/// [LIBUNWIND] Look for an llvm.eh.exception call in the given block.
static EHExceptionInst *findExceptionInBlock(BasicBlock *bb) {
  for (BasicBlock::iterator i = bb->begin(), e = bb->end(); i != e; i++) {
    EHExceptionInst *exn = dyn_cast<EHExceptionInst>(i);
    if (exn) return exn;
  }

  return 0;
}
/// [LIBUNWIND] Look for the 'best' llvm.eh.selector instruction for
/// the given llvm.eh.exception call.
static EHSelectorInst *findSelectorForException(EHExceptionInst *exn) {
  BasicBlock *exnBlock = exn->getParent();

  EHSelectorInst *outOfBlockSelector = 0;
  for (Instruction::use_iterator
         ui = exn->use_begin(), ue = exn->use_end(); ui != ue; ++ui) {
    EHSelectorInst *sel = dyn_cast<EHSelectorInst>(*ui);
    if (!sel) continue;

    // Immediately accept an eh.selector in the same block as the
    // exception call.
    if (sel->getParent() == exnBlock) return sel;

    // Otherwise, use the first selector we see.
    if (!outOfBlockSelector) outOfBlockSelector = sel;
  }

  return outOfBlockSelector;
}
/// [LIBUNWIND] Find the (possibly absent) call to @llvm.eh.selector
/// in the given landing pad.  In principle, llvm.eh.exception is
/// required to be in the landing pad; in practice, SplitCriticalEdge
/// can break that invariant, and then inlining can break it further.
/// There's a real need for a reliable solution here, but until that
/// happens, we have some fragile workarounds here.
static EHSelectorInst *findSelectorForLandingPad(BasicBlock *lpad) {
  // Look for an exception call in the actual landing pad.
  EHExceptionInst *exn = findExceptionInBlock(lpad);
  if (exn) return findSelectorForException(exn);

  // Okay, if that failed, look for one in an obvious successor.  If
  // we find one, we'll fix the IR by moving things back to the
  // landing pad.

  bool dominates = true; // does the lpad dominate the exn call
  BasicBlock *nonDominated = 0; // if not, the first non-dominated block
  BasicBlock *lastDominated = 0; // and the block which branched to it

  BasicBlock *exnBlock = lpad;

  // We need to protect against lpads that lead into infinite loops.
  SmallPtrSet<BasicBlock*,4> visited;
  visited.insert(exnBlock);

  do {
    // We're not going to apply this hack to anything more complicated
    // than a series of unconditional branches, so if the block
    // doesn't terminate in an unconditional branch, just fail.  More
    // complicated cases can arise when, say, sinking a call into a
    // split unwind edge and then inlining it; but that can do almost
    // *anything* to the CFG, including leaving the selector
    // completely unreachable.  The only way to fix that properly is
    // to (1) prohibit transforms which move the exception or selector
    // values away from the landing pad, e.g. by producing them with
    // instructions that are pinned to an edge like a phi, or
    // producing them with not-really-instructions, and (2) making
    // transforms which split edges deal with that.
    BranchInst *branch = dyn_cast<BranchInst>(&exnBlock->back());
    if (!branch || branch->isConditional()) return 0;

    BasicBlock *successor = branch->getSuccessor(0);

    // Fail if we found an infinite loop.
    if (!visited.insert(successor)) return 0;

    // If the successor isn't dominated by exnBlock:
    if (!successor->getSinglePredecessor()) {
      // We don't want to have to deal with threading the exception
      // through multiple levels of phi, so give up if we've already
      // followed a non-dominating edge.
      if (!dominates) return 0;

      // Otherwise, remember this as a non-dominating edge.
      dominates = false;
      nonDominated = successor;
      lastDominated = exnBlock;
    }

    exnBlock = successor;

    exn = findExceptionInBlock(exnBlock);
  } while (!exn);

  // Look for a selector call for the exception we found.
  EHSelectorInst *selector = findSelectorForException(exn);
  if (!selector) return 0;

  // The easy case is when the landing pad still dominates the
  // exception call, in which case we can just move both calls back to
  // the landing pad.
  if (dominates) {
    selector->moveBefore(lpad->getFirstNonPHI());
    exn->moveBefore(selector);
    return selector;
  }

  // Otherwise, we have to split at the first non-dominating block.
  // The CFG looks basically like this:
  //    lpad:
  //      phis_1
  //      insnsAndBranches_1
  //      br label %nonDominated
  //    nonDominated:
  //      phis_2 (incoming from lastDominated)
  //      insns_3
  //      %exn = call i8* @llvm.eh.exception()
  //      insnsAndBranches_4
  //      %selector = call @llvm.eh.selector(i8* %exn, ...
  // We need to turn this into:
  //    lpad:
  //      phis_1
  //      %exn0 = call i8* @llvm.eh.exception()
  //      %selector0 = call @llvm.eh.selector(i8* %exn0, ...
  //      insnsAndBranches_1
  //      br label %split  // from lastDominated
  //    nonDominated:
  //      phis_2 (without edge from lastDominated)
  //      %exn1 = call i8* @llvm.eh.exception()
  //      %selector1 = call i8* @llvm.eh.selector(i8* %exn1, ...
  //      br label %split
  //    split:
  //      phis_2 (edge from lastDominated, edge from split)
  //      %exn = phi ...
  //      %selector = phi ...
  //      insns_3
  //      insnsAndBranches_4

  assert(nonDominated);
  assert(lastDominated);

  // First, make clones of the intrinsics to go in lpad.
  EHExceptionInst *lpadExn = cast<EHExceptionInst>(exn->clone());
  EHSelectorInst *lpadSelector = cast<EHSelectorInst>(selector->clone());
  lpadSelector->setArgOperand(0, lpadExn);
  lpadSelector->insertBefore(lpad->getFirstNonPHI());
  lpadExn->insertBefore(lpadSelector);

  // Split the non-dominated block.
  BasicBlock *split =
    nonDominated->splitBasicBlock(nonDominated->getFirstNonPHI(),
                                  nonDominated->getName() + ".lpad-fix");

  // Redirect the last dominated branch there.
  cast<BranchInst>(lastDominated->back()).setSuccessor(0, split);

  // Move the existing intrinsics to the end of the old block.
  selector->moveBefore(&nonDominated->back());
  exn->moveBefore(selector);

  Instruction *splitIP = &split->front();

  // For all the phis in nonDominated, make a new phi in split to join
  // that phi with the edge from lastDominated.
  for (BasicBlock::iterator
         i = nonDominated->begin(), e = nonDominated->end(); i != e; ++i) {
    PHINode *phi = dyn_cast<PHINode>(i);
    if (!phi) break;

    PHINode *splitPhi = PHINode::Create(phi->getType(), 2, phi->getName(),
                                        splitIP);
    phi->replaceAllUsesWith(splitPhi);
    splitPhi->addIncoming(phi, nonDominated);
    splitPhi->addIncoming(phi->removeIncomingValue(lastDominated),
                          lastDominated);
  }

  // Make new phis for the exception and selector.
  PHINode *exnPhi = PHINode::Create(exn->getType(), 2, "", splitIP);
  exn->replaceAllUsesWith(exnPhi);
  selector->setArgOperand(0, exn); // except for this use
  exnPhi->addIncoming(exn, nonDominated);
  exnPhi->addIncoming(lpadExn, lastDominated);

  PHINode *selectorPhi = PHINode::Create(selector->getType(), 2, "", splitIP);
  selector->replaceAllUsesWith(selectorPhi);
  selectorPhi->addIncoming(selector, nonDominated);
  selectorPhi->addIncoming(lpadSelector, lastDominated);

  return lpadSelector;
}

namespace {
  /// A class for recording information about inlining through an invoke.
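  ///
  /// In the terms used below, the "outer" unwind destination is the landing
  /// pad of the invoke being inlined into; the "inner" unwind destination is
  /// a block split off from it (created lazily) that rewritten llvm.eh.resume
  /// calls branch to, with phi nodes joining the exception and selector
  /// values coming in from each edge.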
  class InvokeInliningInfo {
    BasicBlock *OuterUnwindDest;
    EHSelectorInst *OuterSelector;
    BasicBlock *InnerUnwindDest;
    PHINode *InnerExceptionPHI;
    PHINode *InnerSelectorPHI;
    SmallVector<Value*, 8> UnwindDestPHIValues;

    PHINode *InnerEHValuesPHI;
    LandingPadInst *CallerLPad;
    BasicBlock *SplitLPad;

  public:
    InvokeInliningInfo(InvokeInst *II)
      : OuterUnwindDest(II->getUnwindDest()), OuterSelector(0),
        InnerUnwindDest(0), InnerExceptionPHI(0), InnerSelectorPHI(0),
        InnerEHValuesPHI(0), CallerLPad(0), SplitLPad(0) {
      // If there are PHI nodes in the unwind destination block, we
      // need to keep track of which values came into them from the
      // invoke before removing the edge from this block.
      llvm::BasicBlock *invokeBB = II->getParent();
      BasicBlock::iterator I = OuterUnwindDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *phi = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(phi->getIncomingValueForBlock(invokeBB));
      }

      // FIXME: With the new EH, this if/dyn_cast should be a 'cast'.
      if (LandingPadInst *LPI = dyn_cast<LandingPadInst>(I))
        CallerLPad = LPI;
    }

    /// The outer unwind destination is the target of unwind edges
    /// introduced for calls within the inlined function.
    BasicBlock *getOuterUnwindDest() const {
      return OuterUnwindDest;
    }

    EHSelectorInst *getOuterSelector() {
      if (!OuterSelector)
        OuterSelector = findSelectorForLandingPad(OuterUnwindDest);
      return OuterSelector;
    }

    BasicBlock *getInnerUnwindDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    BasicBlock *getSplitLandingPad() {
      if (SplitLPad) return SplitLPad;
      assert(CallerLPad && "Trying to split a block that isn't a landing pad!");
      BasicBlock::iterator I = CallerLPad; ++I;
      SplitLPad = CallerLPad->getParent()->splitBasicBlock(I, "split.lpad");
      return SplitLPad;
    }

    bool forwardEHResume(CallInst *call, BasicBlock *src);

    /// forwardResume - Forward the 'resume' instruction to the caller's landing
    /// pad block. When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI);

    /// addIncomingPHIValuesFor - Add incoming-PHI values to the unwind
    /// destination block for the given basic block, using the values for the
    /// original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterUnwindDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
}

/// Get or create a target for the branch out of rewritten calls to
/// llvm.eh.resume.
BasicBlock *InvokeInliningInfo::getInnerUnwindDest() {
  if (InnerUnwindDest) return InnerUnwindDest;

  // Find and hoist the llvm.eh.exception and llvm.eh.selector calls
  // in the outer landing pad to immediately following the phis.
  EHSelectorInst *selector = getOuterSelector();
  if (!selector) return 0;

  // The call to llvm.eh.exception *must* be in the landing pad.
  Instruction *exn = cast<Instruction>(selector->getArgOperand(0));
  assert(exn->getParent() == OuterUnwindDest);

  // TODO: recognize when we've already done this, so that we don't
  // get a linear number of these when inlining calls into lots of
  // invokes with the same landing pad.

  Instruction *splitPoint = exn->getParent()->getFirstNonPHI();
  assert(splitPoint != selector && "selector-on-exception dominance broken!");
  if (splitPoint == exn) {
    selector->removeFromParent();
    selector->insertAfter(exn);
    splitPoint = selector->getNextNode();
  } else {
    exn->moveBefore(splitPoint);
    selector->moveBefore(splitPoint);
  }

  // Split the landing pad.
  InnerUnwindDest = OuterUnwindDest->splitBasicBlock(splitPoint,
                                        OuterUnwindDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned phiCapacity = 2;

  // Create corresponding new phis for all the phis in the outer landing pad.
  BasicBlock::iterator insertPoint = InnerUnwindDest->begin();
  BasicBlock::iterator I = OuterUnwindDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *outerPhi = cast<PHINode>(I);
    PHINode *innerPhi = PHINode::Create(outerPhi->getType(), phiCapacity,
                                        outerPhi->getName() + ".lpad-body",
                                        insertPoint);
    outerPhi->replaceAllUsesWith(innerPhi);
    innerPhi->addIncoming(outerPhi, OuterUnwindDest);
  }

  // Create a phi for the exception value...
  InnerExceptionPHI = PHINode::Create(exn->getType(), phiCapacity,
                                      "exn.lpad-body", insertPoint);
  exn->replaceAllUsesWith(InnerExceptionPHI);
  selector->setArgOperand(0, exn); // restore this use
  InnerExceptionPHI->addIncoming(exn, OuterUnwindDest);

  // ...and the selector.
  InnerSelectorPHI = PHINode::Create(selector->getType(), phiCapacity,
                                     "selector.lpad-body", insertPoint);
  selector->replaceAllUsesWith(InnerSelectorPHI);
  InnerSelectorPHI->addIncoming(selector, OuterUnwindDest);

  return InnerUnwindDest;
}

/// [LIBUNWIND] Try to forward the given call, which logically occurs
/// at the end of the given block, as a branch to the inner unwind
/// block.  Returns true if the call was forwarded.
bool InvokeInliningInfo::forwardEHResume(CallInst *call, BasicBlock *src) {
  // First, check whether this is a call to the intrinsic.
  Function *fn = dyn_cast<Function>(call->getCalledValue());
  if (!fn || fn->getName() != "llvm.eh.resume")
    return false;

  // At this point, we need to return true on all paths, because
  // otherwise we'll construct an invoke of the intrinsic, which is
  // not valid.

  // Try to find or make an inner unwind dest, which will fail if we
  // can't find a selector call for the outer unwind dest.
  BasicBlock *dest = getInnerUnwindDest();
  bool hasSelector = (dest != 0);

  // If we failed, just use the outer unwind dest, dropping the
  // exception and selector on the floor.
  if (!hasSelector)
    dest = OuterUnwindDest;

  BranchInst::Create(dest, src);

  // Update the phis in the destination.  They were inserted in an
  // order which makes this work.
  addIncomingPHIValuesForInto(src, dest);

  if (hasSelector) {
    InnerExceptionPHI->addIncoming(call->getArgOperand(0), src);
    InnerSelectorPHI->addIncoming(call->getArgOperand(1), src);
  }

  return true;
}

/// forwardResume - Forward the 'resume' instruction to the caller's landing pad
/// block. When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void InvokeInliningInfo::forwardResume(ResumeInst *RI) {
  BasicBlock *LPadBB = CallerLPad->getParent();
  Value *ResumeOp = RI->getOperand(0);

  if (!LPadBB->getSinglePredecessor()) {
    // There are multiple predecessors to this landing pad block. Split this
    // landing pad block and jump to the new BB.
    BasicBlock *SplitLPad = getSplitLandingPad();
    BranchInst::Create(SplitLPad, RI->getParent());

    if (CallerLPad->hasOneUse() && isa<PHINode>(CallerLPad->use_back())) {
      PHINode *PN = cast<PHINode>(CallerLPad->use_back());
      PN->addIncoming(ResumeOp, RI->getParent());
    } else {
      PHINode *PN = PHINode::Create(ResumeOp->getType(), 0, "lpad.phi",
                                    &SplitLPad->front());
      CallerLPad->replaceAllUsesWith(PN);
      PN->addIncoming(ResumeOp, RI->getParent());
      PN->addIncoming(CallerLPad, LPadBB);
    }

    RI->eraseFromParent();
    return;
  }

  BranchInst::Create(LPadBB, RI->getParent());
  CallerLPad->replaceAllUsesWith(ResumeOp);
  CallerLPad->eraseFromParent();
  RI->eraseFromParent();
}

/// [LIBUNWIND] Check whether this selector is "only cleanups":
///   call i32 @llvm.eh.selector(blah, blah, i32 0)
static bool isCleanupOnlySelector(EHSelectorInst *selector) {
  if (selector->getNumArgOperands() != 3) return false;
  ConstantInt *val = dyn_cast<ConstantInt>(selector->getArgOperand(2));
  return (val && val->isZero());
}

/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes.  This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
///
/// Returns true to indicate that the next block should be skipped.
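///
/// For illustration (names here are arbitrary), a potentially-throwing call
///   %x = call i32 @callee(i32 %a)
/// in the inlined body is roughly rewritten to
///   %x = invoke i32 @callee(i32 %a)
///           to label %x.noexc unwind label %lpad
/// where %lpad is the unwind destination of the original invoke site and
/// %x.noexc is the split-off continuation of the block.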
static bool HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  LandingPadInst *LPI = Invoke.getLandingPadInst();

  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    if (LPI) // FIXME: This won't be NULL in the new EH.
      if (LandingPadInst *L = dyn_cast<LandingPadInst>(I)) {
        unsigned NumClauses = LPI->getNumClauses();
        L->reserveClauses(NumClauses);
        for (unsigned i = 0; i != NumClauses; ++i)
          L->addClause(LPI->getClauseType(i), LPI->getClauseValue(i));
      }

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);
    if (CI == 0) continue;

    // LIBUNWIND: merge selector instructions.
    if (EHSelectorInst *Inner = dyn_cast<EHSelectorInst>(CI)) {
      EHSelectorInst *Outer = Invoke.getOuterSelector();
      if (!Outer) continue;

      bool innerIsOnlyCleanup = isCleanupOnlySelector(Inner);
      bool outerIsOnlyCleanup = isCleanupOnlySelector(Outer);

      // If both selectors contain only cleanups, we don't need to do
      // anything.  TODO: this is really just a very specific instance
      // of a much more general optimization.
      if (innerIsOnlyCleanup && outerIsOnlyCleanup) continue;

      // Otherwise, we just append the outer selector to the inner selector.
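      // For example (illustrative operands only), merging an inner
      //   call i32 @llvm.eh.selector(i8* %exn, i8* %pers, A, B)
      // with an outer
      //   call i32 @llvm.eh.selector(i8* %exn2, i8* %pers, C)
      // produces
      //   call i32 @llvm.eh.selector(i8* %exn, i8* %pers, A, B, C)
      // i.e. the outer selector's clauses (the operands after the
      // personality) are appended to the inner selector's.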
      SmallVector<Value*, 16> NewSelector;
      for (unsigned i = 0, e = Inner->getNumArgOperands(); i != e; ++i)
        NewSelector.push_back(Inner->getArgOperand(i));
      for (unsigned i = 2, e = Outer->getNumArgOperands(); i != e; ++i)
        NewSelector.push_back(Outer->getArgOperand(i));

      CallInst *NewInner =
        IRBuilder<>(Inner).CreateCall(Inner->getCalledValue(), NewSelector);
      // No need to copy attributes, calling convention, etc.
      NewInner->takeName(Inner);
      Inner->replaceAllUsesWith(NewInner);
      Inner->eraseFromParent();
      continue;
    }

    // If this call cannot unwind, don't convert it to an invoke.
    if (CI->doesNotThrow())
      continue;

    // Convert this function call into an invoke instruction.
    // First, split the basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();

    // LIBUNWIND: If this is a call to @llvm.eh.resume, just branch
    // directly to the new landing pad.
    if (Invoke.forwardEHResume(CI, BB)) {
      // TODO: 'Split' is now unreachable; clean it up.

      // We want to leave the original call intact so that the call
      // graph and other structures won't get misled.  We also have to
      // avoid processing the next block, or we'll iterate here forever.
      return true;
    }

    // Otherwise, create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II =
      InvokeInst::Create(CI->getCalledValue(), Split,
                         Invoke.getOuterUnwindDest(),
                         InvokeArgs, CI->getName(), BB);
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    Split->getInstList().pop_front();  // Delete the original call

    // Update any PHI nodes in the exceptional block to indicate that
    // there is now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return false;
  }

  return false;
}

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes and turn unwind
/// instructions into branches to the invoke unwind dest.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.  If the code doesn't have calls or unwinds, we know there is
  // nothing to rewrite.
  if (!InlinedCodeInfo.ContainsCalls && !InlinedCodeInfo.ContainsUnwinds) {
    // Now that everything is happy, we have one final detail.  The PHI nodes in
    // the exception destination block still have entries due to the original
    // invoke instruction.  Eliminate these entries (which might even delete the
    // PHI node itself) by using the "unwind" destination of the invoke.
    InvokeDest->removePredecessor(II->getParent());
    return;
  }

  InvokeInliningInfo Invoke(II);

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){
    if (InlinedCodeInfo.ContainsCalls)
      if (HandleCallsInBlockInlinedThroughInvoke(BB, Invoke)) {
        // Honor a request to skip the next block.  We don't need to
        // consider UnwindInsts in this case either.
        ++BB;
        continue;
      }

    if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
      // An UnwindInst requires special handling when it gets inlined into an
      // invoke site.  Once this happens, we know that the unwind would cause
      // a control transfer to the invoke exception destination, so we can
      // transform it into a direct branch to the exception destination.
      BranchInst::Create(InvokeDest, UI);

      // Delete the unwind instruction!
      UI->eraseFromParent();

      // Update any PHI nodes in the exceptional block to indicate that
      // there is now a new entry in them.
      Invoke.addIncomingPHIValuesFor(BB);
    }

    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator())) {
      Invoke.forwardResume(RI);
    }
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node itself) by using the "unwind" destination of the invoke.
  InvokeDest->removePredecessor(II->getParent());
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made.  Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == 0)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add.  Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (NewCall == 0) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer.  If this
    // happens, set the callee of the new call site to a more precise
    // destination.  This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (I->second->getFunction() == 0)
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller.  We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

/// HandleByValArgument - When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
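///
/// Roughly (illustrative names, assuming a byval struct argument %arg in the
/// caller), this introduces
///   %arg.copy = alloca %struct.T
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* <%arg.copy>, i8* <%arg>,
///                                        i64 <sizeof %struct.T>, i32 1,
///                                        i1 false)
/// in the caller and makes the inlined body use %arg.copy instead of %arg,
/// unless the callee only reads memory and the pointer is adequately aligned,
/// in which case the copy is elided.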
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  Type *AggTy = cast<PointerType>(Arg->getType())->getElementType();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory.  In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
                                   IFI.TD) >= ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  LLVMContext &Context = Arg->getContext();

  Type *VoidPtrTy = Type::getInt8PtrTy(Context);

  // Create the alloca.  If we have TargetData, use nice alignment.
  unsigned Align = 1;
  if (IFI.TD)
    Align = IFI.TD->getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Function *Caller = TheCall->getParent()->getParent();

  Value *NewAlloca = new AllocaInst(AggTy, 0, Align, Arg->getName(),
                                    &*Caller->begin()->begin());

  Type *Tys[3] = {VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context)};
  Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                 Intrinsic::memcpy, Tys);
  Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
  Value *SrcCast = new BitCastInst(Arg, VoidPtrTy, "tmp", TheCall);

  Value *Size;
  if (IFI.TD == 0)
    Size = ConstantExpr::getSizeOf(AggTy);
  else
    Size = ConstantInt::get(Type::getInt64Ty(Context),
                            IFI.TD->getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer.  Other optimizations can infer
  // better alignment.
  Value *CallArgs[] = {
    DestCast, SrcCast, Size,
    ConstantInt::get(Type::getInt32Ty(Context), 1),
    ConstantInt::getFalse(Context) // isVolatile
  };
  IRBuilder<>(TheCall).CreateCall(MemCpyFn, CallArgs);

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// isUsedByLifetimeMarker - Check whether this Value is used by a lifetime
// intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (Value::use_iterator UI = V->use_begin(), UE = V->use_end(); UI != UE;
       ++UI) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// hasLifetimeMarkers - Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
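// This has to look through bitcasts, because the markers are usually emitted
// on an i8* cast of the alloca, e.g. (illustrative IR only):
//   %buf = alloca [16 x i8]
//   %p = bitcast [16 x i8]* %buf to i8*
//   call void @llvm.lifetime.start(i64 16, i8* %p)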
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Int8PtrTy = Type::getInt8PtrTy(AI->getType()->getContext());
  if (AI->getType() == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end(); I != E;
       ++I) {
    if (I->getType() != Int8PtrTy) continue;
    if (I->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(*I))
      return true;
  }
  return false;
}

/// updateInlinedAtInfo - Helper function used by fixupLineNumbers to
/// recursively update InlinedAtEntry of a DebugLoc.
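///
/// For example (hypothetical locations): an instruction at line 10 of the
/// callee, inlined at a call on line 42 of the caller, keeps line 10 but
/// gains an InlinedAt entry for line 42.  If the instruction already carried
/// an InlinedAt chain from earlier inlining, the new call-site location is
/// appended at the end of that chain.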
static DebugLoc updateInlinedAtInfo(const DebugLoc &DL,
                                    const DebugLoc &InlinedAtDL,
                                    LLVMContext &Ctx) {
  if (MDNode *IA = DL.getInlinedAt(Ctx)) {
    DebugLoc NewInlinedAtDL
      = updateInlinedAtInfo(DebugLoc::getFromDILocation(IA), InlinedAtDL, Ctx);
    return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                         NewInlinedAtDL.getAsMDNode(Ctx));
  }

  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                       InlinedAtDL.getAsMDNode(Ctx));
}

/// fixupLineNumbers - Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (TheCallDL.isUnknown())
    return;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (!DL.isUnknown()) {
        BI->setDebugLoc(updateInlinedAtInfo(DL, TheCallDL, BI->getContext()));
        if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
          LLVMContext &Ctx = BI->getContext();
          MDNode *InlinedAt = BI->getDebugLoc().getInlinedAt(Ctx);
          DVI->setOperand(2, createInlinedVariable(DVI->getVariable(),
                                                   InlinedAt, Ctx));
        }
      }
    }
  }
}

// InlineFunction - This function inlines the called function into the basic
// block of the caller.  This returns false if it is not possible to inline
// this call.  The program is still in a well defined state if this occurs
// though.
//
// Note that this only does one level of inlining.  For example, if the
// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
// exists in the instruction stream.  Similarly this will inline a recursive
// function by one level.
//
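// A minimal usage sketch (illustrative only; the real clients are the
// inliner passes): given a call site CS that has been deemed profitable,
//
//   InlineFunctionInfo IFI;   // optionally carries a CallGraph/TargetData
//   if (InlineFunction(CS, IFI))
//     ; // the call site has been replaced by the callee's body
//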
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
  Instruction *TheCall = CS.getInstruction();
  LLVMContext &Context = TheCall->getContext();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||          // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee is not a tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  for (Function::const_iterator
         I = Caller->begin(), E = Caller->end(); I != E; ++I)
    if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
      const BasicBlock *BB = II->getUnwindDest();
      // FIXME: This 'isa' here should go away once the new EH system is
      // in place.
      if (!isa<LandingPadInst>(BB->getFirstNonPHI()))
        continue;
      const LandingPadInst *LP = cast<LandingPadInst>(BB->getFirstNonPHI());
      const Value *CallerPersFn = LP->getPersonalityFn();

      // If the personality functions match, then we can perform the
      // inlining. Otherwise, we can't inline.
      // TODO: This isn't 100% true. Some personality functions are proper
      //       supersets of others and can be used in place of the other.
      for (Function::const_iterator
             I = CalledFunc->begin(), E = CalledFunc->end(); I != E; ++I)
        if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
          const BasicBlock *BB = II->getUnwindDest();
          // FIXME: This 'if/dyn_cast' here should become a normal 'cast' once
          // the new EH system is in place.
          if (const LandingPadInst *LP =
                dyn_cast<LandingPadInst>(BB->getFirstNonPHI()))
            if (CallerPersFn != LP->getPersonalityFn())
              return false;
        }

      break;
    }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CalledFunc->paramHasAttr(ArgNo+1, Attribute::ByVal)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));

        // Calls that we inline may use the new alloca, so we need to clear
        // their 'tail' flags if HandleByValArgument introduced a new alloca and
        // the callee has calls.
        MustClearTailCallFlags |= ActualArg != *AI;
      }

      VMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, IFI.TD, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller.  First
  // calculate which instruction they should be inserted before.  We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (AI == 0) continue;

      // If the alloca is now dead, remove it.  This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block.  Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  // Leave lifetime markers for the static alloca's, scoping them to the
  // function we just inlined.
  if (!IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      builder.CreateLifetimeStart(AI);
      for (unsigned ri = 0, re = Returns.size(); ri != re; ++ri) {
        IRBuilder<> builder(Returns[ri]);
        builder.CreateLifetimeEnd(AI);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
      .CreateCall(StackSave, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      IRBuilder<>(Returns[i]).CreateCall(StackRestore, SavedPtr);
    }

    // Count the number of StackRestore calls we insert.
    unsigned NumStackRestores = Returns.size();

    // If we are inlining an invoke instruction, insert restores before each
    // unwind.  These unwinds will be rewritten into branches later.
    if (InlinedFunctionInfo.ContainsUnwinds && isa<InvokeInst>(TheCall)) {
      for (Function::iterator BB = FirstNewBlock, E = Caller->end();
           BB != E; ++BB)
        if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
          IRBuilder<>(UI).CreateCall(StackRestore, SavedPtr);
          ++NumStackRestores;
        }
    }
  }

  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code.  Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }

  // If we are inlining through a 'nounwind' call site then any inlined 'unwind'
  // instructions are unreachable.
  if (InlinedFunctionInfo.ContainsUnwinds && MarkNoUnwind)
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      TerminatorInst *Term = BB->getTerminator();
      if (isa<UnwindInst>(Term)) {
        new UnreachableInst(Context, Term);
        BB->getInstList().erase(Term);
      }
    }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any inlined 'unwind' instructions into branches to the invoke exception
  // destination, and call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
      BranchInst::Create(II->getNormalDest(), TheCall);

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and make the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(NewBr,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = 0;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst::Create(AfterCallBB, RI);
      RI->eraseFromParent();
    }
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef).  If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI)
    if (Value *V = SimplifyInstruction(PHI, IFI.TD)) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }

  return true;
}