//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Intrinsics.h"
#include "llvm/Attributes.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CallSite.h"
using namespace llvm;

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(CI), IFI);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(II), IFI);
}

/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so, it
/// rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
///
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   BasicBlock *InvokeDest,
                           const SmallVectorImpl<Value*> &InvokeDestPHIValues) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);
    if (CI == 0) continue;

    // If this call cannot unwind, don't convert it to an invoke.
    if (CI->doesNotThrow())
      continue;

    // Convert this function call into an invoke instruction.
    // First, split the basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Next, create the new invoke instruction, inserting it at the end
    // of the old basic block.
    SmallVector<Value*, 8> InvokeArgs(CI->op_begin()+1, CI->op_end());
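    // (In this form of the IR the called value is operand 0 of the CallInst,
    // so the actual arguments are the operands starting at op_begin()+1.)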
    InvokeInst *II =
      InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
                         InvokeArgs.begin(), InvokeArgs.end(),
                         CI->getName(), BB->getTerminator());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();
    Split->getInstList().pop_front();  // Delete the original call

    // Update any PHI nodes in the exceptional block to indicate that
    // there is now a new entry in them.
    unsigned i = 0;
    for (BasicBlock::iterator I = InvokeDest->begin();
         isa<PHINode>(I); ++I, ++i)
      cast<PHINode>(I)->addIncoming(InvokeDestPHIValues[i], BB);

    // This basic block is now complete, the caller will continue scanning the
    // next basic block.
    return;
  }
}

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert
/// calls in the body of the inlined function into invokes and turn unwind
/// instructions into branches to the invoke unwind dest.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();
  SmallVector<Value*, 8> InvokeDestPHIValues;

  // If there are PHI nodes in the unwind destination block, we need to
  // keep track of which values came into them from this invoke, then remove
  // the entry for this block.
  BasicBlock *InvokeBlock = II->getParent();
  for (BasicBlock::iterator I = InvokeDest->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    // Save the value to use for this edge.
    InvokeDestPHIValues.push_back(PN->getIncomingValueForBlock(InvokeBlock));
  }
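  // These saved values are what the call-to-invoke rewriting and the unwind
  // rewriting below feed back into InvokeDest's PHI nodes for each new edge
  // they introduce.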

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.  If the code doesn't have calls or unwinds, we know there is
  // nothing to rewrite.
  if (!InlinedCodeInfo.ContainsCalls && !InlinedCodeInfo.ContainsUnwinds) {
    // Now that everything is happy, we have one final detail.  The PHI nodes
    // in the exception destination block still have entries due to the
    // original invoke instruction.  Eliminate these entries (which might even
    // delete the PHI node) now.
    InvokeDest->removePredecessor(II->getParent());
    return;
  }

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){
    if (InlinedCodeInfo.ContainsCalls)
      HandleCallsInBlockInlinedThroughInvoke(BB, InvokeDest,
                                             InvokeDestPHIValues);

    if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
      // An UnwindInst requires special handling when it gets inlined into an
      // invoke site.  Once this happens, we know that the unwind would cause
      // a control transfer to the invoke exception destination, so we can
      // transform it into a direct branch to the exception destination.
      BranchInst::Create(InvokeDest, UI);

      // Delete the unwind instruction!
      UI->eraseFromParent();

      // Update any PHI nodes in the exceptional block to indicate that
      // there is now a new entry in them.
      unsigned i = 0;
      for (BasicBlock::iterator I = InvokeDest->begin();
           isa<PHINode>(I); ++I, ++i) {
        PHINode *PN = cast<PHINode>(I);
        PN->addIncoming(InvokeDestPHIValues[i], BB);
      }
    }
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made.  Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         DenseMap<const Value*, Value*> &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }
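  // (The copy matters when the caller and callee are the same function: the
  // loop below adds edges to CallerNode, and without the snapshot it would be
  // appending to the very list it is iterating over.)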

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    DenseMap<const Value*, Value*>::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == 0)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add.  Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (NewCall == 0) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer.  If this
    // happens, set the callee of the new call site to a more precise
    // destination.  This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (I->second->getFunction() == 0)
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite::get(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite::get(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller.  We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

// InlineFunction - This function inlines the called function into the basic
// block of the caller.  This returns false if it is not possible to inline
// this call.  The program is still in a well defined state if this occurs
// though.
//
// Note that this only does one level of inlining.  For example, if the
// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
// exists in the instruction stream.  Similarly this will inline a recursive
// function by one level.
//
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
  Instruction *TheCall = CS.getInstruction();
  LLVMContext &Context = TheCall->getContext();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||          // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee is not a tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());
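  // ('tail' asserts that a call does not access allocas or varargs in its
  // caller.  Once the callee's body is spliced into a new caller, a pointer
  // argument may suddenly refer to one of that caller's allocas, so the
  // marker is dropped unless the inlined call site was itself a tail call.)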

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to
  //     the caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    DenseMap<const Value*, Value*> VMap;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CalledFunc->paramHasAttr(ArgNo+1, Attribute::ByVal) &&
          !CalledFunc->onlyReadsMemory()) {
        const Type *AggTy = cast<PointerType>(I->getType())->getElementType();
        const Type *VoidPtrTy = Type::getInt8PtrTy(Context);

        // Create the alloca.  If we have TargetData, use nice alignment.
        unsigned Align = 1;
        if (IFI.TD) Align = IFI.TD->getPrefTypeAlignment(AggTy);
        Value *NewAlloca = new AllocaInst(AggTy, 0, Align,
                                          I->getName(),
                                          &*Caller->begin()->begin());
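        // (The copy is created as a static alloca at the top of the caller's
        // entry block, so it ends up with an ordinary fixed stack slot rather
        // than a dynamic allocation.)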

        const Type *Tys[3] = {VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context)};
        Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                       Intrinsic::memcpy,
                                                       Tys, 3);
        Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
        Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall);

        Value *Size;
        if (IFI.TD == 0)
          Size = ConstantExpr::getSizeOf(AggTy);
        else
          Size = ConstantInt::get(Type::getInt64Ty(Context),
                                  IFI.TD->getTypeStoreSize(AggTy));

        // Always generate a memcpy of alignment 1 here because we don't know
        // the alignment of the src pointer.  Other optimizations can infer
        // better alignment.
        Value *CallArgs[] = {
          DestCast, SrcCast, Size,
          ConstantInt::get(Type::getInt32Ty(Context), 1),
          ConstantInt::get(Type::getInt1Ty(Context), 0)
        };
        CallInst *TheMemCpy =
          CallInst::Create(MemCpyFn, CallArgs, CallArgs+5, "", TheCall);
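        // (The five operands above are, in order, the memcpy intrinsic's
        // destination, source, length, alignment and is-volatile flag.)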

        // If we have a call graph, update it.
        if (CallGraph *CG = IFI.CG) {
          CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
          CallGraphNode *CallerNode = (*CG)[Caller];
          CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
        }

        // Uses of the argument in the function should use our new alloca
        // instead.
        ActualArg = NewAlloca;

        // Calls that we inline may use the new alloca, so we need to clear
        // their 'tail' flags.
        MustClearTailCallFlags = true;
      }

      VMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap, Returns, ".i",
                              &InlinedFunctionInfo, IFI.TD, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
  }

  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  // First calculate which instruction they should be inserted before.  We
  // insert the instructions at the end of the current alloca list.
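  // (Allocas are only treated as static, and are only seen by passes such as
  // mem2reg, when they live in the entry block, which is why they are spliced
  // up there rather than left behind in the inlined code.)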
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (AI == 0) continue;

      // If the alloca is now dead, remove it.  This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all together.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block.  Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the
  // inlined code with llvm.stacksave/llvm.stackrestore intrinsics.
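  // (This keeps dynamically sized allocas in the callee from accumulating
  // stack space in the new caller, e.g. when the call site sits in a loop.)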
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
      Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // If we are preserving the callgraph, add edges to the stacksave/restore
    // functions for the calls we insert.
    CallGraphNode *StackSaveCGN = 0, *StackRestoreCGN = 0, *CallerNode = 0;
    if (CallGraph *CG = IFI.CG) {
      StackSaveCGN = CG->getOrInsertFunction(StackSave);
      StackRestoreCGN = CG->getOrInsertFunction(StackRestore);
      CallerNode = (*CG)[Caller];
    }

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = CallInst::Create(StackSave, "savedstack",
                                          FirstNewBlock->begin());
    if (IFI.CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", Returns[i]);
      if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
    }

    // Count the number of StackRestore calls we insert.
    unsigned NumStackRestores = Returns.size();

    // If we are inlining an invoke instruction, insert restores before each
    // unwind.  These unwinds will be rewritten into branches later.
    if (InlinedFunctionInfo.ContainsUnwinds && isa<InvokeInst>(TheCall)) {
      for (Function::iterator BB = FirstNewBlock, E = Caller->end();
           BB != E; ++BB)
        if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
          CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", UI);
          if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
          ++NumStackRestores;
        }
    }
  }

  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code.  Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }

  // If we are inlining through a 'nounwind' call site then any inlined
  // 'unwind' instructions are unreachable.
  if (InlinedFunctionInfo.ContainsUnwinds && MarkNoUnwind)
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      TerminatorInst *Term = BB->getTerminator();
      if (isa<UnwindInst>(Term)) {
        new UnreachableInst(Context, Term);
        BB->getInstList().erase(Term);
      }
    }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any inlined 'unwind' instructions into branches to the invoke exception
  // destination, and call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
      BranchInst::Create(II->getNormalDest(), TheCall);

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }

    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to
    // be updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(NewBr,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);
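  // (splitBasicBlock terminated OrigBB with an unconditional branch to
  // AfterCallBB; an unconditional branch's only operand is its successor, so
  // rewriting operand 0 redirects control into the inlined body.)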

  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and
  // eliminate any users of the original call/invoke instruction.
  const Type *RTy = CalledFunc->getReturnType();

  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    PHINode *PHI = 0;
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }

      // Now that we inserted the PHI, check to see if it has a single value
      // (e.g. all the entries are the same or undef).  If so, remove the PHI
      // so it doesn't block other optimizations.
      if (Value *V = PHI->hasConstantValue()) {
        PHI->replaceAllUsesWith(V);
        PHI->eraseFromParent();
      }
    }

    // Add a branch to the merge points and remove return instructions.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst::Create(AfterCallBB, RI);
      RI->eraseFromParent();
    }
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the callee's entry block into the calling block, right before the
  // unconditional branch.
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  return true;
}