//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Intrinsics.h"
#include "llvm/Attributes.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CallSite.h"
using namespace llvm;

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(CI), IFI);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI) {
  return InlineFunction(CallSite(II), IFI);
}

/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes.  This function analyzes BB to see if there are any calls, and if
/// so, it rewrites them to be invokes that jump to InvokeDest and fills in
/// the PHI nodes in that block with the values specified in
/// InvokeDestPHIValues.
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   BasicBlock *InvokeDest,
                     const SmallVectorImpl<Value*> &InvokeDestPHIValues) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;
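    // Advance the iterator before inspecting I: if I turns out to be a call
    // that gets converted below, it is erased from its block.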

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);
    if (CI == 0) continue;

    // If this call cannot unwind, don't convert it to an invoke.
    if (CI->doesNotThrow())
      continue;

    // Convert this function call into an invoke instruction.
    // First, split the basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");
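    // The split places CI at the head of the new block; BB now ends with an
    // unconditional branch to Split, which the invoke will replace below.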

    // Next, create the new invoke instruction, inserting it at the end
    // of the old basic block.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II =
      InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
                         InvokeArgs.begin(), InvokeArgs.end(),
                         CI->getName(), BB->getTerminator());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();
    Split->getInstList().pop_front();  // Delete the original call

    // Update any PHI nodes in the exceptional block to indicate that
    // there is now a new entry in them.
    unsigned i = 0;
    for (BasicBlock::iterator I = InvokeDest->begin();
         isa<PHINode>(I); ++I, ++i)
      cast<PHINode>(I)->addIncoming(InvokeDestPHIValues[i], BB);

    // This basic block is now complete, the caller will continue scanning the
    // next one; just return.
    return;
  }
}

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes and turn unwind
/// instructions into branches to the invoke unwind dest.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();
  SmallVector<Value*, 8> InvokeDestPHIValues;

  // If there are PHI nodes in the unwind destination block, we need to
  // keep track of which values came into them from this invoke, then remove
  // the entry for this block.
  BasicBlock *InvokeBlock = II->getParent();
  for (BasicBlock::iterator I = InvokeDest->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    // Save the value to use for this edge.
    InvokeDestPHIValues.push_back(PN->getIncomingValueForBlock(InvokeBlock));
  }
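  // Note: the stale PHI entries for the original invoke edge are removed at
  // the bottom of this function via removePredecessor, after all of the new
  // predecessors have been wired in.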

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.  If the code doesn't have calls or unwinds, we know there is
  // nothing to rewrite.
  if (!InlinedCodeInfo.ContainsCalls && !InlinedCodeInfo.ContainsUnwinds) {
    // Now that everything is happy, we have one final detail.  The PHI nodes in
    // the exception destination block still have entries due to the original
    // invoke instruction.  Eliminate these entries (which might even delete the
    // PHI node) now.
    InvokeDest->removePredecessor(II->getParent());
    return;
  }

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      HandleCallsInBlockInlinedThroughInvoke(BB, InvokeDest,
                                             InvokeDestPHIValues);

    if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
      // An UnwindInst requires special handling when it gets inlined into an
      // invoke site.  Once this happens, we know that the unwind would cause
      // a control transfer to the invoke exception destination, so we can
      // transform it into a direct branch to the exception destination.
      BranchInst::Create(InvokeDest, UI);

      // Delete the unwind instruction!
      UI->eraseFromParent();

      // Update any PHI nodes in the exceptional block to indicate that
      // there is now a new entry in them.
      unsigned i = 0;
      for (BasicBlock::iterator I = InvokeDest->begin();
           isa<PHINode>(I); ++I, ++i) {
        PHINode *PN = cast<PHINode>(I);
        PN->addIncoming(InvokeDestPHIValues[i], BB);
      }
    }
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made.  Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }
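  // (The snapshot matters for directly recursive functions: the loop below
  // adds edges to CallerNode, which would otherwise be the very edge list
  // being iterated when CalleeNode == CallerNode.)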

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == 0)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add.  Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (NewCall == 0) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer.  If this
    // happens, set the callee of the new call site to a more precise
    // destination.  This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (I->second->getFunction() == 0)
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);
        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller.  We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

/// HandleByValArgument - When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  const Type *AggTy = cast<PointerType>(Arg->getType())->getElementType();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory.  In this case, it is safe to elide the copy
  // and temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    // See if the argument is a (bitcasted) pointer to an alloca.  If so, we
    // can round up the alloca if needed.
    if (AllocaInst *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts())) {
      unsigned AIAlign = AI->getAlignment();

      // If the alloca is already aligned at least as much as the byval
      // requires, we can do this optimization.
      if (AIAlign >= ByValAlignment)
        return Arg;

      // If the alloca has a specified alignment that is less than the byval,
      // then we can safely bump it up.
      if (AIAlign) {
        AI->setAlignment(ByValAlignment);
        return Arg;
      }

      // If the alloca's alignment is unspecified, then we can only modify
      // it if we have TD information.  Doing so without TD info could end up
      // with us rounding the alignment *down* accidentally, which is badness.
      if (IFI.TD) {
        AIAlign = std::max(ByValAlignment, IFI.TD->getPrefTypeAlignment(AggTy));
        AI->setAlignment(AIAlign);
        return Arg;
      }
    }
  }

  // Otherwise, we have to make a memcpy to get a safe alignment, pretty lame.

  LLVMContext &Context = Arg->getContext();

  const Type *VoidPtrTy = Type::getInt8PtrTy(Context);

  // Create the alloca.  If we have TargetData, use nice alignment.
  unsigned Align = 1;
  if (IFI.TD)
    Align = IFI.TD->getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Function *Caller = TheCall->getParent()->getParent();

  Value *NewAlloca = new AllocaInst(AggTy, 0, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
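  // Placing the alloca at the top of the caller's entry block keeps it a
  // static alloca, so later passes (e.g. mem2reg) still understand it.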

  const Type *Tys[3] = {VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context)};
  Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                 Intrinsic::memcpy, Tys, 3);
  Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
  Value *SrcCast = new BitCastInst(Arg, VoidPtrTy, "tmp", TheCall);

  Value *Size;
  if (IFI.TD == 0)
    Size = ConstantExpr::getSizeOf(AggTy);
  else
    Size = ConstantInt::get(Type::getInt64Ty(Context),
                            IFI.TD->getTypeStoreSize(AggTy));
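  // (Without TargetData, getSizeOf produces a target-independent constant
  // expression for the size, whose value is resolved at code generation time.)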

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer.  Other optimizations can infer
  // better alignment.
  Value *CallArgs[] = {
    DestCast, SrcCast, Size,
    ConstantInt::get(Type::getInt32Ty(Context), 1),
    ConstantInt::getFalse(Context) // isVolatile
  };
  CallInst *TheMemCpy =
    CallInst::Create(MemCpyFn, CallArgs, CallArgs+5, "", TheCall);

  // If we have a call graph, update it.
  if (CallGraph *CG = IFI.CG) {
    CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
    CallGraphNode *CallerNode = (*CG)[Caller];
    CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
  }

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// InlineFunction - This function inlines the called function into the basic
// block of the caller.  This returns false if it is not possible to inline
// this call.  The program is still in a well defined state if this occurs
// though.
//
// Note that this only does one level of inlining.  For example, if the
// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
// exists in the instruction stream.  Similarly this will inline a recursive
// function by one level.
//
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
  Instruction *TheCall = CS.getInstruction();
  LLVMContext &Context = TheCall->getContext();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||          // Can't inline external function or indirect
      CalledFunc->isDeclaration() ||  // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee is not a tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());
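  // (A 'tail' marker asserts that the call does not access the caller's stack
  // frame; once such calls are spliced into a new caller that may no longer
  // hold, so the flags are dropped below unless the site was itself a tail
  // call.)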

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to
  //     the caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  //
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CalledFunc->paramHasAttr(ArgNo+1, Attribute::ByVal)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));

        // Calls that we inline may use the new alloca, so we need to clear
        // their 'tail' flags if HandleByValArgument introduced a new alloca
        // and the callee has calls.
        MustClearTailCallFlags |= ActualArg != *AI;
      }

      VMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, IFI.TD, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;
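    // (CloneAndPruneFunctionInto appended the cloned blocks after the old
    // last block, so the block following LastBlock is the cloned entry.)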

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller.  First
  // calculate which instruction they should be inserted before.  We insert the
  // instructions at the end of the current alloca list.
  //
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (AI == 0) continue;

      // If the alloca is now dead, remove it.  This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all together.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block.  Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
      Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // If we are preserving the callgraph, add edges to the stacksave/restore
    // functions for the calls we insert.
    CallGraphNode *StackSaveCGN = 0, *StackRestoreCGN = 0, *CallerNode = 0;
    if (CallGraph *CG = IFI.CG) {
      StackSaveCGN = CG->getOrInsertFunction(StackSave);
      StackRestoreCGN = CG->getOrInsertFunction(StackRestore);
      CallerNode = (*CG)[Caller];
    }

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = CallInst::Create(StackSave, "savedstack",
                                          FirstNewBlock->begin());
    if (IFI.CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", Returns[i]);
      if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
    }

    // Count the number of StackRestore calls we insert.
    unsigned NumStackRestores = Returns.size();

    // If we are inlining an invoke instruction, insert restores before each
    // unwind.  These unwinds will be rewritten into branches later.
    if (InlinedFunctionInfo.ContainsUnwinds && isa<InvokeInst>(TheCall)) {
      for (Function::iterator BB = FirstNewBlock, E = Caller->end();
           BB != E; ++BB)
        if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
          CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", UI);
          if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
          ++NumStackRestores;
        }
    }
  }

  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code.  Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }

  // If we are inlining through a 'nounwind' call site then any inlined 'unwind'
  // instructions are unreachable.
  if (InlinedFunctionInfo.ContainsUnwinds && MarkNoUnwind)
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      TerminatorInst *Term = BB->getTerminator();
      if (isa<UnwindInst>(Term)) {
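        // Insert an unreachable immediately before the unwind, then erase the
        // unwind; this way the block stays properly terminated throughout.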
        new UnreachableInst(Context, Term);
        BB->getInstList().erase(Term);
      }
    }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any inlined 'unwind' instructions into branches to the invoke exception
  // destination, and call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
      BranchInst::Create(II->getNormalDest(), TheCall);

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
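      // Guard against the degenerate case where the cloned return yields the
      // call itself; a value cannot replaceAllUsesWith itself, and the call is
      // about to be deleted anyway, so its uses become undef.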
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(NewBr,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);
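  // (Operand 0 of an unconditional branch is its successor, so this redirects
  // OrigBB straight into the inlined entry block.)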

  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  const Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = 0;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst::Create(AfterCallBB, RI);
      RI->eraseFromParent();
    }
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    // Update any branches or PHI nodes that used the ReturnBB to use the
    // AfterCallBB instead.
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the callee's entry block into the calling block, right before the
  // unconditional branch.
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef).  If so, remove the PHI so it doesn't
  // block other optimizations.
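  // (SimplifyInstruction returns the forwarded value when every incoming
  // value is the same or undef, or null when nothing simplifies.)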
  if (PHI)
    if (Value *V = SimplifyInstruction(PHI, IFI.TD)) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }

  return true;
}