//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionTracker.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, InsertLifetime);
}

bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, InsertLifetime);
}

namespace {
  /// A class for recording information about inlining through an invoke.
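  ///
  /// Calls inlined through the invoke are rewritten to unwind to the original
  /// ("outer") landing pad, while any 'resume' instructions in the inlined
  /// body are forwarded to a split-off ("inner") block that merges the EH
  /// values; see getInnerResumeDest() below.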
  class InvokeInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    InvokeInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
        CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// getOuterResumeDest - The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// forwardResume - Forward the 'resume' instruction to the caller's landing
    /// pad block. When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// addIncomingPHIValuesFor - Add incoming-PHI values to the unwind
    /// destination block for the given basic block, using the values for the
    /// original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
}

/// getInnerResumeDest - Get or create a target for the branch from ResumeInsts.
BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = CallerLPad; ++SplitPoint;
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  return InnerResumeDest;
}

/// forwardResume - Forward the 'resume' instruction to the caller's landing pad
/// block. When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void InvokeInliningInfo::forwardResume(ResumeInst *RI,
                               SmallPtrSetImpl<LandingPadInst*> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // matches the order of the values saved in UnwindDestPHIValues.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes. This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
/// nodes in that block with the values specified in InvokeDestPHIValues.
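///
/// For example (an illustrative sketch with hypothetical names), a cloned
/// block containing
///   %r = call i32 @may_throw(i32 %x)
/// is split at the call, and the call becomes
///   %r = invoke i32 @may_throw(i32 %x)
///           to label %r.noexc unwind label %outer.lpad
/// where %outer.lpad is the caller's original unwind destination.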
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    // Inline asm calls cannot throw.
    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // Convert this function call into an invoke instruction. First, split the
    // basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock.
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
                                        Invoke.getOuterResumeDest(),
                                        InvokeArgs, CI->getName(), BB);
    II->setDebugLoc(CI->getDebugLoc());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke! This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call.
    Split->getInstList().pop_front();

    // Update any PHI nodes in the exceptional block to indicate that there is
    // now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
  }
}

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  InvokeInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock, E = Caller->end(); I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      HandleCallsInBlockInlinedThroughInvoke(BB, Invoke);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// CloneAliasScopeMetadata - When inlining a function that contains noalias
/// scope metadata, this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
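///
/// For example (an illustrative sketch): if @f is inlined into @g at two call
/// sites, the scope lists cloned for the first site are distinct from those
/// cloned for the second, so a !noalias conclusion that holds for one inlined
/// body is never applied to accesses from the other.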
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (Function::const_iterator I = CalledFunc->begin(), IE = CalledFunc->end();
       I != IE; ++I)
    for (BasicBlock::const_iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Value *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<MDNode *, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingVH<MDNode> > MDMap;
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    MDNode *Dummy = MDNode::getTemporary(CalledFunc->getContext(), None);
    DummyNodes.push_back(Dummy);
    MDMap[*I] = Dummy;
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // metadata node.
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    SmallVector<Value *, 4> NewOps;
    for (unsigned i = 0, ie = (*I)->getNumOperands(); i != ie; ++i) {
      const Value *V = (*I)->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Value *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps),
           *TempM = MDMap[*I];

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
          CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
          CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
          CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
          CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }

  // Now that everything has been replaced, delete the dummy nodes.
  for (unsigned i = 0, ie = DummyNodes.size(); i != ie; ++i)
    MDNode::deleteTemporary(DummyNodes[i]);
}

/// AddAliasScopeMetadata - If the inlined function has noalias arguments, then
/// add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
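///
/// For example (an illustrative sketch; the metadata numbers are
/// hypothetical), inlining
///   define void @f(float* noalias %a, float* noalias %b) { ... }
/// creates one scope per argument, and the inlined accesses end up roughly as
///   %v = load float* %a, !alias.scope !5, !noalias !6
///   store float %v, float* %b, !alias.scope !6, !noalias !5
/// letting later passes conclude that the two accesses do not alias.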
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
                                  const DataLayout *DL, AliasAnalysis *AA) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CS.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
       E = CalledFunc->arg_end(); I != E; ++I) {
    if (I->hasNoAliasAttr() && !I->hasNUses(0))
      NoAliasArgs.push_back(I);
  }

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
    MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = CalledFunc->getName();
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (ICS.doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (AA) {
          AliasAnalysis::ModRefBehavior MRB = AA->getModRefBehavior(ICS);
          if (MRB == AliasAnalysis::OnlyAccessesArgumentPointees ||
              MRB == AliasAnalysis::OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (ImmutableCallSite::arg_iterator AI = ICS.arg_begin(),
             AE = ICS.arg_end(); AI != AE; ++AI) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !(*AI)->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(*AI);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Value *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (unsigned i = 0, ie = PtrArgs.size(); i != ie; ++i) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(PtrArgs[i]),
                             Objects, DL, /* MaxLookup = */ 0);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                   /* ReturnCaptures */ false,
                                   /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
            NI->getMetadata(LLVMContext::MD_noalias),
            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
            NI->getMetadata(LLVMContext::MD_alias_scope),
            MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made. Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
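///
/// For example (an illustrative sketch): if @b calls @c and a call to @b is
/// inlined into @a, the cloned call to @c adds an a->c edge, and the a->b
/// edge for the now-deleted call site is removed.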
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (!NewCall) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  LLVMContext &Context = Src->getContext();
  Type *VoidPtrTy = Type::getInt8PtrTy(Context);
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  Type *Tys[3] = { VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context) };
  Function *MemCpyFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy, Tys);
  IRBuilder<> builder(InsertBlock->begin());
  Value *DstCast = builder.CreateBitCast(Dst, VoidPtrTy, "tmp");
  Value *SrcCast = builder.CreateBitCast(Src, VoidPtrTy, "tmp");

  Value *Size;
  if (IFI.DL == nullptr)
    Size = ConstantExpr::getSizeOf(AggTy);
  else
    Size = ConstantInt::get(Type::getInt64Ty(Context),
                            IFI.DL->getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Value *CallArgs[] = {
    DstCast, SrcCast, Size,
    ConstantInt::get(Type::getInt32Ty(Context), 1),
    ConstantInt::getFalse(Context) // isVolatile
  };
  builder.CreateCall(MemCpyFn, CallArgs);
}

/// HandleByValArgument - When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
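///
/// For example (an illustrative sketch): given a call site
///   call void @f(%struct.S* byval %s)
/// the inlined body is pointed at a fresh alloca that is initialized with an
/// explicit llvm.memcpy from %s, so writes in the inlined code cannot be
/// observed through the caller's copy.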
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
                                   IFI.DL) >= ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
  unsigned Align = 1;
  if (IFI.DL)
    Align = IFI.DL->getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Function *Caller = TheCall->getParent()->getParent();

  Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// isUsedByLifetimeMarker - Check whether this Value is used by a lifetime
// intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// hasLifetimeMarkers - Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// updateInlinedAtInfo - Helper function used by fixupLineNumbers to
/// recursively update InlinedAtEntry of a DebugLoc.
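///
/// For example (an illustrative sketch): if an instruction from @a already
/// carries "inlined at @b" because @a was previously inlined into @b, and @b
/// is now inlined into @c, the chain is extended so the location reads
/// "@a, inlined at @b, inlined at @c".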
static DebugLoc updateInlinedAtInfo(const DebugLoc &DL,
                                    const DebugLoc &InlinedAtDL,
                                    LLVMContext &Ctx) {
  if (MDNode *IA = DL.getInlinedAt(Ctx)) {
    DebugLoc NewInlinedAtDL
      = updateInlinedAtInfo(DebugLoc::getFromDILocation(IA), InlinedAtDL, Ctx);
    return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                         NewInlinedAtDL.getAsMDNode(Ctx));
  }

  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                       InlinedAtDL.getAsMDNode(Ctx));
}

/// fixupLineNumbers - Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (TheCallDL.isUnknown())
    return;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (DL.isUnknown()) {
        // If the inlined instruction has no line number, make it look as if it
        // originates from the call location. This is important for
        // ((__always_inline__, __nodebug__)) functions which must use caller
        // location for all instructions in their function body.
        BI->setDebugLoc(TheCallDL);
      } else {
        BI->setDebugLoc(updateInlinedAtInfo(DL, TheCallDL, BI->getContext()));
        if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
          LLVMContext &Ctx = BI->getContext();
          MDNode *InlinedAt = BI->getDebugLoc().getInlinedAt(Ctx);
          DVI->setOperand(2, createInlinedVariable(DVI->getVariable(),
                                                   InlinedAt, Ctx));
        }
      }
    }
  }
}

/// InlineFunction - This function inlines the called function into the basic
/// block of the caller. This returns false if it is not possible to inline
/// this call. The program is still in a well defined state if this occurs
/// though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
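///
/// A typical use looks like the following sketch (illustrative only; the
/// exact InlineFunctionInfo constructor arguments are those declared in
/// llvm/Transforms/Utils/Cloning.h):
///   InlineFunctionInfo IFI(&CG, DL, AA, AT);
///   if (InlineFunction(CS, IFI))
///     ;  // The call site has been replaced by the callee's body.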
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (!CalledFunc ||               // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Value *CalleePersonality = nullptr;
  for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
       I != E; ++I)
    if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
      const BasicBlock *BB = II->getUnwindDest();
      const LandingPadInst *LP = BB->getLandingPadInst();
      CalleePersonality = LP->getPersonalityFn();
      break;
    }

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  if (CalleePersonality) {
    for (Function::const_iterator I = Caller->begin(), E = Caller->end();
         I != E; ++I)
      if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
        const BasicBlock *BB = II->getUnwindDest();
        const LandingPadInst *LP = BB->getLandingPadInst();

        // If the personality functions match, then we can perform the
        // inlining. Otherwise, we can't inline.
        // TODO: This isn't 100% true. Some personality functions are proper
        //       supersets of others and can be used in place of the other.
        if (LP->getPersonalityFn() != CalleePersonality)
          return false;

        break;
      }
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of pair (dst, src) to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));
        if (ActualArg != *AI)
          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
      }

      VMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, IFI.DL, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Inject byval arguments initialization.
    for (std::pair<Value*, Value*> &Init : ByValInit)
      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
                              FirstNewBlock, IFI);

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);

    // Clone existing noalias metadata if necessary.
    CloneAliasScopeMetadata(CS, VMap);

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CS, VMap, IFI.DL, IFI.AA);

    // FIXME: We could register any cloned assumptions instead of clearing the
    // whole function's cache.
    if (IFI.AT)
      IFI.AT->forgetCachedAssumptions(Caller);
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller. First
  // calculate which instruction they should be inserted before. We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  bool InlinedMustTailCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(TheCall))
      CallSiteTailKind = CI->getTailCallKind();

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : *BB) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // We need to reduce the strength of any inlined tail calls. For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth. For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'. If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail. Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static alloca's, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
          dyn_cast<ConstantInt>(AI->getArraySize())) {
        if (IFI.DL) {
          Type *AllocaType = AI->getAllocatedType();
          uint64_t AllocaTypeSize = IFI.DL->getTypeAllocSize(AllocaType);
          uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
          assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
          // Check that array size doesn't saturate uint64_t and doesn't
          // overflow when it's multiplied by type size.
          if (AllocaArraySize != ~0ULL &&
              UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
            AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                          AllocaArraySize * AllocaTypeSize);
          }
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail call and a
        // return. The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
                             .CreateCall(StackSave, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail call and a
      // return. The return will restore the stack pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // Handle any inlined musttail call sites. In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail. Therefore it's safe to return without merging control into the
  // phi nodes in the normal return block.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and make the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(CreatedBranchToNormalDest,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction now and empty ReturnBB now.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it. It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    if (Value *V = SimplifyInstruction(PHI, IFI.DL)) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}