//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>

using namespace llvm;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(false),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));
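
// These two overloads are thin convenience wrappers: each wraps the
// instruction in a CallSite and forwards to the main CallSite-based
// implementation at the bottom of this file.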
bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, InsertLifetime);
}
bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, InsertLifetime);
}

namespace {
  /// A class for recording information about inlining through an invoke.
  class InvokeInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    InvokeInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
        CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

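      // The loop above stopped at the first non-PHI instruction, which the
      // verifier guarantees is the landingpad instruction itself, so this
      // cast cannot fail.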
      CallerLPad = cast<LandingPadInst>(I);
    }

    /// getOuterResumeDest - The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// forwardResume - Forward the 'resume' instruction to the caller's
    /// landing pad block. When the landing pad block has only one predecessor,
    /// this is a simple branch. When there is more than one predecessor, we
    /// need to split the landing pad block after the landingpad instruction
    /// and jump to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// addIncomingPHIValuesFor - Add incoming-PHI values to the unwind
    /// destination block for the given basic block, using the values for the
    /// original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
}

/// getInnerResumeDest - Get or create a target for the branch from ResumeInsts.
BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = CallerLPad; ++SplitPoint;
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;
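  // (This is only a PHI operand-allocation hint: one edge from the outer
  // landing pad plus one from a forwarded resume; more incoming values can
  // still be added past it.)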

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// forwardResume - Forward the 'resume' instruction to the caller's landing
/// pad block. When the landing pad block has only one predecessor, this is a
/// simple branch. When there is more than one predecessor, we need to split
/// the landing pad block after the landingpad instruction and jump to there.
void InvokeInliningInfo::forwardResume(ResumeInst *RI,
                               SmallPtrSetImpl<LandingPadInst*> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes. This function analyzes BB to see if there are any calls, and if
/// so, it rewrites them to be invokes that jump to InvokeDest and fills in
/// the PHI nodes in that block with the values specified in
/// InvokeDestPHIValues.
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    // Inline asm calls cannot throw.
    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // Convert this function call into an invoke instruction. First, split the
    // basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock.
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
                                        Invoke.getOuterResumeDest(),
                                        InvokeArgs, CI->getName(), BB);
    II->setDebugLoc(CI->getDebugLoc());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke! This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call.
    Split->getInstList().pop_front();

    // Update any PHI nodes in the exceptional block to indicate that there is
    // now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
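
    // Splitting invalidated our iteration over this block: everything after
    // the original call now lives in Split, which the enclosing walk over the
    // function's blocks will visit on a later iteration, so stop scanning here.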
    return;
  }
}

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert
/// calls in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  InvokeInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock, E = Caller->end(); I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){
    if (InlinedCodeInfo.ContainsCalls)
      HandleCallsInBlockInlinedThroughInvoke(BB, Invoke);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// CloneAliasScopeMetadata - When inlining a function that contains noalias
/// scope metadata, this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (Function::const_iterator I = CalledFunc->begin(), IE = CalledFunc->end();
       I != IE; ++I)
    for (BasicBlock::const_iterator J = I->begin(), JE = I->end(); J != JE;
         ++J) {
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Value *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
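  // Scope metadata in this scheme is typically cyclic (nodes reference
  // themselves and their lists), so we first map every node to a temporary
  // placeholder; forward and cyclic references can then be wired up before
  // the real replacement nodes exist.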
  SmallVector<MDNode *, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingVH<MDNode> > MDMap;
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    MDNode *Dummy = MDNode::getTemporary(CalledFunc->getContext(), None);
    DummyNodes.push_back(Dummy);
    MDMap[*I] = Dummy;
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    SmallVector<Value *, 4> NewOps;
    for (unsigned i = 0, ie = (*I)->getNumOperands(); i != ie; ++i) {
      const Value *V = (*I)->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Value *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps),
           *TempM = MDMap[*I];

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
          CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
          CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
          CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
          CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }

  // Now that everything has been replaced, delete the dummy nodes.
  for (unsigned i = 0, ie = DummyNodes.size(); i != ie; ++i)
    MDNode::deleteTemporary(DummyNodes[i]);
}

/// AddAliasScopeMetadata - If the inlined function has noalias arguments, then
/// add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
                                  const DataLayout *DL, AliasAnalysis *AA) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CS.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
       E = CalledFunc->arg_end(); I != E; ++I) {
    if (I->hasNoAliasAttr() && !I->hasNUses(0))
      NoAliasArgs.push_back(I);
  }

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.
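  //
  // For example, given "define void @f(i32* noalias %a, i32* %b)", accesses
  // based on %a are placed in a fresh scope created for %a, while accesses
  // provably not based on %a are tagged noalias with that same scope.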
  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
    MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = CalledFunc->getName();
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (ICS.doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (AA) {
          AliasAnalysis::ModRefBehavior MRB = AA->getModRefBehavior(ICS);
          if (MRB == AliasAnalysis::OnlyAccessesArgumentPointees ||
              MRB == AliasAnalysis::OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (ImmutableCallSite::arg_iterator AI = ICS.arg_begin(),
             AE = ICS.arg_end(); AI != AE; ++AI) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !(*AI)->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(*AI);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata. However, if
      // this is a call, then we might just alias with none of the noalias
      // arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Value *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (unsigned i = 0, ie = PtrArgs.size(); i != ie; ++i) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(PtrArgs[i]),
                             Objects, DL, /* MaxLookup = */ 0);
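        // (A MaxLookup of 0 asks GetUnderlyingObjects for an unbounded
        // search, so all reachable underlying objects are collected.)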

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                   /* ReturnCaptures */ false,
                                   /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
          NI->getMetadata(LLVMContext::MD_noalias),
          MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
          NI->getMetadata(LLVMContext::MD_alias_scope),
          MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made. Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
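  // (Adding the new edges to CallerNode below would invalidate these
  // iterators when the two nodes are the same, so iterate over a copy in the
  // recursive case.)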
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (!NewCall) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}
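
/// HandleByValArgumentInit - Emit the explicit copy for a byval argument:
/// memcpy the aggregate that Src points at (the caller's original value) into
/// Dst (the temporary introduced by HandleByValArgument) at the top of
/// InsertBlock.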
static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  LLVMContext &Context = Src->getContext();
  Type *VoidPtrTy = Type::getInt8PtrTy(Context);
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  Type *Tys[3] = { VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context) };
  Function *MemCpyFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy, Tys);
  IRBuilder<> builder(InsertBlock->begin());
  Value *DstCast = builder.CreateBitCast(Dst, VoidPtrTy, "tmp");
  Value *SrcCast = builder.CreateBitCast(Src, VoidPtrTy, "tmp");

  Value *Size;
  if (IFI.DL == nullptr)
    Size = ConstantExpr::getSizeOf(AggTy);
  else
    Size = ConstantInt::get(Type::getInt64Ty(Context),
                            IFI.DL->getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Value *CallArgs[] = {
    DstCast, SrcCast, Size,
    ConstantInt::get(Type::getInt32Ty(Context), 1),
    ConstantInt::getFalse(Context) // isVolatile
  };
  builder.CreateCall(MemCpyFn, CallArgs);
}

/// HandleByValArgument - When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
                                   IFI.DL) >= ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
  unsigned Align = 1;
  if (IFI.DL)
    Align = IFI.DL->getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Function *Caller = TheCall->getParent()->getParent();
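
  // Place the temporary at the top of the caller's entry block so it becomes
  // a static alloca that the rest of inlining (and later passes) can reason
  // about.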
  Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// isUsedByLifetimeMarker - Check whether this Value is used by a lifetime
// intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// hasLifetimeMarkers - Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// updateInlinedAtInfo - Helper function used by fixupLineNumbers to
/// recursively update the InlinedAtEntry of a DebugLoc.
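/// The recursion walks to the innermost location of the existing InlinedAt
/// chain and rebuilds the chain on the way out, so the call site's location
/// ends up at the bottom of the new chain.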
static DebugLoc updateInlinedAtInfo(const DebugLoc &DL,
                                    const DebugLoc &InlinedAtDL,
                                    LLVMContext &Ctx) {
  if (MDNode *IA = DL.getInlinedAt(Ctx)) {
    DebugLoc NewInlinedAtDL
      = updateInlinedAtInfo(DebugLoc::getFromDILocation(IA), InlinedAtDL, Ctx);
    return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                         NewInlinedAtDL.getAsMDNode(Ctx));
  }

  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                       InlinedAtDL.getAsMDNode(Ctx));
}

/// fixupLineNumbers - Update inlined instructions' line numbers to encode the
/// location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (TheCallDL.isUnknown())
    return;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (DL.isUnknown()) {
        // If the inlined instruction has no line number, make it look as if it
        // originates from the call location. This is important for
        // ((__always_inline__, __nodebug__)) functions which must use caller
        // location for all instructions in their function body.
        BI->setDebugLoc(TheCallDL);
      } else {
        BI->setDebugLoc(updateInlinedAtInfo(DL, TheCallDL, BI->getContext()));
        if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
          LLVMContext &Ctx = BI->getContext();
          MDNode *InlinedAt = BI->getDebugLoc().getInlinedAt(Ctx);
          DVI->setOperand(2, createInlinedVariable(DVI->getVariable(),
                                                   InlinedAt, Ctx));
        }
      }
    }
  }
}

/// InlineFunction - This function inlines the called function into the basic
/// block of the caller. This returns false if it is not possible to inline
/// this call. The program is still in a well defined state if this occurs
/// though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (!CalledFunc ||               // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to
  //     the caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Value *CalleePersonality = nullptr;
  for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
       I != E; ++I)
    if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
      const BasicBlock *BB = II->getUnwindDest();
      const LandingPadInst *LP = BB->getLandingPadInst();
      CalleePersonality = LP->getPersonalityFn();
      break;
    }

  // Find the personality function used by the landing pads of the caller. If
  // it exists, then check to see that it matches the personality function used
  // in the callee.
  if (CalleePersonality) {
    for (Function::const_iterator I = Caller->begin(), E = Caller->end();
         I != E; ++I)
      if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
        const BasicBlock *BB = II->getUnwindDest();
        const LandingPadInst *LP = BB->getLandingPadInst();

        // If the personality functions match, then we can perform the
        // inlining. Otherwise, we can't inline.
        // TODO: This isn't 100% true. Some personality functions are proper
        //       supersets of others and can be used in place of the other.
        if (LP->getPersonalityFn() != CalleePersonality)
          return false;

        break;
      }
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of pair (dst, src) to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));
        if (ActualArg != *AI)
          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
      }

      VMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, IFI.DL, TheCall);

    // Remember the first block that is newly cloned over.
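    // (LastBlock was the caller's final block before cloning, so the block
    // immediately after it is the first one the cloner appended.)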
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Inject byval arguments initialization.
    for (std::pair<Value*, Value*> &Init : ByValInit)
      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
                              FirstNewBlock, IFI);

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);

    // Clone existing noalias metadata if necessary.
    CloneAliasScopeMetadata(CS, VMap);

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CS, VMap, IFI.DL, IFI.AA);
  }

  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  // First calculate which instruction they should be inserted before. We
  // insert the instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }

  bool InlinedMustTailCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(TheCall))
      CallSiteTailKind = CI->getTailCallKind();

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : *BB) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // We need to reduce the strength of any inlined tail calls. For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth. For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'. If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail. Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
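        //
        // std::min implements this weakening because TailCallKind is ordered
        // TCK_None < TCK_Tail < TCK_MustTail.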
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
          dyn_cast<ConstantInt>(AI->getArraySize())) {
        if (IFI.DL) {
          Type *AllocaType = AI->getAllocatedType();
          uint64_t AllocaTypeSize = IFI.DL->getTypeAllocSize(AllocaType);
          uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
          assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
          // Check that array size doesn't saturate uint64_t and doesn't
          // overflow when it's multiplied by type size.
          if (AllocaArraySize != ~0ULL &&
              UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
            AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                          AllocaArraySize * AllocaTypeSize);
          }
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail call and a
        // return. The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the
  // inlined code with llvm.stacksave/llvm.stackrestore intrinsics.
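  // Without the save/restore pair, the callee's dynamic allocas would stay
  // live in the caller's frame after the inlined code finishes, growing the
  // stack each time the old call site is executed (e.g. inside a loop).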
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
      Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
      .CreateCall(StackSave, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail call and a
      // return. The return will restore the stack pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // Handle any inlined musttail call sites. In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail. Therefore it's safe to return without merging control into the
  // phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;
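    // When the types differ, each 'ret' that forwards a musttail call result
    // below must be rewritten to return a value of the caller's return type.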

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(CreatedBranchToNormalDest,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and
  // eliminate any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction now and empty ReturnBB now.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it. It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block.
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    if (Value *V = SimplifyInstruction(PHI, IFI.DL)) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}