//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>

using namespace llvm;
static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
                        cl::Hidden,
                        cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
                             cl::init(true), cl::Hidden,
                             cl::desc("Convert align attributes to assumptions during inlining."));
bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, CalleeAAR, InsertLifetime);
}

bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, CalleeAAR, InsertLifetime);
}
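
// A minimal usage sketch (hypothetical client code; the pass state and the
// names below are illustrative, not part of this file): callers typically
// build an InlineFunctionInfo, attempt the inline, and only then treat the
// call site as gone.
//
//   InlineFunctionInfo IFI(/*cg=*/nullptr, /*ACT=*/&ACT);
//   if (llvm::InlineFunction(CI, IFI))
//     ++NumInlined;  // CI has been replaced by the inlined body.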
namespace {
  /// A class for recording information about inlining a landing pad.
  class LandingPadInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    LandingPadInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
        CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
} // anonymous namespace
/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  Instruction *InsertPoint = &InnerResumeDest->front();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}
/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
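///
/// For example (illustrative IR), a "resume" in the inlined body such as
///   resume { i8*, i32 } %lpad.val
/// is replaced by "br label %lpad.body", with %lpad.val fed into the inner
/// PHI that merges the exception values from all forwarded resumes.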
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}
/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static Value *getParentPad(Value *EHPad) {
  if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
    return FPI->getParentPad();
  return cast<CatchSwitchInst>(EHPad)->getParentPad();
}

typedef DenseMap<Instruction *, Value *> UnwindDestMemoTy;
/// Helper for getUnwindDestToken that does the descendant-ward part of
/// the search.
static Value *getUnwindDestTokenHelper(Instruction *EHPad,
                                       UnwindDestMemoTy &MemoMap) {
  SmallVector<Instruction *, 8> Worklist(1, EHPad);

  while (!Worklist.empty()) {
    Instruction *CurrentPad = Worklist.pop_back_val();
    // We only put pads on the worklist that aren't in the MemoMap.  When
    // we find an unwind dest for a pad we may update its ancestors, but
    // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
    // so they should never get updated while queued on the worklist.
    assert(!MemoMap.count(CurrentPad));
    Value *UnwindDestToken = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
      if (CatchSwitch->hasUnwindDest()) {
        UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
      } else {
        // Catchswitch doesn't have a 'nounwind' variant, and one might be
        // annotated as "unwinds to caller" when really it's nounwind (see
        // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
        // parent's unwind dest from this.  We can check its catchpads'
        // descendants, since they might include a cleanuppad with an
        // "unwinds to caller" cleanupret, which can be trusted.
        for (auto HI = CatchSwitch->handler_begin(),
                  HE = CatchSwitch->handler_end();
             HI != HE && !UnwindDestToken; ++HI) {
          BasicBlock *HandlerBlock = *HI;
          auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
          for (User *Child : CatchPad->users()) {
            // Intentionally ignore invokes here -- since the catchswitch is
            // marked "unwind to caller", it would be a verifier error if it
            // contained an invoke which unwinds out of it, so any invoke we'd
            // encounter must unwind to some child of the catch.
            if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
              continue;

            Instruction *ChildPad = cast<Instruction>(Child);
            auto Memo = MemoMap.find(ChildPad);
            if (Memo == MemoMap.end()) {
              // Haven't figured out this child pad yet; queue it.
              Worklist.push_back(ChildPad);
              continue;
            }
            // We've already checked this child, but might have found that
            // it offers no proof either way.
            Value *ChildUnwindDestToken = Memo->second;
            if (!ChildUnwindDestToken)
              continue;
            // We already know the child's unwind dest, which can either
            // be ConstantTokenNone to indicate unwind to caller, or can
            // be another child of the catchpad.  Only the former indicates
            // the unwind dest of the catchswitch.
            if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
              UnwindDestToken = ChildUnwindDestToken;
              break;
            }
            assert(getParentPad(ChildUnwindDestToken) == CatchPad);
          }
        }
      }
    } else {
      auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
      for (User *U : CleanupPad->users()) {
        if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
          if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
            UnwindDestToken = RetUnwindDest->getFirstNonPHI();
          else
            UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
          break;
        }
        Value *ChildUnwindDestToken;
        if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
          ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
        } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
          Instruction *ChildPad = cast<Instruction>(U);
          auto Memo = MemoMap.find(ChildPad);
          if (Memo == MemoMap.end()) {
            // Haven't resolved this child yet; queue it and keep searching.
            Worklist.push_back(ChildPad);
            continue;
          }
          // We've checked this child, but still need to ignore it if it
          // had no proof either way.
          ChildUnwindDestToken = Memo->second;
          if (!ChildUnwindDestToken)
            continue;
        } else {
          // Not a relevant user of the cleanuppad.
          continue;
        }
        // In a well-formed program, the child/invoke must either unwind to
        // an(other) child of the cleanup, or exit the cleanup.  In the
        // first case, continue searching.
        if (isa<Instruction>(ChildUnwindDestToken) &&
            getParentPad(ChildUnwindDestToken) == CleanupPad)
          continue;
        UnwindDestToken = ChildUnwindDestToken;
        break;
      }
    }
    // If we haven't found an unwind dest for CurrentPad, we may have queued its
    // children, so move on to the next in the worklist.
    if (!UnwindDestToken)
      continue;

    // Now we know that CurrentPad unwinds to UnwindDestToken.  It also exits
    // any ancestors of CurrentPad up to but not including UnwindDestToken's
    // parent pad.  Record this in the memo map, and check to see if the
    // original EHPad being queried is one of the ones exited.
    Value *UnwindParent;
    if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
      UnwindParent = getParentPad(UnwindPad);
    else
      UnwindParent = nullptr;
    bool ExitedOriginalPad = false;
    for (Instruction *ExitedPad = CurrentPad;
         ExitedPad && ExitedPad != UnwindParent;
         ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
      // Skip over catchpads since they just follow their catchswitches.
      if (isa<CatchPadInst>(ExitedPad))
        continue;
      MemoMap[ExitedPad] = UnwindDestToken;
      ExitedOriginalPad |= (ExitedPad == EHPad);
    }

    if (ExitedOriginalPad)
      return UnwindDestToken;

    // Continue the search.
  }

  // No definitive information is contained within this funclet.
  return nullptr;
}
/// Given an EH pad, find where it unwinds.  If it unwinds to an EH pad,
/// return that pad instruction.  If it unwinds to caller, return
/// ConstantTokenNone.  If it does not have a definitive unwind destination,
/// return nullptr.
///
/// This routine gets invoked for calls in funclets in inlinees when inlining
/// an invoke.  Since many funclets don't have calls inside them, it's queried
/// on-demand rather than building a map of pads to unwind dests up front.
/// Determining a funclet's unwind dest may require recursively searching its
/// descendants, and also ancestors and cousins if the descendants don't provide
/// an answer.  Since most funclets will have their unwind dest immediately
/// available as the unwind dest of a catchswitch or cleanupret, this routine
/// searches top-down from the given pad and then up.  To avoid worst-case
/// quadratic run-time given that approach, it uses a memo map to avoid
/// re-processing funclet trees.  The callers that rewrite the IR as they go
/// take advantage of this, for correctness, by checking/forcing rewritten
/// pads' entries to match the original callee view.
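///
/// For example (a hypothetical funclet-EH snippet), querying the cleanuppad
/// below yields ConstantTokenNone because its cleanupret unwinds to caller:
///
///   %cp = cleanuppad within none []
///   cleanupret from %cp unwind to caller
///
/// whereas a "cleanupret from %cp unwind label %dest" would instead yield the
/// EH pad instruction that starts %dest.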
static Value *getUnwindDestToken(Instruction *EHPad,
                                 UnwindDestMemoTy &MemoMap) {
  // Catchpads unwind to the same place as their catchswitch;
  // redirect any queries on catchpads so the code below can
  // deal with just catchswitches and cleanuppads.
  if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
    EHPad = CPI->getCatchSwitch();

  // Check if we've already determined the unwind dest for this pad.
  auto Memo = MemoMap.find(EHPad);
  if (Memo != MemoMap.end())
    return Memo->second;

  // Search EHPad and, if necessary, its descendants.
  Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
  assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
  if (UnwindDestToken)
    return UnwindDestToken;

  // No information is available for this EHPad from itself or any of its
  // descendants.  An unwind all the way out to a pad in the caller would
  // need also to agree with the unwind dest of the parent funclet, so
  // search up the chain to try to find a funclet with information.  Put
  // null entries in the memo map to avoid re-processing as we go up.
  MemoMap[EHPad] = nullptr;
  Instruction *LastUselessPad = EHPad;
  Value *AncestorToken;
  for (AncestorToken = getParentPad(EHPad);
       auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
       AncestorToken = getParentPad(AncestorToken)) {
    // Skip over catchpads since they just follow their catchswitches.
    if (isa<CatchPadInst>(AncestorPad))
      continue;
    assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
    auto AncestorMemo = MemoMap.find(AncestorPad);
    if (AncestorMemo == MemoMap.end()) {
      UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
    } else {
      UnwindDestToken = AncestorMemo->second;
    }
    if (UnwindDestToken)
      break;
    LastUselessPad = AncestorPad;
  }

  // Since the whole tree under LastUselessPad has no information, it all must
  // match UnwindDestToken; record that to avoid repeating the search.
  SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
  while (!Worklist.empty()) {
    Instruction *UselessPad = Worklist.pop_back_val();
    assert(!MemoMap.count(UselessPad) || MemoMap[UselessPad] == nullptr);
    MemoMap[UselessPad] = UnwindDestToken;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
      for (BasicBlock *HandlerBlock : CatchSwitch->handlers())
        for (User *U : HandlerBlock->getFirstNonPHI()->users())
          if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
            Worklist.push_back(cast<Instruction>(U));
    } else {
      assert(isa<CleanupPadInst>(UselessPad));
      for (User *U : UselessPad->users())
        if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
          Worklist.push_back(cast<Instruction>(U));
    }
  }

  return UnwindDestToken;
}
/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to UnwindEdge, returning BB (so
/// the caller can update PHI nodes in the unwind destination) or nullptr if
/// nothing was rewritten.
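///
/// Conceptually (illustrative IR, names invented for the example), a
/// may-throw call in the inlined body such as
///   call void @f()
/// is rewritten as
///   invoke void @f() to label %bb.noexc unwind label %unwind.edge
/// where %bb.noexc is the split-off remainder of the original block.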
static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
    BasicBlock *BB, BasicBlock *UnwindEdge,
    UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = &*BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
      // This call is nested inside a funclet.  If that funclet has an unwind
      // destination within the inlinee, then unwinding out of this call would
      // be UB.  Rewriting this call to an invoke which targets the inlined
      // invoke's unwind dest would give the call's parent funclet multiple
      // unwind destinations, which is something that subsequent EH table
      // generation can't handle and that the verifier rejects.  So when we
      // see such a call, leave it as a call.
      auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
      Value *UnwindDestToken =
          getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
      if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
        continue;
#ifndef NDEBUG
      Instruction *MemoKey;
      if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
        MemoKey = CatchPad->getCatchSwitch();
      else
        MemoKey = FuncletPad;
      assert(FuncletUnwindMap->count(MemoKey) &&
             (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
             "must get memoized to avoid confusing later searches");
#endif // NDEBUG
    }

    // Convert this function call into an invoke instruction.  First, split the
    // basic block.
    BasicBlock *Split =
        BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    SmallVector<Value*, 8> InvokeArgs(CI->arg_begin(), CI->arg_end());
    SmallVector<OperandBundleDef, 1> OpBundles;

    CI->getOperandBundlesAsDefs(OpBundles);

    // Note: we're round tripping operand bundles through memory here, and that
    // can potentially be avoided with a cleverer API design that we do not have
    // as of this writing.

    InvokeInst *II =
        InvokeInst::Create(CI->getCalledValue(), Split, UnwindEdge, InvokeArgs,
                           OpBundles, CI->getName(), BB);
    II->setDebugLoc(CI->getDebugLoc());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke!  This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call
    Split->getInstList().pop_front();
    return BB;
  }
  return nullptr;
}
/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
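///
/// Note that the caller's landingpad clauses are appended to every inlined
/// landingpad below: e.g. an inlined "catch i8* @T1" pad gains the caller's
/// "catch i8* @T2" clause (and its cleanup flag, if set) so that the merged
/// pad can handle anything the outer pad could have handled.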
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node) by using the "removePredecessor" method.
  InvokeDest->removePredecessor(II->getParent());
}
/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  llvm::BasicBlock *InvokeBB = II->getParent();
  for (Instruction &I : *UnwindDest) {
    // Save the value to use for this edge.
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given basic
  // block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // This connects all the instructions which 'unwind to caller' to the invoke
  // destination.
  UnwindDestMemoTy FuncletUnwindMap;
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        auto *CleanupPad = CRI->getCleanupPad();
        CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
        // Finding a cleanupret with an unwind destination would confuse
        // subsequent calls to getUnwindDestToken, so map the cleanuppad
        // to short-circuit any such calls and recognize this as an "unwind
        // to caller" cleanup.
        assert(!FuncletUnwindMap.count(CleanupPad) ||
               isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
        FuncletUnwindMap[CleanupPad] =
            ConstantTokenNone::get(Caller->getContext());
      }
      continue;
    }

    Instruction *I = BB->getFirstNonPHI();
    if (!I->isEHPad())
      continue;

    Instruction *Replacement = nullptr;
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
      if (CatchSwitch->unwindsToCaller()) {
        Value *UnwindDestToken;
        if (auto *ParentPad =
                dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
          // This catchswitch is nested inside another funclet.  If that
          // funclet has an unwind destination within the inlinee, then
          // unwinding out of this catchswitch would be UB.  Rewriting this
          // catchswitch to unwind to the inlined invoke's unwind dest would
          // give the parent funclet multiple unwind destinations, which is
          // something that subsequent EH table generation can't handle and
          // that the verifier rejects.  So when we see such a call, leave it
          // as "unwind to caller".
          UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
          if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
            continue;
        } else {
          // This catchswitch has no parent to inherit constraints from, and
          // none of its descendants can have an unwind edge that exits it and
          // targets another funclet in the inlinee.  It may or may not have a
          // descendant that definitively has an unwind to caller.  In either
          // case, we'll have to assume that any unwinds out of it may need to
          // be routed to the caller, so treat it as though it has a definitive
          // unwind to caller.
          UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
        }
        auto *NewCatchSwitch = CatchSwitchInst::Create(
            CatchSwitch->getParentPad(), UnwindDest,
            CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
            CatchSwitch);
        for (BasicBlock *PadBB : CatchSwitch->handlers())
          NewCatchSwitch->addHandler(PadBB);
        // Propagate info for the old catchswitch over to the new one in
        // the unwind map.  This also serves to short-circuit any subsequent
        // checks for the unwind dest of this catchswitch, which would get
        // confused if they found the outer handler in the callee.
        FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
        Replacement = NewCatchSwitch;
      }
    } else if (!isa<FuncletPadInst>(I)) {
      llvm_unreachable("unexpected EHPad!");
    }

    if (Replacement) {
      Replacement->takeName(I);
      I->replaceAllUsesWith(Replacement);
      I->eraseFromParent();
      UpdatePHINodes(&*BB);
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, UnwindDest, &FuncletUnwindMap))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node) by using the "removePredecessor" method.
  UnwindDest->removePredecessor(InvokeBB);
}
/// When inlining a function that contains noalias scope metadata,
/// this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site.  Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
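///
/// For example, if a callee carrying !alias.scope/!noalias metadata is inlined
/// twice into the same caller, each inlined body must reference its own clone
/// of the scope nodes; otherwise an access from the first copy would appear to
/// share a scope with an access from the second copy, even though the two
/// inlined call sites may have different control dependencies.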
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller.  I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes.  We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (Function::const_iterator I = CalledFunc->begin(), IE = CalledFunc->end();
       I != IE; ++I)
    for (BasicBlock::const_iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
  SmallVector<TempMDTuple, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
    MDMap[*I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    SmallVector<Metadata *, 4> NewOps;
    for (unsigned i = 0, ie = (*I)->getNumOperands(); i != ie; ++i) {
      const Metadata *V = (*I)->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[*I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }
}
/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
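///
/// For example (illustrative IR), when inlining
///   define void @f(i8* noalias %p, i8* %q) { ... }
/// the inlined accesses through pointers based on %p get a fresh !alias.scope
/// entry, and accesses provably not based on %p get a matching !noalias entry,
/// preserving the argument's noalias guarantee after the attribute is gone.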
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CS.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &I : CalledFunc->args()) {
    if (I.hasNoAliasAttr() && !I.hasNUses(0))
      NoAliasArgs.push_back(&I);
  }

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it.  So we add a new "scope" for each
  // noalias function argument.  Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
    MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = CalledFunc->getName();
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here.  This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (ICS.doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
          if (MRB == FMRB_OnlyAccessesArgumentPointees ||
              MRB == FMRB_OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (ImmutableCallSite::arg_iterator AI = ICS.arg_begin(),
             AE = ICS.arg_end(); AI != AE; ++AI) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !(*AI)->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(*AI);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, then we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (unsigned i = 0, ie = PtrArgs.size(); i != ie; ++i) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(PtrArgs[i]),
                             Objects, DL, /* LI = */ nullptr);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias.  Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                   /* ReturnCaptures */ false,
                                   /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects.  If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer).  We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}
/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
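///
/// For example (illustrative IR), for a parameter "i8* align 32 %p" whose
/// alignment is not already provable at the call site, the IRBuilder emits
/// roughly the following assumption pattern:
///   %ptrint = ptrtoint i8* %p to i64
///   %maskedptr = and i64 %ptrint, 31
///   %maskcond = icmp eq i64 %maskedptr, 0
///   call void @llvm.assume(i1 %maskcond)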
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions)
    return;
  auto &DL = CS.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller.  To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CS.getCalledFunction();
  for (Function::arg_iterator I = CalledFunc->arg_begin(),
                              E = CalledFunc->arg_end();
       I != E; ++I) {
    unsigned Align = I->getType()->isPointerTy() ? I->getParamAlignment() : 0;
    if (Align && !I->hasByValOrInAllocaAttr() && !I->hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(const_cast<Function&>(*CS.getInstruction()->getParent()
                                               ->getParent()));
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
      Value *Arg = CS.getArgument(I->getArgNo());
      if (getKnownAlignment(Arg, DL, CS.getInstruction(),
                            &IFI.ACT->getAssumptionCache(*CS.getCaller()),
                            &DT) >= Align)
        continue;

      IRBuilder<>(CS.getInstruction())
          .CreateAlignmentAssumption(DL, Arg, Align);
    }
  }
}
/// Once we have cloned code over from a callee into the caller,
/// update the specified callgraph to reflect the changes we made.
/// Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
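///
/// For example, if A calls B and B calls C, then after inlining B into A the
/// A->B edge is removed and an A->C edge is added for the cloned call to C,
/// unless that call was constant-folded away during cloning or resolved to an
/// intrinsic.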
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add.  Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (!NewCall)
      continue;

    // We do not treat intrinsic calls like real function calls because we
    // expect them to become inline code; do not add an edge for an intrinsic.
    CallSite CS = CallSite(NewCall);
    if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic())
      continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer.  If this
    // happens, set the callee of the new call site to a more precise
    // destination.  This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller.  We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}
static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());

  Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer.  Other optimizations can infer
  // better alignment.
  Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
}
/// When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
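///
/// Conceptually (illustrative IR), a byval argument %src whose copy cannot be
/// elided becomes a caller-local temporary plus an explicit copy:
///   %tmp = alloca %struct.T, align 32
///   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp.i8, i8* %src.i8, i64 ..., i32 1, i1 false)
/// HandleByValArgument creates the alloca; HandleByValArgumentInit emits the
/// memcpy at the start of the inlined body.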
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  Function *Caller = TheCall->getParent()->getParent();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory.  In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    const DataLayout &DL = Caller->getParent()->getDataLayout();

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall,
                                   &IFI.ACT->getAssumptionCache(*Caller)) >=
        ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment.  This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca.  If we have DataLayout, use nice alignment.
  unsigned Align =
      Caller->getParent()->getDataLayout().getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}
// Check whether this Value is used by a lifetime intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}
// Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}
/// Rebuild the entire inlined-at chain for this instruction so that the top of
/// the chain now is inlined-at the new call site.
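///
/// For example, if an instruction's location was "L1 inlined-at L2" in the
/// callee and the new call site is C, the rebuilt chain is
/// "L1 inlined-at (L2 inlined-at C)"; i.e. C is spliced onto the bottom of
/// the existing chain.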
static DebugLoc
updateInlinedAtInfo(DebugLoc DL, DILocation *InlinedAtNode, LLVMContext &Ctx,
                    DenseMap<const DILocation *, DILocation *> &IANodes) {
  SmallVector<DILocation *, 3> InlinedAtLocations;
  DILocation *Last = InlinedAtNode;
  DILocation *CurInlinedAt = DL;

  // Gather all the inlined-at nodes
  while (DILocation *IA = CurInlinedAt->getInlinedAt()) {
    // Skip any we've already built nodes for
    if (DILocation *Found = IANodes[IA]) {
      Last = Found;
      break;
    }

    InlinedAtLocations.push_back(IA);
    CurInlinedAt = IA;
  }

  // Starting from the top, rebuild the nodes to point to the new inlined-at
  // location (then rebuilding the rest of the chain behind it) and update the
  // map of already-constructed inlined-at nodes.
  for (const DILocation *MD : make_range(InlinedAtLocations.rbegin(),
                                         InlinedAtLocations.rend())) {
    Last = IANodes[MD] = DILocation::getDistinct(
        Ctx, MD->getLine(), MD->getColumn(), MD->getScope(), Last);
  }

  // And finally create the normal location for this instruction, referring to
  // the new inlined-at chain.
  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), Last);
}
/// Update inlined instructions' line numbers to encode the location where
/// these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from the
  // same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused; without
  // this, every instruction's inlined-at chain would become distinct from
  // every other.
  DenseMap<const DILocation *, DILocation *> IANodes;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (!DL) {
        // If the inlined instruction has no line number, make it look as if it
        // originates from the call location.  This is important for
        // ((__always_inline__, __nodebug__)) functions which must use caller
        // location for all instructions in their function body.

        // Don't update static allocas, as they may get moved later.
        if (auto *AI = dyn_cast<AllocaInst>(BI))
          if (isa<Constant>(AI->getArraySize()))
            continue;

        BI->setDebugLoc(TheCallDL);
      } else {
        BI->setDebugLoc(updateInlinedAtInfo(DL, InlinedAtNode,
                                            BI->getContext(), IANodes));
      }
    }
  }
}
/// This function inlines the called function into the basic block of the
/// caller.  This returns false if it is not possible to inline this call.
/// The program is still in a well defined state if this occurs though.
///
/// Note that this only does one level of inlining.  For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream.  Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (!CalledFunc ||                 // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // The inliner does not know how to inline through calls with operand bundles
  // in general ...
  if (CS.hasOperandBundles()) {
    for (int i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
      uint32_t Tag = CS.getOperandBundleAt(i).getTagID();
      // ... but it knows how to inline through "deopt" operand bundles ...
      if (Tag == LLVMContext::OB_deopt)
        continue;
      // ... and "funclet" operand bundles.
      if (Tag == LLVMContext::OB_funclet)
        continue;

      return false;
    }
  }
  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Constant *CalledPersonality =
      CalledFunc->hasPersonalityFn()
          ? CalledFunc->getPersonalityFn()->stripPointerCasts()
          : nullptr;

  // Find the personality function used by the landing pads of the caller.  If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  Constant *CallerPersonality =
      Caller->hasPersonalityFn()
          ? Caller->getPersonalityFn()->stripPointerCasts()
          : nullptr;
  if (CalledPersonality) {
    if (!CallerPersonality)
      Caller->setPersonalityFn(CalledPersonality);
    // If the personality functions match, then we can perform the
    // inlining.  Otherwise, we can't inline.
    // TODO: This isn't 100% true.  Some personality functions are proper
    //       supersets of others and can be used in place of the other.
    else if (CalledPersonality != CallerPersonality)
      return false;
  }
  // We need to figure out which funclet the callsite was in so that we may
  // properly nest the callee.
  Instruction *CallSiteEHPad = nullptr;
  if (CallerPersonality) {
    EHPersonality Personality = classifyEHPersonality(CallerPersonality);
    if (isFuncletEHPersonality(Personality)) {
      Optional<OperandBundleUse> ParentFunclet =
          CS.getOperandBundle(LLVMContext::OB_funclet);
      if (ParentFunclet)
        CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());

      // OK, the inlining site is legal.  What about the target function?

      if (CallSiteEHPad) {
        if (Personality == EHPersonality::MSVC_CXX) {
          // The MSVC personality cannot tolerate catches getting inlined into
          // cleanup funclets.
          if (isa<CleanupPadInst>(CallSiteEHPad)) {
            // Ok, the call site is within a cleanuppad.  Let's check the callee
            // for catchpads.
            for (const BasicBlock &CalledBB : *CalledFunc) {
              if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
                return false;
            }
          }
        } else if (isAsynchronousEHPersonality(Personality)) {
          // SEH is even less tolerant, there may not be any sort of exceptional
          // funclet in the callee.
          for (const BasicBlock &CalledBB : *CalledFunc) {
            if (CalledBB.isEHPad())
              return false;
          }
        }
      }
    }
  }
  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = --Caller->end();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of pair (dst, src) to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    auto &DL = Caller->getParent()->getDataLayout();

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));
        if (ActualArg != *AI)
          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
      }

      VMap[&*I] = ActualArg;
    }
1467 // Add alignment assumptions if necessary. We do this before the inlined
1468 // instructions are actually cloned into the caller so that we can easily
1469 // check what will be known at the start of the inlined code.
1470 AddAlignmentAssumptions(CS, IFI);
    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Inject byval arguments initialization.
    for (std::pair<Value*, Value*> &Init : ByValInit)
      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
                              &*FirstNewBlock, IFI);

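    // For illustration: a byval argument %src of struct type %T that needed a
    // copy now looks roughly like this at the inlined call site (a fresh
    // alloca initialized by a memcpy from the caller's pointer):
    //   %b = alloca %T
    //   ...
    //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %b.i8, i8* %src.i8,
    //                                        i64 %sizeof.T, i32 %align,
    //                                        i1 false)
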
    Optional<OperandBundleUse> ParentDeopt =
        CS.getOperandBundle(LLVMContext::OB_deopt);
    if (ParentDeopt) {
      SmallVector<OperandBundleDef, 2> OpDefs;

      for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
        Instruction *I = dyn_cast_or_null<Instruction>(VH);
        if (!I) continue;  // instruction was DCE'd or RAUW'ed to undef

        OpDefs.clear();

        CallSite ICS(I);
        OpDefs.reserve(ICS.getNumOperandBundles());

        for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
          auto ChildOB = ICS.getOperandBundleAt(i);
          if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
            // If the inlined call has other operand bundles, copy them over
            // unchanged.
            OpDefs.emplace_back(ChildOB);
            continue;
          }

          // It may be useful to separate this logic (of handling operand
          // bundles) out to a separate "policy" component if this gets crowded.
          // Prepend the parent's deoptimization continuation to the newly
          // inlined call's deoptimization continuation.
          std::vector<Value *> MergedDeoptArgs;
          MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
                                  ChildOB.Inputs.size());

          MergedDeoptArgs.insert(MergedDeoptArgs.end(),
                                 ParentDeopt->Inputs.begin(),
                                 ParentDeopt->Inputs.end());
          MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
                                 ChildOB.Inputs.end());

          OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
        }

        Instruction *NewI = nullptr;
        if (isa<CallInst>(I))
          NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
        else
          NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);

        // Note: the RAUW does the appropriate fixup in VMap, so we need to do
        // this even if the call returns void.
        I->replaceAllUsesWith(NewI);

        VH = nullptr;
        I->eraseFromParent();
      }
    }

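    // For illustration: if the outer call site was
    //   call void @g() [ "deopt"(i32 1) ]
    // and the inlined body contained
    //   call void @h() [ "deopt"(i32 2) ]
    // then the rewritten inner call site becomes
    //   call void @h() [ "deopt"(i32 1, i32 2) ]
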
    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);

    // Clone existing noalias metadata if necessary.
    CloneAliasScopeMetadata(CS, VMap);

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);

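    // For illustration: a noalias argument in the callee becomes scope
    // metadata on the inlined memory accesses, e.g.
    //   %v = load float, float* %p, !alias.scope !5, !noalias !6
    // so alias queries in the caller keep the callee's noalias guarantees.
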
    // FIXME: We could register any cloned assumptions instead of clearing the
    // whole function's cache.
    if (IFI.ACT)
      IFI.ACT->getAssumptionCache(*Caller).clear();
  }

  // If there are any alloca instructions in the block that used to be the
  // entry block for the callee, move them to the entry block of the caller.
  // First calculate which instruction they should be inserted before. We
  // insert the instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(
          InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
    }
    // Move any dbg.declares describing the allocas into the entry basic block.
    DIBuilder DIB(*Caller->getParent());
    for (auto &AI : IFI.StaticAllocas)
      replaceDbgDeclareForAlloca(AI, AI, DIB, /*Deref=*/false);
  }

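  // For illustration: a static alloca from the callee's entry block, say
  //   %buf.i = alloca [16 x i8]
  // now lives in the caller's entry block, which is where later passes
  // expect all static allocas to be.
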
  bool InlinedMustTailCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(TheCall))
      CallSiteTailKind = CI->getTailCallKind();

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : *BB) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // We need to reduce the strength of any inlined tail calls. For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth. For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'. If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail. Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static allocas, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(&FirstNewBlock->front());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
              dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto &DL = Caller->getParent()->getDataLayout();
        Type *AllocaType = AI->getAllocatedType();
        uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
        uint64_t AllocaArraySize = AIArraySize->getLimitedValue();

        // Don't add markers for zero-sized allocas.
        if (AllocaArraySize == 0)
          continue;

        // Check that array size doesn't saturate uint64_t and doesn't
        // overflow when it's multiplied by type size.
        if (AllocaArraySize != ~0ULL &&
            UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
          AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                        AllocaArraySize * AllocaTypeSize);
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail call and a
        // return. The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

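  // For illustration: each inlined static alloca is now bracketed roughly as
  //   call void @llvm.lifetime.start(i64 16, i8* %buf.cast)
  //   ... inlined code using the alloca ...
  //   call void @llvm.lifetime.end(i64 16, i8* %buf.cast)
  // which lets later passes reuse the stack slot outside the inlined region.
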
  // If the inlined code contained dynamic alloca instructions, wrap the
  // inlined code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
                             .CreateCall(StackSave, {}, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail call and a
      // return. The return will restore the stack pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }

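  // For illustration: the wrapped region looks like
  //   %savedstack = call i8* @llvm.stacksave()
  //   ... inlined code containing dynamic allocas ...
  //   call void @llvm.stackrestore(i8* %savedstack)
  // so repeated inlining of dynamic allocas cannot grow the stack unboundedly.
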
  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions. This is sensitive to which
  // funclet pads were top-level in the inlinee, so must be done before
  // rewriting the "parent pad" links.
  if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
    BasicBlock *UnwindDest = II->getUnwindDest();
    Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
    if (isa<LandingPadInst>(FirstNonPHI)) {
      HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    } else {
      HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    }
  }

  // Update the lexical scopes of the new funclets and callsites.
  // Anything that had 'none' as its parent is now nested inside the callsite's
  // EHPad.
  if (CallSiteEHPad) {
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB) {
      // Add bundle operands to any top-level call sites.
      SmallVector<OperandBundleDef, 1> OpBundles;
      for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;) {
        Instruction *I = &*BBI++;
        CallSite CS(I);
        if (!CS)
          continue;

        // Skip call sites which are nounwind intrinsics.
        auto *CalledFn =
            dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
        if (CalledFn && CalledFn->isIntrinsic() && CS.doesNotThrow())
          continue;

        // Skip call sites which already have a "funclet" bundle.
        if (CS.getOperandBundle(LLVMContext::OB_funclet))
          continue;

        CS.getOperandBundlesAsDefs(OpBundles);
        OpBundles.emplace_back("funclet", CallSiteEHPad);

        Instruction *NewInst;
        if (CS.isCall())
          NewInst = CallInst::Create(cast<CallInst>(I), OpBundles, I);
        else
          NewInst = InvokeInst::Create(cast<InvokeInst>(I), OpBundles, I);
        NewInst->setDebugLoc(I->getDebugLoc());
        NewInst->takeName(I);
        I->replaceAllUsesWith(NewInst);
        I->eraseFromParent();

        OpBundles.clear();
      }

      Instruction *I = BB->getFirstNonPHI();
      if (!I->isEHPad())
        continue;

      if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
        if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
          CatchSwitch->setParentPad(CallSiteEHPad);
      } else {
        auto *FPI = cast<FuncletPadInst>(I);
        if (isa<ConstantTokenNone>(FPI->getParentPad()))
          FPI->setParentPad(CallSiteEHPad);
      }
    }
  }

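  // For illustration: a call that was top-level in the inlinee, e.g.
  //   call void @f()
  // is now nested inside the caller's pad and carries the bundle
  //   call void @f() [ "funclet"(token %callsite.pad) ]
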
  // Handle any inlined musttail call sites. In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail. Therefore it's safe to return without merging control into the
  // phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

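  // For illustration: if the caller returns i8* but the inlined musttail call
  // produces i32*, the rewritten block ends as
  //   %t = musttail call i32* @f()
  //   %c = bitcast i32* %t to i8*
  //   ret i8* %c
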
  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall->getIterator(),
                                 FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB =
        OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
                                CalledFunc->getName() + ".exit");

  } else { // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
                                          CalledFunc->getName() + ".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, &*FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
                                     Caller->getBasicBlockList(), FirstNewBlock,
                                     Caller->end());

  // Handle all of the return instructions that we just cloned in, and
  // eliminate any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            &AfterCallBB->front());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

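    // For illustration: with two returns feeding AfterCallBB, the merge block
    // starts with something like
    //   %r = phi i32 [ %a, %ret.bb1 ], [ %b, %ret.bb2 ]
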
    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it. It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the entry block's code into the calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    auto &DL = Caller->getParent()->getDataLayout();
    if (Value *V = SimplifyInstruction(PHI, DL, nullptr, nullptr,
                                       &IFI.ACT->getAssumptionCache(*Caller))) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}