//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Cloning.h"
-#include "llvm/Constants.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/LLVMContext.h"
-#include "llvm/Module.h"
-#include "llvm/Instructions.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Intrinsics.h"
-#include "llvm/Attributes.h"
-#include "llvm/Analysis/CallGraph.h"
-#include "llvm/Analysis/DebugInfo.h"
-#include "llvm/Target/TargetData.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Support/CallSite.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Analysis/CaptureTracking.h"
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Support/CommandLine.h"
+#include <algorithm>
using namespace llvm;
-bool llvm::InlineFunction(CallInst *CI, CallGraph *CG, const TargetData *TD,
- SmallVectorImpl<AllocaInst*> *StaticAllocas) {
- return InlineFunction(CallSite(CI), CG, TD, StaticAllocas);
+static cl::opt<bool>
+EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(false),
+ cl::Hidden,
+ cl::desc("Convert noalias attributes to metadata during inlining."));
+
+bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
+ bool InsertLifetime) {
+ return InlineFunction(CallSite(CI), IFI, InsertLifetime);
+}
+bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
+ bool InsertLifetime) {
+ return InlineFunction(CallSite(II), IFI, InsertLifetime);
+}
+
+namespace {
+ /// A class for recording information about inlining through an invoke.
+ class InvokeInliningInfo {
+ BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
+ BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
+ LandingPadInst *CallerLPad; ///< LandingPadInst associated with the invoke.
+ PHINode *InnerEHValuesPHI; ///< PHI for EH values from landingpad insts.
+ SmallVector<Value*, 8> UnwindDestPHIValues;
+
+ public:
+ InvokeInliningInfo(InvokeInst *II)
+ : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
+ CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
+ // If there are PHI nodes in the unwind destination block, we need to keep
+ // track of which values came into them from the invoke before removing
+ // the edge from this block.
+ llvm::BasicBlock *InvokeBB = II->getParent();
+ BasicBlock::iterator I = OuterResumeDest->begin();
+ for (; isa<PHINode>(I); ++I) {
+ // Save the value to use for this edge.
+ PHINode *PHI = cast<PHINode>(I);
+ UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
+ }
+
+ CallerLPad = cast<LandingPadInst>(I);
+ }
+
+ /// getOuterResumeDest - The outer unwind destination is the target of
+ /// unwind edges introduced for calls within the inlined function.
+ BasicBlock *getOuterResumeDest() const {
+ return OuterResumeDest;
+ }
+
+ BasicBlock *getInnerResumeDest();
+
+ LandingPadInst *getLandingPadInst() const { return CallerLPad; }
+
+ /// forwardResume - Forward the 'resume' instruction to the caller's landing
+ /// pad block. When the landing pad block has only one predecessor, this is
+ /// a simple branch. When there is more than one predecessor, we need to
+ /// split the landing pad block after the landingpad instruction and jump
+ /// to there.
+ void forwardResume(ResumeInst *RI,
+ SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);
+
+ /// addIncomingPHIValuesFor - Add incoming-PHI values to the unwind
+ /// destination block for the given basic block, using the values for the
+ /// original invoke's source block.
+ void addIncomingPHIValuesFor(BasicBlock *BB) const {
+ addIncomingPHIValuesForInto(BB, OuterResumeDest);
+ }
+
+ void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
+ BasicBlock::iterator I = dest->begin();
+ for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
+ PHINode *phi = cast<PHINode>(I);
+ phi->addIncoming(UnwindDestPHIValues[i], src);
+ }
+ }
+ };
}
-bool llvm::InlineFunction(InvokeInst *II, CallGraph *CG, const TargetData *TD,
- SmallVectorImpl<AllocaInst*> *StaticAllocas) {
- return InlineFunction(CallSite(II), CG, TD, StaticAllocas);
+
+/// getInnerResumeDest - Get or create a target for the branch from ResumeInsts.
+BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
+ if (InnerResumeDest) return InnerResumeDest;
+
+ // Split the landing pad.
+ BasicBlock::iterator SplitPoint = CallerLPad; ++SplitPoint;
+ InnerResumeDest =
+ OuterResumeDest->splitBasicBlock(SplitPoint,
+ OuterResumeDest->getName() + ".body");
+
+ // The number of incoming edges we expect to the inner landing pad.
+ const unsigned PHICapacity = 2;
+
+ // Create corresponding new PHIs for all the PHIs in the outer landing pad.
+ BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
+ BasicBlock::iterator I = OuterResumeDest->begin();
+ for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
+ PHINode *OuterPHI = cast<PHINode>(I);
+ PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
+ OuterPHI->getName() + ".lpad-body",
+ InsertPoint);
+ OuterPHI->replaceAllUsesWith(InnerPHI);
+ InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
+ }
+
+ // Create a PHI for the exception values.
+ InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
+ "eh.lpad-body", InsertPoint);
+ CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
+ InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
+
+ // All done.
+ return InnerResumeDest;
}
+/// forwardResume - Forward the 'resume' instruction to the caller's landing pad
+/// block. When the landing pad block has only one predecessor, this is a simple
+/// branch. When there is more than one predecessor, we need to split the
+/// landing pad block after the landingpad instruction and jump to there.
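+///
+/// Roughly (a sketch; the block name is hypothetical):
+///
+///   resume { i8*, i32 } %exn
+///
+/// becomes "br label %lpad.body", with %exn merged into the inner PHI of
+/// exception values.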
+void InvokeInliningInfo::forwardResume(ResumeInst *RI,
+ SmallPtrSetImpl<LandingPadInst*> &InlinedLPads) {
+ BasicBlock *Dest = getInnerResumeDest();
+ BasicBlock *Src = RI->getParent();
+
+ BranchInst::Create(Dest, Src);
+
+ // Update the PHIs in the destination. They were inserted in an order which
+ // makes this work.
+ addIncomingPHIValuesForInto(Src, Dest);
+
+ InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
+ RI->eraseFromParent();
+}
/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
-/// invokes. This function analyze BB to see if there are any calls, and if so,
-/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
-/// nodes in that block with the values specified in InvokeDestPHIValues.
-///
+/// invokes. This function analyzes BB to see if there are any calls, and if
+/// so, it rewrites them to be invokes that jump to the invoke's outer resume
+/// destination and fills in that block's PHI nodes accordingly.
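+///
+/// For illustration (a sketch; names hypothetical):
+///
+///   %x = call i32 @foo()              ; a call in BB that may throw
+///
+/// becomes
+///
+///   %x = invoke i32 @foo()
+///           to label %x.noexc unwind label %outer.lpad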
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
- BasicBlock *InvokeDest,
- const SmallVectorImpl<Value*> &InvokeDestPHIValues) {
+ InvokeInliningInfo &Invoke) {
for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
Instruction *I = BBI++;
-
+
// We only need to check for function calls: inlined invoke
// instructions require no special handling.
CallInst *CI = dyn_cast<CallInst>(I);
- if (CI == 0) continue;
-
+
// If this call cannot unwind, don't convert it to an invoke.
- if (CI->doesNotThrow())
+ // Inline asm calls cannot throw.
+ if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
continue;
-
- // Convert this function call into an invoke instruction.
- // First, split the basic block.
+
+ // Convert this function call into an invoke instruction. First, split the
+ // basic block.
BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");
-
- // Next, create the new invoke instruction, inserting it at the end
- // of the old basic block.
- SmallVector<Value*, 8> InvokeArgs(CI->op_begin()+1, CI->op_end());
- InvokeInst *II =
- InvokeInst::Create(CI->getCalledValue(), Split, InvokeDest,
- InvokeArgs.begin(), InvokeArgs.end(),
- CI->getName(), BB->getTerminator());
+
+ // Delete the unconditional branch inserted by splitBasicBlock
+ BB->getInstList().pop_back();
+
+ // Create the new invoke instruction.
+ ImmutableCallSite CS(CI);
+ SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
+ InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
+ Invoke.getOuterResumeDest(),
+ InvokeArgs, CI->getName(), BB);
+ II->setDebugLoc(CI->getDebugLoc());
II->setCallingConv(CI->getCallingConv());
II->setAttributes(CI->getAttributes());
// Make sure that anything using the call now uses the invoke! This also
- // updates the CallGraph if present.
+ // updates the CallGraph if present, because it uses a WeakVH.
CI->replaceAllUsesWith(II);
-
- // Delete the unconditional branch inserted by splitBasicBlock
- BB->getInstList().pop_back();
- Split->getInstList().pop_front(); // Delete the original call
-
- // Update any PHI nodes in the exceptional block to indicate that
- // there is now a new entry in them.
- unsigned i = 0;
- for (BasicBlock::iterator I = InvokeDest->begin();
- isa<PHINode>(I); ++I, ++i)
- cast<PHINode>(I)->addIncoming(InvokeDestPHIValues[i], BB);
-
- // This basic block is now complete, the caller will continue scanning the
- // next one.
+
+ // Delete the original call
+ Split->getInstList().pop_front();
+
+ // Update any PHI nodes in the exceptional block to indicate that there is
+ // now a new entry in them.
+ Invoke.addIncomingPHIValuesFor(BB);
return;
}
}
-
/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
-/// in the body of the inlined function into invokes and turn unwind
-/// instructions into branches to the invoke unwind dest.
+/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
ClonedCodeInfo &InlinedCodeInfo) {
BasicBlock *InvokeDest = II->getUnwindDest();
- SmallVector<Value*, 8> InvokeDestPHIValues;
-
- // If there are PHI nodes in the unwind destination block, we need to
- // keep track of which values came into them from this invoke, then remove
- // the entry for this block.
- BasicBlock *InvokeBlock = II->getParent();
- for (BasicBlock::iterator I = InvokeDest->begin(); isa<PHINode>(I); ++I) {
- PHINode *PN = cast<PHINode>(I);
- // Save the value to use for this edge.
- InvokeDestPHIValues.push_back(PN->getIncomingValueForBlock(InvokeBlock));
- }
Function *Caller = FirstNewBlock->getParent();
// The inlined code is currently at the end of the function, scan from the
// start of the inlined code to its end, checking for stuff we need to
- // rewrite. If the code doesn't have calls or unwinds, we know there is
- // nothing to rewrite.
- if (!InlinedCodeInfo.ContainsCalls && !InlinedCodeInfo.ContainsUnwinds) {
- // Now that everything is happy, we have one final detail. The PHI nodes in
- // the exception destination block still have entries due to the original
- // invoke instruction. Eliminate these entries (which might even delete the
- // PHI node) now.
- InvokeDest->removePredecessor(II->getParent());
- return;
+ // rewrite.
+ InvokeInliningInfo Invoke(II);
+
+ // Get all of the inlined landing pad instructions.
+ SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
+ for (Function::iterator I = FirstNewBlock, E = Caller->end(); I != E; ++I)
+ if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
+ InlinedLPads.insert(II->getLandingPadInst());
+
+ // Append the clauses from the outer landing pad instruction into the inlined
+ // landing pad instructions.
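+  // For example (a sketch): an inlined "landingpad ... catch i8* @T1" that
+  // may unwind through an outer "landingpad ... catch i8* @T2 cleanup" ends
+  // up as "landingpad ... catch i8* @T1 catch i8* @T2 cleanup".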
+ LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
+ for (SmallPtrSet<LandingPadInst*, 16>::iterator I = InlinedLPads.begin(),
+ E = InlinedLPads.end(); I != E; ++I) {
+ LandingPadInst *InlinedLPad = *I;
+ unsigned OuterNum = OuterLPad->getNumClauses();
+ InlinedLPad->reserveClauses(OuterNum);
+ for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
+ InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
+ if (OuterLPad->isCleanup())
+ InlinedLPad->setCleanup(true);
}
-
+
for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){
if (InlinedCodeInfo.ContainsCalls)
- HandleCallsInBlockInlinedThroughInvoke(BB, InvokeDest,
- InvokeDestPHIValues);
-
- if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
- // An UnwindInst requires special handling when it gets inlined into an
- // invoke site. Once this happens, we know that the unwind would cause
- // a control transfer to the invoke exception destination, so we can
- // transform it into a direct branch to the exception destination.
- BranchInst::Create(InvokeDest, UI);
-
- // Delete the unwind instruction!
- UI->eraseFromParent();
-
- // Update any PHI nodes in the exceptional block to indicate that
- // there is now a new entry in them.
- unsigned i = 0;
- for (BasicBlock::iterator I = InvokeDest->begin();
- isa<PHINode>(I); ++I, ++i) {
- PHINode *PN = cast<PHINode>(I);
- PN->addIncoming(InvokeDestPHIValues[i], BB);
- }
- }
+ HandleCallsInBlockInlinedThroughInvoke(BB, Invoke);
+
+ // Forward any resumes that are remaining here.
+ if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
+ Invoke.forwardResume(RI, InlinedLPads);
}
// Now that everything is happy, we have one final detail. The PHI nodes in
// the exception destination block still have entries due to the original
- // invoke instruction. Eliminate these entries (which might even delete the
+ // invoke instruction. Eliminate these entries (which might even delete the
// PHI node) now.
InvokeDest->removePredecessor(II->getParent());
}
+/// CloneAliasScopeMetadata - When inlining a function that contains noalias
+/// scope metadata, this metadata needs to be cloned so that the inlined blocks
+/// have different "unqiue scopes" at every call site. Were this not done, then
+/// aliasing scopes from a function inlined into a caller multiple times could
+/// not be differentiated (and this would lead to miscompiles because the
+/// non-aliasing property communicated by the metadata could have
+/// call-site-specific control dependencies).
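+///
+/// As a sketch (metadata numbering hypothetical), a callee instruction tagged
+///
+///   %v = load float* %p, !alias.scope !1, !noalias !2
+///
+/// is retagged in each inlined copy with freshly cloned scope lists, e.g.
+/// !alias.scope !11, !noalias !12, so separate inlinings of the same callee
+/// never share scopes.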
+static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
+ const Function *CalledFunc = CS.getCalledFunction();
+ SetVector<const MDNode *> MD;
+
+ // Note: We could only clone the metadata if it is already used in the
+ // caller. I'm omitting that check here because it might confuse
+ // inter-procedural alias analysis passes. We can revisit this if it becomes
+ // an efficiency or overhead problem.
+
+ for (Function::const_iterator I = CalledFunc->begin(), IE = CalledFunc->end();
+ I != IE; ++I)
+ for (BasicBlock::const_iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
+ if (const MDNode *M = J->getMetadata(LLVMContext::MD_alias_scope))
+ MD.insert(M);
+ if (const MDNode *M = J->getMetadata(LLVMContext::MD_noalias))
+ MD.insert(M);
+ }
+
+ if (MD.empty())
+ return;
+
+ // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
+ // the set.
+ SmallVector<const Value *, 16> Queue(MD.begin(), MD.end());
+ while (!Queue.empty()) {
+ const MDNode *M = cast<MDNode>(Queue.pop_back_val());
+ for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
+ if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
+ if (MD.insert(M1))
+ Queue.push_back(M1);
+ }
+
+ // Now we have a complete set of all metadata in the chains used to specify
+ // the noalias scopes and the lists of those scopes.
+ SmallVector<MDNode *, 16> DummyNodes;
+ DenseMap<const MDNode *, TrackingVH<MDNode> > MDMap;
+ for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
+ I != IE; ++I) {
+ MDNode *Dummy = MDNode::getTemporary(CalledFunc->getContext(),
+ ArrayRef<Value*>());
+ DummyNodes.push_back(Dummy);
+ MDMap[*I] = Dummy;
+ }
+
+ // Create new metadata nodes to replace the dummy nodes, replacing old
+ // metadata references with either a dummy node or an already-created new
+ // node.
+ for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
+ I != IE; ++I) {
+ SmallVector<Value *, 4> NewOps;
+ for (unsigned i = 0, ie = (*I)->getNumOperands(); i != ie; ++i) {
+ const Value *V = (*I)->getOperand(i);
+ if (const MDNode *M = dyn_cast<MDNode>(V))
+ NewOps.push_back(MDMap[M]);
+ else
+ NewOps.push_back(const_cast<Value *>(V));
+ }
+
+ MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps),
+ *TempM = MDMap[*I];
+
+ TempM->replaceAllUsesWith(NewM);
+ }
+
+ // Now replace the metadata in the new inlined instructions with the
+  // replacements from the map.
+ for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
+ VMI != VMIE; ++VMI) {
+ if (!VMI->second)
+ continue;
+
+ Instruction *NI = dyn_cast<Instruction>(VMI->second);
+ if (!NI)
+ continue;
+
+ if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
+ MDNode *NewMD = MDMap[M];
+ // If the call site also had alias scope metadata (a list of scopes to
+ // which instructions inside it might belong), propagate those scopes to
+ // the inlined instructions.
+ if (MDNode *CSM =
+ CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
+ NewMD = MDNode::concatenate(NewMD, CSM);
+ NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
+ } else if (NI->mayReadOrWriteMemory()) {
+ if (MDNode *M =
+ CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
+ NI->setMetadata(LLVMContext::MD_alias_scope, M);
+ }
+
+ if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
+ MDNode *NewMD = MDMap[M];
+ // If the call site also had noalias metadata (a list of scopes with
+ // which instructions inside it don't alias), propagate those scopes to
+ // the inlined instructions.
+ if (MDNode *CSM =
+ CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
+ NewMD = MDNode::concatenate(NewMD, CSM);
+ NI->setMetadata(LLVMContext::MD_noalias, NewMD);
+ } else if (NI->mayReadOrWriteMemory()) {
+ if (MDNode *M =
+ CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
+ NI->setMetadata(LLVMContext::MD_noalias, M);
+ }
+ }
+
+ // Now that everything has been replaced, delete the dummy nodes.
+ for (unsigned i = 0, ie = DummyNodes.size(); i != ie; ++i)
+ MDNode::deleteTemporary(DummyNodes[i]);
+}
+
+/// AddAliasScopeMetadata - If the inlined function has noalias arguments, then
+/// add new alias scopes for each noalias argument, tag the mapped noalias
+/// parameters with noalias metadata specifying the new scope, and tag all
+/// non-derived loads, stores and memory intrinsics with the new alias scopes.
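+///
+/// For example (a sketch; metadata numbering hypothetical), when inlining
+///
+///   define void @f(float* noalias %a, float* %b) { ... }
+///
+/// a fresh scope !N is created for %a; inlined accesses known to be based on
+/// %a are tagged "!alias.scope !N", while inlined accesses that cannot be
+/// derived from %a are tagged "!noalias !N".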
+static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
+ const DataLayout *DL) {
+ if (!EnableNoAliasConversion)
+ return;
+
+ const Function *CalledFunc = CS.getCalledFunction();
+ SmallVector<const Argument *, 4> NoAliasArgs;
+
+ for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
+ E = CalledFunc->arg_end(); I != E; ++I) {
+ if (I->hasNoAliasAttr() && !I->hasNUses(0))
+ NoAliasArgs.push_back(I);
+ }
+
+ if (NoAliasArgs.empty())
+ return;
+
+ // To do a good job, if a noalias variable is captured, we need to know if
+ // the capture point dominates the particular use we're considering.
+ DominatorTree DT;
+ DT.recalculate(const_cast<Function&>(*CalledFunc));
+
+ // noalias indicates that pointer values based on the argument do not alias
+ // pointer values which are not based on it. So we add a new "scope" for each
+ // noalias function argument. Accesses using pointers based on that argument
+ // become part of that alias scope, accesses using pointers not based on that
+ // argument are tagged as noalias with that scope.
+
+ DenseMap<const Argument *, MDNode *> NewScopes;
+ MDBuilder MDB(CalledFunc->getContext());
+
+ // Create a new scope domain for this function.
+ MDNode *NewDomain =
+ MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
+ for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
+ const Argument *A = NoAliasArgs[i];
+
+ std::string Name = CalledFunc->getName();
+ if (A->hasName()) {
+ Name += ": %";
+ Name += A->getName();
+ } else {
+ Name += ": argument ";
+ Name += utostr(i);
+ }
+
+    // Note: We always create a new anonymous root here. This is done
+    // regardless of the linkage of the callee because the aliasing "scope" is
+    // not just a property of the callee, but also of all control dependencies
+    // in the caller.
+ MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
+ NewScopes.insert(std::make_pair(A, NewScope));
+ }
+
+ // Iterate over all new instructions in the map; for all memory-access
+ // instructions, add the alias scope metadata.
+ for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
+ VMI != VMIE; ++VMI) {
+ if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
+ if (!VMI->second)
+ continue;
+
+ Instruction *NI = dyn_cast<Instruction>(VMI->second);
+ if (!NI)
+ continue;
+
+ SmallVector<const Value *, 2> PtrArgs;
+
+ if (const LoadInst *LI = dyn_cast<LoadInst>(I))
+ PtrArgs.push_back(LI->getPointerOperand());
+ else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
+ PtrArgs.push_back(SI->getPointerOperand());
+ else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
+ PtrArgs.push_back(VAAI->getPointerOperand());
+ else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
+ PtrArgs.push_back(CXI->getPointerOperand());
+ else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
+ PtrArgs.push_back(RMWI->getPointerOperand());
+ else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
+ // If we know that the call does not access memory, then we'll still
+ // know that about the inlined clone of this call site, and we don't
+ // need to add metadata.
+ if (ICS.doesNotAccessMemory())
+ continue;
+
+ for (ImmutableCallSite::arg_iterator AI = ICS.arg_begin(),
+ AE = ICS.arg_end(); AI != AE; ++AI)
+ // We need to check the underlying objects of all arguments, not just
+ // the pointer arguments, because we might be passing pointers as
+ // integers, etc.
+ // FIXME: If we know that the call only accesses pointer arguments,
+ // then we only need to check the pointer arguments.
+ PtrArgs.push_back(*AI);
+ }
+
+ // If we found no pointers, then this instruction is not suitable for
+ // pairing with an instruction to receive aliasing metadata.
+      // However, if this is a call, we might just alias with none of the
+      // noalias arguments.
+ if (PtrArgs.empty() && !isa<CallInst>(I) && !isa<InvokeInst>(I))
+ continue;
+
+      // It is possible that there is only one underlying object, but that it
+      // is reached through several PHIs and thus appears repeatedly in the
+      // Objects list; use a set to deduplicate.
+ SmallPtrSet<const Value *, 4> ObjSet;
+ SmallVector<Value *, 4> Scopes, NoAliases;
+
+ SmallSetVector<const Argument *, 4> NAPtrArgs;
+ for (unsigned i = 0, ie = PtrArgs.size(); i != ie; ++i) {
+ SmallVector<Value *, 4> Objects;
+ GetUnderlyingObjects(const_cast<Value*>(PtrArgs[i]),
+ Objects, DL, /* MaxLookup = */ 0);
+
+ for (Value *O : Objects)
+ ObjSet.insert(O);
+ }
+
+      // Figure out if we're derived from anything that is not a noalias
+ // argument.
+ bool CanDeriveViaCapture = false;
+ for (const Value *V : ObjSet)
+ if (!isIdentifiedFunctionLocal(const_cast<Value*>(V))) {
+ CanDeriveViaCapture = true;
+ break;
+ }
+
+ // First, we want to figure out all of the sets with which we definitely
+      // don't alias. Iterate over all noalias sets, and add those for which:
+ // 1. The noalias argument is not in the set of objects from which we
+ // definitely derive.
+ // 2. The noalias argument has not yet been captured.
+ for (const Argument *A : NoAliasArgs) {
+ if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
+ A->hasNoCaptureAttr() ||
+ !PointerMayBeCapturedBefore(A,
+ /* ReturnCaptures */ false,
+ /* StoreCaptures */ false, I, &DT)))
+ NoAliases.push_back(NewScopes[A]);
+ }
+
+ if (!NoAliases.empty())
+ NI->setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
+ NI->getMetadata(LLVMContext::MD_noalias),
+ MDNode::get(CalledFunc->getContext(), NoAliases)));
+      // Next, we want to figure out all of the sets to which we might belong.
+      // We might belong to a set if:
+      // 1. The noalias argument is in the set of underlying objects, or
+      // 2. There is some non-noalias argument in our list and the noalias
+      //    argument has been captured.
+
+ for (const Argument *A : NoAliasArgs) {
+ if (ObjSet.count(A) || (CanDeriveViaCapture &&
+ PointerMayBeCapturedBefore(A,
+ /* ReturnCaptures */ false,
+ /* StoreCaptures */ false,
+ I, &DT)))
+ Scopes.push_back(NewScopes[A]);
+ }
+
+ if (!Scopes.empty())
+ NI->setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
+ NI->getMetadata(LLVMContext::MD_alias_scope),
+ MDNode::get(CalledFunc->getContext(), Scopes)));
+ }
+ }
+}
+
/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made. Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
Function::iterator FirstNewBlock,
- DenseMap<const Value*, Value*> &ValueMap,
- CallGraph &CG) {
+ ValueToValueMapTy &VMap,
+ InlineFunctionInfo &IFI) {
+ CallGraph &CG = *IFI.CG;
const Function *Caller = CS.getInstruction()->getParent()->getParent();
const Function *Callee = CS.getCalledFunction();
CallGraphNode *CalleeNode = CG[Callee];
for (; I != E; ++I) {
const Value *OrigCall = I->first;
- DenseMap<const Value*, Value*>::iterator VMI = ValueMap.find(OrigCall);
+ ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
// Only copy the edge if the call was inlined!
- if (VMI == ValueMap.end() || VMI->second == 0)
+ if (VMI == VMap.end() || VMI->second == nullptr)
continue;
// If the call was inlined, but then constant folded, there is no edge to
// add. Check for this case.
- if (Instruction *NewCall = dyn_cast<Instruction>(VMI->second))
- CallerNode->addCalledFunction(CallSite::get(NewCall), I->second);
+ Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
+ if (!NewCall) continue;
+
+ // Remember that this call site got inlined for the client of
+ // InlineFunction.
+ IFI.InlinedCalls.push_back(NewCall);
+
+ // It's possible that inlining the callsite will cause it to go from an
+ // indirect to a direct call by resolving a function pointer. If this
+ // happens, set the callee of the new call site to a more precise
+ // destination. This can also happen if the call graph node of the caller
+ // was just unnecessarily imprecise.
+ if (!I->second->getFunction())
+ if (Function *F = CallSite(NewCall).getCalledFunction()) {
+ // Indirect call site resolved to direct call.
+ CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);
+
+ continue;
+ }
+
+ CallerNode->addCalledFunction(CallSite(NewCall), I->second);
}
// Update the call graph by deleting the edge from Callee to Caller. We must
CallerNode->removeCallEdgeFor(CS);
}
-// InlineFunction - This function inlines the called function into the basic
-// block of the caller. This returns false if it is not possible to inline this
-// call. The program is still in a well defined state if this occurs though.
-//
-// Note that this only does one level of inlining. For example, if the
-// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
-// exists in the instruction stream. Similiarly this will inline a recursive
-// function by one level.
-//
-bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
- SmallVectorImpl<AllocaInst*> *StaticAllocas) {
+static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
+ BasicBlock *InsertBlock,
+ InlineFunctionInfo &IFI) {
+ LLVMContext &Context = Src->getContext();
+ Type *VoidPtrTy = Type::getInt8PtrTy(Context);
+ Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
+ Type *Tys[3] = { VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context) };
+ Function *MemCpyFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy, Tys);
+ IRBuilder<> builder(InsertBlock->begin());
+ Value *DstCast = builder.CreateBitCast(Dst, VoidPtrTy, "tmp");
+ Value *SrcCast = builder.CreateBitCast(Src, VoidPtrTy, "tmp");
+
+ Value *Size;
+ if (IFI.DL == nullptr)
+ Size = ConstantExpr::getSizeOf(AggTy);
+ else
+ Size = ConstantInt::get(Type::getInt64Ty(Context),
+ IFI.DL->getTypeStoreSize(AggTy));
+
+ // Always generate a memcpy of alignment 1 here because we don't know
+ // the alignment of the src pointer. Other optimizations can infer
+ // better alignment.
+ Value *CallArgs[] = {
+ DstCast, SrcCast, Size,
+ ConstantInt::get(Type::getInt32Ty(Context), 1),
+ ConstantInt::getFalse(Context) // isVolatile
+ };
+ builder.CreateCall(MemCpyFn, CallArgs);
+}
+
+/// HandleByValArgument - When inlining a call site that has a byval argument,
+/// we have to make the implicit memcpy explicit by adding it.
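+///
+/// For a call site such as (a sketch)
+///
+///   call void @f(%struct.S* byval %s)
+///
+/// this returns a new "%s1 = alloca %struct.S" in the caller's entry block
+/// (the memcpy from %s is emitted later by HandleByValArgumentInit), unless
+/// the callee only reads memory and %s is already sufficiently aligned.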
+static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
+ const Function *CalledFunc,
+ InlineFunctionInfo &IFI,
+ unsigned ByValAlignment) {
+ PointerType *ArgTy = cast<PointerType>(Arg->getType());
+ Type *AggTy = ArgTy->getElementType();
+
+ // If the called function is readonly, then it could not mutate the caller's
+ // copy of the byval'd memory. In this case, it is safe to elide the copy and
+ // temporary.
+ if (CalledFunc->onlyReadsMemory()) {
+ // If the byval argument has a specified alignment that is greater than the
+ // passed in pointer, then we either have to round up the input pointer or
+ // give up on this transformation.
+ if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment.
+ return Arg;
+
+ // If the pointer is already known to be sufficiently aligned, or if we can
+ // round it up to a larger alignment, then we don't need a temporary.
+ if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
+ IFI.DL) >= ByValAlignment)
+ return Arg;
+
+ // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
+ // for code quality, but rarely happens and is required for correctness.
+ }
+
+ // Create the alloca. If we have DataLayout, use nice alignment.
+ unsigned Align = 1;
+ if (IFI.DL)
+ Align = IFI.DL->getPrefTypeAlignment(AggTy);
+
+ // If the byval had an alignment specified, we *must* use at least that
+ // alignment, as it is required by the byval argument (and uses of the
+ // pointer inside the callee).
+ Align = std::max(Align, ByValAlignment);
+
+ Function *Caller = TheCall->getParent()->getParent();
+
+ Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
+ &*Caller->begin()->begin());
+ IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));
+
+ // Uses of the argument in the function should use our new alloca
+ // instead.
+ return NewAlloca;
+}
+
+// isUsedByLifetimeMarker - Check whether this Value is used by a lifetime
+// intrinsic.
+static bool isUsedByLifetimeMarker(Value *V) {
+ for (User *U : V->users()) {
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
+ switch (II->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+// hasLifetimeMarkers - Check whether the given alloca already has
+// lifetime.start or lifetime.end intrinsics.
+static bool hasLifetimeMarkers(AllocaInst *AI) {
+ Type *Ty = AI->getType();
+ Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
+ Ty->getPointerAddressSpace());
+ if (Ty == Int8PtrTy)
+ return isUsedByLifetimeMarker(AI);
+
+ // Do a scan to find all the casts to i8*.
+ for (User *U : AI->users()) {
+ if (U->getType() != Int8PtrTy) continue;
+ if (U->stripPointerCasts() != AI) continue;
+ if (isUsedByLifetimeMarker(U))
+ return true;
+ }
+ return false;
+}
+
+/// updateInlinedAtInfo - Helper function used by fixupLineNumbers to
+/// recursively update InlinedAtEntry of a DebugLoc.
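+///
+/// For example (a sketch): a location "L1 inlined-at L2", when the call being
+/// inlined is itself at L3, becomes "L1 inlined-at (L2 inlined-at L3)", so
+/// the entire chain of inlinings is preserved.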
+static DebugLoc updateInlinedAtInfo(const DebugLoc &DL,
+ const DebugLoc &InlinedAtDL,
+ LLVMContext &Ctx) {
+ if (MDNode *IA = DL.getInlinedAt(Ctx)) {
+ DebugLoc NewInlinedAtDL
+ = updateInlinedAtInfo(DebugLoc::getFromDILocation(IA), InlinedAtDL, Ctx);
+ return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
+ NewInlinedAtDL.getAsMDNode(Ctx));
+ }
+
+ return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
+ InlinedAtDL.getAsMDNode(Ctx));
+}
+
+/// fixupLineNumbers - Update inlined instructions' line numbers to
+/// encode the location where these instructions are inlined.
+static void fixupLineNumbers(Function *Fn, Function::iterator FI,
+ Instruction *TheCall) {
+ DebugLoc TheCallDL = TheCall->getDebugLoc();
+ if (TheCallDL.isUnknown())
+ return;
+
+ for (; FI != Fn->end(); ++FI) {
+ for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
+ BI != BE; ++BI) {
+ DebugLoc DL = BI->getDebugLoc();
+ if (DL.isUnknown()) {
+ // If the inlined instruction has no line number, make it look as if it
+ // originates from the call location. This is important for
+ // ((__always_inline__, __nodebug__)) functions which must use caller
+ // location for all instructions in their function body.
+ BI->setDebugLoc(TheCallDL);
+ } else {
+ BI->setDebugLoc(updateInlinedAtInfo(DL, TheCallDL, BI->getContext()));
+ if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
+ LLVMContext &Ctx = BI->getContext();
+ MDNode *InlinedAt = BI->getDebugLoc().getInlinedAt(Ctx);
+ DVI->setOperand(2, createInlinedVariable(DVI->getVariable(),
+ InlinedAt, Ctx));
+ }
+ }
+ }
+ }
+}
+
+/// InlineFunction - This function inlines the called function into the basic
+/// block of the caller. This returns false if it is not possible to inline
+/// this call. The program is still in a well defined state if this occurs
+/// though.
+///
+/// Note that this only does one level of inlining. For example, if the
+/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
+/// exists in the instruction stream. Similarly this will inline a recursive
+/// function by one level.
+bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
+ bool InsertLifetime) {
Instruction *TheCall = CS.getInstruction();
- LLVMContext &Context = TheCall->getContext();
assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
"Instruction not in function!");
+ // If IFI has any state in it, zap it before we fill it in.
+ IFI.reset();
+
const Function *CalledFunc = CS.getCalledFunction();
- if (CalledFunc == 0 || // Can't inline external function or indirect
+ if (!CalledFunc || // Can't inline external function or indirect
CalledFunc->isDeclaration() || // call, or call to a vararg function!
CalledFunc->getFunctionType()->isVarArg()) return false;
-
- // If the call to the callee is not a tail call, we must clear the 'tail'
- // flags on any calls that we inline.
- bool MustClearTailCallFlags =
- !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());
-
// If the call to the callee cannot throw, set the 'nounwind' flag on any
// calls that we inline.
bool MarkNoUnwind = CS.doesNotThrow();
return false;
}
+ // Get the personality function from the callee if it contains a landing pad.
+ Value *CalleePersonality = nullptr;
+ for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
+ I != E; ++I)
+ if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
+ const BasicBlock *BB = II->getUnwindDest();
+ const LandingPadInst *LP = BB->getLandingPadInst();
+ CalleePersonality = LP->getPersonalityFn();
+ break;
+ }
+
+ // Find the personality function used by the landing pads of the caller. If it
+ // exists, then check to see that it matches the personality function used in
+ // the callee.
+ if (CalleePersonality) {
+ for (Function::const_iterator I = Caller->begin(), E = Caller->end();
+ I != E; ++I)
+ if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
+ const BasicBlock *BB = II->getUnwindDest();
+ const LandingPadInst *LP = BB->getLandingPadInst();
+
+ // If the personality functions match, then we can perform the
+ // inlining. Otherwise, we can't inline.
+ // TODO: This isn't 100% true. Some personality functions are proper
+ // supersets of others and can be used in place of the other.
+ if (LP->getPersonalityFn() != CalleePersonality)
+ return false;
+
+ break;
+ }
+ }
+
// Get an iterator to the last basic block in the function, which will have
// the new function inlined after it.
- //
Function::iterator LastBlock = &Caller->back();
// Make sure to capture all of the return instructions from the cloned
ClonedCodeInfo InlinedFunctionInfo;
Function::iterator FirstNewBlock;
- { // Scope to destroy ValueMap after cloning.
- DenseMap<const Value*, Value*> ValueMap;
+ { // Scope to destroy VMap after cloning.
+ ValueToValueMapTy VMap;
+ // Keep a list of pair (dst, src) to emit byval initializations.
+ SmallVector<std::pair<Value*, Value*>, 4> ByValInit;
assert(CalledFunc->arg_size() == CS.arg_size() &&
"No varargs calls can be inlined!");
// by them explicit. However, we don't do this if the callee is readonly
// or readnone, because the copy would be unneeded: the callee doesn't
// modify the struct.
- if (CalledFunc->paramHasAttr(ArgNo+1, Attribute::ByVal) &&
- !CalledFunc->onlyReadsMemory()) {
- const Type *AggTy = cast<PointerType>(I->getType())->getElementType();
- const Type *VoidPtrTy =
- Type::getInt8PtrTy(Context);
-
- // Create the alloca. If we have TargetData, use nice alignment.
- unsigned Align = 1;
- if (TD) Align = TD->getPrefTypeAlignment(AggTy);
- Value *NewAlloca = new AllocaInst(AggTy, 0, Align,
- I->getName(),
- &*Caller->begin()->begin());
- // Emit a memcpy.
- const Type *Tys[] = { Type::getInt64Ty(Context) };
- Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
- Intrinsic::memcpy,
- Tys, 1);
- Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
- Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall);
-
- Value *Size;
- if (TD == 0)
- Size = ConstantExpr::getSizeOf(AggTy);
- else
- Size = ConstantInt::get(Type::getInt64Ty(Context),
- TD->getTypeStoreSize(AggTy));
-
- // Always generate a memcpy of alignment 1 here because we don't know
- // the alignment of the src pointer. Other optimizations can infer
- // better alignment.
- Value *CallArgs[] = {
- DestCast, SrcCast, Size,
- ConstantInt::get(Type::getInt32Ty(Context), 1)
- };
- CallInst *TheMemCpy =
- CallInst::Create(MemCpyFn, CallArgs, CallArgs+4, "", TheCall);
-
- // If we have a call graph, update it.
- if (CG) {
- CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
- CallGraphNode *CallerNode = (*CG)[Caller];
- CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
- }
-
- // Uses of the argument in the function should use our new alloca
- // instead.
- ActualArg = NewAlloca;
+ if (CS.isByValArgument(ArgNo)) {
+ ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
+ CalledFunc->getParamAlignment(ArgNo+1));
+ if (ActualArg != *AI)
+ ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
}
- ValueMap[I] = ActualArg;
+ VMap[I] = ActualArg;
}
// We want the inliner to prune the code as it copies. We would LOVE to
// have no dead or constant instructions leftover after inlining occurs
// (which can happen, e.g., because an argument was constant), but we'll be
// happy with whatever the cloner can do.
- CloneAndPruneFunctionInto(Caller, CalledFunc, ValueMap, Returns, ".i",
- &InlinedFunctionInfo, TD, TheCall);
+ CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
+ /*ModuleLevelChanges=*/false, Returns, ".i",
+ &InlinedFunctionInfo, IFI.DL, TheCall);
// Remember the first block that is newly cloned over.
FirstNewBlock = LastBlock; ++FirstNewBlock;
+ // Inject byval arguments initialization.
+ for (std::pair<Value*, Value*> &Init : ByValInit)
+ HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
+ FirstNewBlock, IFI);
+
// Update the callgraph if requested.
- if (CG)
- UpdateCallGraphAfterInlining(CS, FirstNewBlock, ValueMap, *CG);
+ if (IFI.CG)
+ UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);
+
+ // Update inlined instructions' line number information.
+ fixupLineNumbers(Caller, FirstNewBlock, TheCall);
+
+ // Clone existing noalias metadata if necessary.
+ CloneAliasScopeMetadata(CS, VMap);
+
+ // Add noalias metadata if necessary.
+ AddAliasScopeMetadata(CS, VMap, IFI.DL);
}
// If there are any alloca instructions in the block that used to be the entry
// block for the callee, move them to the entry block of the caller. First
// calculate which instruction they should be inserted before. We insert the
// instructions at the end of the current alloca list.
- //
{
BasicBlock::iterator InsertPoint = Caller->begin()->begin();
for (BasicBlock::iterator I = FirstNewBlock->begin(),
E = FirstNewBlock->end(); I != E; ) {
AllocaInst *AI = dyn_cast<AllocaInst>(I++);
- if (AI == 0) continue;
+ if (!AI) continue;
// If the alloca is now dead, remove it. This often occurs due to code
// specialization.
if (!isa<Constant>(AI->getArraySize()))
continue;
- // Keep track of the static allocas that we inline into the caller if the
- // StaticAllocas pointer is non-null.
- if (StaticAllocas) StaticAllocas->push_back(AI);
+ // Keep track of the static allocas that we inline into the caller.
+ IFI.StaticAllocas.push_back(AI);
// Scan for the block of allocas that we can move over, and move them
// all at once.
while (isa<AllocaInst>(I) &&
isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
- if (StaticAllocas) StaticAllocas->push_back(cast<AllocaInst>(I));
+ IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
++I;
}
}
}
+ bool InlinedMustTailCalls = false;
+ if (InlinedFunctionInfo.ContainsCalls) {
+ CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
+ if (CallInst *CI = dyn_cast<CallInst>(TheCall))
+ CallSiteTailKind = CI->getTailCallKind();
+
+ for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
+ ++BB) {
+ for (Instruction &I : *BB) {
+ CallInst *CI = dyn_cast<CallInst>(&I);
+ if (!CI)
+ continue;
+
+ // We need to reduce the strength of any inlined tail calls. For
+ // musttail, we have to avoid introducing potential unbounded stack
+ // growth. For example, if functions 'f' and 'g' are mutually recursive
+ // with musttail, we can inline 'g' into 'f' so long as we preserve
+ // musttail on the cloned call to 'f'. If either the inlined call site
+ // or the cloned call site is *not* musttail, the program already has
+ // one frame of stack growth, so it's safe to remove musttail. Here is
+ // a table of example transformations:
+ //
+ // f -> musttail g -> musttail f ==> f -> musttail f
+ // f -> musttail g -> tail f ==> f -> tail f
+ // f -> g -> musttail f ==> f -> f
+ // f -> g -> tail f ==> f -> f
+ CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
+ ChildTCK = std::min(CallSiteTailKind, ChildTCK);
+ CI->setTailCallKind(ChildTCK);
+ InlinedMustTailCalls |= CI->isMustTailCall();
+
+ // Calls inlined through a 'nounwind' call site should be marked
+ // 'nounwind'.
+ if (MarkNoUnwind)
+ CI->setDoesNotThrow();
+ }
+ }
+ }
+
+ // Leave lifetime markers for the static alloca's, scoping them to the
+ // function we just inlined.
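+  // The emitted markers look roughly like (a sketch):
+  //   call void @llvm.lifetime.start(i64 <size>, i8* <alloca>)
+  //   ...            ; inlined body
+  //   call void @llvm.lifetime.end(i64 <size>, i8* <alloca>)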
+ if (InsertLifetime && !IFI.StaticAllocas.empty()) {
+ IRBuilder<> builder(FirstNewBlock->begin());
+ for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
+ AllocaInst *AI = IFI.StaticAllocas[ai];
+
+ // If the alloca is already scoped to something smaller than the whole
+ // function then there's no need to add redundant, less accurate markers.
+ if (hasLifetimeMarkers(AI))
+ continue;
+
+ // Try to determine the size of the allocation.
+ ConstantInt *AllocaSize = nullptr;
+ if (ConstantInt *AIArraySize =
+ dyn_cast<ConstantInt>(AI->getArraySize())) {
+ if (IFI.DL) {
+ Type *AllocaType = AI->getAllocatedType();
+ uint64_t AllocaTypeSize = IFI.DL->getTypeAllocSize(AllocaType);
+ uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
+ assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
+ // Check that array size doesn't saturate uint64_t and doesn't
+ // overflow when it's multiplied by type size.
+ if (AllocaArraySize != ~0ULL &&
+ UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
+ AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
+ AllocaArraySize * AllocaTypeSize);
+ }
+ }
+ }
+
+ builder.CreateLifetimeStart(AI, AllocaSize);
+ for (ReturnInst *RI : Returns) {
+ // Don't insert llvm.lifetime.end calls between a musttail call and a
+ // return. The return kills all local allocas.
+ if (InlinedMustTailCalls &&
+ RI->getParent()->getTerminatingMustTailCall())
+ continue;
+ IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
+ }
+ }
+ }
+
// If the inlined code contained dynamic alloca instructions, wrap the inlined
// code with llvm.stacksave/llvm.stackrestore intrinsics.
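+  // A sketch of the wrapping:
+  //   %savedstack = call i8* @llvm.stacksave()
+  //   ...            ; inlined body containing dynamic allocas
+  //   call void @llvm.stackrestore(i8* %savedstack)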
if (InlinedFunctionInfo.ContainsDynamicAllocas) {
Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);
- // If we are preserving the callgraph, add edges to the stacksave/restore
- // functions for the calls we insert.
- CallGraphNode *StackSaveCGN = 0, *StackRestoreCGN = 0, *CallerNode = 0;
- if (CG) {
- StackSaveCGN = CG->getOrInsertFunction(StackSave);
- StackRestoreCGN = CG->getOrInsertFunction(StackRestore);
- CallerNode = (*CG)[Caller];
- }
-
// Insert the llvm.stacksave.
- CallInst *SavedPtr = CallInst::Create(StackSave, "savedstack",
- FirstNewBlock->begin());
- if (CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);
+ CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
+ .CreateCall(StackSave, "savedstack");
// Insert a call to llvm.stackrestore before any return instructions in the
// inlined function.
- for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
- CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", Returns[i]);
- if (CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
- }
-
- // Count the number of StackRestore calls we insert.
- unsigned NumStackRestores = Returns.size();
-
- // If we are inlining an invoke instruction, insert restores before each
- // unwind. These unwinds will be rewritten into branches later.
- if (InlinedFunctionInfo.ContainsUnwinds && isa<InvokeInst>(TheCall)) {
- for (Function::iterator BB = FirstNewBlock, E = Caller->end();
- BB != E; ++BB)
- if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
- CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", UI);
- if (CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
- ++NumStackRestores;
- }
+ for (ReturnInst *RI : Returns) {
+ // Don't insert llvm.stackrestore calls between a musttail call and a
+ // return. The return will restore the stack pointer.
+ if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
+ continue;
+ IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
}
}
- // If we are inlining tail call instruction through a call site that isn't
- // marked 'tail', we must remove the tail marker for any calls in the inlined
- // code. Also, calls inlined through a 'nounwind' call site should be marked
- // 'nounwind'.
- if (InlinedFunctionInfo.ContainsCalls &&
- (MustClearTailCallFlags || MarkNoUnwind)) {
- for (Function::iterator BB = FirstNewBlock, E = Caller->end();
- BB != E; ++BB)
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
- if (CallInst *CI = dyn_cast<CallInst>(I)) {
- if (MustClearTailCallFlags)
- CI->setTailCall(false);
- if (MarkNoUnwind)
- CI->setDoesNotThrow();
- }
- }
+ // If we are inlining for an invoke instruction, we must make sure to rewrite
+ // any call instructions into invoke instructions.
+ if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
+ HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);
- // If we are inlining through a 'nounwind' call site then any inlined 'unwind'
- // instructions are unreachable.
- if (InlinedFunctionInfo.ContainsUnwinds && MarkNoUnwind)
- for (Function::iterator BB = FirstNewBlock, E = Caller->end();
- BB != E; ++BB) {
- TerminatorInst *Term = BB->getTerminator();
- if (isa<UnwindInst>(Term)) {
- new UnreachableInst(Context, Term);
- BB->getInstList().erase(Term);
+ // Handle any inlined musttail call sites. In order for a new call site to be
+ // musttail, the source of the clone and the inlined call site must have been
+ // musttail. Therefore it's safe to return without merging control into the
+ // phi below.
+ if (InlinedMustTailCalls) {
+ // Check if we need to bitcast the result of any musttail calls.
+ Type *NewRetTy = Caller->getReturnType();
+ bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;
+
+ // Handle the returns preceded by musttail calls separately.
+ SmallVector<ReturnInst *, 8> NormalReturns;
+ for (ReturnInst *RI : Returns) {
+ CallInst *ReturnedMustTail =
+ RI->getParent()->getTerminatingMustTailCall();
+ if (!ReturnedMustTail) {
+ NormalReturns.push_back(RI);
+ continue;
}
+ if (!NeedBitCast)
+ continue;
+
+ // Delete the old return and any preceding bitcast.
+ BasicBlock *CurBB = RI->getParent();
+ auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
+ RI->eraseFromParent();
+ if (OldCast)
+ OldCast->eraseFromParent();
+
+ // Insert a new bitcast and return with the right type.
+ IRBuilder<> Builder(CurBB);
+ Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
}
- // If we are inlining for an invoke instruction, we must make sure to rewrite
- // any inlined 'unwind' instructions into branches to the invoke exception
- // destination, and call instructions into invoke instructions.
- if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
- HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);
+ // Leave behind the normal returns so we can merge control flow.
+ std::swap(Returns, NormalReturns);
+ }
// If we cloned in _exactly one_ basic block, and if that block ends in a
// return instruction, we splice the body of the inlined callee directly into
// If the call site was an invoke instruction, add a branch to the normal
// destination.
- if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
- BranchInst::Create(II->getNormalDest(), TheCall);
+ if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
+ BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
+ NewBr->setDebugLoc(Returns[0]->getDebugLoc());
+ }
// If the return instruction returned a value, replace uses of the call with
// uses of the returned value.
// "starter" and "ender" blocks. How we accomplish this depends on whether
// this is an invoke instruction or a call instruction.
BasicBlock *AfterCallBB;
+ BranchInst *CreatedBranchToNormalDest = nullptr;
if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
// Add an unconditional branch to make this look like the CallInst case...
- BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
+ CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);
// Split the basic block. This guarantees that no PHI nodes will have to be
// updated due to new incoming edges, and make the invoke case more
// symmetric to the call case.
- AfterCallBB = OrigBB->splitBasicBlock(NewBr,
+ AfterCallBB = OrigBB->splitBasicBlock(CreatedBranchToNormalDest,
CalledFunc->getName()+".exit");
} else { // It's a call
// Handle all of the return instructions that we just cloned in, and eliminate
// any users of the original call/invoke instruction.
- const Type *RTy = CalledFunc->getReturnType();
+ Type *RTy = CalledFunc->getReturnType();
+ PHINode *PHI = nullptr;
if (Returns.size() > 1) {
// The PHI node should go at the front of the new basic block to merge all
// possible incoming values.
- PHINode *PHI = 0;
if (!TheCall->use_empty()) {
- PHI = PHINode::Create(RTy, TheCall->getName(),
+ PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
AfterCallBB->begin());
// Anything that used the result of the function call should now use the
// PHI node as their operand.
"Ret value not consistent in function!");
PHI->addIncoming(RI->getReturnValue(), RI->getParent());
}
-
- // Now that we inserted the PHI, check to see if it has a single value
- // (e.g. all the entries are the same or undef). If so, remove the PHI so
- // it doesn't block other optimizations.
- if (Value *V = PHI->hasConstantValue()) {
- PHI->replaceAllUsesWith(V);
- PHI->eraseFromParent();
- }
}
// Add a branch to the merge points and remove return instructions.
+ DebugLoc Loc;
for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
ReturnInst *RI = Returns[i];
- BranchInst::Create(AfterCallBB, RI);
+      BranchInst *BI = BranchInst::Create(AfterCallBB, RI);
+ Loc = RI->getDebugLoc();
+ BI->setDebugLoc(Loc);
RI->eraseFromParent();
}
+ // We need to set the debug location to *somewhere* inside the
+ // inlined function. The line number may be nonsensical, but the
+ // instruction will at least be associated with the right
+ // function.
+ if (CreatedBranchToNormalDest)
+ CreatedBranchToNormalDest->setDebugLoc(Loc);
} else if (!Returns.empty()) {
// Otherwise, if there is exactly one return value, just replace anything
// using the return value of the call with the computed value.
TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
}
+ // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
+ BasicBlock *ReturnBB = Returns[0]->getParent();
+ ReturnBB->replaceAllUsesWith(AfterCallBB);
+
// Splice the code from the return block into the block that it will return
// to, which contains the code that was after the call.
- BasicBlock *ReturnBB = Returns[0]->getParent();
AfterCallBB->getInstList().splice(AfterCallBB->begin(),
ReturnBB->getInstList());
- // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
- ReturnBB->replaceAllUsesWith(AfterCallBB);
+ if (CreatedBranchToNormalDest)
+ CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
// Delete the return instruction now and empty ReturnBB now.
Returns[0]->eraseFromParent();
// Since we are now done with the Call/Invoke, we can delete it.
TheCall->eraseFromParent();
+ // If we inlined any musttail calls and the original return is now
+ // unreachable, delete it. It can only contain a bitcast and ret.
+ if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
+ AfterCallBB->eraseFromParent();
+
// We should always be able to fold the entry block of the function into the
// single predecessor of the block...
assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
// Splice the code entry block into calling block, right before the
// unconditional branch.
- OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());
CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
+ OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());
// Remove the unconditional branch.
OrigBB->getInstList().erase(Br);
// Now we can remove the CalleeEntry block, which is now empty.
Caller->getBasicBlockList().erase(CalleeEntry);
+ // If we inserted a phi node, check to see if it has a single value (e.g. all
+ // the entries are the same or undef). If so, remove the PHI so it doesn't
+ // block other optimizations.
+ if (PHI) {
+ if (Value *V = SimplifyInstruction(PHI, IFI.DL)) {
+ PHI->replaceAllUsesWith(V);
+ PHI->eraseFromParent();
+ }
+ }
+
return true;
}