1 //===- PlaceSafepoints.cpp - Place GC Safepoints --------------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // Place garbage collection safepoints at appropriate locations in the IR. This
11 // does not make relocation semantics or variable liveness explicit. That's
12 // done by RewriteStatepointsForGC.
15 // - A call is said to be "parseable" if there is a stack map generated for the
16 // return PC of the call. A runtime can determine where values listed in the
17 // deopt arguments and (after RewriteStatepointsForGC) gc arguments are located
18 // on the stack when the code is suspended inside such a call. Every parse
19 // point is represented by a call wrapped in a gc.statepoint intrinsic.
20 // - A "poll" is an explicit check in the generated code to determine if the
21 // runtime needs the generated code to cooperate by calling a helper routine
22 // and thus suspending its execution at a known state. The call to the helper
23 // routine will be parseable. The (gc & runtime specific) logic of a poll is
24 // assumed to be provided in a function of the name "gc.safepoint_poll".
26 // We aim to insert polls such that running code can quickly be brought to a
27 // well defined state for inspection by the collector. In the current
28 // implementation, this is done via the insertion of poll sites at method entry
29 // and the backedge of most loops. We try to avoid inserting more polls than
30 // are necessary to ensure a finite period between poll sites. This is not
31 // because the poll itself is expensive in the generated code; it's not. Polls
32 // do tend to impact the optimizer itself in negative ways; we'd like to avoid
33 // perturbing the optimization of the method as much as we can.
35 // We also need to make most call sites parseable. The callee might execute a
36 // poll (or otherwise be inspected by the GC). If so, the entire stack
37 // (including the suspended frame of the current method) must be parseable.
39 // This pass will insert:
40 // - Call parse points ("call safepoints") for any call which may need to
41 // reach a safepoint during the execution of the callee function.
42 // - Backedge safepoint polls and entry safepoint polls to ensure that
43 // executing code reaches a safepoint poll in a finite amount of time.
45 // We do not currently support return statepoints, but adding them would not
46 // be hard. They are not required for correctness - entry safepoints are an
47 // alternative - but some GCs may prefer them. Patches welcome.
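//
// For illustration only (this sketch is not part of the pass, and the symbol
// names are hypothetical): a minimal gc.safepoint_poll body that a frontend
// might provide could look like
//
//   @__poll_flag = external global i8
//   declare void @do_safepoint()
//
//   define void @gc.safepoint_poll() {
//   entry:
//     %flag = load volatile i8, i8* @__poll_flag
//     %need.poll = icmp ne i8 %flag, 0
//     br i1 %need.poll, label %slow, label %done
//   slow:
//     call void @do_safepoint()   ; slow path; this call becomes a parse point
//     br label %done
//   done:
//     ret void
//   }
//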
49 //===----------------------------------------------------------------------===//
51 #include "llvm/Pass.h"
52 #include "llvm/IR/LegacyPassManager.h"
53 #include "llvm/ADT/SetOperations.h"
54 #include "llvm/ADT/SetVector.h"
55 #include "llvm/ADT/Statistic.h"
56 #include "llvm/Analysis/LoopPass.h"
57 #include "llvm/Analysis/LoopInfo.h"
58 #include "llvm/Analysis/ScalarEvolution.h"
59 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
60 #include "llvm/Analysis/CFG.h"
61 #include "llvm/Analysis/InstructionSimplify.h"
62 #include "llvm/IR/BasicBlock.h"
63 #include "llvm/IR/CallSite.h"
64 #include "llvm/IR/Dominators.h"
65 #include "llvm/IR/Function.h"
66 #include "llvm/IR/IRBuilder.h"
67 #include "llvm/IR/InstIterator.h"
68 #include "llvm/IR/Instructions.h"
69 #include "llvm/IR/Intrinsics.h"
70 #include "llvm/IR/IntrinsicInst.h"
71 #include "llvm/IR/Module.h"
72 #include "llvm/IR/Statepoint.h"
73 #include "llvm/IR/Value.h"
74 #include "llvm/IR/Verifier.h"
75 #include "llvm/Support/Debug.h"
76 #include "llvm/Support/CommandLine.h"
77 #include "llvm/Support/raw_ostream.h"
78 #include "llvm/Transforms/Scalar.h"
79 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
80 #include "llvm/Transforms/Utils/Cloning.h"
81 #include "llvm/Transforms/Utils/Local.h"
83 #define DEBUG_TYPE "safepoint-placement"
84 STATISTIC(NumEntrySafepoints, "Number of entry safepoints inserted");
85 STATISTIC(NumCallSafepoints, "Number of call safepoints inserted");
86 STATISTIC(NumBackedgeSafepoints, "Number of backedge safepoints inserted");
88 STATISTIC(CallInLoop, "Number of loops w/o safepoints due to calls in loop");
89 STATISTIC(FiniteExecution, "Number of loops w/o safepoints due to finite execution");
93 // Ignore opportunities to avoid placing safepoints on backedges, useful for
95 static cl::opt<bool> AllBackedges("spp-all-backedges", cl::Hidden,
98 /// If true, do not place backedge safepoints in counted loops.
99 static cl::opt<bool> SkipCounted("spp-counted", cl::Hidden, cl::init(true));
101 // If true, split the backedge of a loop when placing the safepoint, otherwise
102 // split the latch block itself. Supporting both is useful for
103 // experimentation, but in practice, it looks like splitting the backedge
105 static cl::opt<bool> SplitBackedge("spp-split-backedge", cl::Hidden,
108 // Print tracing output
109 static cl::opt<bool> TraceLSP("spp-trace", cl::Hidden, cl::init(false));
113 /// An analysis pass whose purpose is to identify each of the backedges in
114 /// the function which require a safepoint poll to be inserted.
115 struct PlaceBackedgeSafepointsImpl : public FunctionPass {
118 /// The output of the pass - a list of the backedges (described by
119 /// pointing at the branch) which need a poll inserted.
120 std::vector<TerminatorInst *> PollLocations;
122 /// True unless we're running spp-no-call, in which case we need to disable
123 /// the call-dependent placement opts.
124 bool CallSafepointsEnabled;
126 ScalarEvolution *SE = nullptr;
127 DominatorTree *DT = nullptr;
128 LoopInfo *LI = nullptr;
130 PlaceBackedgeSafepointsImpl(bool CallSafepoints = false)
131 : FunctionPass(ID), CallSafepointsEnabled(CallSafepoints) {
132 initializePlaceBackedgeSafepointsImplPass(*PassRegistry::getPassRegistry());
135 bool runOnLoop(Loop *);
136 void runOnLoopAndSubLoops(Loop *L) {
137 // Visit all the subloops
138 for (auto I = L->begin(), E = L->end(); I != E; I++)
139 runOnLoopAndSubLoops(*I);
143 bool runOnFunction(Function &F) {
144 SE = &getAnalysis<ScalarEvolution>();
145 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
146 LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
147 for (auto I = LI->begin(), E = LI->end(); I != E; I++) {
148 runOnLoopAndSubLoops(*I);
153 void getAnalysisUsage(AnalysisUsage &AU) const override {
154 AU.addRequired<DominatorTreeWrapperPass>();
155 AU.addRequired<ScalarEvolution>();
156 AU.addRequired<LoopInfoWrapperPass>();
157 // We no longer modify the IR at all in this pass. Thus all
158 // analyses are preserved.
159 AU.setPreservesAll();
164 static cl::opt<bool> NoEntry("spp-no-entry", cl::Hidden, cl::init(false));
165 static cl::opt<bool> NoCall("spp-no-call", cl::Hidden, cl::init(false));
166 static cl::opt<bool> NoBackedge("spp-no-backedge", cl::Hidden, cl::init(false));
169 struct PlaceSafepoints : public ModulePass {
170 static char ID; // Pass identification, replacement for typeid
172 PlaceSafepoints() : ModulePass(ID) {
173 initializePlaceSafepointsPass(*PassRegistry::getPassRegistry());
175 bool runOnModule(Module &M) override {
176 bool modified = false;
177 for (Function &F : M) {
178 modified |= runOnFunction(F);
182 bool runOnFunction(Function &F);
184 void getAnalysisUsage(AnalysisUsage &AU) const override {
185 // We modify the graph wholesale (inlining, block insertion, etc). We
186 // preserve nothing at the moment. We could potentially preserve dom tree
187 // if that was worth doing
192 // Insert a safepoint poll immediately before the given instruction. Does
193 // not handle the parsability of state at the runtime call; that's the
196 InsertSafepointPoll(DominatorTree &DT, Instruction *after,
197 std::vector<CallSite> &ParsePointsNeeded /*rval*/);
199 static bool isGCLeafFunction(const CallSite &CS);
201 static bool needsStatepoint(const CallSite &CS) {
202 if (isGCLeafFunction(CS))
205 CallInst *call = cast<CallInst>(CS.getInstruction());
206 if (call->isInlineAsm())
209 if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS)) {
215 static Value *ReplaceWithStatepoint(const CallSite &CS, Pass *P);
217 /// Returns true if this loop is known to contain a call safepoint which
218 /// must unconditionally execute on any iteration of the loop which returns
219 /// to the loop header via an edge from Pred. Returns a conservatively
220 /// correct answer; i.e. false is always valid.
221 static bool containsUnconditionalCallSafepoint(Loop *L, BasicBlock *Header,
224 // In general, we're looking for any cut of the graph which ensures
225 // there's a call safepoint along every edge between Header and Pred.
226 // For the moment, we look only for the 'cuts' that consist of a single call
227 // instruction in a block which is dominated by the Header and dominates the
228 // loop latch (Pred) block. Somewhat surprisingly, walking the entire chain
229 // of such dominating blocks gets substantially more occurrences than just
230 // checking the Pred and Header blocks themselves. This may be due to the
231 // density of loop exit conditions caused by range and null checks.
232 // TODO: structure this as an analysis pass, cache the result for subloops,
233 // avoid dom tree recalculations
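// For example (hypothetical CFG): with Header -> A -> Pred forming the loop,
// a call needing a statepoint in A, Header dominating A, and A dominating
// Pred, every trip around the backedge passes through that call, so the
// backedge poll for this latch can be skipped.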
234 assert(DT.dominates(Header, Pred) && "loop latch not dominated by header?");
236 BasicBlock *Current = Pred;
238 for (Instruction &I : *Current) {
239 if (auto CS = CallSite(&I))
240 // Note: Technically, needing a safepoint isn't quite the right
241 // condition here. We should instead be checking if the target method
243 // unconditional poll. In practice, this is only a theoretical concern
244 // since we don't have any methods with conditional-only safepoint
246 if (needsStatepoint(CS))
250 if (Current == Header)
252 Current = DT.getNode(Current)->getIDom()->getBlock();
258 /// Returns true if this loop is known to terminate in a finite number of
259 /// iterations. Note that this function may return false for a loop which
260 /// does actually terminate in a finite constant number of iterations due to
261 /// conservatism in the analysis.
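/// For example, ScalarEvolution can normally compute a max backedge-taken
/// count for a loop like "for (int i = 0; i < 100; i++)", so this returns true
/// and the caller can skip placing a backedge poll in that loop.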
262 static bool mustBeFiniteCountedLoop(Loop *L, ScalarEvolution *SE,
264 // Only used when SkipCounted is off
265 const unsigned upperTripBound = 8192;
267 // A conservative bound on the loop as a whole.
268 const SCEV *MaxTrips = SE->getMaxBackedgeTakenCount(L);
269 if (MaxTrips != SE->getCouldNotCompute()) {
270 if (SE->getUnsignedRange(MaxTrips).getUnsignedMax().ult(upperTripBound))
273 SE->getUnsignedRange(MaxTrips).getUnsignedMax().isIntN(32))
277 // If this is a conditional branch to the header with the alternate path
278 // being outside the loop, we can ask questions about the execution frequency
279 // of the exit block.
280 if (L->isLoopExiting(Pred)) {
281 // This returns an exact expression only. TODO: We really only need an
282 // upper bound here, but SE doesn't expose that.
283 const SCEV *MaxExec = SE->getExitCount(L, Pred);
284 if (MaxExec != SE->getCouldNotCompute()) {
285 if (SE->getUnsignedRange(MaxExec).getUnsignedMax().ult(upperTripBound))
288 SE->getUnsignedRange(MaxExec).getUnsignedMax().isIntN(32))
293 return /* not finite */ false;
296 static void scanOneBB(Instruction *start, Instruction *end,
297 std::vector<CallInst *> &calls,
298 std::set<BasicBlock *> &seen,
299 std::vector<BasicBlock *> &worklist) {
300 for (BasicBlock::iterator itr(start);
301 itr != start->getParent()->end() && itr != BasicBlock::iterator(end);
303 if (CallInst *CI = dyn_cast<CallInst>(&*itr)) {
306 // FIXME: This code does not handle invokes
307 assert(!dyn_cast<InvokeInst>(&*itr) &&
308 "support for invokes in poll code needed");
309 // Only add the successor blocks if we reach the terminator instruction
310 // without encountering end first
311 if (itr->isTerminator()) {
312 BasicBlock *BB = itr->getParent();
313 for (BasicBlock *Succ : successors(BB)) {
314 if (seen.count(Succ) == 0) {
315 worklist.push_back(Succ);
322 static void scanInlinedCode(Instruction *start, Instruction *end,
323 std::vector<CallInst *> &calls,
324 std::set<BasicBlock *> &seen) {
326 std::vector<BasicBlock *> worklist;
327 seen.insert(start->getParent());
328 scanOneBB(start, end, calls, seen, worklist);
329 while (!worklist.empty()) {
330 BasicBlock *BB = worklist.back();
332 scanOneBB(&*BB->begin(), end, calls, seen, worklist);
336 bool PlaceBackedgeSafepointsImpl::runOnLoop(Loop *L) {
337 // Loop through all loop latches (branches controlling backedges). We need
338 // to place a safepoint on every backedge (potentially).
339 // Note: In common usage, there will be only one edge due to LoopSimplify
340 // having run sometime earlier in the pipeline, but this code must be correct
341 // w.r.t. loops with multiple backedges.
342 BasicBlock *header = L->getHeader();
343 SmallVector<BasicBlock*, 16> LoopLatches;
344 L->getLoopLatches(LoopLatches);
345 for (BasicBlock *pred : LoopLatches) {
346 assert(L->contains(pred));
348 // Make a policy decision about whether this loop needs a safepoint or
349 // not. Note that this is about unburdening the optimizer in loops, not
350 // avoiding the runtime cost of the actual safepoint.
352 if (mustBeFiniteCountedLoop(L, SE, pred)) {
354 errs() << "skipping safepoint placement in finite loop\n";
358 if (CallSafepointsEnabled &&
359 containsUnconditionalCallSafepoint(L, header, pred, *DT)) {
360 // Note: This is only semantically legal since we won't do any further
361 // IPO or inlining before the actual call insertion. If we did, we
362 // might later lose this call safepoint.
364 errs() << "skipping safepoint placement due to unconditional call\n";
370 // TODO: We can create an inner loop which runs a finite number of
371 // iterations with an outer loop which contains a safepoint. This would
372 // not help runtime performance that much, but it might help our ability to
373 // optimize the inner loop.
375 // Safepoint insertion would involve creating a new basic block (as the
376 // target of the current backedge) which does the safepoint (of all live
377 // variables) and branches to the true header
378 TerminatorInst *term = pred->getTerminator();
381 errs() << "[LSP] terminator instruction: ";
385 PollLocations.push_back(term);
391 static Instruction *findLocationForEntrySafepoint(Function &F,
394 // Conceptually, this poll needs to be on method entry, but in
395 // practice, we place it as late in the entry block as possible. We
396 // can place it as late as we want as long as it dominates all calls
397 // that can grow the stack. This, combined with backedge polls,
398 // give us all the progress guarantees we need.
400 // Due to the way the frontend generates IR, we may have a couple of initial
401 // basic blocks before the first bytecode. These will be single-entry
402 // single-exit blocks which conceptually are just part of the first 'real
403 // basic block'. Since we don't have deopt state until the first bytecode,
404 // walk forward until we've found the first unconditional branch or merge.
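// For example, if the entry block is a handful of allocas and bitcasts
// followed by a call into the runtime, the walk below stops at that call and
// the entry poll is placed just before it.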
406 // hasNextInstruction and nextInstruction are used to iterate
407 // through a "straight line" execution sequence.
409 auto hasNextInstruction = [](Instruction *I) {
410 if (!I->isTerminator()) {
413 BasicBlock *nextBB = I->getParent()->getUniqueSuccessor();
414 return nextBB && (nextBB->getUniquePredecessor() != nullptr);
417 auto nextInstruction = [&hasNextInstruction](Instruction *I) {
418 assert(hasNextInstruction(I) &&
419 "first check if there is a next instruction!");
420 if (I->isTerminator()) {
421 return I->getParent()->getUniqueSuccessor()->begin();
423 return std::next(BasicBlock::iterator(I));
427 Instruction *cursor = nullptr;
428 for (cursor = F.getEntryBlock().begin(); hasNextInstruction(cursor);
429 cursor = nextInstruction(cursor)) {
431 // We need to stop going forward as soon as we see a call that can
432 // grow the stack (i.e. the call target has a non-zero frame
434 if (CallSite(cursor)) {
435 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(cursor)) {
436 // llvm.assume(...) are not really calls.
437 if (II->getIntrinsicID() == Intrinsic::assume) {
440 // llvm.frameescape() intrinsic is not a real call. The intrinsic can
441 // exist only in the entry block.
442 // Inserting a statepoint before llvm.frameescape() may split the
443 // entry block, and push the intrinsic out of the entry block.
444 if (II->getIntrinsicID() == Intrinsic::frameescape) {
452 assert((hasNextInstruction(cursor) || cursor->isTerminator()) &&
453 "either we stopped because of a call, or because of terminator");
455 if (cursor->isTerminator()) {
459 BasicBlock *BB = cursor->getParent();
460 SplitBlock(BB, cursor, nullptr);
462 // Note: SplitBlock modifies the DT. Simply passing a Pass (which is a
463 // module pass) is not enough.
466 // SplitBlock updates the DT
467 DEBUG(DT.verifyDomTree());
469 return BB->getTerminator();
472 /// Identify the list of call sites which need to have parseable state
473 static void findCallSafepoints(Function &F,
474 std::vector<CallSite> &Found /*rval*/) {
475 assert(Found.empty() && "must be empty!");
476 for (Instruction &I : inst_range(F)) {
477 Instruction *inst = &I;
478 if (isa<CallInst>(inst) || isa<InvokeInst>(inst)) {
481 // No safepoint needed or wanted
482 if (!needsStatepoint(CS)) {
491 /// Implement a unique function which doesn't require us to sort the input
492 /// vector. Doing so has the effect of changing the output of a couple of
493 /// tests in ways which make them less useful in testing fused safepoints.
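/// For example, {A, B, A, C} becomes {A, B, C}: duplicates are dropped while
/// the first-seen order of the remaining elements is preserved.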
494 template <typename T> static void unique_unsorted(std::vector<T> &vec) {
497 vec.reserve(vec.size());
500 if (seen.insert(V).second) {
506 static std::string GCSafepointPollName("gc.safepoint_poll");
508 static bool isGCSafepointPoll(Function &F) {
509 return F.getName().equals(GCSafepointPollName);
512 /// Returns true if this function should be rewritten to include safepoint
513 /// polls and parseable call sites. The main point of this function is to be
514 /// an extension point for custom logic.
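/// For example, a function defined in IR as
///   define void @foo() gc "statepoint-example" { ... }
/// is rewritten, while a function without that "gc" attribute is left alone.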
515 static bool shouldRewriteFunction(Function &F) {
516 // TODO: This should check the GCStrategy
518 const std::string StatepointExampleName("statepoint-example");
519 return StatepointExampleName == F.getGC();
524 // TODO: These should become properties of the GCStrategy, possibly with
525 // command line overrides.
526 static bool enableEntrySafepoints(Function &F) { return !NoEntry; }
527 static bool enableBackedgeSafepoints(Function &F) { return !NoBackedge; }
528 static bool enableCallSafepoints(Function &F) { return !NoCall; }
530 // Normalize the basic block to make it ready to be the target of an invoke statepoint.
531 // Ensure that 'BB' does not have phi nodes. It may require splitting it.
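// For example, if the invoke's normal destination starts with phi nodes or has
// other predecessors, the edge from the invoke is split so the invoke gets a
// dedicated successor, and the resulting single-entry phis are folded so a
// gc.result can later be placed at the top of that block.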
532 static BasicBlock *normalizeForInvokeSafepoint(BasicBlock *BB,
533 BasicBlock *InvokeParent) {
534 BasicBlock *ret = BB;
536 if (!BB->getUniquePredecessor()) {
537 ret = SplitBlockPredecessors(BB, InvokeParent, "");
540 // Now that 'ret' has a unique predecessor we can safely remove all phi nodes
542 FoldSingleEntryPHINodes(ret);
543 assert(!isa<PHINode>(ret->begin()));
548 bool PlaceSafepoints::runOnFunction(Function &F) {
549 if (F.isDeclaration() || F.empty()) {
550 // This is a declaration, nothing to do. Must exit early to avoid crash in
551 // dom tree calculation
555 if (isGCSafepointPoll(F)) {
556 // Given we're inlining this inside of safepoint poll insertion, this
557 // doesn't make any sense. Note that calls contained in the poll do
558 // become parseable after we inline the poll.
562 if (!shouldRewriteFunction(F))
565 bool modified = false;
567 // In various bits below, we rely on the fact that uses are reachable from
568 // defs. When there are basic blocks unreachable from the entry, dominance
569 // and reachability queries return nonsensical results. Thus, we preprocess
570 // the function to ensure these properties hold.
571 modified |= removeUnreachableBlocks(F);
573 // STEP 1 - Insert the safepoint polling locations. We do not need to
574 // actually insert parse points yet. That will be done for all polls and
575 // calls in a single pass.
577 // Note: With the migration, we need to recompute this for each 'pass'. Once
578 // we merge these, we'll do it once before the analysis
581 std::vector<CallSite> ParsePointNeeded;
583 if (enableBackedgeSafepoints(F)) {
584 // Construct a pass manager to run the LoopPass backedge logic. We
585 // need the pass manager to handle scheduling all the loop passes
586 // appropriately. Doing this by hand is painful and just not worth messing
587 // with for the moment.
588 legacy::FunctionPassManager FPM(F.getParent());
589 bool CanAssumeCallSafepoints = enableCallSafepoints(F);
590 PlaceBackedgeSafepointsImpl *PBS =
591 new PlaceBackedgeSafepointsImpl(CanAssumeCallSafepoints);
595 // We preserve dominance information when inserting the poll, otherwise
596 // we'd have to recalculate this on every insert
599 auto &PollLocations = PBS->PollLocations;
601 auto OrderByBBName = [](Instruction *a, Instruction *b) {
602 return a->getParent()->getName() < b->getParent()->getName();
604 // We need the order of the list to be stable so that naming ends up stable
605 // when we split edges. This makes test cases much easier to write.
606 std::sort(PollLocations.begin(), PollLocations.end(), OrderByBBName);
608 // We can sometimes end up with duplicate poll locations. This happens if
609 // a single loop is visited more than once. The fact this happens seems
610 // wrong, but it does happen for the split-backedge.ll test case.
611 PollLocations.erase(std::unique(PollLocations.begin(),
612 PollLocations.end()),
613 PollLocations.end());
615 // Insert a poll at each point the analysis pass identified
616 for (size_t i = 0; i < PollLocations.size(); i++) {
617 // We are inserting a poll, the function is modified
620 // The poll location must be the terminator of a loop latch block.
621 TerminatorInst *Term = PollLocations[i];
623 std::vector<CallSite> ParsePoints;
625 // Split the backedge of the loop and insert the poll within that new
626 // basic block. This creates a loop with two latches per original
627 // latch (which is non-ideal), but this appears to be easier to
628 // optimize in practice than inserting the poll immediately before the
631 // Since this is a latch, at least one of the successors must dominate
632 // it. It's possible that we have a) duplicate edges to the same header
633 // and b) edges to distinct loop headers. We need to insert polls on
635 SetVector<BasicBlock *> Headers;
636 for (unsigned i = 0; i < Term->getNumSuccessors(); i++) {
637 BasicBlock *Succ = Term->getSuccessor(i);
638 if (DT.dominates(Succ, Term->getParent())) {
639 Headers.insert(Succ);
642 assert(!Headers.empty() && "poll location is not a loop latch?");
644 // The split loop structure here is so that we only need to recalculate
645 // the dominator tree once. Alternatively, we could just keep it up to
646 // date and use a more natural merged loop.
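// For example, if a latch ends in "br i1 %c, label %header, label %exit", the
// %latch -> %header edge is split and the poll is inserted in the new block,
// just before its unconditional branch back to %header.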
647 SetVector<BasicBlock *> SplitBackedges;
648 for (BasicBlock *Header : Headers) {
649 BasicBlock *NewBB = SplitEdge(Term->getParent(), Header, nullptr);
650 SplitBackedges.insert(NewBB);
653 for (BasicBlock *NewBB : SplitBackedges) {
654 std::vector<CallSite> RuntimeCalls;
655 InsertSafepointPoll(DT, NewBB->getTerminator(), RuntimeCalls);
656 NumBackedgeSafepoints++;
657 ParsePointNeeded.insert(ParsePointNeeded.end(), RuntimeCalls.begin(),
662 // Split the latch block itself, right before the terminator.
663 std::vector<CallSite> RuntimeCalls;
664 InsertSafepointPoll(DT, Term, RuntimeCalls);
665 NumBackedgeSafepoints++;
666 ParsePointNeeded.insert(ParsePointNeeded.end(), RuntimeCalls.begin(),
670 // Record the parse points for later use
671 ParsePointNeeded.insert(ParsePointNeeded.end(), ParsePoints.begin(),
676 if (enableEntrySafepoints(F)) {
678 Instruction *term = findLocationForEntrySafepoint(F, DT);
680 // policy choice not to insert?
682 std::vector<CallSite> RuntimeCalls;
683 InsertSafepointPoll(DT, term, RuntimeCalls);
685 NumEntrySafepoints++;
686 ParsePointNeeded.insert(ParsePointNeeded.end(), RuntimeCalls.begin(),
691 if (enableCallSafepoints(F)) {
693 std::vector<CallSite> Calls;
694 findCallSafepoints(F, Calls);
695 NumCallSafepoints += Calls.size();
696 ParsePointNeeded.insert(ParsePointNeeded.end(), Calls.begin(), Calls.end());
699 // Unique the vectors since we can end up with duplicates if we scan the call
700 // site for call safepoints after we add it for entry or backedge. The
701 // only reason we need tracking at all is that some functions might have
702 // polls but not call safepoints and thus we might miss marking the runtime
703 // calls for the polls. (This is useful in test cases!)
704 unique_unsorted(ParsePointNeeded);
706 // Any parse point (no matter what source) will be handled here
707 DT.recalculate(F); // Needed?
709 // We're about to start modifying the function
710 if (!ParsePointNeeded.empty())
713 // Now run through and insert the safepoints, but do _NOT_ update or remove
714 // any existing uses. We have references to live variables that need to
715 // survive to the last iteration of this loop.
716 std::vector<Value *> Results;
717 Results.reserve(ParsePointNeeded.size());
718 for (size_t i = 0; i < ParsePointNeeded.size(); i++) {
719 CallSite &CS = ParsePointNeeded[i];
721 // For invoke statepoints we need to remove all phi nodes at the normal
722 // destination block.
723 // The reason is that we can place the gc_result only after the last phi node
724 // in the basic block, and we would get malformed code after RAUW for the
725 // gc_result if one of these phi nodes used the result of the invoke.
726 if (InvokeInst *Invoke = dyn_cast<InvokeInst>(CS.getInstruction())) {
727 normalizeForInvokeSafepoint(Invoke->getNormalDest(),
728 Invoke->getParent());
731 Value *GCResult = ReplaceWithStatepoint(CS, nullptr);
732 Results.push_back(GCResult);
734 assert(Results.size() == ParsePointNeeded.size());
736 // Adjust all users of the old call sites to use the new ones instead
737 for (size_t i = 0; i < ParsePointNeeded.size(); i++) {
738 CallSite &CS = ParsePointNeeded[i];
739 Value *GCResult = Results[i];
741 // Cannot RAUW the gc result while phi nodes are present.
742 assert(!isa<PHINode>(cast<Instruction>(GCResult)->getParent()->begin()));
744 // Replace all uses with the new call
745 CS.getInstruction()->replaceAllUsesWith(GCResult);
748 // Now that we've handled all uses, remove the original call itself
749 // Note: The insert point can't be the deleted instruction!
750 CS.getInstruction()->eraseFromParent();
755 char PlaceBackedgeSafepointsImpl::ID = 0;
756 char PlaceSafepoints::ID = 0;
758 ModulePass *llvm::createPlaceSafepointsPass() { return new PlaceSafepoints(); }
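// Example usage (illustrative): clients typically add this pass to their
// pipeline ahead of RewriteStatepointsForGC, e.g.
//
//   legacy::PassManager PM;
//   PM.add(createPlaceSafepointsPass());
//   PM.run(M);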
760 INITIALIZE_PASS_BEGIN(PlaceBackedgeSafepointsImpl,
761 "place-backedge-safepoints-impl",
762 "Place Backedge Safepoints", false, false)
763 INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
764 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
765 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
766 INITIALIZE_PASS_END(PlaceBackedgeSafepointsImpl,
767 "place-backedge-safepoints-impl",
768 "Place Backedge Safepoints", false, false)
770 INITIALIZE_PASS_BEGIN(PlaceSafepoints, "place-safepoints", "Place Safepoints",
772 INITIALIZE_PASS_END(PlaceSafepoints, "place-safepoints", "Place Safepoints",
775 static bool isGCLeafFunction(const CallSite &CS) {
776 Instruction *inst = CS.getInstruction();
777 if (isa<IntrinsicInst>(inst)) {
778 // Most LLVM intrinsics are things which can never take a safepoint.
779 // As a result, we don't need to have the stack parsable at the
780 // callsite. This is a highly useful optimization since intrinsic
781 // calls are fairly prevalent, particularly in debug builds.
785 // If this function is marked explicitly as a leaf call, we don't need to
786 // place a safepoint for it. In fact, for correctness we *can't* in many
787 // cases. Note: Indirect calls return null for the called function;
788 // these obviously aren't runtime functions with attributes.
789 // TODO: Support attributes on the call site as well.
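// Illustration: in IR such a callee might be declared as
//   declare void @runtime_leaf_helper() "gc-leaf-function"="true"
// where the function name is hypothetical; the attribute string is the one
// checked below.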
790 const Function *F = CS.getCalledFunction();
793 F->getFnAttribute("gc-leaf-function").getValueAsString().equals("true");
801 InsertSafepointPoll(DominatorTree &DT, Instruction *term,
802 std::vector<CallSite> &ParsePointsNeeded /*rval*/) {
803 Module *M = term->getParent()->getParent()->getParent();
806 // Inline the safepoint poll implementation - this will get all the branch,
807 // control flow, etc. Most importantly, it will introduce the actual slow
808 // path call - where we need to insert a safepoint (parsepoint).
809 FunctionType *ftype =
810 FunctionType::get(Type::getVoidTy(M->getContext()), false);
811 assert(ftype && "null?");
812 // Note: This cast can fail if there's a function of the same name with a
813 // different type inserted previously
815 dyn_cast<Function>(M->getOrInsertFunction("gc.safepoint_poll", ftype));
816 assert(F && "void @gc.safepoint_poll() must be defined");
817 assert(!F->empty() && "gc.safepoint_poll must be a non-empty function");
818 CallInst *poll = CallInst::Create(F, "", term);
820 // Record some information about the call site we're replacing
821 BasicBlock *OrigBB = term->getParent();
822 BasicBlock::iterator before(poll), after(poll);
824 if (before == term->getParent()->begin()) {
830 assert(after != poll->getParent()->end() && "must have successor");
831 assert(DT.dominates(before, after) && "trivially true");
833 // do the actual inlining
834 InlineFunctionInfo IFI;
835 bool inlineStatus = InlineFunction(poll, IFI);
836 assert(inlineStatus && "inline must succeed");
837 (void)inlineStatus; // suppress warning in release-asserts
839 // Check post conditions
840 assert(IFI.StaticAllocas.empty() && "can't have allocs");
842 std::vector<CallInst *> calls; // new calls
843 std::set<BasicBlock *> BBs; // new BBs + insertee
844 // Include only the newly inserted instructions. Note: begin may not be valid
845 // if we inserted at the beginning of the basic block.
846 BasicBlock::iterator start;
848 start = OrigBB->begin();
854 // If your poll function includes an unreachable at the end, that's not
855 // valid. Bugpoint likes to create this, so check for it.
856 assert(isPotentiallyReachable(&*start, &*after, nullptr, nullptr) &&
857 "malformed poll function");
859 scanInlinedCode(&*(start), &*(after), calls, BBs);
861 // Recompute since we've invalidated cached data. Conceptually we
862 // shouldn't need to do this, but implementation wise we appear to. Needed
863 // so we can insert safepoints correctly.
864 // TODO: update more cheaply
865 DT.recalculate(*after->getParent()->getParent());
867 assert(!calls.empty() && "slow path not found for safepoint poll");
869 // Record the fact we need a parsable state at the runtime call contained in
870 // the poll function. This is required so that the runtime knows how to
871 // parse the last frame when we actually take the safepoint (i.e. execute
873 assert(ParsePointsNeeded.empty());
874 for (size_t i = 0; i < calls.size(); i++) {
876 // No safepoint needed or wanted
877 if (!needsStatepoint(calls[i])) {
881 // These are likely runtime calls. Should we assert that via calling
882 // convention or something?
883 ParsePointsNeeded.push_back(CallSite(calls[i]));
885 assert(ParsePointsNeeded.size() <= calls.size());
888 /// Replaces the given call site (Call or Invoke) with a gc.statepoint
889 /// intrinsic with an empty deoptimization arguments list. This does
890 /// NOT do explicit relocation for GC support.
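/// For illustration (operands and intrinsic type suffixes abridged with
/// "..."), a call such as
///   %ret = call i32 @callee(i32 %a)
/// is rewritten along the lines of
///   %token = call ... @llvm.experimental.gc.statepoint...(... @callee, i32 %a, ...)
///   %ret   = call i32 @llvm.experimental.gc.result...(... %token)
/// and the caller then replaces all uses of the original call with the
/// gc.result before erasing it.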
891 static Value *ReplaceWithStatepoint(const CallSite &CS, /* to replace */
893 assert(CS.getInstruction()->getParent()->getParent()->getParent() &&
896 // TODO: technically, a pass is not allowed to get functions from within a
897 // function pass since it might trigger a new function addition. Refactor
898 // this logic out to the initialization of the pass. Doesn't appear to
899 // matter in practice.
901 // Then go ahead and use the builder to actually do the inserts. We insert
902 // immediately before the previous instruction under the assumption that all
903 // arguments will be available here. We can't insert afterwards since we may
904 // be replacing a terminator.
905 IRBuilder<> Builder(CS.getInstruction());
907 // Note: The gc args are not filled in at this time, that's handled by
908 // RewriteStatepointsForGC (which is currently under review).
910 // Create the statepoint given all the arguments
911 Instruction *Token = nullptr;
912 AttributeSet OriginalAttrs;
915 CallInst *ToReplace = cast<CallInst>(CS.getInstruction());
916 CallInst *Call = Builder.CreateGCStatepointCall(
917 CS.getCalledValue(), makeArrayRef(CS.arg_begin(), CS.arg_end()), None,
918 None, "safepoint_token");
919 Call->setTailCall(ToReplace->isTailCall());
920 Call->setCallingConv(ToReplace->getCallingConv());
922 // Before we have to worry about GC semantics, all attributes are legal
923 // TODO: handle param attributes
924 OriginalAttrs = ToReplace->getAttributes();
926 // If we can handle this set of attributes, set the function attributes
927 // directly on the statepoint and apply the return attributes to the
928 // gc_result intrinsic later.
929 Call->setAttributes(OriginalAttrs.getFnAttributes());
933 // Put the following gc_result and gc_relocate calls immediately after the
934 // old call (which we're about to delete).
935 assert(ToReplace->getNextNode() && "not a terminator, must have next");
936 Builder.SetInsertPoint(ToReplace->getNextNode());
937 Builder.SetCurrentDebugLocation(ToReplace->getNextNode()->getDebugLoc());
938 } else if (CS.isInvoke()) {
939 InvokeInst *ToReplace = cast<InvokeInst>(CS.getInstruction());
941 // Insert the new invoke into the old block. We'll remove the old one in a
942 // moment at which point this will become the new terminator for the
944 Builder.SetInsertPoint(ToReplace->getParent());
945 InvokeInst *Invoke = Builder.CreateGCStatepointInvoke(
946 CS.getCalledValue(), ToReplace->getNormalDest(),
947 ToReplace->getUnwindDest(), makeArrayRef(CS.arg_begin(), CS.arg_end()),
948 Builder.getInt32(0), None, "safepoint_token");
950 // Currently we will fail on parameter attributes and on certain
951 // function attributes.
952 OriginalAttrs = ToReplace->getAttributes();
954 // If we can handle this set of attributes, set the function attributes
955 // directly on the statepoint and apply the return attributes to the
956 // gc_result intrinsic later.
957 Invoke->setAttributes(OriginalAttrs.getFnAttributes());
961 // We'll insert the gc.result into the normal block
962 BasicBlock *NormalDest = ToReplace->getNormalDest();
963 // Cannot insert gc.result while phi nodes are present.
964 // These cases should have been removed prior to running this function.
965 assert(!isa<PHINode>(NormalDest->begin()));
966 Instruction *IP = &*(NormalDest->getFirstInsertionPt());
967 Builder.SetInsertPoint(IP);
969 llvm_unreachable("unexpected type of CallSite");
973 // Handle the return value of the original call - update all uses to use a
974 // gc_result hanging off the statepoint node we just inserted
976 // Only add the gc_result iff there is actually a used result
977 if (!CS.getType()->isVoidTy() && !CS.getInstruction()->use_empty()) {
978 std::string TakenName =
979 CS.getInstruction()->hasName() ? CS.getInstruction()->getName() : "";
980 CallInst *GCResult = Builder.CreateGCResult(Token, CS.getType(), TakenName);
981 GCResult->setAttributes(OriginalAttrs.getRetAttributes());
984 // No return value for the call.