//===- PlaceSafepoints.cpp - Place GC Safepoints --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Place garbage collection safepoints at appropriate locations in the IR. This
// does not make relocation semantics or variable liveness explicit. That's
// done by RewriteStatepointsForGC.
//
// Terminology:
// - A call is said to be "parseable" if there is a stack map generated for the
// return PC of the call. A runtime can determine where values listed in the
// deopt arguments and (after RewriteStatepointsForGC) gc arguments are located
// on the stack when the code is suspended inside such a call. Every parse
// point is represented by a call wrapped in a gc.statepoint intrinsic.
// - A "poll" is an explicit check in the generated code to determine if the
// runtime needs the generated code to cooperate by calling a helper routine
// and thus suspending its execution at a known state. The call to the helper
// routine will be parseable. The (gc & runtime specific) logic of a poll is
// assumed to be provided in a function of the name "gc.safepoint_poll".
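//
// As an illustration only: the simplest possible poll body (the form used by
// this pass's lit tests) calls straight into the runtime's slow path, whereas
// a production implementation would normally guard that call behind a cheap
// check of a thread-local flag or polling page. The helper name @do_safepoint
// below is illustrative, not something this pass requires:
//
//   declare void @do_safepoint()
//   define void @gc.safepoint_poll() {
//   entry:
//     call void @do_safepoint()
//     ret void
//   }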
//
// We aim to insert polls such that running code can quickly be brought to a
// well defined state for inspection by the collector. In the current
// implementation, this is done via the insertion of poll sites at method entry
// and the backedge of most loops. We try to avoid inserting more polls than
// are necessary to ensure a finite period between poll sites. This is not
// because the poll itself is expensive in the generated code; it's not. Polls
// do tend to impact the optimizer itself in negative ways; we'd like to avoid
// perturbing the optimization of the method as much as we can.
//
// We also need to make most call sites parseable. The callee might execute a
// poll (or otherwise be inspected by the GC). If so, the entire stack
// (including the suspended frame of the current method) must be parseable.
//
// This pass will insert:
// - Call parse points ("call safepoints") for any call which may need to
// reach a safepoint during the execution of the callee function.
// - Backedge safepoint polls and entry safepoint polls to ensure that
// executing code reaches a safepoint poll in a finite amount of time.
//
// We do not currently support return statepoints, but adding them would not
// be hard. They are not required for correctness - entry safepoints are an
// alternative - but some GCs may prefer them. Patches welcome.
//
//===----------------------------------------------------------------------===//
#include "llvm/Pass.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#define DEBUG_TYPE "safepoint-placement"

STATISTIC(NumEntrySafepoints, "Number of entry safepoints inserted");
STATISTIC(NumCallSafepoints, "Number of call safepoints inserted");
STATISTIC(NumBackedgeSafepoints, "Number of backedge safepoints inserted");

STATISTIC(CallInLoop, "Number of loops w/o safepoints due to calls in loop");
STATISTIC(FiniteExecution, "Number of loops w/o safepoints finite execution");

using namespace llvm;

// Ignore opportunities to avoid placing safepoints on backedges, useful for
// validation
static cl::opt<bool> AllBackedges("spp-all-backedges", cl::Hidden,
                                  cl::init(false));

/// If true, do not place backedge safepoints in counted loops.
static cl::opt<bool> SkipCounted("spp-counted", cl::Hidden, cl::init(true));

// If true, split the backedge of a loop when placing the safepoint, otherwise
// split the latch block itself. Both are useful to support for
// experimentation, but in practice, it looks like splitting the backedge
// results in better generated code.
static cl::opt<bool> SplitBackedge("spp-split-backedge", cl::Hidden,
                                   cl::init(false));

// Print tracing output
static cl::opt<bool> TraceLSP("spp-trace", cl::Hidden, cl::init(false));
namespace {

/// An analysis pass whose purpose is to identify each of the backedges in
/// the function which require a safepoint poll to be inserted.
struct PlaceBackedgeSafepointsImpl : public FunctionPass {
  static char ID;

  /// The output of the pass - gives a list of each backedge (described by
  /// pointing at the branch) which needs a poll inserted.
  std::vector<TerminatorInst *> PollLocations;

  /// True unless we're running spp-no-calls in which case we need to disable
  /// the call dependent placement opts.
  bool CallSafepointsEnabled;

  ScalarEvolution *SE = nullptr;
  DominatorTree *DT = nullptr;
  LoopInfo *LI = nullptr;

  PlaceBackedgeSafepointsImpl(bool CallSafepoints = false)
      : FunctionPass(ID), CallSafepointsEnabled(CallSafepoints) {
    initializePlaceBackedgeSafepointsImplPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *);
  void runOnLoopAndSubLoops(Loop *L) {
    // Visit all the subloops
    for (auto I = L->begin(), E = L->end(); I != E; I++)
      runOnLoopAndSubLoops(*I);
    runOnLoop(L);
  }

  bool runOnFunction(Function &F) override {
    SE = &getAnalysis<ScalarEvolution>();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    for (auto I = LI->begin(), E = LI->end(); I != E; I++) {
      runOnLoopAndSubLoops(*I);
    }
    return false;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolution>();
    AU.addRequired<LoopInfoWrapperPass>();
    // We no longer modify the IR at all in this pass. Thus all
    // analyses are preserved.
    AU.setPreservesAll();
  }
};
}

static cl::opt<bool> NoEntry("spp-no-entry", cl::Hidden, cl::init(false));
static cl::opt<bool> NoCall("spp-no-call", cl::Hidden, cl::init(false));
static cl::opt<bool> NoBackedge("spp-no-backedge", cl::Hidden, cl::init(false));
namespace {
struct PlaceSafepoints : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid

  PlaceSafepoints() : FunctionPass(ID) {
    initializePlaceSafepointsPass(*PassRegistry::getPassRegistry());
  }
  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // We modify the graph wholesale (inlining, block insertion, etc). We
    // preserve nothing at the moment. We could potentially preserve dom tree
    // if that was worth doing
  }
};
}

// Insert a safepoint poll immediately before the given instruction. Does
// not handle the parsability of state at the runtime call, that's the
// caller's job.
static void
InsertSafepointPoll(Instruction *after,
                    std::vector<CallSite> &ParsePointsNeeded /*rval*/);

static bool isGCLeafFunction(const CallSite &CS);
static bool needsStatepoint(const CallSite &CS) {
  if (isGCLeafFunction(CS))
    return false;

  if (CS.isCall()) {
    CallInst *call = cast<CallInst>(CS.getInstruction());
    if (call->isInlineAsm())
      return false;
  }
  if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS)) {
    return false;
  }
  return true;
}

static Value *ReplaceWithStatepoint(const CallSite &CS, Pass *P);
/// Returns true if this loop is known to contain a call safepoint which
/// must unconditionally execute on any iteration of the loop which returns
/// to the loop header via an edge from Pred. Returns a conservatively correct
/// answer; i.e. false is always valid.
static bool containsUnconditionalCallSafepoint(Loop *L, BasicBlock *Header,
                                               BasicBlock *Pred,
                                               DominatorTree &DT) {
  // In general, we're looking for any cut of the graph which ensures
  // there's a call safepoint along every edge between Header and Pred.
  // For the moment, we look only for the 'cuts' that consist of a single call
  // instruction in a block which is dominated by the Header and dominates the
  // loop latch (Pred) block. Somewhat surprisingly, walking the entire chain
  // of such dominating blocks gets substantially more occurrences than just
  // checking the Pred and Header blocks themselves. This may be due to the
  // density of loop exit conditions caused by range and null checks.
  // TODO: structure this as an analysis pass, cache the result for subloops,
  // avoid dom tree recalculations
  assert(DT.dominates(Header, Pred) && "loop latch not dominated by header?");

  BasicBlock *Current = Pred;
  while (true) {
    for (Instruction &I : *Current) {
      if (auto CS = CallSite(&I))
        // Note: Technically, needing a safepoint isn't quite the right
        // condition here. We should instead be checking if the target method
        // contains an unconditional poll. In practice, this is only a
        // theoretical concern since we don't have any methods with
        // conditional-only safepoint polls.
        if (needsStatepoint(CS))
          return true;
    }

    if (Current == Header)
      break;
    Current = DT.getNode(Current)->getIDom()->getBlock();
  }

  return false;
}
/// Returns true if this loop is known to terminate in a finite number of
/// iterations. Note that this function may return false for a loop which
/// does actually terminate in a finite constant number of iterations due to
/// conservatism in the analysis.
static bool mustBeFiniteCountedLoop(Loop *L, ScalarEvolution *SE,
                                    BasicBlock *Pred) {
  // Only used when SkipCounted is off
  const unsigned upperTripBound = 8192;

  // A conservative bound on the loop as a whole.
  const SCEV *MaxTrips = SE->getMaxBackedgeTakenCount(L);
  if (MaxTrips != SE->getCouldNotCompute()) {
    if (SE->getUnsignedRange(MaxTrips).getUnsignedMax().ult(upperTripBound))
      return true;
    if (SkipCounted &&
        SE->getUnsignedRange(MaxTrips).getUnsignedMax().isIntN(32))
      return true;
  }

  // If this is a conditional branch to the header with the alternate path
  // being outside the loop, we can ask questions about the execution frequency
  // of the exit block.
  if (L->isLoopExiting(Pred)) {
    // This returns an exact expression only. TODO: We really only need an
    // upper bound here, but SE doesn't expose that.
    const SCEV *MaxExec = SE->getExitCount(L, Pred);
    if (MaxExec != SE->getCouldNotCompute()) {
      if (SE->getUnsignedRange(MaxExec).getUnsignedMax().ult(upperTripBound))
        return true;
      if (SkipCounted &&
          SE->getUnsignedRange(MaxExec).getUnsignedMax().isIntN(32))
        return true;
    }
  }

  return /* not finite */ false;
}
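
// Scan a single basic block starting at 'start' and stopping either at 'end'
// or at the block's terminator, collecting every CallInst encountered and
// queueing any not-yet-seen successor blocks onto the worklist.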
static void scanOneBB(Instruction *start, Instruction *end,
                      std::vector<CallInst *> &calls,
                      std::set<BasicBlock *> &seen,
                      std::vector<BasicBlock *> &worklist) {
  for (BasicBlock::iterator itr(start);
       itr != start->getParent()->end() && itr != BasicBlock::iterator(end);
       itr++) {
    if (CallInst *CI = dyn_cast<CallInst>(&*itr)) {
      calls.push_back(CI);
    }
    // FIXME: This code does not handle invokes
    assert(!dyn_cast<InvokeInst>(&*itr) &&
           "support for invokes in poll code needed");
    // Only add the successor blocks if we reach the terminator instruction
    // without encountering end first
    if (itr->isTerminator()) {
      BasicBlock *BB = itr->getParent();
      for (BasicBlock *Succ : successors(BB)) {
        if (seen.count(Succ) == 0) {
          worklist.push_back(Succ);
          seen.insert(Succ);
        }
      }
    }
  }
}
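
// Scan the region of freshly inlined poll code between 'start' and 'end',
// following control flow through successor blocks, and collect every call
// instruction contained in it.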
static void scanInlinedCode(Instruction *start, Instruction *end,
                            std::vector<CallInst *> &calls,
                            std::set<BasicBlock *> &seen) {
  calls.clear();
  std::vector<BasicBlock *> worklist;
  seen.insert(start->getParent());
  scanOneBB(start, end, calls, seen, worklist);
  while (!worklist.empty()) {
    BasicBlock *BB = worklist.back();
    worklist.pop_back();
    scanOneBB(&*BB->begin(), end, calls, seen, worklist);
  }
}
bool PlaceBackedgeSafepointsImpl::runOnLoop(Loop *L) {
  // Loop through all loop latches (branches controlling backedges). We need
  // to place a safepoint on every backedge (potentially).
  // Note: In common usage, there will be only one edge due to LoopSimplify
  // having run sometime earlier in the pipeline, but this code must be correct
  // w.r.t. loops with multiple backedges.
  BasicBlock *header = L->getHeader();
  SmallVector<BasicBlock *, 16> LoopLatches;
  L->getLoopLatches(LoopLatches);
  for (BasicBlock *pred : LoopLatches) {
    assert(L->contains(pred));

    // Make a policy decision about whether this loop needs a safepoint or
    // not. Note that this is about unburdening the optimizer in loops, not
    // avoiding the runtime cost of the actual safepoint.
    if (!AllBackedges) {
      if (mustBeFiniteCountedLoop(L, SE, pred)) {
        if (TraceLSP)
          errs() << "skipping safepoint placement in finite loop\n";
        FiniteExecution++;
        continue;
      }
      if (CallSafepointsEnabled &&
          containsUnconditionalCallSafepoint(L, header, pred, *DT)) {
        // Note: This is only semantically legal since we won't do any further
        // IPO or inlining before the actual call insertion. If we hadn't, we
        // might later lose this call safepoint.
        if (TraceLSP)
          errs() << "skipping safepoint placement due to unconditional call\n";
        CallInLoop++;
        continue;
      }
    }

    // TODO: We can create an inner loop which runs a finite number of
    // iterations with an outer loop which contains a safepoint. This would
    // not help runtime performance that much, but it might help our ability to
    // optimize the inner loop.

    // Safepoint insertion would involve creating a new basic block (as the
    // target of the current backedge) which does the safepoint (of all live
    // variables) and branches to the true header
    TerminatorInst *term = pred->getTerminator();

    if (TraceLSP) {
      errs() << "[LSP] terminator instruction: ";
      term->dump();
    }

    PollLocations.push_back(term);
  }

  return false;
}
/// Returns true if an entry safepoint is not required before this callsite in
/// the caller function.
static bool doesNotRequireEntrySafepointBefore(const CallSite &CS) {
  Instruction *Inst = CS.getInstruction();
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::experimental_gc_statepoint:
    case Intrinsic::experimental_patchpoint_void:
    case Intrinsic::experimental_patchpoint_i64:
      // These can wrap an actual call which may grow the stack by an unbounded
      // amount or run forever.
      return false;
    default:
      // Most LLVM intrinsics are things which do not expand to actual calls,
      // or at least if they do, are leaf functions that cause only finite
      // stack growth. In particular, the optimizer likes to form things like
      // memsets out of stores in the original IR. Another important example is
      // llvm.frameescape which must occur in the entry block. Inserting a
      // safepoint before it is not legal since it could push the frameescape
      // out of the entry block.
      return true;
    }
  }
  return false;
}
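
/// Choose the instruction in the entry block before which the method entry
/// safepoint poll will be inserted. We place the poll as late in the entry
/// block as legality (dominating every stack-growing call) permits.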
static Instruction *findLocationForEntrySafepoint(Function &F,
                                                  DominatorTree &DT) {

  // Conceptually, this poll needs to be on method entry, but in
  // practice, we place it as late in the entry block as possible. We
  // can place it as late as we want as long as it dominates all calls
  // that can grow the stack. This, combined with backedge polls,
  // gives us all the progress guarantees we need.

  // Due to the way the frontend generates IR, we may have a couple of initial
  // basic blocks before the first bytecode. These will be single-entry
  // single-exit blocks which conceptually are just part of the first 'real
  // basic block'. Since we don't have deopt state until the first bytecode,
  // walk forward until we've found the first unconditional branch or merge.

  // hasNextInstruction and nextInstruction are used to iterate
  // through a "straight line" execution sequence.

  auto hasNextInstruction = [](Instruction *I) {
    if (!I->isTerminator()) {
      return true;
    }
    BasicBlock *nextBB = I->getParent()->getUniqueSuccessor();
    return nextBB && (nextBB->getUniquePredecessor() != nullptr);
  };

  auto nextInstruction = [&hasNextInstruction](Instruction *I) {
    assert(hasNextInstruction(I) &&
           "first check if there is a next instruction!");
    if (I->isTerminator()) {
      return I->getParent()->getUniqueSuccessor()->begin();
    }
    return std::next(BasicBlock::iterator(I));
  };

  Instruction *cursor = nullptr;
  for (cursor = F.getEntryBlock().begin(); hasNextInstruction(cursor);
       cursor = nextInstruction(cursor)) {

    // We need to ensure a safepoint poll occurs before any 'real' call. The
    // easiest way to ensure finite execution between safepoints in the face of
    // recursive and mutually recursive functions is to enforce that each take
    // a safepoint. Additionally, we need to ensure a poll before any call
    // which can grow the stack by an unbounded amount. This isn't required
    // for GC semantics per se, but is a common requirement for languages
    // which detect stack overflow via guard pages and then throw exceptions.
    if (auto CS = CallSite(cursor)) {
      if (doesNotRequireEntrySafepointBefore(CS))
        continue;
      break;
    }
  }

  assert((hasNextInstruction(cursor) || cursor->isTerminator()) &&
         "either we stopped because of a call, or because of terminator");

  if (cursor->isTerminator()) {
    return cursor;
  }

  BasicBlock *BB = cursor->getParent();
  SplitBlock(BB, cursor, &DT);

  // SplitBlock updates the DT
  DEBUG(DT.verifyDomTree());

  return BB->getTerminator();
}
/// Identify the list of call sites which need to have parseable state
static void findCallSafepoints(Function &F,
                               std::vector<CallSite> &Found /*rval*/) {
  assert(Found.empty() && "must be empty!");
  for (Instruction &I : inst_range(F)) {
    Instruction *inst = &I;
    if (isa<CallInst>(inst) || isa<InvokeInst>(inst)) {
      CallSite CS(inst);

      // No safepoint needed or wanted
      if (!needsStatepoint(CS)) {
        continue;
      }

      Found.push_back(CS);
    }
  }
}

/// Implement a unique function which doesn't require we sort the input
/// vector. Doing so has the effect of changing the output of a couple of
/// tests in ways which make them less useful in testing fused safepoints.
template <typename T> static void unique_unsorted(std::vector<T> &vec) {
  std::set<T> seen;
  std::vector<T> tmp;
  vec.reserve(vec.size());
  std::swap(tmp, vec);
  for (auto V : tmp) {
    if (seen.insert(V).second) {
      vec.push_back(V);
    }
  }
}
static std::string GCSafepointPollName("gc.safepoint_poll");

static bool isGCSafepointPoll(Function &F) {
  return F.getName().equals(GCSafepointPollName);
}

/// Returns true if this function should be rewritten to include safepoint
/// polls and parseable call sites. The main point of this function is to be
/// an extension point for custom logic.
static bool shouldRewriteFunction(Function &F) {
  // TODO: This should check the GCStrategy
  if (F.hasGC()) {
    const std::string StatepointExampleName("statepoint-example");
    return StatepointExampleName == F.getGC();
  } else
    return false;
}

// TODO: These should become properties of the GCStrategy, possibly with
// command line overrides.
static bool enableEntrySafepoints(Function &F) { return !NoEntry; }
static bool enableBackedgeSafepoints(Function &F) { return !NoBackedge; }
static bool enableCallSafepoints(Function &F) { return !NoCall; }
// Normalize basic block to make it ready to be target of invoke statepoint.
// Ensure that 'BB' does not have phi nodes. It may require splitting it.
static BasicBlock *normalizeForInvokeSafepoint(BasicBlock *BB,
                                               BasicBlock *InvokeParent) {
  BasicBlock *ret = BB;

  if (!BB->getUniquePredecessor()) {
    ret = SplitBlockPredecessors(BB, InvokeParent, "");
  }

  // Now that 'ret' has a unique predecessor we can safely remove all phi nodes
  // from it
  FoldSingleEntryPHINodes(ret);
  assert(!isa<PHINode>(ret->begin()));

  return ret;
}
bool PlaceSafepoints::runOnFunction(Function &F) {
  if (F.isDeclaration() || F.empty()) {
    // This is a declaration, nothing to do. Must exit early to avoid crash in
    // dom tree calculation
    return false;
  }

  if (isGCSafepointPoll(F)) {
    // Given we're inlining this inside of safepoint poll insertion, rewriting
    // the poll function itself doesn't make any sense. Note that we do make
    // any contained calls parseable after we inline a poll.
    return false;
  }

  if (!shouldRewriteFunction(F))
    return false;

  bool modified = false;

  // In various bits below, we rely on the fact that uses are reachable from
  // defs. When there are basic blocks unreachable from the entry, dominance
  // and reachability queries return non-sensical results. Thus, we preprocess
  // the function to ensure these properties hold.
  modified |= removeUnreachableBlocks(F);
  // STEP 1 - Insert the safepoint polling locations. We do not need to
  // actually insert parse points yet. That will be done for all polls and
  // calls in a single pass.

  DominatorTree DT;
  DT.recalculate(F);

  SmallVector<Instruction *, 16> PollsNeeded;
  std::vector<CallSite> ParsePointNeeded;

  if (enableBackedgeSafepoints(F)) {
    // Construct a pass manager to run the LoopPass backedge logic. We
    // need the pass manager to handle scheduling all the loop passes
    // appropriately. Doing this by hand is painful and just not worth messing
    // with for the moment.
    legacy::FunctionPassManager FPM(F.getParent());
    bool CanAssumeCallSafepoints = enableCallSafepoints(F);
    PlaceBackedgeSafepointsImpl *PBS =
        new PlaceBackedgeSafepointsImpl(CanAssumeCallSafepoints);
    FPM.add(PBS);
    FPM.run(F);

    // We preserve dominance information when inserting the poll, otherwise
    // we'd have to recalculate this on every insert
    DT.recalculate(F);

    auto &PollLocations = PBS->PollLocations;

    auto OrderByBBName = [](Instruction *a, Instruction *b) {
      return a->getParent()->getName() < b->getParent()->getName();
    };
    // We need the order of list to be stable so that naming ends up stable
    // when we split edges. This makes test cases much easier to write.
    std::sort(PollLocations.begin(), PollLocations.end(), OrderByBBName);

    // We can sometimes end up with duplicate poll locations. This happens if
    // a single loop is visited more than once. The fact this happens seems
    // wrong, but it does happen for the split-backedge.ll test case.
    PollLocations.erase(std::unique(PollLocations.begin(),
                                    PollLocations.end()),
                        PollLocations.end());
    // Insert a poll at each point the analysis pass identified
    // The poll location must be the terminator of a loop latch block.
    for (TerminatorInst *Term : PollLocations) {
      // We are inserting a poll, the function is modified
      modified = true;

      if (SplitBackedge) {
        // Split the backedge of the loop and insert the poll within that new
        // basic block. This creates a loop with two latches per original
        // latch (which is non-ideal), but this appears to be easier to
        // optimize in practice than inserting the poll immediately before the
        // latch test.

        // Since this is a latch, at least one of the successors must dominate
        // it. It's possible that we have a) duplicate edges to the same header
        // and b) edges to distinct loop headers. We need to insert polls on
        // each.
        SetVector<BasicBlock *> Headers;
        for (unsigned i = 0; i < Term->getNumSuccessors(); i++) {
          BasicBlock *Succ = Term->getSuccessor(i);
          if (DT.dominates(Succ, Term->getParent())) {
            Headers.insert(Succ);
          }
        }
        assert(!Headers.empty() && "poll location is not a loop latch?");

        // The split loop structure here is so that we only need to recalculate
        // the dominator tree once. Alternatively, we could just keep it up to
        // date and use a more natural merged loop.
        SetVector<BasicBlock *> SplitBackedges;
        for (BasicBlock *Header : Headers) {
          BasicBlock *NewBB = SplitEdge(Term->getParent(), Header, &DT);
          PollsNeeded.push_back(NewBB->getTerminator());
          NumBackedgeSafepoints++;
        }
      } else {
        // Split the latch block itself, right before the terminator.
        PollsNeeded.push_back(Term);
        NumBackedgeSafepoints++;
      }
    }
  }
  if (enableEntrySafepoints(F)) {
    Instruction *Location = findLocationForEntrySafepoint(F, DT);
    if (!Location) {
      // policy choice not to insert?
    } else {
      PollsNeeded.push_back(Location);
      modified = true;
      NumEntrySafepoints++;
    }
  }

  // Now that we've identified all the needed safepoint poll locations, insert
  // safepoint polls themselves.
  for (Instruction *PollLocation : PollsNeeded) {
    std::vector<CallSite> RuntimeCalls;
    InsertSafepointPoll(PollLocation, RuntimeCalls);
    ParsePointNeeded.insert(ParsePointNeeded.end(), RuntimeCalls.begin(),
                            RuntimeCalls.end());
  }
  PollsNeeded.clear(); // make sure we don't accidentally use them again

  // The dominator tree has been invalidated by the inlining performed in the
  // above loop. TODO: Teach the inliner how to update the dom tree?
  DT.recalculate(F);
  if (enableCallSafepoints(F)) {
    std::vector<CallSite> Calls;
    findCallSafepoints(F, Calls);
    NumCallSafepoints += Calls.size();
    ParsePointNeeded.insert(ParsePointNeeded.end(), Calls.begin(), Calls.end());
  }

  // Unique the vectors since we can end up with duplicates if we scan the call
  // site for call safepoints after we add it for entry or backedge. The
  // only reason we need tracking at all is that some functions might have
  // polls but not call safepoints and thus we might miss marking the runtime
  // calls for the polls. (This is useful in test cases!)
  unique_unsorted(ParsePointNeeded);

  // Any parse point (no matter what source) will be handled here

  // We're about to start modifying the function
  if (!ParsePointNeeded.empty())
    modified = true;
  // Now run through and insert the safepoints, but do _NOT_ update or remove
  // any existing uses. We have references to live variables that need to
  // survive to the last iteration of this loop.
  std::vector<Value *> Results;
  Results.reserve(ParsePointNeeded.size());
  for (size_t i = 0; i < ParsePointNeeded.size(); i++) {
    CallSite &CS = ParsePointNeeded[i];

    // For invoke statepoints we need to remove all phi nodes at the normal
    // destination block.
    // Reason for this is that we can place gc_result only after the last phi
    // node in the basic block. We will get malformed code after RAUW for the
    // gc_result if one of these phi nodes uses the result from the invoke.
    if (InvokeInst *Invoke = dyn_cast<InvokeInst>(CS.getInstruction())) {
      normalizeForInvokeSafepoint(Invoke->getNormalDest(),
                                  Invoke->getParent());
    }

    Value *GCResult = ReplaceWithStatepoint(CS, nullptr);
    Results.push_back(GCResult);
  }
  assert(Results.size() == ParsePointNeeded.size());

  // Adjust all users of the old call sites to use the new ones instead
  for (size_t i = 0; i < ParsePointNeeded.size(); i++) {
    CallSite &CS = ParsePointNeeded[i];
    Value *GCResult = Results[i];
    if (GCResult) {
      // Can not RAUW for the invoke gc result in case of phi nodes present.
      assert(CS.isCall() ||
             !isa<PHINode>(cast<Instruction>(GCResult)->getParent()->begin()));

      // Replace all uses with the new call
      CS.getInstruction()->replaceAllUsesWith(GCResult);
    }

    // Now that we've handled all uses, remove the original call itself
    // Note: The insert point can't be the deleted instruction!
    CS.getInstruction()->eraseFromParent();
  }

  return modified;
}
char PlaceBackedgeSafepointsImpl::ID = 0;
char PlaceSafepoints::ID = 0;

FunctionPass *llvm::createPlaceSafepointsPass() {
  return new PlaceSafepoints();
}

INITIALIZE_PASS_BEGIN(PlaceBackedgeSafepointsImpl,
                      "place-backedge-safepoints-impl",
                      "Place Backedge Safepoints", false, false)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(PlaceBackedgeSafepointsImpl,
                    "place-backedge-safepoints-impl",
                    "Place Backedge Safepoints", false, false)

INITIALIZE_PASS_BEGIN(PlaceSafepoints, "place-safepoints", "Place Safepoints",
                      false, false)
INITIALIZE_PASS_END(PlaceSafepoints, "place-safepoints", "Place Safepoints",
                    false, false)
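
/// Returns true if this call is to a "GC leaf" - a callee which is statically
/// known never to reach a safepoint itself, so the caller's stack does not
/// need to be parseable at this call site.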
static bool isGCLeafFunction(const CallSite &CS) {
  Instruction *inst = CS.getInstruction();
  if (isa<IntrinsicInst>(inst)) {
    // Most LLVM intrinsics are things which can never take a safepoint.
    // As a result, we don't need to have the stack parsable at the
    // callsite. This is a highly useful optimization since intrinsic
    // calls are fairly prevalent, particularly in debug builds.
    return true;
  }

  // If this function is marked explicitly as a leaf call, we don't need to
  // place a safepoint for it. In fact, for correctness we *can't* in many
  // cases. Note: Indirect calls return Null for the called function,
  // these obviously aren't runtime functions with attributes
  // TODO: Support attributes on the call site as well.
  const Function *F = CS.getCalledFunction();
  bool isLeaf =
      F &&
      F->getFnAttribute("gc-leaf-function").getValueAsString().equals("true");
  if (isLeaf)
    return true;

  return false;
}
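
// Insert a poll: emit a call to gc.safepoint_poll immediately before 'term',
// inline that call, then scan the inlined body and report (via
// ParsePointsNeeded) every runtime call inside it which still needs to be
// turned into a statepoint.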
static void
InsertSafepointPoll(Instruction *term,
                    std::vector<CallSite> &ParsePointsNeeded /*rval*/) {
  Module *M = term->getParent()->getParent()->getParent();
  assert(M);

  // Inline the safepoint poll implementation - this will get all the branch,
  // control flow, etc.. Most importantly, it will introduce the actual slow
  // path call - where we need to insert a safepoint (parsepoint).
  FunctionType *ftype =
      FunctionType::get(Type::getVoidTy(M->getContext()), false);
  assert(ftype && "null?");
  // Note: This cast can fail if there's a function of the same name with a
  // different type inserted previously
  Function *F =
      dyn_cast<Function>(M->getOrInsertFunction("gc.safepoint_poll", ftype));
  assert(F && "void @gc.safepoint_poll() must be defined");
  assert(!F->empty() && "gc.safepoint_poll must be a non-empty function");
  CallInst *poll = CallInst::Create(F, "", term);

  // Record some information about the call site we're replacing
  BasicBlock *OrigBB = term->getParent();
  BasicBlock::iterator before(poll), after(poll);
  bool isBegin(false);
  if (before == term->getParent()->begin()) {
    isBegin = true;
  } else {
    before--;
  }
  after++;
  assert(after != poll->getParent()->end() && "must have successor");

  // do the actual inlining
  InlineFunctionInfo IFI;
  bool inlineStatus = InlineFunction(poll, IFI);
  assert(inlineStatus && "inline must succeed");
  (void)inlineStatus; // suppress warning in release-asserts

  // Check post conditions
  assert(IFI.StaticAllocas.empty() && "can't have allocs");

  std::vector<CallInst *> calls; // new calls
  std::set<BasicBlock *> BBs;    // new BBs + insertee
  // Include only the newly inserted instructions, Note: begin may not be valid
  // if we inserted to the beginning of the basic block
  BasicBlock::iterator start;
  if (isBegin) {
    start = OrigBB->begin();
  } else {
    start = before;
    start++;
  }

  // If your poll function includes an unreachable at the end, that's not
  // valid. Bugpoint likes to create this, so check for it.
  assert(isPotentiallyReachable(&*start, &*after, nullptr, nullptr) &&
         "malformed poll function");

  scanInlinedCode(&*(start), &*(after), calls, BBs);
  assert(!calls.empty() && "slow path not found for safepoint poll");

  // Record the fact we need a parsable state at the runtime call contained in
  // the poll function. This is required so that the runtime knows how to
  // parse the last frame when we actually take the safepoint (i.e. execute
  // the slow path)
  assert(ParsePointsNeeded.empty());
  for (size_t i = 0; i < calls.size(); i++) {

    // No safepoint needed or wanted
    if (!needsStatepoint(calls[i])) {
      continue;
    }

    // These are likely runtime calls. Should we assert that via calling
    // convention or something?
    ParsePointsNeeded.push_back(CallSite(calls[i]));
  }
  assert(ParsePointsNeeded.size() <= calls.size());
}
/// Replaces the given call site (Call or Invoke) with a gc.statepoint
/// intrinsic with an empty deoptimization arguments list. This does
/// NOT do explicit relocation for GC support.
static Value *ReplaceWithStatepoint(const CallSite &CS, /* to replace */
                                    Pass *P) {
  assert(CS.getInstruction()->getParent()->getParent()->getParent() &&
         "must be set");

  // TODO: technically, a pass is not allowed to get functions from within a
  // function pass since it might trigger a new function addition. Refactor
  // this logic out to the initialization of the pass. Doesn't appear to
  // matter in practice.

  // Then go ahead and use the builder to actually do the inserts. We insert
  // immediately before the previous instruction under the assumption that all
  // arguments will be available here. We can't insert afterwards since we may
  // be replacing a terminator.
  IRBuilder<> Builder(CS.getInstruction());

  // Note: The gc args are not filled in at this time, that's handled by
  // RewriteStatepointsForGC (which is currently under review).

  // Create the statepoint given all the arguments
  Instruction *Token = nullptr;

  uint64_t ID;
  uint32_t NumPatchBytes;

  AttributeSet OriginalAttrs = CS.getAttributes();
  Attribute AttrID =
      OriginalAttrs.getAttribute(AttributeSet::FunctionIndex, "statepoint-id");
  Attribute AttrNumPatchBytes = OriginalAttrs.getAttribute(
      AttributeSet::FunctionIndex, "statepoint-num-patch-bytes");

  AttrBuilder AttrsToRemove;
  bool HasID = AttrID.isStringAttribute() &&
               !AttrID.getValueAsString().getAsInteger(10, ID);

  if (HasID)
    AttrsToRemove.addAttribute("statepoint-id");

  bool HasNumPatchBytes =
      AttrNumPatchBytes.isStringAttribute() &&
      !AttrNumPatchBytes.getValueAsString().getAsInteger(10, NumPatchBytes);

  if (HasNumPatchBytes)
    AttrsToRemove.addAttribute("statepoint-num-patch-bytes");
  else
    NumPatchBytes = 0;

  OriginalAttrs = OriginalAttrs.removeAttributes(
      CS.getInstruction()->getContext(), AttributeSet::FunctionIndex,
      AttrsToRemove);

  Value *StatepointTarget = NumPatchBytes == 0
                                ? CS.getCalledValue()
                                : ConstantPointerNull::get(cast<PointerType>(
                                      CS.getCalledValue()->getType()));

  if (CS.isCall()) {
    CallInst *ToReplace = cast<CallInst>(CS.getInstruction());
    CallInst *Call = Builder.CreateGCStatepointCall(
        ID, NumPatchBytes, StatepointTarget,
        makeArrayRef(CS.arg_begin(), CS.arg_end()), None, None,
        "safepoint_token");
    Call->setTailCall(ToReplace->isTailCall());
    Call->setCallingConv(ToReplace->getCallingConv());

    // In case we can handle this set of attributes - set up function
    // attributes directly on statepoint and return attributes later for
    // gc_result intrinsic.
    Call->setAttributes(OriginalAttrs.getFnAttributes());

    Token = Call;

    // Put the following gc_result and gc_relocate calls immediately after
    // the old call (which we're about to delete).
    assert(ToReplace->getNextNode() && "not a terminator, must have next");
    Builder.SetInsertPoint(ToReplace->getNextNode());
    Builder.SetCurrentDebugLocation(ToReplace->getNextNode()->getDebugLoc());

  } else if (CS.isInvoke()) {
    InvokeInst *ToReplace = cast<InvokeInst>(CS.getInstruction());

    // Insert the new invoke into the old block. We'll remove the old one in a
    // moment at which point this will become the new terminator for the
    // original block.
    Builder.SetInsertPoint(ToReplace->getParent());
    InvokeInst *Invoke = Builder.CreateGCStatepointInvoke(
        ID, NumPatchBytes, StatepointTarget, ToReplace->getNormalDest(),
        ToReplace->getUnwindDest(), makeArrayRef(CS.arg_begin(), CS.arg_end()),
        None, None, "safepoint_token");

    Invoke->setCallingConv(ToReplace->getCallingConv());

    // In case we can handle this set of attributes - set up function
    // attributes directly on statepoint and return attributes later for
    // gc_result intrinsic.
    Invoke->setAttributes(OriginalAttrs.getFnAttributes());

    Token = Invoke;

    // We'll insert the gc.result into the normal block
    BasicBlock *NormalDest = ToReplace->getNormalDest();
    // Can not insert gc.result in case of phi nodes present.
    // Should have removed these cases prior to running this function
    assert(!isa<PHINode>(NormalDest->begin()));
    Instruction *IP = &*(NormalDest->getFirstInsertionPt());
    Builder.SetInsertPoint(IP);
  } else {
    llvm_unreachable("unexpected type of CallSite");
  }

  // Handle the return value of the original call - update all uses to use a
  // gc_result hanging off the statepoint node we just inserted

  // Only add the gc_result iff there is actually a used result
  if (!CS.getType()->isVoidTy() && !CS.getInstruction()->use_empty()) {
    std::string TakenName =
        CS.getInstruction()->hasName() ? CS.getInstruction()->getName() : "";
    CallInst *GCResult = Builder.CreateGCResult(Token, CS.getType(), TakenName);
    GCResult->setAttributes(OriginalAttrs.getRetAttributes());
    return GCResult;
  } else {
    // No return value for the call.