//===- BlockFrequencyInfoImpl.cpp - Block Frequency Info Implementation --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::bfi_detail;

#define DEBUG_TYPE "block-freq"

//===----------------------------------------------------------------------===//
//
// BlockMass implementation.
//
//===----------------------------------------------------------------------===//
ScaledNumber<uint64_t> BlockMass::toScaled() const {
  if (isFull())
    return ScaledNumber<uint64_t>(1, 0);
  return ScaledNumber<uint64_t>(getMass() + 1, -64);
}
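
// Illustrative note (ours, not from the original source): toScaled() maps a
// mass in [0, UINT64_MAX] onto [0.0, 1.0]. A full mass returns exactly 1.0
// through the early return; a half mass of 0x8000000000000000 becomes
// (2^63 + 1) * 2^-64, which is approximately 0.5.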

void BlockMass::dump() const { print(dbgs()); }

static char getHexDigit(int N) {
  assert(N < 16);
  if (N < 10)
    return '0' + N;
  return 'a' + N - 10;
}
raw_ostream &BlockMass::print(raw_ostream &OS) const {
  for (int Digits = 0; Digits < 16; ++Digits)
    OS << getHexDigit(Mass >> (60 - Digits * 4) & 0xf);
  return OS;
}

//===----------------------------------------------------------------------===//
//
// BlockFrequencyInfoImpl implementation.
//
//===----------------------------------------------------------------------===//
namespace {

typedef BlockFrequencyInfoImplBase::BlockNode BlockNode;
typedef BlockFrequencyInfoImplBase::Distribution Distribution;
typedef BlockFrequencyInfoImplBase::Distribution::WeightList WeightList;
typedef BlockFrequencyInfoImplBase::Scaled64 Scaled64;
typedef BlockFrequencyInfoImplBase::LoopData LoopData;
typedef BlockFrequencyInfoImplBase::Weight Weight;
typedef BlockFrequencyInfoImplBase::FrequencyData FrequencyData;

/// \brief Dithering mass distributer.
///
/// This class splits up a single mass into portions by weight, dithering to
/// spread out error. No mass is lost. The dithering precision depends on the
/// precision of the product of \a BlockMass and \a BranchProbability.
///
/// The distribution algorithm follows.
///
///  1. Initialize by saving the sum of the weights in \a RemWeight and the
///     mass to distribute in \a RemMass.
///
///  2. For each portion:
///
///      1. Construct a branch probability, P, as the portion's weight divided
///         by the current value of \a RemWeight.
///      2. Calculate the portion's mass as \a RemMass times P.
///      3. Update \a RemWeight and \a RemMass at each portion by subtracting
///         the current portion's weight and mass.
struct DitheringDistributer {
  uint32_t RemWeight;
  BlockMass RemMass;

  DitheringDistributer(Distribution &Dist, const BlockMass &Mass);

  BlockMass takeMass(uint32_t Weight);
};

} // end namespace
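
// Worked example (illustrative, not part of the original source): suppose
// Dist normalizes to Total = 4 with weights {3, 1} and a full mass is being
// distributed.
//
//   takeMass(3): P = 3/4, so roughly 3/4 of the mass is taken; afterwards
//                RemWeight == 1 and RemMass holds the remaining ~1/4.
//   takeMass(1): P = 1/1, so the entire remainder is taken.
//
// Because each portion is computed against the remaining weight and mass,
// rounding error is dithered across portions and no mass is lost.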

DitheringDistributer::DitheringDistributer(Distribution &Dist,
                                           const BlockMass &Mass) {
  Dist.normalize();
  RemWeight = Dist.Total;
  RemMass = Mass;
}

BlockMass DitheringDistributer::takeMass(uint32_t Weight) {
  assert(Weight && "invalid weight");
  assert(Weight <= RemWeight);
  BlockMass Mass = RemMass * BranchProbability(Weight, RemWeight);

  // Decrement totals (dither).
  RemWeight -= Weight;
  RemMass -= Mass;
  return Mass;
}

void Distribution::add(const BlockNode &Node, uint64_t Amount,
                       Weight::DistType Type) {
  assert(Amount && "invalid weight of 0");
  uint64_t NewTotal = Total + Amount;

  // Check for overflow. It should be impossible to overflow twice.
  bool IsOverflow = NewTotal < Total;
  assert(!(DidOverflow && IsOverflow) && "unexpected repeated overflow");
  DidOverflow |= IsOverflow;

  // Update the total.
  Total = NewTotal;

  // Save the weight.
  Weight W;
  W.TargetNode = Node;
  W.Amount = Amount;
  W.Type = Type;
  Weights.push_back(W);
}

static void combineWeight(Weight &W, const Weight &OtherW) {
  assert(OtherW.TargetNode.isValid());
  if (!W.Amount) {
    W = OtherW;
    return;
  }
  assert(W.Type == OtherW.Type);
  assert(W.TargetNode == OtherW.TargetNode);
  assert(W.Amount < W.Amount + OtherW.Amount && "Unexpected overflow");
  W.Amount += OtherW.Amount;
}

static void combineWeightsBySorting(WeightList &Weights) {
  // Sort so edges to the same node are adjacent.
  std::sort(Weights.begin(), Weights.end(),
            [](const Weight &L,
               const Weight &R) { return L.TargetNode < R.TargetNode; });

  // Combine adjacent edges.
  WeightList::iterator O = Weights.begin();
  for (WeightList::const_iterator I = O, L = O, E = Weights.end(); I != E;
       ++O, (I = L)) {
    *O = *I;

    // Find the adjacent weights to the same node.
    for (++L; L != E && I->TargetNode == L->TargetNode; ++L)
      combineWeight(*O, *L);
  }

  // Erase extra entries.
  Weights.erase(O, Weights.end());
}

static void combineWeightsByHashing(WeightList &Weights) {
  // Collect weights into a DenseMap.
  typedef DenseMap<BlockNode::IndexType, Weight> HashTable;
  HashTable Combined(NextPowerOf2(2 * Weights.size()));
  for (const Weight &W : Weights)
    combineWeight(Combined[W.TargetNode.Index], W);

  // Check whether anything changed.
  if (Weights.size() == Combined.size())
    return;

  // Fill in the new weights.
  Weights.clear();
  Weights.reserve(Combined.size());
  for (const auto &I : Combined)
    Weights.push_back(I.second);
}

static void combineWeights(WeightList &Weights) {
  // Use a hash table for many successors to keep this linear.
  if (Weights.size() > 128) {
    combineWeightsByHashing(Weights);
    return;
  }

  combineWeightsBySorting(Weights);
}

static uint64_t shiftRightAndRound(uint64_t N, int Shift) {
  assert(Shift >= 0);
  assert(Shift < 64);
  if (!Shift)
    return N;
  return (N >> Shift) + (UINT64_C(1) & N >> (Shift - 1));
}
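
// Worked example (illustrative, not from the original source): for N = 0b1011
// and Shift = 2, N >> Shift is 0b10 and the highest shifted-out bit,
// N >> (Shift - 1) & 1, is 1, so the result rounds up to 0b11. Rounding to
// nearest keeps the scaled-down weights from systematically drifting low.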

void Distribution::normalize() {
  // Early exit for termination nodes.
  if (Weights.empty())
    return;

  // Only bother if there are multiple successors.
  if (Weights.size() > 1)
    combineWeights(Weights);

  // Early exit when combined into a single successor.
  if (Weights.size() == 1) {
    Total = 1;
    Weights.front().Amount = 1;
    return;
  }

  // Determine how much to shift right so that the total fits into 32-bits.
  //
  // If we shift at all, shift by 1 extra. Otherwise, the lower limit of 1
  // for each weight can cause a 32-bit overflow.
  int Shift = 0;
  if (DidOverflow)
    Shift = 33;
  else if (Total > UINT32_MAX)
    Shift = 33 - countLeadingZeros(Total);

  // Early exit if nothing needs to be scaled.
  if (!Shift)
    return;

  // Recompute the total through accumulation (rather than shifting it) so that
  // it's accurate after shifting.
  Total = 0;

  // Sum the weights to each node and shift right if necessary.
  for (Weight &W : Weights) {
    // Scale down below UINT32_MAX. Since Shift is larger than necessary, we
    // can round here without concern about overflow.
    assert(W.TargetNode.isValid());
    W.Amount = std::max(UINT64_C(1), shiftRightAndRound(W.Amount, Shift));
    assert(W.Amount <= UINT32_MAX);

    // Update the total.
    Total += W.Amount;
  }
  assert(Total <= UINT32_MAX);
}
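
// Worked example (illustrative, not from the original source): with
// Total = 2^40, countLeadingZeros(Total) is 23, so Shift = 33 - 23 = 10.
// Every weight is divided by 2^10 (with rounding), bringing the recomputed
// total near 2^30, safely within 32 bits even after each weight is clamped
// up to at least 1.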

void BlockFrequencyInfoImplBase::clear() {
  // Swap with a default-constructed std::vector, since std::vector<>::clear()
  // does not actually clear heap storage.
  std::vector<FrequencyData>().swap(Freqs);
  std::vector<WorkingData>().swap(Working);
  Loops.clear();
}

/// \brief Clear all memory not needed downstream.
///
/// Releases all memory not used downstream. In particular, saves Freqs.
static void cleanup(BlockFrequencyInfoImplBase &BFI) {
  std::vector<FrequencyData> SavedFreqs(std::move(BFI.Freqs));
  BFI.clear();
  BFI.Freqs = std::move(SavedFreqs);
}

bool BlockFrequencyInfoImplBase::addToDist(Distribution &Dist,
                                           const LoopData *OuterLoop,
                                           const BlockNode &Pred,
                                           const BlockNode &Succ,
                                           uint64_t Weight) {
  if (!Weight)
    Weight = 1;

  auto isLoopHeader = [&OuterLoop](const BlockNode &Node) {
    return OuterLoop && OuterLoop->isHeader(Node);
  };

  BlockNode Resolved = Working[Succ.Index].getResolvedNode();

#ifndef NDEBUG
  auto debugSuccessor = [&](const char *Type) {
    dbgs() << "  =>"
           << " [" << Type << "] weight = " << Weight;
    if (!isLoopHeader(Resolved))
      dbgs() << ", succ = " << getBlockName(Succ);
    if (Resolved != Succ)
      dbgs() << ", resolved = " << getBlockName(Resolved);
    dbgs() << "\n";
  };
  (void)debugSuccessor;
#endif

  if (isLoopHeader(Resolved)) {
    DEBUG(debugSuccessor("backedge"));
    Dist.addBackedge(OuterLoop->getHeader(), Weight);
    return true;
  }

  if (Working[Resolved.Index].getContainingLoop() != OuterLoop) {
    DEBUG(debugSuccessor(" exit "));
    Dist.addExit(Resolved, Weight);
    return true;
  }

  if (Resolved < Pred) {
    if (!isLoopHeader(Pred)) {
      // If OuterLoop is an irreducible loop, we can't actually handle this.
      assert((!OuterLoop || !OuterLoop->isIrreducible()) &&
             "unhandled irreducible control flow");

      // Irreducible backedge. Abort.
      DEBUG(debugSuccessor("abort!!!"));
      return false;
    }

    // If "Pred" is a loop header, then this isn't really a backedge; rather,
    // OuterLoop must be irreducible. These false backedges can come only from
    // secondary loop headers.
    assert(OuterLoop && OuterLoop->isIrreducible() && !isLoopHeader(Resolved) &&
           "unhandled irreducible control flow");
  }

  DEBUG(debugSuccessor(" local "));
  Dist.addLocal(Resolved, Weight);
  return true;
}

bool BlockFrequencyInfoImplBase::addLoopSuccessorsToDist(
    const LoopData *OuterLoop, LoopData &Loop, Distribution &Dist) {
  // Copy the exit map into Dist.
  for (const auto &I : Loop.Exits)
    if (!addToDist(Dist, OuterLoop, Loop.getHeader(), I.first,
                   I.second.getMass()))
      // Irreducible backedge.
      return false;

  return true;
}

/// \brief Get the maximum allowed loop scale.
///
/// Gives the maximum number of estimated iterations allowed for a loop. Very
/// large numbers cause problems downstream (even within 64-bits).
static Scaled64 getMaxLoopScale() { return Scaled64(1, 12); }

/// \brief Compute the loop scale for a loop.
void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) {
  // Compute loop scale.
  DEBUG(dbgs() << "compute-loop-scale: " << getLoopName(Loop) << "\n");

  // LoopScale == 1 / ExitMass
  // ExitMass == HeadMass - BackedgeMass
  BlockMass ExitMass = BlockMass::getFull() - Loop.BackedgeMass;

  // Block scale stores the inverse of the scale.
  Loop.Scale = ExitMass.toScaled().inverse();

  DEBUG(dbgs() << " - exit-mass = " << ExitMass << " (" << BlockMass::getFull()
               << " - " << Loop.BackedgeMass << ")\n"
               << " - scale = " << Loop.Scale << "\n");

  if (Loop.Scale > getMaxLoopScale()) {
    Loop.Scale = getMaxLoopScale();
    DEBUG(dbgs() << " - reduced-to-max-scale: " << getMaxLoopScale() << "\n");
  }
}
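
// Worked example (illustrative, not from the original source): a loop whose
// backedge carries 3/4 of the header's mass has ExitMass = 1 - 3/4 = 1/4, so
// Scale = 4.0, i.e. roughly four expected iterations per entry. The cap from
// getMaxLoopScale() is Scaled64(1, 12) = 2^12 = 4096 estimated iterations.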

/// \brief Package up a loop.
void BlockFrequencyInfoImplBase::packageLoop(LoopData &Loop) {
  DEBUG(dbgs() << "packaging-loop: " << getLoopName(Loop) << "\n");

  // Clear the subloop exits to prevent quadratic memory usage.
  for (const BlockNode &M : Loop.Nodes) {
    if (auto *Loop = Working[M.Index].getPackagedLoop())
      Loop->Exits.clear();
    DEBUG(dbgs() << " - node: " << getBlockName(M.Index) << "\n");
  }
  Loop.IsPackaged = true;
}

void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source,
                                                LoopData *OuterLoop,
                                                Distribution &Dist) {
  BlockMass Mass = Working[Source.Index].getMass();
  DEBUG(dbgs() << " => mass: " << Mass << "\n");

  // Distribute mass to successors as laid out in Dist.
  DitheringDistributer D(Dist, Mass);

#ifndef NDEBUG
  auto debugAssign = [&](const BlockNode &T, const BlockMass &M,
                         const char *Desc) {
    dbgs() << " => assign " << M << " (" << D.RemMass << ")";
    if (Desc)
      dbgs() << " [" << Desc << "]";
    if (T.isValid())
      dbgs() << " to " << getBlockName(T);
    dbgs() << "\n";
  };
  (void)debugAssign;
#endif

  for (const Weight &W : Dist.Weights) {
    // Check for a local edge (non-backedge and non-exit).
    BlockMass Taken = D.takeMass(W.Amount);
    if (W.Type == Weight::Local) {
      Working[W.TargetNode.Index].getMass() += Taken;
      DEBUG(debugAssign(W.TargetNode, Taken, nullptr));
      continue;
    }

    // Backedges and exits only make sense if we're processing a loop.
    assert(OuterLoop && "backedge or exit outside of loop");

    // Check for a backedge.
    if (W.Type == Weight::Backedge) {
      OuterLoop->BackedgeMass += Taken;
      DEBUG(debugAssign(BlockNode(), Taken, "back"));
      continue;
    }

    // This must be an exit.
    assert(W.Type == Weight::Exit);
    OuterLoop->Exits.push_back(std::make_pair(W.TargetNode, Taken));
    DEBUG(debugAssign(W.TargetNode, Taken, "exit"));
  }
}

static void convertFloatingToInteger(BlockFrequencyInfoImplBase &BFI,
                                     const Scaled64 &Min, const Scaled64 &Max) {
  // Scale the Factor to a size that creates integers. Ideally, integers would
  // be scaled so that Max == UINT64_MAX so that they can be best
  // differentiated. However, the register allocator currently deals poorly
  // with large numbers. Instead, push Min up a little from 1 to give some
  // room to differentiate small, unequal numbers.
  //
  // TODO: fix issues downstream so that ScalingFactor can be
  // Scaled64(1,64)/Max.
  Scaled64 ScalingFactor = Min.inverse();
  if ((Max / Min).lg() < 60)
    ScalingFactor <<= 3;

  // Translate the floats to integers.
  DEBUG(dbgs() << "float-to-int: min = " << Min << ", max = " << Max
               << ", factor = " << ScalingFactor << "\n");
  for (size_t Index = 0; Index < BFI.Freqs.size(); ++Index) {
    Scaled64 Scaled = BFI.Freqs[Index].Scaled * ScalingFactor;
    BFI.Freqs[Index].Integer = std::max(UINT64_C(1), Scaled.toInt<uint64_t>());
    DEBUG(dbgs() << " - " << BFI.getBlockName(Index) << ": float = "
                 << BFI.Freqs[Index].Scaled << ", scaled = " << Scaled
                 << ", int = " << BFI.Freqs[Index].Integer << "\n");
  }
}

/// \brief Unwrap a loop package.
///
/// Visits all the members of a loop, adjusting their BlockData according to
/// the loop's pseudo-node.
static void unwrapLoop(BlockFrequencyInfoImplBase &BFI, LoopData &Loop) {
  DEBUG(dbgs() << "unwrap-loop-package: " << BFI.getLoopName(Loop)
               << ": mass = " << Loop.Mass << ", scale = " << Loop.Scale
               << "\n");
  Loop.Scale *= Loop.Mass.toScaled();
  Loop.IsPackaged = false;
  DEBUG(dbgs() << " => combined-scale = " << Loop.Scale << "\n");

  // Propagate the head scale through the loop. Since members are visited in
  // RPO, the head scale will be updated by the loop scale first, and then the
  // final head scale will be used for updating the rest of the members.
  for (const BlockNode &N : Loop.Nodes) {
    const auto &Working = BFI.Working[N.Index];
    Scaled64 &F = Working.isAPackage() ? Working.getPackagedLoop()->Scale
                                       : BFI.Freqs[N.Index].Scaled;
    Scaled64 New = Loop.Scale * F;
    DEBUG(dbgs() << " - " << BFI.getBlockName(N) << ": " << F << " => " << New
                 << "\n");
    F = New;
  }
}
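
// Illustrative note (ours, not from the original source): if a packaged loop
// received Mass = 1/2 from its parent and computed Scale = 4.0 from its
// backedge, the combined scale is 2.0, and each member's loop-local frequency
// (or nested package scale) is multiplied by 2.0 on unwrapping.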

void BlockFrequencyInfoImplBase::unwrapLoops() {
  // Set initial frequencies from loop-local masses.
  for (size_t Index = 0; Index < Working.size(); ++Index)
    Freqs[Index].Scaled = Working[Index].Mass.toScaled();

  for (LoopData &Loop : Loops)
    unwrapLoop(*this, Loop);
}

void BlockFrequencyInfoImplBase::finalizeMetrics() {
  // Unwrap loop packages in reverse post-order, tracking min and max
  // frequencies.
  auto Min = Scaled64::getLargest();
  auto Max = Scaled64::getZero();
  for (size_t Index = 0; Index < Working.size(); ++Index) {
    // Update min/max scale.
    Min = std::min(Min, Freqs[Index].Scaled);
    Max = std::max(Max, Freqs[Index].Scaled);
  }

  // Convert to integers.
  convertFloatingToInteger(*this, Min, Max);

  // Clean up data structures.
  cleanup(*this);

  // Print out the final stats.
  DEBUG(dump());
}

BlockFrequency
BlockFrequencyInfoImplBase::getBlockFreq(const BlockNode &Node) const {
  if (!Node.isValid())
    return 0;
  return Freqs[Node.Index].Integer;
}
Scaled64
BlockFrequencyInfoImplBase::getFloatingBlockFreq(const BlockNode &Node) const {
  if (!Node.isValid())
    return Scaled64::getZero();
  return Freqs[Node.Index].Scaled;
}

std::string
BlockFrequencyInfoImplBase::getBlockName(const BlockNode &Node) const {
  return std::string();
}
std::string
BlockFrequencyInfoImplBase::getLoopName(const LoopData &Loop) const {
  return getBlockName(Loop.getHeader()) + (Loop.isIrreducible() ? "**" : "*");
}

raw_ostream &
BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS,
                                           const BlockNode &Node) const {
  return OS << getFloatingBlockFreq(Node);
}

raw_ostream &
BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS,
                                           const BlockFrequency &Freq) const {
  Scaled64 Block(Freq.getFrequency(), 0);
  Scaled64 Entry(getEntryFreq(), 0);

  return OS << Block / Entry;
}

void IrreducibleGraph::addNodesInLoop(const BFIBase::LoopData &OuterLoop) {
  Start = OuterLoop.getHeader();
  Nodes.reserve(OuterLoop.Nodes.size());
  for (auto N : OuterLoop.Nodes)
    addNode(N);
  indexNodes();
}
void IrreducibleGraph::addNodesInFunction() {
  Start = 0;
  for (uint32_t Index = 0; Index < BFI.Working.size(); ++Index)
    if (!BFI.Working[Index].isPackaged())
      addNode(Index);
  indexNodes();
}
void IrreducibleGraph::indexNodes() {
  for (auto &I : Nodes)
    Lookup[I.Node.Index] = &I;
}
void IrreducibleGraph::addEdge(IrrNode &Irr, const BlockNode &Succ,
                               const BFIBase::LoopData *OuterLoop) {
  if (OuterLoop && OuterLoop->isHeader(Succ))
    return;
  auto L = Lookup.find(Succ.Index);
  if (L == Lookup.end())
    return;
  IrrNode &SuccIrr = *L->second;
  Irr.Edges.push_back(&SuccIrr);
  SuccIrr.Edges.push_front(&Irr);
  ++SuccIrr.NumIn;
}

namespace llvm {
template <> struct GraphTraits<IrreducibleGraph> {
  typedef bfi_detail::IrreducibleGraph GraphT;

  typedef const GraphT::IrrNode NodeType;
  typedef GraphT::IrrNode::iterator ChildIteratorType;

  static const NodeType *getEntryNode(const GraphT &G) {
    return G.StartIrr;
  }
  static ChildIteratorType child_begin(NodeType *N) { return N->succ_begin(); }
  static ChildIteratorType child_end(NodeType *N) { return N->succ_end(); }
};
}

/// \brief Find extra irreducible headers.
///
/// Find entry blocks and other blocks with backedges, which exist when \c G
/// contains irreducible sub-SCCs.
static void findIrreducibleHeaders(
    const BlockFrequencyInfoImplBase &BFI,
    const IrreducibleGraph &G,
    const std::vector<const IrreducibleGraph::IrrNode *> &SCC,
    LoopData::NodeList &Headers, LoopData::NodeList &Others) {
  // Map from nodes in the SCC to whether it's an entry block.
  SmallDenseMap<const IrreducibleGraph::IrrNode *, bool, 8> InSCC;

  // InSCC also acts as the set of nodes in the graph. Seed it.
  for (const auto *I : SCC)
    InSCC[I] = false;

  for (auto I = InSCC.begin(), E = InSCC.end(); I != E; ++I) {
    auto &Irr = *I->first;
    for (const auto *P : make_range(Irr.pred_begin(), Irr.pred_end())) {
      if (InSCC.count(P))
        continue;

      // This is an entry block.
      I->second = true;
      Headers.push_back(Irr.Node);
      DEBUG(dbgs() << " => entry = " << BFI.getBlockName(Irr.Node) << "\n");
      break;
    }
  }
  assert(Headers.size() >= 2 && "Should be irreducible");
  if (Headers.size() == InSCC.size()) {
    // Every block is a header.
    std::sort(Headers.begin(), Headers.end());
    return;
  }

  // Look for extra headers from irreducible sub-SCCs.
  for (const auto &I : InSCC) {
    // Entry blocks are already headers.
    if (I.second)
      continue;

    auto &Irr = *I.first;
    for (const auto *P : make_range(Irr.pred_begin(), Irr.pred_end())) {
      // Skip forward edges.
      if (P->Node < Irr.Node)
        continue;

      // Skip predecessors from entry blocks. These can have inverted
      // ordering.
      if (InSCC.lookup(P))
        continue;

      // Store the extra header.
      Headers.push_back(Irr.Node);
      DEBUG(dbgs() << " => extra = " << BFI.getBlockName(Irr.Node) << "\n");
      break;
    }
    if (Headers.back() == Irr.Node)
      // Added this as a header.
      continue;

    // This is not a header.
    Others.push_back(Irr.Node);
    DEBUG(dbgs() << " => other = " << BFI.getBlockName(Irr.Node) << "\n");
  }
  std::sort(Headers.begin(), Headers.end());
  std::sort(Others.begin(), Others.end());
}
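
// Illustrative example (ours, not from the original source): in a CFG with
// edges A->B, A->C, B->C, and C->B, the SCC {B, C} has two entry blocks,
// since both B and C have a predecessor (A) outside the SCC; both become
// headers of the synthetic irreducible loop.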

static void createIrreducibleLoop(
    BlockFrequencyInfoImplBase &BFI, const IrreducibleGraph &G,
    LoopData *OuterLoop, std::list<LoopData>::iterator Insert,
    const std::vector<const IrreducibleGraph::IrrNode *> &SCC) {
  // Translate the SCC into RPO.
  DEBUG(dbgs() << " - found-scc\n");

  LoopData::NodeList Headers;
  LoopData::NodeList Others;
  findIrreducibleHeaders(BFI, G, SCC, Headers, Others);

  auto Loop = BFI.Loops.emplace(Insert, OuterLoop, Headers.begin(),
                                Headers.end(), Others.begin(), Others.end());

  // Update loop hierarchy.
  for (const auto &N : Loop->Nodes)
    if (BFI.Working[N.Index].isLoopHeader())
      BFI.Working[N.Index].Loop->Parent = &*Loop;
    else
      BFI.Working[N.Index].Loop = &*Loop;
}

iterator_range<std::list<LoopData>::iterator>
BlockFrequencyInfoImplBase::analyzeIrreducible(
    const IrreducibleGraph &G, LoopData *OuterLoop,
    std::list<LoopData>::iterator Insert) {
  assert((OuterLoop == nullptr) == (Insert == Loops.begin()));
  auto Prev = OuterLoop ? std::prev(Insert) : Loops.end();

  for (auto I = scc_begin(G); !I.isAtEnd(); ++I) {
    if (I->size() < 2)
      continue;

    // Translate the SCC into RPO.
    createIrreducibleLoop(*this, G, OuterLoop, Insert, *I);
  }

  if (OuterLoop)
    return make_range(std::next(Prev), Insert);
  return make_range(Loops.begin(), Insert);
}

void
BlockFrequencyInfoImplBase::updateLoopWithIrreducible(LoopData &OuterLoop) {
  OuterLoop.Exits.clear();
  OuterLoop.BackedgeMass = BlockMass::getEmpty();
  auto O = OuterLoop.Nodes.begin() + 1;
  for (auto I = O, E = OuterLoop.Nodes.end(); I != E; ++I)
    if (!Working[I->Index].isPackaged())
      *O++ = *I;
  OuterLoop.Nodes.erase(O, OuterLoop.Nodes.end());
}