1 //==- BlockFrequencyInfoImpl.h - Block Frequency Implementation -*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // Shared implementation of BlockFrequency for IR and Machine Instructions.
11 // See the documentation below for BlockFrequencyInfoImpl for details.
13 //===----------------------------------------------------------------------===//
15 #ifndef LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
16 #define LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/PostOrderIterator.h"
20 #include "llvm/ADT/iterator_range.h"
21 #include "llvm/IR/BasicBlock.h"
22 #include "llvm/Support/BlockFrequency.h"
23 #include "llvm/Support/BranchProbability.h"
24 #include "llvm/Support/Debug.h"
25 #include "llvm/Support/ScaledNumber.h"
26 #include "llvm/Support/raw_ostream.h"
32 #define DEBUG_TYPE "block-freq"
34 //===----------------------------------------------------------------------===//
36 // ScaledNumber definition.
38 // TODO: Move to include/llvm/Support/ScaledNumber.h
40 //===----------------------------------------------------------------------===//
43 class ScaledNumberBase {
// Non-template helpers shared by all ScaledNumber<DigitsT> instantiations.
45 static const int DefaultPrecision = 10;
47 static void dump(uint64_t D, int16_t E, int Width);
48 static raw_ostream &print(raw_ostream &OS, uint64_t D, int16_t E, int Width,
50 static std::string toString(uint64_t D, int16_t E, int Width,
52 static int countLeadingZeros32(uint32_t N) { return countLeadingZeros(N); }
53 static int countLeadingZeros64(uint64_t N) { return countLeadingZeros(N); }
// Halve N, rounding up: the (N & 1) term adds back the dropped low bit.
54 static uint64_t getHalf(uint64_t N) { return (N >> 1) + (N & 1); }
// Split a signed value into (magnitude, is-negative).  INT64_MIN has no
// representable positive counterpart, so its magnitude is built as 1 << 63.
56 static std::pair<uint64_t, bool> splitSigned(int64_t N) {
58 return std::make_pair(N, false);
59 uint64_t Unsigned = N == INT64_MIN ? UINT64_C(1) << 63 : uint64_t(-N);
60 return std::make_pair(Unsigned, true);
// Inverse of splitSigned(): rejoin (magnitude, sign), saturating to
// INT64_MIN/INT64_MAX when the magnitude doesn't fit in int64_t.
62 static int64_t joinSigned(uint64_t U, bool IsNeg) {
63 if (U > uint64_t(INT64_MAX))
64 return IsNeg ? INT64_MIN : INT64_MAX;
65 return IsNeg ? -int64_t(U) : int64_t(U);
69 /// \brief Simple representation of a scaled number.
71 /// ScaledNumber is a number represented by digits and a scale. It uses simple
72 /// saturation arithmetic and every operation is well-defined for every value.
73 /// It's somewhat similar in behaviour to a soft-float, but is *not* a
74 /// replacement for one. If you're doing numerics, look at \a APFloat instead.
75 /// Nevertheless, we've found these semantics useful for modelling certain cost
78 /// The number is split into a signed scale and unsigned digits. The number
79 /// represented is \c getDigits()*2^getScale(). In this way, the digits are
80 /// much like the mantissa in the x87 long double, but there is no canonical
81 /// form so the same number can be represented by many bit representations.
83 /// ScaledNumber is templated on the underlying integer type for digits, which
84 /// is expected to be unsigned.
86 /// Unlike APFloat, ScaledNumber does not model architecture floating point
87 /// behaviour -- while this might make it a little faster and easier to reason
88 /// about, it certainly makes it more dangerous for general numerics.
90 /// ScaledNumber is totally ordered. However, there is no canonical form, so
91 /// there are multiple representations of most scalars. E.g.:
93 /// ScaledNumber(8u, 0) == ScaledNumber(4u, 1)
94 /// ScaledNumber(4u, 1) == ScaledNumber(2u, 2)
95 /// ScaledNumber(2u, 2) == ScaledNumber(1u, 3)
97 /// ScaledNumber implements most arithmetic operations. Precision is kept
98 /// where possible. Uses simple saturation arithmetic, so that operations
99 /// saturate to 0.0 or getLargest() rather than under or overflowing. It has
100 /// some extra arithmetic for unit inversion. 0.0/0.0 is defined to be 0.0.
101 /// Any other division by 0.0 is defined to be getLargest().
103 /// As a convenience for modifying the exponent, left and right shifting are
104 /// both implemented, and both interpret negative shifts as positive shifts in
105 /// the opposite direction.
107 /// Scales are limited to the range accepted by x87 long double. This makes
108 /// it trivial to add functionality to convert to APFloat (this is already
109 /// relied on for the implementation of printing).
111 /// Possible (and conflicting) future directions:
113 /// 1. Turn this into a wrapper around \a APFloat.
114 /// 2. Share the algorithm implementations with \a APFloat.
115 /// 3. Allow \a ScaledNumber to represent a signed number.
116 template <class DigitsT> class ScaledNumber : ScaledNumberBase {
118 static_assert(!std::numeric_limits<DigitsT>::is_signed,
119 "only unsigned floats supported");
121 typedef DigitsT DigitsType;
124 typedef std::numeric_limits<DigitsType> DigitsLimits;
// Number of bits in the digits type; capped at 64 below.
126 static const int Width = sizeof(DigitsType) * 8;
127 static_assert(Width <= 64, "invalid integer width for digits");
// Zero-initialized by default.
134 ScaledNumber() : Digits(0), Scale(0) {}
136 ScaledNumber(DigitsType Digits, int16_t Scale)
137 : Digits(Digits), Scale(Scale) {}
// Adopt a (digits, scale) pair, e.g. as returned by the ScaledNumbers helpers.
140 ScaledNumber(const std::pair<uint64_t, int16_t> &X)
141 : Digits(X.first), Scale(X.second) {}
144 static ScaledNumber getZero() { return ScaledNumber(0, 0); }
145 static ScaledNumber getOne() { return ScaledNumber(1, 0); }
// Largest representable value: all digit bits set at the maximum scale.
146 static ScaledNumber getLargest() {
147 return ScaledNumber(DigitsLimits::max(), ScaledNumbers::MaxScale);
// Build from a 64-bit integer, rounding into Width digits if necessary.
149 static ScaledNumber get(uint64_t N) { return adjustToWidth(N, 0); }
150 static ScaledNumber getInverse(uint64_t N) {
151 return get(N).invert();
// The fraction N/D, computed at full precision by getQuotient().
153 static ScaledNumber getFraction(DigitsType N, DigitsType D) {
154 return getQuotient(N, D);
157 int16_t getScale() const { return Scale; }
158 DigitsType getDigits() const { return Digits; }
160 /// \brief Convert to the given integer type.
162 /// Convert to \c IntT using simple saturating arithmetic, truncating if
164 template <class IntT> IntT toInt() const;
166 bool isZero() const { return !Digits; }
167 bool isLargest() const { return *this == getLargest(); }
// The value is 1.0 exactly when Digits == 2^-Scale, which is only possible
// for Scale in (-Width, 0].
169 if (Scale > 0 || Scale <= -Width)
171 return Digits == DigitsType(1) << -Scale;
174 /// \brief The log base 2, rounded.
176 /// Get the lg of the scalar. lg 0 is defined to be INT32_MIN.
177 int32_t lg() const { return ScaledNumbers::getLg(Digits, Scale); }
179 /// \brief The log base 2, rounded towards INT32_MIN.
181 /// Get the lg floor. lg 0 is defined to be INT32_MIN.
182 int32_t lgFloor() const { return ScaledNumbers::getLgFloor(Digits, Scale); }
184 /// \brief The log base 2, rounded towards INT32_MAX.
186 /// Get the lg ceiling. lg 0 is defined to be INT32_MIN.
187 int32_t lgCeiling() const {
188 return ScaledNumbers::getLgCeiling(Digits, Scale);
// All relational operators delegate to the three-way compare() below.
191 bool operator==(const ScaledNumber &X) const { return compare(X) == 0; }
192 bool operator<(const ScaledNumber &X) const { return compare(X) < 0; }
193 bool operator!=(const ScaledNumber &X) const { return compare(X) != 0; }
194 bool operator>(const ScaledNumber &X) const { return compare(X) > 0; }
195 bool operator<=(const ScaledNumber &X) const { return compare(X) <= 0; }
196 bool operator>=(const ScaledNumber &X) const { return compare(X) >= 0; }
198 bool operator!() const { return isZero(); }
200 /// \brief Convert to a decimal representation in a string.
202 /// Convert to a string. Uses scientific notation for very large/small
203 /// numbers. Scientific notation is used roughly for numbers outside of the
204 /// range 2^-64 through 2^64.
206 /// \c Precision indicates the number of decimal digits of precision to use;
207 /// 0 requests the maximum available.
209 /// As a special case to make debugging easier, if the number is small enough
210 /// to convert without scientific notation and has more than \c Precision
211 /// digits before the decimal place, it's printed accurately to the first
212 /// digit past zero. E.g., assuming 10 digits of precision:
214 /// 98765432198.7654... => 98765432198.8
215 /// 8765432198.7654... => 8765432198.8
216 /// 765432198.7654... => 765432198.8
217 /// 65432198.7654... => 65432198.77
218 /// 5432198.7654... => 5432198.765
219 std::string toString(unsigned Precision = DefaultPrecision) {
220 return ScaledNumberBase::toString(Digits, Scale, Width, Precision);
223 /// \brief Print a decimal representation.
225 /// Print a string. See toString for documentation.
226 raw_ostream &print(raw_ostream &OS,
227 unsigned Precision = DefaultPrecision) const {
228 return ScaledNumberBase::print(OS, Digits, Scale, Width, Precision);
230 void dump() const { return ScaledNumberBase::dump(Digits, Scale, Width); }
// Saturating add: getSum() can push the scale past MaxScale, in which case
// the result clamps to getLargest().
232 ScaledNumber &operator+=(const ScaledNumber &X) {
233 std::tie(Digits, Scale) =
234 ScaledNumbers::getSum(Digits, Scale, X.Digits, X.Scale);
235 // Check for exponent past MaxScale.
236 if (Scale > ScaledNumbers::MaxScale)
237 *this = getLargest();
240 ScaledNumber &operator-=(const ScaledNumber &X) {
241 std::tie(Digits, Scale) =
242 ScaledNumbers::getDifference(Digits, Scale, X.Digits, X.Scale);
245 ScaledNumber &operator*=(const ScaledNumber &X);
246 ScaledNumber &operator/=(const ScaledNumber &X);
247 ScaledNumber &operator<<=(int16_t Shift) {
251 ScaledNumber &operator>>=(int16_t Shift) {
// Shift helpers take 32-bit amounts so combined scales can't overflow.
257 void shiftLeft(int32_t Shift);
258 void shiftRight(int32_t Shift);
260 /// \brief Adjust two floats to have matching exponents.
262 /// Adjust \c this and \c X to have matching exponents. Returns the new \c X
263 /// by value. Does nothing if \a isZero() for either.
265 /// The value that compares smaller will lose precision, and possibly become
267 ScaledNumber matchScales(ScaledNumber X) {
268 ScaledNumbers::matchScales(Digits, Scale, X.Digits, X.Scale);
273 /// \brief Scale a large number accurately.
275 /// Scale N (multiply it by this). Uses full precision multiplication, even
276 /// if Width is smaller than 64, so information is not lost.
277 uint64_t scale(uint64_t N) const;
278 uint64_t scaleByInverse(uint64_t N) const {
279 // TODO: implement directly, rather than relying on inverse. Inverse is
281 return inverse().scale(N);
// Signed variants: strip the sign, scale the magnitude, restore the sign
// with saturation (see ScaledNumberBase::splitSigned/joinSigned).
283 int64_t scale(int64_t N) const {
284 std::pair<uint64_t, bool> Unsigned = splitSigned(N);
285 return joinSigned(scale(Unsigned.first), Unsigned.second);
287 int64_t scaleByInverse(int64_t N) const {
288 std::pair<uint64_t, bool> Unsigned = splitSigned(N);
289 return joinSigned(scaleByInverse(Unsigned.first), Unsigned.second);
// Three-way comparison: negative/zero/positive like memcmp.
292 int compare(const ScaledNumber &X) const {
293 return ScaledNumbers::compare(Digits, Scale, X.Digits, X.Scale);
295 int compareTo(uint64_t N) const {
296 ScaledNumber Scaled = get(N);
297 int Compare = compare(Scaled);
298 if (Width == 64 || Compare != 0)
301 // Check for precision loss. We know *this == RoundTrip.
302 uint64_t RoundTrip = Scaled.template toInt<uint64_t>();
303 return N == RoundTrip ? 0 : RoundTrip < N ? -1 : 1;
// Any negative N compares less than an (unsigned) ScaledNumber.
305 int compareTo(int64_t N) const { return N < 0 ? 1 : compareTo(uint64_t(N)); }
// Reciprocal: 1/x, computed via the division operator.
307 ScaledNumber &invert() { return *this = ScaledNumber::get(1) / *this; }
308 ScaledNumber inverse() const { return ScaledNumber(*this).invert(); }
311 static ScaledNumber getProduct(DigitsType LHS, DigitsType RHS) {
312 return ScaledNumbers::getProduct(LHS, RHS);
314 static ScaledNumber getQuotient(DigitsType Dividend, DigitsType Divisor) {
315 return ScaledNumbers::getQuotient(Dividend, Divisor);
// countLeadingZeros specialized to Width-bit digits (adjusts the 32-bit
// count when Width < 32).
318 static int countLeadingZerosWidth(DigitsType Digits) {
320 return countLeadingZeros64(Digits);
322 return countLeadingZeros32(Digits);
323 return countLeadingZeros32(Digits) + Width - 32;
326 /// \brief Adjust a number to width, rounding up if necessary.
328 /// Should only be called for \c Shift close to zero.
330 /// \pre Shift >= MinScale && Shift + 64 <= MaxScale.
331 static ScaledNumber adjustToWidth(uint64_t N, int32_t Shift) {
332 assert(Shift >= ScaledNumbers::MinScale && "Shift should be close to 0");
333 assert(Shift <= ScaledNumbers::MaxScale - 64 &&
334 "Shift should be close to 0");
335 auto Adjusted = ScaledNumbers::getAdjusted<DigitsT>(N, Shift);
// Round to nearest: bump the digits by one when Round is set.
339 static ScaledNumber getRounded(ScaledNumber P, bool Round) {
344 return ScaledNumbers::getRounded(P.Digits, P.Scale, Round);
// Define each binary operator in terms of its compound-assignment form:
// copy the left operand, then apply `base` with the right operand.
348 #define SCALED_NUMBER_BOP(op, base) \
349 template <class DigitsT> \
350 ScaledNumber<DigitsT> operator op(const ScaledNumber<DigitsT> &L, \
351 const ScaledNumber<DigitsT> &R) { \
352 return ScaledNumber<DigitsT>(L) base R; \
354 SCALED_NUMBER_BOP(+, += )
355 SCALED_NUMBER_BOP(-, -= )
356 SCALED_NUMBER_BOP(*, *= )
357 SCALED_NUMBER_BOP(/, /= )
358 SCALED_NUMBER_BOP(<<, <<= )
359 SCALED_NUMBER_BOP(>>, >>= )
360 #undef SCALED_NUMBER_BOP
// Stream insertion for ScaledNumber; prints with 10 decimal digits of
// precision (see ScaledNumber::print).
362 template <class DigitsT>
363 raw_ostream &operator<<(raw_ostream &OS, const ScaledNumber<DigitsT> &X) {
364 return X.print(OS, 10);
// Mixed-type comparisons between ScaledNumber and plain integers, in both
// argument orders.  T1 is the user-facing type; T2 is the widened type that
// compareTo() actually accepts (32-bit arguments are promoted to 64-bit).
367 #define SCALED_NUMBER_COMPARE_TO_TYPE(op, T1, T2) \
368 template <class DigitsT> \
369 bool operator op(const ScaledNumber<DigitsT> &L, T1 R) { \
370 return L.compareTo(T2(R)) op 0; \
372 template <class DigitsT> \
373 bool operator op(T1 L, const ScaledNumber<DigitsT> &R) { \
374 return 0 op R.compareTo(T2(L)); \
376 #define SCALED_NUMBER_COMPARE_TO(op) \
377 SCALED_NUMBER_COMPARE_TO_TYPE(op, uint64_t, uint64_t) \
378 SCALED_NUMBER_COMPARE_TO_TYPE(op, uint32_t, uint64_t) \
379 SCALED_NUMBER_COMPARE_TO_TYPE(op, int64_t, int64_t) \
380 SCALED_NUMBER_COMPARE_TO_TYPE(op, int32_t, int64_t)
381 SCALED_NUMBER_COMPARE_TO(< )
382 SCALED_NUMBER_COMPARE_TO(> )
383 SCALED_NUMBER_COMPARE_TO(== )
384 SCALED_NUMBER_COMPARE_TO(!= )
385 SCALED_NUMBER_COMPARE_TO(<= )
386 SCALED_NUMBER_COMPARE_TO(>= )
387 #undef SCALED_NUMBER_COMPARE_TO
388 #undef SCALED_NUMBER_COMPARE_TO_TYPE
// Multiply N by this value.  When N fits in DigitsType (or digits are already
// 64-bit), a direct multiply suffices; otherwise re-express this number with
// 64-bit digits so no precision is lost, and scale there.
390 template <class DigitsT>
391 uint64_t ScaledNumber<DigitsT>::scale(uint64_t N) const {
392 if (Width == 64 || N <= DigitsLimits::max())
393 return (get(N) * *this).template toInt<uint64_t>();
395 // Defer to the 64-bit version.
396 return ScaledNumber<uint64_t>(Digits, Scale).scale(N);
// Saturating conversion to IntT.  Values at or above IntT's maximum clamp to
// Limits::max(); the asserts guard the digit shifts against shifting by the
// full bit-width of IntT (which would be undefined behavior).
399 template <class DigitsT>
400 template <class IntT>
401 IntT ScaledNumber<DigitsT>::toInt() const {
402 typedef std::numeric_limits<IntT> Limits;
405 if (*this >= Limits::max())
406 return Limits::max();
410 assert(size_t(Scale) < sizeof(IntT) * 8);
414 assert(size_t(-Scale) < sizeof(IntT) * 8);
// Multiply: scales add.  The sum is computed in 32 bits so two int16_t
// scales cannot overflow; getProduct() multiplies the digits at full
// precision, and the final shift (which saturates) folds the scales back in.
420 template <class DigitsT>
421 ScaledNumber<DigitsT> &ScaledNumber<DigitsT>::
422 operator*=(const ScaledNumber &X) {
428 // Save the exponents.
429 int32_t Scales = int32_t(Scale) + int32_t(X.Scale);
431 // Get the raw product.
432 *this = getProduct(Digits, X.Digits);
434 // Combine with exponents.
435 return *this <<= Scales;
// Divide: scales subtract.  Division by zero saturates to getLargest() (per
// the class contract above; 0/0 is handled earlier as 0).  getQuotient()
// divides the digits at full precision before the scales are reapplied.
437 template <class DigitsT>
438 ScaledNumber<DigitsT> &ScaledNumber<DigitsT>::
439 operator/=(const ScaledNumber &X) {
443 return *this = getLargest();
445 // Save the exponents.
446 int32_t Scales = int32_t(Scale) - int32_t(X.Scale);
448 // Get the raw quotient.
449 *this = getQuotient(Digits, X.Digits);
451 // Combine with exponents.
452 return *this <<= Scales;
// Multiply by 2^Shift.  Prefer adjusting the scale (free); only when the
// scale saturates at MaxScale are the digits themselves shifted, saturating
// to getLargest() if high bits would be shifted out.
454 template <class DigitsT> void ScaledNumber<DigitsT>::shiftLeft(int32_t Shift) {
455 if (!Shift || isZero())
// INT32_MIN can't be negated below/elsewhere without overflow.
457 assert(Shift != INT32_MIN);
463 // Shift as much as we can in the exponent.
464 int32_t ScaleShift = std::min(Shift, ScaledNumbers::MaxScale - Scale);
466 if (ScaleShift == Shift)
469 // Check this late, since it's rare.
473 // Shift the digits themselves.
475 if (Shift > countLeadingZerosWidth(Digits)) {
477 *this = getLargest();
// Divide by 2^Shift.  Mirrors shiftLeft(): adjust the scale first (down to
// MinScale), then shift the digits; a digit shift of Width or more leaves
// nothing behind.
485 template <class DigitsT> void ScaledNumber<DigitsT>::shiftRight(int32_t Shift) {
486 if (!Shift || isZero())
// INT32_MIN can't be negated without overflow.
488 assert(Shift != INT32_MIN);
494 // Shift as much as we can in the exponent.
495 int32_t ScaleShift = std::min(Shift, Scale - ScaledNumbers::MinScale);
497 if (ScaleShift == Shift)
500 // Shift the digits themselves.
502 if (Shift >= Width) {
// Mark ScaledNumber as POD-like so LLVM containers can use bit-copy
// optimizations for it.
512 template <class T> struct isPodLike<ScaledNumber<T>> {
513 static const bool value = true;
517 //===----------------------------------------------------------------------===//
519 // BlockMass definition.
521 // TODO: Make this private to BlockFrequencyInfoImpl or delete.
523 //===----------------------------------------------------------------------===//
526 /// \brief Mass of a block.
528 /// This class implements a sort of fixed-point fraction always between 0.0 and
529 /// 1.0. getMass() == UINT64_MAX indicates a value of 1.0.
531 /// Masses can be added and subtracted. Simple saturation arithmetic is used,
532 /// so arithmetic operations never overflow or underflow.
534 /// Masses can be multiplied. Multiplication treats full mass as 1.0 and uses
535 /// an inexpensive floating-point algorithm that's off-by-one (almost, but not
536 /// quite, maximum precision).
538 /// Masses can be scaled by \a BranchProbability at maximum precision.
// Default-constructed mass is empty (0.0).
543 BlockMass() : Mass(0) {}
544 explicit BlockMass(uint64_t Mass) : Mass(Mass) {}
546 static BlockMass getEmpty() { return BlockMass(); }
// Full mass (1.0) is represented by UINT64_MAX.
547 static BlockMass getFull() { return BlockMass(UINT64_MAX); }
549 uint64_t getMass() const { return Mass; }
551 bool isFull() const { return Mass == UINT64_MAX; }
552 bool isEmpty() const { return !Mass; }
554 bool operator!() const { return isEmpty(); }
556 /// \brief Add another mass.
558 /// Adds another mass, saturating at \a isFull() rather than overflowing.
559 BlockMass &operator+=(const BlockMass &X) {
560 uint64_t Sum = Mass + X.Mass;
// Unsigned wraparound check: Sum < Mass iff the addition overflowed.
561 Mass = Sum < Mass ? UINT64_MAX : Sum;
565 /// \brief Subtract another mass.
567 /// Subtracts another mass, saturating at \a isEmpty() rather than
569 BlockMass &operator-=(const BlockMass &X) {
570 uint64_t Diff = Mass - X.Mass;
// Diff > Mass iff the subtraction wrapped below zero.
571 Mass = Diff > Mass ? 0 : Diff;
// Scale by a branch probability; BranchProbability::scale() is full
// precision, so no mass is lost beyond integer truncation.
575 BlockMass &operator*=(const BranchProbability &P) {
576 Mass = P.scale(Mass);
580 bool operator==(const BlockMass &X) const { return Mass == X.Mass; }
581 bool operator!=(const BlockMass &X) const { return Mass != X.Mass; }
582 bool operator<=(const BlockMass &X) const { return Mass <= X.Mass; }
583 bool operator>=(const BlockMass &X) const { return Mass >= X.Mass; }
584 bool operator<(const BlockMass &X) const { return Mass < X.Mass; }
585 bool operator>(const BlockMass &X) const { return Mass > X.Mass; }
587 /// \brief Convert to scaled number.
589 /// Convert to \a ScaledNumber. \a isFull() gives 1.0, while \a isEmpty()
590 /// gives slightly above 0.0.
591 ScaledNumber<uint64_t> toScaled() const;
594 raw_ostream &print(raw_ostream &OS) const;
// Free binary operators, defined in terms of the compound-assignment
// members (which saturate rather than overflow/underflow).
597 inline BlockMass operator+(const BlockMass &L, const BlockMass &R) {
598 return BlockMass(L) += R;
600 inline BlockMass operator-(const BlockMass &L, const BlockMass &R) {
601 return BlockMass(L) -= R;
// Mass-by-probability scaling is commutative; both orders are provided.
603 inline BlockMass operator*(const BlockMass &L, const BranchProbability &R) {
604 return BlockMass(L) *= R;
606 inline BlockMass operator*(const BranchProbability &L, const BlockMass &R) {
607 return BlockMass(R) *= L;
610 inline raw_ostream &operator<<(raw_ostream &OS, const BlockMass &X) {
// Mark BlockMass as POD-like so LLVM containers can bit-copy it.
614 template <> struct isPodLike<BlockMass> {
615 static const bool value = true;
619 //===----------------------------------------------------------------------===//
621 // BlockFrequencyInfoImpl definition.
623 //===----------------------------------------------------------------------===//
627 class BranchProbabilityInfo;
631 class MachineBasicBlock;
632 class MachineBranchProbabilityInfo;
633 class MachineFunction;
635 class MachineLoopInfo;
637 namespace bfi_detail {
638 struct IrreducibleGraph;
640 // This is part of a workaround for a GCC 4.7 crash on lambdas.
641 template <class BT> struct BlockEdgesAdder;
644 /// \brief Base class for BlockFrequencyInfoImpl
646 /// BlockFrequencyInfoImplBase has supporting data structures and some
647 /// algorithms for BlockFrequencyInfoImplBase. Only algorithms that depend on
648 /// the block type (or that call such algorithms) are skipped here.
650 /// Nevertheless, the majority of the overall algorithm documentation lives with
651 /// BlockFrequencyInfoImpl. See there for details.
652 class BlockFrequencyInfoImplBase {
654 typedef ScaledNumber<uint64_t> Scaled64;
656 /// \brief Representative of a block.
658 /// This is a simple wrapper around an index into the reverse-post-order
659 /// traversal of the blocks.
661 /// Unlike a block pointer, its order has meaning (location in the
662 /// topological sort) and its class is the same regardless of block type.
664 typedef uint32_t IndexType;
// All comparisons are in RPO-index order.
667 bool operator==(const BlockNode &X) const { return Index == X.Index; }
668 bool operator!=(const BlockNode &X) const { return Index != X.Index; }
669 bool operator<=(const BlockNode &X) const { return Index <= X.Index; }
670 bool operator>=(const BlockNode &X) const { return Index >= X.Index; }
671 bool operator<(const BlockNode &X) const { return Index < X.Index; }
672 bool operator>(const BlockNode &X) const { return Index > X.Index; }
// Default-constructed nodes are invalid: UINT32_MAX is the sentinel, which
// is why getMaxIndex() stops at UINT32_MAX - 1.
674 BlockNode() : Index(UINT32_MAX) {}
675 BlockNode(IndexType Index) : Index(Index) {}
677 bool isValid() const { return Index <= getMaxIndex(); }
678 static size_t getMaxIndex() { return UINT32_MAX - 1; }
681 /// \brief Stats about a block itself.
682 struct FrequencyData {
687 /// \brief Data about a loop.
689 /// Contains the data necessary to represent a loop as a
690 /// pseudo-node once it's packaged.
692 typedef SmallVector<std::pair<BlockNode, BlockMass>, 4> ExitMap;
693 typedef SmallVector<BlockNode, 4> NodeList;
694 LoopData *Parent; ///< The parent loop.
695 bool IsPackaged; ///< Whether this has been packaged.
696 uint32_t NumHeaders; ///< Number of headers.
697 ExitMap Exits; ///< Successor edges (and weights).
698 NodeList Nodes; ///< Header and the members of the loop.
699 BlockMass BackedgeMass; ///< Mass returned to loop header.
// Single-header (reducible) loop: the header is the first node.
703 LoopData(LoopData *Parent, const BlockNode &Header)
704 : Parent(Parent), IsPackaged(false), NumHeaders(1), Nodes(1, Header) {}
// Multi-header (irreducible) loop: headers come first in Nodes, followed by
// the other members; isHeader() below relies on the headers being a sorted
// prefix (binary_search over [begin, begin + NumHeaders)).
705 template <class It1, class It2>
706 LoopData(LoopData *Parent, It1 FirstHeader, It1 LastHeader, It2 FirstOther,
708 : Parent(Parent), IsPackaged(false), Nodes(FirstHeader, LastHeader) {
709 NumHeaders = Nodes.size();
710 Nodes.insert(Nodes.end(), FirstOther, LastOther);
712 bool isHeader(const BlockNode &Node) const {
714 return std::binary_search(Nodes.begin(), Nodes.begin() + NumHeaders,
716 return Node == Nodes[0];
718 BlockNode getHeader() const { return Nodes[0]; }
// More than one header implies irreducible control flow.
719 bool isIrreducible() const { return NumHeaders > 1; }
// Members are the non-header nodes (Nodes past the header prefix).
721 NodeList::const_iterator members_begin() const {
722 return Nodes.begin() + NumHeaders;
724 NodeList::const_iterator members_end() const { return Nodes.end(); }
725 iterator_range<NodeList::const_iterator> members() const {
726 return make_range(members_begin(), members_end());
730 /// \brief Index of loop information.
732 BlockNode Node; ///< This node.
733 LoopData *Loop; ///< The loop this block is inside.
734 BlockMass Mass; ///< Mass distribution from the entry block.
736 WorkingData(const BlockNode &Node) : Node(Node), Loop(nullptr) {}
738 bool isLoopHeader() const { return Loop && Loop->isHeader(Node); }
// A node can head both an irreducible loop and that loop's parent when the
// parent is itself irreducible and lists this node among its headers.
739 bool isDoubleLoopHeader() const {
740 return isLoopHeader() && Loop->Parent && Loop->Parent->isIrreducible() &&
741 Loop->Parent->isHeader(Node);
// The loop that contains this node, skipping an extra level for
// double-headers (see isDoubleLoopHeader()).
744 LoopData *getContainingLoop() const {
747 if (!isDoubleLoopHeader())
749 return Loop->Parent->Parent;
752 /// \brief Resolve a node to its representative.
754 /// Get the node currently representing Node, which could be a containing
757 /// This function should only be called when distributing mass. As long as
758 /// there are no irreducible edges to Node, then it will have complexity
759 /// O(1) in this context.
761 /// In general, the complexity is O(L), where L is the number of loop
762 /// headers Node has been packaged into. Since this method is called in
763 /// the context of distributing mass, L will be the number of loop headers
764 /// an early exit edge jumps out of.
765 BlockNode getResolvedNode() const {
766 auto L = getPackagedLoop();
767 return L ? L->getHeader() : Node;
// Walk up to the outermost loop that has already been packaged, if any.
769 LoopData *getPackagedLoop() const {
770 if (!Loop || !Loop->IsPackaged)
773 while (L->Parent && L->Parent->IsPackaged)
778 /// \brief Get the appropriate mass for a node.
780 /// Get appropriate mass for Node. If Node is a loop-header (whose loop
781 /// has been packaged), returns the mass of its pseudo-node. If it's a
782 /// node inside a packaged loop, it returns the loop's mass.
783 BlockMass &getMass() {
786 if (!isADoublePackage())
788 return Loop->Parent->Mass;
791 /// \brief Has ContainingLoop been packaged up?
792 bool isPackaged() const { return getResolvedNode() != Node; }
793 /// \brief Has Loop been packaged up?
794 bool isAPackage() const { return isLoopHeader() && Loop->IsPackaged; }
795 /// \brief Has Loop been packaged up twice?
796 bool isADoublePackage() const {
797 return isDoubleLoopHeader() && Loop->Parent->IsPackaged;
801 /// \brief Unscaled probability weight.
803 /// Probability weight for an edge in the graph (including the
804 /// successor/target node).
806 /// All edges in the original function are 32-bit. However, exit edges from
807 /// loop packages are taken from 64-bit exit masses, so we need 64-bits of
808 /// space in general.
810 /// In addition to the raw weight amount, Weight stores the type of the edge
811 /// in the current context (i.e., the context of the loop being processed).
812 /// Is this a local edge within the loop, an exit from the loop, or a
813 /// backedge to the loop header?
815 enum DistType { Local, Exit, Backedge };
817 BlockNode TargetNode;
// Default: a local edge of zero weight.
819 Weight() : Type(Local), Amount(0) {}
822 /// \brief Distribution of unscaled probability weight.
824 /// Distribution of unscaled probability weight to a set of successors.
826 /// This class collates the successor edge weights for later processing.
828 /// \a DidOverflow indicates whether \a Total did overflow while adding to
829 /// the distribution. It should never overflow twice.
830 struct Distribution {
831 typedef SmallVector<Weight, 4> WeightList;
832 WeightList Weights; ///< Individual successor weights.
833 uint64_t Total; ///< Sum of all weights.
834 bool DidOverflow; ///< Whether \a Total did overflow.
836 Distribution() : Total(0), DidOverflow(false) {}
// Convenience wrappers around add() for each edge kind.
837 void addLocal(const BlockNode &Node, uint64_t Amount) {
838 add(Node, Amount, Weight::Local);
840 void addExit(const BlockNode &Node, uint64_t Amount) {
841 add(Node, Amount, Weight::Exit);
843 void addBackedge(const BlockNode &Node, uint64_t Amount) {
844 add(Node, Amount, Weight::Backedge);
847 /// \brief Normalize the distribution.
849 /// Combines multiple edges to the same \a Weight::TargetNode and scales
850 /// down so that \a Total fits into 32-bits.
852 /// This is linear in the size of \a Weights. For the vast majority of
853 /// cases, adjacent edge weights are combined by sorting WeightList and
854 /// combining adjacent weights. However, for very large edge lists an
855 /// auxiliary hash table is used.
859 void add(const BlockNode &Node, uint64_t Amount, Weight::DistType Type);
862 /// \brief Data about each block. This is used downstream.
863 std::vector<FrequencyData> Freqs;
865 /// \brief Loop data: see initializeLoops().
866 std::vector<WorkingData> Working;
// std::list keeps LoopData addresses stable; WorkingData holds raw
// LoopData pointers into this container.
868 /// \brief Indexed information about loops.
869 std::list<LoopData> Loops;
871 /// \brief Add all edges out of a packaged loop to the distribution.
873 /// Adds all edges from LocalLoopHead to Dist. Calls addToDist() to add each
876 /// \return \c true unless there's an irreducible backedge.
877 bool addLoopSuccessorsToDist(const LoopData *OuterLoop, LoopData &Loop,
880 /// \brief Add an edge to the distribution.
882 /// Adds an edge to Succ to Dist. If \c LoopHead.isValid(), then whether the
883 /// edge is local/exit/backedge is in the context of LoopHead. Otherwise,
884 /// every edge should be a local edge (since all the loops are packaged up).
886 /// \return \c true unless aborted due to an irreducible backedge.
887 bool addToDist(Distribution &Dist, const LoopData *OuterLoop,
888 const BlockNode &Pred, const BlockNode &Succ, uint64_t Weight);
// Head must be a loop header with initialized working data.
890 LoopData &getLoopPackage(const BlockNode &Head) {
891 assert(Head.Index < Working.size());
892 assert(Working[Head.Index].isLoopHeader());
893 return *Working[Head.Index].Loop;
896 /// \brief Analyze irreducible SCCs.
898 /// Separate irreducible SCCs from \c G, which is an explicit graph of \c
899 /// OuterLoop (or the top-level function, if \c OuterLoop is \c nullptr).
900 /// Insert them into \a Loops before \c Insert.
902 /// \return the \c LoopData nodes representing the irreducible SCCs.
903 iterator_range<std::list<LoopData>::iterator>
904 analyzeIrreducible(const bfi_detail::IrreducibleGraph &G, LoopData *OuterLoop,
905 std::list<LoopData>::iterator Insert);
907 /// \brief Update a loop after packaging irreducible SCCs inside of it.
909 /// Update \c OuterLoop. Before finding irreducible control flow, it was
910 /// partway through \a computeMassInLoop(), so \a LoopData::Exits and \a
911 /// LoopData::BackedgeMass need to be reset. Also, nodes that were packaged
912 /// up need to be removed from \a OuterLoop::Nodes.
913 void updateLoopWithIrreducible(LoopData &OuterLoop);
915 /// \brief Distribute mass according to a distribution.
917 /// Distributes the mass in Source according to Dist. If LoopHead.isValid(),
918 /// backedges and exits are stored in its entry in Loops.
920 /// Mass is distributed in parallel from two copies of the source mass.
921 void distributeMass(const BlockNode &Source, LoopData *OuterLoop,
924 /// \brief Compute the loop scale for a loop.
925 void computeLoopScale(LoopData &Loop);
927 /// \brief Package up a loop.
928 void packageLoop(LoopData &Loop);
930 /// \brief Unwrap loops.
933 /// \brief Finalize frequency metrics.
935 /// Calculates final frequencies and cleans up no-longer-needed data
937 void finalizeMetrics();
939 /// \brief Clear all memory.
// Virtual so block-type-specific subclasses can prepend block names.
942 virtual std::string getBlockName(const BlockNode &Node) const;
943 std::string getLoopName(const LoopData &Loop) const;
945 virtual raw_ostream &print(raw_ostream &OS) const { return OS; }
946 void dump() const { print(dbgs()); }
948 Scaled64 getFloatingBlockFreq(const BlockNode &Node) const;
950 BlockFrequency getBlockFreq(const BlockNode &Node) const;
952 raw_ostream &printBlockFreq(raw_ostream &OS, const BlockNode &Node) const;
953 raw_ostream &printBlockFreq(raw_ostream &OS,
954 const BlockFrequency &Freq) const;
// By convention the entry block is index 0 in Freqs.
956 uint64_t getEntryFreq() const {
957 assert(!Freqs.empty());
958 return Freqs[0].Integer;
960 /// \brief Virtual destructor.
962 /// Need a virtual destructor to mask the compiler warning about
964 virtual ~BlockFrequencyInfoImplBase() {}
967 namespace bfi_detail {
// TypeMap selects the companion types (function, probability info, loop
// info) for a given block type, so BlockFrequencyInfoImpl can be shared
// between IR BasicBlocks and MachineBasicBlocks.
968 template <class BlockT> struct TypeMap {};
969 template <> struct TypeMap<BasicBlock> {
970 typedef BasicBlock BlockT;
971 typedef Function FunctionT;
972 typedef BranchProbabilityInfo BranchProbabilityInfoT;
974 typedef LoopInfo LoopInfoT;
976 template <> struct TypeMap<MachineBasicBlock> {
977 typedef MachineBasicBlock BlockT;
978 typedef MachineFunction FunctionT;
979 typedef MachineBranchProbabilityInfo BranchProbabilityInfoT;
980 typedef MachineLoop LoopT;
981 typedef MachineLoopInfo LoopInfoT;
984 /// \brief Get the name of a MachineBasicBlock.
986 /// Get the name of a MachineBasicBlock. It's templated so that including from
987 /// CodeGen is unnecessary (that would be a layering issue).
989 /// This is used mainly for debug output. The name is similar to
990 /// MachineBasicBlock::getFullName(), but skips the name of the function.
991 template <class BlockT> std::string getBlockName(const BlockT *BB) {
992 assert(BB && "Unexpected nullptr");
993 auto MachineName = "BB" + Twine(BB->getNumber());
// Append the IR block's name in brackets when one is attached.
994 if (BB->getBasicBlock())
995 return (MachineName + "[" + BB->getName() + "]").str();
996 return MachineName.str();
998 /// \brief Get the name of a BasicBlock.
999 template <> inline std::string getBlockName(const BasicBlock *BB) {
1000 assert(BB && "Unexpected nullptr");
1001 return BB->getName().str();
1004 /// \brief Graph of irreducible control flow.
1006 /// This graph is used for determining the SCCs in a loop (or top-level
1007 /// function) that has irreducible control flow.
1009 /// During the block frequency algorithm, the local graphs are defined in a
1010 /// light-weight way, deferring to the \a BasicBlock or \a MachineBasicBlock
1011 /// graphs for most edges, but getting others from \a LoopData::ExitMap. The
1012 /// latter only has successor information.
1014 /// \a IrreducibleGraph makes this graph explicit. It's in a form that can use
1015 /// \a GraphTraits (so that \a analyzeIrreducible() can use \a scc_iterator),
1016 /// and it explicitly lists predecessors and successors. The initialization
1017 /// that relies on \c MachineBasicBlock is defined in the header.
1018 struct IrreducibleGraph {
1019 typedef BlockFrequencyInfoImplBase BFIBase;
1023 typedef BFIBase::BlockNode BlockNode;
// Combined adjacency list: predecessors occupy Edges[0, NumIn), successors
// the remainder (see succ_begin() below).
1027 std::deque<const IrrNode *> Edges;
1028 IrrNode(const BlockNode &Node) : Node(Node), NumIn(0) {}
1030 typedef std::deque<const IrrNode *>::const_iterator iterator;
1031 iterator pred_begin() const { return Edges.begin(); }
1032 iterator succ_begin() const { return Edges.begin() + NumIn; }
1033 iterator pred_end() const { return succ_begin(); }
1034 iterator succ_end() const { return Edges.end(); }
// Entry node of the graph, looked up from Lookup once nodes are built.
1037 const IrrNode *StartIrr;
1038 std::vector<IrrNode> Nodes;
// Map from BlockNode::Index to its IrrNode; small map since most graphs
// built here are small.
1039 SmallDenseMap<uint32_t, IrrNode *, 4> Lookup;
1041 /// \brief Construct an explicit graph containing irreducible control flow.
1043 /// Construct an explicit graph of the control flow in \c OuterLoop (or the
1044 /// top-level function, if \c OuterLoop is \c nullptr). Uses \c
1045 /// addBlockEdges to add block successors that have not been packaged into
1048 /// \a BlockFrequencyInfoImpl::computeIrreducibleMass() is the only expected
1050 template <class BlockEdgesAdder>
1051 IrreducibleGraph(BFIBase &BFI, const BFIBase::LoopData *OuterLoop,
1052 BlockEdgesAdder addBlockEdges)
1053 : BFI(BFI), StartIrr(nullptr) {
1054 initialize(OuterLoop, addBlockEdges);
1057 template <class BlockEdgesAdder>
1058 void initialize(const BFIBase::LoopData *OuterLoop,
1059 BlockEdgesAdder addBlockEdges);
1060 void addNodesInLoop(const BFIBase::LoopData &OuterLoop);
1061 void addNodesInFunction();
1062 void addNode(const BlockNode &Node) {
1063 Nodes.emplace_back(Node);
// Reset the working mass for the node; it will be recomputed when mass
// is redistributed through the irreducible SCC.
1064 BFI.Working[Node.Index].getMass() = BlockMass::getEmpty();
1067 template <class BlockEdgesAdder>
1068 void addEdges(const BlockNode &Node, const BFIBase::LoopData *OuterLoop,
1069 BlockEdgesAdder addBlockEdges);
1070 void addEdge(IrrNode &Irr, const BlockNode &Succ,
1071 const BFIBase::LoopData *OuterLoop);
// Populate Nodes/Lookup either from OuterLoop's members or from the whole
// function, add edges for each node, then cache the start node's IrrNode.
// NOTE(review): the branch selecting between the loop and function cases is
// elided in this chunk.
1073 template <class BlockEdgesAdder>
1074 void IrreducibleGraph::initialize(const BFIBase::LoopData *OuterLoop,
1075 BlockEdgesAdder addBlockEdges) {
1077 addNodesInLoop(*OuterLoop)
1078 for (auto N : OuterLoop->Nodes)
1079 addEdges(N, OuterLoop, addBlockEdges);
1081 addNodesInFunction();
1082 for (uint32_t Index = 0; Index < BFI.Working.size(); ++Index)
1083 addEdges(Index, OuterLoop, addBlockEdges);
1085 StartIrr = Lookup[Start.Index];
// Add the successor edges for \p Node. Packaged sub-loops contribute their
// recorded exit edges; ordinary blocks defer to addBlockEdges, which walks
// the underlying (Machine)BasicBlock successor list.
1087 template <class BlockEdgesAdder>
1088 void IrreducibleGraph::addEdges(const BlockNode &Node,
1089 const BFIBase::LoopData *OuterLoop,
1090 BlockEdgesAdder addBlockEdges) {
// Nodes outside the graph have no Lookup entry and are skipped.
1091 auto L = Lookup.find(Node.Index);
1092 if (L == Lookup.end())
1094 IrrNode &Irr = *L->second;
1095 const auto &Working = BFI.Working[Node.Index];
1097 if (Working.isAPackage())
1098 for (const auto &I : Working.Loop->Exits)
1099 addEdge(Irr, I.first, OuterLoop);
1101 addBlockEdges(*this, Irr, OuterLoop);
1105 /// \brief Shared implementation for block frequency analysis.
1107 /// This is a shared implementation of BlockFrequencyInfo and
1108 /// MachineBlockFrequencyInfo, and calculates the relative frequencies of
1111 /// LoopInfo defines a loop as a "non-trivial" SCC dominated by a single block,
1112 /// which is called the header. A given loop, L, can have sub-loops, which are
1113 /// loops within the subgraph of L that exclude its header. (A "trivial" SCC
1114 /// consists of a single block that does not have a self-edge.)
1116 /// In addition to loops, this algorithm has limited support for irreducible
1117 /// SCCs, which are SCCs with multiple entry blocks. Irreducible SCCs are
1118 /// discovered on the fly, and modelled as loops with multiple headers.
1120 /// The headers of irreducible sub-SCCs consist of its entry blocks and all
1121 /// nodes that are targets of a backedge within it (excluding backedges within
1122 /// true sub-loops). Block frequency calculations act as if a block is
1123 /// inserted that intercepts all the edges to the headers. All backedges and
1124 /// entries point to this block. Its successors are the headers, which split
1125 /// the frequency evenly.
1127 /// This algorithm leverages BlockMass and ScaledNumber to maintain precision,
1128 /// separates mass distribution from loop scaling, and dithers to eliminate
1129 /// probability mass loss.
1131 /// The implementation is split between BlockFrequencyInfoImpl, which knows the
1132 /// type of graph being modelled (BasicBlock vs. MachineBasicBlock), and
1133 /// BlockFrequencyInfoImplBase, which doesn't. The base class uses \a
1134 /// BlockNode, a wrapper around a uint32_t. BlockNode is numbered from 0 in
1135 /// reverse-post order. This gives two advantages: it's easy to compare the
1136 /// relative ordering of two nodes, and maps keyed on BlockT can be represented
1139 /// This algorithm is O(V+E), unless there is irreducible control flow, in
1140 /// which case it's O(V*E) in the worst case.
1142 /// These are the main stages:
1144 /// 0. Reverse post-order traversal (\a initializeRPOT()).
1146 /// Run a single post-order traversal and save it (in reverse) in RPOT.
1147 /// All other stages make use of this ordering. Save a lookup from BlockT
1148 /// to BlockNode (the index into RPOT) in Nodes.
1150 /// 1. Loop initialization (\a initializeLoops()).
1152 /// Translate LoopInfo/MachineLoopInfo into a form suitable for the rest of
1153 /// the algorithm. In particular, store the immediate members of each loop
1154 /// in reverse post-order.
1156 /// 2. Calculate mass and scale in loops (\a computeMassInLoops()).
1158 /// For each loop (bottom-up), distribute mass through the DAG resulting
1159 /// from ignoring backedges and treating sub-loops as a single pseudo-node.
1160 /// Track the backedge mass distributed to the loop header, and use it to
1161 /// calculate the loop scale (number of loop iterations). Immediate
1162 /// members that represent sub-loops will already have been visited and
1163 /// packaged into a pseudo-node.
1165 /// Distributing mass in a loop is a reverse-post-order traversal through
1166 /// the loop. Start by assigning full mass to the Loop header. For each
1167 /// node in the loop:
1169 /// - Fetch and categorize the weight distribution for its successors.
1170 /// If this is a packaged-subloop, the weight distribution is stored
1171 /// in \a LoopData::Exits. Otherwise, fetch it from
1172 /// BranchProbabilityInfo.
1174 /// - Each successor is categorized as \a Weight::Local, a local edge
1175 /// within the current loop, \a Weight::Backedge, a backedge to the
1176 /// loop header, or \a Weight::Exit, any successor outside the loop.
1177 /// The weight, the successor, and its category are stored in \a
1178 /// Distribution. There can be multiple edges to each successor.
1180 /// - If there's a backedge to a non-header, there's an irreducible SCC.
1181 /// The usual flow is temporarily aborted. \a
1182 /// computeIrreducibleMass() finds the irreducible SCCs within the
1183 /// loop, packages them up, and restarts the flow.
1185 /// - Normalize the distribution: scale weights down so that their sum
1186 /// is 32-bits, and coalesce multiple edges to the same node.
1188 /// - Distribute the mass accordingly, dithering to minimize mass loss,
1189 /// as described in \a distributeMass().
1191 /// Finally, calculate the loop scale from the accumulated backedge mass.
1193 /// 3. Distribute mass in the function (\a computeMassInFunction()).
1195 /// Finally, distribute mass through the DAG resulting from packaging all
1196 /// loops in the function. This uses the same algorithm as distributing
1197 /// mass in a loop, except that there are no exit or backedge edges.
1199 /// 4. Unpackage loops (\a unwrapLoops()).
1201 /// Initialize each block's frequency to a floating point representation of
1204 /// Visit loops top-down, scaling the frequencies of its immediate members
1205 /// by the loop's pseudo-node's frequency.
1207 /// 5. Convert frequencies to a 64-bit range (\a finalizeMetrics()).
1209 /// Using the min and max frequencies as a guide, translate floating point
1210 /// frequencies to an appropriate range in uint64_t.
1212 /// It has some known flaws.
1214 /// - Loop scale is limited to 4096 per loop (2^12) to avoid exhausting
1215 /// BlockFrequency's 64-bit integer precision.
1217 /// - The model of irreducible control flow is a rough approximation.
1219 /// Modelling irreducible control flow exactly involves setting up and
1220 /// solving a group of infinite geometric series. Such precision is
1221 /// unlikely to be worthwhile, since most of our algorithms give up on
1222 /// irreducible control flow anyway.
1224 /// Nevertheless, we might find that we need to get closer. Here's a sort
1225 /// of TODO list for the model with diminishing returns, to be completed as
1228 /// - The headers for the \a LoopData representing an irreducible SCC
1229 /// include non-entry blocks. When these extra blocks exist, they
1230 /// indicate a self-contained irreducible sub-SCC. We could treat them
1231 /// as sub-loops, rather than arbitrarily shoving the problematic
1232 /// blocks into the headers of the main irreducible SCC.
1234 /// - Backedge frequencies are assumed to be evenly split between the
1235 /// headers of a given irreducible SCC. Instead, we could track the
1236 /// backedge mass separately for each header, and adjust their relative
1239 /// - Entry frequencies are assumed to be evenly split between the
1240 /// headers of a given irreducible SCC, which is the only option if we
1241 /// need to compute mass in the SCC before its parent loop. Instead,
1242 /// we could partially compute mass in the parent loop, and stop when
1243 /// we get to the SCC. Here, we have the correct ratio of entry
1244 /// masses, which we can use to adjust their relative frequencies.
1245 /// Compute mass in the SCC, and then continue propagation in the
1248 /// - We can propagate mass iteratively through the SCC, for some fixed
1249 /// number of iterations. Each iteration starts by assigning the entry
1250 /// blocks their backedge mass from the prior iteration. The final
1251 /// mass for each block (and each exit, and the total backedge mass
1252 /// used for computing loop scale) is the sum of all iterations.
1253 /// (Running this until fixed point would "solve" the geometric
1254 /// series by simulation.)
1255 template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
// Resolve the whole IR-vs-MachineIR type family from the block type.
1256 typedef typename bfi_detail::TypeMap<BT>::BlockT BlockT;
1257 typedef typename bfi_detail::TypeMap<BT>::FunctionT FunctionT;
1258 typedef typename bfi_detail::TypeMap<BT>::BranchProbabilityInfoT
1259 BranchProbabilityInfoT;
1260 typedef typename bfi_detail::TypeMap<BT>::LoopT LoopT;
1261 typedef typename bfi_detail::TypeMap<BT>::LoopInfoT LoopInfoT;
1263 // This is part of a workaround for a GCC 4.7 crash on lambdas.
1264 friend struct bfi_detail::BlockEdgesAdder<BT>;
1266 typedef GraphTraits<const BlockT *> Successor;
1267 typedef GraphTraits<Inverse<const BlockT *>> Predecessor;
1269 const BranchProbabilityInfoT *BPI;
1270 const LoopInfoT *LI;
1273 // All blocks in reverse postorder.
1274 std::vector<const BlockT *> RPOT;
// Inverse of RPOT: block pointer -> its BlockNode (index into RPOT).
1275 DenseMap<const BlockT *, BlockNode> Nodes;
1277 typedef typename std::vector<const BlockT *>::const_iterator rpot_iterator;
1279 rpot_iterator rpot_begin() const { return RPOT.begin(); }
1280 rpot_iterator rpot_end() const { return RPOT.end(); }
1282 size_t getIndex(const rpot_iterator &I) const { return I - rpot_begin(); }
1284 BlockNode getNode(const rpot_iterator &I) const {
1285 return BlockNode(getIndex(I));
1287 BlockNode getNode(const BlockT *BB) const { return Nodes.lookup(BB); }
1289 const BlockT *getBlock(const BlockNode &Node) const {
1290 assert(Node.Index < RPOT.size());
1291 return RPOT[Node.Index];
1294 /// \brief Run (and save) a post-order traversal.
1296 /// Saves a reverse post-order traversal of all the nodes in \a F.
1297 void initializeRPOT();
1299 /// \brief Initialize loop data.
1301 /// Build up \a Loops using \a LoopInfo. \a LoopInfo gives us a mapping from
1302 /// each block to the deepest loop it's in, but we need the inverse. For each
1303 /// loop, we store in reverse post-order its "immediate" members, defined as
1304 /// the header, the headers of immediate sub-loops, and all other blocks in
1305 /// the loop that are not in sub-loops.
1306 void initializeLoops();
1308 /// \brief Propagate to a block's successors.
1310 /// In the context of distributing mass through \c OuterLoop, divide the mass
1311 /// currently assigned to \c Node between its successors.
1313 /// \return \c true unless there's an irreducible backedge.
1314 bool propagateMassToSuccessors(LoopData *OuterLoop, const BlockNode &Node);
1316 /// \brief Compute mass in a particular loop.
1318 /// Assign mass to \c Loop's header, and then for each block in \c Loop in
1319 /// reverse post-order, distribute mass to its successors. Only visits nodes
1320 /// that have not been packaged into sub-loops.
1322 /// \pre \a computeMassInLoop() has been called for each subloop of \c Loop.
1323 /// \return \c true unless there's an irreducible backedge.
1324 bool computeMassInLoop(LoopData &Loop);
1326 /// \brief Try to compute mass in the top-level function.
1328 /// Assign mass to the entry block, and then for each block in reverse
1329 /// post-order, distribute mass to its successors. Skips nodes that have
1330 /// been packaged into loops.
1332 /// \pre \a computeMassInLoops() has been called.
1333 /// \return \c true unless there's an irreducible backedge.
1334 bool tryToComputeMassInFunction();
1336 /// \brief Compute mass in (and package up) irreducible SCCs.
1338 /// Find the irreducible SCCs in \c OuterLoop, add them to \a Loops (in front
1339 /// of \c Insert), and call \a computeMassInLoop() on each of them.
1341 /// If \c OuterLoop is \c nullptr, it refers to the top-level function.
1343 /// \pre \a computeMassInLoop() has been called for each subloop of \c
1345 /// \pre \c Insert points at the last loop successfully processed by \a
1346 /// computeMassInLoop().
1347 /// \pre \c OuterLoop has irreducible SCCs.
1348 void computeIrreducibleMass(LoopData *OuterLoop,
1349 std::list<LoopData>::iterator Insert);
1351 /// \brief Compute mass in all loops.
1353 /// For each loop bottom-up, call \a computeMassInLoop().
1355 /// \a computeMassInLoop() aborts (and returns \c false) on loops that
1356 /// contain a irreducible sub-SCCs. Use \a computeIrreducibleMass() and then
1357 /// re-enter \a computeMassInLoop().
1359 /// \post \a computeMassInLoop() has returned \c true for every loop.
1360 void computeMassInLoops();
1362 /// \brief Compute mass in the top-level function.
1364 /// Uses \a tryToComputeMassInFunction() and \a computeIrreducibleMass() to
1365 /// compute mass in the top-level function.
1367 /// \post \a tryToComputeMassInFunction() has returned \c true.
1368 void computeMassInFunction();
// Override the base-class hook so debug output can use real block names.
1370 std::string getBlockName(const BlockNode &Node) const override {
1371 return bfi_detail::getBlockName(getBlock(Node));
1375 const FunctionT *getFunction() const { return F; }
1377 void doFunction(const FunctionT *F, const BranchProbabilityInfoT *BPI,
1378 const LoopInfoT *LI);
1379 BlockFrequencyInfoImpl() : BPI(nullptr), LI(nullptr), F(nullptr) {}
1381 using BlockFrequencyInfoImplBase::getEntryFreq;
// Public accessors: translate a block pointer to its node, then defer to
// the base class.
1382 BlockFrequency getBlockFreq(const BlockT *BB) const {
1383 return BlockFrequencyInfoImplBase::getBlockFreq(getNode(BB));
1385 Scaled64 getFloatingBlockFreq(const BlockT *BB) const {
1386 return BlockFrequencyInfoImplBase::getFloatingBlockFreq(getNode(BB));
1389 /// \brief Print the frequencies for the current function.
1391 /// Prints the frequencies for the blocks in the current function.
1393 /// Blocks are printed in the natural iteration order of the function, rather
1394 /// than reverse post-order. This provides two advantages: writing -analyze
1395 /// tests is easier (since blocks come out in source order), and even
1396 /// unreachable blocks are printed.
1398 /// \a BlockFrequencyInfoImplBase::print() only knows reverse post-order, so
1399 /// we need to override it here.
1400 raw_ostream &print(raw_ostream &OS) const override;
1401 using BlockFrequencyInfoImplBase::dump;
1403 using BlockFrequencyInfoImplBase::printBlockFreq;
1404 raw_ostream &printBlockFreq(raw_ostream &OS, const BlockT *BB) const {
1405 return BlockFrequencyInfoImplBase::printBlockFreq(OS, getNode(BB));
// Main driver: analyze \p F using \p BPI and \p LI. Resets any previous
// state, then runs the staged algorithm described on the class (RPOT, loop
// init, mass-in-loops, mass-in-function; later stages are elided in this
// chunk).
1410 void BlockFrequencyInfoImpl<BT>::doFunction(const FunctionT *F,
1411 const BranchProbabilityInfoT *BPI,
1412 const LoopInfoT *LI) {
1413 // Save the parameters.
1418 // Clean up left-over data structures.
1419 BlockFrequencyInfoImplBase::clear();
// Banner for -debug output: function name underlined with '='.
1424 DEBUG(dbgs() << "\nblock-frequency: " << F->getName() << "\n================="
1425 << std::string(F->getName().size(), '=') << "\n");
1429 // Visit loops in post-order to find the local mass distribution, and then do
1430 // the full function.
1431 computeMassInLoops();
1432 computeMassInFunction();
// Stage 0: save a reverse post-order traversal of the function in RPOT and
// size the Working/Freqs arrays to match.
1437 template <class BT> void BlockFrequencyInfoImpl<BT>::initializeRPOT() {
1438 const BlockT *Entry = F->begin();
1439 RPOT.reserve(F->size());
// Post-order, then reverse, gives reverse post-order.
1440 std::copy(po_begin(Entry), po_end(Entry), std::back_inserter(RPOT));
1441 std::reverse(RPOT.begin(), RPOT.end());
// BlockNode packs the index into a uint32_t; guard against overflow.
1443 assert(RPOT.size() - 1 <= BlockNode::getMaxIndex() &&
1444 "More nodes in function than Block Frequency Info supports");
1446 DEBUG(dbgs() << "reverse-post-order-traversal\n");
1447 for (rpot_iterator I = rpot_begin(), E = rpot_end(); I != E; ++I) {
1448 BlockNode Node = getNode(I);
1449 DEBUG(dbgs() << " - " << getIndex(I) << ": " << getBlockName(Node) << "\n");
// One Working entry and one Freqs slot per block, indexed by RPOT position.
1453 Working.reserve(RPOT.size());
1454 for (size_t Index = 0; Index < RPOT.size(); ++Index)
1455 Working.emplace_back(Index);
1456 Freqs.resize(RPOT.size());
// Stage 1: translate LoopInfo into Loops/Working. First visit loops
// top-down (worklist Q) creating a LoopData per loop; then assign every
// block to its deepest containing loop's member list.
1459 template <class BT> void BlockFrequencyInfoImpl<BT>::initializeLoops() {
1460 DEBUG(dbgs() << "loop-detection\n");
1464 // Visit loops top down and assign them an index.
1465 std::deque<std::pair<const LoopT *, LoopData *>> Q;
1466 for (const LoopT *L : *LI)
1467 Q.emplace_back(L, nullptr);
1468 while (!Q.empty()) {
1469 const LoopT *Loop = Q.front().first;
1470 LoopData *Parent = Q.front().second;
1473 BlockNode Header = getNode(Loop->getHeader());
1474 assert(Header.isValid());
// Record the loop and mark its header's Working entry as a loop header.
1476 Loops.emplace_back(Parent, Header);
1477 Working[Header.Index].Loop = &Loops.back();
1478 DEBUG(dbgs() << " - loop = " << getBlockName(Header) << "\n");
// Queue sub-loops with this loop as their parent.
1480 for (const LoopT *L : *Loop)
1481 Q.emplace_back(L, &Loops.back());
1484 // Visit nodes in reverse post-order and add them to their deepest containing
1486 for (size_t Index = 0; Index < RPOT.size(); ++Index) {
1487 // Loop headers have already been mostly mapped.
1488 if (Working[Index].isLoopHeader()) {
// A header is an "immediate member" of its parent loop, if any.
1489 LoopData *ContainingLoop = Working[Index].getContainingLoop();
1491 ContainingLoop->Nodes.push_back(Index);
1495 const LoopT *Loop = LI->getLoopFor(RPOT[Index]);
1499 // Add this node to its containing loop's member list.
1500 BlockNode Header = getNode(Loop->getHeader());
1501 assert(Header.isValid());
1502 const auto &HeaderData = Working[Header.Index];
1503 assert(HeaderData.isLoopHeader());
1505 Working[Index].Loop = HeaderData.Loop;
1506 HeaderData.Loop->Nodes.push_back(Index);
1507 DEBUG(dbgs() << " - loop = " << getBlockName(Header)
1508 << ": member = " << getBlockName(Index) << "\n");
// Stage 2: compute mass bottom-up (deepest loops first). If a loop fails
// because of an irreducible backedge, package its irreducible SCCs with
// computeIrreducibleMass() and retry exactly once.
1512 template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInLoops() {
1513 // Visit loops with the deepest first, and the top-level loops last.
1514 for (auto L = Loops.rbegin(), E = Loops.rend(); L != E; ++L) {
1515 if (computeMassInLoop(*L))
// computeIrreducibleMass() may insert loops; re-derive the reverse
// iterator from the saved forward position so iteration stays valid.
1517 auto Next = std::next(L);
1518 computeIrreducibleMass(&*L, L.base());
1519 L = std::prev(Next);
1520 if (computeMassInLoop(*L))
1522 llvm_unreachable("unhandled irreducible control flow");
// Distribute mass through one loop's DAG. Returns false on an unexpected
// irreducible backedge (the non-header members path; elided lines carry the
// return). On success, computes the loop scale from backedge mass.
1527 bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
1528 // Compute mass in loop.
1529 DEBUG(dbgs() << "compute-mass-in-loop: " << getLoopName(Loop) << "\n");
1531 if (Loop.isIrreducible()) {
// Irreducible "loop": split full mass evenly across all headers. The
// BranchProbability(1, NumHeaders - H) of the remainder gives each
// header an equal share while dithering rounding error.
1532 BlockMass Remaining = BlockMass::getFull();
1533 for (uint32_t H = 0; H < Loop.NumHeaders; ++H) {
1534 auto &Mass = Working[Loop.Nodes[H].Index].getMass();
1535 Mass = Remaining * BranchProbability(1, Loop.NumHeaders - H);
1538 for (const BlockNode &M : Loop.Nodes)
1539 if (!propagateMassToSuccessors(&Loop, M))
1540 llvm_unreachable("unhandled irreducible control flow");
// Reducible loop: all mass starts at the single header.
1542 Working[Loop.getHeader().Index].getMass() = BlockMass::getFull();
1543 if (!propagateMassToSuccessors(&Loop, Loop.getHeader()))
1544 llvm_unreachable("irreducible control flow to loop header!?");
1545 for (const BlockNode &M : Loop.members())
1546 if (!propagateMassToSuccessors(&Loop, M))
1547 // Irreducible backedge.
1551 computeLoopScale(Loop);
// Stage 3: distribute mass through the function-level DAG (all loops are
// packaged by now). Returns false on an irreducible backedge (the return
// lines are elided in this chunk).
1557 bool BlockFrequencyInfoImpl<BT>::tryToComputeMassInFunction() {
1558 // Compute mass in function.
1559 DEBUG(dbgs() << "compute-mass-in-function\n");
1560 assert(!Working.empty() && "no blocks in function");
1561 assert(!Working[0].isLoopHeader() && "entry block is a loop header");
// Entry block (RPOT index 0) starts with full mass.
1563 Working[0].getMass() = BlockMass::getFull();
1564 for (rpot_iterator I = rpot_begin(), IE = rpot_end(); I != IE; ++I) {
1565 // Check for nodes that have been packaged.
1566 BlockNode Node = getNode(I);
1567 if (Working[Node.Index].isPackaged())
1570 if (!propagateMassToSuccessors(nullptr, Node))
// Wrapper around tryToComputeMassInFunction(): on failure, package the
// function-level irreducible SCCs and retry exactly once.
1576 template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInFunction() {
1577 if (tryToComputeMassInFunction())
1579 computeIrreducibleMass(nullptr, Loops.begin());
1580 if (tryToComputeMassInFunction())
1582 llvm_unreachable("unhandled irreducible control flow");
1585 /// \note This should be a lambda, but that crashes GCC 4.7.
1586 namespace bfi_detail {
// Functor passed to IrreducibleGraph: adds an edge for each CFG successor
// of the block underlying \p Irr.
1587 template <class BT> struct BlockEdgesAdder {
1589 typedef BlockFrequencyInfoImplBase::LoopData LoopData;
1590 typedef GraphTraits<const BlockT *> Successor;
1592 const BlockFrequencyInfoImpl<BT> &BFI;
1593 explicit BlockEdgesAdder(const BlockFrequencyInfoImpl<BT> &BFI)
1595 void operator()(IrreducibleGraph &G, IrreducibleGraph::IrrNode &Irr,
1596 const LoopData *OuterLoop) {
1597 const BlockT *BB = BFI.RPOT[Irr.Node.Index];
1598 for (auto I = Successor::child_begin(BB), E = Successor::child_end(BB);
1600 G.addEdge(Irr, BFI.getNode(*I), OuterLoop);
// Find and package irreducible SCCs in \p OuterLoop (or the function when
// null), then compute mass in each discovered pseudo-loop and fold the
// results back into OuterLoop.
1605 void BlockFrequencyInfoImpl<BT>::computeIrreducibleMass(
1606 LoopData *OuterLoop, std::list<LoopData>::iterator Insert) {
1607 DEBUG(dbgs() << "analyze-irreducible-in-";
1608 if (OuterLoop) dbgs() << "loop: " << getLoopName(*OuterLoop) << "\n";
1609 else dbgs() << "function\n");
1611 using namespace bfi_detail;
1612 // Ideally, addBlockEdges() would be declared here as a lambda, but that
1614 BlockEdgesAdder<BT> addBlockEdges(*this);
// Build the explicit irreducible-flow graph and analyze its SCCs.
1615 IrreducibleGraph G(*this, OuterLoop, addBlockEdges);
1617 for (auto &L : analyzeIrreducible(G, OuterLoop, Insert))
1618 computeMassInLoop(L);
1622 updateLoopWithIrreducible(*OuterLoop);
// Divide the mass currently assigned to \p Node among its successors,
// recording exit/backedge data in OuterLoop. A packaged sub-loop's
// successors come from its recorded Exits; an ordinary block's come from
// the CFG weighted by BranchProbabilityInfo. Returns false on an
// irreducible backedge (return lines elided in this chunk).
1627 BlockFrequencyInfoImpl<BT>::propagateMassToSuccessors(LoopData *OuterLoop,
1628 const BlockNode &Node) {
1629 DEBUG(dbgs() << " - node: " << getBlockName(Node) << "\n");
1630 // Calculate probability for successors.
1632 if (auto *Loop = Working[Node.Index].getPackagedLoop()) {
1633 assert(Loop != OuterLoop && "Cannot propagate mass in a packaged loop");
1634 if (!addLoopSuccessorsToDist(OuterLoop, *Loop, Dist))
1635 // Irreducible backedge.
1638 const BlockT *BB = getBlock(Node);
1639 for (auto SI = Successor::child_begin(BB), SE = Successor::child_end(BB);
1641 // Do not dereference SI, or getEdgeWeight() is linear in the number of
// successors; pass the iterator itself to getEdgeWeight().
1643 if (!addToDist(Dist, OuterLoop, Node, getNode(*SI),
1644 BPI->getEdgeWeight(BB, SI)))
1645 // Irreducible backedge.
1649 // Distribute mass to successors, saving exit and backedge data in the
1651 distributeMass(Node, OuterLoop, Dist);
// Print per-block frequencies (float and integer) in the function's natural
// iteration order; see the declaration's comment for why not RPOT.
1656 raw_ostream &BlockFrequencyInfoImpl<BT>::print(raw_ostream &OS) const {
1659 OS << "block-frequency-info: " << F->getName() << "\n";
1660 for (const BlockT &BB : *F)
1661 OS << " - " << bfi_detail::getBlockName(&BB)
1662 << ": float = " << getFloatingBlockFreq(&BB)
1663 << ", int = " << getBlockFreq(&BB).getFrequency() << "\n";
1665 // Add an extra newline for readability.