1 //==- BlockFrequencyInfoImpl.h - Block Frequency Implementation -*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // Shared implementation of BlockFrequency for IR and Machine Instructions.
12 //===----------------------------------------------------------------------===//
14 #ifndef LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
15 #define LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/PostOrderIterator.h"
19 #include "llvm/IR/BasicBlock.h"
20 #include "llvm/Support/BlockFrequency.h"
21 #include "llvm/Support/BranchProbability.h"
22 #include "llvm/Support/Debug.h"
23 #include "llvm/Support/raw_ostream.h"
27 //===----------------------------------------------------------------------===//
29 // PositiveFloat definition.
31 // TODO: Make this private to BlockFrequencyInfoImpl or delete.
33 //===----------------------------------------------------------------------===//
36 class PositiveFloatBase {
38 static const int MaxExponent = 16383;
39 static const int MinExponent = -16382;
40 static const int DefaultPrecision = 10;
42 static void dump(uint64_t D, int16_t E, int Width);
43 static raw_ostream &print(raw_ostream &OS, uint64_t D, int16_t E, int Width,
45 static std::string toString(uint64_t D, int16_t E, int Width,
47 static int countLeadingZeros32(uint32_t N) { return countLeadingZeros(N); }
48 static int countLeadingZeros64(uint64_t N) { return countLeadingZeros(N); }
49 static uint64_t getHalf(uint64_t N) { return (N >> 1) + (N & 1); }
51 static std::pair<uint64_t, bool> splitSigned(int64_t N) {
53 return std::make_pair(N, false);
55 return std::make_pair(uint64_t(N) + 1, true);
56 return std::make_pair(-N, true);
58 static int64_t joinSigned(uint64_t U, bool IsNeg) {
60 return IsNeg ? INT64_MIN : INT64_MAX;
61 return IsNeg ? -int16_t(U) : U;
64 static int32_t extractLg(const std::pair<int32_t, int> &Lg) {
67 static int32_t extractLgFloor(const std::pair<int32_t, int> &Lg) {
68 return Lg.first - (Lg.second > 0);
70 static int32_t extractLgCeiling(const std::pair<int32_t, int> &Lg) {
71 return Lg.first + (Lg.second < 0);
73 static uint64_t getDiff(int16_t L, int16_t R) {
74 assert(L <= R && "arguments in wrong order");
75 return (uint64_t)R - (uint64_t)L;
78 static std::pair<uint64_t, int16_t> divide64(uint64_t L, uint64_t R);
79 static std::pair<uint64_t, int16_t> multiply64(uint64_t L, uint64_t R);
81 static int compare(uint64_t L, uint64_t R, int Shift) {
85 uint64_t L_adjusted = L >> Shift;
91 return L > L_adjusted << Shift ? 1 : 0;
95 /// \brief Simple representation of a positive floating point.
97 /// PositiveFloat is a positive floating point number. It uses simple
98 /// saturation arithmetic, and every operation is well-defined for every value.
100 /// The number is split into a signed exponent and unsigned digits. The number
101 /// represented is \c getDigits()*2^getExponent(). In this way, the digits are
102 /// much like the mantissa in the x87 long double, but there is no canonical
103 /// form, so the same number can be represented by many bit representations
104 /// (it's always in "denormal" mode).
106 /// PositiveFloat is templated on the underlying integer type for digits, which
107 /// is expected to be one of uint64_t, uint32_t, uint16_t or uint8_t.
109 /// Unlike builtin floating point types, PositiveFloat is portable.
111 /// Unlike APFloat, PositiveFloat does not model architecture floating point
112 /// behaviour (this should make it a little faster), and implements most
113 /// operators (this makes it usable).
115 /// PositiveFloat is totally ordered. However, there is no canonical form, so
116 /// there are multiple representations of most scalars. E.g.:
118 /// PositiveFloat(8u, 0) == PositiveFloat(4u, 1)
119 /// PositiveFloat(4u, 1) == PositiveFloat(2u, 2)
120 /// PositiveFloat(2u, 2) == PositiveFloat(1u, 3)
122 /// PositiveFloat implements most arithmetic operations. Precision is kept
123 /// where possible. Uses simple saturation arithmetic, so that operations
124 /// saturate to 0.0 or getLargest() rather than under or overflowing. It has
125 /// some extra arithmetic for unit inversion. 0.0/0.0 is defined to be 0.0.
126 /// Any other division by 0.0 is defined to be getLargest().
128 /// As a convenience for modifying the exponent, left and right shifting are
129 /// both implemented, and both interpret negative shifts as positive shifts in
130 /// the opposite direction.
132 /// Future work might extract most of the implementation into a base class
133 /// (e.g., \c Float) that has an \c IsSigned template parameter. The initial
134 /// use case for this only needed positive semantics, but it wouldn't take much
137 /// Exponents are limited to the range accepted by x87 long double. This makes
138 /// it trivial to add functionality to convert to APFloat (this is already
139 /// relied on for the implementation of printing).
template <class DigitsT> class PositiveFloat : PositiveFloatBase {
  // A *positive* float only makes sense with unsigned digits.
  static_assert(!std::numeric_limits<DigitsT>::is_signed,
                "only unsigned floats supported");

  typedef DigitsT DigitsType;
  typedef std::numeric_limits<DigitsType> DigitsLimits;

  // Number of bits available in the digits (mantissa).
  static const int Width = sizeof(DigitsType) * 8;
  static_assert(Width <= 64, "invalid integer width for digits");

  PositiveFloat() : Digits(0), Exponent(0) {}
  PositiveFloat(DigitsType Digits, int16_t Exponent)
      : Digits(Digits), Exponent(Exponent) {}
  PositiveFloat(const std::pair<uint64_t, int16_t> &X)
      : Digits(X.first), Exponent(X.second) {}

  // Named constructors.
  static PositiveFloat getZero() { return PositiveFloat(0, 0); }
  static PositiveFloat getOne() { return PositiveFloat(1, 0); }
  static PositiveFloat getLargest() {
    return PositiveFloat(DigitsLimits::max(), MaxExponent);
  // Convert a 64-bit integer, rounding into Width digits if necessary.
  static PositiveFloat getFloat(uint64_t N) { return adjustToWidth(N, 0); }
  static PositiveFloat getInverseFloat(uint64_t N) {
    return getFloat(N).invert();
  static PositiveFloat getFraction(DigitsType N, DigitsType D) {
    return getQuotient(N, D);

  int16_t getExponent() const { return Exponent; }
  DigitsType getDigits() const { return Digits; }

  /// Convert to an integer type, saturating and shifting by the exponent.
  template <class IntT> IntT toInt() const;

  bool isZero() const { return !Digits; }
  bool isLargest() const { return *this == getLargest(); }
  // NOTE(review): the next two lines belong to the body of isOne(); the
  // signature and the early "return false" are elided in this chunk.
    if (Exponent > 0 || Exponent <= -Width)
    return Digits == DigitsType(1) << -Exponent;

  /// \brief The log base 2, rounded.
  ///
  /// Get the lg of the scalar. lg 0 is defined to be INT32_MIN.
  int32_t lg() const { return extractLg(lgImpl()); }

  /// \brief The log base 2, rounded towards INT32_MIN.
  ///
  /// Get the lg floor. lg 0 is defined to be INT32_MIN.
  int32_t lgFloor() const { return extractLgFloor(lgImpl()); }

  /// \brief The log base 2, rounded towards INT32_MAX.
  ///
  /// Get the lg ceiling. lg 0 is defined to be INT32_MIN.
  int32_t lgCeiling() const { return extractLgCeiling(lgImpl()); }

  // All comparisons delegate to compare(), which returns <0, 0, or >0.
  bool operator==(const PositiveFloat &X) const { return compare(X) == 0; }
  bool operator<(const PositiveFloat &X) const { return compare(X) < 0; }
  bool operator!=(const PositiveFloat &X) const { return compare(X) != 0; }
  bool operator>(const PositiveFloat &X) const { return compare(X) > 0; }
  bool operator<=(const PositiveFloat &X) const { return compare(X) <= 0; }
  bool operator>=(const PositiveFloat &X) const { return compare(X) >= 0; }

  bool operator!() const { return isZero(); }

  /// \brief Convert to a decimal representation in a string.
  ///
  /// Convert to a string. Uses scientific notation for very large/small
  /// numbers. Scientific notation is used roughly for numbers outside of the
  /// range 2^-64 through 2^64.
  ///
  /// \c Precision indicates the number of decimal digits of precision to use;
  /// 0 requests the maximum available.
  ///
  /// As a special case to make debugging easier, if the number is small enough
  /// to convert without scientific notation and has more than \c Precision
  /// digits before the decimal place, it's printed accurately to the first
  /// digit past zero. E.g., assuming 10 digits of precision:
  ///
  ///     98765432198.7654... => 98765432198.8
  ///      8765432198.7654... =>  8765432198.8
  ///       765432198.7654... =>   765432198.8
  ///        65432198.7654... =>    65432198.77
  ///         5432198.7654... =>     5432198.765
  std::string toString(unsigned Precision = DefaultPrecision) {
    return PositiveFloatBase::toString(Digits, Exponent, Width, Precision);

  /// \brief Print a decimal representation.
  ///
  /// Print a string. See toString for documentation.
  raw_ostream &print(raw_ostream &OS,
                     unsigned Precision = DefaultPrecision) const {
    return PositiveFloatBase::print(OS, Digits, Exponent, Width, Precision);
  void dump() const { return PositiveFloatBase::dump(Digits, Exponent, Width); }

  // Saturating arithmetic; definitions follow the class.
  PositiveFloat &operator+=(const PositiveFloat &X);
  PositiveFloat &operator-=(const PositiveFloat &X);
  PositiveFloat &operator*=(const PositiveFloat &X);
  PositiveFloat &operator/=(const PositiveFloat &X);
  PositiveFloat &operator<<=(int16_t Shift) { return shiftLeft(Shift); }
  PositiveFloat &operator>>=(int16_t Shift) { return shiftRight(Shift); }

  // Negative shifts are interpreted as shifts in the opposite direction.
  PositiveFloat &shiftLeft(int32_t Shift);
  PositiveFloat &shiftRight(int32_t Shift);
  // Bring X's exponent in line with this one's; returns the adjusted X.
  PositiveFloat normalizeExponents(PositiveFloat X);

  /// \brief Scale a large number accurately.
  ///
  /// Scale N (multiply it by this). Uses full precision multiplication, even
  /// if Width is smaller than 64, so information is not lost.
  uint64_t scale(uint64_t N) const;
  uint64_t scaleByInverse(uint64_t N) const {
    // TODO: implement directly, rather than relying on inverse. Inverse is
    return inverse().scale(N);
  // Signed variants route through splitSigned/joinSigned so the magnitude is
  // scaled as an unsigned number.
  int64_t scale(int64_t N) const {
    std::pair<uint64_t, bool> Unsigned = splitSigned(N);
    return joinSigned(scale(Unsigned.first), Unsigned.second);
  int64_t scaleByInverse(int64_t N) const {
    std::pair<uint64_t, bool> Unsigned = splitSigned(N);
    return joinSigned(scaleByInverse(Unsigned.first), Unsigned.second);

  int compare(const PositiveFloat &X) const;
  int compareTo(uint64_t N) const {
    PositiveFloat Float = getFloat(N);
    int Compare = compare(Float);
    if (Width == 64 || Compare != 0)

    // Check for precision loss. We know *this == RoundTrip.
    uint64_t RoundTrip = Float.template toInt<uint64_t>();
    return N == RoundTrip ? 0 : RoundTrip < N ? -1 : 1;
  // A positive float always compares greater than any negative number.
  int compareTo(int64_t N) const { return N < 0 ? 1 : compareTo(uint64_t(N)); }

  PositiveFloat &invert() { return *this = PositiveFloat::getFloat(1) / *this; }
  PositiveFloat inverse() const { return PositiveFloat(*this).invert(); }

  static PositiveFloat getProduct(DigitsType L, DigitsType R);
  static PositiveFloat getQuotient(DigitsType Dividend, DigitsType Divisor);

  std::pair<int32_t, int> lgImpl() const;
  // Dispatch to the 32-bit or 64-bit count-leading-zeros for DigitsType;
  // the guarding conditionals are elided in this chunk.
  static int countLeadingZerosWidth(DigitsType Digits) {
      return countLeadingZeros64(Digits);
      return countLeadingZeros32(Digits);
    return countLeadingZeros32(Digits) + Width - 32;

  /// \brief Round N into Width digits, adjusting the exponent S to match.
  static PositiveFloat adjustToWidth(uint64_t N, int S) {
    assert(S >= MinExponent);
    assert(S <= MaxExponent);
    if (Width == 64 || N <= DigitsLimits::max())
      return PositiveFloat(N, S);

    // Shift right and round, dropping (64 - Width - leading zeros) bits.
    int Shift = 64 - Width - countLeadingZeros64(N);
    DigitsType Shifted = N >> Shift;

    assert(S + Shift <= MaxExponent);
    // Round based on the highest dropped bit.
    return getRounded(PositiveFloat(Shifted, S + Shift),
                      N & UINT64_C(1) << (Shift - 1));

  /// \brief Round P up by one digit if Round is set.
  static PositiveFloat getRounded(PositiveFloat P, bool Round) {
    if (P.Digits == DigitsLimits::max())
      // Careful of overflow in the exponent.
      return PositiveFloat(1, P.Exponent) <<= Width;
    return PositiveFloat(P.Digits + 1, P.Exponent);
// Binary arithmetic: copy the left operand and delegate to the compound
// assignment operators defined by PositiveFloat.
template <class DigitsT>
PositiveFloat<DigitsT> operator+(const PositiveFloat<DigitsT> &L,
                                 const PositiveFloat<DigitsT> &R) {
  return PositiveFloat<DigitsT>(L) += R;
template <class DigitsT>
PositiveFloat<DigitsT> operator-(const PositiveFloat<DigitsT> &L,
                                 const PositiveFloat<DigitsT> &R) {
  return PositiveFloat<DigitsT>(L) -= R;
template <class DigitsT>
PositiveFloat<DigitsT> operator*(const PositiveFloat<DigitsT> &L,
                                 const PositiveFloat<DigitsT> &R) {
  return PositiveFloat<DigitsT>(L) *= R;
template <class DigitsT>
PositiveFloat<DigitsT> operator/(const PositiveFloat<DigitsT> &L,
                                 const PositiveFloat<DigitsT> &R) {
  return PositiveFloat<DigitsT>(L) /= R;
// Shift operators (the Shift parameter line is elided in this chunk).
template <class DigitsT>
PositiveFloat<DigitsT> operator<<(const PositiveFloat<DigitsT> &F,
  return PositiveFloat<DigitsT>(F) <<= Shift;
template <class DigitsT>
PositiveFloat<DigitsT> operator>>(const PositiveFloat<DigitsT> &F,
  return PositiveFloat<DigitsT>(F) >>= Shift;

// Stream output via PositiveFloat::print() with 10 digits of precision.
template <class DigitsT>
raw_ostream &operator<<(raw_ostream &OS, const PositiveFloat<DigitsT> &X) {
  return X.print(OS, 10);

// Comparisons against scalars on the right-hand side all route through
// PositiveFloat::compareTo(); 32-bit scalars are widened to 64 bits first.
template <class DigitsT>
bool operator<(const PositiveFloat<DigitsT> &L, uint64_t R) {
  return L.compareTo(R) < 0;
template <class DigitsT>
bool operator>(const PositiveFloat<DigitsT> &L, uint64_t R) {
  return L.compareTo(R) > 0;
template <class DigitsT>
bool operator==(const PositiveFloat<DigitsT> &L, uint64_t R) {
  return L.compareTo(R) == 0;
template <class DigitsT>
bool operator!=(const PositiveFloat<DigitsT> &L, uint64_t R) {
  return L.compareTo(R) != 0;
template <class DigitsT>
bool operator<=(const PositiveFloat<DigitsT> &L, uint64_t R) {
  return L.compareTo(R) <= 0;
template <class DigitsT>
bool operator>=(const PositiveFloat<DigitsT> &L, uint64_t R) {
  return L.compareTo(R) >= 0;
template <class DigitsT>
bool operator<(const PositiveFloat<DigitsT> &L, int64_t R) {
  return L.compareTo(R) < 0;
template <class DigitsT>
bool operator>(const PositiveFloat<DigitsT> &L, int64_t R) {
  return L.compareTo(R) > 0;
template <class DigitsT>
bool operator==(const PositiveFloat<DigitsT> &L, int64_t R) {
  return L.compareTo(R) == 0;
template <class DigitsT>
bool operator!=(const PositiveFloat<DigitsT> &L, int64_t R) {
  return L.compareTo(R) != 0;
template <class DigitsT>
bool operator<=(const PositiveFloat<DigitsT> &L, int64_t R) {
  return L.compareTo(R) <= 0;
template <class DigitsT>
bool operator>=(const PositiveFloat<DigitsT> &L, int64_t R) {
  return L.compareTo(R) >= 0;
template <class DigitsT>
bool operator<(const PositiveFloat<DigitsT> &L, uint32_t R) {
  return L.compareTo(uint64_t(R)) < 0;
template <class DigitsT>
bool operator>(const PositiveFloat<DigitsT> &L, uint32_t R) {
  return L.compareTo(uint64_t(R)) > 0;
template <class DigitsT>
bool operator==(const PositiveFloat<DigitsT> &L, uint32_t R) {
  return L.compareTo(uint64_t(R)) == 0;
template <class DigitsT>
bool operator!=(const PositiveFloat<DigitsT> &L, uint32_t R) {
  return L.compareTo(uint64_t(R)) != 0;
template <class DigitsT>
bool operator<=(const PositiveFloat<DigitsT> &L, uint32_t R) {
  return L.compareTo(uint64_t(R)) <= 0;
template <class DigitsT>
bool operator>=(const PositiveFloat<DigitsT> &L, uint32_t R) {
  return L.compareTo(uint64_t(R)) >= 0;
template <class DigitsT>
bool operator<(const PositiveFloat<DigitsT> &L, int32_t R) {
  return L.compareTo(int64_t(R)) < 0;
template <class DigitsT>
bool operator>(const PositiveFloat<DigitsT> &L, int32_t R) {
  return L.compareTo(int64_t(R)) > 0;
template <class DigitsT>
bool operator==(const PositiveFloat<DigitsT> &L, int32_t R) {
  return L.compareTo(int64_t(R)) == 0;
template <class DigitsT>
bool operator!=(const PositiveFloat<DigitsT> &L, int32_t R) {
  return L.compareTo(int64_t(R)) != 0;
template <class DigitsT>
bool operator<=(const PositiveFloat<DigitsT> &L, int32_t R) {
  return L.compareTo(int64_t(R)) <= 0;
template <class DigitsT>
bool operator>=(const PositiveFloat<DigitsT> &L, int32_t R) {
  return L.compareTo(int64_t(R)) >= 0;

// Comparisons with the scalar on the left-hand side; the one-line bodies
// (which mirror the right-hand-side forms above) are elided in this chunk.
template <class DigitsT>
bool operator<(uint64_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator>(uint64_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator==(uint64_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator<=(uint64_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator>=(uint64_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator!=(uint64_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator<(int64_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator>(int64_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator==(int64_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator<=(int64_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator>=(int64_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator!=(int64_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator<(uint32_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator>(uint32_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator==(uint32_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator<=(uint32_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator>=(uint32_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator!=(uint32_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator<(int32_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator>(int32_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator==(int32_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator<=(int32_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator>=(int32_t L, const PositiveFloat<DigitsT> &R) {
template <class DigitsT>
bool operator!=(int32_t L, const PositiveFloat<DigitsT> &R) {
// Scale N by *this at full 64-bit precision, even when Width < 64: each
// Width-sized chunk of N is scaled separately and summed.  Several guard
// branches (overflow/underflow early-outs) are elided in this chunk.
template <class DigitsT>
uint64_t PositiveFloat<DigitsT>::scale(uint64_t N) const {
  if (Width == 64 || N <= DigitsLimits::max())
    return (getFloat(N) * *this).template toInt<uint64_t>();
  std::pair<int32_t, int> Lg = lgImpl();
  if (extractLgFloor(Lg) >= 64)
  if (extractLgCeiling(Lg) <= -64)
  for (int Bit = 0; Bit < 64; Bit += Width) {
    PositiveFloat Digit = getFloat(N & DigitsLimits::max() << Bit);
    uint64_t Sum = Result + (Digit.toInt<uint64_t>() >> Bit);

template <class DigitsT>
PositiveFloat<DigitsT> PositiveFloat<DigitsT>::getProduct(DigitsType L,
  // Check for numbers that we can compute with 64-bit math.
  return adjustToWidth(uint64_t(L) * uint64_t(R), 0);
  // Do the full thing.
  return PositiveFloat(multiply64(L, R));
template <class DigitsT>
PositiveFloat<DigitsT> PositiveFloat<DigitsT>::getQuotient(DigitsType Dividend,
                                                           DigitsType Divisor) {
  return PositiveFloat(divide64(Dividend, Divisor));
  // We can compute this with 64-bit math.
  // Shift the dividend left as far as possible to preserve precision.
  int Shift = countLeadingZeros64(Dividend);
  uint64_t Shifted = uint64_t(Dividend) << Shift;
  uint64_t Quotient = Shifted / Divisor;

  // If Quotient needs to be shifted, then adjustToWidth will round.
  if (Quotient > DigitsLimits::max())
    return adjustToWidth(Quotient, -Shift);

  // Round based on the value of the next bit.
  return getRounded(PositiveFloat(Quotient, -Shift),
                    Shifted % Divisor >= getHalf(Divisor));

// Saturating conversion to IntT: clamp to IntT's max, then apply the
// exponent as a left or right shift.
template <class DigitsT>
template <class IntT>
IntT PositiveFloat<DigitsT>::toInt() const {
  typedef std::numeric_limits<IntT> Limits;
  if (*this >= Limits::max())
    return Limits::max();
  assert(size_t(Exponent) < sizeof(IntT) * 8);
  return N << Exponent;
  assert(size_t(-Exponent) < sizeof(IntT) * 8);
  return N >> -Exponent;

// Compute (rounded lg, direction): direction is 0 for an exact power of
// two, +1 if rounded up, -1 if rounded down.
template <class DigitsT>
std::pair<int32_t, int> PositiveFloat<DigitsT>::lgImpl() const {
  return std::make_pair(INT32_MIN, 0);
  // Get the floor of the lg of Digits.
  int32_t LocalFloor = Width - countLeadingZerosWidth(Digits) - 1;

  // Get the floor of the lg of this.
  int32_t Floor = Exponent + LocalFloor;
  if (Digits == UINT64_C(1) << LocalFloor)
    return std::make_pair(Floor, 0);

  // Round based on the next digit.
  bool Round = Digits & UINT64_C(1) << (LocalFloor - 1);
  return std::make_pair(Floor + Round, Round ? 1 : -1);

// Bring X's exponent up to match this one's (shifting digits to
// compensate), so that digit values can be added/subtracted directly.
template <class DigitsT>
PositiveFloat<DigitsT>
PositiveFloat<DigitsT>::normalizeExponents(PositiveFloat X) {
  if (isZero() || X.isZero())
  if (Exponent > X.Exponent) {
    // Reverse the arguments.
    *this = X.normalizeExponents(*this);
  if (Exponent == X.Exponent)
  int ExponentDiff = getDiff(Exponent, X.Exponent);
  if (ExponentDiff >= 2 * Width) {
  // Use up any leading zeros on X, and then shift this.
  int ShiftX = std::min(countLeadingZerosWidth(X.Digits), ExponentDiff);
  int ShiftThis = ExponentDiff - ShiftX;
  if (ShiftThis >= Width) {
  X.Exponent -= ShiftX;
  Digits >>= ShiftThis;
  Exponent += ShiftThis;

// Saturating addition: normalize exponents, add digits, and on overflow
// renormalize (halve digits, bump exponent) or saturate at getLargest().
template <class DigitsT>
PositiveFloat<DigitsT> &PositiveFloat<DigitsT>::
operator+=(const PositiveFloat &X) {
  if (isLargest() || X.isZero())
  if (isZero() || X.isLargest())

  // Normalize exponents.
  PositiveFloat Scaled = normalizeExponents(X);

  // Check for zero again.
    return *this = Scaled;
  DigitsType Sum = Digits + Scaled.Digits;
  bool DidOverflow = Sum < Digits || Sum < Scaled.Digits;
  if (Exponent == MaxExponent)
    return *this = getLargest();
  // Renormalize after overflow: halve and set the carried-out top bit.
  Digits = Digits >> 1 | UINT64_C(1) << (Width - 1);

// Saturating subtraction (saturates at zero).
template <class DigitsT>
PositiveFloat<DigitsT> &PositiveFloat<DigitsT>::
operator-=(const PositiveFloat &X) {
    return *this = getZero();

  // Normalize exponents.
  PositiveFloat Scaled = normalizeExponents(X);
  assert(Digits >= Scaled.Digits);

  // Compute difference.
  if (!Scaled.isZero()) {
    Digits -= Scaled.Digits;
  // Check if X just barely lost its last bit. E.g., for 32-bit:
  //
  //   1*2^32 - 1*2^0 == 0xffffffff != 1*2^32
  if (*this == PositiveFloat(1, X.lgFloor() + Width)) {
    Digits = DigitsType(0) - 1;

// Multiplication: multiply digits at full precision, then fold both
// exponents back in with a saturating shift.
template <class DigitsT>
PositiveFloat<DigitsT> &PositiveFloat<DigitsT>::
operator*=(const PositiveFloat &X) {
  // Save the exponents.
  int32_t Exponents = int32_t(Exponent) + int32_t(X.Exponent);

  // Get the raw product.
  *this = getProduct(Digits, X.Digits);

  // Combine with exponents.
  return *this <<= Exponents;
// Division: divide digits, then fold in the exponent difference.
// Division by zero saturates at getLargest() (0/0 is zero).
template <class DigitsT>
PositiveFloat<DigitsT> &PositiveFloat<DigitsT>::
operator/=(const PositiveFloat &X) {
    return *this = getLargest();

  // Save the exponents.
  int32_t Exponents = int32_t(Exponent) + -int32_t(X.Exponent);

  // Get the raw quotient.
  *this = getQuotient(Digits, X.Digits);

  // Combine with exponents.
  return *this <<= Exponents;
// Left shift: absorb the shift into the exponent where possible, then
// shift digits, saturating at getLargest() if bits would be lost.
template <class DigitsT>
PositiveFloat<DigitsT> &PositiveFloat<DigitsT>::shiftLeft(int32_t Shift) {
    return shiftRight(-Shift);
  if (!Shift || isZero())

  // Shift as much as we can in the exponent.
  int16_t ExponentShift = std::min(Shift, MaxExponent - Exponent);
  Exponent += ExponentShift;
  if (ExponentShift == Shift)

  // Check this late, since it's rare.

  // Shift as far as possible.
  int32_t RawShift = std::min(Shift, countLeadingZerosWidth(Digits));
  if (RawShift + ExponentShift < Shift)
    return *this = getLargest();

// Right shift: mirror of shiftLeft; saturates at zero.
template <class DigitsT>
PositiveFloat<DigitsT> &PositiveFloat<DigitsT>::shiftRight(int32_t Shift) {
    return shiftLeft(-Shift);
  if (!Shift || isZero())

  // Shift as much as we can in the exponent.
  int16_t ExponentShift = std::min(Shift, Exponent - MinExponent);
  Exponent -= ExponentShift;
  if (ExponentShift == Shift)

  // Shift as far as possible.
  int32_t RawShift = Shift - ExponentShift;
  if (RawShift >= Width)
    return *this = getZero();

  // May result in zero.

// Three-way comparison: zero checks, then scale (lgFloor), then digits
// after aligning exponents via PositiveFloatBase::compare.
template <class DigitsT>
int PositiveFloat<DigitsT>::compare(const PositiveFloat &X) const {
    return X.isZero() ? 0 : -1;

  // Check for the scale. Use lgFloor to be sure that the exponent difference
  // is always lower than 64.
  int32_t lgL = lgFloor(), lgR = X.lgFloor();
    return lgL < lgR ? -1 : 1;
  if (Exponent < X.Exponent)
    return PositiveFloatBase::compare(Digits, X.Digits, X.Exponent - Exponent);

  return -PositiveFloatBase::compare(X.Digits, Digits, Exponent - X.Exponent);

// PositiveFloat is trivially copyable; allow POD-style optimizations.
template <class T> struct isPodLike<PositiveFloat<T>> {
  static const bool value = true;
879 //===----------------------------------------------------------------------===//
881 // BlockMass definition.
883 // TODO: Make this private to BlockFrequencyInfoImpl or delete.
885 //===----------------------------------------------------------------------===//
/// \brief Mass of a block.
///
/// This class implements a sort of fixed-point fraction always between 0.0 and
/// 1.0. getMass() == UINT64_MAX indicates a value of 1.0.
///
/// Masses can be added and subtracted. Simple saturation arithmetic is used,
/// so arithmetic operations never overflow or underflow.
///
/// Masses can be multiplied. Multiplication treats full mass as 1.0 and uses
/// an inexpensive floating-point algorithm that's off-by-one (almost, but not
/// quite, maximum precision).
///
/// Masses can be scaled by \a BranchProbability at maximum precision.
  // Default-constructed mass is empty (0.0).
  BlockMass() : Mass(0) {}
  explicit BlockMass(uint64_t Mass) : Mass(Mass) {}

  static BlockMass getEmpty() { return BlockMass(); }
  static BlockMass getFull() { return BlockMass(UINT64_MAX); }

  uint64_t getMass() const { return Mass; }

  bool isFull() const { return Mass == UINT64_MAX; }
  bool isEmpty() const { return !Mass; }

  bool operator!() const { return isEmpty(); }

  /// \brief Add another mass.
  ///
  /// Adds another mass, saturating at \a isFull() rather than overflowing.
  BlockMass &operator+=(const BlockMass &X) {
    uint64_t Sum = Mass + X.Mass;
    // Wrap-around means overflow; clamp to full.
    Mass = Sum < Mass ? UINT64_MAX : Sum;

  /// \brief Subtract another mass.
  ///
  /// Subtracts another mass, saturating at \a isEmpty() rather than
  BlockMass &operator-=(const BlockMass &X) {
    uint64_t Diff = Mass - X.Mass;
    // Wrap-around means underflow; clamp to empty.
    Mass = Diff > Mass ? 0 : Diff;

  /// \brief Scale by another mass.
  ///
  /// The current implementation is a little imprecise, but it's relatively
  /// fast, never overflows, and maintains the property that 1.0*1.0==1.0
  /// (where isFull represents the number 1.0). It's an approximation of
  /// 128-bit multiply that gets right-shifted by 64-bits.
  ///
  /// For a given digit size, multiplying two-digit numbers looks like:
  ///
  ///   + 0 .  U1*L2  . 0 // (shift left once by a digit-size)
  ///   + 0 .  U2*L1  . 0 // (shift left once by a digit-size)
  ///   + U1*L2 . 0 . 0 // (shift left twice by a digit-size)
  ///
  /// BlockMass has 64-bit numbers. Split each into two 32-bit digits, stored
  /// 64-bit. Add 1 to the lower digits, to model isFull as 1.0; this won't
  /// overflow, since we have 64-bit storage for each digit.
  ///
  /// To do this accurately, (a) multiply into two 64-bit digits, incrementing
  /// the upper digit on overflows of the lower digit (carry), (b) subtract 1
  /// from the lower digit, decrementing the upper digit on underflow (carry),
  /// and (c) truncate the lower digit. For the 1.0*1.0 case, the upper digit
  /// will be 0 at the end of step (a), and then will underflow back to isFull
  /// (1.0) in step (b).
  ///
  /// Instead, the implementation does something a little faster with a small
  /// loss of accuracy: ignore the lower 64-bit digit entirely. The loss of
  /// accuracy is small, since the sum of the unmodelled carries is 0 or 1
  /// (i.e., step (a) will overflow at most once, and step (b) will underflow
  /// only if step (a) overflows).
  ///
  /// This is the formula we're calculating:
  ///
  ///     U1.L1 * U2.L2 == U1 * U2 + (U1 * (L2+1))>>32 + (U2 * (L1+1))>>32
  ///
  /// As a demonstration of 1.0*1.0, consider two 4-bit numbers that are both
  ///
  ///     U1.L1 * U2.L2 == U1 * U2 + (U1 * (L2+1))>>2 + (U2 * (L1+1))>>2
  ///     11.11 * 11.11 == 11 * 11 + (11 * (11+1))/4 + (11 * (11+1))/4
  ///                   == 1001 + (11 * 100)/4 + (11 * 100)/4
  ///                   == 1001 + 1100/4 + 1100/4
  ///                   == 1001 + 0011 + 0011
  BlockMass &operator*=(const BlockMass &X) {
    uint64_t U1 = Mass >> 32, L1 = Mass & UINT32_MAX, U2 = X.Mass >> 32,
             L2 = X.Mass & UINT32_MAX;
    Mass = U1 * U2 + (U1 * (L2 + 1) >> 32) + ((L1 + 1) * U2 >> 32);

  /// \brief Multiply by a branch probability.
  ///
  /// Multiply by P. Guarantees full precision.
  ///
  /// This could be naively implemented by multiplying by the numerator and
  /// dividing by the denominator, but in what order? Multiplying first can
  /// overflow, while dividing first will lose precision (potentially, changing
  /// a non-zero mass to zero).
  ///
  /// The implementation mixes the two methods. Since \a BranchProbability
  /// uses 32-bits and \a BlockMass 64-bits, shift the mass as far to the left
  /// as there is room, then divide by the denominator to get a quotient.
  /// Multiplying by the numerator and right shifting gives a first
  ///
  /// Calculate the error in this first approximation by calculating the
  /// opposite mass (multiply by the opposite numerator and shift) and
  /// subtracting both from the original mass.
  ///
  /// Add to the first approximation the correct fraction of this error value.
  /// This time, multiply first and then divide, since there is no danger of
  ///
  /// \pre P represents a fraction between 0.0 and 1.0.
  BlockMass &operator*=(const BranchProbability &P);

  bool operator==(const BlockMass &X) const { return Mass == X.Mass; }
  bool operator<(const BlockMass &X) const { return Mass < X.Mass; }
  bool operator!=(const BlockMass &X) const { return !(*this == X); }
  bool operator>(const BlockMass &X) const { return X < *this; }
  bool operator<=(const BlockMass &X) const { return !(*this > X); }
  bool operator>=(const BlockMass &X) const { return !(*this < X); }

  /// \brief Convert to floating point.
  ///
  /// Convert to a float. \a isFull() gives 1.0, while \a isEmpty() gives
  /// slightly above 0.0.
  PositiveFloat<uint64_t> toFloat() const;

  raw_ostream &print(raw_ostream &OS) const;

// Binary operators delegate to the compound assignments above.
inline BlockMass operator+(const BlockMass &L, const BlockMass &R) {
  return BlockMass(L) += R;
inline BlockMass operator-(const BlockMass &L, const BlockMass &R) {
  return BlockMass(L) -= R;
inline BlockMass operator*(const BlockMass &L, const BlockMass &R) {
  return BlockMass(L) *= R;
inline BlockMass operator*(const BlockMass &L, const BranchProbability &R) {
  return BlockMass(L) *= R;
inline BlockMass operator*(const BranchProbability &L, const BlockMass &R) {
  return BlockMass(R) *= L;

inline raw_ostream &operator<<(raw_ostream &OS, const BlockMass &X) {

// BlockMass is a trivially copyable wrapper around uint64_t.
template <> struct isPodLike<BlockMass> {
  static const bool value = true;
1059 //===----------------------------------------------------------------------===//
1061 // BlockFrequencyInfoImpl definition.
1063 //===----------------------------------------------------------------------===//
1067 class BranchProbabilityInfo;
1071 class MachineBasicBlock;
1072 class MachineBranchProbabilityInfo;
1073 class MachineFunction;
1075 class MachineLoopInfo;
1077 /// \brief Base class for BlockFrequencyInfoImpl
1079 /// BlockFrequencyInfoImplBase has supporting data structures and some
1080 /// algorithms for BlockFrequencyInfoImpl. Only algorithms that depend on
1081 /// the block type (or that call such algorithms) are skipped here.
1083 /// Nevertheless, the majority of the overall algorithm documentation lives with
1084 /// BlockFrequencyInfoImpl. See there for details.
1085 class BlockFrequencyInfoImplBase {
1087 typedef PositiveFloat<uint64_t> Float;
1089 /// \brief Representative of a block.
1091 /// This is a simple wrapper around an index into the reverse-post-order
1092 /// traversal of the blocks.
1094 /// Unlike a block pointer, its order has meaning (location in the
1095 /// topological sort) and its class is the same regardless of block type.
1097 typedef uint32_t IndexType;
1100 bool operator==(const BlockNode &X) const { return Index == X.Index; }
1101 bool operator!=(const BlockNode &X) const { return Index != X.Index; }
1102 bool operator<=(const BlockNode &X) const { return Index <= X.Index; }
1103 bool operator>=(const BlockNode &X) const { return Index >= X.Index; }
1104 bool operator<(const BlockNode &X) const { return Index < X.Index; }
1105 bool operator>(const BlockNode &X) const { return Index > X.Index; }
// A default-constructed node uses UINT32_MAX as the "invalid" sentinel;
// isValid() and getMaxIndex() below are defined around that reservation.
1107 BlockNode() : Index(UINT32_MAX) {}
1108 BlockNode(IndexType Index) : Index(Index) {}
1110 bool isValid() const { return Index <= getMaxIndex(); }
1111 static size_t getMaxIndex() { return UINT32_MAX - 1; }
1114 /// \brief Stats about a block itself.
1115 struct FrequencyData {
1120 /// \brief Index of loop information.
1121 struct WorkingData {
1122 BlockNode ContainingLoop; ///< The block whose loop this block is inside.
1123 uint32_t LoopIndex; ///< Index into PackagedLoops.
1124 bool IsPackaged; ///< Has ContainingLoop been packaged up?
1125 bool IsAPackage; ///< Has this block's loop been packaged up?
1126 BlockMass Mass; ///< Mass distribution from the entry block.
// LoopIndex defaults to UINT32_MAX, which doubles as the "not a loop
// header" marker tested by isLoopHeader().
1129 : LoopIndex(UINT32_MAX), IsPackaged(false), IsAPackage(false) {}
1131 bool hasLoopHeader() const { return ContainingLoop.isValid(); }
1132 bool isLoopHeader() const { return LoopIndex != UINT32_MAX; }
1135 /// \brief Unscaled probability weight.
1137 /// Probability weight for an edge in the graph (including the
1138 /// successor/target node).
1140 /// All edges in the original function are 32-bit. However, exit edges from
1141 /// loop packages are taken from 64-bit exit masses, so we need 64-bits of
1142 /// space in general.
1144 /// In addition to the raw weight amount, Weight stores the type of the edge
1145 /// in the current context (i.e., the context of the loop being processed).
1146 /// Is this a local edge within the loop, an exit from the loop, or a
1147 /// backedge to the loop header?
1149 enum DistType { Local, Exit, Backedge };
1151 BlockNode TargetNode;
// Default: a zero-weight local edge (the target node is left invalid).
1153 Weight() : Type(Local), Amount(0) {}
1156 /// \brief Distribution of unscaled probability weight.
1158 /// Distribution of unscaled probability weight to a set of successors.
1160 /// This class collates the successor edge weights for later processing.
1162 /// \a DidOverflow indicates whether \a Total did overflow while adding to
1163 /// the distribution. It should never overflow twice. There's no flag for
1164 /// whether \a ForwardTotal overflows, since when \a Total exceeds 32-bits
1165 /// they both get re-computed during \a normalize().
1166 struct Distribution {
1167 typedef SmallVector<Weight, 4> WeightList;
1168 WeightList Weights; ///< Individual successor weights.
1169 uint64_t Total; ///< Sum of all weights.
1170 bool DidOverflow; ///< Whether \a Total did overflow.
1171 uint32_t ForwardTotal; ///< Total excluding backedges.
1173 Distribution() : Total(0), DidOverflow(false), ForwardTotal(0) {}
// Convenience wrappers around the private add() for each edge category.
1174 void addLocal(const BlockNode &Node, uint64_t Amount) {
1175 add(Node, Amount, Weight::Local);
1177 void addExit(const BlockNode &Node, uint64_t Amount) {
1178 add(Node, Amount, Weight::Exit);
1180 void addBackedge(const BlockNode &Node, uint64_t Amount) {
1181 add(Node, Amount, Weight::Backedge);
1184 /// \brief Normalize the distribution.
1186 /// Combines multiple edges to the same \a Weight::TargetNode and scales
1187 /// down so that \a Total fits into 32-bits.
1189 /// This is linear in the size of \a Weights. For the vast majority of
1190 /// cases, adjacent edge weights are combined by sorting WeightList and
1191 /// combining adjacent weights. However, for very large edge lists an
1192 /// auxiliary hash table is used.
1196 void add(const BlockNode &Node, uint64_t Amount, Weight::DistType Type);
1199 /// \brief Data for a packaged loop.
1201 /// Contains the data necessary to represent a loop as a node once
// NOTE(review): the next line claims PackagedLoopData inherits from
// BlockData, but no base class is visible on the struct below -- confirm
// whether the comment is stale or the base is declared elsewhere.
1204 /// PackagedLoopData inherits from BlockData to give the node the necessary
1205 /// stats. Further, it has a list of successors, list of members, and stores
1206 /// the backedge mass assigned to this loop.
1207 struct PackagedLoopData {
1208 typedef SmallVector<std::pair<BlockNode, BlockMass>, 4> ExitMap;
1209 typedef SmallVector<BlockNode, 4> MemberList;
1210 BlockNode Header; ///< Header.
1211 ExitMap Exits; ///< Successor edges (and weights).
1212 MemberList Members; ///< Members of the loop.
1213 BlockMass BackedgeMass; ///< Mass returned to loop header.
1217 PackagedLoopData(const BlockNode &Header) : Header(Header) {}
1220 /// \brief Data about each block. This is used downstream.
1221 std::vector<FrequencyData> Freqs;
1223 /// \brief Loop data: see initializeLoops().
1224 std::vector<WorkingData> Working;
1226 /// \brief Indexed information about packaged loops.
1227 std::vector<PackagedLoopData> PackagedLoops;
1229 /// \brief Create the initial loop packages.
1231 /// Initializes PackagedLoops using the data in Working about backedges
1232 /// and containing loops. Called by initializeLoops().
1234 /// \post WorkingData::LoopIndex has been initialized for every loop header
1235 /// and PackagedLoopData::Members has been initialized.
1237 /// \brief Add all edges out of a packaged loop to the distribution.
1239 /// Adds all edges from LocalLoopHead to Dist. Calls addToDist() to add each
1241 void addLoopSuccessorsToDist(const BlockNode &LoopHead,
1242 const BlockNode &LocalLoopHead,
1243 Distribution &Dist);
1245 /// \brief Add an edge to the distribution.
1247 /// Adds an edge to Succ to Dist. If \c LoopHead.isValid(), then whether the
1248 /// edge is forward/exit/backedge is in the context of LoopHead. Otherwise,
1249 /// every edge should be a forward edge (since all the loops are packaged
1251 void addToDist(Distribution &Dist, const BlockNode &LoopHead,
1252 const BlockNode &Pred, const BlockNode &Succ, uint64_t Weight);
/// \brief Get the packaged-loop data for a loop header.
///
/// \pre Head is a loop header: Working[Head.Index].LoopIndex indexes
/// PackagedLoops (both preconditions are asserted below).
1254 PackagedLoopData &getLoopPackage(const BlockNode &Head) {
1255 assert(Head.Index < Working.size());
1256 size_t Index = Working[Head.Index].LoopIndex;
1257 assert(Index < PackagedLoops.size());
1258 return PackagedLoops[Index];
1261 /// \brief Distribute mass according to a distribution.
1263 /// Distributes the mass in Source according to Dist. If LoopHead.isValid(),
1264 /// backedges and exits are stored in its entry in PackagedLoops.
1266 /// Mass is distributed in parallel from two copies of the source mass.
1268 /// The first mass (forward) represents the distribution of mass through the
1269 /// local DAG. This distribution should lose mass at loop exits and ignore
1272 /// The second mass (general) represents the behavior of the loop in the
1273 /// global context. In a given distribution from the head, how much mass
1274 /// exits, and to where? How much mass returns to the loop head?
1276 /// The forward mass should be split up between local successors and exits,
1277 /// but only actually distributed to the local successors. The general mass
1278 /// should be split up between all three types of successors, but distributed
1279 /// only to exits and backedges.
1280 void distributeMass(const BlockNode &Source, const BlockNode &LoopHead,
1281 Distribution &Dist);
1283 /// \brief Compute the loop scale for a loop.
1284 void computeLoopScale(const BlockNode &LoopHead);
1286 /// \brief Package up a loop.
1287 void packageLoop(const BlockNode &LoopHead);
1289 /// \brief Finalize frequency metrics.
1291 /// Unwraps loop packages, calculates final frequencies, and cleans up
1292 /// no-longer-needed data structures.
1293 void finalizeMetrics();
1295 /// \brief Clear all memory.
1298 virtual std::string getBlockName(const BlockNode &Node) const;
// The base-class print is a no-op; BlockFrequencyInfoImpl overrides it.
1300 virtual raw_ostream &print(raw_ostream &OS) const { return OS; }
1301 void dump() const { print(dbgs()); }
1303 Float getFloatingBlockFreq(const BlockNode &Node) const;
1305 BlockFrequency getBlockFreq(const BlockNode &Node) const;
1307 raw_ostream &printBlockFreq(raw_ostream &OS, const BlockNode &Node) const;
1308 raw_ostream &printBlockFreq(raw_ostream &OS,
1309 const BlockFrequency &Freq) const;
/// \brief Get the entry block's integer frequency.
///
/// Node 0 is the first block in reverse post-order, i.e. the entry block.
1311 uint64_t getEntryFreq() const {
1312 assert(!Freqs.empty());
1313 return Freqs[0].Integer;
1315 /// \brief Virtual destructor.
1317 /// Need a virtual destructor to mask the compiler warning about
1319 virtual ~BlockFrequencyInfoImplBase() {}
1322 namespace bfi_detail {
// TypeMap lifts a block type to the whole family of types the algorithm
// needs (function, branch-probability info, loop info). The primary
// template is intentionally empty: only the IR (BasicBlock) and Machine
// (MachineBasicBlock) specializations are valid instantiations.
1323 template <class BlockT> struct TypeMap {};
1324 template <> struct TypeMap<BasicBlock> {
1325 typedef BasicBlock BlockT;
1326 typedef Function FunctionT;
1327 typedef BranchProbabilityInfo BranchProbabilityInfoT;
1329 typedef LoopInfo LoopInfoT;
1331 template <> struct TypeMap<MachineBasicBlock> {
1332 typedef MachineBasicBlock BlockT;
1333 typedef MachineFunction FunctionT;
1334 typedef MachineBranchProbabilityInfo BranchProbabilityInfoT;
1335 typedef MachineLoop LoopT;
1336 typedef MachineLoopInfo LoopInfoT;
1339 /// \brief Get the name of a MachineBasicBlock.
1341 /// Get the name of a MachineBasicBlock. It's templated so that including from
1342 /// CodeGen is unnecessary (that would be a layering issue).
1344 /// This is used mainly for debug output. The name is similar to
1345 /// MachineBasicBlock::getFullName(), but skips the name of the function.
1346 template <class BlockT> std::string getBlockName(const BlockT *BB) {
1347 assert(BB && "Unexpected nullptr");
// Use the block's name when an IR basic block backs it; otherwise fall
// back to a synthesized "BB<number>" label.
1348 if (BB->getBasicBlock())
1349 return BB->getName().str();
1350 return (Twine("BB") + Twine(BB->getNumber())).str();
1352 /// \brief Get the name of a BasicBlock.
1353 template <> inline std::string getBlockName(const BasicBlock *BB) {
1354 assert(BB && "Unexpected nullptr");
1355 return BB->getName().str();
1359 /// \brief Shared implementation for block frequency analysis.
1361 /// This is a shared implementation of BlockFrequencyInfo and
1362 /// MachineBlockFrequencyInfo, and calculates the relative frequencies of
1365 /// This algorithm leverages BlockMass and PositiveFloat to maintain precision,
1366 /// separates mass distribution from loop scaling, and dithers to eliminate
1367 /// probability mass loss.
1369 /// The implementation is split between BlockFrequencyInfoImpl, which knows the
1370 /// type of graph being modelled (BasicBlock vs. MachineBasicBlock), and
1371 /// BlockFrequencyInfoImplBase, which doesn't. The base class uses \a
1372 /// BlockNode, a wrapper around a uint32_t. BlockNode is numbered from 0 in
1373 /// reverse-post order. This gives two advantages: it's easy to compare the
1374 /// relative ordering of two nodes, and maps keyed on BlockT can be represented
1377 /// This algorithm is O(V+E), unless there is irreducible control flow, in
1378 /// which case it's O(V*E) in the worst case.
1380 /// These are the main stages:
1382 /// 0. Reverse post-order traversal (\a initializeRPOT()).
1384 /// Run a single post-order traversal and save it (in reverse) in RPOT.
1385 /// All other stages make use of this ordering. Save a lookup from BlockT
1386 /// to BlockNode (the index into RPOT) in Nodes.
1388 /// 1. Loop indexing (\a initializeLoops()).
1390 /// Translate LoopInfo/MachineLoopInfo into a form suitable for the rest of
1391 /// the algorithm. In particular, store the immediate members of each loop
1392 /// in reverse post-order.
1394 /// 2. Calculate mass and scale in loops (\a computeMassInLoops()).
1396 /// For each loop (bottom-up), distribute mass through the DAG resulting
1397 /// from ignoring backedges and treating sub-loops as a single pseudo-node.
1398 /// Track the backedge mass distributed to the loop header, and use it to
1399 /// calculate the loop scale (number of loop iterations).
1401 /// Visiting loops bottom-up is a post-order traversal of loop headers.
1402 /// For each loop, immediate members that represent sub-loops will already
1403 /// have been visited and packaged into a pseudo-node.
1405 /// Distributing mass in a loop is a reverse-post-order traversal through
1406 /// the loop. Start by assigning full mass to the Loop header. For each
1407 /// node in the loop:
1409 /// - Fetch and categorize the weight distribution for its successors.
1410 /// If this is a packaged-subloop, the weight distribution is stored
1411 /// in \a PackagedLoopData::Exits. Otherwise, fetch it from
1412 /// BranchProbabilityInfo.
1414 /// - Each successor is categorized as \a Weight::Local, a normal
1415 /// forward edge within the current loop, \a Weight::Backedge, a
1416 /// backedge to the loop header, or \a Weight::Exit, any successor
1417 /// outside the loop. The weight, the successor, and its category
1418 /// are stored in \a Distribution. There can be multiple edges to
1421 /// - Normalize the distribution: scale weights down so that their sum
1422 /// is 32-bits, and coalesce multiple edges to the same node.
1424 /// - Distribute the mass accordingly, dithering to minimize mass loss,
1425 /// as described in \a distributeMass(). Mass is distributed in
1426 /// parallel in two ways: forward, and general. Local successors
1427 /// take their mass from the forward mass, while exit and backedge
1428 /// successors take their mass from the general mass. Additionally,
1429 /// exit edges use up (ignored) mass from the forward mass, and local
1430 /// edges use up (ignored) mass from the general distribution.
1432 /// Finally, calculate the loop scale from the accumulated backedge mass.
1434 /// 3. Distribute mass in the function (\a computeMassInFunction()).
1436 /// Finally, distribute mass through the DAG resulting from packaging all
1437 /// loops in the function. This uses the same algorithm as distributing
1438 /// mass in a loop, except that there are no exit or backedge edges.
1440 /// 4. Loop unpackaging and cleanup (\a finalizeMetrics()).
1442 /// Initialize the frequency to a floating point representation of its
1445 /// Visit loops top-down (reverse post-order), scaling the loop header's
1446 /// frequency by its pseudo-node's mass and loop scale. Keep track of the
1447 /// minimum and maximum final frequencies.
1449 /// Using the min and max frequencies as a guide, translate floating point
1450 /// frequencies to an appropriate range in uint64_t.
1452 /// It has some known flaws.
1454 /// - Irreducible control flow isn't modelled correctly. In particular,
1455 /// LoopInfo and MachineLoopInfo ignore irreducible backedges. The main
1456 /// result is that irreducible SCCs will be under-scaled. No mass is lost,
1457 /// but the computed branch weights for the loop pseudo-node will be
1460 /// Modelling irreducible control flow exactly involves setting up and
1461 /// solving a group of infinite geometric series. Such precision is
1462 /// unlikely to be worthwhile, since most of our algorithms give up on
1463 /// irreducible control flow anyway.
1465 /// Nevertheless, we might find that we need to get closer. If
1466 /// LoopInfo/MachineLoopInfo flags loops with irreducible control flow
1467 /// (and/or the function as a whole), we can find the SCCs, compute an
1468 /// approximate exit frequency for the SCC as a whole, and scale up
1471 /// - Loop scale is limited to 4096 per loop (2^12) to avoid exhausting
1472 /// BlockFrequency's 64-bit integer precision.
1473 template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
// Resolve the block-type family (IR vs. Machine) via bfi_detail::TypeMap.
1474 typedef typename bfi_detail::TypeMap<BT>::BlockT BlockT;
1475 typedef typename bfi_detail::TypeMap<BT>::FunctionT FunctionT;
1476 typedef typename bfi_detail::TypeMap<BT>::BranchProbabilityInfoT
1477 BranchProbabilityInfoT;
1478 typedef typename bfi_detail::TypeMap<BT>::LoopT LoopT;
1479 typedef typename bfi_detail::TypeMap<BT>::LoopInfoT LoopInfoT;
1481 typedef GraphTraits<const BlockT *> Successor;
1482 typedef GraphTraits<Inverse<const BlockT *>> Predecessor;
1484 const BranchProbabilityInfoT *BPI;
1485 const LoopInfoT *LI;
1488 // All blocks in reverse postorder.
1489 std::vector<const BlockT *> RPOT;
// Map from block pointer to its node (RPOT index); lookup() yields a
// default (invalid) BlockNode for blocks that are not in the map.
1490 DenseMap<const BlockT *, BlockNode> Nodes;
1492 typedef typename std::vector<const BlockT *>::const_iterator rpot_iterator;
1494 rpot_iterator rpot_begin() const { return RPOT.begin(); }
1495 rpot_iterator rpot_end() const { return RPOT.end(); }
1497 size_t getIndex(const rpot_iterator &I) const { return I - rpot_begin(); }
1499 BlockNode getNode(const rpot_iterator &I) const {
1500 return BlockNode(getIndex(I));
1502 BlockNode getNode(const BlockT *BB) const { return Nodes.lookup(BB); }
1504 const BlockT *getBlock(const BlockNode &Node) const {
1505 return RPOT[Node.Index];
1508 void initializeRPOT();
1509 void initializeLoops();
1510 void runOnFunction(const FunctionT *F);
1512 void propagateMassToSuccessors(const BlockNode &LoopHead,
1513 const BlockNode &Node);
1514 void computeMassInLoops();
1515 void computeMassInLoop(const BlockNode &LoopHead);
1516 void computeMassInFunction();
1518 std::string getBlockName(const BlockNode &Node) const override {
1519 return bfi_detail::getBlockName(getBlock(Node));
1523 const FunctionT *getFunction() const { return F; }
1525 void doFunction(const FunctionT *F, const BranchProbabilityInfoT *BPI,
1526 const LoopInfoT *LI);
// NOTE(review): literal 0 is used for the null pointers here (pre-C++11
// style); elsewhere the file uses nullptr-era idioms -- worth unifying.
1527 BlockFrequencyInfoImpl() : BPI(0), LI(0), F(0) {}
1529 using BlockFrequencyInfoImplBase::getEntryFreq;
1530 BlockFrequency getBlockFreq(const BlockT *BB) const {
1531 return BlockFrequencyInfoImplBase::getBlockFreq(getNode(BB));
1533 Float getFloatingBlockFreq(const BlockT *BB) const {
1534 return BlockFrequencyInfoImplBase::getFloatingBlockFreq(getNode(BB));
1537 /// \brief Print the frequencies for the current function.
1539 /// Prints the frequencies for the blocks in the current function.
1541 /// Blocks are printed in the natural iteration order of the function, rather
1542 /// than reverse post-order. This provides two advantages: writing -analyze
1543 /// tests is easier (since blocks come out in source order), and even
1544 /// unreachable blocks are printed.
1545 raw_ostream &print(raw_ostream &OS) const override;
1546 using BlockFrequencyInfoImplBase::dump;
1548 using BlockFrequencyInfoImplBase::printBlockFreq;
1549 raw_ostream &printBlockFreq(raw_ostream &OS, const BlockT *BB) const {
1550 return BlockFrequencyInfoImplBase::printBlockFreq(OS, getNode(BB));
// Entry point for the analysis: stores the inputs, resets any state from a
// previous run, then computes mass bottom-up in loops and finally across
// the whole function. (Initialization steps between the visible lines are
// elided in this listing.)
1555 void BlockFrequencyInfoImpl<BT>::doFunction(const FunctionT *F,
1556 const BranchProbabilityInfoT *BPI,
1557 const LoopInfoT *LI) {
1558 // Save the parameters.
1563 // Clean up left-over data structures.
1564 BlockFrequencyInfoImplBase::clear();
1569 DEBUG(dbgs() << "\nblock-frequency: " << F->getName() << "\n================="
1570 << std::string(F->getName().size(), '=') << "\n");
1574 // Visit loops in post-order to find the local mass distribution, and then do
1575 // the full function.
1576 computeMassInLoops();
1577 computeMassInFunction();
// Build the reverse-post-order traversal: a post-order walk from the entry
// block, reversed, so the entry block lands at index 0.
1581 template <class BT> void BlockFrequencyInfoImpl<BT>::initializeRPOT() {
1582 const BlockT *Entry = F->begin();
1583 RPOT.reserve(F->size());
1584 std::copy(po_begin(Entry), po_end(Entry), std::back_inserter(RPOT));
1585 std::reverse(RPOT.begin(), RPOT.end());
// BlockNode indices are 32-bit with UINT32_MAX reserved as invalid.
1587 assert(RPOT.size() - 1 <= BlockNode::getMaxIndex() &&
1588 "More nodes in function than Block Frequency Info supports");
1590 DEBUG(dbgs() << "reverse-post-order-traversal\n");
1591 for (rpot_iterator I = rpot_begin(), E = rpot_end(); I != E; ++I) {
1592 BlockNode Node = getNode(I);
// NOTE(review): the part of this loop body that populates the Nodes map is
// not visible in this listing -- confirm each block is recorded here.
1593 DEBUG(dbgs() << " - " << getIndex(I) << ": " << getBlockName(Node) << "\n");
// Size the per-block work and result arrays to match the traversal.
1597 Working.resize(RPOT.size());
1598 Freqs.resize(RPOT.size());
// Index every loop and record each block's deepest containing loop.
1601 template <class BT> void BlockFrequencyInfoImpl<BT>::initializeLoops() {
1602 DEBUG(dbgs() << "loop-detection\n");
1606 // Visit loops top down and assign them an index.
// Breadth-first over the loop tree: top-level loops are enqueued first and
// children are appended behind them, so a parent loop always receives a
// smaller PackagedLoops index than its sub-loops.
1607 std::deque<const LoopT *> Q;
1608 Q.insert(Q.end(), LI->begin(), LI->end());
1609 while (!Q.empty()) {
1610 const LoopT *Loop = Q.front();
1612 Q.insert(Q.end(), Loop->begin(), Loop->end());
1614 // Save the order this loop was visited.
1615 BlockNode Header = getNode(Loop->getHeader());
1616 assert(Header.isValid());
1618 Working[Header.Index].LoopIndex = PackagedLoops.size();
1619 PackagedLoops.emplace_back(Header);
1620 DEBUG(dbgs() << " - loop = " << getBlockName(Header) << "\n");
1623 // Visit nodes in reverse post-order and add them to their deepest containing
1625 for (size_t Index = 0; Index < RPOT.size(); ++Index) {
1626 const LoopT *Loop = LI->getLoopFor(RPOT[Index]);
1630 // If this is a loop header, find its parent loop (if any).
// A loop header belongs to its parent loop's member list, not its own.
1631 if (Working[Index].isLoopHeader())
1632 if (!(Loop = Loop->getParentLoop()))
1635 // Add this node to its containing loop's member list.
1636 BlockNode Header = getNode(Loop->getHeader());
1637 assert(Header.isValid());
1638 const auto &HeaderData = Working[Header.Index];
1639 assert(HeaderData.isLoopHeader());
1641 Working[Index].ContainingLoop = Header;
1642 PackagedLoops[HeaderData.LoopIndex].Members.push_back(Index);
1643 DEBUG(dbgs() << " - loop = " << getBlockName(Header)
1644 << ": member = " << getBlockName(Index) << "\n");
// Distribute mass inside every loop, innermost first.
1648 template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInLoops() {
1649 // Visit loops with the deepest first, and the top-level loops last.
// PackagedLoops was filled top-down in initializeLoops(), so a reverse
// iteration visits each sub-loop before the loop that contains it.
1650 for (auto L = PackagedLoops.rbegin(), LE = PackagedLoops.rend(); L != LE; ++L)
1651 computeMassInLoop(L->Header);
// Distribute mass through one loop's DAG, then compute its scale and
// collapse it into a pseudo-node.
1655 void BlockFrequencyInfoImpl<BT>::computeMassInLoop(const BlockNode &LoopHead) {
1656 // Compute mass in loop.
1657 DEBUG(dbgs() << "compute-mass-in-loop: " << getBlockName(LoopHead) << "\n");
// Seed the header with full mass, then push mass forward through the
// members (stored in reverse post-order; see initializeLoops()).
1659 Working[LoopHead.Index].Mass = BlockMass::getFull();
1660 propagateMassToSuccessors(LoopHead, LoopHead);
1662 for (const BlockNode &M : getLoopPackage(LoopHead).Members)
1663 propagateMassToSuccessors(LoopHead, M);
// The accumulated backedge mass determines the loop scale; afterwards the
// whole loop is packaged so outer levels see it as a single node.
1665 computeLoopScale(LoopHead);
1666 packageLoop(LoopHead);
// Distribute mass across the function-level DAG, in which every loop has
// already been packaged into a pseudo-node.
1669 template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInFunction() {
1670 // Compute mass in function.
1671 DEBUG(dbgs() << "compute-mass-in-function\n");
// Node 0 is the entry block (first in reverse post-order).
1672 Working[0].Mass = BlockMass::getFull();
1673 for (rpot_iterator I = rpot_begin(), IE = rpot_end(); I != IE; ++I) {
1674 // Check for nodes that have been packaged.
// Blocks inside a containing loop are handled by their loop package and
// skipped here.
1675 BlockNode Node = getNode(I);
1676 if (Working[Node.Index].hasLoopHeader())
// An invalid (default) LoopHead means every edge is treated as a forward
// edge; see addToDist().
1679 propagateMassToSuccessors(BlockNode(), Node);
// Collect the weight distribution for Node's successors (in the context of
// LoopHead) and distribute Node's mass accordingly.
1685 BlockFrequencyInfoImpl<BT>::propagateMassToSuccessors(const BlockNode &LoopHead,
1686 const BlockNode &Node) {
1687 DEBUG(dbgs() << " - node: " << getBlockName(Node) << "\n");
1688 // Calculate probability for successors.
// A packaged sub-loop's successors come from its stored exit edges rather
// than from branch-probability queries.
1690 if (Node != LoopHead && Working[Node.Index].isLoopHeader())
1691 addLoopSuccessorsToDist(LoopHead, Node, Dist);
1693 const BlockT *BB = getBlock(Node);
1694 for (auto SI = Successor::child_begin(BB), SE = Successor::child_end(BB);
1696 // Do not dereference SI, or getEdgeWeight() is linear in the number of
// (getEdgeWeight takes the successor iterator itself; *SI is only used
// for the cheap getNode() lookup.)
1698 addToDist(Dist, LoopHead, Node, getNode(*SI), BPI->getEdgeWeight(BB, SI));
1701 // Distribute mass to successors, saving exit and backedge data in the
1703 distributeMass(Node, LoopHead, Dist);
// Print every block's floating-point and integer frequency, iterating the
// function in its natural order (see the declaration's comment for why).
1707 raw_ostream &BlockFrequencyInfoImpl<BT>::print(raw_ostream &OS) const {
1710 OS << "block-frequency-info: " << F->getName() << "\n";
1711 for (const BlockT &BB : *F)
1712 OS << " - " << bfi_detail::getBlockName(&BB)
1713 << ": float = " << getFloatingBlockFreq(&BB)
1714 << ", int = " << getBlockFreq(&BB).getFrequency() << "\n";
1716 // Add an extra newline for readability.