X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FAnalysis%2FBlockFrequencyInfoImpl.cpp;h=903a263a65fee860def7cd346f5c107524fecbf0;hp=e7424aebd7f41a4e2d8795b07289669bcf410dd6;hb=965f2c2437c272146c447eb1775c65abd3d4bb57;hpb=9a11d668f97d1712d6e24acc96a0603e0855fc80 diff --git a/lib/Analysis/BlockFrequencyInfoImpl.cpp b/lib/Analysis/BlockFrequencyInfoImpl.cpp index e7424aebd7f..903a263a65f 100644 --- a/lib/Analysis/BlockFrequencyInfoImpl.cpp +++ b/lib/Analysis/BlockFrequencyInfoImpl.cpp @@ -11,334 +11,20 @@ // //===----------------------------------------------------------------------===// -#define DEBUG_TYPE "block-freq" #include "llvm/Analysis/BlockFrequencyInfoImpl.h" -#include "llvm/ADT/APFloat.h" +#include "llvm/ADT/SCCIterator.h" #include "llvm/Support/raw_ostream.h" -#include +#include using namespace llvm; +using namespace llvm::bfi_detail; -//===----------------------------------------------------------------------===// -// -// PositiveFloat implementation. -// -//===----------------------------------------------------------------------===// -#ifndef _MSC_VER -const int32_t PositiveFloatBase::MaxExponent; -const int32_t PositiveFloatBase::MinExponent; -#endif - -static void appendDigit(std::string &Str, unsigned D) { - assert(D < 10); - Str += '0' + D % 10; -} - -static void appendNumber(std::string &Str, uint64_t N) { - while (N) { - appendDigit(Str, N % 10); - N /= 10; - } -} - -static bool doesRoundUp(char Digit) { - switch (Digit) { - case '5': - case '6': - case '7': - case '8': - case '9': - return true; - default: - return false; - } -} - -static std::string toStringAPFloat(uint64_t D, int E, unsigned Precision) { - assert(E >= PositiveFloatBase::MinExponent); - assert(E <= PositiveFloatBase::MaxExponent); - - // Find a new E, but don't let it increase past MaxExponent. - int LeadingZeros = PositiveFloatBase::countLeadingZeros64(D); - int NewE = std::min(PositiveFloatBase::MaxExponent, E + 63 - LeadingZeros); - int Shift = 63 - (NewE - E); - assert(Shift <= LeadingZeros); - assert(Shift == LeadingZeros || NewE == PositiveFloatBase::MaxExponent); - D <<= Shift; - E = NewE; - - // Check for a denormal. - unsigned AdjustedE = E + 16383; - if (!(D >> 63)) { - assert(E == PositiveFloatBase::MaxExponent); - AdjustedE = 0; - } - - // Build the float and print it. - uint64_t RawBits[2] = {D, AdjustedE}; - APFloat Float(APFloat::x87DoubleExtended, APInt(80, RawBits)); - SmallVector Chars; - Float.toString(Chars, Precision, 0); - return std::string(Chars.begin(), Chars.end()); -} - -static std::string stripTrailingZeros(const std::string &Float) { - size_t NonZero = Float.find_last_not_of('0'); - assert(NonZero != std::string::npos && "no . in floating point string"); - - if (Float[NonZero] == '.') - ++NonZero; - - return Float.substr(0, NonZero + 1); -} - -std::string PositiveFloatBase::toString(uint64_t D, int16_t E, int Width, - unsigned Precision) { - if (!D) - return "0.0"; - - // Canonicalize exponent and digits. - uint64_t Above0 = 0; - uint64_t Below0 = 0; - uint64_t Extra = 0; - int ExtraShift = 0; - if (E == 0) { - Above0 = D; - } else if (E > 0) { - if (int Shift = std::min(int16_t(countLeadingZeros64(D)), E)) { - D <<= Shift; - E -= Shift; - - if (!E) - Above0 = D; - } - } else if (E > -64) { - Above0 = D >> -E; - Below0 = D << (64 + E); - } else if (E > -120) { - Below0 = D >> (-E - 64); - Extra = D << (128 + E); - ExtraShift = -64 - E; - } - - // Fall back on APFloat for very small and very large numbers. 
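  // (Illustrative aside, not part of the original code: Above0 and Below0 can
  // both be zero only when every significant bit of D*2^E falls outside the
  // integer/fraction bits tracked above -- roughly, values of 2^64 or more
  // that could not be renormalized to a zero exponent, and values whose bits
  // all lie below about 2^-64 -- so only these extremes reach the slower
  // APFloat-based printer below.)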
- if (!Above0 && !Below0) - return toStringAPFloat(D, E, Precision); - - // Append the digits before the decimal. - std::string Str; - size_t DigitsOut = 0; - if (Above0) { - appendNumber(Str, Above0); - DigitsOut = Str.size(); - } else - appendDigit(Str, 0); - std::reverse(Str.begin(), Str.end()); - - // Return early if there's nothing after the decimal. - if (!Below0) - return Str + ".0"; - - // Append the decimal and beyond. - Str += '.'; - uint64_t Error = UINT64_C(1) << (64 - Width); - - // We need to shift Below0 to the right to make space for calculating - // digits. Save the precision we're losing in Extra. - Extra = (Below0 & 0xf) << 56 | (Extra >> 8); - Below0 >>= 4; - size_t SinceDot = 0; - size_t AfterDot = Str.size(); - do { - if (ExtraShift) { - --ExtraShift; - Error *= 5; - } else - Error *= 10; - - Below0 *= 10; - Extra *= 10; - Below0 += (Extra >> 60); - Extra = Extra & (UINT64_MAX >> 4); - appendDigit(Str, Below0 >> 60); - Below0 = Below0 & (UINT64_MAX >> 4); - if (DigitsOut || Str.back() != '0') - ++DigitsOut; - ++SinceDot; - } while (Error && (Below0 << 4 | Extra >> 60) >= Error / 2 && - (!Precision || DigitsOut <= Precision || SinceDot < 2)); - - // Return early for maximum precision. - if (!Precision || DigitsOut <= Precision) - return stripTrailingZeros(Str); - - // Find where to truncate. - size_t Truncate = - std::max(Str.size() - (DigitsOut - Precision), AfterDot + 1); - - // Check if there's anything to truncate. - if (Truncate >= Str.size()) - return stripTrailingZeros(Str); - - bool Carry = doesRoundUp(Str[Truncate]); - if (!Carry) - return stripTrailingZeros(Str.substr(0, Truncate)); - - // Round with the first truncated digit. - for (std::string::reverse_iterator I(Str.begin() + Truncate), E = Str.rend(); - I != E; ++I) { - if (*I == '.') - continue; - if (*I == '9') { - *I = '0'; - continue; - } - - ++*I; - Carry = false; - break; - } - - // Add "1" in front if we still need to carry. - return stripTrailingZeros(std::string(Carry, '1') + Str.substr(0, Truncate)); -} - -raw_ostream &PositiveFloatBase::print(raw_ostream &OS, uint64_t D, int16_t E, - int Width, unsigned Precision) { - return OS << toString(D, E, Width, Precision); -} - -void PositiveFloatBase::dump(uint64_t D, int16_t E, int Width) { - print(dbgs(), D, E, Width, 0) << "[" << Width << ":" << D << "*2^" << E - << "]"; -} - -static std::pair -getRoundedFloat(uint64_t N, bool ShouldRound, int64_t Shift) { - if (ShouldRound) - if (!++N) - // Rounding caused an overflow. - return std::make_pair(UINT64_C(1), Shift + 64); - return std::make_pair(N, Shift); -} - -std::pair PositiveFloatBase::divide64(uint64_t Dividend, - uint64_t Divisor) { - // Input should be sanitized. - assert(Divisor); - assert(Dividend); - - // Minimize size of divisor. - int16_t Shift = 0; - if (int Zeros = countTrailingZeros(Divisor)) { - Shift -= Zeros; - Divisor >>= Zeros; - } - - // Check for powers of two. - if (Divisor == 1) - return std::make_pair(Dividend, Shift); - - // Maximize size of dividend. - if (int Zeros = countLeadingZeros64(Dividend)) { - Shift -= Zeros; - Dividend <<= Zeros; - } - - // Start with the result of a divide. - uint64_t Quotient = Dividend / Divisor; - Dividend %= Divisor; - - // Continue building the quotient with long division. - // - // TODO: continue with largers digits. - while (!(Quotient >> 63) && Dividend) { - // Shift Dividend, and check for overflow. - bool IsOverflow = Dividend >> 63; - Dividend <<= 1; - --Shift; - - // Divide. 
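    // (Illustrative aside, not part of the original code: each pass through
    // this loop emits one binary digit, just like long division in base 2.
    // For example, with Divisor == 3 and a current remainder of 1, the
    // remainder doubles to 2, which is still below 3, so a 0 bit is appended;
    // the next pass doubles it to 4, subtracts 3, and appends a 1 bit --
    // producing the repeating 01 pattern of 1/3 in binary.)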
- bool DoesDivide = IsOverflow || Divisor <= Dividend; - Quotient = (Quotient << 1) | uint64_t(DoesDivide); - Dividend -= DoesDivide ? Divisor : 0; - } - - // Round. - if (Dividend >= getHalf(Divisor)) - if (!++Quotient) - // Rounding caused an overflow in Quotient. - return std::make_pair(UINT64_C(1), Shift + 64); - - return getRoundedFloat(Quotient, Dividend >= getHalf(Divisor), Shift); -} - -std::pair PositiveFloatBase::multiply64(uint64_t L, - uint64_t R) { - // Separate into two 32-bit digits (U.L). - uint64_t UL = L >> 32, LL = L & UINT32_MAX, UR = R >> 32, LR = R & UINT32_MAX; - - // Compute cross products. - uint64_t P1 = UL * UR, P2 = UL * LR, P3 = LL * UR, P4 = LL * LR; - - // Sum into two 64-bit digits. - uint64_t Upper = P1, Lower = P4; - auto addWithCarry = [&](uint64_t N) { - uint64_t NewLower = Lower + (N << 32); - Upper += (N >> 32) + (NewLower < Lower); - Lower = NewLower; - }; - addWithCarry(P2); - addWithCarry(P3); - - // Check whether the upper digit is empty. - if (!Upper) - return std::make_pair(Lower, 0); - - // Shift as little as possible to maximize precision. - unsigned LeadingZeros = countLeadingZeros64(Upper); - int16_t Shift = 64 - LeadingZeros; - if (LeadingZeros) - Upper = Upper << LeadingZeros | Lower >> Shift; - bool ShouldRound = Shift && (Lower & UINT64_C(1) << (Shift - 1)); - return getRoundedFloat(Upper, ShouldRound, Shift); -} - -//===----------------------------------------------------------------------===// -// -// BlockMass implementation. -// -//===----------------------------------------------------------------------===// -BlockMass &BlockMass::operator*=(const BranchProbability &P) { - uint32_t N = P.getNumerator(), D = P.getDenominator(); - assert(D && "divide by 0"); - assert(N <= D && "fraction greater than 1"); - - // Fast path for multiplying by 1.0. - if (!Mass || N == D) - return *this; - - // Get as much precision as we can. - int Shift = countLeadingZeros(Mass); - uint64_t ShiftedQuotient = (Mass << Shift) / D; - uint64_t Product = ShiftedQuotient * N >> Shift; - - // Now check for what's lost. - uint64_t Left = ShiftedQuotient * (D - N) >> Shift; - uint64_t Lost = Mass - Product - Left; - - // TODO: prove this assertion. - assert(Lost <= UINT32_MAX); - - // Take the product plus a portion of the spoils. - Mass = Product + Lost * N / D; - return *this; -} +#define DEBUG_TYPE "block-freq" -PositiveFloat BlockMass::toFloat() const { +ScaledNumber BlockMass::toScaled() const { if (isFull()) - return PositiveFloat(1, 0); - return PositiveFloat(getMass() + 1, -64); + return ScaledNumber(1, 0); + return ScaledNumber(getMass() + 1, -64); } void BlockMass::dump() const { print(dbgs()); } @@ -355,18 +41,13 @@ raw_ostream &BlockMass::print(raw_ostream &OS) const { return OS; } -//===----------------------------------------------------------------------===// -// -// BlockFrequencyInfoImpl implementation. 
-// -//===----------------------------------------------------------------------===// namespace { typedef BlockFrequencyInfoImplBase::BlockNode BlockNode; typedef BlockFrequencyInfoImplBase::Distribution Distribution; typedef BlockFrequencyInfoImplBase::Distribution::WeightList WeightList; -typedef BlockFrequencyInfoImplBase::Float Float; -typedef BlockFrequencyInfoImplBase::PackagedLoopData PackagedLoopData; +typedef BlockFrequencyInfoImplBase::Scaled64 Scaled64; +typedef BlockFrequencyInfoImplBase::LoopData LoopData; typedef BlockFrequencyInfoImplBase::Weight Weight; typedef BlockFrequencyInfoImplBase::FrequencyData FrequencyData; @@ -388,55 +69,24 @@ typedef BlockFrequencyInfoImplBase::FrequencyData FrequencyData; /// 2. Calculate the portion's mass as \a RemMass times P. /// 3. Update \a RemWeight and \a RemMass at each portion by subtracting /// the current portion's weight and mass. -/// -/// Mass is distributed in two ways: full distribution and forward -/// distribution. The latter ignores backedges, and uses the parallel fields -/// \a RemForwardWeight and \a RemForwardMass. struct DitheringDistributer { uint32_t RemWeight; - uint32_t RemForwardWeight; - BlockMass RemMass; - BlockMass RemForwardMass; DitheringDistributer(Distribution &Dist, const BlockMass &Mass); - BlockMass takeLocalMass(uint32_t Weight) { - (void)takeMass(Weight); - return takeForwardMass(Weight); - } - BlockMass takeExitMass(uint32_t Weight) { - (void)takeForwardMass(Weight); - return takeMass(Weight); - } - BlockMass takeBackedgeMass(uint32_t Weight) { return takeMass(Weight); } - -private: - BlockMass takeForwardMass(uint32_t Weight); BlockMass takeMass(uint32_t Weight); }; -} + +} // end namespace DitheringDistributer::DitheringDistributer(Distribution &Dist, const BlockMass &Mass) { Dist.normalize(); RemWeight = Dist.Total; - RemForwardWeight = Dist.ForwardTotal; RemMass = Mass; - RemForwardMass = Dist.ForwardTotal ? Mass : BlockMass(); } -BlockMass DitheringDistributer::takeForwardMass(uint32_t Weight) { - // Compute the amount of mass to take. - assert(Weight && "invalid weight"); - assert(Weight <= RemForwardWeight); - BlockMass Mass = RemForwardMass * BranchProbability(Weight, RemForwardWeight); - - // Decrement totals (dither). - RemForwardWeight -= Weight; - RemForwardMass -= Mass; - return Mass; -} BlockMass DitheringDistributer::takeMass(uint32_t Weight) { assert(Weight && "invalid weight"); assert(Weight <= RemWeight); @@ -462,18 +112,7 @@ void Distribution::add(const BlockNode &Node, uint64_t Amount, Total = NewTotal; // Save the weight. - Weight W; - W.TargetNode = Node; - W.Amount = Amount; - W.Type = Type; - Weights.push_back(W); - - if (Type == Weight::Backedge) - return; - - // Update forward total. Don't worry about overflow here, since then Total - // will exceed 32-bits and they'll both be recomputed in normalize(). - ForwardTotal += Amount; + Weights.push_back(Weight(Type, Node, Amount)); } static void combineWeight(Weight &W, const Weight &OtherW) { @@ -484,8 +123,12 @@ static void combineWeight(Weight &W, const Weight &OtherW) { } assert(W.Type == OtherW.Type); assert(W.TargetNode == OtherW.TargetNode); - assert(W.Amount < W.Amount + OtherW.Amount); - W.Amount += OtherW.Amount; + assert(OtherW.Amount && "Expected non-zero weight"); + if (W.Amount > W.Amount + OtherW.Amount) + // Saturate on overflow. + W.Amount = UINT64_MAX; + else + W.Amount += OtherW.Amount; } static void combineWeightsBySorting(WeightList &Weights) { // Sort so edges to the same node are adjacent. 
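  // (Illustrative aside, not part of the original code: once duplicate edges
  // to the same TargetNode are adjacent they are merged via combineWeight()
  // above, whose overflow check relies on unsigned wrap-around -- e.g. with
  // W.Amount == UINT64_MAX - 1 and OtherW.Amount == 5 the sum wraps to 3,
  // which is smaller than W.Amount, so the combined weight saturates to
  // UINT64_MAX instead of silently wrapping.)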
@@ -553,7 +196,6 @@ void Distribution::normalize() { // Early exit when combined into a single successor. if (Weights.size() == 1) { Total = 1; - ForwardTotal = Weights.front().Type != Weight::Backedge; Weights.front().Amount = 1; return; } @@ -569,13 +211,20 @@ void Distribution::normalize() { Shift = 33 - countLeadingZeros(Total); // Early exit if nothing needs to be scaled. - if (!Shift) + if (!Shift) { + // If we didn't overflow then combineWeights() shouldn't have changed the + // sum of the weights, but let's double-check. + assert(Total == std::accumulate(Weights.begin(), Weights.end(), UINT64_C(0), + [](uint64_t Sum, const Weight &W) { + return Sum + W.Amount; + }) && + "Expected total to be correct"); return; + } // Recompute the total through accumulation (rather than shifting it) so that - // it's accurate after shifting. ForwardTotal is dirty here anyway. + // it's accurate after shifting and any changes combineWeights() made above. Total = 0; - ForwardTotal = 0; // Sum the weights to each node and shift right if necessary. for (Weight &W : Weights) { @@ -587,17 +236,16 @@ void Distribution::normalize() { // Update the total. Total += W.Amount; - if (W.Type == Weight::Backedge) - continue; - - // Update the forward total. - ForwardTotal += W.Amount; } assert(Total <= UINT32_MAX); } void BlockFrequencyInfoImplBase::clear() { - *this = BlockFrequencyInfoImplBase(); + // Swap with a default-constructed std::vector, since std::vector<>::clear() + // does not actually clear heap storage. + std::vector().swap(Freqs); + std::vector().swap(Working); + Loops.clear(); } /// \brief Clear all memory not needed downstream. @@ -609,57 +257,25 @@ static void cleanup(BlockFrequencyInfoImplBase &BFI) { BFI.Freqs = std::move(SavedFreqs); } -/// \brief Get a possibly packaged node. -/// -/// Get the node currently representing Node, which could be a containing -/// loop. -/// -/// This function should only be called when distributing mass. As long as -/// there are no irreducilbe edges to Node, then it will have complexity O(1) -/// in this context. -/// -/// In general, the complexity is O(L), where L is the number of loop headers -/// Node has been packaged into. Since this method is called in the context -/// of distributing mass, L will be the number of loop headers an early exit -/// edge jumps out of. -static BlockNode getPackagedNode(const BlockFrequencyInfoImplBase &BFI, - const BlockNode &Node) { - assert(Node.isValid()); - if (!BFI.Working[Node.Index].IsPackaged) - return Node; - if (!BFI.Working[Node.Index].ContainingLoop.isValid()) - return Node; - return getPackagedNode(BFI, BFI.Working[Node.Index].ContainingLoop); -} - -/// \brief Get the appropriate mass for a possible pseudo-node loop package. -/// -/// Get appropriate mass for Node. If Node is a loop-header (whose loop has -/// been packaged), returns the mass of its pseudo-node. If it's a node inside -/// a packaged loop, it returns the loop's pseudo-node. 
-static BlockMass &getPackageMass(BlockFrequencyInfoImplBase &BFI, - const BlockNode &Node) { - assert(Node.isValid()); - assert(!BFI.Working[Node.Index].IsPackaged); - if (!BFI.Working[Node.Index].IsAPackage) - return BFI.Working[Node.Index].Mass; - - return BFI.getLoopPackage(Node).Mass; -} - -void BlockFrequencyInfoImplBase::addToDist(Distribution &Dist, - const BlockNode &LoopHead, +bool BlockFrequencyInfoImplBase::addToDist(Distribution &Dist, + const LoopData *OuterLoop, const BlockNode &Pred, const BlockNode &Succ, uint64_t Weight) { if (!Weight) Weight = 1; + auto isLoopHeader = [&OuterLoop](const BlockNode &Node) { + return OuterLoop && OuterLoop->isHeader(Node); + }; + + BlockNode Resolved = Working[Succ.Index].getResolvedNode(); + #ifndef NDEBUG - auto debugSuccessor = [&](const char *Type, const BlockNode &Resolved) { + auto debugSuccessor = [&](const char *Type) { dbgs() << " =>" << " [" << Type << "] weight = " << Weight; - if (Succ != LoopHead) + if (!isLoopHeader(Resolved)) dbgs() << ", succ = " << getBlockName(Succ); if (Resolved != Succ) dbgs() << ", resolved = " << getBlockName(Resolved); @@ -668,224 +284,227 @@ void BlockFrequencyInfoImplBase::addToDist(Distribution &Dist, (void)debugSuccessor; #endif - if (Succ == LoopHead) { - DEBUG(debugSuccessor("backedge", Succ)); - Dist.addBackedge(LoopHead, Weight); - return; + if (isLoopHeader(Resolved)) { + DEBUG(debugSuccessor("backedge")); + Dist.addBackedge(Resolved, Weight); + return true; } - BlockNode Resolved = getPackagedNode(*this, Succ); - assert(Resolved != LoopHead); - if (Working[Resolved.Index].ContainingLoop != LoopHead) { - DEBUG(debugSuccessor(" exit ", Resolved)); + if (Working[Resolved.Index].getContainingLoop() != OuterLoop) { + DEBUG(debugSuccessor(" exit ")); Dist.addExit(Resolved, Weight); - return; + return true; } - if (!LoopHead.isValid() && Resolved < Pred) { - // Irreducible backedge. Skip this edge in the distribution. - DEBUG(debugSuccessor("skipped ", Resolved)); - return; + if (Resolved < Pred) { + if (!isLoopHeader(Pred)) { + // If OuterLoop is an irreducible loop, we can't actually handle this. + assert((!OuterLoop || !OuterLoop->isIrreducible()) && + "unhandled irreducible control flow"); + + // Irreducible backedge. Abort. + DEBUG(debugSuccessor("abort!!!")); + return false; + } + + // If "Pred" is a loop header, then this isn't really a backedge; rather, + // OuterLoop must be irreducible. These false backedges can come only from + // secondary loop headers. + assert(OuterLoop && OuterLoop->isIrreducible() && !isLoopHeader(Resolved) && + "unhandled irreducible control flow"); } - DEBUG(debugSuccessor(" local ", Resolved)); + DEBUG(debugSuccessor(" local ")); Dist.addLocal(Resolved, Weight); + return true; } -void BlockFrequencyInfoImplBase::addLoopSuccessorsToDist( - const BlockNode &LoopHead, const BlockNode &LocalLoopHead, - Distribution &Dist) { - PackagedLoopData &LoopPackage = getLoopPackage(LocalLoopHead); - const PackagedLoopData::ExitMap &Exits = LoopPackage.Exits; - +bool BlockFrequencyInfoImplBase::addLoopSuccessorsToDist( + const LoopData *OuterLoop, LoopData &Loop, Distribution &Dist) { // Copy the exit map into Dist. - for (const auto &I : Exits) - addToDist(Dist, LoopHead, LocalLoopHead, I.first, I.second.getMass()); + for (const auto &I : Loop.Exits) + if (!addToDist(Dist, OuterLoop, Loop.getHeader(), I.first, + I.second.getMass())) + // Irreducible backedge. + return false; - // We don't need this map any more. 
Clear it to prevent quadratic memory - // usage in deeply nested loops with irreducible control flow. - LoopPackage.Exits.clear(); + return true; } -/// \brief Get the maximum allowed loop scale. -/// -/// Gives the maximum number of estimated iterations allowed for a loop. -/// Downstream users have trouble with very large numbers (even within -/// 64-bits). Perhaps they can be changed to use PositiveFloat. -/// -/// TODO: change downstream users so that this can be increased or removed. -static Float getMaxLoopScale() { return Float(1, 12); } - /// \brief Compute the loop scale for a loop. -void BlockFrequencyInfoImplBase::computeLoopScale(const BlockNode &LoopHead) { +void BlockFrequencyInfoImplBase::computeLoopScale(LoopData &Loop) { // Compute loop scale. - DEBUG(dbgs() << "compute-loop-scale: " << getBlockName(LoopHead) << "\n"); + DEBUG(dbgs() << "compute-loop-scale: " << getLoopName(Loop) << "\n"); + + // Infinite loops need special handling. If we give the back edge an infinite + // mass, they may saturate all the other scales in the function down to 1, + // making all the other region temperatures look exactly the same. Choose an + // arbitrary scale to avoid these issues. + // + // FIXME: An alternate way would be to select a symbolic scale which is later + // replaced to be the maximum of all computed scales plus 1. This would + // appropriately describe the loop as having a large scale, without skewing + // the final frequency computation. + const Scaled64 InifiniteLoopScale(1, 12); // LoopScale == 1 / ExitMass // ExitMass == HeadMass - BackedgeMass - PackagedLoopData &LoopPackage = getLoopPackage(LoopHead); - BlockMass ExitMass = BlockMass::getFull() - LoopPackage.BackedgeMass; + BlockMass TotalBackedgeMass; + for (auto &Mass : Loop.BackedgeMass) + TotalBackedgeMass += Mass; + BlockMass ExitMass = BlockMass::getFull() - TotalBackedgeMass; - // Block scale stores the inverse of the scale. - LoopPackage.Scale = ExitMass.toFloat().inverse(); + // Block scale stores the inverse of the scale. If this is an infinite loop, + // its exit mass will be zero. In this case, use an arbitrary scale for the + // loop scale. + Loop.Scale = + ExitMass.isEmpty() ? InifiniteLoopScale : ExitMass.toScaled().inverse(); DEBUG(dbgs() << " - exit-mass = " << ExitMass << " (" << BlockMass::getFull() - << " - " << LoopPackage.BackedgeMass << ")\n" - << " - scale = " << LoopPackage.Scale << "\n"); - - if (LoopPackage.Scale > getMaxLoopScale()) { - LoopPackage.Scale = getMaxLoopScale(); - DEBUG(dbgs() << " - reduced-to-max-scale: " << getMaxLoopScale() << "\n"); - } + << " - " << TotalBackedgeMass << ")\n" + << " - scale = " << Loop.Scale << "\n"); } /// \brief Package up a loop. -void BlockFrequencyInfoImplBase::packageLoop(const BlockNode &LoopHead) { - DEBUG(dbgs() << "packaging-loop: " << getBlockName(LoopHead) << "\n"); - Working[LoopHead.Index].IsAPackage = true; - for (const BlockNode &M : getLoopPackage(LoopHead).Members) { +void BlockFrequencyInfoImplBase::packageLoop(LoopData &Loop) { + DEBUG(dbgs() << "packaging-loop: " << getLoopName(Loop) << "\n"); + + // Clear the subloop exits to prevent quadratic memory usage. 
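  // (Illustrative aside, not part of the patch: this restates the rationale of
  // the deleted comment above -- if every packaged subloop kept its Exits list
  // alive, a deeply nested loop tree with irreducible control flow could hold
  // a copy of much the same exit edges at every nesting level, so storage
  // would grow roughly quadratically with the nesting depth.)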
+ for (const BlockNode &M : Loop.Nodes) { + if (auto *Loop = Working[M.Index].getPackagedLoop()) + Loop->Exits.clear(); DEBUG(dbgs() << " - node: " << getBlockName(M.Index) << "\n"); - Working[M.Index].IsPackaged = true; } + Loop.IsPackaged = true; } +#ifndef NDEBUG +static void debugAssign(const BlockFrequencyInfoImplBase &BFI, + const DitheringDistributer &D, const BlockNode &T, + const BlockMass &M, const char *Desc) { + dbgs() << " => assign " << M << " (" << D.RemMass << ")"; + if (Desc) + dbgs() << " [" << Desc << "]"; + if (T.isValid()) + dbgs() << " to " << BFI.getBlockName(T); + dbgs() << "\n"; +} +#endif + void BlockFrequencyInfoImplBase::distributeMass(const BlockNode &Source, - const BlockNode &LoopHead, + LoopData *OuterLoop, Distribution &Dist) { - BlockMass Mass = getPackageMass(*this, Source); - DEBUG(dbgs() << " => mass: " << Mass - << " ( general | forward )\n"); + BlockMass Mass = Working[Source.Index].getMass(); + DEBUG(dbgs() << " => mass: " << Mass << "\n"); // Distribute mass to successors as laid out in Dist. DitheringDistributer D(Dist, Mass); -#ifndef NDEBUG - auto debugAssign = [&](const BlockNode &T, const BlockMass &M, - const char *Desc) { - dbgs() << " => assign " << M << " (" << D.RemMass << "|" - << D.RemForwardMass << ")"; - if (Desc) - dbgs() << " [" << Desc << "]"; - if (T.isValid()) - dbgs() << " to " << getBlockName(T); - dbgs() << "\n"; - }; - (void)debugAssign; -#endif - - PackagedLoopData *LoopPackage = 0; - if (LoopHead.isValid()) - LoopPackage = &getLoopPackage(LoopHead); for (const Weight &W : Dist.Weights) { - // Check for a local edge (forward and non-exit). + // Check for a local edge (non-backedge and non-exit). + BlockMass Taken = D.takeMass(W.Amount); if (W.Type == Weight::Local) { - BlockMass Local = D.takeLocalMass(W.Amount); - getPackageMass(*this, W.TargetNode) += Local; - DEBUG(debugAssign(W.TargetNode, Local, nullptr)); + Working[W.TargetNode.Index].getMass() += Taken; + DEBUG(debugAssign(*this, D, W.TargetNode, Taken, nullptr)); continue; } // Backedges and exits only make sense if we're processing a loop. - assert(LoopPackage && "backedge or exit outside of loop"); + assert(OuterLoop && "backedge or exit outside of loop"); // Check for a backedge. if (W.Type == Weight::Backedge) { - BlockMass Back = D.takeBackedgeMass(W.Amount); - LoopPackage->BackedgeMass += Back; - DEBUG(debugAssign(BlockNode(), Back, "back")); + OuterLoop->BackedgeMass[OuterLoop->getHeaderIndex(W.TargetNode)] += Taken; + DEBUG(debugAssign(*this, D, W.TargetNode, Taken, "back")); continue; } // This must be an exit. assert(W.Type == Weight::Exit); - BlockMass Exit = D.takeExitMass(W.Amount); - LoopPackage->Exits.push_back(std::make_pair(W.TargetNode, Exit)); - DEBUG(debugAssign(W.TargetNode, Exit, "exit")); + OuterLoop->Exits.push_back(std::make_pair(W.TargetNode, Taken)); + DEBUG(debugAssign(*this, D, W.TargetNode, Taken, "exit")); } } static void convertFloatingToInteger(BlockFrequencyInfoImplBase &BFI, - const Float &Min, const Float &Max) { + const Scaled64 &Min, const Scaled64 &Max) { // Scale the Factor to a size that creates integers. Ideally, integers would // be scaled so that Max == UINT64_MAX so that they can be best - // differentiated. However, the register allocator currently deals poorly - // with large numbers. Instead, push Min up a little from 1 to give some - // room to differentiate small, unequal numbers. - // - // TODO: fix issues downstream so that ScalingFactor can be Float(1,64)/Max. 
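  // (Illustrative aside, not part of the patch: as a concrete example of the
  // replacement logic just below, if Min == 2^-10 and Max == 2^20 then the
  // spread (Max / Min).lg() is 30, well under the 61-bit threshold, so the new
  // code picks ScalingFactor = 8 / Min = 2^13 and the scaled frequencies span
  // 8 .. 2^33; for a spread wider than 61 bits it instead uses 2^64 / Max,
  // pushing the largest frequency toward UINT64_MAX and saturating the
  // smallest ones down to 1.)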
- Float ScalingFactor = Min.inverse(); - if ((Max / Min).lg() < 60) + // differentiated. However, in the presence of large frequency values, small + // frequencies are scaled down to 1, making it impossible to differentiate + // small, unequal numbers. When the spread between Min and Max frequencies + // fits well within MaxBits, we make the scale be at least 8. + const unsigned MaxBits = 64; + const unsigned SpreadBits = (Max / Min).lg(); + Scaled64 ScalingFactor; + if (SpreadBits <= MaxBits - 3) { + // If the values are small enough, make the scaling factor at least 8 to + // allow distinguishing small values. + ScalingFactor = Min.inverse(); ScalingFactor <<= 3; + } else { + // If the values need more than MaxBits to be represented, saturate small + // frequency values down to 1 by using a scaling factor that benefits large + // frequency values. + ScalingFactor = Scaled64(1, MaxBits) / Max; + } // Translate the floats to integers. DEBUG(dbgs() << "float-to-int: min = " << Min << ", max = " << Max << ", factor = " << ScalingFactor << "\n"); for (size_t Index = 0; Index < BFI.Freqs.size(); ++Index) { - Float Scaled = BFI.Freqs[Index].Floating * ScalingFactor; + Scaled64 Scaled = BFI.Freqs[Index].Scaled * ScalingFactor; BFI.Freqs[Index].Integer = std::max(UINT64_C(1), Scaled.toInt()); DEBUG(dbgs() << " - " << BFI.getBlockName(Index) << ": float = " - << BFI.Freqs[Index].Floating << ", scaled = " << Scaled + << BFI.Freqs[Index].Scaled << ", scaled = " << Scaled << ", int = " << BFI.Freqs[Index].Integer << "\n"); } } -static void scaleBlockData(BlockFrequencyInfoImplBase &BFI, - const BlockNode &Node, - const PackagedLoopData &Loop) { - Float F = Loop.Mass.toFloat() * Loop.Scale; - - Float &Current = BFI.Freqs[Node.Index].Floating; - Float Updated = Current * F; - - DEBUG(dbgs() << " - " << BFI.getBlockName(Node) << ": " << Current << " => " - << Updated << "\n"); - - Current = Updated; -} - /// \brief Unwrap a loop package. /// /// Visits all the members of a loop, adjusting their BlockData according to /// the loop's pseudo-node. -static void unwrapLoopPackage(BlockFrequencyInfoImplBase &BFI, - const BlockNode &Head) { - assert(Head.isValid()); - - PackagedLoopData &LoopPackage = BFI.getLoopPackage(Head); - DEBUG(dbgs() << "unwrap-loop-package: " << BFI.getBlockName(Head) - << ": mass = " << LoopPackage.Mass - << ", scale = " << LoopPackage.Scale << "\n"); - scaleBlockData(BFI, Head, LoopPackage); +static void unwrapLoop(BlockFrequencyInfoImplBase &BFI, LoopData &Loop) { + DEBUG(dbgs() << "unwrap-loop-package: " << BFI.getLoopName(Loop) + << ": mass = " << Loop.Mass << ", scale = " << Loop.Scale + << "\n"); + Loop.Scale *= Loop.Mass.toScaled(); + Loop.IsPackaged = false; + DEBUG(dbgs() << " => combined-scale = " << Loop.Scale << "\n"); // Propagate the head scale through the loop. Since members are visited in // RPO, the head scale will be updated by the loop scale first, and then the // final head scale will be used for updated the rest of the members. - for (const BlockNode &M : LoopPackage.Members) { - const FrequencyData &HeadData = BFI.Freqs[Head.Index]; - FrequencyData &Freqs = BFI.Freqs[M.Index]; - Float NewFreq = Freqs.Floating * HeadData.Floating; - DEBUG(dbgs() << " - " << BFI.getBlockName(M) << ": " << Freqs.Floating - << " => " << NewFreq << "\n"); - Freqs.Floating = NewFreq; + for (const BlockNode &N : Loop.Nodes) { + const auto &Working = BFI.Working[N.Index]; + Scaled64 &F = Working.isAPackage() ? 
Working.getPackagedLoop()->Scale + : BFI.Freqs[N.Index].Scaled; + Scaled64 New = Loop.Scale * F; + DEBUG(dbgs() << " - " << BFI.getBlockName(N) << ": " << F << " => " << New + << "\n"); + F = New; } } -void BlockFrequencyInfoImplBase::finalizeMetrics() { +void BlockFrequencyInfoImplBase::unwrapLoops() { // Set initial frequencies from loop-local masses. for (size_t Index = 0; Index < Working.size(); ++Index) - Freqs[Index].Floating = Working[Index].Mass.toFloat(); + Freqs[Index].Scaled = Working[Index].Mass.toScaled(); + + for (LoopData &Loop : Loops) + unwrapLoop(*this, Loop); +} +void BlockFrequencyInfoImplBase::finalizeMetrics() { // Unwrap loop packages in reverse post-order, tracking min and max // frequencies. - auto Min = Float::getLargest(); - auto Max = Float::getZero(); + auto Min = Scaled64::getLargest(); + auto Max = Scaled64::getZero(); for (size_t Index = 0; Index < Working.size(); ++Index) { - if (Working[Index].isLoopHeader()) - unwrapLoopPackage(*this, BlockNode(Index)); - - // Update max scale. - Min = std::min(Min, Freqs[Index].Floating); - Max = std::max(Max, Freqs[Index].Floating); + // Update min/max scale. + Min = std::min(Min, Freqs[Index].Scaled); + Max = std::max(Max, Freqs[Index].Scaled); } // Convert to integers. @@ -904,17 +523,21 @@ BlockFrequencyInfoImplBase::getBlockFreq(const BlockNode &Node) const { return 0; return Freqs[Node.Index].Integer; } -Float +Scaled64 BlockFrequencyInfoImplBase::getFloatingBlockFreq(const BlockNode &Node) const { if (!Node.isValid()) - return Float::getZero(); - return Freqs[Node.Index].Floating; + return Scaled64::getZero(); + return Freqs[Node.Index].Scaled; } std::string BlockFrequencyInfoImplBase::getBlockName(const BlockNode &Node) const { return std::string(); } +std::string +BlockFrequencyInfoImplBase::getLoopName(const LoopData &Loop) const { + return getBlockName(Loop.getHeader()) + (Loop.isIrreducible() ? 
"**" : "*"); +} raw_ostream & BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS, @@ -925,8 +548,215 @@ BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS, raw_ostream & BlockFrequencyInfoImplBase::printBlockFreq(raw_ostream &OS, const BlockFrequency &Freq) const { - Float Block(Freq.getFrequency(), 0); - Float Entry(getEntryFreq(), 0); + Scaled64 Block(Freq.getFrequency(), 0); + Scaled64 Entry(getEntryFreq(), 0); return OS << Block / Entry; } + +void IrreducibleGraph::addNodesInLoop(const BFIBase::LoopData &OuterLoop) { + Start = OuterLoop.getHeader(); + Nodes.reserve(OuterLoop.Nodes.size()); + for (auto N : OuterLoop.Nodes) + addNode(N); + indexNodes(); +} +void IrreducibleGraph::addNodesInFunction() { + Start = 0; + for (uint32_t Index = 0; Index < BFI.Working.size(); ++Index) + if (!BFI.Working[Index].isPackaged()) + addNode(Index); + indexNodes(); +} +void IrreducibleGraph::indexNodes() { + for (auto &I : Nodes) + Lookup[I.Node.Index] = &I; +} +void IrreducibleGraph::addEdge(IrrNode &Irr, const BlockNode &Succ, + const BFIBase::LoopData *OuterLoop) { + if (OuterLoop && OuterLoop->isHeader(Succ)) + return; + auto L = Lookup.find(Succ.Index); + if (L == Lookup.end()) + return; + IrrNode &SuccIrr = *L->second; + Irr.Edges.push_back(&SuccIrr); + SuccIrr.Edges.push_front(&Irr); + ++SuccIrr.NumIn; +} + +namespace llvm { +template <> struct GraphTraits { + typedef bfi_detail::IrreducibleGraph GraphT; + + typedef const GraphT::IrrNode NodeType; + typedef GraphT::IrrNode::iterator ChildIteratorType; + + static const NodeType *getEntryNode(const GraphT &G) { + return G.StartIrr; + } + static ChildIteratorType child_begin(NodeType *N) { return N->succ_begin(); } + static ChildIteratorType child_end(NodeType *N) { return N->succ_end(); } +}; +} + +/// \brief Find extra irreducible headers. +/// +/// Find entry blocks and other blocks with backedges, which exist when \c G +/// contains irreducible sub-SCCs. +static void findIrreducibleHeaders( + const BlockFrequencyInfoImplBase &BFI, + const IrreducibleGraph &G, + const std::vector &SCC, + LoopData::NodeList &Headers, LoopData::NodeList &Others) { + // Map from nodes in the SCC to whether it's an entry block. + SmallDenseMap InSCC; + + // InSCC also acts the set of nodes in the graph. Seed it. + for (const auto *I : SCC) + InSCC[I] = false; + + for (auto I = InSCC.begin(), E = InSCC.end(); I != E; ++I) { + auto &Irr = *I->first; + for (const auto *P : make_range(Irr.pred_begin(), Irr.pred_end())) { + if (InSCC.count(P)) + continue; + + // This is an entry block. + I->second = true; + Headers.push_back(Irr.Node); + DEBUG(dbgs() << " => entry = " << BFI.getBlockName(Irr.Node) << "\n"); + break; + } + } + assert(Headers.size() >= 2 && + "Expected irreducible CFG; -loop-info is likely invalid"); + if (Headers.size() == InSCC.size()) { + // Every block is a header. + std::sort(Headers.begin(), Headers.end()); + return; + } + + // Look for extra headers from irreducible sub-SCCs. + for (const auto &I : InSCC) { + // Entry blocks are already headers. + if (I.second) + continue; + + auto &Irr = *I.first; + for (const auto *P : make_range(Irr.pred_begin(), Irr.pred_end())) { + // Skip forward edges. + if (P->Node < Irr.Node) + continue; + + // Skip predecessors from entry blocks. These can have inverted + // ordering. + if (InSCC.lookup(P)) + continue; + + // Store the extra header. 
+ Headers.push_back(Irr.Node); + DEBUG(dbgs() << " => extra = " << BFI.getBlockName(Irr.Node) << "\n"); + break; + } + if (Headers.back() == Irr.Node) + // Added this as a header. + continue; + + // This is not a header. + Others.push_back(Irr.Node); + DEBUG(dbgs() << " => other = " << BFI.getBlockName(Irr.Node) << "\n"); + } + std::sort(Headers.begin(), Headers.end()); + std::sort(Others.begin(), Others.end()); +} + +static void createIrreducibleLoop( + BlockFrequencyInfoImplBase &BFI, const IrreducibleGraph &G, + LoopData *OuterLoop, std::list::iterator Insert, + const std::vector &SCC) { + // Translate the SCC into RPO. + DEBUG(dbgs() << " - found-scc\n"); + + LoopData::NodeList Headers; + LoopData::NodeList Others; + findIrreducibleHeaders(BFI, G, SCC, Headers, Others); + + auto Loop = BFI.Loops.emplace(Insert, OuterLoop, Headers.begin(), + Headers.end(), Others.begin(), Others.end()); + + // Update loop hierarchy. + for (const auto &N : Loop->Nodes) + if (BFI.Working[N.Index].isLoopHeader()) + BFI.Working[N.Index].Loop->Parent = &*Loop; + else + BFI.Working[N.Index].Loop = &*Loop; +} + +iterator_range::iterator> +BlockFrequencyInfoImplBase::analyzeIrreducible( + const IrreducibleGraph &G, LoopData *OuterLoop, + std::list::iterator Insert) { + assert((OuterLoop == nullptr) == (Insert == Loops.begin())); + auto Prev = OuterLoop ? std::prev(Insert) : Loops.end(); + + for (auto I = scc_begin(G); !I.isAtEnd(); ++I) { + if (I->size() < 2) + continue; + + // Translate the SCC into RPO. + createIrreducibleLoop(*this, G, OuterLoop, Insert, *I); + } + + if (OuterLoop) + return make_range(std::next(Prev), Insert); + return make_range(Loops.begin(), Insert); +} + +void +BlockFrequencyInfoImplBase::updateLoopWithIrreducible(LoopData &OuterLoop) { + OuterLoop.Exits.clear(); + for (auto &Mass : OuterLoop.BackedgeMass) + Mass = BlockMass::getEmpty(); + auto O = OuterLoop.Nodes.begin() + 1; + for (auto I = O, E = OuterLoop.Nodes.end(); I != E; ++I) + if (!Working[I->Index].isPackaged()) + *O++ = *I; + OuterLoop.Nodes.erase(O, OuterLoop.Nodes.end()); +} + +void BlockFrequencyInfoImplBase::adjustLoopHeaderMass(LoopData &Loop) { + assert(Loop.isIrreducible() && "this only makes sense on irreducible loops"); + + // Since the loop has more than one header block, the mass flowing back into + // each header will be different. Adjust the mass in each header loop to + // reflect the masses flowing through back edges. + // + // To do this, we distribute the initial mass using the backedge masses + // as weights for the distribution. + BlockMass LoopMass = BlockMass::getFull(); + Distribution Dist; + + DEBUG(dbgs() << "adjust-loop-header-mass:\n"); + for (uint32_t H = 0; H < Loop.NumHeaders; ++H) { + auto &HeaderNode = Loop.Nodes[H]; + auto &BackedgeMass = Loop.BackedgeMass[Loop.getHeaderIndex(HeaderNode)]; + DEBUG(dbgs() << " - Add back edge mass for node " + << getBlockName(HeaderNode) << ": " << BackedgeMass << "\n"); + if (BackedgeMass.getMass() > 0) + Dist.addLocal(HeaderNode, BackedgeMass.getMass()); + else + DEBUG(dbgs() << " Nothing added. Back edge mass is zero\n"); + } + + DitheringDistributer D(Dist, LoopMass); + + DEBUG(dbgs() << " Distribute loop mass " << LoopMass + << " to headers using above weights\n"); + for (const Weight &W : Dist.Weights) { + BlockMass Taken = D.takeMass(W.Amount); + assert(W.Type == Weight::Local && "all weights should be local"); + Working[W.TargetNode.Index].getMass() = Taken; + DEBUG(debugAssign(*this, D, W.TargetNode, Taken, nullptr)); + } +}
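//===----------------------------------------------------------------------===//
// Illustrative aside (not part of the commit above): a minimal, self-contained
// sketch of the dithered distribution scheme described in
// DitheringDistributer's comment -- each successor takes the portion
// P = Weight / RemWeight of the mass that is still undistributed, and both
// RemWeight and RemMass are then decremented, so rounding error is pushed onto
// later successors instead of being lost. The names and the plain 64-bit
// "mass" arithmetic below are simplifications, not the BlockMass API itself.
//===----------------------------------------------------------------------===//

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

namespace dither_sketch {

struct Distributer {
  uint64_t RemMass;   // mass not yet handed out; "full" mass is UINT64_MAX
  uint32_t RemWeight; // sum of the weights not yet processed

  // Take Weight / RemWeight of the remaining mass. The 128-bit intermediate
  // (a GCC/Clang extension) keeps this toy multiply-divide exact.
  uint64_t takeMass(uint32_t Weight) {
    assert(Weight && Weight <= RemWeight);
    uint64_t Taken =
        static_cast<uint64_t>((unsigned __int128)RemMass * Weight / RemWeight);
    RemWeight -= Weight;
    RemMass -= Taken;
    return Taken;
  }
};

} // end namespace dither_sketch

int main() {
  // Distribute a full mass over three successors weighted 1, 1 and 3.
  dither_sketch::Distributer D{UINT64_MAX, 5};
  uint64_t Total = 0;
  for (uint32_t W : std::vector<uint32_t>{1, 1, 3}) {
    uint64_t Taken = D.takeMass(W);
    Total += Taken;
    std::printf("weight %u -> mass %llu\n", W, (unsigned long long)Taken);
  }
  // The last successor takes whatever remains, so the pieces always sum back
  // to the full mass -- no rounding error escapes.
  assert(Total == UINT64_MAX && D.RemMass == 0 && D.RemWeight == 0);
  return 0;
}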