From df60e43e05e1a61f1c9f7039ae9f7054bba570ea Mon Sep 17 00:00:00 2001
From: Hal Finkel <hfinkel@91177308-0d34-0410-b5e6-96231b3b80d8>
Date: Wed, 7 May 2014 22:25:18 +0000
Subject: [PATCH] [X86TTI] Remove the unrolling branch limits

The loop stream detector (LSD) on modern Intel cores, which optimizes the
execution of small loops, has limits on the number of taken branches in
addition to uop-count limits (modern AMD cores have similar limits).
Unfortunately, at the IR level, estimating the number of branches that will
be taken is difficult. For one thing, it strongly depends on later passes
(block placement, etc.).

The original implementation took a conservative approach and limited the
maximal BB DFS depth of the loop. However, fairly extensive benchmarking by
several of us has revealed that this is the wrong approach. In fact, there
are zero known cases where the branch limit prevents a detrimental unrolling
(but plenty of cases where it does prevent beneficial unrolling).

While we could improve the current branch counting logic by incorporating
branch probabilities, this further complication seems unjustified without a
motivating regression. Instead, unless and until a regression appears, the
branch counting will be removed.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@208255 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86TargetTransformInfo.cpp | 53 ++++++-----------------
 1 file changed, 13 insertions(+), 40 deletions(-)

diff --git a/lib/Target/X86/X86TargetTransformInfo.cpp b/lib/Target/X86/X86TargetTransformInfo.cpp
index ce060aba644..cad8dfd5221 100644
--- a/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -41,10 +41,6 @@ UsePartialUnrolling("x86-use-partial-unrolling", cl::init(true),
 static cl::opt<unsigned>
 PartialUnrollingThreshold("x86-partial-unrolling-threshold", cl::init(0),
   cl::desc("Threshold for X86 partial unrolling"), cl::Hidden);
-static cl::opt<unsigned>
-PartialUnrollingMaxBranches("x86-partial-max-branches", cl::init(2),
-  cl::desc("Threshold for taken branches in X86 partial unrolling"),
-  cl::Hidden);
 
 namespace {
 
@@ -172,49 +168,38 @@ void X86TTI::getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const {
   // - The loop must have fewer than 16 branches
   // - The loop must have less than 40 uops in all executed loop branches
 
-  unsigned MaxBranches, MaxOps;
+  // The number of taken branches in a loop is hard to estimate here, and
+  // benchmarking has revealed that it is better not to be conservative when
+  // estimating the branch count. As a result, we'll ignore the branch limits
+  // until someone finds a case where it matters in practice.
+
+  unsigned MaxOps;
   if (PartialUnrollingThreshold.getNumOccurrences() > 0) {
-    MaxBranches = PartialUnrollingMaxBranches;
     MaxOps = PartialUnrollingThreshold;
   } else if (ST->isAtom()) {
     // On the Atom, the throughput for taken branches is 2 cycles. For small
     // simple loops, expand by a small factor to hide the backedge cost.
-    MaxBranches = 2;
     MaxOps = 10;
   } else if (ST->hasFSGSBase() && ST->hasXOP() /* Steamroller and later */) {
-    MaxBranches = 16;
     MaxOps = 40;
   } else if (ST->hasFMA4() /* Any other recent AMD */) {
     return;
   } else if (ST->hasAVX() || ST->hasSSE42() /* Nehalem and later */) {
-    MaxBranches = 8;
     MaxOps = 28;
   } else if (ST->hasSSSE3() /* Intel Core */) {
-    MaxBranches = 4;
     MaxOps = 18;
   } else {
     return;
   }
 
-  // Scan the loop: don't unroll loops with calls, and count the potential
-  // number of taken branches (this is somewhat conservative because we're
-  // counting all block transitions as potential branches while in reality some
-  // of these will become implicit via block placement).
-  unsigned MaxDepth = 0;
-  for (df_iterator<BasicBlock*> DI = df_begin(L->getHeader()),
-       DE = df_end(L->getHeader()); DI != DE;) {
-    if (!L->contains(*DI)) {
-      DI.skipChildren();
-      continue;
-    }
-
-    MaxDepth = std::max(MaxDepth, DI.getPathLength());
-    if (MaxDepth > MaxBranches)
-      return;
+  // Scan the loop: don't unroll loops with calls.
+  for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
+       I != E; ++I) {
+    BasicBlock *BB = *I;
 
-    for (BasicBlock::iterator I = DI->begin(), IE = DI->end(); I != IE; ++I)
-      if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
-        ImmutableCallSite CS(I);
+    for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
+      if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
+        ImmutableCallSite CS(J);
         if (const Function *F = CS.getCalledFunction()) {
           if (!isLoweredToCall(F))
             continue;
@@ -222,23 +207,11 @@ void X86TTI::getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const {
 
         return;
       }
-
-    ++DI;
   }
 
   // Enable runtime and partial unrolling up to the specified size.
   UP.Partial = UP.Runtime = true;
   UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
-
-  // Set the maximum count based on the loop depth. The maximum number of
-  // branches taken in a loop (including the backedge) is equal to the maximum
-  // loop depth (the DFS path length from the loop header to any block in the
-  // loop). When the loop is unrolled, this depth (except for the backedge
-  // itself) is multiplied by the unrolling factor. This new unrolled depth
-  // must be less than the target-specific maximum branch count (which limits
-  // the number of taken branches in the uop buffer).
-  if (MaxDepth > 1)
-    UP.MaxCount = (MaxBranches-1)/(MaxDepth-1);
 }
 
 unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
-- 
2.34.1
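A concrete view of the cap this patch removes: the deleted lines set
UP.MaxCount = (MaxBranches-1)/(MaxDepth-1) whenever MaxDepth > 1. The
following standalone toy program (illustrative only, not LLVM code and not
part of the patch; the MaxBranches values come from the table deleted above,
and the file name is made up) tabulates that old cap for a few loop depths:

  // maxcount_demo.cpp -- standalone illustration (not LLVM code) of the
  // unroll-count cap removed by this patch. Old heuristic, per the deleted
  // lines above:
  //   if (MaxDepth > 1)
  //     UP.MaxCount = (MaxBranches - 1) / (MaxDepth - 1);
  #include <cstdio>

  int main() {
    // MaxBranches values from the removed per-subtarget table.
    const struct { const char *CPU; unsigned MaxBranches; } Targets[] = {
        {"Atom", 2}, {"Intel Core", 4}, {"Nehalem+", 8}, {"Steamroller+", 16}};

    for (const auto &T : Targets)
      for (unsigned MaxDepth = 2; MaxDepth <= 4; ++MaxDepth)
        std::printf("%-13s MaxDepth=%u -> old MaxCount=%u\n",
                    T.CPU, MaxDepth, (T.MaxBranches - 1) / (MaxDepth - 1));
    // After this patch MaxCount is left unset; only the uop budget
    // (UP.PartialThreshold = MaxOps) limits partial unrolling.
    return 0;
  }

On Atom (MaxBranches=2), for example, a two-block loop got MaxCount=1, i.e.,
no partial unrolling at all, which is the kind of beneficial unrolling the
commit message says the limit was blocking.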