From bfe1f1c5a398ea83e108f91f7bd3d7e2ec00c6c0 Mon Sep 17 00:00:00 2001
From: Chandler Carruth
Date: Mon, 17 Aug 2015 02:08:17 +0000
Subject: [PATCH] [PM] Port ScalarEvolution to the new pass manager.

This change makes ScalarEvolution a stand-alone object and just produces one from a pass as needed. Making this work well requires making the object movable, using references instead of overwritten pointers in a number of places, and other refactorings.

I've also wired it up to the new pass manager and added a RUN line to a test to exercise it under the new pass manager. This includes basic printing support much like with other analyses.

But there is a big and somewhat scary change here. Prior to this patch ScalarEvolution was never *actually* invalidated!!! Re-running the pass just re-wired up the various other analyses and didn't remove any of the existing entries in the SCEV caches or clear out anything at all. This might seem OK because everything in SCEV that can do so uses ValueHandles to track updates to the values that serve as SCEV keys. However, this still means that as we ran SCEV over each function in the module, we kept accumulating more and more SCEVs into the cache. At the end, we would have a SCEV cache with every value that we ever needed a SCEV for in the entire module!!! Yowzers. The releaseMemory routine would dump all of this, but that isn't really called during normal runs of the pipeline as far as I can see.

To make matters worse, there *is* actually a key that we don't update with value handles -- there is a map keyed off of Loop*s. Because LoopInfo *does* release its memory from run to run, it is entirely possible to run SCEV over one function, then over another function, and then look up a Loop* from the second function but find an entry inserted for the first function! Ouch.

To make matters still worse, there are plenty of updates that *don't* trip a value handle. It seems incredibly unlikely that today GVN or another pass that invalidates SCEV can update values in *just* such a way that a subsequent run of SCEV will incorrectly find lookups in a cache, but it is theoretically possible and would be a nightmare to debug.

With this refactoring, I've fixed all this by actually destroying and recreating the ScalarEvolution object from run to run. Technically, this could increase the amount of malloc traffic we see, but then again it is also technically correct. ;] I don't actually think we're suffering from tons of malloc traffic from SCEV because if we were, the fact that we never clear the memory would seem more likely to have come up as an actual problem before now.

So, I've made the simple fix here. If in fact there are serious issues with too much allocation and deallocation, I can work on a clever fix that preserves the allocations (while clearing the data) between each run, but I'd prefer to do that kind of optimization with a test case / benchmark that shows why we need such cleverness (and that can test that we actually make it faster). It's possible that this will make some things faster by making the SCEV caches have higher locality (due to being significantly smaller), so until there is a clear benchmark, I think the simple change is best.
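As a sketch of what consuming this looks like under the new pass manager (not something added by this patch): a client asks the AnalysisManager for the ScalarEvolutionAnalysis result and gets a reference to the stand-alone, movable ScalarEvolution object described above. The example below is written against the signatures this patch declares -- run(Function &, AnalysisManager<Function> *), getResult<ScalarEvolutionAnalysis>(F) -- plus LoopAnalysis from the existing registry; the pass name BackedgeCountPrinterPass and its body are hypothetical, invented purely for illustration.

  // Hypothetical new-PM pass that queries ScalarEvolution; illustration only,
  // not part of this patch.
  #include "llvm/Analysis/LoopInfo.h"
  #include "llvm/Analysis/ScalarEvolution.h"
  #include "llvm/IR/Function.h"
  #include "llvm/IR/PassManager.h"
  #include "llvm/Support/raw_ostream.h"

  namespace {

  class BackedgeCountPrinterPass {
    llvm::raw_ostream &OS;

  public:
    explicit BackedgeCountPrinterPass(llvm::raw_ostream &OS) : OS(OS) {}

    llvm::PreservedAnalyses run(llvm::Function &F,
                                llvm::AnalysisManager<llvm::Function> *AM) {
      // The analysis result is now a plain ScalarEvolution object owned by
      // the analysis manager; on invalidation it is destroyed and rebuilt
      // instead of being re-wired in place over stale caches.
      llvm::ScalarEvolution &SE =
          AM->getResult<llvm::ScalarEvolutionAnalysis>(F);
      llvm::LoopInfo &LI = AM->getResult<llvm::LoopAnalysis>(F);

      // Query SCEV per top-level loop and print the backedge-taken count.
      for (llvm::Loop *L : LI)
        OS << "backedge-taken count: " << *SE.getBackedgeTakenCount(L) << "\n";

      // Read-only pass: preserve everything.
      return llvm::PreservedAnalyses::all();
    }

    static llvm::StringRef name() { return "BackedgeCountPrinterPass"; }
  };

  } // namespace

Legacy pass manager clients instead go through the wrapper, i.e. getAnalysis<ScalarEvolutionWrapperPass>().getSE(), which is the mechanical change repeated throughout the diff below. With the analysis and printer registered in PassRegistry.def, the new path should be reachable via something like opt -passes='print<scalar-evolution>' -disable-output, which is presumably what the new RUN line in trip-count.ll exercises.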
Differential Revision: http://reviews.llvm.org/D12063 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@245193 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/Analysis/ScalarEvolution.h | 92 +++++-- include/llvm/InitializePasses.h | 2 +- include/llvm/LinkAllPasses.h | 2 +- lib/Analysis/Analysis.cpp | 2 +- lib/Analysis/Delinearization.cpp | 4 +- lib/Analysis/DependenceAnalysis.cpp | 6 +- lib/Analysis/IVUsers.cpp | 6 +- lib/Analysis/LoopAccessAnalysis.cpp | 6 +- lib/Analysis/ScalarEvolution.cpp | 254 ++++++++++-------- lib/Analysis/ScalarEvolutionAliasAnalysis.cpp | 6 +- lib/Analysis/ScalarEvolutionExpander.cpp | 67 +++-- lib/CodeGen/MachineFunctionPass.cpp | 2 +- lib/Passes/PassBuilder.cpp | 1 + lib/Passes/PassRegistry.def | 2 + lib/Target/PowerPC/PPCCTRLoops.cpp | 6 +- lib/Target/PowerPC/PPCLoopDataPrefetch.cpp | 8 +- lib/Target/PowerPC/PPCLoopPreIncPrep.cpp | 6 +- .../Scalar/AlignmentFromAssumptions.cpp | 8 +- lib/Transforms/Scalar/IndVarSimplify.cpp | 8 +- .../Scalar/InductiveRangeCheckElimination.cpp | 4 +- lib/Transforms/Scalar/LICM.cpp | 11 +- lib/Transforms/Scalar/LoopDeletion.cpp | 8 +- lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 8 +- lib/Transforms/Scalar/LoopInstSimplify.cpp | 2 +- lib/Transforms/Scalar/LoopInterchange.cpp | 6 +- lib/Transforms/Scalar/LoopRerollPass.cpp | 6 +- lib/Transforms/Scalar/LoopRotation.cpp | 6 +- lib/Transforms/Scalar/LoopStrengthReduce.cpp | 12 +- lib/Transforms/Scalar/LoopUnrollPass.cpp | 8 +- lib/Transforms/Scalar/LoopUnswitch.cpp | 6 +- lib/Transforms/Scalar/NaryReassociate.cpp | 8 +- lib/Transforms/Scalar/PlaceSafepoints.cpp | 6 +- .../Scalar/SeparateConstOffsetFromGEP.cpp | 6 +- .../Scalar/StraightLineStrengthReduce.cpp | 6 +- lib/Transforms/Utils/LCSSA.cpp | 5 +- lib/Transforms/Utils/LoopSimplify.cpp | 5 +- lib/Transforms/Utils/LoopUnroll.cpp | 10 +- lib/Transforms/Utils/LoopUnrollRuntime.cpp | 13 +- lib/Transforms/Vectorize/BBVectorize.cpp | 10 +- lib/Transforms/Vectorize/LoopVectorize.cpp | 6 +- lib/Transforms/Vectorize/SLPVectorizer.cpp | 6 +- test/Analysis/ScalarEvolution/trip-count.ll | 5 +- unittests/Analysis/ScalarEvolutionTest.cpp | 35 ++- 43 files changed, 393 insertions(+), 293 deletions(-) diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h index 57f23f07769..537198e7107 100644 --- a/include/llvm/Analysis/ScalarEvolution.h +++ b/include/llvm/Analysis/ScalarEvolution.h @@ -27,6 +27,7 @@ #include "llvm/IR/Function.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Operator.h" +#include "llvm/IR/PassManager.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Pass.h" #include "llvm/Support/Allocator.h" @@ -170,11 +171,10 @@ namespace llvm { static bool classof(const SCEV *S); }; - /// ScalarEvolution - This class is the main scalar evolution driver. Because - /// client code (intentionally) can't do much with the SCEV objects directly, - /// they must ask this class for services. - /// - class ScalarEvolution : public FunctionPass { + /// The main scalar evolution driver. Because client code (intentionally) + /// can't do much with the SCEV objects directly, they must ask this class + /// for services. + class ScalarEvolution { public: /// LoopDisposition - An enum describing the relationship between a /// SCEV and a loop. @@ -224,26 +224,26 @@ namespace llvm { /// F - The function we are analyzing. /// - Function *F; - - /// The tracker for @llvm.assume intrinsics in this function. 
- AssumptionCache *AC; - - /// LI - The loop information for the function we are currently analyzing. - /// - LoopInfo *LI; + Function &F; /// TLI - The target library information for the target we are targeting. /// - TargetLibraryInfo *TLI; + TargetLibraryInfo &TLI; + + /// The tracker for @llvm.assume intrinsics in this function. + AssumptionCache &AC; /// DT - The dominator tree. /// - DominatorTree *DT; + DominatorTree &DT; + + /// LI - The loop information for the function we are currently analyzing. + /// + LoopInfo &LI; /// CouldNotCompute - This SCEV is used to represent unknown trip /// counts and things. - SCEVCouldNotCompute CouldNotCompute; + std::unique_ptr CouldNotCompute; /// ValueExprMapType - The typedef for ValueExprMap. /// @@ -604,10 +604,12 @@ namespace llvm { SCEV::NoWrapFlags getNoWrapFlagsFromUB(const Value *V); public: - static char ID; // Pass identification, replacement for typeid - ScalarEvolution(); + ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC, + DominatorTree &DT, LoopInfo &LI); + ~ScalarEvolution(); + ScalarEvolution(ScalarEvolution &&Arg); - LLVMContext &getContext() const { return F->getContext(); } + LLVMContext &getContext() const { return F.getContext(); } /// isSCEVable - Test if values of the given type are analyzable within /// the SCEV framework. This primarily includes integer types, and it @@ -984,11 +986,8 @@ namespace llvm { SmallVectorImpl &Sizes, const SCEV *ElementSize) const; - bool runOnFunction(Function &F) override; - void releaseMemory() override; - void getAnalysisUsage(AnalysisUsage &AU) const override; - void print(raw_ostream &OS, const Module* = nullptr) const override; - void verifyAnalysis() const override; + void print(raw_ostream &OS) const; + void verify() const; /// Collect parametric terms occurring in step expressions. void collectParametricTerms(const SCEV *Expr, @@ -1097,6 +1096,51 @@ namespace llvm { /// to locate them all and call their destructors. SCEVUnknown *FirstUnknown; }; + + /// \brief Analysis pass that exposes the \c ScalarEvolution for a function. + class ScalarEvolutionAnalysis { + static char PassID; + + public: + typedef ScalarEvolution Result; + + /// \brief Opaque, unique identifier for this analysis pass. + static void *ID() { return (void *)&PassID; } + + /// \brief Provide a name for the analysis for debugging and logging. + static StringRef name() { return "ScalarEvolutionAnalysis"; } + + ScalarEvolution run(Function &F, AnalysisManager *AM); + }; + + /// \brief Printer pass for the \c ScalarEvolutionAnalysis results. 
+ class ScalarEvolutionPrinterPass { + raw_ostream &OS; + + public: + explicit ScalarEvolutionPrinterPass(raw_ostream &OS) : OS(OS) {} + PreservedAnalyses run(Function &F, AnalysisManager *AM); + + static StringRef name() { return "ScalarEvolutionPrinterPass"; } + }; + + class ScalarEvolutionWrapperPass : public FunctionPass { + std::unique_ptr SE; + + public: + static char ID; + + ScalarEvolutionWrapperPass(); + + ScalarEvolution &getSE() { return *SE; } + const ScalarEvolution &getSE() const { return *SE; } + + bool runOnFunction(Function &F) override; + void releaseMemory() override; + void getAnalysisUsage(AnalysisUsage &AU) const override; + void print(raw_ostream &OS, const Module * = nullptr) const override; + void verifyAnalysis() const override; + }; } #endif diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h index d3aeb0b326a..48776110f5f 100644 --- a/include/llvm/InitializePasses.h +++ b/include/llvm/InitializePasses.h @@ -247,7 +247,7 @@ void initializeSROAPass(PassRegistry&); void initializeSROA_DTPass(PassRegistry&); void initializeSROA_SSAUpPass(PassRegistry&); void initializeScalarEvolutionAliasAnalysisPass(PassRegistry&); -void initializeScalarEvolutionPass(PassRegistry&); +void initializeScalarEvolutionWrapperPassPass(PassRegistry&); void initializeShrinkWrapPass(PassRegistry &); void initializeSimpleInlinerPass(PassRegistry&); void initializeShadowStackGCLoweringPass(PassRegistry&); diff --git a/include/llvm/LinkAllPasses.h b/include/llvm/LinkAllPasses.h index 251df727fe1..e3725d93010 100644 --- a/include/llvm/LinkAllPasses.h +++ b/include/llvm/LinkAllPasses.h @@ -184,7 +184,7 @@ namespace { (void) llvm::createEliminateAvailableExternallyPass(); (void)new llvm::IntervalPartition(); - (void)new llvm::ScalarEvolution(); + (void)new llvm::ScalarEvolutionWrapperPass(); ((llvm::Function*)nullptr)->viewCFGOnly(); llvm::RGPassManager RGM; ((llvm::RegionPass*)nullptr)->runOnRegion((llvm::Region*)nullptr, RGM); diff --git a/lib/Analysis/Analysis.cpp b/lib/Analysis/Analysis.cpp index 1ca151da94f..91e7339337a 100644 --- a/lib/Analysis/Analysis.cpp +++ b/lib/Analysis/Analysis.cpp @@ -63,7 +63,7 @@ void llvm::initializeAnalysis(PassRegistry &Registry) { initializeRegionPrinterPass(Registry); initializeRegionOnlyViewerPass(Registry); initializeRegionOnlyPrinterPass(Registry); - initializeScalarEvolutionPass(Registry); + initializeScalarEvolutionWrapperPassPass(Registry); initializeScalarEvolutionAliasAnalysisPass(Registry); initializeTargetTransformInfoWrapperPassPass(Registry); initializeTypeBasedAliasAnalysisPass(Registry); diff --git a/lib/Analysis/Delinearization.cpp b/lib/Analysis/Delinearization.cpp index 9d157860326..8e65c4c469a 100644 --- a/lib/Analysis/Delinearization.cpp +++ b/lib/Analysis/Delinearization.cpp @@ -60,12 +60,12 @@ public: void Delinearization::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); AU.addRequired(); - AU.addRequired(); + AU.addRequired(); } bool Delinearization::runOnFunction(Function &F) { this->F = &F; - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); LI = &getAnalysis().getLoopInfo(); return false; } diff --git a/lib/Analysis/DependenceAnalysis.cpp b/lib/Analysis/DependenceAnalysis.cpp index 4ee82f0f134..0e6f222e52e 100644 --- a/lib/Analysis/DependenceAnalysis.cpp +++ b/lib/Analysis/DependenceAnalysis.cpp @@ -117,7 +117,7 @@ Delinearize("da-delinearize", cl::init(false), cl::Hidden, cl::ZeroOrMore, INITIALIZE_PASS_BEGIN(DependenceAnalysis, "da", "Dependence Analysis", true, true) 
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_END(DependenceAnalysis, "da", "Dependence Analysis", true, true) @@ -133,7 +133,7 @@ FunctionPass *llvm::createDependenceAnalysisPass() { bool DependenceAnalysis::runOnFunction(Function &F) { this->F = &F; AA = &getAnalysis(); - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); LI = &getAnalysis().getLoopInfo(); return false; } @@ -146,7 +146,7 @@ void DependenceAnalysis::releaseMemory() { void DependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); AU.addRequiredTransitive(); - AU.addRequiredTransitive(); + AU.addRequiredTransitive(); AU.addRequiredTransitive(); } diff --git a/lib/Analysis/IVUsers.cpp b/lib/Analysis/IVUsers.cpp index 926787d3be9..a1c6b72f9f0 100644 --- a/lib/Analysis/IVUsers.cpp +++ b/lib/Analysis/IVUsers.cpp @@ -39,7 +39,7 @@ INITIALIZE_PASS_BEGIN(IVUsers, "iv-users", INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_END(IVUsers, "iv-users", "Induction Variable Users", false, true) @@ -255,7 +255,7 @@ void IVUsers::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired(); AU.addRequired(); AU.addRequired(); - AU.addRequired(); + AU.addRequired(); AU.setPreservesAll(); } @@ -266,7 +266,7 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) { *L->getHeader()->getParent()); LI = &getAnalysis().getLoopInfo(); DT = &getAnalysis().getDomTree(); - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); // Collect ephemeral values so that AddUsersIfInteresting skips them. EphValues.clear(); diff --git a/lib/Analysis/LoopAccessAnalysis.cpp b/lib/Analysis/LoopAccessAnalysis.cpp index 13d1235710e..b931589ed04 100644 --- a/lib/Analysis/LoopAccessAnalysis.cpp +++ b/lib/Analysis/LoopAccessAnalysis.cpp @@ -1794,7 +1794,7 @@ void LoopAccessAnalysis::print(raw_ostream &OS, const Module *M) const { } bool LoopAccessAnalysis::runOnFunction(Function &F) { - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); auto *TLIP = getAnalysisIfAvailable(); TLI = TLIP ? 
&TLIP->getTLI() : nullptr; AA = &getAnalysis(); @@ -1805,7 +1805,7 @@ bool LoopAccessAnalysis::runOnFunction(Function &F) { } void LoopAccessAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { - AU.addRequired(); + AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addRequired(); @@ -1819,7 +1819,7 @@ static const char laa_name[] = "Loop Access Analysis"; INITIALIZE_PASS_BEGIN(LoopAccessAnalysis, LAA_NAME, laa_name, false, true) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_END(LoopAccessAnalysis, LAA_NAME, laa_name, false, true) diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp index 10d3acb46af..7cf08bd7156 100644 --- a/lib/Analysis/ScalarEvolution.cpp +++ b/lib/Analysis/ScalarEvolution.cpp @@ -114,16 +114,6 @@ static cl::opt VerifySCEV("verify-scev", cl::desc("Verify ScalarEvolution's backedge taken counts (slow)")); -INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution", - "Scalar Evolution Analysis", false, true) -INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) -INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) -INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) -INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) -INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution", - "Scalar Evolution Analysis", false, true) -char ScalarEvolution::ID = 0; - //===----------------------------------------------------------------------===// // SCEV class definitions //===----------------------------------------------------------------------===// @@ -1983,7 +1973,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl &Ops, Flags = StrengthenNoWrapFlags(this, scAddExpr, Ops, Flags); // Sort by complexity, this groups all similar expression types together. - GroupByComplexity(Ops, LI); + GroupByComplexity(Ops, &LI); // If there are any constants, fold them together. unsigned Idx = 0; @@ -2391,7 +2381,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl &Ops, Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags); // Sort by complexity, this groups all similar expression types together. - GroupByComplexity(Ops, LI); + GroupByComplexity(Ops, &LI); // If there are any constants, fold them together. unsigned Idx = 0; @@ -2859,10 +2849,10 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl &Operands, // Canonicalize nested AddRecs in by nesting them in order of loop depth. if (const SCEVAddRecExpr *NestedAR = dyn_cast(Operands[0])) { const Loop *NestedLoop = NestedAR->getLoop(); - if (L->contains(NestedLoop) ? - (L->getLoopDepth() < NestedLoop->getLoopDepth()) : - (!NestedLoop->contains(L) && - DT->dominates(L->getHeader(), NestedLoop->getHeader()))) { + if (L->contains(NestedLoop) + ? (L->getLoopDepth() < NestedLoop->getLoopDepth()) + : (!NestedLoop->contains(L) && + DT.dominates(L->getHeader(), NestedLoop->getHeader()))) { SmallVector NestedOperands(NestedAR->op_begin(), NestedAR->op_end()); Operands[0] = NestedAR->getStart(); @@ -2997,7 +2987,7 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl &Ops) { #endif // Sort by complexity, this groups all similar expression types together. - GroupByComplexity(Ops, LI); + GroupByComplexity(Ops, &LI); // If there are any constants, fold them together. 
unsigned Idx = 0; @@ -3101,7 +3091,7 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl &Ops) { #endif // Sort by complexity, this groups all similar expression types together. - GroupByComplexity(Ops, LI); + GroupByComplexity(Ops, &LI); // If there are any constants, fold them together. unsigned Idx = 0; @@ -3202,7 +3192,7 @@ const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) { // constant expression and then folding it back into a ConstantInt. // This is just a compile-time optimization. return getConstant(IntTy, - F->getParent()->getDataLayout().getTypeAllocSize(AllocTy)); + F.getParent()->getDataLayout().getTypeAllocSize(AllocTy)); } const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, @@ -3213,7 +3203,7 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy, // This is just a compile-time optimization. return getConstant( IntTy, - F->getParent()->getDataLayout().getStructLayout(STy)->getElementOffset( + F.getParent()->getDataLayout().getStructLayout(STy)->getElementOffset( FieldNo)); } @@ -3256,7 +3246,7 @@ bool ScalarEvolution::isSCEVable(Type *Ty) const { /// for which isSCEVable must return true. uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const { assert(isSCEVable(Ty) && "Type is not SCEVable!"); - return F->getParent()->getDataLayout().getTypeSizeInBits(Ty); + return F.getParent()->getDataLayout().getTypeSizeInBits(Ty); } /// getEffectiveSCEVType - Return a type with the same bitwidth as @@ -3272,11 +3262,11 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const { // The only other support type is pointer. assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); - return F->getParent()->getDataLayout().getIntPtrType(Ty); + return F.getParent()->getDataLayout().getIntPtrType(Ty); } const SCEV *ScalarEvolution::getCouldNotCompute() { - return &CouldNotCompute; + return CouldNotCompute.get(); } namespace { @@ -3621,7 +3611,7 @@ ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) { /// a loop header, making it a potential recurrence, or it doesn't. /// const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { - if (const Loop *L = LI->getLoopFor(PN->getParent())) + if (const Loop *L = LI.getLoopFor(PN->getParent())) if (L->getHeader() == PN->getParent()) { // The loop may have multiple entrances or multiple exits; we can analyze // this phi as an addrec if it has a unique entry value and a unique @@ -3767,9 +3757,9 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) { // PHI's incoming blocks are in a different loop, in which case doing so // risks breaking LCSSA form. Instcombine would normally zap these, but // it doesn't have DominatorTree information, so it may miss cases. - if (Value *V = - SimplifyInstruction(PN, F->getParent()->getDataLayout(), TLI, DT, AC)) - if (LI->replacementPreservesLCSSAForm(PN, V)) + if (Value *V = SimplifyInstruction(PN, F.getParent()->getDataLayout(), &TLI, + &DT, &AC)) + if (LI.replacementPreservesLCSSAForm(PN, V)) return getSCEV(V); // If it's not a loop phi, we can't handle it yet. @@ -3864,8 +3854,8 @@ ScalarEvolution::GetMinTrailingZeros(const SCEV *S) { // For a SCEVUnknown, ask ValueTracking. 
unsigned BitWidth = getTypeSizeInBits(U->getType()); APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); - computeKnownBits(U->getValue(), Zeros, Ones, - F->getParent()->getDataLayout(), 0, AC, nullptr, DT); + computeKnownBits(U->getValue(), Zeros, Ones, F.getParent()->getDataLayout(), + 0, &AC, nullptr, &DT); return Zeros.countTrailingOnes(); } @@ -4095,18 +4085,18 @@ ScalarEvolution::getRange(const SCEV *S, // Split here to avoid paying the compile-time cost of calling both // computeKnownBits and ComputeNumSignBits. This restriction can be lifted // if needed. - const DataLayout &DL = F->getParent()->getDataLayout(); + const DataLayout &DL = F.getParent()->getDataLayout(); if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) { // For a SCEVUnknown, ask ValueTracking. APInt Zeros(BitWidth, 0), Ones(BitWidth, 0); - computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, AC, nullptr, DT); + computeKnownBits(U->getValue(), Zeros, Ones, DL, 0, &AC, nullptr, &DT); if (Ones != ~Zeros + 1) ConservativeResult = ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)); } else { assert(SignHint == ScalarEvolution::HINT_RANGE_SIGNED && "generalize as needed!"); - unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, AC, nullptr, DT); + unsigned NS = ComputeNumSignBits(U->getValue(), DL, 0, &AC, nullptr, &DT); if (NS > 1) ConservativeResult = ConservativeResult.intersectWith( ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1), @@ -4139,7 +4129,7 @@ SCEV::NoWrapFlags ScalarEvolution::getNoWrapFlagsFromUB(const Value *V) { // recurrence, but getting that requires computing the SCEV of the operands, // which can be expensive. This check we can do cheaply to rule out some // cases early. - Loop *innermostContainingLoop = LI->getLoopFor(BinOp->getParent()); + Loop *innermostContainingLoop = LI.getLoopFor(BinOp->getParent()); if (innermostContainingLoop == nullptr || innermostContainingLoop->getHeader() != BinOp->getParent()) return SCEV::FlagAnyWrap; @@ -4190,7 +4180,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) { // reachable. Such instructions don't matter, and they aren't required // to obey basic rules for definitions dominating uses which this // analysis depends on. - if (!DT->isReachableFromEntry(I->getParent())) + if (!DT.isReachableFromEntry(I->getParent())) return getUnknown(V); } else if (ConstantExpr *CE = dyn_cast(V)) Opcode = CE->getOpcode(); @@ -4304,7 +4294,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) { unsigned BitWidth = A.getBitWidth(); APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0); computeKnownBits(U->getOperand(0), KnownZero, KnownOne, - F->getParent()->getDataLayout(), 0, AC, nullptr, DT); + F.getParent()->getDataLayout(), 0, &AC, nullptr, &DT); APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ); @@ -5012,7 +5002,7 @@ ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) { // MaxBECount is conservatively the maximum EL.Max, where CouldNotCompute is // considered greater than any computable EL.Max. 
if (EL.Max != getCouldNotCompute() && Latch && - DT->dominates(ExitBB, Latch)) { + DT.dominates(ExitBB, Latch)) { if (!MustExitMaxBECount) MustExitMaxBECount = EL.Max; else { @@ -5625,7 +5615,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, unsigned NumIterations = BEs.getZExtValue(); // must be in range unsigned IterationNum = 0; - const DataLayout &DL = F->getParent()->getDataLayout(); + const DataLayout &DL = F.getParent()->getDataLayout(); for (; ; ++IterationNum) { if (IterationNum == NumIterations) return RetVal = CurrentIterVals[PN]; // Got exit value! @@ -5634,7 +5624,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, // EvaluateExpression adds non-phi values to the CurrentIterVals map. DenseMap NextIterVals; Constant *NextPHI = - EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI); + EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); if (!NextPHI) return nullptr; // Couldn't evaluate! NextIterVals[PN] = NextPHI; @@ -5659,7 +5649,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN, Constant *&NextPHI = NextIterVals[PHI]; if (!NextPHI) { // Not already computed. Value *BEValue = PHI->getIncomingValue(SecondIsBackedge); - NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI); + NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); } if (NextPHI != I->second) StoppedEvolving = false; @@ -5711,10 +5701,10 @@ const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L, // the loop symbolically to determine when the condition gets a value of // "ExitWhen". unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis. - const DataLayout &DL = F->getParent()->getDataLayout(); + const DataLayout &DL = F.getParent()->getDataLayout(); for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){ ConstantInt *CondVal = dyn_cast_or_null( - EvaluateExpression(Cond, L, CurrentIterVals, DL, TLI)); + EvaluateExpression(Cond, L, CurrentIterVals, DL, &TLI)); // Couldn't symbolically evaluate. if (!CondVal) return getCouldNotCompute(); @@ -5744,7 +5734,7 @@ const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L, if (NextPHI) continue; // Already computed! Value *BEValue = PHI->getIncomingValue(SecondIsBackedge); - NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI); + NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, &TLI); } CurrentIterVals.swap(NextIterVals); } @@ -5889,7 +5879,7 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { // exit value from the loop without using SCEVs. if (const SCEVUnknown *SU = dyn_cast(V)) { if (Instruction *I = dyn_cast(SU->getValue())) { - const Loop *LI = (*this->LI)[I->getParent()]; + const Loop *LI = this->LI[I->getParent()]; if (LI && LI->getParentLoop() == L) // Looking for loop exit value. if (PHINode *PN = dyn_cast(I)) if (PN->getParent() == LI->getHeader()) { @@ -5947,16 +5937,16 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) { // Check to see if getSCEVAtScope actually made an improvement. 
if (MadeImprovement) { Constant *C = nullptr; - const DataLayout &DL = F->getParent()->getDataLayout(); + const DataLayout &DL = F.getParent()->getDataLayout(); if (const CmpInst *CI = dyn_cast(I)) C = ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0], - Operands[1], DL, TLI); + Operands[1], DL, &TLI); else if (const LoadInst *LI = dyn_cast(I)) { if (!LI->isVolatile()) C = ConstantFoldLoadFromConstPtr(Operands[0], DL); } else C = ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, - DL, TLI); + DL, &TLI); if (!C) return V; return getSCEV(C); } @@ -6377,7 +6367,7 @@ ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) { // A loop's header is defined to be a block that dominates the loop. // If the header has a unique predecessor outside the loop, it must be // a block that has exactly one successor that can reach the loop. - if (Loop *L = LI->getLoopFor(BB)) + if (Loop *L = LI.getLoopFor(BB)) return std::make_pair(L->getLoopPredecessor(), L->getHeader()); return std::pair(); @@ -6969,11 +6959,11 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L, return true; // Check conditions due to any @llvm.assume intrinsics. - for (auto &AssumeVH : AC->assumptions()) { + for (auto &AssumeVH : AC.assumptions()) { if (!AssumeVH) continue; auto *CI = cast(AssumeVH); - if (!DT->dominates(CI, Latch->getTerminator())) + if (!DT.dominates(CI, Latch->getTerminator())) continue; if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) @@ -7002,12 +6992,11 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L, // If the loop is not reachable from the entry block, we risk running into an // infinite loop as we walk up into the dom tree. These loops do not matter // anyway, so we just return a conservative answer when we see them. - if (!DT->isReachableFromEntry(L->getHeader())) + if (!DT.isReachableFromEntry(L->getHeader())) return false; - for (DomTreeNode *DTN = (*DT)[Latch], *HeaderDTN = (*DT)[L->getHeader()]; - DTN != HeaderDTN; - DTN = DTN->getIDom()) { + for (DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()]; + DTN != HeaderDTN; DTN = DTN->getIDom()) { assert(DTN && "should reach the loop header before reaching the root!"); @@ -7031,7 +7020,7 @@ ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L, // We're constructively (and conservatively) enumerating edges within the // loop body that dominate the latch. The dominator tree better agree // with us on this: - assert(DT->dominates(DominatingEdge, Latch) && "should be!"); + assert(DT.dominates(DominatingEdge, Latch) && "should be!"); if (isImpliedCond(Pred, LHS, RHS, Condition, BB != ContinuePredicate->getSuccessor(0))) @@ -7076,11 +7065,11 @@ ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L, } // Check conditions due to any @llvm.assume intrinsics. 
- for (auto &AssumeVH : AC->assumptions()) { + for (auto &AssumeVH : AC.assumptions()) { if (!AssumeVH) continue; auto *CI = cast(AssumeVH); - if (!DT->dominates(CI, L->getHeader())) + if (!DT.dominates(CI, L->getHeader())) continue; if (isImpliedCond(Pred, LHS, RHS, CI->getArgOperand(0), false)) @@ -8312,22 +8301,34 @@ ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se) // ScalarEvolution Class Implementation //===----------------------------------------------------------------------===// -ScalarEvolution::ScalarEvolution() - : FunctionPass(ID), WalkingBEDominatingConds(false), ValuesAtScopes(64), - LoopDispositions(64), BlockDispositions(64), FirstUnknown(nullptr) { - initializeScalarEvolutionPass(*PassRegistry::getPassRegistry()); -} - -bool ScalarEvolution::runOnFunction(Function &F) { - this->F = &F; - AC = &getAnalysis().getAssumptionCache(F); - LI = &getAnalysis().getLoopInfo(); - TLI = &getAnalysis().getTLI(); - DT = &getAnalysis().getDomTree(); - return false; -} - -void ScalarEvolution::releaseMemory() { +ScalarEvolution::ScalarEvolution(Function &F, TargetLibraryInfo &TLI, + AssumptionCache &AC, DominatorTree &DT, + LoopInfo &LI) + : F(F), TLI(TLI), AC(AC), DT(DT), LI(LI), + CouldNotCompute(new SCEVCouldNotCompute()), + WalkingBEDominatingConds(false), ValuesAtScopes(64), LoopDispositions(64), + BlockDispositions(64), FirstUnknown(nullptr) {} + +ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg) + : F(Arg.F), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT), LI(Arg.LI), + CouldNotCompute(std::move(Arg.CouldNotCompute)), + ValueExprMap(std::move(Arg.ValueExprMap)), + WalkingBEDominatingConds(false), + BackedgeTakenCounts(std::move(Arg.BackedgeTakenCounts)), + ConstantEvolutionLoopExitValue( + std::move(Arg.ConstantEvolutionLoopExitValue)), + ValuesAtScopes(std::move(Arg.ValuesAtScopes)), + LoopDispositions(std::move(Arg.LoopDispositions)), + BlockDispositions(std::move(Arg.BlockDispositions)), + UnsignedRanges(std::move(Arg.UnsignedRanges)), + SignedRanges(std::move(Arg.SignedRanges)), + UniqueSCEVs(std::move(Arg.UniqueSCEVs)), + SCEVAllocator(std::move(Arg.SCEVAllocator)), + FirstUnknown(Arg.FirstUnknown) { + Arg.FirstUnknown = nullptr; +} + +ScalarEvolution::~ScalarEvolution() { // Iterate through all the SCEVUnknown instances and call their // destructors, so that they release their references to their values. for (SCEVUnknown *U = FirstUnknown; U; U = U->Next) @@ -8346,24 +8347,6 @@ void ScalarEvolution::releaseMemory() { assert(PendingLoopPredicates.empty() && "isImpliedCond garbage"); assert(!WalkingBEDominatingConds && "isLoopBackedgeGuardedByCond garbage!"); - - BackedgeTakenCounts.clear(); - ConstantEvolutionLoopExitValue.clear(); - ValuesAtScopes.clear(); - LoopDispositions.clear(); - BlockDispositions.clear(); - UnsignedRanges.clear(); - SignedRanges.clear(); - UniqueSCEVs.clear(); - SCEVAllocator.Reset(); -} - -void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const { - AU.setPreservesAll(); - AU.addRequiredTransitive(); - AU.addRequiredTransitive(); - AU.addRequiredTransitive(); - AU.addRequiredTransitive(); } bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) { @@ -8405,7 +8388,7 @@ static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, OS << "\n"; } -void ScalarEvolution::print(raw_ostream &OS, const Module *) const { +void ScalarEvolution::print(raw_ostream &OS) const { // ScalarEvolution's implementation of the print method is to print // out SCEV values of all instructions that are interesting. 
Doing // this potentially causes it to create new SCEV objects though, @@ -8415,7 +8398,7 @@ void ScalarEvolution::print(raw_ostream &OS, const Module *) const { ScalarEvolution &SE = *const_cast(this); OS << "Classifying expressions for: "; - F->printAsOperand(OS, /*PrintType=*/false); + F.printAsOperand(OS, /*PrintType=*/false); OS << "\n"; for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) if (isSCEVable(I->getType()) && !isa(*I)) { @@ -8430,7 +8413,7 @@ void ScalarEvolution::print(raw_ostream &OS, const Module *) const { SE.getSignedRange(SV).print(OS); } - const Loop *L = LI->getLoopFor((*I).getParent()); + const Loop *L = LI.getLoopFor((*I).getParent()); const SCEV *AtUse = SE.getSCEVAtScope(SV, L); if (AtUse != SV) { @@ -8458,9 +8441,9 @@ void ScalarEvolution::print(raw_ostream &OS, const Module *) const { } OS << "Determining loop execution counts for: "; - F->printAsOperand(OS, /*PrintType=*/false); + F.printAsOperand(OS, /*PrintType=*/false); OS << "\n"; - for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I) + for (LoopInfo::iterator I = LI.begin(), E = LI.end(); I != E; ++I) PrintLoopInfo(OS, &SE, *I); } @@ -8604,7 +8587,7 @@ ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { // produces the addrec's value is a PHI, and a PHI effectively properly // dominates its entire containing block. const SCEVAddRecExpr *AR = cast(S); - if (!DT->dominates(AR->getLoop()->getHeader(), BB)) + if (!DT.dominates(AR->getLoop()->getHeader(), BB)) return DoesNotDominateBlock; } // FALL THROUGH into SCEVNAryExpr handling. @@ -8641,7 +8624,7 @@ ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) { dyn_cast(cast(S)->getValue())) { if (I->getParent() == BB) return DominatesBlock; - if (DT->properlyDominates(I->getParent(), BB)) + if (DT.properlyDominates(I->getParent(), BB)) return ProperlyDominatesBlock; return DoesNotDominateBlock; } @@ -8735,24 +8718,21 @@ getLoopBackedgeTakenCounts(Loop *L, VerifyMap &Map, ScalarEvolution &SE) { } } -void ScalarEvolution::verifyAnalysis() const { - if (!VerifySCEV) - return; - +void ScalarEvolution::verify() const { ScalarEvolution &SE = *const_cast(this); // Gather stringified backedge taken counts for all loops using SCEV's caches. // FIXME: It would be much better to store actual values instead of strings, // but SCEV pointers will change if we drop the caches. VerifyMap BackedgeDumpsOld, BackedgeDumpsNew; - for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I) + for (LoopInfo::reverse_iterator I = LI.rbegin(), E = LI.rend(); I != E; ++I) getLoopBackedgeTakenCounts(*I, BackedgeDumpsOld, SE); - // Gather stringified backedge taken counts for all loops without using - // SCEV's caches. - SE.releaseMemory(); - for (LoopInfo::reverse_iterator I = LI->rbegin(), E = LI->rend(); I != E; ++I) - getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE); + // Gather stringified backedge taken counts for all loops using a fresh + // ScalarEvolution object. + ScalarEvolution SE2(F, TLI, AC, DT, LI); + for (LoopInfo::reverse_iterator I = LI.rbegin(), E = LI.rend(); I != E; ++I) + getLoopBackedgeTakenCounts(*I, BackedgeDumpsNew, SE2); // Now compare whether they're the same with and without caches. This allows // verifying that no pass changed the cache. @@ -8785,3 +8765,63 @@ void ScalarEvolution::verifyAnalysis() const { // TODO: Verify more things. 
} + +char ScalarEvolutionAnalysis::PassID; + +ScalarEvolution ScalarEvolutionAnalysis::run(Function &F, + AnalysisManager *AM) { + return ScalarEvolution(F, AM->getResult(F), + AM->getResult(F), + AM->getResult(F), + AM->getResult(F)); +} + +PreservedAnalyses +ScalarEvolutionPrinterPass::run(Function &F, AnalysisManager *AM) { + AM->getResult(F).print(OS); + return PreservedAnalyses::all(); +} + +INITIALIZE_PASS_BEGIN(ScalarEvolutionWrapperPass, "scalar-evolution", + "Scalar Evolution Analysis", false, true) +INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) +INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) +INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) +INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) +INITIALIZE_PASS_END(ScalarEvolutionWrapperPass, "scalar-evolution", + "Scalar Evolution Analysis", false, true) +char ScalarEvolutionWrapperPass::ID = 0; + +ScalarEvolutionWrapperPass::ScalarEvolutionWrapperPass() : FunctionPass(ID) { + initializeScalarEvolutionWrapperPassPass(*PassRegistry::getPassRegistry()); +} + +bool ScalarEvolutionWrapperPass::runOnFunction(Function &F) { + SE.reset(new ScalarEvolution( + F, getAnalysis().getTLI(), + getAnalysis().getAssumptionCache(F), + getAnalysis().getDomTree(), + getAnalysis().getLoopInfo())); + return false; +} + +void ScalarEvolutionWrapperPass::releaseMemory() { SE.reset(); } + +void ScalarEvolutionWrapperPass::print(raw_ostream &OS, const Module *) const { + SE->print(OS); +} + +void ScalarEvolutionWrapperPass::verifyAnalysis() const { + if (!VerifySCEV) + return; + + SE->verify(); +} + +void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesAll(); + AU.addRequiredTransitive(); + AU.addRequiredTransitive(); + AU.addRequiredTransitive(); + AU.addRequiredTransitive(); +} diff --git a/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp b/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp index 58d3322683a..6fcf27ea4ab 100644 --- a/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp +++ b/lib/Analysis/ScalarEvolutionAliasAnalysis.cpp @@ -27,7 +27,7 @@ char ScalarEvolutionAliasAnalysis::ID = 0; INITIALIZE_AG_PASS_BEGIN(ScalarEvolutionAliasAnalysis, AliasAnalysis, "scev-aa", "ScalarEvolution-based Alias Analysis", false, true, false) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_AG_PASS_END(ScalarEvolutionAliasAnalysis, AliasAnalysis, "scev-aa", "ScalarEvolution-based Alias Analysis", false, true, false) @@ -37,14 +37,14 @@ FunctionPass *llvm::createScalarEvolutionAliasAnalysisPass() { } void ScalarEvolutionAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const { - AU.addRequiredTransitive(); + AU.addRequiredTransitive(); AU.setPreservesAll(); AliasAnalysis::getAnalysisUsage(AU); } bool ScalarEvolutionAliasAnalysis::runOnFunction(Function &F) { InitializeAliasAnalysis(this, &F.getParent()->getDataLayout()); - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); return false; } diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp index 7789423cf6c..d0b36431130 100644 --- a/lib/Analysis/ScalarEvolutionExpander.cpp +++ b/lib/Analysis/ScalarEvolutionExpander.cpp @@ -80,7 +80,7 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty, // We assert at the end of the function since IP might point to an // instruction with different dominance properties than a cast // (an invoke for example) and not dominate BIP (but the cast does). 
- assert(SE.DT->dominates(Ret, BIP)); + assert(SE.DT.dominates(Ret, BIP)); rememberInstruction(Ret); return Ret; @@ -186,7 +186,7 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode, BuilderType::InsertPointGuard Guard(Builder); // Move the insertion point out of as many loops as we can. - while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) { + while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) { if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break; BasicBlock *Preheader = L->getLoopPreheader(); if (!Preheader) break; @@ -485,7 +485,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin, Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace())); assert(!isa(V) || - SE.DT->dominates(cast(V), Builder.GetInsertPoint())); + SE.DT.dominates(cast(V), Builder.GetInsertPoint())); // Expand the operands for a plain byte offset. Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty); @@ -519,7 +519,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin, BuilderType::InsertPointGuard Guard(Builder); // Move the insertion point out of as many loops as we can. - while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) { + while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) { if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break; BasicBlock *Preheader = L->getLoopPreheader(); if (!Preheader) break; @@ -539,7 +539,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin, BuilderType::InsertPoint SaveInsertPt = Builder.saveIP(); // Move the insertion point out of as many loops as we can. - while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) { + while (const Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock())) { if (!L->isLoopInvariant(V)) break; bool AnyIndexNotLoopInvariant = false; @@ -605,7 +605,7 @@ const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) { return nullptr; if (const SCEVUnknown *U = dyn_cast(S)) { if (const Instruction *I = dyn_cast(U->getValue())) - return Pair.first->second = SE.LI->getLoopFor(I->getParent()); + return Pair.first->second = SE.LI.getLoopFor(I->getParent()); // A non-instruction has no relevant loops. return nullptr; } @@ -615,7 +615,7 @@ const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) { L = AR->getLoop(); for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) - L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT); + L = PickMostRelevantLoop(L, getRelevantLoop(*I), SE.DT); return RelevantLoops[N] = L; } if (const SCEVCastExpr *C = dyn_cast(S)) { @@ -623,10 +623,8 @@ const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) { return RelevantLoops[C] = Result; } if (const SCEVUDivExpr *D = dyn_cast(S)) { - const Loop *Result = - PickMostRelevantLoop(getRelevantLoop(D->getLHS()), - getRelevantLoop(D->getRHS()), - *SE.DT); + const Loop *Result = PickMostRelevantLoop( + getRelevantLoop(D->getLHS()), getRelevantLoop(D->getRHS()), SE.DT); return RelevantLoops[D] = Result; } llvm_unreachable("Unexpected SCEV type!"); @@ -681,7 +679,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) { // Sort by loop. Use a stable sort so that constants follow non-constants and // pointer operands precede non-pointer operands. - std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT)); + std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT)); // Emit instructions to add all the operands. 
Hoist as much as possible // out of loops, and form meaningful getelementptrs where possible. @@ -749,7 +747,7 @@ Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) { OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I)); // Sort by loop. Use a stable sort so that constants follow non-constants. - std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT)); + std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(SE.DT)); // Emit instructions to mul all the operands. Hoist as much as possible // out of loops. @@ -836,7 +834,7 @@ bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV, for (User::op_iterator OI = IncV->op_begin()+1, OE = IncV->op_end(); OI != OE; ++OI) if (Instruction *OInst = dyn_cast(OI)) - if (!SE.DT->dominates(OInst, IVIncInsertPos)) + if (!SE.DT.dominates(OInst, IVIncInsertPos)) return false; } // Advance to the next instruction. @@ -875,7 +873,7 @@ Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV, case Instruction::Add: case Instruction::Sub: { Instruction *OInst = dyn_cast(IncV->getOperand(1)); - if (!OInst || SE.DT->dominates(OInst, InsertPos)) + if (!OInst || SE.DT.dominates(OInst, InsertPos)) return dyn_cast(IncV->getOperand(0)); return nullptr; } @@ -887,7 +885,7 @@ Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV, if (isa(*I)) continue; if (Instruction *OInst = dyn_cast(*I)) { - if (!SE.DT->dominates(OInst, InsertPos)) + if (!SE.DT.dominates(OInst, InsertPos)) return nullptr; } if (allowScale) { @@ -914,13 +912,13 @@ Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV, /// it available to other uses in this loop. Recursively hoist any operands, /// until we reach a value that dominates InsertPos. bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) { - if (SE.DT->dominates(IncV, InsertPos)) + if (SE.DT.dominates(IncV, InsertPos)) return true; // InsertPos must itself dominate IncV so that IncV's new position satisfies // its existing users. - if (isa(InsertPos) - || !SE.DT->dominates(InsertPos->getParent(), IncV->getParent())) + if (isa(InsertPos) || + !SE.DT.dominates(InsertPos->getParent(), IncV->getParent())) return false; // Check that the chain of IV operands leading back to Phi can be hoisted. @@ -932,7 +930,7 @@ bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) { // IncV is safe to hoist. IVIncs.push_back(IncV); IncV = Oper; - if (SE.DT->dominates(IncV, InsertPos)) + if (SE.DT.dominates(IncV, InsertPos)) break; } for (SmallVectorImpl::reverse_iterator I = IVIncs.rbegin(), @@ -1086,8 +1084,9 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized, // Only try partially matching scevs that need truncation and/or // step-inversion if we know this loop is outside the current loop. - bool TryNonMatchingSCEV = IVIncInsertLoop && - SE.DT->properlyDominates(LatchBlock, IVIncInsertLoop->getHeader()); + bool TryNonMatchingSCEV = + IVIncInsertLoop && + SE.DT.properlyDominates(LatchBlock, IVIncInsertLoop->getHeader()); for (BasicBlock::iterator I = L->getHeader()->begin(); PHINode *PN = dyn_cast(I); ++I) { @@ -1144,7 +1143,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized, // Potentially, move the increment. We have made sure in // isExpandedAddRecExprPHI or hoistIVInc that this is possible. if (L == IVIncInsertLoop) - hoistBeforePos(SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch); + hoistBeforePos(&SE.DT, IncV, IVIncInsertPos, AddRecPhiMatch); // Ok, the add recurrence looks usable. 
// Remember this PHI, even in post-inc mode. @@ -1174,8 +1173,8 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized, // StartV must be hoisted into L's preheader to dominate the new phi. assert(!isa(StartV) || - SE.DT->properlyDominates(cast(StartV)->getParent(), - L->getHeader())); + SE.DT.properlyDominates(cast(StartV)->getParent(), + L->getHeader())); // Expand code for the step value. Do this before creating the PHI so that PHI // reuse code doesn't see an incomplete PHI. @@ -1251,9 +1250,8 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) { if (PostIncLoops.count(L)) { PostIncLoopSet Loops; Loops.insert(L); - Normalized = - cast(TransformForPostIncUse(Normalize, S, nullptr, - nullptr, Loops, SE, *SE.DT)); + Normalized = cast(TransformForPostIncUse( + Normalize, S, nullptr, nullptr, Loops, SE, SE.DT)); } // Strip off any non-loop-dominating component from the addrec start. @@ -1303,9 +1301,8 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) { // For an expansion to use the postinc form, the client must call // expandCodeFor with an InsertPoint that is either outside the PostIncLoop // or dominated by IVIncInsertPos. - if (isa(Result) - && !SE.DT->dominates(cast(Result), - Builder.GetInsertPoint())) { + if (isa(Result) && + !SE.DT.dominates(cast(Result), Builder.GetInsertPoint())) { // The induction variable's postinc expansion does not dominate this use. // IVUsers tries to prevent this case, so it is rare. However, it can // happen when an IVUser outside the loop is not dominated by the latch @@ -1608,7 +1605,7 @@ Value *SCEVExpander::expand(const SCEV *S) { // Compute an insertion point for this SCEV object. Hoist the instructions // as far out in the loop nest as possible. Instruction *InsertPt = Builder.GetInsertPoint(); - for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ; + for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());; L = L->getParentLoop()) if (SE.isLoopInvariant(S, L)) { if (!L) break; @@ -1719,7 +1716,7 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT, // Fold constant phis. They may be congruent to other constant phis and // would confuse the logic below that expects proper IVs. 
- if (Value *V = SimplifyInstruction(Phi, DL, SE.TLI, SE.DT, SE.AC)) { + if (Value *V = SimplifyInstruction(Phi, DL, &SE.TLI, &SE.DT, &SE.AC)) { Phi->replaceAllUsesWith(V); DeadInsts.emplace_back(Phi); ++NumElim; @@ -1832,10 +1829,10 @@ Value *SCEVExpander::findExistingExpansion(const SCEV *S, TrueBB, FalseBB))) continue; - if (SE.getSCEV(LHS) == S && SE.DT->dominates(LHS, At)) + if (SE.getSCEV(LHS) == S && SE.DT.dominates(LHS, At)) return LHS; - if (SE.getSCEV(RHS) == S && SE.DT->dominates(RHS, At)) + if (SE.getSCEV(RHS) == S && SE.DT.dominates(RHS, At)) return RHS; } diff --git a/lib/CodeGen/MachineFunctionPass.cpp b/lib/CodeGen/MachineFunctionPass.cpp index aaf06a70da7..cc965947bbd 100644 --- a/lib/CodeGen/MachineFunctionPass.cpp +++ b/lib/CodeGen/MachineFunctionPass.cpp @@ -55,7 +55,7 @@ void MachineFunctionPass::getAnalysisUsage(AnalysisUsage &AU) const { AU.addPreserved(); AU.addPreserved(); AU.addPreserved(); - AU.addPreserved(); + AU.addPreserved(); AU.addPreserved(); FunctionPass::getAnalysisUsage(AU); diff --git a/lib/Passes/PassBuilder.cpp b/lib/Passes/PassBuilder.cpp index ba7132050e9..7c683472cc2 100644 --- a/lib/Passes/PassBuilder.cpp +++ b/lib/Passes/PassBuilder.cpp @@ -20,6 +20,7 @@ #include "llvm/Analysis/CGSCCPassManager.h" #include "llvm/Analysis/LazyCallGraph.h" #include "llvm/Analysis/LoopInfo.h" +#include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/TargetTransformInfo.h" #include "llvm/IR/Dominators.h" diff --git a/lib/Passes/PassRegistry.def b/lib/Passes/PassRegistry.def index d768a3ad1d2..76b19df1679 100644 --- a/lib/Passes/PassRegistry.def +++ b/lib/Passes/PassRegistry.def @@ -54,6 +54,7 @@ FUNCTION_ANALYSIS("assumptions", AssumptionAnalysis()) FUNCTION_ANALYSIS("domtree", DominatorTreeAnalysis()) FUNCTION_ANALYSIS("loops", LoopAnalysis()) FUNCTION_ANALYSIS("no-op-function", NoOpFunctionAnalysis()) +FUNCTION_ANALYSIS("scalar-evolution", ScalarEvolutionAnalysis()) FUNCTION_ANALYSIS("targetlibinfo", TargetLibraryAnalysis()) FUNCTION_ANALYSIS("targetir", TM ? 
TM->getTargetIRAnalysis() : TargetIRAnalysis()) @@ -71,6 +72,7 @@ FUNCTION_PASS("print", PrintFunctionPass(dbgs())) FUNCTION_PASS("print", AssumptionPrinterPass(dbgs())) FUNCTION_PASS("print", DominatorTreePrinterPass(dbgs())) FUNCTION_PASS("print", LoopPrinterPass(dbgs())) +FUNCTION_PASS("print", ScalarEvolutionPrinterPass(dbgs())) FUNCTION_PASS("simplify-cfg", SimplifyCFGPass()) FUNCTION_PASS("verify", VerifierPass()) FUNCTION_PASS("verify", DominatorTreeVerifierPass()) diff --git a/lib/Target/PowerPC/PPCCTRLoops.cpp b/lib/Target/PowerPC/PPCCTRLoops.cpp index baadf081a64..376d5ab1087 100644 --- a/lib/Target/PowerPC/PPCCTRLoops.cpp +++ b/lib/Target/PowerPC/PPCCTRLoops.cpp @@ -98,7 +98,7 @@ namespace { AU.addPreserved(); AU.addRequired(); AU.addPreserved(); - AU.addRequired(); + AU.addRequired(); } private: @@ -147,7 +147,7 @@ INITIALIZE_PASS_BEGIN(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_END(PPCCTRLoops, "ppc-ctr-loops", "PowerPC CTR Loops", false, false) @@ -169,7 +169,7 @@ FunctionPass *llvm::createPPCCTRLoopsVerify() { bool PPCCTRLoops::runOnFunction(Function &F) { LI = &getAnalysis().getLoopInfo(); - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); DT = &getAnalysis().getDomTree(); DL = &F.getParent()->getDataLayout(); auto *TLIP = getAnalysisIfAvailable(); diff --git a/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp b/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp index b4e1c099f19..e2cc7d569ba 100644 --- a/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp +++ b/lib/Target/PowerPC/PPCLoopDataPrefetch.cpp @@ -71,10 +71,10 @@ namespace { AU.addPreserved(); AU.addRequired(); AU.addPreserved(); - AU.addRequired(); + AU.addRequired(); // FIXME: For some reason, preserving SE here breaks LSR (even if // this pass changes nothing). - // AU.addPreserved(); + // AU.addPreserved(); AU.addRequired(); } @@ -96,7 +96,7 @@ INITIALIZE_PASS_BEGIN(PPCLoopDataPrefetch, "ppc-loop-data-prefetch", INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_END(PPCLoopDataPrefetch, "ppc-loop-data-prefetch", "PPC Loop Data Prefetch", false, false) @@ -104,7 +104,7 @@ FunctionPass *llvm::createPPCLoopDataPrefetchPass() { return new PPCLoopDataPref bool PPCLoopDataPrefetch::runOnFunction(Function &F) { LI = &getAnalysis().getLoopInfo(); - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); DL = &F.getParent()->getDataLayout(); AC = &getAnalysis().getAssumptionCache(F); TTI = &getAnalysis().getTTI(F); diff --git a/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp b/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp index b6e7799402e..60c0afadf34 100644 --- a/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp +++ b/lib/Target/PowerPC/PPCLoopPreIncPrep.cpp @@ -73,7 +73,7 @@ namespace { AU.addPreserved(); AU.addRequired(); AU.addPreserved(); - AU.addRequired(); + AU.addRequired(); } bool runOnFunction(Function &F) override; @@ -93,7 +93,7 @@ char PPCLoopPreIncPrep::ID = 0; static const char *name = "Prepare loop for pre-inc. 
addressing modes"; INITIALIZE_PASS_BEGIN(PPCLoopPreIncPrep, DEBUG_TYPE, name, false, false) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_END(PPCLoopPreIncPrep, DEBUG_TYPE, name, false, false) FunctionPass *llvm::createPPCLoopPreIncPrepPass(PPCTargetMachine &TM) { @@ -140,7 +140,7 @@ static Value *GetPointerOperand(Value *MemI) { bool PPCLoopPreIncPrep::runOnFunction(Function &F) { LI = &getAnalysis().getLoopInfo(); - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); bool MadeChange = false; diff --git a/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp index 8918909f484..3c2c0cbc3d2 100644 --- a/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp +++ b/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp @@ -54,13 +54,13 @@ struct AlignmentFromAssumptions : public FunctionPass { void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired(); - AU.addRequired(); + AU.addRequired(); AU.addRequired(); AU.setPreservesCFG(); AU.addPreserved(); AU.addPreserved(); - AU.addPreserved(); + AU.addPreserved(); } // For memory transfers, we need a common alignment for both the source and @@ -84,7 +84,7 @@ INITIALIZE_PASS_BEGIN(AlignmentFromAssumptions, AA_NAME, aip_name, false, false) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_END(AlignmentFromAssumptions, AA_NAME, aip_name, false, false) @@ -410,7 +410,7 @@ bool AlignmentFromAssumptions::processAssumption(CallInst *ACall) { bool AlignmentFromAssumptions::runOnFunction(Function &F) { bool Changed = false; auto &AC = getAnalysis().getAssumptionCache(F); - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); DT = &getAnalysis().getDomTree(); NewDestAlignments.clear(); diff --git a/lib/Transforms/Scalar/IndVarSimplify.cpp b/lib/Transforms/Scalar/IndVarSimplify.cpp index 9c2e11be3f9..66f8aef98d4 100644 --- a/lib/Transforms/Scalar/IndVarSimplify.cpp +++ b/lib/Transforms/Scalar/IndVarSimplify.cpp @@ -108,10 +108,10 @@ namespace { void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired(); AU.addRequired(); - AU.addRequired(); + AU.addRequired(); AU.addRequiredID(LoopSimplifyID); AU.addRequiredID(LCSSAID); - AU.addPreserved(); + AU.addPreserved(); AU.addPreservedID(LoopSimplifyID); AU.addPreservedID(LCSSAID); AU.setPreservesCFG(); @@ -147,7 +147,7 @@ INITIALIZE_PASS_BEGIN(IndVarSimplify, "indvars", "Induction Variable Simplification", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) INITIALIZE_PASS_DEPENDENCY(LCSSA) INITIALIZE_PASS_END(IndVarSimplify, "indvars", @@ -1959,7 +1959,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) { return false; LI = &getAnalysis().getLoopInfo(); - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); DT = &getAnalysis().getDomTree(); auto *TLIP = getAnalysisIfAvailable(); TLI = TLIP ? 
&TLIP->getTLI() : nullptr; diff --git a/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp b/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp index 08fdcc38c04..736b656499d 100644 --- a/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp +++ b/lib/Transforms/Scalar/InductiveRangeCheckElimination.cpp @@ -214,7 +214,7 @@ public: AU.addRequired(); AU.addRequiredID(LoopSimplifyID); AU.addRequiredID(LCSSAID); - AU.addRequired(); + AU.addRequired(); AU.addRequired(); } @@ -1399,7 +1399,7 @@ bool InductiveRangeCheckElimination::runOnLoop(Loop *L, LPPassManager &LPM) { LLVMContext &Context = Preheader->getContext(); InductiveRangeCheck::AllocatorTy IRCAlloc; SmallVector RangeChecks; - ScalarEvolution &SE = getAnalysis(); + ScalarEvolution &SE = getAnalysis().getSE(); BranchProbabilityInfo &BPI = getAnalysis().getBPI(); diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp index 97490f78cf7..f2e9e4e71ad 100644 --- a/lib/Transforms/Scalar/LICM.cpp +++ b/lib/Transforms/Scalar/LICM.cpp @@ -120,7 +120,7 @@ namespace { AU.addPreservedID(LCSSAID); AU.addRequired(); AU.addPreserved(); - AU.addPreserved(); + AU.addPreserved(); AU.addRequired(); } @@ -164,7 +164,7 @@ INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) INITIALIZE_PASS_DEPENDENCY(LCSSA) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_END(LICM, "licm", "Loop Invariant Code Motion", false, false) @@ -264,9 +264,10 @@ bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) { // FIXME: This is really heavy handed. It would be a bit better to use an // SSAUpdater strategy during promotion that was LCSSA aware and reformed // it as it went. - if (Changed) - formLCSSARecursively(*L, *DT, LI, - getAnalysisIfAvailable()); + if (Changed) { + auto *SEWP = getAnalysisIfAvailable(); + formLCSSARecursively(*L, *DT, LI, SEWP ? &SEWP->getSE() : nullptr); + } } // Check that neither this loop nor its parent have had LCSSA broken. LICM is diff --git a/lib/Transforms/Scalar/LoopDeletion.cpp b/lib/Transforms/Scalar/LoopDeletion.cpp index 98b068edf58..d0e8b7c8fc4 100644 --- a/lib/Transforms/Scalar/LoopDeletion.cpp +++ b/lib/Transforms/Scalar/LoopDeletion.cpp @@ -40,11 +40,11 @@ namespace { void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired(); AU.addRequired(); - AU.addRequired(); + AU.addRequired(); AU.addRequiredID(LoopSimplifyID); AU.addRequiredID(LCSSAID); - AU.addPreserved(); + AU.addPreserved(); AU.addPreserved(); AU.addPreserved(); AU.addPreservedID(LoopSimplifyID); @@ -64,7 +64,7 @@ INITIALIZE_PASS_BEGIN(LoopDeletion, "loop-deletion", "Delete dead loops", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) INITIALIZE_PASS_DEPENDENCY(LCSSA) INITIALIZE_PASS_END(LoopDeletion, "loop-deletion", @@ -169,7 +169,7 @@ bool LoopDeletion::runOnLoop(Loop *L, LPPassManager &LPM) { // Don't remove loops for which we can't solve the trip count. // They could be infinite, in which case we'd be changing program behavior. 
- ScalarEvolution &SE = getAnalysis(); + ScalarEvolution &SE = getAnalysis().getSE(); const SCEV *S = SE.getMaxBackedgeTakenCount(L); if (isa(S)) return Changed; diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp index ee520310520..44e9afd5e12 100644 --- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp +++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp @@ -96,8 +96,8 @@ public: AU.addPreservedID(LCSSAID); AU.addRequired(); AU.addPreserved(); - AU.addRequired(); - AU.addPreserved(); + AU.addRequired(); + AU.addPreserved(); AU.addPreserved(); AU.addRequired(); AU.addRequired(); @@ -146,7 +146,7 @@ INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) INITIALIZE_PASS_DEPENDENCY(LCSSA) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) @@ -192,7 +192,7 @@ bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) { AA = &getAnalysis(); DT = &getAnalysis().getDomTree(); LI = &getAnalysis().getLoopInfo(); - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); TLI = &getAnalysis().getTLI(); TTI = &getAnalysis().getTTI( *CurLoop->getHeader()->getParent()); diff --git a/lib/Transforms/Scalar/LoopInstSimplify.cpp b/lib/Transforms/Scalar/LoopInstSimplify.cpp index e1250265475..39c7738ca9c 100644 --- a/lib/Transforms/Scalar/LoopInstSimplify.cpp +++ b/lib/Transforms/Scalar/LoopInstSimplify.cpp @@ -48,7 +48,7 @@ namespace { AU.addRequiredID(LoopSimplifyID); AU.addPreservedID(LoopSimplifyID); AU.addPreservedID(LCSSAID); - AU.addPreserved(); + AU.addPreserved(); AU.addRequired(); } }; diff --git a/lib/Transforms/Scalar/LoopInterchange.cpp b/lib/Transforms/Scalar/LoopInterchange.cpp index 2f3b5e97185..b99210a7a6b 100644 --- a/lib/Transforms/Scalar/LoopInterchange.cpp +++ b/lib/Transforms/Scalar/LoopInterchange.cpp @@ -437,7 +437,7 @@ struct LoopInterchange : public FunctionPass { } void getAnalysisUsage(AnalysisUsage &AU) const override { - AU.addRequired(); + AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addRequired(); @@ -447,7 +447,7 @@ struct LoopInterchange : public FunctionPass { } bool runOnFunction(Function &F) override { - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); LI = &getAnalysis().getLoopInfo(); DA = &getAnalysis(); auto *DTWP = getAnalysisIfAvailable(); @@ -1289,7 +1289,7 @@ INITIALIZE_PASS_BEGIN(LoopInterchange, "loop-interchange", INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_DEPENDENCY(DependenceAnalysis) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) INITIALIZE_PASS_DEPENDENCY(LCSSA) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) diff --git a/lib/Transforms/Scalar/LoopRerollPass.cpp b/lib/Transforms/Scalar/LoopRerollPass.cpp index 05d8d20b1d2..50f95055756 100644 --- a/lib/Transforms/Scalar/LoopRerollPass.cpp +++ b/lib/Transforms/Scalar/LoopRerollPass.cpp @@ -152,7 +152,7 @@ namespace { AU.addPreserved(); AU.addRequired(); AU.addPreserved(); - AU.addRequired(); + AU.addRequired(); AU.addRequired(); } @@ -452,7 +452,7 @@ INITIALIZE_PASS_BEGIN(LoopReroll, "loop-reroll", "Reroll loops", false, false) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) 
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_PASS_END(LoopReroll, "loop-reroll", "Reroll loops", false, false) @@ -1468,7 +1468,7 @@ bool LoopReroll::runOnLoop(Loop *L, LPPassManager &LPM) { AA = &getAnalysis(); LI = &getAnalysis().getLoopInfo(); - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); TLI = &getAnalysis().getTLI(); DT = &getAnalysis().getDomTree(); diff --git a/lib/Transforms/Scalar/LoopRotation.cpp b/lib/Transforms/Scalar/LoopRotation.cpp index 770a549709f..1c108fbc87a 100644 --- a/lib/Transforms/Scalar/LoopRotation.cpp +++ b/lib/Transforms/Scalar/LoopRotation.cpp @@ -66,7 +66,7 @@ namespace { AU.addPreservedID(LoopSimplifyID); AU.addRequiredID(LCSSAID); AU.addPreservedID(LCSSAID); - AU.addPreserved(); + AU.addPreserved(); AU.addRequired(); } @@ -384,8 +384,8 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) { // Anything ScalarEvolution may know about this loop or the PHI nodes // in its header will soon be invalidated. - if (ScalarEvolution *SE = getAnalysisIfAvailable()) - SE->forgetLoop(L); + if (auto *SEWP = getAnalysisIfAvailable()) + SEWP->getSE().forgetLoop(L); DEBUG(dbgs() << "LoopRotation: rotating "; L->dump()); diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp index 4904426dfa8..e18d284367f 100644 --- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -4785,7 +4785,8 @@ LSRInstance::ImplementSolution(const SmallVectorImpl &Solution, } LSRInstance::LSRInstance(Loop *L, Pass *P) - : IU(P->getAnalysis()), SE(P->getAnalysis()), + : IU(P->getAnalysis()), + SE(P->getAnalysis().getSE()), DT(P->getAnalysis().getDomTree()), LI(P->getAnalysis().getLoopInfo()), TTI(P->getAnalysis().getTTI( @@ -4958,7 +4959,7 @@ INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce", "Loop Strength Reduction", false, false) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_DEPENDENCY(IVUsers) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) @@ -4984,8 +4985,8 @@ void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequiredID(LoopSimplifyID); AU.addRequired(); AU.addPreserved(); - AU.addRequired(); - AU.addPreserved(); + AU.addRequired(); + AU.addPreserved(); // Requiring LoopSimplify a second time here prevents IVUsers from running // twice, since LoopSimplify was invalidated by running ScalarEvolution. 
AU.addRequiredID(LoopSimplifyID); @@ -5008,7 +5009,8 @@ bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) { if (EnablePhiElim && L->isLoopSimplifyForm()) { SmallVector DeadInsts; const DataLayout &DL = L->getHeader()->getModule()->getDataLayout(); - SCEVExpander Rewriter(getAnalysis(), DL, "lsr"); + SCEVExpander Rewriter(getAnalysis().getSE(), DL, + "lsr"); #ifndef NDEBUG Rewriter.setDebugType(DEBUG_TYPE); #endif diff --git a/lib/Transforms/Scalar/LoopUnrollPass.cpp b/lib/Transforms/Scalar/LoopUnrollPass.cpp index 0c047e78eab..f38360e6218 100644 --- a/lib/Transforms/Scalar/LoopUnrollPass.cpp +++ b/lib/Transforms/Scalar/LoopUnrollPass.cpp @@ -144,8 +144,8 @@ namespace { AU.addPreservedID(LoopSimplifyID); AU.addRequiredID(LCSSAID); AU.addPreservedID(LCSSAID); - AU.addRequired(); - AU.addPreserved(); + AU.addRequired(); + AU.addPreserved(); AU.addRequired(); // FIXME: Loop unroll requires LCSSA. And LCSSA requires dom info. // If loop unroll does not preserve dom info then LCSSA pass on next @@ -241,7 +241,7 @@ INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) INITIALIZE_PASS_DEPENDENCY(LCSSA) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_END(LoopUnroll, "loop-unroll", "Unroll loops", false, false) Pass *llvm::createLoopUnrollPass(int Threshold, int Count, int AllowPartial, @@ -894,7 +894,7 @@ bool LoopUnroll::runOnLoop(Loop *L, LPPassManager &LPM) { auto &DT = getAnalysis().getDomTree(); LoopInfo *LI = &getAnalysis().getLoopInfo(); - ScalarEvolution *SE = &getAnalysis(); + ScalarEvolution *SE = &getAnalysis().getSE(); const TargetTransformInfo &TTI = getAnalysis().getTTI(F); auto &AC = getAnalysis().getAssumptionCache(F); diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp index 62047b96fac..7475150b0c2 100644 --- a/lib/Transforms/Scalar/LoopUnswitch.cpp +++ b/lib/Transforms/Scalar/LoopUnswitch.cpp @@ -193,7 +193,7 @@ namespace { AU.addRequiredID(LCSSAID); AU.addPreservedID(LCSSAID); AU.addPreserved(); - AU.addPreserved(); + AU.addPreserved(); AU.addRequired(); } @@ -915,8 +915,8 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val, << " blocks] in Function " << F->getName() << " when '" << *Val << "' == " << *LIC << "\n"); - if (ScalarEvolution *SE = getAnalysisIfAvailable()) - SE->forgetLoop(L); + if (auto *SEWP = getAnalysisIfAvailable()) + SEWP->getSE().forgetLoop(L); LoopBlocks.clear(); NewBlocks.clear(); diff --git a/lib/Transforms/Scalar/NaryReassociate.cpp b/lib/Transforms/Scalar/NaryReassociate.cpp index 58b9c9d092d..aae1cce8064 100644 --- a/lib/Transforms/Scalar/NaryReassociate.cpp +++ b/lib/Transforms/Scalar/NaryReassociate.cpp @@ -110,11 +110,11 @@ public: void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addPreserved(); - AU.addPreserved(); + AU.addPreserved(); AU.addPreserved(); AU.addRequired(); AU.addRequired(); - AU.addRequired(); + AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.setPreservesCFG(); @@ -191,7 +191,7 @@ INITIALIZE_PASS_BEGIN(NaryReassociate, "nary-reassociate", "Nary reassociation", false, false) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_END(NaryReassociate, "nary-reassociate", "Nary reassociation", @@ -207,7 +207,7 @@ bool NaryReassociate::runOnFunction(Function &F) { AC = &getAnalysis().getAssumptionCache(F); DT = &getAnalysis().getDomTree(); - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); TLI = &getAnalysis().getTLI(); TTI = &getAnalysis().getTTI(F); diff --git a/lib/Transforms/Scalar/PlaceSafepoints.cpp b/lib/Transforms/Scalar/PlaceSafepoints.cpp index 79b4e1a83ee..eb1fe48f42d 100644 --- a/lib/Transforms/Scalar/PlaceSafepoints.cpp +++ b/lib/Transforms/Scalar/PlaceSafepoints.cpp @@ -142,7 +142,7 @@ struct PlaceBackedgeSafepointsImpl : public FunctionPass { } bool runOnFunction(Function &F) override { - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); DT = &getAnalysis().getDomTree(); LI = &getAnalysis().getLoopInfo(); for (auto I = LI->begin(), E = LI->end(); I != E; I++) { @@ -153,7 +153,7 @@ struct PlaceBackedgeSafepointsImpl : public FunctionPass { void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired(); - AU.addRequired(); + AU.addRequired(); AU.addRequired(); // We no longer modify the IR at all in this pass. Thus all // analysis are preserved. @@ -747,7 +747,7 @@ FunctionPass *llvm::createPlaceSafepointsPass() { INITIALIZE_PASS_BEGIN(PlaceBackedgeSafepointsImpl, "place-backedge-safepoints-impl", "Place Backedge Safepoints", false, false) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_END(PlaceBackedgeSafepointsImpl, diff --git a/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp index 97950caa3de..bccd8164ed4 100644 --- a/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp +++ b/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp @@ -322,7 +322,7 @@ public: void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired(); - AU.addRequired(); + AU.addRequired(); AU.addRequired(); AU.setPreservesCFG(); } @@ -412,7 +412,7 @@ INITIALIZE_PASS_BEGIN( "Split GEPs to a variadic base and a constant offset for better CSE", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_END( SeparateConstOffsetFromGEP, "separate-const-offset-from-gep", @@ -1035,7 +1035,7 @@ bool SeparateConstOffsetFromGEP::runOnFunction(Function &F) { return false; DT = &getAnalysis().getDomTree(); - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); bool Changed = false; for (Function::iterator B = F.begin(), BE = F.end(); B != BE; ++B) { diff --git a/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp b/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp index b2b886c4f11..913454bdfe8 100644 --- a/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp +++ b/lib/Transforms/Scalar/StraightLineStrengthReduce.cpp @@ -131,7 +131,7 @@ public: void getAnalysisUsage(AnalysisUsage &AU) const override { AU.addRequired(); - AU.addRequired(); + AU.addRequired(); AU.addRequired(); // We do not modify the shape of the CFG. 
AU.setPreservesCFG(); @@ -212,7 +212,7 @@ char StraightLineStrengthReduce::ID = 0; INITIALIZE_PASS_BEGIN(StraightLineStrengthReduce, "slsr", "Straight line strength reduction", false, false) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_END(StraightLineStrengthReduce, "slsr", "Straight line strength reduction", false, false) @@ -690,7 +690,7 @@ bool StraightLineStrengthReduce::runOnFunction(Function &F) { TTI = &getAnalysis().getTTI(F); DT = &getAnalysis().getDomTree(); - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); // Traverse the dominator tree in the depth-first order. This order makes sure // all bases of a candidate are in Candidates when we process it. for (auto node = GraphTraits::nodes_begin(DT); diff --git a/lib/Transforms/Utils/LCSSA.cpp b/lib/Transforms/Utils/LCSSA.cpp index d45944d47cf..4e8c1ad2f82 100644 --- a/lib/Transforms/Utils/LCSSA.cpp +++ b/lib/Transforms/Utils/LCSSA.cpp @@ -298,7 +298,7 @@ struct LCSSA : public FunctionPass { AU.addRequired(); AU.addPreservedID(LoopSimplifyID); AU.addPreserved(); - AU.addPreserved(); + AU.addPreserved(); } }; } @@ -318,7 +318,8 @@ bool LCSSA::runOnFunction(Function &F) { bool Changed = false; LI = &getAnalysis().getLoopInfo(); DT = &getAnalysis().getDomTree(); - SE = getAnalysisIfAvailable(); + auto *SEWP = getAnalysisIfAvailable(); + SE = SEWP ? &SEWP->getSE() : nullptr; // Simplify each loop nest in the function. for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I) diff --git a/lib/Transforms/Utils/LoopSimplify.cpp b/lib/Transforms/Utils/LoopSimplify.cpp index ca188f470f5..49950165cc1 100644 --- a/lib/Transforms/Utils/LoopSimplify.cpp +++ b/lib/Transforms/Utils/LoopSimplify.cpp @@ -760,7 +760,7 @@ namespace { AU.addPreserved(); AU.addPreserved(); - AU.addPreserved(); + AU.addPreserved(); AU.addPreserved(); AU.addPreservedID(BreakCriticalEdgesID); // No critical edges added. } @@ -790,7 +790,8 @@ bool LoopSimplify::runOnFunction(Function &F) { bool Changed = false; LI = &getAnalysis().getLoopInfo(); DT = &getAnalysis().getDomTree(); - SE = getAnalysisIfAvailable(); + auto *SEWP = getAnalysisIfAvailable(); + SE = SEWP ? &SEWP->getSE() : nullptr; AC = &getAnalysis().getAssumptionCache(F); // Simplify each loop nest in the function. diff --git a/lib/Transforms/Utils/LoopUnroll.cpp b/lib/Transforms/Utils/LoopUnroll.cpp index 6e7a571da51..971f01b72a1 100644 --- a/lib/Transforms/Utils/LoopUnroll.cpp +++ b/lib/Transforms/Utils/LoopUnroll.cpp @@ -110,10 +110,11 @@ FoldBlockIntoPredecessor(BasicBlock *BB, LoopInfo* LI, LPPassManager *LPM, // ScalarEvolution holds references to loop exit blocks. if (LPM) { - if (ScalarEvolution *SE = LPM->getAnalysisIfAvailable()) { + if (auto *SEWP = + LPM->getAnalysisIfAvailable()) { if (Loop *L = LI->getLoopFor(BB)) { if (ForgottenLoops.insert(L).second) - SE->forgetLoop(L); + SEWP->getSE().forgetLoop(L); } } } @@ -232,8 +233,9 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, // Notify ScalarEvolution that the loop will be substantially changed, // if not outright eliminated. - ScalarEvolution *SE = - PP ? PP->getAnalysisIfAvailable() : nullptr; + auto *SEWP = + PP ? PP->getAnalysisIfAvailable() : nullptr; + ScalarEvolution *SE = SEWP ? 
&SEWP->getSE() : nullptr; if (SE) SE->forgetLoop(L); diff --git a/lib/Transforms/Utils/LoopUnrollRuntime.cpp b/lib/Transforms/Utils/LoopUnrollRuntime.cpp index 5658e9201c3..d803ddf040b 100644 --- a/lib/Transforms/Utils/LoopUnrollRuntime.cpp +++ b/lib/Transforms/Utils/LoopUnrollRuntime.cpp @@ -293,13 +293,14 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count, // loops to be unrolled than relying on induction var simplification if (!LPM) return false; - ScalarEvolution *SE = LPM->getAnalysisIfAvailable(); - if (!SE) + auto *SEWP = LPM->getAnalysisIfAvailable(); + if (!SEWP) return false; + ScalarEvolution &SE = SEWP->getSE(); // Only unroll loops with a computable trip count and the trip count needs // to be an int value (allowing a pointer type is a TODO item) - const SCEV *BECountSC = SE->getBackedgeTakenCount(L); + const SCEV *BECountSC = SE.getBackedgeTakenCount(L); if (isa(BECountSC) || !BECountSC->getType()->isIntegerTy()) return false; @@ -308,13 +309,13 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count, // Add 1 since the backedge count doesn't include the first loop iteration const SCEV *TripCountSC = - SE->getAddExpr(BECountSC, SE->getConstant(BECountSC->getType(), 1)); + SE.getAddExpr(BECountSC, SE.getConstant(BECountSC->getType(), 1)); if (isa(TripCountSC)) return false; BasicBlock *Header = L->getHeader(); const DataLayout &DL = Header->getModule()->getDataLayout(); - SCEVExpander Expander(*SE, DL, "loop-unroll"); + SCEVExpander Expander(SE, DL, "loop-unroll"); if (!AllowExpensiveTripCount && Expander.isHighCostExpansion(TripCountSC, L)) return false; @@ -331,7 +332,7 @@ bool llvm::UnrollRuntimeLoopProlog(Loop *L, unsigned Count, // If this loop is nested, then the loop unroller changes the code in // parent loop, so the Scalar Evolution pass needs to be run again if (Loop *ParentLoop = L->getParentLoop()) - SE->forgetLoop(ParentLoop); + SE.forgetLoop(ParentLoop); // Grab analyses that we preserve. auto *DTWP = LPM->getAnalysisIfAvailable(); diff --git a/lib/Transforms/Vectorize/BBVectorize.cpp b/lib/Transforms/Vectorize/BBVectorize.cpp index 8b238caf8c8..7ddf2b9505a 100644 --- a/lib/Transforms/Vectorize/BBVectorize.cpp +++ b/lib/Transforms/Vectorize/BBVectorize.cpp @@ -207,7 +207,7 @@ namespace { : BasicBlockPass(ID), Config(C) { AA = &P->getAnalysis(); DT = &P->getAnalysis().getDomTree(); - SE = &P->getAnalysis(); + SE = &P->getAnalysis().getSE(); TLI = &P->getAnalysis().getTLI(); TTI = IgnoreTargetInfo ? nullptr @@ -442,7 +442,7 @@ namespace { AA = &getAnalysis(); DT = &getAnalysis().getDomTree(); - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); TLI = &getAnalysis().getTLI(); TTI = IgnoreTargetInfo ? 
nullptr @@ -456,12 +456,12 @@ namespace { BasicBlockPass::getAnalysisUsage(AU); AU.addRequired(); AU.addRequired(); - AU.addRequired(); + AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addPreserved(); AU.addPreserved(); - AU.addPreserved(); + AU.addPreserved(); AU.setPreservesCFG(); } @@ -3200,7 +3200,7 @@ INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_END(BBVectorize, BBV_NAME, bb_vectorize_name, false, false) BasicBlockPass *llvm::createBBVectorizePass(const VectorizeConfig &C) { diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp index d40fe33e6c0..e131cc02e37 100644 --- a/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -1569,7 +1569,7 @@ struct LoopVectorize : public FunctionPass { BlockFrequency ColdEntryFreq; bool runOnFunction(Function &F) override { - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); LI = &getAnalysis().getLoopInfo(); TTI = &getAnalysis().getTTI(F); DT = &getAnalysis().getDomTree(); @@ -1863,7 +1863,7 @@ struct LoopVectorize : public FunctionPass { AU.addRequired(); AU.addRequired(); AU.addRequired(); - AU.addRequired(); + AU.addRequired(); AU.addRequired(); AU.addRequired(); AU.addRequired(); @@ -5399,7 +5399,7 @@ INITIALIZE_AG_DEPENDENCY(AliasAnalysis) INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) -INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) +INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) INITIALIZE_PASS_DEPENDENCY(LCSSA) INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) INITIALIZE_PASS_DEPENDENCY(LoopSimplify) diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp index 61773cd2013..df15a70753c 100644 --- a/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -3076,7 +3076,7 @@ struct SLPVectorizer : public FunctionPass { if (skipOptnoneFunction(F)) return false; - SE = &getAnalysis(); + SE = &getAnalysis().getSE(); TTI = &getAnalysis().getTTI(F); auto *TLIP = getAnalysisIfAvailable(); TLI = TLIP ? 
                       &TLIP->getTLI() : nullptr;
@@ -3141,7 +3141,7 @@ struct SLPVectorizer : public FunctionPass {
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     FunctionPass::getAnalysisUsage(AU);
     AU.addRequired<AssumptionCacheTracker>();
-    AU.addRequired<ScalarEvolution>();
+    AU.addRequired<ScalarEvolutionWrapperPass>();
     AU.addRequired<AliasAnalysis>();
     AU.addRequired<TargetTransformInfoWrapperPass>();
     AU.addRequired<LoopInfoWrapperPass>();
@@ -4053,7 +4053,7 @@ INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
-INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
+INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
 INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
 INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)
diff --git a/test/Analysis/ScalarEvolution/trip-count.ll b/test/Analysis/ScalarEvolution/trip-count.ll
index 1b75c88c753..89750810d1b 100644
--- a/test/Analysis/ScalarEvolution/trip-count.ll
+++ b/test/Analysis/ScalarEvolution/trip-count.ll
@@ -1,4 +1,5 @@
 ; RUN: opt < %s -analyze -scalar-evolution -scalar-evolution-max-iterations=0 | FileCheck %s
+; RUN: opt < %s -passes='print<scalar-evolution>' -disable-output 2>&1 | FileCheck %s
 ; PR1101
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -6,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu"
 @A = weak global [1000 x i32] zeroinitializer, align 32
-; CHECK: Printing analysis 'Scalar Evolution Analysis' for function 'test1':
+; CHECK-LABEL: Determining loop execution counts for: @test1
 ; CHECK: backedge-taken count is 10000
 define void @test1(i32 %N) {
@@ -32,7 +33,7 @@ return:                ; preds = %bb5
 }
 ; PR22795
-; CHECK: Printing analysis 'Scalar Evolution Analysis' for function 'test2':
+; CHECK-LABEL: Classifying expressions for: @test2
 ; CHECK: %iv = phi i32 [ -1, %entry ], [ %next.1, %for.inc.1 ]
 ; CHECK-NEXT: --> {-1,+,2}<%preheader> U: full-set S: full-set Exits: 13
diff --git a/unittests/Analysis/ScalarEvolutionTest.cpp b/unittests/Analysis/ScalarEvolutionTest.cpp
index 6ce7ff43a31..938dafe6038 100644
--- a/unittests/Analysis/ScalarEvolutionTest.cpp
+++ b/unittests/Analysis/ScalarEvolutionTest.cpp
@@ -8,9 +8,13 @@
 //===----------------------------------------------------------------------===//
 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/LoopInfo.h"
 #include "llvm/IR/Constants.h"
+#include "llvm/IR/Dominators.h"
 #include "llvm/IR/GlobalVariable.h"
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/IR/Module.h"
@@ -24,16 +28,23 @@ namespace {
 // deleting the PassManager.
 class ScalarEvolutionsTest : public testing::Test {
 protected:
-  ScalarEvolutionsTest() : M("", Context), SE(*new ScalarEvolution) {}
-  ~ScalarEvolutionsTest() override {
-    // Manually clean up, since we allocated new SCEV objects after the
-    // pass was finished.
-    SE.releaseMemory();
-  }
   LLVMContext Context;
   Module M;
-  legacy::PassManager PM;
-  ScalarEvolution &SE;
+  TargetLibraryInfoImpl TLII;
+  TargetLibraryInfo TLI;
+
+  std::unique_ptr<AssumptionCache> AC;
+  std::unique_ptr<DominatorTree> DT;
+  std::unique_ptr<LoopInfo> LI;
+
+  ScalarEvolutionsTest() : M("", Context), TLII(), TLI(TLII) {}
+
+  ScalarEvolution buildSE(Function &F) {
+    AC.reset(new AssumptionCache(F));
+    DT.reset(new DominatorTree(F));
+    LI.reset(new LoopInfo(*DT));
+    return ScalarEvolution(F, TLI, *AC, *DT, *LI);
+  }
 };
 TEST_F(ScalarEvolutionsTest, SCEVUnknownRAUW) {
@@ -49,9 +60,7 @@ TEST_F(ScalarEvolutionsTest, SCEVUnknownRAUW) {
   Value *V1 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage, Init, "V1");
   Value *V2 = new GlobalVariable(M, Ty, false, GlobalValue::ExternalLinkage, Init, "V2");
-  // Create a ScalarEvolution and "run" it so that it gets initialized.
-  PM.add(&SE);
-  PM.run(M);
+  ScalarEvolution SE = buildSE(*F);
   const SCEV *S0 = SE.getSCEV(V0);
   const SCEV *S1 = SE.getSCEV(V1);
@@ -96,9 +105,7 @@ TEST_F(ScalarEvolutionsTest, SCEVMultiplyAddRecs) {
   BasicBlock *BB = BasicBlock::Create(Context, "entry", F);
   ReturnInst::Create(Context, nullptr, BB);
-  // Create a ScalarEvolution and "run" it so that it gets initialized.
-  PM.add(&SE);
-  PM.run(M);
+  ScalarEvolution SE = buildSE(*F);
   // It's possible to produce an empty loop through the default constructor,
   // but you can't add any blocks to it without a LoopInfo pass.
-- 
2.34.1
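
For out-of-tree passes that need the same update, the recurring legacy pass manager change in the hunks above boils down to the sketch below. It is illustrative only: "MySCEVClientPass" and its members are made-up names, and the usual INITIALIZE_PASS_* registration boilerplate is omitted (it gains an INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) line, as in the files above).

    // Sketch of the legacy-PM client pattern used throughout this patch.
    // The pass name and its members are illustrative, not from any file above.
    #include "llvm/Analysis/ScalarEvolution.h"
    #include "llvm/IR/Function.h"
    #include "llvm/Pass.h"
    using namespace llvm;

    namespace {
    struct MySCEVClientPass : public FunctionPass {
      static char ID;
      ScalarEvolution *SE = nullptr;
      MySCEVClientPass() : FunctionPass(ID) {}

      void getAnalysisUsage(AnalysisUsage &AU) const override {
        // Before this patch: AU.addRequired<ScalarEvolution>();
        AU.addRequired<ScalarEvolutionWrapperPass>();
        AU.addPreserved<ScalarEvolutionWrapperPass>();
      }

      bool runOnFunction(Function &F) override {
        // Before this patch: SE = &getAnalysis<ScalarEvolution>();
        SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();

        // Optional-analysis form, as in the LCSSA/LoopSimplify/LICM hunks.
        auto *SEWP = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
        ScalarEvolution *MaybeSE = SEWP ? &SEWP->getSE() : nullptr;
        (void)MaybeSE;
        return false;
      }
    };
    char MySCEVClientPass::ID = 0;
    } // namespace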
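
Under the new pass manager, the analysis is exercised via the print<scalar-evolution> pass registered in PassRegistry.def and the new RUN line in trip-count.ll. A consumer would look roughly like the sketch below. Note that it uses the present-day new-PM spelling (PassInfoMixin, FunctionAnalysisManager), which differs slightly from the interface at the time of this patch, so treat the boilerplate as an assumption rather than a copy of the in-tree code.

    // Sketch of a new-PM consumer of ScalarEvolutionAnalysis. The boilerplate
    // follows the modern new-PM spelling and is assumed, not taken from r245193.
    #include "llvm/Analysis/ScalarEvolution.h"
    #include "llvm/IR/Function.h"
    #include "llvm/IR/PassManager.h"
    using namespace llvm;

    struct MySCEVConsumerPass : PassInfoMixin<MySCEVConsumerPass> {
      PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
        // The per-function ScalarEvolution object is owned by the analysis
        // manager and rebuilt, not merely re-wired, when it is invalidated.
        ScalarEvolution &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
        for (BasicBlock &BB : F)
          for (Instruction &I : BB)
            if (SE.isSCEVable(I.getType()))
              (void)SE.getSCEV(&I); // Query (and populate) the SCEV cache.
        return PreservedAnalyses::all();
      }
    };

The new-PM equivalent of the old "-analyze -scalar-evolution" invocation is then the RUN line added above: opt -passes='print<scalar-evolution>' -disable-output.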
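
Finally, because ScalarEvolution is now a plain, movable object, it can be built stand-alone from its dependencies, exactly as buildSE() does in the updated unit test above. A minimal sketch, assuming the caller already owns the four dependency analyses (the helper name makeSE is hypothetical):

    // Sketch of constructing a stand-alone ScalarEvolution, mirroring buildSE()
    // in the updated ScalarEvolutionTest.cpp fixture.
    #include "llvm/Analysis/AssumptionCache.h"
    #include "llvm/Analysis/LoopInfo.h"
    #include "llvm/Analysis/ScalarEvolution.h"
    #include "llvm/Analysis/TargetLibraryInfo.h"
    #include "llvm/IR/Dominators.h"
    #include "llvm/IR/Function.h"
    using namespace llvm;

    ScalarEvolution makeSE(Function &F, TargetLibraryInfo &TLI,
                           AssumptionCache &AC, DominatorTree &DT,
                           LoopInfo &LI) {
      // The object is movable, so it can be returned by value and owned
      // directly by a test fixture or an analysis result.
      return ScalarEvolution(F, TLI, AC, DT, LI);
    }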