From: Chandler Carruth
Date: Wed, 15 Jul 2015 08:53:29 +0000 (+0000)
Subject: [PM/AA] Fix *numerous* serious bugs in GlobalsModRef found by inspection.
X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=commitdiff_plain;h=7d51923226a143084b6555a2db275f9c99025acc

[PM/AA] Fix *numerous* serious bugs in GlobalsModRef found by inspection.

While we want to handle calls specially in this code because they should
have been modeled by the call graph analysis that precedes it, we should
*not* be re-implementing the predicates for whether an instruction reads
or writes memory. Those are already well defined. Notably, at least the
following issues seem to have been clearly missed before:

- Ordered atomic loads can "write" to memory by causing writes from
  other threads to become visible. Similarly for ordered atomic stores.
- AtomicRMW instructions quite obviously both read and write memory.
- AtomicCmpXchg instructions also read and write memory.
- Fences read and write memory.
- Invokes of intrinsics or memory allocation functions.

I don't have any test cases, and I suspect this has never really come up
in the real world. But there is no reason why it wouldn't, and doing this
the right way makes the code simpler.

While here, I've also tried to make the loops significantly simpler and
added helpful comments explaining what is going on.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@242281 91177308-0d34-0410-b5e6-96231b3b80d8
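[Editorial illustration, not part of the patch: the standalone harness below
is a sketch only, assuming an LLVM tree of this era; the IR, the function
name @f, and the printing loop are invented for demonstration. It shows that
Instruction's generic predicates already classify ordered atomics and fences
as touching memory, whereas the old hand-rolled checks in the diff treated a
non-volatile ordered atomic load as read-only and ignored fence and
atomicrmw entirely.]

    // Sketch only (assumes LLVM 3.7-era headers and libraries).
    #include "llvm/AsmParser/Parser.h"
    #include "llvm/IR/InstIterator.h"
    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/SourceMgr.h"
    #include "llvm/Support/raw_ostream.h"
    #include <memory>
    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      SMDiagnostic Err;
      // One instance of each case from the commit message's bullet list:
      // an ordered atomic load and store, an atomicrmw, and a fence.
      std::unique_ptr<Module> M = parseAssemblyString(
          "define void @f(i32* %p) {\n"
          "  %v = load atomic i32, i32* %p acquire, align 4\n"
          "  store atomic i32 0, i32* %p release, align 4\n"
          "  %o = atomicrmw add i32* %p, i32 1 seq_cst\n"
          "  fence seq_cst\n"
          "  ret void\n"
          "}\n",
          Err, Ctx);
      if (!M) {
        Err.print("modref-sketch", errs());
        return 1;
      }
      // Print what the generic predicates report for each instruction;
      // these are exactly the predicates the patched loop now consults.
      for (Instruction &I : inst_range(M->getFunction("f")))
        outs() << I.getOpcodeName()
               << ": mayRead=" << (I.mayReadFromMemory() ? "1" : "0")
               << " mayWrite=" << (I.mayWriteToMemory() ? "1" : "0") << "\n";
      return 0;
    }

Each of the four atomic/fence instructions should print mayRead=1
mayWrite=1, matching the bullets above; only ret reports neither.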
---

diff --git a/lib/Analysis/IPA/GlobalsModRef.cpp b/lib/Analysis/IPA/GlobalsModRef.cpp
index ae4eafbc77d..4d01d7f6a28 100644
--- a/lib/Analysis/IPA/GlobalsModRef.cpp
+++ b/lib/Analysis/IPA/GlobalsModRef.cpp
@@ -439,30 +439,39 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) {
     }
 
     // Scan the function bodies for explicit loads or stores.
-    for (unsigned i = 0, e = SCC.size(); i != e && FunctionEffect != ModRef;
-         ++i)
-      for (inst_iterator II = inst_begin(SCC[i]->getFunction()),
-                         E = inst_end(SCC[i]->getFunction());
-           II != E && FunctionEffect != ModRef; ++II)
-        if (LoadInst *LI = dyn_cast<LoadInst>(&*II)) {
+    for (auto *Node : SCC) {
+      if (FunctionEffect == ModRef)
+        break; // The mod/ref lattice saturates here.
+      for (Instruction &I : inst_range(Node->getFunction())) {
+        if (FunctionEffect == ModRef)
+          break; // The mod/ref lattice saturates here.
+
+        // We handle calls specially because the graph-relevant aspects are
+        // handled above.
+        if (auto CS = CallSite(&I)) {
+          if (isAllocationFn(&I, TLI) || isFreeCall(&I, TLI)) {
+            // FIXME: It is completely unclear why this is necessary and not
+            // handled by the above graph code.
+            FunctionEffect |= ModRef;
+          } else if (Function *Callee = CS.getCalledFunction()) {
+            // The callgraph doesn't include intrinsic calls.
+            if (Callee->isIntrinsic()) {
+              ModRefBehavior Behaviour =
+                  AliasAnalysis::getModRefBehavior(Callee);
+              FunctionEffect |= (Behaviour & ModRef);
+            }
+          }
+          continue;
+        }
+
+        // For all non-call instructions, use the primary predicates for
+        // whether they read or write memory.
+        if (I.mayReadFromMemory())
           FunctionEffect |= Ref;
-          if (LI->isVolatile())
-            // Volatile loads may have side-effects, so mark them as writing
-            // memory (for example, a flag inside the processor).
-            FunctionEffect |= Mod;
-        } else if (StoreInst *SI = dyn_cast<StoreInst>(&*II)) {
+        if (I.mayWriteToMemory())
           FunctionEffect |= Mod;
-          if (SI->isVolatile())
-            // Treat volatile stores as reading memory somewhere.
-            FunctionEffect |= Ref;
-        } else if (isAllocationFn(&*II, TLI) || isFreeCall(&*II, TLI)) {
-          FunctionEffect |= ModRef;
-        } else if (IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(&*II)) {
-          // The callgraph doesn't include intrinsic calls.
-          Function *Callee = Intrinsic->getCalledFunction();
-          ModRefBehavior Behaviour = AliasAnalysis::getModRefBehavior(Callee);
-          FunctionEffect |= (Behaviour & ModRef);
-        }
+      }
+    }
 
     if ((FunctionEffect & Mod) == 0)
       ++NumReadMemFunctions;
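[Editorial note on the two early breaks tagged "The mod/ref lattice
saturates here": FunctionEffect lives in a two-bit lattice, so once both
the Ref and Mod bits are set, no further |= can change the value. The
sketch below is a self-contained check of that invariant; the enum mirrors
AliasAnalysis::ModRefResult as defined in this era, and the tiny assert
harness is invented.]

    #include <cassert>

    // Mirrors AliasAnalysis::ModRefResult (LLVM 3.7 era).
    enum ModRefResult { NoModRef = 0, Ref = 1, Mod = 2, ModRef = Ref | Mod };

    int main() {
      int FunctionEffect = NoModRef;
      FunctionEffect |= Ref;            // saw a read
      FunctionEffect |= Mod;            // saw a write
      assert(FunctionEffect == ModRef); // top of the lattice
      // Saturation: further reads or writes cannot change the answer,
      // which is why the scan may break out of both loops early.
      FunctionEffect |= Ref;
      FunctionEffect |= Mod;
      assert(FunctionEffect == ModRef);
      return 0;
    }

Because the value can only climb the lattice, the breaks are purely an
early-exit optimization and never change the computed result.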