#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Function.h"
+#include "llvm/LLVMContext.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/PredIteratorCache.h"
char MemoryDependenceAnalysis::ID = 0;
// Register this pass...
-static RegisterPass<MemoryDependenceAnalysis> X("memdep",
- "Memory Dependence Analysis", false, true);
+INITIALIZE_PASS_BEGIN(MemoryDependenceAnalysis, "memdep",
+ "Memory Dependence Analysis", false, true)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_END(MemoryDependenceAnalysis, "memdep",
+ "Memory Dependence Analysis", false, true)
MemoryDependenceAnalysis::MemoryDependenceAnalysis()
-: FunctionPass(&ID), PredCache(0) {
+: FunctionPass(ID), PredCache(0) {
+ initializeMemoryDependenceAnalysisPass(*PassRegistry::getPassRegistry());
}
MemoryDependenceAnalysis::~MemoryDependenceAnalysis() {
}
Instruction *Inst = --ScanIt;
// If this inst is a memory op, get the pointer it accessed
- Value *Pointer = 0;
- uint64_t PointerSize = 0;
+ AliasAnalysis::Location Loc;
if (StoreInst *S = dyn_cast<StoreInst>(Inst)) {
- Pointer = S->getPointerOperand();
- PointerSize = AA->getTypeStoreSize(S->getOperand(0)->getType());
+ Loc = AliasAnalysis::Location(S->getPointerOperand(),
+ AA->getTypeStoreSize(S->getValueOperand()
+ ->getType()),
+ S->getMetadata(LLVMContext::MD_tbaa));
} else if (VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
- Pointer = V->getOperand(0);
- PointerSize = AA->getTypeStoreSize(V->getType());
- } else if (isFreeCall(Inst)) {
- Pointer = Inst->getOperand(1);
+ Loc = AliasAnalysis::Location(V->getPointerOperand(),
+ AA->getTypeStoreSize(V->getType()),
+ V->getMetadata(LLVMContext::MD_tbaa));
+ } else if (const CallInst *CI = isFreeCall(Inst)) {
// calls to free() erase the entire structure
- PointerSize = ~0ULL;
- } else if (isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) {
+ Loc = AliasAnalysis::Location(CI->getArgOperand(0));
+ } else if (CallSite InstCS = cast<Value>(Inst)) {
// Debug intrinsics don't cause dependences.
if (isa<DbgInfoIntrinsic>(Inst)) continue;
- CallSite InstCS = CallSite::get(Inst);
// If these two calls do not interfere, look past it.
switch (AA->getModRefInfo(CS, InstCS)) {
case AliasAnalysis::NoModRef:
- // If the two calls don't interact (e.g. InstCS is readnone) keep
- // scanning.
+ // If the two calls are the same, return InstCS as a Def, so that
+ // CS can be found redundant and eliminated.
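+ // E.g. given
+ //   X = strlen(P); memchr(...); Y = strlen(P);
+ // the second strlen finds the first as its Def, and GVN can then
+ // rewrite Y to X.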
+ if (isReadOnlyCall && InstCS.onlyReadsMemory() &&
+ CS.getInstruction()->isIdenticalToWhenDefined(Inst))
+ return MemDepResult::getDef(Inst);
+
+ // Otherwise if the two calls don't interact (e.g. InstCS is readnone)
+ // keep scanning.
continue;
- case AliasAnalysis::Ref:
- // If the two calls read the same memory locations and CS is a readonly
- // function, then we have two cases: 1) the calls may not interfere with
- // each other at all. 2) the calls may produce the same value. In case
- // #1 we want to ignore the values, in case #2, we want to return Inst
- // as a Def dependence. This allows us to CSE in cases like:
- // X = strlen(P);
- // memchr(...);
- // Y = strlen(P); // Y = X
- if (isReadOnlyCall) {
- if (CS.getCalledFunction() != 0 &&
- CS.getCalledFunction() == InstCS.getCalledFunction())
- return MemDepResult::getDef(Inst);
- // Ignore unrelated read/read call dependences.
- continue;
- }
- // FALL THROUGH
default:
return MemDepResult::getClobber(Inst);
}
continue;
}
- if (AA->getModRefInfo(CS, Pointer, PointerSize) != AliasAnalysis::NoModRef)
+ if (AA->getModRefInfo(CS, Loc) != AliasAnalysis::NoModRef)
return MemDepResult::getClobber(Inst);
}
}
/// getPointerDependencyFrom - Return the instruction on which a memory
-/// location depends. If isLoad is true, this routine ignore may-aliases with
-/// read-only operations.
+/// location depends. If isLoad is true, this routine ignores may-aliases with
+/// read-only operations. If isLoad is false, this routine ignores may-aliases
+/// with reads from read-only locations.
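+/// (For example, a query on a store skips preceding loads from constant
+/// globals, since the store cannot write such memory.)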
MemDepResult MemoryDependenceAnalysis::
-getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
+getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
BasicBlock::iterator ScanIt, BasicBlock *BB) {
- Value* invariantTag = 0;
+ Value *InvariantTag = 0;
// Walk backwards through the basic block, looking for dependencies.
while (ScanIt != BB->begin()) {
// If we're in an invariant region, no dependencies can be found before
// we pass an invariant-begin marker.
- if (invariantTag == Inst) {
- invariantTag = 0;
+ if (InvariantTag == Inst) {
+ InvariantTag = 0;
continue;
+ }
- // If we pass an invariant-end marker, then we've just entered an invariant
- // region and can start ignoring dependencies.
- } else if (IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst)) {
+ if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
+ // Debug intrinsics don't (and can't) cause dependences.
+ if (isa<DbgInfoIntrinsic>(II)) continue;
+
+ // If we pass an invariant-end marker, then we've just entered an
+ // invariant region and can start ignoring dependencies.
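+ // E.g. when the backwards scan crosses
+ //   call void @llvm.invariant.end({}* %tok, i64 4, i8* %p)
+ // and %p must-aliases the query location, %tok is recorded, and scanning
+ // skips everything up to the llvm.invariant.start call that defined %tok.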
if (II->getIntrinsicID() == Intrinsic::invariant_end) {
- uint64_t invariantSize = ~0ULL;
- if (ConstantInt* CI = dyn_cast<ConstantInt>(II->getOperand(2)))
- invariantSize = CI->getZExtValue();
-
+ // FIXME: This only considers queries directly on the invariant-tagged
+ // pointer, not on query pointers that are indexed off of them. It'd
+ // be nice to handle that at some point.
AliasAnalysis::AliasResult R =
- AA->alias(II->getOperand(3), invariantSize, MemPtr, MemSize);
- if (R == AliasAnalysis::MustAlias) {
- invariantTag = II->getOperand(1);
- continue;
- }
+ AA->alias(AliasAnalysis::Location(II->getArgOperand(2)), MemLoc);
+ if (R == AliasAnalysis::MustAlias)
+ InvariantTag = II->getArgOperand(0);
+
+ continue;
+ }
+
+ // If we reach a lifetime begin or end marker, then the query ends here
+ // because the value is undefined.
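+ // E.g. for a query on %p, scanning backwards past
+ //   call void @llvm.lifetime.start(i64 4, i8* %p)
+ // returns the marker itself as a Def: nothing above it can define the
+ // contents of %p.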
+ if (II->getIntrinsicID() == Intrinsic::lifetime_start) {
+ // FIXME: This only considers queries directly on the invariant-tagged
+ // pointer, not on query pointers that are indexed off of them. It'd
+ // be nice to handle that at some point.
+ AliasAnalysis::AliasResult R =
+ AA->alias(AliasAnalysis::Location(II->getArgOperand(1)), MemLoc);
+ if (R == AliasAnalysis::MustAlias)
+ return MemDepResult::getDef(II);
+ continue;
}
}
// If we're querying on a load and we're in an invariant region, we're done
// at this point. Nothing a load depends on can live in an invariant region.
- if (isLoad && invariantTag) continue;
-
- // Debug intrinsics don't cause dependences.
- if (isa<DbgInfoIntrinsic>(Inst)) continue;
+ //
+ // FIXME: this will prevent us from returning load/load must-aliases, so GVN
+ // won't remove redundant loads.
+ if (isLoad && InvariantTag) continue;
// Values depend on loads if the pointers are must aliased. This means that
// a load depends on another must aliased load from the same value.
if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
Value *Pointer = LI->getPointerOperand();
uint64_t PointerSize = AA->getTypeStoreSize(LI->getType());
+ MDNode *TBAATag = LI->getMetadata(LLVMContext::MD_tbaa);
+ AliasAnalysis::Location LoadLoc(Pointer, PointerSize, TBAATag);
// If we found a pointer, check if it could be the same as our pointer.
- AliasAnalysis::AliasResult R =
- AA->alias(Pointer, PointerSize, MemPtr, MemSize);
+ AliasAnalysis::AliasResult R = AA->alias(LoadLoc, MemLoc);
if (R == AliasAnalysis::NoAlias)
continue;
// May-alias loads don't depend on each other without a dependence.
if (isLoad && R == AliasAnalysis::MayAlias)
continue;
+
+ // Stores don't alias loads from read-only memory.
+ if (!isLoad && AA->pointsToConstantMemory(LoadLoc))
+ continue;
+
// Stores depend on may and must aliased loads, loads depend on must-alias
// loads.
return MemDepResult::getDef(Inst);
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
// There can't be stores to the value we care about inside an
// invariant region.
- if (invariantTag) continue;
+ if (InvariantTag) continue;
// If alias analysis can tell that this store is guaranteed to not modify
// the query pointer, ignore it. Use getModRefInfo to handle cases where
// the query pointer points to constant memory etc.
- if (AA->getModRefInfo(SI, MemPtr, MemSize) == AliasAnalysis::NoModRef)
+ if (AA->getModRefInfo(SI, MemLoc) == AliasAnalysis::NoModRef)
continue;
// Ok, this store might clobber the query pointer. Check to see if it is
// a must alias: in this case, we want to return this as a def.
Value *Pointer = SI->getPointerOperand();
uint64_t PointerSize = AA->getTypeStoreSize(SI->getOperand(0)->getType());
+ MDNode *TBAATag = SI->getMetadata(LLVMContext::MD_tbaa);
// If we found a pointer, check if it could be the same as our pointer.
AliasAnalysis::AliasResult R =
- AA->alias(Pointer, PointerSize, MemPtr, MemSize);
+ AA->alias(AliasAnalysis::Location(Pointer, PointerSize, TBAATag),
+ MemLoc);
if (R == AliasAnalysis::NoAlias)
continue;
// a subsequent bitcast of the malloc call result. There can be stores to
// the malloced memory between the malloc call and its bitcast uses, and we
// need to continue scanning until the malloc call.
- if (isa<AllocaInst>(Inst) || extractMallocCall(Inst)) {
- Value *AccessPtr = MemPtr->getUnderlyingObject();
+ if (isa<AllocaInst>(Inst) ||
+ (isa<CallInst>(Inst) && extractMallocCall(Inst))) {
+ const Value *AccessPtr = MemLoc.Ptr->getUnderlyingObject();
if (AccessPtr == Inst ||
AA->alias(Inst, 1, AccessPtr, 1) == AliasAnalysis::MustAlias)
return MemDepResult::getDef(Inst);
}
// See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
- switch (AA->getModRefInfo(Inst, MemPtr, MemSize)) {
+ switch (AA->getModRefInfo(Inst, MemLoc)) {
case AliasAnalysis::NoModRef:
// If the call has no effect on the queried pointer, just ignore it.
continue;
case AliasAnalysis::Mod:
// If we're in an invariant region, we can ignore calls that ONLY
// modify the pointer.
- if (invariantTag) continue;
+ if (InvariantTag) continue;
return MemDepResult::getClobber(Inst);
case AliasAnalysis::Ref:
// If the call is known to never store to the pointer, and if this is a
BasicBlock *QueryParent = QueryInst->getParent();
- Value *MemPtr = 0;
- uint64_t MemSize = 0;
+ AliasAnalysis::Location MemLoc;
// Do the scan.
if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
// previous instruction as a clobber.
if (SI->isVolatile())
LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
- else {
- MemPtr = SI->getPointerOperand();
- MemSize = AA->getTypeStoreSize(SI->getOperand(0)->getType());
- }
+ else
+ MemLoc = AliasAnalysis::Location(SI->getPointerOperand(),
+ AA->getTypeStoreSize(SI->getOperand(0)
+ ->getType()),
+ SI->getMetadata(LLVMContext::MD_tbaa));
} else if (LoadInst *LI = dyn_cast<LoadInst>(QueryInst)) {
// If this is a volatile load, don't mess around with it. Just return the
// previous instruction as a clobber.
if (LI->isVolatile())
LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
- else {
- MemPtr = LI->getPointerOperand();
- MemSize = AA->getTypeStoreSize(LI->getType());
- }
- } else if (isFreeCall(QueryInst)) {
- MemPtr = QueryInst->getOperand(1);
+ else
+ MemLoc = AliasAnalysis::Location(LI->getPointerOperand(),
+ AA->getTypeStoreSize(LI->getType()),
+ LI->getMetadata(LLVMContext::MD_tbaa));
+ } else if (const CallInst *CI = isFreeCall(QueryInst)) {
// calls to free() erase the entire structure, not just a field.
- MemSize = ~0UL;
+ MemLoc = AliasAnalysis::Location(CI->getArgOperand(0));
} else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
- CallSite QueryCS = CallSite::get(QueryInst);
- bool isReadOnly = AA->onlyReadsMemory(QueryCS);
- LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
- QueryParent);
+ int IntrinsicID = 0; // Intrinsic IDs start at 1.
+ IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst);
+ if (II)
+ IntrinsicID = II->getIntrinsicID();
+
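+ // These intrinsics take the access size and pointer as explicit
+ // arguments, e.g.
+ //   declare void @llvm.lifetime.start(i64 <size>, i8* nocapture <ptr>)
+ //   declare void @llvm.invariant.end({}* <start>, i64 <size>, i8* <ptr>)
+ // which fixes the argument indices used in the cases below.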
+ switch (IntrinsicID) {
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::invariant_start:
+ MemLoc = AliasAnalysis::Location(II->getArgOperand(1),
+ cast<ConstantInt>(II->getArgOperand(0))
+ ->getZExtValue(),
+ II->getMetadata(LLVMContext::MD_tbaa));
+ break;
+ case Intrinsic::invariant_end:
+ MemLoc = AliasAnalysis::Location(II->getArgOperand(2),
+ cast<ConstantInt>(II->getArgOperand(1))
+ ->getZExtValue(),
+ II->getMetadata(LLVMContext::MD_tbaa));
+ break;
+ default:
+ CallSite QueryCS(QueryInst);
+ bool isReadOnly = AA->onlyReadsMemory(QueryCS);
+ LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
+ QueryParent);
+ break;
+ }
} else {
// Non-memory instruction.
LocalCache = MemDepResult::getClobber(--BasicBlock::iterator(ScanPos));
}
// If we need to do a pointer scan, make it happen.
- if (MemPtr)
- LocalCache = getPointerDependencyFrom(MemPtr, MemSize,
- isa<LoadInst>(QueryInst),
- ScanPos, QueryParent);
+ if (MemLoc.Ptr) {
+ bool isLoad = !QueryInst->mayWriteToMemory();
+ if (IntrinsicInst *II = dyn_cast<MemoryUseIntrinsic>(QueryInst)) {
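+ // lifetime.end is considered to write memory, but for the purposes of
+ // this scan it behaves like a read of its location.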
+ isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_end;
+ }
+ LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos,
+ QueryParent);
+ }
// Remember the result!
if (Instruction *I = LocalCache.getInst())
if (Count == 0) return;
for (unsigned i = 1; i != unsigned(Count); ++i)
- assert(Cache[i-1] <= Cache[i] && "Cache isn't sorted!");
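+ // NonLocalDepEntry only provides operator<, so express "sorted" without
+ // relying on operator<=.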
+ assert(!(Cache[i] < Cache[i-1]) && "Cache isn't sorted!");
}
#endif
// Okay, we have a cache entry. If we know it is not dirty, just return it
// with no computation.
if (!CacheP.second) {
- NumCacheNonLocal++;
+ ++NumCacheNonLocal;
return Cache;
}
// determine what is dirty, seeding our initial DirtyBlocks worklist.
for (NonLocalDepInfo::iterator I = Cache.begin(), E = Cache.end();
I != E; ++I)
- if (I->second.isDirty())
- DirtyBlocks.push_back(I->first);
+ if (I->getResult().isDirty())
+ DirtyBlocks.push_back(I->getBB());
// Sort the cache so that we can do fast binary search lookups below.
std::sort(Cache.begin(), Cache.end());
BasicBlock *QueryBB = QueryCS.getInstruction()->getParent();
for (BasicBlock **PI = PredCache->GetPreds(QueryBB); *PI; ++PI)
DirtyBlocks.push_back(*PI);
- NumUncacheNonLocal++;
+ ++NumUncacheNonLocal;
}
// isReadonlyCall - If this is a read-only call, we can be more aggressive.
DEBUG(AssertSorted(Cache, NumSortedEntries));
NonLocalDepInfo::iterator Entry =
std::upper_bound(Cache.begin(), Cache.begin()+NumSortedEntries,
- std::make_pair(DirtyBB, MemDepResult()));
- if (Entry != Cache.begin() && prior(Entry)->first == DirtyBB)
+ NonLocalDepEntry(DirtyBB));
+ if (Entry != Cache.begin() && prior(Entry)->getBB() == DirtyBB)
--Entry;
- MemDepResult *ExistingResult = 0;
+ NonLocalDepEntry *ExistingResult = 0;
if (Entry != Cache.begin()+NumSortedEntries &&
- Entry->first == DirtyBB) {
+ Entry->getBB() == DirtyBB) {
// If we already have an entry, and if it isn't already dirty, the block
// is done.
- if (!Entry->second.isDirty())
+ if (!Entry->getResult().isDirty())
continue;
// Otherwise, remember this slot so we can update the value.
- ExistingResult = &Entry->second;
+ ExistingResult = &*Entry;
}
// If the dirty entry has a pointer, start scanning from it so we don't have
// to rescan the entire block.
BasicBlock::iterator ScanPos = DirtyBB->end();
if (ExistingResult) {
- if (Instruction *Inst = ExistingResult->getInst()) {
+ if (Instruction *Inst = ExistingResult->getResult().getInst()) {
ScanPos = Inst;
// We're removing QueryInst's use of Inst.
RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
// If we had a dirty entry for the block, update it. Otherwise, just add
// a new entry.
if (ExistingResult)
- *ExistingResult = Dep;
+ ExistingResult->setResult(Dep);
else
- Cache.push_back(std::make_pair(DirtyBB, Dep));
+ Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));
// If the block has a dependency (i.e. it isn't completely transparent to
// the value), remember the association!
/// own block.
///
void MemoryDependenceAnalysis::
-getNonLocalPointerDependency(Value *Pointer, bool isLoad, BasicBlock *FromBB,
- SmallVectorImpl<NonLocalDepEntry> &Result) {
- assert(isa<PointerType>(Pointer->getType()) &&
+getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad,
+ BasicBlock *FromBB,
+ SmallVectorImpl<NonLocalDepResult> &Result) {
+ assert(Loc.Ptr->getType()->isPointerTy() &&
"Can't get pointer deps of a non-pointer!");
Result.clear();
- // We know that the pointer value is live into FromBB find the def/clobbers
- // from presecessors.
- const Type *EltTy = cast<PointerType>(Pointer->getType())->getElementType();
- uint64_t PointeeSize = AA->getTypeStoreSize(EltTy);
+ PHITransAddr Address(const_cast<Value *>(Loc.Ptr), TD);
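+ // PHITransAddr allows the query address to be rewritten in terms of
+ // values available in each predecessor (e.g. across PHI nodes) as the
+ // search walks backwards through the CFG.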
// This is the set of blocks we've inspected, and the pointer we consider in
// each block. Because of critical edges, we currently bail out if querying
// a block with multiple different pointers. This can happen during PHI
// translation.
DenseMap<BasicBlock*, Value*> Visited;
- if (!getNonLocalPointerDepFromBB(Pointer, PointeeSize, isLoad, FromBB,
+ if (!getNonLocalPointerDepFromBB(Address, Loc, isLoad, FromBB,
Result, Visited, true))
return;
Result.clear();
- Result.push_back(std::make_pair(FromBB,
- MemDepResult::getClobber(FromBB->begin())));
+ Result.push_back(NonLocalDepResult(FromBB,
+ MemDepResult::getClobber(FromBB->begin()),
+ const_cast<Value *>(Loc.Ptr)));
}
/// GetNonLocalInfoForBlock - Compute the memdep value for BB with
/// lookup (which may use dirty cache info if available). If we do a lookup,
/// add the result to the cache.
MemDepResult MemoryDependenceAnalysis::
-GetNonLocalInfoForBlock(Value *Pointer, uint64_t PointeeSize,
+GetNonLocalInfoForBlock(const AliasAnalysis::Location &Loc,
bool isLoad, BasicBlock *BB,
NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
// the cache set. If so, find it.
NonLocalDepInfo::iterator Entry =
std::upper_bound(Cache->begin(), Cache->begin()+NumSortedEntries,
- std::make_pair(BB, MemDepResult()));
- if (Entry != Cache->begin() && prior(Entry)->first == BB)
+ NonLocalDepEntry(BB));
+ if (Entry != Cache->begin() && (Entry-1)->getBB() == BB)
--Entry;
- MemDepResult *ExistingResult = 0;
- if (Entry != Cache->begin()+NumSortedEntries && Entry->first == BB)
- ExistingResult = &Entry->second;
+ NonLocalDepEntry *ExistingResult = 0;
+ if (Entry != Cache->begin()+NumSortedEntries && Entry->getBB() == BB)
+ ExistingResult = &*Entry;
// If we have a cached entry, and it is non-dirty, use it as the value for
// this dependency.
- if (ExistingResult && !ExistingResult->isDirty()) {
+ if (ExistingResult && !ExistingResult->getResult().isDirty()) {
++NumCacheNonLocalPtr;
- return *ExistingResult;
+ return ExistingResult->getResult();
}
// Otherwise, we have to scan for the value. If we have a dirty cache
// entry, start scanning from its position, otherwise we scan from the end
// of the block.
BasicBlock::iterator ScanPos = BB->end();
- if (ExistingResult && ExistingResult->getInst()) {
- assert(ExistingResult->getInst()->getParent() == BB &&
+ if (ExistingResult && ExistingResult->getResult().getInst()) {
+ assert(ExistingResult->getResult().getInst()->getParent() == BB &&
"Instruction invalidated?");
++NumCacheDirtyNonLocalPtr;
- ScanPos = ExistingResult->getInst();
+ ScanPos = ExistingResult->getResult().getInst();
// Eliminating the dirty entry from 'Cache', so update the reverse info.
- ValueIsLoadPair CacheKey(Pointer, isLoad);
+ ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
} else {
++NumUncacheNonLocalPtr;
}
// Scan the block for the dependency.
- MemDepResult Dep = getPointerDependencyFrom(Pointer, PointeeSize, isLoad,
- ScanPos, BB);
+ MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB);
// If we had a dirty entry for the block, update it. Otherwise, just add
// a new entry.
if (ExistingResult)
- *ExistingResult = Dep;
+ ExistingResult->setResult(Dep);
else
- Cache->push_back(std::make_pair(BB, Dep));
+ Cache->push_back(NonLocalDepEntry(BB, Dep));
// If the block has a dependency (i.e. it isn't completely transparent to
// the value), remember the reverse association because we just added it
// update MemDep when we remove instructions.
Instruction *Inst = Dep.getInst();
assert(Inst && "Didn't depend on anything?");
- ValueIsLoadPair CacheKey(Pointer, isLoad);
+ ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
return Dep;
}
break;
case 2: {
// Two new entries, insert the last one into place.
- MemoryDependenceAnalysis::NonLocalDepEntry Val = Cache.back();
+ NonLocalDepEntry Val = Cache.back();
Cache.pop_back();
MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
std::upper_bound(Cache.begin(), Cache.end()-1, Val);
case 1:
// One new entry, Just insert the new value at the appropriate position.
if (Cache.size() != 1) {
- MemoryDependenceAnalysis::NonLocalDepEntry Val = Cache.back();
+ NonLocalDepEntry Val = Cache.back();
Cache.pop_back();
MemoryDependenceAnalysis::NonLocalDepInfo::iterator Entry =
std::upper_bound(Cache.begin(), Cache.end(), Val);
}
}
-
/// getNonLocalPointerDepFromBB - Perform a dependency query based on
/// pointer/pointeesize starting at the end of StartBB. Add any clobber/def
/// results to the results vector and keep track of which blocks are visited in
/// not compute dependence information for some reason. This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceAnalysis::
-getNonLocalPointerDepFromBB(Value *Pointer, uint64_t PointeeSize,
+getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
+ const AliasAnalysis::Location &Loc,
bool isLoad, BasicBlock *StartBB,
- SmallVectorImpl<NonLocalDepEntry> &Result,
+ SmallVectorImpl<NonLocalDepResult> &Result,
DenseMap<BasicBlock*, Value*> &Visited,
bool SkipFirstBlock) {
// Look up the cached info for Pointer.
- ValueIsLoadPair CacheKey(Pointer, isLoad);
-
- std::pair<BBSkipFirstBlockPair, NonLocalDepInfo> *CacheInfo =
- &NonLocalPointerDeps[CacheKey];
- NonLocalDepInfo *Cache = &CacheInfo->second;
+ ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);
+
+ // Set up a temporary NLPI value. If the map doesn't yet have an entry for
+ // CacheKey, this value will be inserted as the associated value. Otherwise,
+ // it'll be ignored, and we'll have to check to see if the cached size and
+ // tbaa tag are consistent with the current query.
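+ // E.g. if a 4-byte query on a pointer was cached and an 8-byte query on
+ // the same pointer arrives, the query below is restarted with the larger
+ // size so that a single conservative cache entry covers both.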
+ NonLocalPointerInfo InitialNLPI;
+ InitialNLPI.Size = Loc.Size;
+ InitialNLPI.TBAATag = Loc.TBAATag;
+
+ // Get the NLPI for CacheKey, inserting one into the map if it doesn't
+ // already have one.
+ std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
+ NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
+ NonLocalPointerInfo *CacheInfo = &Pair.first->second;
+
+ if (!Pair.second) {
+ // If this query's Size is inconsistent with the cached one, take the
+ // maximum size and restart the query.
+ if (CacheInfo->Size != Loc.Size) {
+ CacheInfo->Size = std::max(CacheInfo->Size, Loc.Size);
+ return getNonLocalPointerDepFromBB(Pointer,
+ Loc.getWithNewSize(CacheInfo->Size),
+ isLoad, StartBB, Result, Visited,
+ SkipFirstBlock);
+ }
+
+ // If this query's TBAATag is inconsistent with the cached one, discard the
+ // tag and restart the query.
+ if (CacheInfo->TBAATag != Loc.TBAATag) {
+ CacheInfo->TBAATag = 0;
+ return getNonLocalPointerDepFromBB(Pointer, Loc.getWithoutTBAATag(),
+ isLoad, StartBB, Result, Visited,
+ SkipFirstBlock);
+ }
+ }
+
+ NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;
// If we have valid cached information for exactly the block we are
// investigating, just return it with no recomputation.
- if (CacheInfo->first == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
+ if (CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
// We have a fully cached result for this query then we can just return the
// cached results and populate the visited set. However, we have to verify
// that we don't already have conflicting results for these blocks. Check
if (!Visited.empty()) {
for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
I != E; ++I) {
- DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->first);
- if (VI == Visited.end() || VI->second == Pointer) continue;
+ DenseMap<BasicBlock*, Value*>::iterator VI = Visited.find(I->getBB());
+ if (VI == Visited.end() || VI->second == Pointer.getAddr())
+ continue;
// We have a pointer mismatch in a block. Just return clobber, saying
// that something was clobbered in this result. We could also do a
}
}
+ Value *Addr = Pointer.getAddr();
for (NonLocalDepInfo::iterator I = Cache->begin(), E = Cache->end();
I != E; ++I) {
- Visited.insert(std::make_pair(I->first, Pointer));
- if (!I->second.isNonLocal())
- Result.push_back(*I);
+ Visited.insert(std::make_pair(I->getBB(), Addr));
+ if (!I->getResult().isNonLocal())
+ Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(), Addr));
}
++NumCacheCompleteNonLocalPtr;
return false;
// than its valid cache info. If empty, the result will be valid cache info,
// otherwise it isn't.
if (Cache->empty())
- CacheInfo->first = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
- else
- CacheInfo->first = BBSkipFirstBlockPair();
+ CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
+ else {
+ CacheInfo->Pair = BBSkipFirstBlockPair();
+ CacheInfo->Size = 0;
+ CacheInfo->TBAATag = 0;
+ }
SmallVector<BasicBlock*, 32> Worklist;
Worklist.push_back(StartBB);
// Get the dependency info for Pointer in BB. If we have cached
// information, we will use it, otherwise we compute it.
DEBUG(AssertSorted(*Cache, NumSortedEntries));
- MemDepResult Dep = GetNonLocalInfoForBlock(Pointer, PointeeSize, isLoad,
- BB, Cache, NumSortedEntries);
+ MemDepResult Dep = GetNonLocalInfoForBlock(Loc, isLoad, BB, Cache,
+ NumSortedEntries);
// If we got a Def or Clobber, add this to the list of results.
if (!Dep.isNonLocal()) {
- Result.push_back(NonLocalDepEntry(BB, Dep));
+ Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
continue;
}
}
// If 'Pointer' is an instruction defined in this block, then we need to do
// phi translation to change it into a value live in the predecessor block.
- // If phi translation fails, then we can't continue dependence analysis.
- Instruction *PtrInst = dyn_cast<Instruction>(Pointer);
- bool NeedsPHITranslation = PtrInst && PtrInst->getParent() == BB;
-
- // If no PHI translation is needed, just add all the predecessors of this
- // block to scan them as well.
- if (!NeedsPHITranslation) {
+ // If not, we just add the predecessors to the worklist and scan them with
+ // the same Pointer.
+ if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
SkipFirstBlock = false;
for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
// Verify that we haven't looked at this block yet.
std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
- InsertRes = Visited.insert(std::make_pair(*PI, Pointer));
+ InsertRes = Visited.insert(std::make_pair(*PI, Pointer.getAddr()));
if (InsertRes.second) {
// First time we've looked at *PI.
Worklist.push_back(*PI);
// If we have seen this block before, but it was with a different
// pointer then we have a phi translation failure and we have to treat
// this as a clobber.
- if (InsertRes.first->second != Pointer)
+ if (InsertRes.first->second != Pointer.getAddr())
goto PredTranslationFailure;
}
continue;
}
- // If we do need to do phi translation, then there are a bunch of different
- // cases, because we have to find a Value* live in the predecessor block. We
- // know that PtrInst is defined in this block at least.
-
+ // We do need to do phi translation; if we know ahead of time that we
+ // can't phi translate this value, don't even try.
+ if (!Pointer.IsPotentiallyPHITranslatable())
+ goto PredTranslationFailure;
+
// We may have added values to the cache list before this PHI translation.
// If so, we haven't done anything to ensure that the cache remains sorted.
// Sort it now (if needed) so that recursive invocations of
SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
NumSortedEntries = Cache->size();
}
+ Cache = 0;
- // If this is directly a PHI node, just use the incoming values for each
- // pred as the phi translated version.
- if (PHINode *PtrPHI = dyn_cast<PHINode>(PtrInst)) {
- Cache = 0;
+ for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
+ BasicBlock *Pred = *PI;
- for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
- BasicBlock *Pred = *PI;
- Value *PredPtr = PtrPHI->getIncomingValueForBlock(Pred);
-
- // Check to see if we have already visited this pred block with another
- // pointer. If so, we can't do this lookup. This failure can occur
- // with PHI translation when a critical edge exists and the PHI node in
- // the successor translates to a pointer value different than the
- // pointer the block was first analyzed with.
- std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
- InsertRes = Visited.insert(std::make_pair(Pred, PredPtr));
+ // Get the PHI translated pointer in this predecessor. This can fail if
+ // the pointer is not translatable, in which case getAddr() returns null.
+ PHITransAddr PredPointer(Pointer);
+ PredPointer.PHITranslateValue(BB, Pred, 0);
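+ // (The trailing null is PHITranslateValue's optional DominatorTree
+ // argument; no dominator tree is available here.)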
- if (!InsertRes.second) {
- // If the predecessor was visited with PredPtr, then we already did
- // the analysis and can ignore it.
- if (InsertRes.first->second == PredPtr)
- continue;
-
- // Otherwise, the block was previously analyzed with a different
- // pointer. We can't represent the result of this case, so we just
- // treat this as a phi translation failure.
- goto PredTranslationFailure;
- }
+ Value *PredPtrVal = PredPointer.getAddr();
+
+ // Check to see if we have already visited this pred block with another
+ // pointer. If so, we can't do this lookup. This failure can occur
+ // with PHI translation when a critical edge exists and the PHI node in
+ // the successor translates to a pointer value different than the
+ // pointer the block was first analyzed with.
+ std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
+ InsertRes = Visited.insert(std::make_pair(Pred, PredPtrVal));
- // FIXME: it is entirely possible that PHI translating will end up with
- // the same value. Consider PHI translating something like:
- // X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need*
- // to recurse here, pedantically speaking.
+ if (!InsertRes.second) {
+ // If the predecessor was visited with PredPtr, then we already did
+ // the analysis and can ignore it.
+ if (InsertRes.first->second == PredPtrVal)
+ continue;
- // If we have a problem phi translating, fall through to the code below
- // to handle the failure condition.
- if (getNonLocalPointerDepFromBB(PredPtr, PointeeSize, isLoad, Pred,
- Result, Visited))
- goto PredTranslationFailure;
+ // Otherwise, the block was previously analyzed with a different
+ // pointer. We can't represent the result of this case, so we just
+ // treat this as a phi translation failure.
+ goto PredTranslationFailure;
}
- // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
- CacheInfo = &NonLocalPointerDeps[CacheKey];
- Cache = &CacheInfo->second;
- NumSortedEntries = Cache->size();
+ // If PHI translation was unable to find an available pointer in this
+ // predecessor, then we have to assume that the pointer is clobbered in
+ // that predecessor. We can still do PRE of the load, which would insert
+ // a computation of the pointer in this predecessor.
+ if (PredPtrVal == 0) {
+ // Add the entry to the Result list.
+ NonLocalDepResult Entry(Pred,
+ MemDepResult::getClobber(Pred->getTerminator()),
+ PredPtrVal);
+ Result.push_back(Entry);
+
+ // Since we had a phi translation failure, the cache for CacheKey won't
+ // include all of the entries that we need to immediately satisfy future
+ // queries. Mark this in NonLocalPointerDeps by setting the
+ // BBSkipFirstBlockPair pointer to null. This requires reuse of the
+ // cached value to do more work but not miss the phi trans failure.
+ NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
+ NLPI.Pair = BBSkipFirstBlockPair();
+ NLPI.Size = 0;
+ NLPI.TBAATag = 0;
+ continue;
+ }
+
+ // FIXME: it is entirely possible that PHI translating will end up with
+ // the same value. Consider PHI translating something like:
+ // X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need*
+ // to recurse here, pedantically speaking.
- // Since we did phi translation, the "Cache" set won't contain all of the
- // results for the query. This is ok (we can still use it to accelerate
- // specific block queries) but we can't do the fastpath "return all
- // results from the set" Clear out the indicator for this.
- CacheInfo->first = BBSkipFirstBlockPair();
- SkipFirstBlock = false;
- continue;
+ // If we have a problem phi translating, fall through to the code below
+ // to handle the failure condition.
+ if (getNonLocalPointerDepFromBB(PredPointer,
+ Loc.getWithNewPtr(PredPointer.getAddr()),
+ isLoad, Pred,
+ Result, Visited))
+ goto PredTranslationFailure;
}
- // TODO: BITCAST, GEP.
+ // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
+ CacheInfo = &NonLocalPointerDeps[CacheKey];
+ Cache = &CacheInfo->NonLocalDeps;
+ NumSortedEntries = Cache->size();
- // cerr << "MEMDEP: Could not PHI translate: " << *Pointer;
- // if (isa<BitCastInst>(PtrInst) || isa<GetElementPtrInst>(PtrInst))
- // cerr << "OP:\t\t\t\t" << *PtrInst->getOperand(0);
+ // Since we did phi translation, the "Cache" set won't contain all of the
+ // results for the query. This is ok (we can still use it to accelerate
+ // specific block queries) but we can't do the fastpath "return all
+ // results from the set" Clear out the indicator for this.
+ CacheInfo->Pair = BBSkipFirstBlockPair();
+ CacheInfo->Size = 0;
+ CacheInfo->TBAATag = 0;
+ SkipFirstBlock = false;
+ continue;
+
PredTranslationFailure:
if (Cache == 0) {
// Refresh the CacheInfo/Cache pointer if it got invalidated.
CacheInfo = &NonLocalPointerDeps[CacheKey];
- Cache = &CacheInfo->second;
+ Cache = &CacheInfo->NonLocalDeps;
NumSortedEntries = Cache->size();
}
- // Since we did phi translation, the "Cache" set won't contain all of the
+ // Since we failed phi translation, the "Cache" set won't contain all of the
// results for the query. This is ok (we can still use it to accelerate
// specific block queries) but we can't do the fastpath "return all
- // results from the set" Clear out the indicator for this.
- CacheInfo->first = BBSkipFirstBlockPair();
+ // results from the set". Clear out the indicator for this.
+ CacheInfo->Pair = BBSkipFirstBlockPair();
+ CacheInfo->Size = 0;
+ CacheInfo->TBAATag = 0;
// If *nothing* works, mark the pointer as being clobbered by the first
// instruction in this block.
for (NonLocalDepInfo::reverse_iterator I = Cache->rbegin(); ; ++I) {
assert(I != Cache->rend() && "Didn't find current block??");
- if (I->first != BB)
+ if (I->getBB() != BB)
continue;
- assert(I->second.isNonLocal() &&
+ assert(I->getResult().isNonLocal() &&
"Should only be here with transparent block");
- I->second = MemDepResult::getClobber(BB->begin());
+ I->setResult(MemDepResult::getClobber(BB->begin()));
ReverseNonLocalPtrDeps[BB->begin()].insert(CacheKey);
- Result.push_back(*I);
+ Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(),
+ Pointer.getAddr()));
break;
}
}
// Remove all of the entries in the BB->val map. This involves removing
// instructions from the reverse map.
- NonLocalDepInfo &PInfo = It->second.second;
+ NonLocalDepInfo &PInfo = It->second.NonLocalDeps;
for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
- Instruction *Target = PInfo[i].second.getInst();
+ Instruction *Target = PInfo[i].getResult().getInst();
if (Target == 0) continue; // Ignore non-local dep results.
- assert(Target->getParent() == PInfo[i].first);
+ assert(Target->getParent() == PInfo[i].getBB());
// Eliminating the dirty entry from 'Cache', so update the reverse info.
RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
/// in more places that cached info does not necessarily keep.
void MemoryDependenceAnalysis::invalidateCachedPointerInfo(Value *Ptr) {
// If Ptr isn't really a pointer, just ignore it.
- if (!isa<PointerType>(Ptr->getType())) return;
+ if (!Ptr->getType()->isPointerTy()) return;
// Flush store info for the pointer.
RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
// Flush load info for the pointer.
RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
}
+/// invalidateCachedPredecessors - Clear the PredIteratorCache info.
+/// This needs to be done when the CFG changes, e.g., due to splitting
+/// critical edges.
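+/// For example, after a pass splits a critical edge, the cached predecessor
+/// lists no longer match the CFG, so it should call this before issuing
+/// further non-local queries.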
+void MemoryDependenceAnalysis::invalidateCachedPredecessors() {
+ PredCache->clear();
+}
+
/// removeInstruction - Remove an instruction from the dependence analysis,
/// updating the dependence of instructions that previously depended on it.
/// This method attempts to keep the cache coherent using the reverse map.
NonLocalDepInfo &BlockMap = NLDI->second.first;
for (NonLocalDepInfo::iterator DI = BlockMap.begin(), DE = BlockMap.end();
DI != DE; ++DI)
- if (Instruction *Inst = DI->second.getInst())
+ if (Instruction *Inst = DI->getResult().getInst())
RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
NonLocalDeps.erase(NLDI);
}
// Remove it from both the load info and the store info. The instruction
// can't be in either of these maps if it is non-pointer.
- if (isa<PointerType>(RemInst->getType())) {
+ if (RemInst->getType()->isPointerTy()) {
RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
}
for (NonLocalDepInfo::iterator DI = INLD.first.begin(),
DE = INLD.first.end(); DI != DE; ++DI) {
- if (DI->second.getInst() != RemInst) continue;
+ if (DI->getResult().getInst() != RemInst) continue;
// Convert to a dirty entry for the subsequent instruction.
- DI->second = NewDirtyVal;
+ DI->setResult(NewDirtyVal);
if (Instruction *NextI = NewDirtyVal.getInst())
ReverseDepsToAdd.push_back(std::make_pair(NextI, *I));
assert(P.getPointer() != RemInst &&
"Already removed NonLocalPointerDeps info for RemInst");
- NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].second;
+ NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;
// The cache is not valid for any specific block anymore.
- NonLocalPointerDeps[P].first = BBSkipFirstBlockPair();
+ NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();
+ NonLocalPointerDeps[P].Size = 0;
+ NonLocalPointerDeps[P].TBAATag = 0;
// Update any entries for RemInst to use the instruction after it.
for (NonLocalDepInfo::iterator DI = NLPDI.begin(), DE = NLPDI.end();
DI != DE; ++DI) {
- if (DI->second.getInst() != RemInst) continue;
+ if (DI->getResult().getInst() != RemInst) continue;
// Convert to a dirty entry for the subsequent instruction.
- DI->second = NewDirtyVal;
+ DI->setResult(NewDirtyVal);
if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
for (CachedNonLocalPointerInfo::const_iterator I =NonLocalPointerDeps.begin(),
E = NonLocalPointerDeps.end(); I != E; ++I) {
assert(I->first.getPointer() != D && "Inst occurs in NLPD map key");
- const NonLocalDepInfo &Val = I->second.second;
+ const NonLocalDepInfo &Val = I->second.NonLocalDeps;
for (NonLocalDepInfo::const_iterator II = Val.begin(), E = Val.end();
II != E; ++II)
- assert(II->second.getInst() != D && "Inst occurs as NLPD value");
+ assert(II->getResult().getInst() != D && "Inst occurs as NLPD value");
}
for (NonLocalDepMapType::const_iterator I = NonLocalDeps.begin(),
const PerInstNLInfo &INLD = I->second;
for (NonLocalDepInfo::const_iterator II = INLD.first.begin(),
EE = INLD.first.end(); II != EE; ++II)
- assert(II->second.getInst() != D && "Inst occurs in data structures");
+ assert(II->getResult().getInst() != D && "Inst occurs in data structures");
}
for (ReverseDepMapType::const_iterator I = ReverseLocalDeps.begin(),