// Default chaining methods
//===----------------------------------------------------------------------===//
-AliasAnalysis::AliasResult
-AliasAnalysis::alias(const Location &LocA, const Location &LocB) {
+AliasResult AliasAnalysis::alias(const MemoryLocation &LocA,
+ const MemoryLocation &LocB) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
return AA->alias(LocA, LocB);
}
-bool AliasAnalysis::pointsToConstantMemory(const Location &Loc,
+bool AliasAnalysis::pointsToConstantMemory(const MemoryLocation &Loc,
bool OrLocal) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
return AA->pointsToConstantMemory(Loc, OrLocal);
}
-AliasAnalysis::Location
-AliasAnalysis::getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
- AliasAnalysis::ModRefResult &Mask) {
+ModRefInfo AliasAnalysis::getArgModRefInfo(ImmutableCallSite CS,
+ unsigned ArgIdx) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- return AA->getArgLocation(CS, ArgIdx, Mask);
-}
-
-void AliasAnalysis::deleteValue(Value *V) {
- assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- AA->deleteValue(V);
-}
-
-void AliasAnalysis::copyValue(Value *From, Value *To) {
- assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- AA->copyValue(From, To);
-}
-
-void AliasAnalysis::addEscapingUse(Use &U) {
- assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- AA->addEscapingUse(U);
+ return AA->getArgModRefInfo(CS, ArgIdx);
+}
+
+ModRefInfo AliasAnalysis::getModRefInfo(Instruction *I,
+ ImmutableCallSite Call) {
+ // We may have two calls
+ if (auto CS = ImmutableCallSite(I)) {
+ // Check if the two calls modify the same memory
+ return getModRefInfo(Call, CS);
+ } else {
+ // Otherwise, check if the call modifies or references the
+ // location this memory access defines. The best we can say
+ // is that if the call references what this instruction
+ // defines, it must be clobbered by this location.
+ const MemoryLocation DefLoc = MemoryLocation::get(I);
+ if (getModRefInfo(Call, DefLoc) != MRI_NoModRef)
+ return MRI_ModRef;
+ }
+ return MRI_NoModRef;
}
-
-AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(ImmutableCallSite CS,
- const Location &Loc) {
+ModRefInfo AliasAnalysis::getModRefInfo(ImmutableCallSite CS,
+ const MemoryLocation &Loc) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- ModRefBehavior MRB = getModRefBehavior(CS);
- if (MRB == DoesNotAccessMemory)
- return NoModRef;
+ auto MRB = getModRefBehavior(CS);
+ if (MRB == FMRB_DoesNotAccessMemory)
+ return MRI_NoModRef;
- ModRefResult Mask = ModRef;
+ ModRefInfo Mask = MRI_ModRef;
if (onlyReadsMemory(MRB))
- Mask = Ref;
+ Mask = MRI_Ref;
if (onlyAccessesArgPointees(MRB)) {
bool doesAlias = false;
- ModRefResult AllArgsMask = NoModRef;
+ ModRefInfo AllArgsMask = MRI_NoModRef;
if (doesAccessArgPointees(MRB)) {
for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
AI != AE; ++AI) {
const Value *Arg = *AI;
if (!Arg->getType()->isPointerTy())
continue;
- ModRefResult ArgMask;
- Location CSLoc =
- getArgLocation(CS, (unsigned) std::distance(CS.arg_begin(), AI),
- ArgMask);
- if (!isNoAlias(CSLoc, Loc)) {
+ unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
+ MemoryLocation ArgLoc =
+ MemoryLocation::getForArgument(CS, ArgIdx, *TLI);
+ if (!isNoAlias(ArgLoc, Loc)) {
+ ModRefInfo ArgMask = getArgModRefInfo(CS, ArgIdx);
doesAlias = true;
- AllArgsMask = ModRefResult(AllArgsMask | ArgMask);
+ AllArgsMask = ModRefInfo(AllArgsMask | ArgMask);
}
}
}
if (!doesAlias)
- return NoModRef;
- Mask = ModRefResult(Mask & AllArgsMask);
+ return MRI_NoModRef;
+ Mask = ModRefInfo(Mask & AllArgsMask);
}
// If Loc is a constant memory location, the call definitely could not
// modify the memory location.
- if ((Mask & Mod) && pointsToConstantMemory(Loc))
- Mask = ModRefResult(Mask & ~Mod);
+ if ((Mask & MRI_Mod) && pointsToConstantMemory(Loc))
+ Mask = ModRefInfo(Mask & ~MRI_Mod);
// If this is the end of the chain, don't forward.
if (!AA) return Mask;
// Otherwise, fall back to the next AA in the chain. But we can merge
// in any mask we've managed to compute.
- return ModRefResult(AA->getModRefInfo(CS, Loc) & Mask);
+ return ModRefInfo(AA->getModRefInfo(CS, Loc) & Mask);
}
-AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2) {
+ModRefInfo AliasAnalysis::getModRefInfo(ImmutableCallSite CS1,
+ ImmutableCallSite CS2) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
// If CS1 or CS2 are readnone, they don't interact.
- ModRefBehavior CS1B = getModRefBehavior(CS1);
- if (CS1B == DoesNotAccessMemory) return NoModRef;
+ auto CS1B = getModRefBehavior(CS1);
+ if (CS1B == FMRB_DoesNotAccessMemory)
+ return MRI_NoModRef;
- ModRefBehavior CS2B = getModRefBehavior(CS2);
- if (CS2B == DoesNotAccessMemory) return NoModRef;
+ auto CS2B = getModRefBehavior(CS2);
+ if (CS2B == FMRB_DoesNotAccessMemory)
+ return MRI_NoModRef;
// If they both only read from memory, there is no dependence.
if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
- return NoModRef;
+ return MRI_NoModRef;
- AliasAnalysis::ModRefResult Mask = ModRef;
+ ModRefInfo Mask = MRI_ModRef;
// If CS1 only reads memory, the only dependence on CS2 can be
// from CS1 reading memory written by CS2.
if (onlyReadsMemory(CS1B))
- Mask = ModRefResult(Mask & Ref);
+ Mask = ModRefInfo(Mask & MRI_Ref);
// If CS2 only access memory through arguments, accumulate the mod/ref
// information from CS1's references to the memory referenced by
// CS2's arguments.
if (onlyAccessesArgPointees(CS2B)) {
- AliasAnalysis::ModRefResult R = NoModRef;
+ ModRefInfo R = MRI_NoModRef;
if (doesAccessArgPointees(CS2B)) {
for (ImmutableCallSite::arg_iterator
I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
const Value *Arg = *I;
if (!Arg->getType()->isPointerTy())
continue;
- ModRefResult ArgMask;
- Location CS2Loc =
- getArgLocation(CS2, (unsigned) std::distance(CS2.arg_begin(), I),
- ArgMask);
- // ArgMask indicates what CS2 might do to CS2Loc, and the dependence of
+ unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I);
+ auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, *TLI);
+
+ // ArgMask indicates what CS2 might do to CS2ArgLoc, and the dependence of
// CS1 on that location is the inverse.
- if (ArgMask == Mod)
- ArgMask = ModRef;
- else if (ArgMask == Ref)
- ArgMask = Mod;
+ ModRefInfo ArgMask = getArgModRefInfo(CS2, CS2ArgIdx);
+ if (ArgMask == MRI_Mod)
+ ArgMask = MRI_ModRef;
+ else if (ArgMask == MRI_Ref)
+ ArgMask = MRI_Mod;
- R = ModRefResult((R | (getModRefInfo(CS1, CS2Loc) & ArgMask)) & Mask);
+ R = ModRefInfo((R | (getModRefInfo(CS1, CS2ArgLoc) & ArgMask)) & Mask);
if (R == Mask)
break;
}
// If CS1 only accesses memory through arguments, check if CS2 references
// any of the memory referenced by CS1's arguments. If not, return NoModRef.
if (onlyAccessesArgPointees(CS1B)) {
- AliasAnalysis::ModRefResult R = NoModRef;
+ ModRefInfo R = MRI_NoModRef;
if (doesAccessArgPointees(CS1B)) {
for (ImmutableCallSite::arg_iterator
I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
const Value *Arg = *I;
if (!Arg->getType()->isPointerTy())
continue;
- ModRefResult ArgMask;
- Location CS1Loc = getArgLocation(
- CS1, (unsigned)std::distance(CS1.arg_begin(), I), ArgMask);
- // ArgMask indicates what CS1 might do to CS1Loc; if CS1 might Mod
- // CS1Loc, then we care about either a Mod or a Ref by CS2. If CS1
+ unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I);
+ auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, *TLI);
+
+ // ArgMask indicates what CS1 might do to CS1ArgLoc; if CS1 might Mod
+ // CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If CS1
// might Ref, then we care only about a Mod by CS2.
- ModRefResult ArgR = getModRefInfo(CS2, CS1Loc);
- if (((ArgMask & Mod) != NoModRef && (ArgR & ModRef) != NoModRef) ||
- ((ArgMask & Ref) != NoModRef && (ArgR & Mod) != NoModRef))
- R = ModRefResult((R | ArgMask) & Mask);
+ ModRefInfo ArgMask = getArgModRefInfo(CS1, CS1ArgIdx);
+ ModRefInfo ArgR = getModRefInfo(CS2, CS1ArgLoc);
+ if (((ArgMask & MRI_Mod) != MRI_NoModRef &&
+ (ArgR & MRI_ModRef) != MRI_NoModRef) ||
+ ((ArgMask & MRI_Ref) != MRI_NoModRef &&
+ (ArgR & MRI_Mod) != MRI_NoModRef))
+ R = ModRefInfo((R | ArgMask) & Mask);
if (R == Mask)
break;
// Otherwise, fall back to the next AA in the chain. But we can merge
// in any mask we've managed to compute.
- return ModRefResult(AA->getModRefInfo(CS1, CS2) & Mask);
+ return ModRefInfo(AA->getModRefInfo(CS1, CS2) & Mask);
}
-AliasAnalysis::ModRefBehavior
-AliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
+FunctionModRefBehavior AliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
- ModRefBehavior Min = UnknownModRefBehavior;
+ auto Min = FMRB_UnknownModRefBehavior;
// Call back into the alias analysis with the other form of getModRefBehavior
// to see if it can give a better response.
// Otherwise, fall back to the next AA in the chain. But we can merge
// in any result we've managed to compute.
- return ModRefBehavior(AA->getModRefBehavior(CS) & Min);
+ return FunctionModRefBehavior(AA->getModRefBehavior(CS) & Min);
}
-AliasAnalysis::ModRefBehavior
-AliasAnalysis::getModRefBehavior(const Function *F) {
+FunctionModRefBehavior AliasAnalysis::getModRefBehavior(const Function *F) {
assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
return AA->getModRefBehavior(F);
}
// AliasAnalysis non-virtual helper method implementation
//===----------------------------------------------------------------------===//
-AliasAnalysis::Location AliasAnalysis::getLocation(const LoadInst *LI) {
- AAMDNodes AATags;
- LI->getAAMetadata(AATags);
-
- return Location(LI->getPointerOperand(),
- getTypeStoreSize(LI->getType()), AATags);
-}
-
-AliasAnalysis::Location AliasAnalysis::getLocation(const StoreInst *SI) {
- AAMDNodes AATags;
- SI->getAAMetadata(AATags);
-
- return Location(SI->getPointerOperand(),
- getTypeStoreSize(SI->getValueOperand()->getType()), AATags);
-}
-
-AliasAnalysis::Location AliasAnalysis::getLocation(const VAArgInst *VI) {
- AAMDNodes AATags;
- VI->getAAMetadata(AATags);
-
- return Location(VI->getPointerOperand(), UnknownSize, AATags);
-}
-
-AliasAnalysis::Location
-AliasAnalysis::getLocation(const AtomicCmpXchgInst *CXI) {
- AAMDNodes AATags;
- CXI->getAAMetadata(AATags);
-
- return Location(CXI->getPointerOperand(),
- getTypeStoreSize(CXI->getCompareOperand()->getType()),
- AATags);
-}
-
-AliasAnalysis::Location
-AliasAnalysis::getLocation(const AtomicRMWInst *RMWI) {
- AAMDNodes AATags;
- RMWI->getAAMetadata(AATags);
-
- return Location(RMWI->getPointerOperand(),
- getTypeStoreSize(RMWI->getValOperand()->getType()), AATags);
-}
-
-AliasAnalysis::Location
-AliasAnalysis::getLocationForSource(const MemTransferInst *MTI) {
- uint64_t Size = UnknownSize;
- if (ConstantInt *C = dyn_cast<ConstantInt>(MTI->getLength()))
- Size = C->getValue().getZExtValue();
-
- // memcpy/memmove can have AA tags. For memcpy, they apply
- // to both the source and the destination.
- AAMDNodes AATags;
- MTI->getAAMetadata(AATags);
-
- return Location(MTI->getRawSource(), Size, AATags);
-}
-
-AliasAnalysis::Location
-AliasAnalysis::getLocationForDest(const MemIntrinsic *MTI) {
- uint64_t Size = UnknownSize;
- if (ConstantInt *C = dyn_cast<ConstantInt>(MTI->getLength()))
- Size = C->getValue().getZExtValue();
-
- // memcpy/memmove can have AA tags. For memcpy, they apply
- // to both the source and the destination.
- AAMDNodes AATags;
- MTI->getAAMetadata(AATags);
-
- return Location(MTI->getRawDest(), Size, AATags);
-}
-
-
-
-AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const LoadInst *L, const Location &Loc) {
+ModRefInfo AliasAnalysis::getModRefInfo(const LoadInst *L,
+ const MemoryLocation &Loc) {
// Be conservative in the face of volatile/atomic.
if (!L->isUnordered())
- return ModRef;
+ return MRI_ModRef;
// If the load address doesn't alias the given address, it doesn't read
// or write the specified memory.
- if (!alias(getLocation(L), Loc))
- return NoModRef;
+ if (Loc.Ptr && !alias(MemoryLocation::get(L), Loc))
+ return MRI_NoModRef;
// Otherwise, a load just reads.
- return Ref;
+ return MRI_Ref;
}
-AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const StoreInst *S, const Location &Loc) {
+ModRefInfo AliasAnalysis::getModRefInfo(const StoreInst *S,
+ const MemoryLocation &Loc) {
// Be conservative in the face of volatile/atomic.
if (!S->isUnordered())
- return ModRef;
-
- // If the store address cannot alias the pointer in question, then the
- // specified memory cannot be modified by the store.
- if (!alias(getLocation(S), Loc))
- return NoModRef;
-
- // If the pointer is a pointer to constant memory, then it could not have been
- // modified by this store.
- if (pointsToConstantMemory(Loc))
- return NoModRef;
+ return MRI_ModRef;
+
+ if (Loc.Ptr) {
+ // If the store address cannot alias the pointer in question, then the
+ // specified memory cannot be modified by the store.
+ if (!alias(MemoryLocation::get(S), Loc))
+ return MRI_NoModRef;
+
+ // If the pointer is a pointer to constant memory, then it could not have
+ // been modified by this store.
+ if (pointsToConstantMemory(Loc))
+ return MRI_NoModRef;
+ }
// Otherwise, a store just writes.
- return Mod;
+ return MRI_Mod;
}
-AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const VAArgInst *V, const Location &Loc) {
- // If the va_arg address cannot alias the pointer in question, then the
- // specified memory cannot be accessed by the va_arg.
- if (!alias(getLocation(V), Loc))
- return NoModRef;
+ModRefInfo AliasAnalysis::getModRefInfo(const VAArgInst *V,
+ const MemoryLocation &Loc) {
+
+ if (Loc.Ptr) {
+ // If the va_arg address cannot alias the pointer in question, then the
+ // specified memory cannot be accessed by the va_arg.
+ if (!alias(MemoryLocation::get(V), Loc))
+ return MRI_NoModRef;
- // If the pointer is a pointer to constant memory, then it could not have been
- // modified by this va_arg.
- if (pointsToConstantMemory(Loc))
- return NoModRef;
+ // If the pointer is a pointer to constant memory, then it could not have
+ // been modified by this va_arg.
+ if (pointsToConstantMemory(Loc))
+ return MRI_NoModRef;
+ }
// Otherwise, a va_arg reads and writes.
- return ModRef;
+ return MRI_ModRef;
}
-AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const AtomicCmpXchgInst *CX, const Location &Loc) {
+ModRefInfo AliasAnalysis::getModRefInfo(const AtomicCmpXchgInst *CX,
+ const MemoryLocation &Loc) {
// Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
if (CX->getSuccessOrdering() > Monotonic)
- return ModRef;
+ return MRI_ModRef;
// If the cmpxchg address does not alias the location, it does not access it.
- if (!alias(getLocation(CX), Loc))
- return NoModRef;
+ if (Loc.Ptr && !alias(MemoryLocation::get(CX), Loc))
+ return MRI_NoModRef;
- return ModRef;
+ return MRI_ModRef;
}
-AliasAnalysis::ModRefResult
-AliasAnalysis::getModRefInfo(const AtomicRMWInst *RMW, const Location &Loc) {
+ModRefInfo AliasAnalysis::getModRefInfo(const AtomicRMWInst *RMW,
+ const MemoryLocation &Loc) {
// Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
if (RMW->getOrdering() > Monotonic)
- return ModRef;
+ return MRI_ModRef;
// If the atomicrmw address does not alias the location, it does not access it.
- if (!alias(getLocation(RMW), Loc))
- return NoModRef;
-
- return ModRef;
-}
-
-// FIXME: this is really just shoring-up a deficiency in alias analysis.
-// BasicAA isn't willing to spend linear time determining whether an alloca
-// was captured before or after this particular call, while we are. However,
-// with a smarter AA in place, this test is just wasting compile time.
-AliasAnalysis::ModRefResult
-AliasAnalysis::callCapturesBefore(const Instruction *I,
- const AliasAnalysis::Location &MemLoc,
- DominatorTree *DT) {
- if (!DT || !DL) return AliasAnalysis::ModRef;
-
- const Value *Object = GetUnderlyingObject(MemLoc.Ptr, DL);
+ if (Loc.Ptr && !alias(MemoryLocation::get(RMW), Loc))
+ return MRI_NoModRef;
+
+ return MRI_ModRef;
+}
+
+/// \brief Return information about whether a particular call site modifies
+/// or reads the specified memory location \p MemLoc before instruction \p I
+/// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
+/// instruction-ordering queries inside the BasicBlock containing \p I.
+/// FIXME: this is really just shoring-up a deficiency in alias analysis.
+/// BasicAA isn't willing to spend linear time determining whether an alloca
+/// was captured before or after this particular call, while we are. However,
+/// with a smarter AA in place, this test is just wasting compile time.
+ModRefInfo AliasAnalysis::callCapturesBefore(const Instruction *I,
+ const MemoryLocation &MemLoc,
+ DominatorTree *DT,
+ OrderedBasicBlock *OBB) {
+ if (!DT)
+ return MRI_ModRef;
+
+ const Value *Object = GetUnderlyingObject(MemLoc.Ptr, *DL);
if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
isa<Constant>(Object))
- return AliasAnalysis::ModRef;
+ return MRI_ModRef;
ImmutableCallSite CS(I);
if (!CS.getInstruction() || CS.getInstruction() == Object)
- return AliasAnalysis::ModRef;
+ return MRI_ModRef;
if (llvm::PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
/* StoreCaptures */ true, I, DT,
- /* include Object */ true))
- return AliasAnalysis::ModRef;
+ /* include Object */ true,
+ /* OrderedBasicBlock */ OBB))
+ return MRI_ModRef;
unsigned ArgNo = 0;
- AliasAnalysis::ModRefResult R = AliasAnalysis::NoModRef;
+ ModRefInfo R = MRI_NoModRef;
for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
CI != CE; ++CI, ++ArgNo) {
// Only look at the no-capture or byval pointer arguments. If this
// is impossible to alias the pointer we're checking. If not, we have to
// assume that the call could touch the pointer, even though it doesn't
// escape.
- if (isNoAlias(AliasAnalysis::Location(*CI),
- AliasAnalysis::Location(Object)))
+ if (isNoAlias(MemoryLocation(*CI), MemoryLocation(Object)))
continue;
if (CS.doesNotAccessMemory(ArgNo))
continue;
if (CS.onlyReadsMemory(ArgNo)) {
- R = AliasAnalysis::Ref;
+ R = MRI_Ref;
continue;
}
- return AliasAnalysis::ModRef;
+ return MRI_ModRef;
}
return R;
}
/// InitializeAliasAnalysis - Subclasses must call this method to initialize the
/// AliasAnalysis interface before any other methods are called.
///
-void AliasAnalysis::InitializeAliasAnalysis(Pass *P) {
- DataLayoutPass *DLP = P->getAnalysisIfAvailable<DataLayoutPass>();
- DL = DLP ? &DLP->getDataLayout() : nullptr;
+void AliasAnalysis::InitializeAliasAnalysis(Pass *P, const DataLayout *NewDL) {
+ DL = NewDL;
auto *TLIP = P->getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
TLI = TLIP ? &TLIP->getTLI() : nullptr;
AA = &P->getAnalysis<AliasAnalysis>();
AU.addRequired<AliasAnalysis>(); // All AA's chain
}
-/// getTypeStoreSize - Return the DataLayout store size for the given type,
-/// if known, or a conservative value otherwise.
-///
-uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
- return DL ? DL->getTypeStoreSize(Ty) : UnknownSize;
-}
-
/// canBasicBlockModify - Return true if it is possible for execution of the
/// specified basic block to modify the location Loc.
///
bool AliasAnalysis::canBasicBlockModify(const BasicBlock &BB,
- const Location &Loc) {
- return canInstructionRangeModRef(BB.front(), BB.back(), Loc, Mod);
+ const MemoryLocation &Loc) {
+ return canInstructionRangeModRef(BB.front(), BB.back(), Loc, MRI_Mod);
}
/// canInstructionRangeModRef - Return true if it is possible for the
/// execution of the specified instructions to mod\ref (according to the
/// mode) the location Loc. The instructions to consider are all
/// of the instructions in the range of [I1,I2] INCLUSIVE.
-/// I1 and I2 must be in the same basic block.
+/// I1 and I2 must be in the same basic block.
bool AliasAnalysis::canInstructionRangeModRef(const Instruction &I1,
const Instruction &I2,
- const Location &Loc,
- const ModRefResult Mode) {
+ const MemoryLocation &Loc,
+ const ModRefInfo Mode) {
assert(I1.getParent() == I2.getParent() &&
"Instructions not in same basic block!");
BasicBlock::const_iterator I = &I1;
/// isNoAliasCall - Return true if this pointer is returned by a noalias
/// function.
bool llvm::isNoAliasCall(const Value *V) {
- if (isa<CallInst>(V) || isa<InvokeInst>(V))
- return ImmutableCallSite(cast<Instruction>(V))
- .paramHasAttr(0, Attribute::NoAlias);
+ if (auto CS = ImmutableCallSite(V))
+ return CS.paramHasAttr(0, Attribute::NoAlias);
return false;
}