const TargetLibraryInfo &TLI,
bool RoundToAlign = false) {
uint64_t Size;
- if (getObjectSize(V, Size, &DL, &TLI, RoundToAlign))
+ if (getObjectSize(V, Size, DL, &TLI, RoundToAlign))
return Size;
return AliasAnalysis::UnknownSize;
}
//===----------------------------------------------------------------------===//
namespace {
- enum ExtensionKind {
- EK_NotExtended,
- EK_SignExt,
- EK_ZeroExt
- };
+// A linear transformation of a Value; this class represents ZExt(SExt(V,
+// SExtBits), ZExtBits) * Scale + Offset.
struct VariableGEPIndex {
+
+ // An opaque Value - we can't decompose this further.
const Value *V;
- ExtensionKind Extension;
+
+ // We need to track what extensions we've done, as we consider the same
+ // Value with different extensions as different variables in a GEP's linear
+ // expression; e.g. if V == -1, then sext(V) != zext(V).
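+ // For instance, an i8 V holding -1 (0xff) gives 0xffff (still -1) under
+ // sext to i16, but 0x00ff (255) under zext to i16.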
+ unsigned ZExtBits;
+ unsigned SExtBits;
+
int64_t Scale;
bool operator==(const VariableGEPIndex &Other) const {
- return V == Other.V && Extension == Other.Extension &&
- Scale == Other.Scale;
+ return V == Other.V && ZExtBits == Other.ZExtBits &&
+ SExtBits == Other.SExtBits && Scale == Other.Scale;
}
bool operator!=(const VariableGEPIndex &Other) const {
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
-static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
- ExtensionKind &Extension,
- const DataLayout &DL, unsigned Depth,
- AssumptionCache *AC, DominatorTree *DT) {
+static const Value *GetLinearExpression(const Value *V, APInt &Scale,
+ APInt &Offset, unsigned &ZExtBits,
+ unsigned &SExtBits,
+ const DataLayout &DL, unsigned Depth,
+ AssumptionCache *AC, DominatorTree *DT,
+ bool &NSW, bool &NUW) {
assert(V->getType()->isIntegerTy() && "Not an integer value");
// Limit our recursion depth.
return V;
}
- if (ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
- // if it's a constant, just convert it to an offset
- // and remove the variable.
- Offset += Const->getValue();
+ if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
+ // If it's a constant, just convert it to an offset and remove the variable.
+ // If we've been called recursively the Offset bit width will be greater
+ // than the constant's (the Offset's always as wide as the outermost call),
+ // so we'll zext here and process any extension in the isa<SExtInst> &
+ // isa<ZExtInst> cases below.
+ Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
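+ // E.g. an i8 constant -1 (0xff) reached below a sext contributes 0xff
+ // here; the unwinding isa<SExtInst> case re-sign-extends it to 0xffffffff
+ // when the arithmetic is known not to wrap.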
assert(Scale == 0 && "Constant values don't have a scale");
return V;
}
- if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
+ if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
+
+ // If we've been called recursively then Offset and Scale will be wider
+ // than the BOp operands. We'll always zext it here as we'll process sign
+ // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
+ APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());
+
switch (BOp->getOpcode()) {
- default: break;
+ default:
+ // We don't understand this instruction, so we can't decompose it any
+ // further.
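+ // Reporting Scale == 1 and Offset == 0 describes V as just itself.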
+ Scale = 1;
+ Offset = 0;
+ return V;
case Instruction::Or:
// X|C == X+C if all the bits in C are unset in X. Otherwise we can't
// analyze it.
- if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), &DL, 0, AC,
+ if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
BOp, DT))
break;
// FALL THROUGH.
case Instruction::Add:
- V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
- DL, Depth + 1, AC, DT);
- Offset += RHSC->getValue();
- return V;
+ V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
+ SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
+ Offset += RHS;
+ break;
+ case Instruction::Sub:
+ V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
+ SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
+ Offset -= RHS;
+ break;
case Instruction::Mul:
- V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
- DL, Depth + 1, AC, DT);
- Offset *= RHSC->getValue();
- Scale *= RHSC->getValue();
- return V;
+ V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
+ SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
+ Offset *= RHS;
+ Scale *= RHS;
+ break;
case Instruction::Shl:
- V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, Extension,
- DL, Depth + 1, AC, DT);
- Offset <<= RHSC->getValue().getLimitedValue();
- Scale <<= RHSC->getValue().getLimitedValue();
+ V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
+ SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
+ Offset <<= RHS.getLimitedValue();
+ Scale <<= RHS.getLimitedValue();
+ // The semantics of nsw and nuw for left shifts don't match those of
+ // multiplications, so we won't propagate them.
+ NSW = NUW = false;
return V;
}
+
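+ // An nsw/nuw flag survives only if every operator in the decomposed chain
+ // carried it.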
+ if (isa<OverflowingBinaryOperator>(BOp)) {
+ NUW &= BOp->hasNoUnsignedWrap();
+ NSW &= BOp->hasNoSignedWrap();
+ }
+ return V;
}
}
// Since GEP indices are sign extended anyway, we don't care about the high
// bits of a sign or zero extended value - just scales and offsets. The
// extensions have to be consistent though.
- if ((isa<SExtInst>(V) && Extension != EK_ZeroExt) ||
- (isa<ZExtInst>(V) && Extension != EK_SignExt)) {
+ if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
Value *CastOp = cast<CastInst>(V)->getOperand(0);
- unsigned OldWidth = Scale.getBitWidth();
+ unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
- Scale = Scale.trunc(SmallWidth);
- Offset = Offset.trunc(SmallWidth);
- Extension = isa<SExtInst>(V) ? EK_SignExt : EK_ZeroExt;
-
- Value *Result = GetLinearExpression(CastOp, Scale, Offset, Extension, DL,
- Depth + 1, AC, DT);
- Scale = Scale.zext(OldWidth);
-
- // We have to sign-extend even if Extension == EK_ZeroExt as we can't
- // decompose a sign extension (i.e. zext(x - 1) != zext(x) - zext(-1)).
- Offset = Offset.sext(OldWidth);
+ unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
+ const Value *Result =
+ GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
+ Depth + 1, AC, DT, NSW, NUW);
+
+ // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
+ // by just incrementing the number of bits we've extended by.
+ unsigned ExtendedBy = NewWidth - SmallWidth;
+
+ if (isa<SExtInst>(V) && ZExtBits == 0) {
+ // sext(sext(%x, a), b) == sext(%x, a + b)
+
+ if (NSW) {
+ // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
+ // into sext(%x) + sext(c). We'll sext the Offset ourselves:
+ unsigned OldWidth = Offset.getBitWidth();
+ Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
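+ // E.g. for sext i8 %y to i32 where %y == add nsw i8 %x, -1: Offset holds
+ // 0xff after the recursion; trunc to i8 then sext to i32 yields
+ // 0xffffffff, i.e. sext(%x - 1) == sext(%x) - 1.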
+ } else {
+ // We may have signed-wrapped, so don't decompose sext(%x + c) into
+ // sext(%x) + sext(c)
+ Scale = 1;
+ Offset = 0;
+ Result = CastOp;
+ ZExtBits = OldZExtBits;
+ SExtBits = OldSExtBits;
+ }
+ SExtBits += ExtendedBy;
+ } else {
+ // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)
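+ // (A zext'd value has a zero sign bit, so a following sext can only append
+ // more zero bits and is equivalent to a zext.)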
+
+ if (!NUW) {
+ // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
+ // zext(%x) + zext(c)
+ Scale = 1;
+ Offset = 0;
+ Result = CastOp;
+ ZExtBits = OldZExtBits;
+ SExtBits = OldSExtBits;
+ }
+ ZExtBits += ExtendedBy;
+ }
return Result;
}
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
SmallVectorImpl<VariableGEPIndex> &VarIndices,
- bool &MaxLookupReached, const DataLayout *DL,
+ bool &MaxLookupReached, const DataLayout &DL,
AssumptionCache *AC, DominatorTree *DT) {
// Limit recursion depth to limit compile time in crazy cases.
unsigned MaxLookup = MaxLookupSearchDepth;
if (!GEPOp->getOperand(0)->getType()->getPointerElementType()->isSized())
return V;
- // If we are lacking DataLayout information, we can't compute the offets of
- // elements computed by GEPs. However, we can handle bitcast equivalent
- // GEPs.
- if (!DL) {
- if (!GEPOp->hasAllZeroIndices())
- return V;
- V = GEPOp->getOperand(0);
- continue;
- }
-
unsigned AS = GEPOp->getPointerAddressSpace();
// Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
gep_type_iterator GTI = gep_type_begin(GEPOp);
for (User::const_op_iterator I = GEPOp->op_begin()+1,
E = GEPOp->op_end(); I != E; ++I) {
- Value *Index = *I;
+ const Value *Index = *I;
// Compute the (potentially symbolic) offset in bytes for this index.
if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
// For a struct, add the member offset.
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
if (FieldNo == 0) continue;
- BaseOffs += DL->getStructLayout(STy)->getElementOffset(FieldNo);
+ BaseOffs += DL.getStructLayout(STy)->getElementOffset(FieldNo);
continue;
}
// For an array/pointer, add the element offset, explicitly scaled.
- if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
+ if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
if (CIdx->isZero()) continue;
- BaseOffs += DL->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
+ BaseOffs += DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
continue;
}
- uint64_t Scale = DL->getTypeAllocSize(*GTI);
- ExtensionKind Extension = EK_NotExtended;
+ uint64_t Scale = DL.getTypeAllocSize(*GTI);
+ unsigned ZExtBits = 0, SExtBits = 0;
// If the integer type is smaller than the pointer size, it is implicitly
// sign extended to pointer size.
unsigned Width = Index->getType()->getIntegerBitWidth();
- if (DL->getPointerSizeInBits(AS) > Width)
- Extension = EK_SignExt;
+ unsigned PointerSize = DL.getPointerSizeInBits(AS);
+ if (PointerSize > Width)
+ SExtBits += PointerSize - Width;
// Use GetLinearExpression to decompose the index into a C1*V+C2 form.
APInt IndexScale(Width, 0), IndexOffset(Width, 0);
- Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension,
- *DL, 0, AC, DT);
+ bool NSW = true, NUW = true;
+ Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
+ SExtBits, DL, 0, AC, DT, NSW, NUW);
// The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
// This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
// A[x][x] -> x*16 + x*4 -> x*20
// This also ensures that 'x' only appears in the index list once.
for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
- if (VarIndices[i].V == Index &&
- VarIndices[i].Extension == Extension) {
+ if (VarIndices[i].V == Index && VarIndices[i].ZExtBits == ZExtBits &&
+ VarIndices[i].SExtBits == SExtBits) {
Scale += VarIndices[i].Scale;
VarIndices.erase(VarIndices.begin()+i);
break;
// Make sure that we have a scale that makes sense for this target's
// pointer size.
- if (unsigned ShiftBits = 64 - DL->getPointerSizeInBits(AS)) {
+ if (unsigned ShiftBits = 64 - PointerSize) {
Scale <<= ShiftBits;
Scale = (int64_t)Scale >> ShiftBits;
}
if (Scale) {
- VariableGEPIndex Entry = {Index, Extension,
+ VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
static_cast<int64_t>(Scale)};
VarIndices.push_back(Entry);
}
initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
}
- void initializePass() override {
- InitializeAliasAnalysis(this);
- }
+ bool doInitialization(Module &M) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<AliasAnalysis>();
/// is we say noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
bool isValueEqualInPotentialCycles(const Value *V1, const Value *V2);
+ /// \brief A heuristic for aliasGEP that searches for a constant offset
+ /// between the variables.
+ ///
+ /// GetLinearExpression has some limitations, as generally zext(%x + 1)
+ /// != zext(%x) + zext(1) if the arithmetic overflows. GetLinearExpression
+ /// will therefore conservatively refuse to decompose these expressions.
+ /// However, we know that, for all %x, zext(%x) != zext(%x + 1), even if
+ /// the addition overflows.
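+ /// E.g. if one GEP indexes an i8 array by zext(%x + 1) and the other by
+ /// zext(%x), the two indices are never equal, so single-byte accesses
+ /// through them can never overlap.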
+ bool
+ constantOffsetHeuristic(const SmallVectorImpl<VariableGEPIndex> &VarIndices,
+ uint64_t V1Size, uint64_t V2Size,
+ int64_t BaseOffset, const DataLayout *DL,
+ AssumptionCache *AC, DominatorTree *DT);
+
/// \brief Dest and Src are the variable indices from two decomposed
/// GetElementPtr instructions GEP1 and GEP2 which have common base
/// pointers. Subtract the GEP2 indices from GEP1 to find the symbolic
SmallVector<const Value *, 16> Worklist;
Worklist.push_back(Loc.Ptr);
do {
- const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
+ const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), *DL);
if (!Visited.insert(V).second) {
Visited.clear();
return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
Visited.clear();
return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
}
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- Worklist.push_back(PN->getIncomingValue(i));
+ for (Value *IncValue : PN->incoming_values())
+ Worklist.push_back(IncValue);
continue;
}
return false;
}
+bool BasicAliasAnalysis::doInitialization(Module &M) {
+ InitializeAliasAnalysis(this, &M.getDataLayout());
+ return true;
+}
+
/// getModRefInfo - Check to see if the specified callsite can clobber the
/// specified memory object. Since we only look at local properties of this
/// function, we really can't say much about this query. We do, however, use
assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
"AliasAnalysis query involving multiple functions!");
- const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);
+ const Value *Object = GetUnderlyingObject(Loc.Ptr, *DL);
// If this is a tail call and Loc.Ptr points to a stack location, we know that
// the tail call cannot access or modify the local stack.
return AliasAnalysis::getModRefInfo(CS1, CS2);
}
+/// \brief Provide ad-hoc rules to disambiguate accesses through two GEP
+/// operators, both having the exact same pointer operand.
+static AliasAnalysis::AliasResult
+aliasSameBasePointerGEPs(const GEPOperator *GEP1, uint64_t V1Size,
+ const GEPOperator *GEP2, uint64_t V2Size,
+ const DataLayout &DL) {
+
+ assert(GEP1->getPointerOperand() == GEP2->getPointerOperand() &&
+ "Expected GEPs with the same pointer operand");
+
+ // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
+ // such that the struct field accesses provably cannot alias.
+ // We also need at least two indices (the pointer, and the struct field).
+ if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
+ GEP1->getNumIndices() < 2)
+ return AliasAnalysis::MayAlias;
+
+ // If we don't know the size of the accesses through both GEPs, we can't
+ // determine whether the struct fields accessed can't alias.
+ if (V1Size == AliasAnalysis::UnknownSize ||
+ V2Size == AliasAnalysis::UnknownSize)
+ return AliasAnalysis::MayAlias;
+
+ ConstantInt *C1 =
+ dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
+ ConstantInt *C2 =
+ dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));
+
+ // If the last (struct) indices aren't constants, we can't say anything.
+ // If they're identical, the other indices might also be dynamically
+ // equal, so the GEPs can alias.
+ if (!C1 || !C2 || C1 == C2)
+ return AliasAnalysis::MayAlias;
+
+ // Find the last-indexed type of the GEP, i.e., the type you'd get if
+ // you stripped the last index.
+ // On the way, look at each indexed type. If there's something other
+ // than an array, different indices can lead to different final types.
+ SmallVector<Value *, 8> IntermediateIndices;
+
+ // Insert the first index; we don't need to check the type indexed
+ // through it as it only drops the pointer indirection.
+ assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
+ IntermediateIndices.push_back(GEP1->getOperand(1));
+
+ // Insert all the remaining indices but the last one.
+ // Also, check that they all index through arrays.
+ for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
+ if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
+ GEP1->getSourceElementType(), IntermediateIndices)))
+ return AliasAnalysis::MayAlias;
+ IntermediateIndices.push_back(GEP1->getOperand(i + 1));
+ }
+
+ StructType *LastIndexedStruct =
+ dyn_cast<StructType>(GetElementPtrInst::getIndexedType(
+ GEP1->getSourceElementType(), IntermediateIndices));
+
+ if (!LastIndexedStruct)
+ return AliasAnalysis::MayAlias;
+
+ // We know that:
+ // - both GEPs begin indexing from the exact same pointer;
+ // - the last indices in both GEPs are constants, indexing into a struct;
+ // - said indices are different, hence, the pointed-to fields are different;
+ // - both GEPs only index through arrays prior to that.
+ //
+ // This lets us determine that the struct that GEP1 indexes into and the
+ // struct that GEP2 indexes into must either precisely overlap or be
+ // completely disjoint. Because they cannot partially overlap, indexing into
+ // different non-overlapping fields of the struct will never alias.
+
+ // Therefore, the only remaining thing needed to show that both GEPs can't
+ // alias is that the fields are not overlapping.
+ const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
+ const uint64_t StructSize = SL->getSizeInBytes();
+ const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
+ const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());
+
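+ // Two fields don't overlap when V1's field ends at or before V2's begins,
+ // and V2's field either ends within this struct or, if the access runs past
+ // the end, its overhang into the next array element still ends at or before
+ // V1's offset in that element.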
+ auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
+ uint64_t V2Off, uint64_t V2Size) {
+ return V1Off < V2Off && V1Off + V1Size <= V2Off &&
+ ((V2Off + V2Size <= StructSize) ||
+ (V2Off + V2Size - StructSize <= V1Off));
+ };
+
+ if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
+ EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
+ return AliasAnalysis::NoAlias;
+
+ return AliasAnalysis::MayAlias;
+}
+
+bool BasicAliasAnalysis::constantOffsetHeuristic(
+ const SmallVectorImpl<VariableGEPIndex> &VarIndices, uint64_t V1Size,
+ uint64_t V2Size, int64_t BaseOffset, const DataLayout *DL,
+ AssumptionCache *AC, DominatorTree *DT) {
+ if (VarIndices.size() != 2 || V1Size == UnknownSize ||
+ V2Size == UnknownSize || !DL)
+ return false;
+
+ const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
+
+ if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
+ Var0.Scale != -Var1.Scale)
+ return false;
+
+ unsigned Width = Var1.V->getType()->getIntegerBitWidth();
+
+ // We'll strip off the Extensions of Var0 and Var1 and do another round
+ // of GetLinearExpression decomposition. In the example above, if Var0
+ // is zext(%x + 1) we should get V0 == %x and V0Offset == 1.
+
+ APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
+ V1Offset(Width, 0);
+ bool NSW = true, NUW = true;
+ unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
+ const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
+ V0SExtBits, *DL, 0, AC, DT, NSW, NUW);
+ NSW = true, NUW = true;
+ const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
+ V1SExtBits, *DL, 0, AC, DT, NSW, NUW);
+
+ if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
+ V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
+ return false;
+
+ // We have a hit - Var0 and Var1 only differ by a constant offset!
+
+ // If we've been sext'ed then zext'd, the maximum difference between Var0
+ // and Var1 is possible to calculate, but we're just interested in the
+ // absolute minimum difference between the two. The minimum distance may
+ // occur due to wrapping; consider "add i3 %i, 5": if %i == 7 then
+ // 7 + 5 mod 8 == 4, and so the minimum distance between %i and %i + 5 is 3.
+ APInt MinDiff = V0Offset - V1Offset,
+ Wrapped = APInt::getMaxValue(Width) - MinDiff + APInt(Width, 1);
+ MinDiff = APIntOps::umin(MinDiff, Wrapped);
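+ // In the i3 example above: MinDiff == 5 and Wrapped == 7 - 5 + 1 == 3, so
+ // the guaranteed minimum distance is 3.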
+ uint64_t MinDiffBytes = MinDiff.getZExtValue() * std::abs(Var0.Scale);
+
+ // We can't definitely say whether GEP1 is before or after V2 due to
+ // wrapping arithmetic (i.e. for some values of GEP1 and V2, GEP1 < V2, and
+ // for other values GEP1 > V2). We'll therefore only declare NoAlias if both
+ // V1Size and V2Size can fit in the MinDiffBytes gap.
+ return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
+ V2Size + std::abs(BaseOffset) <= MinDiffBytes;
+}
+
/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, DL),
SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
const Value *GEP2BasePtr =
DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
- GEP2MaxLookupReached, DL, AC2, DT);
+ GEP2MaxLookupReached, *DL, AC2, DT);
const Value *GEP1BasePtr =
DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
- GEP1MaxLookupReached, DL, AC1, DT);
+ GEP1MaxLookupReached, *DL, AC1, DT);
// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
// about the relation of the resulting pointer.
const Value *GEP1BasePtr =
DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
- GEP1MaxLookupReached, DL, AC1, DT);
+ GEP1MaxLookupReached, *DL, AC1, DT);
int64_t GEP2BaseOffset;
bool GEP2MaxLookupReached;
SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
const Value *GEP2BasePtr =
DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
- GEP2MaxLookupReached, DL, AC2, DT);
+ GEP2MaxLookupReached, *DL, AC2, DT);
// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
"DecomposeGEPExpression and GetUnderlyingObject disagree!");
return MayAlias;
}
+
+ // If we know the two GEPs are based off of the exact same pointer (and not
+ // just the same underlying object), see if that tells us anything about
+ // the resulting pointers.
+ if (DL && GEP1->getPointerOperand() == GEP2->getPointerOperand()) {
+ AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, *DL);
+ // If we couldn't find anything interesting, don't abandon just yet.
+ if (R != MayAlias)
+ return R;
+ }
+
// If the max search depth is reached the result is undefined
if (GEP2MaxLookupReached || GEP1MaxLookupReached)
return MayAlias;
const Value *GEP1BasePtr =
DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
- GEP1MaxLookupReached, DL, AC1, DT);
+ GEP1MaxLookupReached, *DL, AC1, DT);
// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
const Value *V = GEP1VariableIndices[i].V;
bool SignKnownZero, SignKnownOne;
- ComputeSignBit(const_cast<Value *>(V), SignKnownZero, SignKnownOne, DL,
+ ComputeSignBit(const_cast<Value *>(V), SignKnownZero, SignKnownOne, *DL,
0, AC1, nullptr, DT);
// Zero-extension widens the variable, and so forces the sign
// bit to zero.
- bool IsZExt = GEP1VariableIndices[i].Extension == EK_ZeroExt;
+ bool IsZExt = GEP1VariableIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
SignKnownZero |= IsZExt;
SignKnownOne &= !IsZExt;
// don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
if (AllPositive && GEP1BaseOffset > 0 && V2Size <= (uint64_t) GEP1BaseOffset)
return NoAlias;
+
+ if (constantOffsetHeuristic(GEP1VariableIndices, V1Size, V2Size,
+ GEP1BaseOffset, DL, AC1, DT))
+ return NoAlias;
}
// Statically, we can see that the base objects are the same, but the
SmallPtrSet<Value*, 4> UniqueSrc;
SmallVector<Value*, 4> V1Srcs;
- for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
- Value *PV1 = PN->getIncomingValue(i);
+ for (Value *PV1 : PN->incoming_values()) {
if (isa<PHINode>(PV1))
// If any of the source itself is a PHI, return MayAlias conservatively
// to avoid compile time explosion. The worst possible case is if both
V1 = V1->stripPointerCasts();
V2 = V2->stripPointerCasts();
+ // If V1 or V2 is undef, the result is NoAlias because we can always pick a
+ // value for undef that aliases nothing in the program.
+ if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
+ return NoAlias;
+
// Are we checking for alias of the same value?
// Because we look 'through' phi nodes we could look at "Value" pointers from
// different iterations. We must therefore make sure that this is not the
return NoAlias; // Scalars cannot alias each other
// Figure out what objects these things are pointing to if we can.
- const Value *O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
- const Value *O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);
+ const Value *O1 = GetUnderlyingObject(V1, *DL, MaxLookupSearchDepth);
+ const Value *O2 = GetUnderlyingObject(V2, *DL, MaxLookupSearchDepth);
// Null values in the default address space don't point to any object, so they
// don't alias any other pointer.
if (!Inst)
return true;
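+ // If we never looked through a PHI, a value can't differ between loop
+ // iterations here, so the equality holds unconditionally.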
+ if (VisitedPhiBBs.empty())
+ return true;
+
if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
return false;
for (unsigned i = 0, e = Src.size(); i != e; ++i) {
const Value *V = Src[i].V;
- ExtensionKind Extension = Src[i].Extension;
+ unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
int64_t Scale = Src[i].Scale;
// Find V in Dest. This is N^2, but pointer indices almost never have more
// than a few variable indexes.
for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
- Dest[j].Extension != Extension)
+ Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
continue;
// If we found it, subtract off Scale V's from the entry in Dest. If it
// If we didn't consume this entry, add it to the end of the Dest list.
if (Scale) {
- VariableGEPIndex Entry = { V, Extension, -Scale };
+ VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
Dest.push_back(Entry);
}
}