/// conditions?
static cl::opt<unsigned> DomConditionsMaxDomBlocks("dom-conditions-dom-blocks",
cl::Hidden,
- cl::init(20000));
+ cl::init(20));
// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
- cl::Hidden, cl::init(2000));
+ cl::Hidden, cl::init(20));
- // If true, don't consider only compares whose only use is a branch.
+ // If true, only consider compares whose sole use is a branch.
static cl::opt<bool> DomConditionsSingleCmpUse("dom-conditions-single-cmp-use",
}
// If low bits are zero in either operand, output low known-0 bits.
- // Also compute a conserative estimate for high known-0 bits.
+ // Also compute a conservative estimate for high known-0 bits.
// More trickiness is possible, but this is sufficient for the
// interesting case of alignment computation.
KnownOne.clearAllBits();
for (BasicBlock::const_iterator I =
std::next(BasicBlock::const_iterator(Q.CxtI)),
IE(Inv); I != IE; ++I)
- if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I))
+ if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
return false;
return !isEphemeralValueOf(Inv, Q.CxtI);
// of the block); the common case is that the assume will come first.
for (BasicBlock::iterator I = std::next(BasicBlock::iterator(Inv)),
IE = Inv->getParent()->end(); I != IE; ++I)
- if (I == Q.CxtI)
+ if (&*I == Q.CxtI)
return true;
// The context must come first...
for (BasicBlock::const_iterator I =
std::next(BasicBlock::const_iterator(Q.CxtI)),
IE(Inv); I != IE; ++I)
- if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I))
+ if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
return false;
return !isEphemeralValueOf(Inv, Q.CxtI);
if (!Q.DT || !Q.CxtI)
return;
Instruction *Cxt = const_cast<Instruction *>(Q.CxtI);
+ // The context instruction might be in a statically unreachable block. If
+ // so, dominator queries may yield surprising results (e.g., the block may
+ // not have a node in the dominator tree).
+ if (!Q.DT->isReachableFromEntry(Cxt->getParent()))
+ return;
// Avoid useless work
if (auto VI = dyn_cast<Instruction>(V))
// instruction. Finding a condition where one path dominates the context
// isn't enough because both the true and false cases could merge before
// the context instruction we're actually interested in. Instead, we need
- // to ensure that the taken *edge* dominates the context instruction.
+ // to ensure that the taken *edge* dominates the context instruction. We
+ // know that the edge must be reachable since we started from a reachable
+ // block.
BasicBlock *BB0 = BI->getSuccessor(0);
BasicBlockEdge Edge(BI->getParent(), BB0);
if (!Edge.isSingleEdge() || !Q.DT->dominates(Edge, Q.CxtI->getParent()))
}
case Instruction::BitCast: {
Type *SrcTy = I->getOperand(0)->getType();
- if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
+ if ((SrcTy->isIntegerTy() || SrcTy->isPointerTy() ||
+ SrcTy->isFloatingPointTy()) &&
// TODO: For now, not handling conversions like:
// (bitcast i64 %x to <2 x i32>)
!I->getType()->isVectorTy()) {
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
switch (II->getIntrinsicID()) {
default: break;
+ case Intrinsic::bswap:
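+ // bswap permutes whole bytes, so the known bits of the result are just
+ // the byte-swapped known bits of the operand.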
+ computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL,
+ Depth + 1, Q);
+ KnownZero |= KnownZero2.byteSwap();
+ KnownOne |= KnownOne2.byteSwap();
+ break;
case Intrinsic::ctlz:
case Intrinsic::cttz: {
unsigned LowBits = Log2_32(BitWidth)+1;
break;
}
case Intrinsic::ctpop: {
- unsigned LowBits = Log2_32(BitWidth)+1;
- KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
+ computeKnownBits(I->getOperand(0), KnownZero2, KnownOne2, DL,
+ Depth + 1, Q);
+ // We can bound the number of bits the count needs. Also, bits known to
+ // be zero can't contribute to the population.
+ unsigned BitsPossiblySet = BitWidth - KnownZero2.countPopulation();
+ unsigned LeadingZeros =
+ APInt(BitWidth, BitsPossiblySet).countLeadingZeros();
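+ // e.g. with BitWidth == 32 and at most 8 bits possibly set, the count
+ // fits in 4 bits, so the high 28 bits of the result are known zero.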
+ assert(LeadingZeros <= BitWidth);
+ KnownZero |= APInt::getHighBitsSet(BitWidth, LeadingZeros);
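+ // Drop any bits from KnownOne that the new KnownZero bits contradict;
+ // the two sets must stay disjoint.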
+ KnownOne &= ~KnownZero;
+ // TODO: we could bound KnownOne using the lower bound on the number of
+ // bits which might be set, given by the population count of KnownOne2.
+ break;
+ }
+ case Intrinsic::fabs: {
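+ // fabs clears the sign bit of each element, so the sign bit(s) of the
+ // result are known zero.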
+ Type *Ty = II->getType();
+ APInt SignBit = APInt::getSignBit(Ty->getScalarSizeInBits());
+ KnownZero |= APInt::getSplat(Ty->getPrimitiveSizeInBits(), SignBit);
break;
}
case Intrinsic::x86_sse42_crc32_64_64:
}
}
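+/// Attempt to compute a known alignment for V from its definition: globals,
+/// arguments (including sret), allocas, call return attributes, and !align
+/// load metadata. Returns 0 when no alignment is known.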
+static unsigned getAlignment(const Value *V, const DataLayout &DL) {
+ unsigned Align = 0;
+ if (auto *GO = dyn_cast<GlobalObject>(V)) {
+ Align = GO->getAlignment();
+ if (Align == 0) {
+ if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
+ Type *ObjectType = GVar->getType()->getElementType();
+ if (ObjectType->isSized()) {
+ // If the object is defined in the current Module, we'll be giving
+ // it the preferred alignment. Otherwise, we have to assume that it
+ // may only have the minimum ABI alignment.
+ if (GVar->isStrongDefinitionForLinker())
+ Align = DL.getPreferredAlignment(GVar);
+ else
+ Align = DL.getABITypeAlignment(ObjectType);
+ }
+ }
+ }
+ } else if (const Argument *A = dyn_cast<Argument>(V)) {
+ Align = A->getType()->isPointerTy() ? A->getParamAlignment() : 0;
+
+ if (!Align && A->hasStructRetAttr()) {
+ // An sret parameter has at least the ABI alignment of the return type.
+ Type *EltTy = cast<PointerType>(A->getType())->getElementType();
+ if (EltTy->isSized())
+ Align = DL.getABITypeAlignment(EltTy);
+ }
+ } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
+ Align = AI->getAlignment();
+ else if (auto CS = ImmutableCallSite(V))
+ Align = CS.getAttributes().getParamAlignment(AttributeSet::ReturnIndex);
+ else if (const LoadInst *LI = dyn_cast<LoadInst>(V))
+ if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
+ ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
+ Align = CI->getLimitedValue();
+ }
+
+ return Align;
+}
+
/// Determine which bits of V are known to be either zero or one and return
/// them in the KnownZero/KnownOne bit sets.
///
unsigned BitWidth = KnownZero.getBitWidth();
assert((V->getType()->isIntOrIntVectorTy() ||
+ V->getType()->isFPOrFPVectorTy() ||
V->getType()->getScalarType()->isPointerTy()) &&
- "Not integer or pointer type!");
+ "Not integer, floating point, or pointer type!");
assert((DL.getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
(!V->getType()->isIntOrIntVectorTy() ||
V->getType()->getScalarSizeInBits() == BitWidth) &&
return;
}
- // The address of an aligned GlobalValue has trailing zeros.
- if (auto *GO = dyn_cast<GlobalObject>(V)) {
- unsigned Align = GO->getAlignment();
- if (Align == 0) {
- if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
- Type *ObjectType = GVar->getType()->getElementType();
- if (ObjectType->isSized()) {
- // If the object is defined in the current Module, we'll be giving
- // it the preferred alignment. Otherwise, we have to assume that it
- // may only have the minimum ABI alignment.
- if (GVar->isStrongDefinitionForLinker())
- Align = DL.getPreferredAlignment(GVar);
- else
- Align = DL.getABITypeAlignment(ObjectType);
- }
- }
- }
- if (Align > 0)
- KnownZero = APInt::getLowBitsSet(BitWidth,
- countTrailingZeros(Align));
- else
- KnownZero.clearAllBits();
- KnownOne.clearAllBits();
- return;
- }
-
- if (Argument *A = dyn_cast<Argument>(V)) {
- unsigned Align = A->getType()->isPointerTy() ? A->getParamAlignment() : 0;
-
- if (!Align && A->hasStructRetAttr()) {
- // An sret parameter has at least the ABI alignment of the return type.
- Type *EltTy = cast<PointerType>(A->getType())->getElementType();
- if (EltTy->isSized())
- Align = DL.getABITypeAlignment(EltTy);
- }
-
- if (Align)
- KnownZero = APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
- else
- KnownZero.clearAllBits();
- KnownOne.clearAllBits();
-
- // Don't give up yet... there might be an assumption that provides more
- // information...
- computeKnownBitsFromAssume(V, KnownZero, KnownOne, DL, Depth, Q);
-
- // Or a dominating condition for that matter
- if (EnableDomConditions && Depth <= DomConditionsMaxDepth)
- computeKnownBitsFromDominatingCondition(V, KnownZero, KnownOne, DL,
- Depth, Q);
- return;
- }
-
// Start out not knowing anything.
KnownZero.clearAllBits(); KnownOne.clearAllBits();
if (Operator *I = dyn_cast<Operator>(V))
computeKnownBitsFromOperator(I, KnownZero, KnownOne, DL, Depth, Q);
+
+ // Aligned pointers have trailing zeros, so refine the KnownZero set.
+ if (V->getType()->isPointerTy()) {
+ unsigned Align = getAlignment(V, DL);
+ if (Align)
+ KnownZero |= APInt::getLowBitsSet(BitWidth, countTrailingZeros(Align));
+ }
+
// computeKnownBitsFromAssume and computeKnownBitsFromDominatingCondition
- // strictly refines KnownZero and KnownOne. Therefore, we run them after
+ // strictly refine KnownZero and KnownOne. Therefore, we run them after
// computeKnownBitsFromOperator.
ComputeSignBit(X, XKnownNonNegative, XKnownNegative, DL, Depth, Q);
if (XKnownNegative)
return true;
+
+ // If the shifter operand is a constant, and all of the bits shifted
+ // out are known to be zero, and X is known non-zero then at least one
+ // non-zero bit must remain.
+ if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) {
+ APInt KnownZero(BitWidth, 0);
+ APInt KnownOne(BitWidth, 0);
+ computeKnownBits(X, KnownZero, KnownOne, DL, Depth, Q);
+
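+ // Shift amounts of BitWidth or more give an undefined result, so clamp
+ // the amount to keep the bit arithmetic below in range.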
+ auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
+ // Is there a known one in the portion not shifted out?
+ if (KnownOne.countLeadingZeros() < BitWidth - ShiftVal)
+ return true;
+ // Are all the bits to be shifted out known zero?
+ if (KnownZero.countTrailingOnes() >= ShiftVal)
+ return isKnownNonZero(X, DL, Depth, Q);
+ }
}
// div exact can only produce a zero if the dividend is zero.
else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) {
isKnownNonZero(SI->getFalseValue(), DL, Depth, Q))
return true;
}
+ // PHI
+ else if (PHINode *PN = dyn_cast<PHINode>(V)) {
+ // Try to detect a recurrence that monotonically increases from a
+ // starting value, as these are common as induction variables.
+ if (PN->getNumIncomingValues() == 2) {
+ Value *Start = PN->getIncomingValue(0);
+ Value *Induction = PN->getIncomingValue(1);
+ if (isa<ConstantInt>(Induction) && !isa<ConstantInt>(Start))
+ std::swap(Start, Induction);
+ if (ConstantInt *C = dyn_cast<ConstantInt>(Start)) {
+ if (!C->isZero() && !C->isNegative()) {
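+ // A strictly positive start plus a non-negative step cannot reach zero
+ // if the add never wraps (nsw or nuw).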
+ ConstantInt *X;
+ if ((match(Induction, m_NSWAdd(m_Specific(PN), m_ConstantInt(X))) ||
+ match(Induction, m_NUWAdd(m_Specific(PN), m_ConstantInt(X)))) &&
+ !X->isNegative())
+ return true;
+ }
+ }
+ }
+ }
if (!BitWidth) return false;
APInt KnownZero(BitWidth, 0);
static bool isAligned(const Value *Base, APInt Offset, unsigned Align,
const DataLayout &DL) {
- APInt BaseAlign(Offset.getBitWidth(), 0);
- if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base))
- BaseAlign = AI->getAlignment();
- else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Base))
- BaseAlign = GV->getAlignment();
- else if (const Argument *A = dyn_cast<Argument>(Base))
- BaseAlign = A->getParamAlignment();
+ APInt BaseAlign(Offset.getBitWidth(), getAlignment(Base, DL));
if (!BaseAlign) {
Type *Ty = Base->getType()->getPointerElementType();
const LoadInst *LI = cast<LoadInst>(Inst);
if (!LI->isUnordered() ||
// Speculative load may create a race that did not exist in the source.
- LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
+ LI->getParent()->getParent()->hasFnAttribute(
+ Attribute::SanitizeThread) ||
+ // Speculative load may read from memory regions that AddressSanitizer
+ // considers poisoned.
+ LI->getParent()->getParent()->hasFnAttribute(
+ Attribute::SanitizeAddress))
return false;
const DataLayout &DL = LI->getModule()->getDataLayout();
return isDereferenceableAndAlignedPointer(
case Instruction::CatchEndPad:
case Instruction::CatchRet:
case Instruction::CleanupPad:
+ case Instruction::CleanupEndPad:
case Instruction::CleanupRet:
case Instruction::TerminatePad:
return false; // Misc instructions which have effects
/// Return true if we know that the specified value is never null.
bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
+ assert(V->getType()->isPointerTy() && "V must be pointer type");
+
// Alloca never returns null, malloc might.
if (isa<AllocaInst>(V)) return true;
static bool isKnownNonNullFromDominatingCondition(const Value *V,
const Instruction *CtxI,
const DominatorTree *DT) {
+ assert(V->getType()->isPointerTy() && "V must be pointer type");
+
unsigned NumUsesExplored = 0;
for (auto U : V->users()) {
// Avoid massive lists
SmallSet<const Value *, 16> YieldsPoison;
YieldsPoison.insert(PoisonI);
- for (const Instruction *I = PoisonI, *E = BB->end(); I != E;
- I = I->getNextNode()) {
- if (I != PoisonI) {
- const Value *NotPoison = getGuaranteedNonFullPoisonOp(I);
+ for (BasicBlock::const_iterator I = PoisonI->getIterator(), E = BB->end();
+ I != E; ++I) {
+ if (&*I != PoisonI) {
+ const Value *NotPoison = getGuaranteedNonFullPoisonOp(&*I);
if (NotPoison != nullptr && YieldsPoison.count(NotPoison)) return true;
- if (!isGuaranteedToTransferExecutionToSuccessor(I)) return false;
+ if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
+ return false;
}
// Mark poison that propagates from I through uses of I.
- if (YieldsPoison.count(I)) {
+ if (YieldsPoison.count(&*I)) {
for (const User *User : I->users()) {
const Instruction *UserI = cast<Instruction>(User);
if (UserI->getParent() == BB && propagatesFullPoison(UserI))
return {SPF_UNKNOWN, SPNB_NA, false};
}
-static Constant *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
- Instruction::CastOps *CastOp) {
+static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
+ Instruction::CastOps *CastOp) {
CastInst *CI = dyn_cast<CastInst>(V1);
Constant *C = dyn_cast<Constant>(V2);
- if (!CI || !C)
+ CastInst *CI2 = dyn_cast<CastInst>(V2);
+ if (!CI)
return nullptr;
*CastOp = CI->getOpcode();
+ if (CI2) {
+ // If V1 and V2 are casts of the same kind from the same source type, we
+ // can strip both casts and compare the underlying values.
+ if (CI2->getOpcode() == CI->getOpcode() &&
+ CI2->getSrcTy() == CI->getSrcTy())
+ return CI2->getOperand(0);
+ return nullptr;
+ } else if (!C) {
+ return nullptr;
+ }
+
if (isa<SExtInst>(CI) && CmpI->isSigned()) {
Constant *T = ConstantExpr::getTrunc(C, CI->getSrcTy());
// This is only valid if the truncated value can be sign-extended
// Deal with type mismatches.
if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
- if (Constant *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
+ if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp))
return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
cast<CastInst>(TrueVal)->getOperand(0), C,
LHS, RHS);
- if (Constant *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
+ if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp))
return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
C, cast<CastInst>(FalseVal)->getOperand(0),
LHS, RHS);