KnownOne &= KnownOne2;
// Output known-0 bits are known to be clear if zero in either the LHS or RHS.
KnownZero |= KnownZero2;
+
+ // and(x, add(x, -1)) is a common idiom that always clears the low bit;
+ // here we handle the more general case of adding any odd number by
+ // matching the form and(x, add(x, y)) where y is odd.
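+ // For example, given the IR
+ //   %a = add i32 %x, -1
+ //   %r = and i32 %x, %a
+ // the low bit of %r is known to be zero: if %x is odd, %a is even, and
+ // otherwise %x itself has a clear low bit.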
+ // TODO: This could be generalized to clearing any bit set in y where the
+ // following bit is known to be unset in y.
+ Value *Y = nullptr;
+ if (match(I->getOperand(0), m_Add(m_Specific(I->getOperand(1)),
+ m_Value(Y))) ||
+ match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
+ m_Value(Y)))) {
+ APInt KnownZero3(BitWidth, 0), KnownOne3(BitWidth, 0);
+ computeKnownBits(Y, KnownZero3, KnownOne3, DL, Depth + 1, Q);
+ if (KnownOne3.countTrailingOnes() > 0)
+ KnownZero |= APInt::getLowBitsSet(BitWidth, 1);
+ }
break;
}
case Instruction::Or: {
return false;
Value *X = nullptr, *Y = nullptr;
- // A shift of a power of two is a power of two or zero.
+ // A shift left or a logical shift right of a power of two is a power of two
+ // or zero.
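+ // (An arithmetic shift right does not qualify: e.g. ashr i8 0x80, 1 yields
+ // 0xC0, which has two bits set.)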
if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
- match(V, m_Shr(m_Value(X), m_Value()))))
+ match(V, m_LShr(m_Value(X), m_Value()))))
return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q, DL);
if (ZExtInst *ZI = dyn_cast<ZExtInst>(V))
const DataLayout &DL) {
unsigned BitWidth = DL.getPointerTypeSizeInBits(Ptr->getType());
APInt ByteOffset(BitWidth, 0);
- while (1) {
+
+ // We walk up the defs but use a visited set to handle unreachable code. In
+ // that case, we stop after accumulating the cycle once (not that it
+ // matters).
+ SmallPtrSet<Value *, 16> Visited;
+ while (Visited.insert(Ptr).second) {
if (Ptr->getType()->isVectorTy())
break;
if (!BaseAlign) {
Type *Ty = Base->getType()->getPointerElementType();
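+ // getABITypeAlignment below requires a sized type, so bail out for unsized
+ // (e.g. opaque struct) element types.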
+ if (!Ty->isSized())
+ return false;
BaseAlign = DL.getABITypeAlignment(Ty);
}
}
static bool isAligned(const Value *Base, unsigned Align, const DataLayout &DL) {
- APInt Offset(DL.getTypeStoreSizeInBits(Base->getType()), 0);
+ Type *Ty = Base->getType();
+ assert(Ty->isSized() && "must be sized");
+ APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
return isAligned(Base, Offset, Align, DL);
}
}
// For gc.relocate, look through relocations
- if (const IntrinsicInst *I = dyn_cast<IntrinsicInst>(V))
- if (I->getIntrinsicID() == Intrinsic::experimental_gc_relocate) {
- GCRelocateOperands RelocateInst(I);
- return isDereferenceableAndAlignedPointer(
- RelocateInst.getDerivedPtr(), Align, DL, CtxI, DT, TLI, Visited);
- }
+ if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
+ return isDereferenceableAndAlignedPointer(
+ RelocateInst->getDerivedPtr(), Align, DL, CtxI, DT, TLI, Visited);
if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, DL,
case Instruction::AtomicCmpXchg:
case Instruction::LandingPad:
case Instruction::Resume:
+ case Instruction::CatchSwitch:
case Instruction::CatchPad:
- case Instruction::CatchEndPad:
case Instruction::CatchRet:
case Instruction::CleanupPad:
- case Instruction::CleanupEndPad:
case Instruction::CleanupRet:
- case Instruction::TerminatePad:
return false; // Misc instructions which have effects
}
}
if (CS.isReturnNonNull())
return true;
- // operator new never returns null.
- if (isOperatorNewLikeFn(V, TLI, /*LookThroughBitCast=*/true))
- return true;
-
return false;
}
}
/// Return true if "icmp Pred LHS RHS" is always true.
-static bool isTruePredicate(CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
+static bool isTruePredicate(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
+ const DataLayout &DL, unsigned Depth,
+ AssumptionCache *AC, const Instruction *CxtI,
+ const DominatorTree *DT) {
+ assert(!LHS->getType()->isVectorTy() && "TODO: extend to handle vectors!");
if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
return true;
default:
return false;
- case CmpInst::ICMP_SLT:
case CmpInst::ICMP_SLE: {
- ConstantInt *CI;
+ const APInt *C;
- // LHS s< LHS +_{nsw} C if C > 0
// LHS s<= LHS +_{nsw} C if C >= 0
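+ // (the nsw add cannot wrap, so adding a non-negative C never moves LHS
+ // down in the signed order; e.g. X s<= (X +nsw 7)).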
- if (match(RHS, m_NSWAdd(m_Specific(LHS), m_ConstantInt(CI)))) {
- if (Pred == CmpInst::ICMP_SLT)
- return CI->getValue().isStrictlyPositive();
- return !CI->isNegative();
- }
+ if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
+ return !C->isNegative();
return false;
}
- case CmpInst::ICMP_ULT:
case CmpInst::ICMP_ULE: {
- ConstantInt *CI;
+ const APInt *C;
- // LHS u< LHS +_{nuw} C if C != 0
- // LHS u<= LHS +_{nuw} C
- if (match(RHS, m_NUWAdd(m_Specific(LHS), m_ConstantInt(CI)))) {
- if (Pred == CmpInst::ICMP_ULT)
- return !CI->isZero();
+ // LHS u<= LHS +_{nuw} C for any C
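+ // (the nuw add cannot wrap, so its result is never u< LHS).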
+ if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
return true;
- }
+
+ // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
+ auto MatchNUWAddsToSameValue = [&](Value *A, Value *B, Value *&X,
+ const APInt *&CA, const APInt *&CB) {
+ if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
+ match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
+ return true;
+
+ // If X & C == 0 then (X | C) == X +_{nuw} C
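+ // (no bit position receives a carry, so the or acts as a non-wrapping add).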
+ if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
+ match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
+ unsigned BitWidth = CA->getBitWidth();
+ APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
+ computeKnownBits(X, KnownZero, KnownOne, DL, Depth + 1, AC, CxtI, DT);
+
+ if ((KnownZero & *CA) == *CA && (KnownZero & *CB) == *CB)
+ return true;
+ }
+
+ return false;
+ };
+
+ Value *X;
+ const APInt *CLHS, *CRHS;
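+ // e.g. (X +nuw 3) u<= (X +nuw 5), since neither add wraps and 3 u<= 5.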
+ if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
+ return CLHS->ule(*CRHS);
+
return false;
}
}
/// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
/// ALHS ARHS" is true.
static bool isImpliedCondOperands(CmpInst::Predicate Pred, Value *ALHS,
- Value *ARHS, Value *BLHS, Value *BRHS) {
+ Value *ARHS, Value *BLHS, Value *BRHS,
+ const DataLayout &DL, unsigned Depth,
+ AssumptionCache *AC, const Instruction *CxtI,
+ const DominatorTree *DT) {
switch (Pred) {
default:
return false;
case CmpInst::ICMP_SLT:
case CmpInst::ICMP_SLE:
- return isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS) &&
- isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS);
+ return isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth, AC, CxtI,
+ DT) &&
+ isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth, AC, CxtI,
+ DT);
case CmpInst::ICMP_ULT:
case CmpInst::ICMP_ULE:
- return isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS) &&
- isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS);
+ return isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth, AC, CxtI,
+ DT) &&
+ isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth, AC, CxtI,
+ DT);
}
}
-bool llvm::isImpliedCondition(Value *LHS, Value *RHS) {
+bool llvm::isImpliedCondition(Value *LHS, Value *RHS, const DataLayout &DL,
+ unsigned Depth, AssumptionCache *AC,
+ const Instruction *CxtI,
+ const DominatorTree *DT) {
assert(LHS->getType() == RHS->getType() && "mismatched type");
Type *OpTy = LHS->getType();
assert(OpTy->getScalarType()->isIntegerTy(1));
return false;
if (APred == BPred)
- return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS);
+ return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth, AC,
+ CxtI, DT);
return false;
}