//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/Analysis/AssumptionTracker.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/CallSite.h"
// figuring out if we can use it.
struct Query {
ExclInvsSet ExclInvs;
- AssumptionTracker *AT;
+ AssumptionCache *AC;
const Instruction *CxtI;
const DominatorTree *DT;
- Query(AssumptionTracker *AT = nullptr, const Instruction *CxtI = nullptr,
+ Query(AssumptionCache *AC = nullptr, const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr)
- : AT(AT), CxtI(CxtI), DT(DT) {}
+ : AC(AC), CxtI(CxtI), DT(DT) {}
Query(const Query &Q, const Value *NewExcl)
- : ExclInvs(Q.ExclInvs), AT(Q.AT), CxtI(Q.CxtI), DT(Q.DT) {
+ : ExclInvs(Q.ExclInvs), AC(Q.AC), CxtI(Q.CxtI), DT(Q.DT) {
ExclInvs.insert(NewExcl);
}
};
void llvm::computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
const DataLayout *TD, unsigned Depth,
- AssumptionTracker *AT, const Instruction *CxtI,
+ AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
::computeKnownBits(V, KnownZero, KnownOne, TD, Depth,
- Query(AT, safeCxtI(V, CxtI), DT));
+ Query(AC, safeCxtI(V, CxtI), DT));
}
static void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
const DataLayout *TD, unsigned Depth,
- AssumptionTracker *AT, const Instruction *CxtI,
+ AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
::ComputeSignBit(V, KnownZero, KnownOne, TD, Depth,
- Query(AT, safeCxtI(V, CxtI), DT));
+ Query(AC, safeCxtI(V, CxtI), DT));
}
static bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
const Query &Q);
bool llvm::isKnownToBeAPowerOfTwo(Value *V, bool OrZero, unsigned Depth,
- AssumptionTracker *AT,
- const Instruction *CxtI,
+ AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
return ::isKnownToBeAPowerOfTwo(V, OrZero, Depth,
- Query(AT, safeCxtI(V, CxtI), DT));
+ Query(AC, safeCxtI(V, CxtI), DT));
}
static bool isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth,
const Query &Q);
bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth,
- AssumptionTracker *AT, const Instruction *CxtI,
+ AssumptionCache *AC, const Instruction *CxtI,
const DominatorTree *DT) {
- return ::isKnownNonZero(V, TD, Depth, Query(AT, safeCxtI(V, CxtI), DT));
+ return ::isKnownNonZero(V, TD, Depth, Query(AC, safeCxtI(V, CxtI), DT));
}
static bool MaskedValueIsZero(Value *V, const APInt &Mask,
const DataLayout *TD, unsigned Depth,
const Query &Q);
-bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
- const DataLayout *TD, unsigned Depth,
- AssumptionTracker *AT, const Instruction *CxtI,
- const DominatorTree *DT) {
+bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout *TD,
+ unsigned Depth, AssumptionCache *AC,
+ const Instruction *CxtI, const DominatorTree *DT) {
return ::MaskedValueIsZero(V, Mask, TD, Depth,
- Query(AT, safeCxtI(V, CxtI), DT));
+ Query(AC, safeCxtI(V, CxtI), DT));
}
static unsigned ComputeNumSignBits(Value *V, const DataLayout *TD,
unsigned Depth, const Query &Q);
unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout *TD,
- unsigned Depth, AssumptionTracker *AT,
+ unsigned Depth, AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
- return ::ComputeNumSignBits(V, TD, Depth, Query(AT, safeCxtI(V, CxtI), DT));
+ return ::ComputeNumSignBits(V, TD, Depth, Query(AC, safeCxtI(V, CxtI), DT));
}
static void computeKnownBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
unsigned Depth, const Query &Q) {
// Use of assumptions is context-sensitive. If we don't have a context, we
// cannot use them!
- if (!Q.AT || !Q.CxtI)
+ if (!Q.AC || !Q.CxtI)
return;
unsigned BitWidth = KnownZero.getBitWidth();
- Function *F = const_cast<Function*>(Q.CxtI->getParent()->getParent());
- for (auto &CI : Q.AT->assumptions(F)) {
- CallInst *I = CI;
+ for (auto &AssumeVH : Q.AC->assumptions()) {
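+ // Entries may be null if the corresponding assume call has been deleted
+ // since the cache was populated; skip those.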
+ if (!AssumeVH)
+ continue;
+ CallInst *I = cast<CallInst>(AssumeVH);
+ assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
+ "Got assumption for the wrong function!");
if (Q.ExclInvs.count(I))
continue;
case Instruction::PHI: {
PHINode *PN = cast<PHINode>(U);
+ unsigned NumIncomingValues = PN->getNumIncomingValues();
// Don't analyze large in-degree PHIs.
- if (PN->getNumIncomingValues() > 4) break;
+ if (NumIncomingValues > 4) break;
+ // Unreachable blocks may have zero-operand PHI nodes.
+ if (NumIncomingValues == 0) break;
// Take the minimum of all incoming values. This can't infinitely loop
// because of our depth threshold.
Tmp = ComputeNumSignBits(PN->getIncomingValue(0), TD, Depth+1, Q);
- for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
+ for (unsigned i = 1, e = NumIncomingValues; i != e; ++i) {
if (Tmp == 1) return Tmp;
Tmp = std::min(Tmp,
ComputeNumSignBits(PN->getIncomingValue(i), TD,
return false;
}
+bool llvm::CannotBeOrderedLessThanZero(const Value *V, unsigned Depth) {
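+ // Note that -0.0 has its sign bit set but is not ordered less than zero,
+ // which is why the isZero() check below also accepts it.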
+ if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
+ return !CFP->getValueAPF().isNegative() || CFP->getValueAPF().isZero();
+
+ if (Depth == 6)
+ return false; // Limit search depth.
+
+ const Operator *I = dyn_cast<Operator>(V);
+ if (!I) return false;
+
+ switch (I->getOpcode()) {
+ default: break;
+ case Instruction::FMul:
+ // x*x is always non-negative or a NaN.
+ if (I->getOperand(0) == I->getOperand(1))
+ return true;
+ // Fall through
+ case Instruction::FAdd:
+ case Instruction::FDiv:
+ case Instruction::FRem:
+ return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1) &&
+ CannotBeOrderedLessThanZero(I->getOperand(1), Depth+1);
+ case Instruction::FPExt:
+ case Instruction::FPTrunc:
+ // Widening/narrowing never change sign.
+ return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1);
+ case Instruction::Call:
+ if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
+ switch (II->getIntrinsicID()) {
+ default: break;
+ case Intrinsic::exp:
+ case Intrinsic::exp2:
+ case Intrinsic::fabs:
+ case Intrinsic::sqrt:
+ return true;
+ case Intrinsic::powi:
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
+ // powi(x,n) is non-negative if n is even.
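+ // For example, powi(x, 2) == x*x, which is never ordered less than zero.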
+ if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0)
+ return true;
+ }
+ return CannotBeOrderedLessThanZero(I->getOperand(0), Depth+1);
+ case Intrinsic::fma:
+ case Intrinsic::fmuladd:
+ // x*x+y is non-negative if y is non-negative.
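+ // (x*x is non-negative or NaN, and NaN is not ordered less than zero either.)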
+ return I->getOperand(0) == I->getOperand(1) &&
+ CannotBeOrderedLessThanZero(I->getOperand(2), Depth+1);
+ }
+ break;
+ }
+ return false;
+}
+
/// If the specified value can be set by repeating the same byte in memory,
/// return the i8 value that it is represented with. This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
// Don't handle long double formats, which have strange constraints.
}
- // We can handle constant integers that are power of two in size and a
- // multiple of 8 bits.
+ // We can handle constant integers whose width is a multiple of 8 bits.
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
- unsigned Width = CI->getBitWidth();
- if (isPowerOf2_32(Width) && Width > 8) {
- // We can handle this value if the recursive binary decomposition is the
- // same at all levels.
- APInt Val = CI->getValue();
- APInt Val2;
- while (Val.getBitWidth() != 8) {
- unsigned NextWidth = Val.getBitWidth()/2;
- Val2 = Val.lshr(NextWidth);
- Val2 = Val2.trunc(Val.getBitWidth()/2);
- Val = Val.trunc(Val.getBitWidth()/2);
-
- // If the top/bottom halves aren't the same, reject it.
- if (Val != Val2)
- return nullptr;
- }
- return ConstantInt::get(V->getContext(), Val);
+ if (CI->getBitWidth() % 8 == 0) {
+ assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
+
+ // We can check that all bytes of an integer are equal by making use of a
+ // little trick: rotate by 8 and check if it's still the same value.
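+ // For example, 0x01010101 rotated left by 8 is unchanged, while
+ // 0x01020304 becomes 0x02030401 and is rejected.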
+ if (CI->getValue() != CI->getValue().rotl(8))
+ return nullptr;
+ return ConstantInt::get(V->getContext(), CI->getValue().trunc(8));
}
}
} else {
// See if InstructionSimplify knows any relevant tricks.
if (Instruction *I = dyn_cast<Instruction>(V))
- // TODO: Acquire a DominatorTree and AssumptionTracker and use them.
+ // TODO: Acquire a DominatorTree and AssumptionCache and use them.
if (Value *Simplified = SimplifyInstruction(I, TD, nullptr)) {
V = Simplified;
continue;
case Instruction::SDiv:
case Instruction::SRem: {
// x / y is undefined if y == 0 or x == INT_MIN and y == -1
- const APInt *X, *Y;
- if (match(Inst->getOperand(1), m_APInt(Y))) {
- if (*Y != 0) {
- if (*Y == -1) {
- // The numerator can't be MinSignedValue if the denominator is -1.
- if (match(Inst->getOperand(0), m_APInt(X)))
- return !Y->isMinSignedValue();
- // The numerator *might* be MinSignedValue.
- return false;
- }
- // The denominator is not 0 or -1, it's safe to proceed.
- return true;
- }
- }
+ const APInt *Numerator, *Denominator;
+ if (!match(Inst->getOperand(1), m_APInt(Denominator)))
+ return false;
+ // We cannot hoist this division if the denominator is 0.
+ if (*Denominator == 0)
+ return false;
+ // It's safe to hoist if the denominator is not 0 or -1.
+ if (*Denominator != -1)
+ return true;
+ // At this point we know that the denominator is -1. It is safe to hoist as
+ // long as we know that the numerator is not INT_MIN.
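+ // For example, for i32 the result of INT_MIN / -1 would be 2^31, which does
+ // not fit in an i32, so that division is undefined.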
+ if (match(Inst->getOperand(0), m_APInt(Numerator)))
+ return !Numerator->isMinSignedValue();
+ // The numerator *might* be MinSignedValue.
return false;
}
case Instruction::Load: {
OverflowResult llvm::computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
const DataLayout *DL,
- AssumptionTracker *AT,
+ AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT) {
// Multiplying n * m significant bits yields a result of n + m significant
APInt LHSKnownOne(BitWidth, 0);
APInt RHSKnownZero(BitWidth, 0);
APInt RHSKnownOne(BitWidth, 0);
- computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AT, CxtI, DT);
- computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AT, CxtI, DT);
+ computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
+ DT);
+ computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, DL, /*Depth=*/0, AC, CxtI,
+ DT);
// Note that underestimating the number of zero bits gives a more
// conservative answer.
unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
return OverflowResult::MayOverflow;
}
+
+OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
+ const DataLayout *DL,
+ AssumptionCache *AC,
+ const Instruction *CxtI,
+ const DominatorTree *DT) {
+ bool LHSKnownNonNegative, LHSKnownNegative;
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
+ AC, CxtI, DT);
+ if (LHSKnownNonNegative || LHSKnownNegative) {
+ bool RHSKnownNonNegative, RHSKnownNegative;
+ ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
+ AC, CxtI, DT);
+
+ if (LHSKnownNegative && RHSKnownNegative) {
+ // The sign bit is set in both cases: this MUST overflow.
+ // Both operands are at least 2^(BitWidth - 1), so their sum wraps.
+ return OverflowResult::AlwaysOverflows;
+ }
+
+ if (LHSKnownNonNegative && RHSKnownNonNegative) {
+ // The sign bit is clear in both cases: this CANNOT overflow.
+ // Both operands are at most 2^(BitWidth - 1) - 1, so their sum fits.
+ return OverflowResult::NeverOverflows;
+ }
+ }
+
+ return OverflowResult::MayOverflow;
+}