//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "instsimplify"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/Dominators.h"
-#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/Operator.h"
-#include "llvm/Support/ConstantRange.h"
-#include "llvm/Support/GetElementPtrTypeIterator.h"
-#include "llvm/Support/PatternMatch.h"
-#include "llvm/Support/ValueHandle.h"
+#include "llvm/IR/PatternMatch.h"
+#include "llvm/IR/ValueHandle.h"
using namespace llvm;
using namespace llvm::PatternMatch;
+#define DEBUG_TYPE "instsimplify"
+
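// RecursionLimit caps how deeply the mutually recursive Simplify* helpers
// descend; each one decrements its MaxRecurse argument and bails out once the
// budget is exhausted.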
enum { RecursionLimit = 3 };
STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");
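
/// Query - A small bundle of the optional analyses (DataLayout,
/// TargetLibraryInfo and DominatorTree) that is threaded through the
/// simplification routines below instead of three separate pointers.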
struct Query {
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLibraryInfo *TLI;
const DominatorTree *DT;
- Query(const DataLayout *td, const TargetLibraryInfo *tli,
- const DominatorTree *dt) : TD(td), TLI(tli), DT(dt) {}
+ Query(const DataLayout *DL, const TargetLibraryInfo *tli,
+ const DominatorTree *dt) : DL(DL), TLI(tli), DT(dt) {}
};
static Value *SimplifyAndInst(Value *, Value *, const Query &, unsigned);
Instruction::BinaryOps OpcodeToExpand = (Instruction::BinaryOps)OpcToExpand;
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
// Check whether the expression has the form "(A op' B) op C".
if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
}
}
- return 0;
+ return nullptr;
}
/// FactorizeBinOp - Simplify "LHS Opcode RHS" by factorizing out a common term
Instruction::BinaryOps OpcodeToExtract = (Instruction::BinaryOps)OpcToExtract;
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
if (!Op0 || Op0->getOpcode() != OpcodeToExtract ||
!Op1 || Op1->getOpcode() != OpcodeToExtract)
- return 0;
+ return nullptr;
// The expression has the form "(A op' B) op (C op' D)".
Value *A = Op0->getOperand(0), *B = Op0->getOperand(1);
}
}
- return 0;
+ return nullptr;
}
/// SimplifyAssociativeBinOp - Generic simplifications for associative binary
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
// The remaining transforms require commutativity as well as associativity.
if (!Instruction::isCommutative(Opcode))
- return 0;
+ return nullptr;
// Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
if (Op0 && Op0->getOpcode() == Opcode) {
}
}
- return 0;
+ return nullptr;
}
/// ThreadBinOpOverSelect - In the case of a binary operation with a select
const Query &Q, unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
SelectInst *SI;
if (isa<SelectInst>(LHS)) {
}
}
- return 0;
+ return nullptr;
}
/// ThreadCmpOverSelect - In the case of a comparison with a select instruction,
unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
// Make sure the select is on the LHS.
if (!isa<SelectInst>(LHS)) {
// It didn't simplify. However if "cmp TV, RHS" is equal to the select
// condition then we can replace it with 'true'. Otherwise give up.
if (!isSameCompare(Cond, Pred, TV, RHS))
- return 0;
+ return nullptr;
TCmp = getTrue(Cond->getType());
}
// It didn't simplify. However if "cmp FV, RHS" is equal to the select
// condition then we can replace it with 'false'. Otherwise give up.
if (!isSameCompare(Cond, Pred, FV, RHS))
- return 0;
+ return nullptr;
FCmp = getFalse(Cond->getType());
}
// The remaining cases only make sense if the select condition has the same
// type as the result of the comparison, so bail out if this is not so.
if (Cond->getType()->isVectorTy() != RHS->getType()->isVectorTy())
- return 0;
+ return nullptr;
// If the false value simplified to false, then the result of the compare
// is equal to "Cond && TCmp". This also catches the case when the false
// value simplified to false and the true value to true, returning "Cond".
Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
/// ThreadBinOpOverPHI - In the case of a binary operation with an operand that
const Query &Q, unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
PHINode *PI;
if (isa<PHINode>(LHS)) {
PI = cast<PHINode>(LHS);
// Bail out if RHS and the phi may be mutually interdependent due to a loop.
if (!ValueDominatesPHI(RHS, PI, Q.DT))
- return 0;
+ return nullptr;
} else {
assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
PI = cast<PHINode>(RHS);
// Bail out if LHS and the phi may be mutually interdependent due to a loop.
if (!ValueDominatesPHI(LHS, PI, Q.DT))
- return 0;
+ return nullptr;
}
// Evaluate the BinOp on the incoming phi values.
- Value *CommonValue = 0;
+ Value *CommonValue = nullptr;
for (unsigned i = 0, e = PI->getNumIncomingValues(); i != e; ++i) {
Value *Incoming = PI->getIncomingValue(i);
// If the incoming value is the phi node itself, it can safely be skipped.
    // If the operation failed to simplify, or simplified to a different value
    // than the one obtained previously, then give up.
if (!V || (CommonValue && V != CommonValue))
- return 0;
+ return nullptr;
CommonValue = V;
}
const Query &Q, unsigned MaxRecurse) {
// Recursion is always used, so bail out at once if we already hit the limit.
if (!MaxRecurse--)
- return 0;
+ return nullptr;
// Make sure the phi is on the LHS.
if (!isa<PHINode>(LHS)) {
// Bail out if RHS and the phi may be mutually interdependent due to a loop.
if (!ValueDominatesPHI(RHS, PI, Q.DT))
- return 0;
+ return nullptr;
// Evaluate the BinOp on the incoming phi values.
- Value *CommonValue = 0;
+ Value *CommonValue = nullptr;
for (unsigned i = 0, e = PI->getNumIncomingValues(); i != e; ++i) {
Value *Incoming = PI->getIncomingValue(i);
// If the incoming value is the phi node itself, it can safely be skipped.
    // If the operation failed to simplify, or simplified to a different value
    // than the one obtained previously, then give up.
if (!V || (CommonValue && V != CommonValue))
- return 0;
+ return nullptr;
CommonValue = V;
}
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Add, CLHS->getType(), Ops,
- Q.TD, Q.TLI);
+ Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
// X + (Y - X) -> Y
// (Y - X) + X -> Y
// Eg: X + -X -> 0
- Value *Y = 0;
+ Value *Y = nullptr;
if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
return Y;
// "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
// for threading over phi nodes.
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
+ return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query (DL, TLI, DT),
RecursionLimit);
}
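
// A minimal usage sketch (illustrative only; LHS, RHS, AddInst and the analysis
// pointers are assumed to come from the caller). Every public entry point in
// this file follows the same convention of returning null when no fold applies:
//   if (Value *V = SimplifyAddInst(LHS, RHS, /*isNSW=*/false, /*isNUW=*/false,
//                                  DL, TLI, DT))
//     AddInst->replaceAllUsesWith(V);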
/// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
/// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
/// folding.
-static Constant *stripAndComputeConstantOffsets(const DataLayout *TD,
- Value *&V) {
+static Constant *stripAndComputeConstantOffsets(const DataLayout *DL,
+ Value *&V,
+ bool AllowNonInbounds = false) {
assert(V->getType()->getScalarType()->isPointerTy());
// Without DataLayout, just be conservative for now. Theoretically, more could
// be done in this case.
- if (!TD)
+ if (!DL)
return ConstantInt::get(IntegerType::get(V->getContext(), 64), 0);
- unsigned IntPtrWidth = TD->getPointerSizeInBits();
- APInt Offset = APInt::getNullValue(IntPtrWidth);
+ Type *IntPtrTy = DL->getIntPtrType(V->getType())->getScalarType();
+ APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());
// Even though we don't look through PHI nodes, we could be called on an
// instruction in an unreachable block, which may be on a cycle.
Visited.insert(V);
do {
if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
- if (!GEP->isInBounds() || !GEP->accumulateConstantOffset(*TD, Offset))
+ if ((!AllowNonInbounds && !GEP->isInBounds()) ||
+ !GEP->accumulateConstantOffset(*DL, Offset))
break;
V = GEP->getPointerOperand();
} else if (Operator::getOpcode(V) == Instruction::BitCast) {
"Unexpected operand type!");
} while (Visited.insert(V));
- Type *IntPtrTy = TD->getIntPtrType(V->getContext());
Constant *OffsetIntPtr = ConstantInt::get(IntPtrTy, Offset);
if (V->getType()->isVectorTy())
return ConstantVector::getSplat(V->getType()->getVectorNumElements(),
/// \brief Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns zero.
-static Constant *computePointerDifference(const DataLayout *TD,
+static Constant *computePointerDifference(const DataLayout *DL,
Value *LHS, Value *RHS) {
- Constant *LHSOffset = stripAndComputeConstantOffsets(TD, LHS);
- Constant *RHSOffset = stripAndComputeConstantOffsets(TD, RHS);
+ Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
+ Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
// If LHS and RHS are not related via constant offsets to the same base
// value, there is nothing we can do here.
if (LHS != RHS)
- return 0;
+ return nullptr;
// Otherwise, the difference of LHS - RHS can be computed as:
// LHS - RHS
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Sub, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}
// X - undef -> undef
// (X*2) - X -> X
// (X<<1) - X -> X
- Value *X = 0;
+ Value *X = nullptr;
if (match(Op0, m_Mul(m_Specific(Op1), m_ConstantInt<2>())) ||
match(Op0, m_Shl(m_Specific(Op1), m_One())))
return Op1;
// (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
// For example, (X + Y) - Y -> X; (Y + X) - Y -> X
- Value *Y = 0, *Z = Op1;
+ Value *Y = nullptr, *Z = Op1;
if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
// See if "V === Y - Z" simplifies.
if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
// Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
if (match(Op0, m_PtrToInt(m_Value(X))) &&
match(Op1, m_PtrToInt(m_Value(Y))))
- if (Constant *Result = computePointerDifference(Q.TD, X, Y))
+ if (Constant *Result = computePointerDifference(Q.DL, X, Y))
return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);
// Mul distributes over Sub. Try some generic simplifications based on this.
// "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
// for threading over phi nodes.
- return 0;
+ return nullptr;
}
Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
+ return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query (DL, TLI, DT),
RecursionLimit);
}
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::FAdd, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
// fadd [nnan ninf] X, (fsub [nnan ninf] 0, X) ==> 0
// where nnan and ninf have to occur at least once somewhere in this
// expression
- Value *SubOp = 0;
+ Value *SubOp = nullptr;
if (match(Op1, m_FSub(m_AnyZero(), m_Specific(Op0))))
SubOp = Op1;
else if (match(Op0, m_FSub(m_AnyZero(), m_Specific(Op1))))
return Constant::getNullValue(Op0->getType());
}
- return 0;
+ return nullptr;
}
/// Given operands for an FSub, see if we can fold the result. If not, this
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::FSub, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}
}
if (FMF.noNaNs() && FMF.noInfs() && Op0 == Op1)
return Constant::getNullValue(Op0->getType());
- return 0;
+ return nullptr;
}
/// Given the operands for an FMul, see if we can fold the result
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::FMul, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZero()))
return Op1;
- return 0;
+ return nullptr;
}
/// SimplifyMulInst - Given operands for a Mul, see if we can
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Mul, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
return Op0;
// (X / Y) * Y -> X if the division is exact.
- Value *X = 0;
+ Value *X = nullptr;
if (match(Op0, m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0))))) // Y * (X / Y)
return X;
MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFAddInst(Op0, Op1, FMF, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyFAddInst(Op0, Op1, FMF, Query (DL, TLI, DT), RecursionLimit);
}
Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFSubInst(Op0, Op1, FMF, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyFSubInst(Op0, Op1, FMF, Query (DL, TLI, DT), RecursionLimit);
}
Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1,
FastMathFlags FMF,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFMulInst(Op0, Op1, FMF, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyFMulInst(Op0, Op1, FMF, Query (DL, TLI, DT), RecursionLimit);
}
-Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyMulInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyMulInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
/// SimplifyDiv - Given operands for an SDiv or UDiv, see if we can
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
- return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.TD, Q.TLI);
+ return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL, Q.TLI);
}
}
return ConstantInt::get(Op0->getType(), 1);
// (X * Y) / Y -> X if the multiplication does not overflow.
- Value *X = 0, *Y = 0;
+ Value *X = nullptr, *Y = nullptr;
if (match(Op0, m_Mul(m_Value(X), m_Value(Y))) && (X == Op1 || Y == Op1)) {
if (Y != Op1) std::swap(X, Y); // Ensure expression is (X * Y) / Y, Y = Op1
OverflowingBinaryOperator *Mul = cast<OverflowingBinaryOperator>(Op0);
if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
/// SimplifySDivInst - Given operands for an SDiv, see if we can
if (Value *V = SimplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifySDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifySDivInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
/// SimplifyUDivInst - Given operands for a UDiv, see if we can
if (Value *V = SimplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyUDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyUDivInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const Query &Q,
if (match(Op1, m_Undef()))
return Op1;
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyFDivInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
/// SimplifyRem - Given operands for an SRem or URem, see if we can
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
- return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.TD, Q.TLI);
+ return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL, Q.TLI);
}
}
if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
/// SimplifySRemInst - Given operands for an SRem, see if we can
if (Value *V = SimplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifySRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifySRemInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
/// SimplifyURemInst - Given operands for a URem, see if we can
if (Value *V = SimplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyURemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyURemInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
static Value *SimplifyFRemInst(Value *Op0, Value *Op1, const Query &,
if (match(Op1, m_Undef()))
return Op1;
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyFRemInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
+}
+
+/// isUndefShift - Returns true if a shift by \c Amount always yields undef.
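+/// This covers an undef amount, a constant amount that is at least the scalar
+/// bit width, and a constant vector amount in which every lane is one of those
+/// two cases (e.g. <i32 32, i32 undef> for a <2 x i32> shift).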
+static bool isUndefShift(Value *Amount) {
+ Constant *C = dyn_cast<Constant>(Amount);
+ if (!C)
+ return false;
+
+ // X shift by undef -> undef because it may shift by the bitwidth.
+ if (isa<UndefValue>(C))
+ return true;
+
+ // Shifting by the bitwidth or more is undefined.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
+ if (CI->getValue().getLimitedValue() >=
+ CI->getType()->getScalarSizeInBits())
+ return true;
+
+ // If all lanes of a vector shift are undefined the whole shift is.
+ if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
+ for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I)
+ if (!isUndefShift(C->getAggregateElement(I)))
+ return false;
+ return true;
+ }
+
+ return false;
}
/// SimplifyShift - Given operands for an Shl, LShr or AShr, see if we can
if (Constant *C0 = dyn_cast<Constant>(Op0)) {
if (Constant *C1 = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { C0, C1 };
- return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.TD, Q.TLI);
+ return ConstantFoldInstOperands(Opcode, C0->getType(), Ops, Q.DL, Q.TLI);
}
}
if (match(Op1, m_Zero()))
return Op0;
- // X shift by undef -> undef because it may shift by the bitwidth.
- if (match(Op1, m_Undef()))
- return Op1;
-
- // Shifting by the bitwidth or more is undefined.
- if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1))
- if (CI->getValue().getLimitedValue() >=
- Op0->getType()->getScalarSizeInBits())
- return UndefValue::get(Op0->getType());
+ // Fold undefined shifts.
+ if (isUndefShift(Op1))
+ return UndefValue::get(Op0->getType());
// If the operation is with the result of a select instruction, check whether
// operating on either branch of the select always yields the same value.
if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
/// SimplifyShlInst - Given operands for an Shl, see if we can
Value *X;
if (match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
return X;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
+ return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query (DL, TLI, DT),
RecursionLimit);
}
cast<OverflowingBinaryOperator>(Op0)->hasNoUnsignedWrap())
return X;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyLShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
+ return ::SimplifyLShrInst(Op0, Op1, isExact, Query (DL, TLI, DT),
RecursionLimit);
}
cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap())
return X;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyAShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
+ return ::SimplifyAShrInst(Op0, Op1, isExact, Query (DL, TLI, DT),
RecursionLimit);
}
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::And, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
return Constant::getNullValue(Op0->getType());
// (A | ?) & A = A
- Value *A = 0, *B = 0;
+ Value *A = nullptr, *B = nullptr;
if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
(A == Op1 || B == Op1))
return Op1;
MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyAndInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyAndInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
/// SimplifyOrInst - Given operands for an Or, see if we can
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Or, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
return Constant::getAllOnesValue(Op0->getType());
// (A & ?) | A = A
- Value *A = 0, *B = 0;
+ Value *A = nullptr, *B = nullptr;
if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
(A == Op1 || B == Op1))
return Op1;
if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyOrInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyOrInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
/// SimplifyXorInst - Given operands for a Xor, see if we can
if (Constant *CRHS = dyn_cast<Constant>(Op1)) {
Constant *Ops[] = { CLHS, CRHS };
return ConstantFoldInstOperands(Instruction::Xor, CLHS->getType(),
- Ops, Q.TD, Q.TLI);
+ Ops, Q.DL, Q.TLI);
}
// Canonicalize the constant to the RHS.
// "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
// for threading over phi nodes.
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *TD,
+Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyXorInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyXorInst(Op0, Op1, Query (DL, TLI, DT), RecursionLimit);
}
static Type *GetCompareTy(Value *Op) {
Value *LHS, Value *RHS) {
SelectInst *SI = dyn_cast<SelectInst>(V);
if (!SI)
- return 0;
+ return nullptr;
CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
if (!Cmp)
- return 0;
+ return nullptr;
Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
return Cmp;
if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
LHS == CmpRHS && RHS == CmpLHS)
return Cmp;
- return 0;
+ return nullptr;
}
// A significant optimization not implemented here is assuming that alloca
// If the C and C++ standards are ever made sufficiently restrictive in this
// area, it may be possible to update LLVM's semantics accordingly and reinstate
// this optimization.
-static Constant *computePointerICmp(const DataLayout *TD,
+static Constant *computePointerICmp(const DataLayout *DL,
const TargetLibraryInfo *TLI,
CmpInst::Predicate Pred,
Value *LHS, Value *RHS) {
RHS = RHS->stripPointerCasts();
// A non-null pointer is not equal to a null pointer.
- if (llvm::isKnownNonNull(LHS) && isa<ConstantPointerNull>(RHS) &&
+ if (llvm::isKnownNonNull(LHS, TLI) && isa<ConstantPointerNull>(RHS) &&
(Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
return ConstantInt::get(GetCompareTy(LHS),
!CmpInst::isTrueWhenEqual(Pred));
// We can only fold certain predicates on pointer comparisons.
switch (Pred) {
default:
- return 0;
+ return nullptr;
  // Equality comparisons are easy to fold.
case CmpInst::ICMP_EQ:
// numerous hazards. AliasAnalysis and its utilities rely on special rules
// governing loads and stores which don't apply to icmps. Also, AliasAnalysis
// doesn't need to guarantee pointer inequality when it says NoAlias.
- Constant *LHSOffset = stripAndComputeConstantOffsets(TD, LHS);
- Constant *RHSOffset = stripAndComputeConstantOffsets(TD, RHS);
+ Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
+ Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
// If LHS and RHS are related via constant offsets to the same base
// value, we can replace it with an icmp which just compares the offsets.
ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
uint64_t LHSSize, RHSSize;
if (LHSOffsetCI && RHSOffsetCI &&
- getObjectSize(LHS, LHSSize, TD, TLI) &&
- getObjectSize(RHS, RHSSize, TD, TLI)) {
+ getObjectSize(LHS, LHSSize, DL, TLI) &&
+ getObjectSize(RHS, RHSSize, DL, TLI)) {
const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
if (!LHSOffsetValue.isNegative() &&
return ConstantInt::get(GetCompareTy(LHS),
!CmpInst::isTrueWhenEqual(Pred));
}
+
+  // Even if a non-inbounds GEP occurs along the path, we can still optimize
+  // equality comparisons concerning the result. We avoid walking the whole
+  // chain again by starting where the last calls to
+  // stripAndComputeConstantOffsets left off and accumulating the offsets.
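+  // For example, "icmp eq (gep i8* %p, i64 4), (gep i8* %p, i64 4)" still folds
+  // to true when neither GEP is inbounds, because both sides reduce to the same
+  // base %p with equal accumulated offsets.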
+ Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
+ Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
+ if (LHS == RHS)
+ return ConstantExpr::getICmp(Pred,
+ ConstantExpr::getAdd(LHSOffset, LHSNoBound),
+ ConstantExpr::getAdd(RHSOffset, RHSNoBound));
}
// Otherwise, fail.
- return 0;
+ return nullptr;
}
/// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
if (Constant *CRHS = dyn_cast<Constant>(RHS))
- return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.TD, Q.TLI);
+ return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
// If we have a constant, make sure it is on the RHS.
std::swap(LHS, RHS);
return getTrue(ITy);
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_ULE:
- if (isKnownNonZero(LHS, Q.TD))
+ if (isKnownNonZero(LHS, Q.DL))
return getFalse(ITy);
break;
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_UGT:
- if (isKnownNonZero(LHS, Q.TD))
+ if (isKnownNonZero(LHS, Q.DL))
return getTrue(ITy);
break;
case ICmpInst::ICMP_SLT:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL);
if (LHSKnownNegative)
return getTrue(ITy);
if (LHSKnownNonNegative)
return getFalse(ITy);
break;
case ICmpInst::ICMP_SLE:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL);
if (LHSKnownNegative)
return getTrue(ITy);
- if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.TD))
+ if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL))
return getFalse(ITy);
break;
case ICmpInst::ICMP_SGE:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL);
if (LHSKnownNegative)
return getFalse(ITy);
if (LHSKnownNonNegative)
return getTrue(ITy);
break;
case ICmpInst::ICMP_SGT:
- ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.TD);
+ ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, Q.DL);
if (LHSKnownNegative)
return getFalse(ITy);
- if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.TD))
+ if (LHSKnownNonNegative && isKnownNonZero(LHS, Q.DL))
return getTrue(ITy);
break;
}
// Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
// if the integer type is the same size as the pointer type.
- if (MaxRecurse && Q.TD && isa<PtrToIntInst>(LI) &&
- Q.TD->getPointerSizeInBits() == DstTy->getPrimitiveSizeInBits()) {
+ if (MaxRecurse && Q.DL && isa<PtrToIntInst>(LI) &&
+ Q.DL->getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
// Transfer the cast to the constant.
if (Value *V = SimplifyICmpInst(Pred, SrcOp,
BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
if (MaxRecurse && (LBO || RBO)) {
// Analyze the case when either LHS or RHS is an add instruction.
- Value *A = 0, *B = 0, *C = 0, *D = 0;
+ Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
// LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
if (LBO && LBO->getOpcode() == Instruction::Add) {
}
}
+ // icmp pred (urem X, Y), Y
if (LBO && match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
bool KnownNonNegative, KnownNegative;
switch (Pred) {
break;
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.TD);
+ ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL);
if (!KnownNonNegative)
break;
// fall-through
return getFalse(ITy);
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
- ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.TD);
+ ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.DL);
if (!KnownNonNegative)
break;
// fall-through
return getTrue(ITy);
}
}
+
+ // icmp pred X, (urem Y, X)
if (RBO && match(RBO, m_URem(m_Value(), m_Specific(LHS)))) {
bool KnownNonNegative, KnownNegative;
switch (Pred) {
break;
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.TD);
+ ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL);
if (!KnownNonNegative)
break;
// fall-through
return getTrue(ITy);
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
- ComputeSignBit(RHS, KnownNonNegative, KnownNegative, Q.TD);
+ ComputeSignBit(LHS, KnownNonNegative, KnownNegative, Q.DL);
if (!KnownNonNegative)
break;
// fall-through
// Simplify comparisons of related pointers using a powerful, recursive
  // GEP-walk when we have target data available.
if (LHS->getType()->isPointerTy())
- if (Constant *C = computePointerICmp(Q.TD, Q.TLI, Pred, LHS, RHS))
+ if (Constant *C = computePointerICmp(Q.DL, Q.TLI, Pred, LHS, RHS))
return C;
if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyICmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
+ return ::SimplifyICmpInst(Predicate, LHS, RHS, Query (DL, TLI, DT),
RecursionLimit);
}
if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
if (Constant *CRHS = dyn_cast<Constant>(RHS))
- return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.TD, Q.TLI);
+ return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
// If we have a constant, make sure it is on the RHS.
std::swap(LHS, RHS);
if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
+ return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query (DL, TLI, DT),
RecursionLimit);
}
unsigned MaxRecurse) {
// select true, X, Y -> X
// select false, X, Y -> Y
- if (ConstantInt *CB = dyn_cast<ConstantInt>(CondVal))
- return CB->getZExtValue() ? TrueVal : FalseVal;
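+  // This also covers a vector condition that is a constant splat of true or
+  // false, since such splats report isAllOnesValue()/isNullValue() respectively.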
+ if (Constant *CB = dyn_cast<Constant>(CondVal)) {
+ if (CB->isAllOnesValue())
+ return TrueVal;
+ if (CB->isNullValue())
+ return FalseVal;
+ }
// select C, X, X -> X
if (TrueVal == FalseVal)
if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X
return TrueVal;
- return 0;
+ return nullptr;
}
Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Query (TD, TLI, DT),
+ return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Query (DL, TLI, DT),
RecursionLimit);
}
/// fold the result. If not, this returns null.
static Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const Query &Q, unsigned) {
// The type of the GEP pointer operand.
- PointerType *PtrTy = dyn_cast<PointerType>(Ops[0]->getType());
- // The GEP pointer operand is not a pointer, it's a vector of pointers.
- if (!PtrTy)
- return 0;
+ PointerType *PtrTy = cast<PointerType>(Ops[0]->getType()->getScalarType());
// getelementptr P -> P.
if (Ops.size() == 1)
// Compute the (pointer) type returned by the GEP instruction.
Type *LastType = GetElementPtrInst::getIndexedType(PtrTy, Ops.slice(1));
Type *GEPTy = PointerType::get(LastType, PtrTy->getAddressSpace());
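+  // A GEP whose pointer operand is a vector of pointers yields a vector of
+  // result pointers with the same number of lanes.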
+ if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType()))
+ GEPTy = VectorType::get(GEPTy, VT->getNumElements());
return UndefValue::get(GEPTy);
}
if (Ops.size() == 2) {
// getelementptr P, 0 -> P.
- if (ConstantInt *C = dyn_cast<ConstantInt>(Ops[1]))
- if (C->isZero())
- return Ops[0];
+ if (match(Ops[1], m_Zero()))
+ return Ops[0];
// getelementptr P, N -> P if P points to a type of zero size.
- if (Q.TD) {
+ if (Q.DL) {
Type *Ty = PtrTy->getElementType();
- if (Ty->isSized() && Q.TD->getTypeAllocSize(Ty) == 0)
+ if (Ty->isSized() && Q.DL->getTypeAllocSize(Ty) == 0)
return Ops[0];
}
}
// Check to see if this is constant foldable.
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (!isa<Constant>(Ops[i]))
- return 0;
+ return nullptr;
return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]), Ops.slice(1));
}
-Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *TD,
+Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyGEPInst(Ops, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyGEPInst(Ops, Query (DL, TLI, DT), RecursionLimit);
}
/// SimplifyInsertValueInst - Given operands for an InsertValueInst, see if we
return Agg;
}
- return 0;
+ return nullptr;
}
Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query (TD, TLI, DT),
+ return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query (DL, TLI, DT),
RecursionLimit);
}
static Value *SimplifyPHINode(PHINode *PN, const Query &Q) {
// If all of the PHI's incoming values are the same then replace the PHI node
// with the common value.
- Value *CommonValue = 0;
+ Value *CommonValue = nullptr;
bool HasUndefInput = false;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *Incoming = PN->getIncomingValue(i);
continue;
}
if (CommonValue && Incoming != CommonValue)
- return 0; // Not the same, bail out.
+ return nullptr; // Not the same, bail out.
CommonValue = Incoming;
}
// instruction, we cannot return X as the result of the PHI node unless it
// dominates the PHI block.
if (HasUndefInput)
- return ValueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : 0;
+ return ValueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
return CommonValue;
}
static Value *SimplifyTruncInst(Value *Op, Type *Ty, const Query &Q, unsigned) {
if (Constant *C = dyn_cast<Constant>(Op))
- return ConstantFoldInstOperands(Instruction::Trunc, Ty, C, Q.TD, Q.TLI);
+ return ConstantFoldInstOperands(Instruction::Trunc, Ty, C, Q.DL, Q.TLI);
- return 0;
+ return nullptr;
}
-Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD,
+Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyTruncInst(Op, Ty, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyTruncInst(Op, Ty, Query (DL, TLI, DT), RecursionLimit);
}
//=== Helper functions for higher up the class hierarchy.
if (Constant *CLHS = dyn_cast<Constant>(LHS))
if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
Constant *COps[] = {CLHS, CRHS};
- return ConstantFoldInstOperands(Opcode, LHS->getType(), COps, Q.TD,
+ return ConstantFoldInstOperands(Opcode, LHS->getType(), COps, Q.DL,
Q.TLI);
}
if (Value *V = ThreadBinOpOverPHI(Opcode, LHS, RHS, Q, MaxRecurse))
return V;
- return 0;
+ return nullptr;
}
}
Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyBinOp(Opcode, LHS, RHS, Query (TD, TLI, DT), RecursionLimit);
+ return ::SimplifyBinOp(Opcode, LHS, RHS, Query (DL, TLI, DT), RecursionLimit);
}
/// SimplifyCmpInst - Given operands for a CmpInst, see if we can
}
Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
+ return ::SimplifyCmpInst(Predicate, LHS, RHS, Query (DL, TLI, DT),
RecursionLimit);
}
case Intrinsic::trunc:
case Intrinsic::rint:
case Intrinsic::nearbyint:
+ case Intrinsic::round:
return true;
}
}
const Query &Q, unsigned MaxRecurse) {
// Perform idempotent optimizations
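  // For an idempotent intrinsic f, f(f(x)) simplifies to f(x); e.g.
  // llvm.trunc(llvm.trunc(x)) -> llvm.trunc(x).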
if (!IsIdempotent(IID))
- return 0;
+ return nullptr;
// Unary Ops
if (std::distance(ArgBegin, ArgEnd) == 1)
if (II->getIntrinsicID() == IID)
return II;
- return 0;
+ return nullptr;
}
template <typename IterTy>
Function *F = dyn_cast<Function>(V);
if (!F)
- return 0;
+ return nullptr;
if (unsigned IID = F->getIntrinsicID())
if (Value *Ret =
return Ret;
if (!canConstantFoldCallTo(F))
- return 0;
+ return nullptr;
SmallVector<Constant *, 4> ConstantArgs;
ConstantArgs.reserve(ArgEnd - ArgBegin);
for (IterTy I = ArgBegin, E = ArgEnd; I != E; ++I) {
Constant *C = dyn_cast<Constant>(*I);
if (!C)
- return 0;
+ return nullptr;
ConstantArgs.push_back(C);
}
}
Value *llvm::SimplifyCall(Value *V, User::op_iterator ArgBegin,
- User::op_iterator ArgEnd, const DataLayout *TD,
+ User::op_iterator ArgEnd, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyCall(V, ArgBegin, ArgEnd, Query(TD, TLI, DT),
+ return ::SimplifyCall(V, ArgBegin, ArgEnd, Query(DL, TLI, DT),
RecursionLimit);
}
Value *llvm::SimplifyCall(Value *V, ArrayRef<Value *> Args,
- const DataLayout *TD, const TargetLibraryInfo *TLI,
+ const DataLayout *DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return ::SimplifyCall(V, Args.begin(), Args.end(), Query(TD, TLI, DT),
+ return ::SimplifyCall(V, Args.begin(), Args.end(), Query(DL, TLI, DT),
RecursionLimit);
}
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
-Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *TD,
+Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
Value *Result;
switch (I->getOpcode()) {
default:
- Result = ConstantFoldInstruction(I, TD, TLI);
+ Result = ConstantFoldInstruction(I, DL, TLI);
break;
case Instruction::FAdd:
Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1),
- I->getFastMathFlags(), TD, TLI, DT);
+ I->getFastMathFlags(), DL, TLI, DT);
break;
case Instruction::Add:
Result = SimplifyAddInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
case Instruction::FSub:
Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
- I->getFastMathFlags(), TD, TLI, DT);
+ I->getFastMathFlags(), DL, TLI, DT);
break;
case Instruction::Sub:
Result = SimplifySubInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
case Instruction::FMul:
Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
- I->getFastMathFlags(), TD, TLI, DT);
+ I->getFastMathFlags(), DL, TLI, DT);
break;
case Instruction::Mul:
- Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::SDiv:
- Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::UDiv:
- Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::FDiv:
- Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::SRem:
- Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::URem:
- Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::FRem:
- Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::Shl:
Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->hasNoSignedWrap(),
cast<BinaryOperator>(I)->hasNoUnsignedWrap(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
case Instruction::LShr:
Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->isExact(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
case Instruction::AShr:
Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
cast<BinaryOperator>(I)->isExact(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
case Instruction::And:
- Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::Or:
- Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::Xor:
- Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::ICmp:
Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(),
- I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::FCmp:
Result = SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(),
- I->getOperand(0), I->getOperand(1), TD, TLI, DT);
+ I->getOperand(0), I->getOperand(1), DL, TLI, DT);
break;
case Instruction::Select:
Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
- I->getOperand(2), TD, TLI, DT);
+ I->getOperand(2), DL, TLI, DT);
break;
case Instruction::GetElementPtr: {
SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
- Result = SimplifyGEPInst(Ops, TD, TLI, DT);
+ Result = SimplifyGEPInst(Ops, DL, TLI, DT);
break;
}
case Instruction::InsertValue: {
InsertValueInst *IV = cast<InsertValueInst>(I);
Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
IV->getInsertedValueOperand(),
- IV->getIndices(), TD, TLI, DT);
+ IV->getIndices(), DL, TLI, DT);
break;
}
case Instruction::PHI:
- Result = SimplifyPHINode(cast<PHINode>(I), Query (TD, TLI, DT));
+ Result = SimplifyPHINode(cast<PHINode>(I), Query (DL, TLI, DT));
break;
case Instruction::Call: {
CallSite CS(cast<CallInst>(I));
Result = SimplifyCall(CS.getCalledValue(), CS.arg_begin(), CS.arg_end(),
- TD, TLI, DT);
+ DL, TLI, DT);
break;
}
case Instruction::Trunc:
- Result = SimplifyTruncInst(I->getOperand(0), I->getType(), TD, TLI, DT);
+ Result = SimplifyTruncInst(I->getOperand(0), I->getType(), DL, TLI, DT);
break;
}
/// This routine returns 'true' only when *it* simplifies something. The passed
/// in simplified value does not count toward this.
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
bool Simplified = false;
// If we have an explicit value to collapse to, do that round of the
// simplification loop by hand initially.
if (SimpleV) {
- for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
- ++UI)
- if (*UI != I)
- Worklist.insert(cast<Instruction>(*UI));
+ for (User *U : I->users())
+ if (U != I)
+ Worklist.insert(cast<Instruction>(U));
// Replace the instruction with its simplified value.
I->replaceAllUsesWith(SimpleV);
I = Worklist[Idx];
// See if this instruction simplifies.
- SimpleV = SimplifyInstruction(I, TD, TLI, DT);
+ SimpleV = SimplifyInstruction(I, DL, TLI, DT);
if (!SimpleV)
continue;
// Stash away all the uses of the old instruction so we can check them for
// recursive simplifications after a RAUW. This is cheaper than checking all
// uses of To on the recursive step in most cases.
- for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
- ++UI)
- Worklist.insert(cast<Instruction>(*UI));
+ for (User *U : I->users())
+ Worklist.insert(cast<Instruction>(U));
// Replace the instruction with its simplified value.
I->replaceAllUsesWith(SimpleV);
}
bool llvm::recursivelySimplifyInstruction(Instruction *I,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
- return replaceAndRecursivelySimplifyImpl(I, 0, TD, TLI, DT);
+ return replaceAndRecursivelySimplifyImpl(I, nullptr, DL, TLI, DT);
}
bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
- const DataLayout *TD,
+ const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT) {
assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
assert(SimpleV && "Must provide a simplified value.");
- return replaceAndRecursivelySimplifyImpl(I, SimpleV, TD, TLI, DT);
+ return replaceAndRecursivelySimplifyImpl(I, SimpleV, DL, TLI, DT);
}
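
// Minimal usage sketches (illustrative only; Inst, Simpler and the analysis
// pointers are assumed to be supplied by the calling pass):
//
//   // Replace Inst with the already-known equivalent value Simpler, then
//   // re-simplify any users that become foldable as a result.
//   replaceAndRecursivelySimplify(Inst, Simpler, DL, TLI, DT);
//
//   // Or, with no replacement known up front, try to simplify Inst itself and
//   // propagate any result through its users.
//   recursivelySimplifyInstruction(Inst, DL, TLI, DT);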