namespace {
struct Query {
- const DataLayout *DL;
+ const DataLayout &DL;
const TargetLibraryInfo *TLI;
const DominatorTree *DT;
AssumptionCache *AC;
const Instruction *CxtI;
- Query(const DataLayout *DL, const TargetLibraryInfo *tli,
+ Query(const DataLayout &DL, const TargetLibraryInfo *tli,
const DominatorTree *dt, AssumptionCache *ac = nullptr,
const Instruction *cxti = nullptr)
: DL(DL), TLI(tli), DT(dt), AC(ac), CxtI(cxti) {}
static Value *SimplifyAndInst(Value *, Value *, const Query &, unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const Query &,
unsigned);
+static Value *SimplifyFPBinOp(unsigned, Value *, Value *, const FastMathFlags &,
+ const Query &, unsigned);
static Value *SimplifyCmpInst(unsigned, Value *, Value *, const Query &,
unsigned);
static Value *SimplifyOrInst(Value *, Value *, const Query &, unsigned);
}
Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const DataLayout *DL, const TargetLibraryInfo *TLI,
+ const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
/// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
/// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
/// folding.
-static Constant *stripAndComputeConstantOffsets(const DataLayout *DL,
- Value *&V,
+static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
bool AllowNonInbounds = false) {
assert(V->getType()->getScalarType()->isPointerTy());
- // Without DataLayout, just be conservative for now. Theoretically, more could
- // be done in this case.
- if (!DL)
- return ConstantInt::get(IntegerType::get(V->getContext(), 64), 0);
-
- Type *IntPtrTy = DL->getIntPtrType(V->getType())->getScalarType();
+ Type *IntPtrTy = DL.getIntPtrType(V->getType())->getScalarType();
APInt Offset = APInt::getNullValue(IntPtrTy->getIntegerBitWidth());
// Even though we don't look through PHI nodes, we could be called on an
do {
if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
if ((!AllowNonInbounds && !GEP->isInBounds()) ||
- !GEP->accumulateConstantOffset(*DL, Offset))
+ !GEP->accumulateConstantOffset(DL, Offset))
break;
V = GEP->getPointerOperand();
} else if (Operator::getOpcode(V) == Instruction::BitCast) {
/// \brief Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns zero.
-static Constant *computePointerDifference(const DataLayout *DL,
- Value *LHS, Value *RHS) {
+static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
+ Value *RHS) {
Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
}
Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const DataLayout *DL, const TargetLibraryInfo *TLI,
+ const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
}
Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
- const DataLayout *DL,
+ const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
}
Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
- const DataLayout *DL,
+ const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
}
Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
- const DataLayout *DL,
+ const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
RecursionLimit);
}
-Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout *DL,
+Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return nullptr;
}
-Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
+Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return nullptr;
}
-Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
+Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
RecursionLimit);
}
-static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const Query &Q,
- unsigned) {
+static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
+ const Query &Q, unsigned) {
// undef / X -> undef (the undef could be a snan).
if (match(Op0, m_Undef()))
return Op0;
if (match(Op1, m_Undef()))
return Op1;
+ // 0 / X -> 0
+ // Requires that NaNs are off (X could be zero) and signed zeroes are
+ // ignored (X could be positive or negative, so the output sign is unknown).
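+ // E.g. "fdiv nnan nsz float 0.0, %x" simplifies to 0.0.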
+ if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZero()))
+ return Op0;
+
return nullptr;
}
-Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
+Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
+ const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyFDivInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
+ return ::SimplifyFDivInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
return nullptr;
}
-Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout *DL,
+Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return nullptr;
}
-Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout *DL,
+Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
RecursionLimit);
}
-static Value *SimplifyFRemInst(Value *Op0, Value *Op1, const Query &,
- unsigned) {
+static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
+ const Query &, unsigned) {
// undef % X -> undef (the undef could be a snan).
if (match(Op0, m_Undef()))
return Op0;
if (match(Op1, m_Undef()))
return Op1;
+ // 0 % X -> 0
+ // Requires that NaNs are off (X could be zero) and signed zeroes are
+ // ignored (X could be positive or negative, so the output sign is unknown).
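+ // E.g. "frem nnan nsz float 0.0, %x" simplifies to 0.0.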
+ if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZero()))
+ return Op0;
+
return nullptr;
}
-Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout *DL,
+Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
+ const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
- return ::SimplifyFRemInst(Op0, Op1, Query(DL, TLI, DT, AC, CxtI),
+ return ::SimplifyFRemInst(Op0, Op1, FMF, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
}
Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const DataLayout *DL, const TargetLibraryInfo *TLI,
+ const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query(DL, TLI, DT, AC, CxtI),
}
Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
- const DataLayout *DL,
+ const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
}
Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
- const DataLayout *DL,
+ const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
// A & (-A) = A if A is a power of two or zero.
if (match(Op0, m_Neg(m_Specific(Op1))) ||
match(Op1, m_Neg(m_Specific(Op0)))) {
- if (isKnownToBeAPowerOfTwo(Op0, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
+ if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
+ Q.DT))
return Op0;
- if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
+ if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
+ Q.DT))
return Op1;
}
return nullptr;
}
-Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *DL,
+Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return nullptr;
}
-Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *DL,
+Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return nullptr;
}
-Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *DL,
+Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
// If the C and C++ standards are ever made sufficiently restrictive in this
// area, it may be possible to update LLVM's semantics accordingly and reinstate
// this optimization.
-static Constant *computePointerICmp(const DataLayout *DL,
+static Constant *computePointerICmp(const DataLayout &DL,
const TargetLibraryInfo *TLI,
- CmpInst::Predicate Pred,
- Value *LHS, Value *RHS) {
+ CmpInst::Predicate Pred, Value *LHS,
+ Value *RHS) {
// First, skip past any trivial no-ops.
LHS = LHS->stripPointerCasts();
RHS = RHS->stripPointerCasts();
// Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
// if the integer type is the same size as the pointer type.
- if (MaxRecurse && Q.DL && isa<PtrToIntInst>(LI) &&
- Q.DL->getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
+ if (MaxRecurse && isa<PtrToIntInst>(LI) &&
+ Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
// Transfer the cast to the constant.
if (Value *V = SimplifyICmpInst(Pred, SrcOp,
}
Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout *DL,
+ const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
Instruction *CxtI) {
if (Pred == FCmpInst::FCMP_TRUE)
return ConstantInt::get(GetCompareTy(LHS), 1);
- if (isa<UndefValue>(RHS)) // fcmp pred X, undef -> undef
- return UndefValue::get(GetCompareTy(LHS));
+ // fcmp pred x, undef and fcmp pred undef, x
+ // fold to true if unordered, false if ordered
+ if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) {
+ // Choosing NaN for the undef will always make unordered comparison succeed
+ // and ordered comparison fail.
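+ // E.g. "fcmp ueq double %x, undef" folds to true, while
+ // "fcmp oeq double %x, undef" folds to false.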
+ return ConstantInt::get(GetCompareTy(LHS), CmpInst::isUnordered(Pred));
+ }
// fcmp x,x -> true/false. Not all compares are foldable.
if (LHS == RHS) {
}
// Handle fcmp with constant RHS
- if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
+ if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS)) {
// If the constant is a nan, see if we can fold the comparison based on it.
- if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHSC)) {
- if (CFP->getValueAPF().isNaN()) {
- if (FCmpInst::isOrdered(Pred)) // True "if ordered and foo"
+ if (CFP->getValueAPF().isNaN()) {
+ if (FCmpInst::isOrdered(Pred)) // True "if ordered and foo"
+ return ConstantInt::getFalse(CFP->getContext());
+ assert(FCmpInst::isUnordered(Pred) &&
+ "Comparison must be either ordered or unordered!");
+ // True if unordered.
+ return ConstantInt::getTrue(CFP->getContext());
+ }
+ // Check whether the constant is an infinity.
+ if (CFP->getValueAPF().isInfinity()) {
+ if (CFP->getValueAPF().isNegative()) {
+ switch (Pred) {
+ case FCmpInst::FCMP_OLT:
+ // No value is ordered and less than negative infinity.
return ConstantInt::getFalse(CFP->getContext());
- assert(FCmpInst::isUnordered(Pred) &&
- "Comparison must be either ordered or unordered!");
- // True if unordered.
- return ConstantInt::getTrue(CFP->getContext());
- }
- // Check whether the constant is an infinity.
- if (CFP->getValueAPF().isInfinity()) {
- if (CFP->getValueAPF().isNegative()) {
- switch (Pred) {
- case FCmpInst::FCMP_OLT:
- // No value is ordered and less than negative infinity.
- return ConstantInt::getFalse(CFP->getContext());
- case FCmpInst::FCMP_UGE:
- // All values are unordered with or at least negative infinity.
- return ConstantInt::getTrue(CFP->getContext());
- default:
- break;
- }
- } else {
- switch (Pred) {
- case FCmpInst::FCMP_OGT:
- // No value is ordered and greater than infinity.
- return ConstantInt::getFalse(CFP->getContext());
- case FCmpInst::FCMP_ULE:
- // All values are unordered with and at most infinity.
- return ConstantInt::getTrue(CFP->getContext());
- default:
- break;
- }
+ case FCmpInst::FCMP_UGE:
+ // All values are unordered with or at least negative infinity.
+ return ConstantInt::getTrue(CFP->getContext());
+ default:
+ break;
+ }
+ } else {
+ switch (Pred) {
+ case FCmpInst::FCMP_OGT:
+ // No value is ordered and greater than infinity.
+ return ConstantInt::getFalse(CFP->getContext());
+ case FCmpInst::FCMP_ULE:
+ // All values are unordered with and at most infinity.
+ return ConstantInt::getTrue(CFP->getContext());
+ default:
+ break;
}
}
}
+ if (CFP->getValueAPF().isZero()) {
+ switch (Pred) {
+ case FCmpInst::FCMP_UGE:
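+ // X >= 0 is true if X is NaN (unordered) or non-negative.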
+ if (CannotBeOrderedLessThanZero(LHS))
+ return ConstantInt::getTrue(CFP->getContext());
+ break;
+ case FCmpInst::FCMP_OLT:
+ // X < 0 is false if X is NaN (the compare is ordered) or non-negative.
+ if (CannotBeOrderedLessThanZero(LHS))
+ return ConstantInt::getFalse(CFP->getContext());
+ break;
+ default:
+ break;
+ }
+ }
}
// If the comparison is with the result of a select instruction, check whether
}
Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout *DL,
+ const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
}
Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
- const DataLayout *DL,
+ const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return Ops[0];
Type *Ty = PtrTy->getElementType();
- if (Q.DL && Ty->isSized()) {
+ if (Ty->isSized()) {
Value *P;
uint64_t C;
- uint64_t TyAllocSize = Q.DL->getTypeAllocSize(Ty);
+ uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
// getelementptr P, N -> P if P points to a type of zero size.
if (TyAllocSize == 0)
return Ops[0];
// The following transforms are only safe if the ptrtoint cast
// doesn't truncate the pointers.
if (Ops[1]->getType()->getScalarSizeInBits() ==
- Q.DL->getPointerSizeInBits(AS)) {
+ Q.DL.getPointerSizeInBits(AS)) {
auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * {
if (match(P, m_Zero()))
return Constant::getNullValue(GEPTy);
return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]), Ops.slice(1));
}
-Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *DL,
+Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
}
Value *llvm::SimplifyInsertValueInst(
- Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, const DataLayout *DL,
+ Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, const DataLayout &DL,
const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query(DL, TLI, DT, AC, CxtI),
return nullptr;
}
-Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *DL,
+Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return SimplifyFMulInst (LHS, RHS, FastMathFlags(), Q, MaxRecurse);
case Instruction::SDiv: return SimplifySDivInst(LHS, RHS, Q, MaxRecurse);
case Instruction::UDiv: return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse);
- case Instruction::FDiv: return SimplifyFDivInst(LHS, RHS, Q, MaxRecurse);
+ case Instruction::FDiv:
+ return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
case Instruction::SRem: return SimplifySRemInst(LHS, RHS, Q, MaxRecurse);
case Instruction::URem: return SimplifyURemInst(LHS, RHS, Q, MaxRecurse);
- case Instruction::FRem: return SimplifyFRemInst(LHS, RHS, Q, MaxRecurse);
+ case Instruction::FRem:
+ return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
case Instruction::Shl:
return SimplifyShlInst(LHS, RHS, /*isNSW*/false, /*isNUW*/false,
Q, MaxRecurse);
}
}
+/// SimplifyFPBinOp - Given operands for a BinaryOperator, see if we can
+/// fold the result. If not, this returns null.
+/// In contrast to SimplifyBinOp, try to use FastMathFlags when folding the
+/// result. If FastMathFlags are not needed, simply fall back to
+/// SimplifyBinOp.
+static Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
+ const FastMathFlags &FMF, const Query &Q,
+ unsigned MaxRecurse) {
+ switch (Opcode) {
+ case Instruction::FAdd:
+ return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
+ case Instruction::FSub:
+ return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
+ case Instruction::FMul:
+ return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
+ default:
+ return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
+ }
+}
+
Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const DataLayout *DL, const TargetLibraryInfo *TLI,
+ const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyBinOp(Opcode, LHS, RHS, Query(DL, TLI, DT, AC, CxtI),
RecursionLimit);
}
+Value *llvm::SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
+ const FastMathFlags &FMF, const DataLayout &DL,
+ const TargetLibraryInfo *TLI,
+ const DominatorTree *DT, AssumptionCache *AC,
+ const Instruction *CxtI) {
+ return ::SimplifyFPBinOp(Opcode, LHS, RHS, FMF, Query(DL, TLI, DT, AC, CxtI),
+ RecursionLimit);
+}
+
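+// A caller that already has an FPMathOperator can forward its fast-math
+// flags to this entry point. Hypothetical sketch (the surrounding names are
+// assumed caller variables, not part of this file):
+//
+//   if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
+//     if (Value *V = SimplifyFPBinOp(I.getOpcode(), I.getOperand(0),
+//                                    I.getOperand(1),
+//                                    FPOp->getFastMathFlags(), DL, TLI, DT,
+//                                    AC, &I))
+//       return V;
+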
/// SimplifyCmpInst - Given operands for a CmpInst, see if we can
/// fold the result.
static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout *DL, const TargetLibraryInfo *TLI,
+ const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyCmpInst(Predicate, LHS, RHS, Query(DL, TLI, DT, AC, CxtI),
}
Value *llvm::SimplifyCall(Value *V, User::op_iterator ArgBegin,
- User::op_iterator ArgEnd, const DataLayout *DL,
+ User::op_iterator ArgEnd, const DataLayout &DL,
const TargetLibraryInfo *TLI, const DominatorTree *DT,
AssumptionCache *AC, const Instruction *CxtI) {
return ::SimplifyCall(V, ArgBegin, ArgEnd, Query(DL, TLI, DT, AC, CxtI),
}
Value *llvm::SimplifyCall(Value *V, ArrayRef<Value *> Args,
- const DataLayout *DL, const TargetLibraryInfo *TLI,
+ const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC,
const Instruction *CxtI) {
return ::SimplifyCall(V, Args.begin(), Args.end(),
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
-Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *DL,
+Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout &DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT, AssumptionCache *AC) {
Value *Result;
AC, I);
break;
case Instruction::FDiv:
- Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
- AC, I);
+ Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1),
+ I->getFastMathFlags(), DL, TLI, DT, AC, I);
break;
case Instruction::SRem:
Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
AC, I);
break;
case Instruction::FRem:
- Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), DL, TLI, DT,
- AC, I);
+ Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1),
+ I->getFastMathFlags(), DL, TLI, DT, AC, I);
break;
case Instruction::Shl:
Result = SimplifyShlInst(I->getOperand(0), I->getOperand(1),
/// This routine returns 'true' only when *it* simplifies something. The passed
/// in simplified value does not count toward this.
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
- const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT,
AssumptionCache *AC) {
bool Simplified = false;
SmallSetVector<Instruction *, 8> Worklist;
+ const DataLayout &DL = I->getModule()->getDataLayout();
// If we have an explicit value to collapse to, do that round of the
// simplification loop by hand initially.
return Simplified;
}
-bool llvm::recursivelySimplifyInstruction(Instruction *I, const DataLayout *DL,
+bool llvm::recursivelySimplifyInstruction(Instruction *I,
const TargetLibraryInfo *TLI,
const DominatorTree *DT,
AssumptionCache *AC) {
- return replaceAndRecursivelySimplifyImpl(I, nullptr, DL, TLI, DT, AC);
+ return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC);
}
bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
- const DataLayout *DL,
const TargetLibraryInfo *TLI,
const DominatorTree *DT,
AssumptionCache *AC) {
assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
assert(SimpleV && "Must provide a simplified value.");
- return replaceAndRecursivelySimplifyImpl(I, SimpleV, DL, TLI, DT, AC);
+ return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC);
}