/// SimplifyDemandedBits - Check the specified integer node value to see if
/// it can be simplified or if things it uses can be simplified by bit
/// propagation. If so, return true.
- bool SimplifyDemandedBits(SDOperand Op, uint64_t Demanded = ~0ULL);
+ bool SimplifyDemandedBits(SDOperand Op) {
+ APInt Demanded = APInt::getAllOnesValue(Op.getValueSizeInBits());
+ return SimplifyDemandedBits(Op, Demanded);
+ }
+
+ bool SimplifyDemandedBits(SDOperand Op, const APInt &Demanded);
bool CombineToPreIndexedLoadStore(SDNode *N);
bool CombineToPostIndexedLoadStore(SDNode *N);
SDNode *MatchRotate(SDOperand LHS, SDOperand RHS);
SDOperand ReduceLoadWidth(SDNode *N);
- SDOperand GetDemandedBits(SDOperand V, uint64_t Mask);
+ SDOperand GetDemandedBits(SDOperand V, const APInt &Mask);
/// GatherAllAliases - Walk up chain skipping non-aliasing memory nodes,
/// looking for aliasing nodes and adding them to the Aliases vector.
public SelectionDAG::DAGUpdateListener {
DAGCombiner &DC;
public:
- WorkListRemover(DAGCombiner &dc) : DC(dc) {}
+ explicit WorkListRemover(DAGCombiner &dc) : DC(dc) {}
virtual void NodeDeleted(SDNode *N) {
DC.removeFromWorkList(N);
/// isNegatibleForFree - Return 1 if we can compute the negated form of the
/// specified expression for the same cost as the expression itself, or 2 if we
/// can compute the negated form more cheaply than the expression itself.
-static char isNegatibleForFree(SDOperand Op, unsigned Depth = 0) {
+static char isNegatibleForFree(SDOperand Op, bool AfterLegalize,
+ unsigned Depth = 0) {
// No compile time optimizations on this type.
if (Op.getValueType() == MVT::ppcf128)
return 0;
switch (Op.getOpcode()) {
default: return false;
case ISD::ConstantFP:
- return 1;
+ // Don't invert constant FP values after legalize. The negated constant
+ // isn't necessarily legal.
+ return AfterLegalize ? 0 : 1;
case ISD::FADD:
// FIXME: determine better conditions for this xform.
if (!UnsafeFPMath) return 0;
// -(A+B) -> -A - B
- if (char V = isNegatibleForFree(Op.getOperand(0), Depth+1))
+ if (char V = isNegatibleForFree(Op.getOperand(0), AfterLegalize, Depth+1))
return V;
// -(A+B) -> -B - A
- return isNegatibleForFree(Op.getOperand(1), Depth+1);
+ return isNegatibleForFree(Op.getOperand(1), AfterLegalize, Depth+1);
case ISD::FSUB:
// We can't turn -(A-B) into B-A when we honor signed zeros.
if (!UnsafeFPMath) return 0;
if (HonorSignDependentRoundingFPMath()) return 0;
// -(X*Y) -> (-X * Y) or (X*-Y)
- if (char V = isNegatibleForFree(Op.getOperand(0), Depth+1))
+ if (char V = isNegatibleForFree(Op.getOperand(0), AfterLegalize, Depth+1))
return V;
- return isNegatibleForFree(Op.getOperand(1), Depth+1);
+ return isNegatibleForFree(Op.getOperand(1), AfterLegalize, Depth+1);
case ISD::FP_EXTEND:
case ISD::FP_ROUND:
case ISD::FSIN:
- return isNegatibleForFree(Op.getOperand(0), Depth+1);
+ return isNegatibleForFree(Op.getOperand(0), AfterLegalize, Depth+1);
}
}
/// GetNegatedExpression - If isNegatibleForFree returns true, this function
/// returns the newly negated expression.
static SDOperand GetNegatedExpression(SDOperand Op, SelectionDAG &DAG,
- unsigned Depth = 0) {
+ bool AfterLegalize, unsigned Depth = 0) {
// fneg is removable even if it has multiple uses.
if (Op.getOpcode() == ISD::FNEG) return Op.getOperand(0);
assert(UnsafeFPMath);
// -(A+B) -> -A - B
- if (isNegatibleForFree(Op.getOperand(0), Depth+1))
+ if (isNegatibleForFree(Op.getOperand(0), AfterLegalize, Depth+1))
return DAG.getNode(ISD::FSUB, Op.getValueType(),
- GetNegatedExpression(Op.getOperand(0), DAG, Depth+1),
+ GetNegatedExpression(Op.getOperand(0), DAG,
+ AfterLegalize, Depth+1),
Op.getOperand(1));
// -(A+B) -> -B - A
return DAG.getNode(ISD::FSUB, Op.getValueType(),
- GetNegatedExpression(Op.getOperand(1), DAG, Depth+1),
+ GetNegatedExpression(Op.getOperand(1), DAG,
+ AfterLegalize, Depth+1),
Op.getOperand(0));
case ISD::FSUB:
// We can't turn -(A-B) into B-A when we honor signed zeros.
assert(!HonorSignDependentRoundingFPMath());
// -(X*Y) -> -X * Y
- if (isNegatibleForFree(Op.getOperand(0), Depth+1))
+ if (isNegatibleForFree(Op.getOperand(0), AfterLegalize, Depth+1))
return DAG.getNode(Op.getOpcode(), Op.getValueType(),
- GetNegatedExpression(Op.getOperand(0), DAG, Depth+1),
+ GetNegatedExpression(Op.getOperand(0), DAG,
+ AfterLegalize, Depth+1),
Op.getOperand(1));
// -(X*Y) -> X * -Y
return DAG.getNode(Op.getOpcode(), Op.getValueType(),
Op.getOperand(0),
- GetNegatedExpression(Op.getOperand(1), DAG, Depth+1));
+ GetNegatedExpression(Op.getOperand(1), DAG,
+ AfterLegalize, Depth+1));
case ISD::FP_EXTEND:
case ISD::FSIN:
return DAG.getNode(Op.getOpcode(), Op.getValueType(),
- GetNegatedExpression(Op.getOperand(0), DAG, Depth+1));
+ GetNegatedExpression(Op.getOperand(0), DAG,
+ AfterLegalize, Depth+1));
case ISD::FP_ROUND:
return DAG.getNode(ISD::FP_ROUND, Op.getValueType(),
- GetNegatedExpression(Op.getOperand(0), DAG, Depth+1),
+ GetNegatedExpression(Op.getOperand(0), DAG,
+ AfterLegalize, Depth+1),
Op.getOperand(1));
}
}
/// SimplifyDemandedBits - Check the specified integer node value to see if
/// it can be simplified or if things it uses can be simplified by bit
/// propagation. If so, return true.
-bool DAGCombiner::SimplifyDemandedBits(SDOperand Op, uint64_t Demanded) {
+bool DAGCombiner::SimplifyDemandedBits(SDOperand Op, const APInt &Demanded) {
TargetLowering::TargetLoweringOpt TLO(DAG, AfterLegalize);
- uint64_t KnownZero, KnownOne;
- Demanded &= MVT::getIntVTBitMask(Op.getValueType());
+ APInt KnownZero, KnownOne;
if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
return false;
// fold (a+b) -> (a|b) iff a and b share no bits.
if (MVT::isInteger(VT) && !MVT::isVector(VT)) {
- uint64_t LHSZero, LHSOne;
- uint64_t RHSZero, RHSOne;
- uint64_t Mask = MVT::getIntVTBitMask(VT);
+ APInt LHSZero, LHSOne;
+ APInt RHSZero, RHSOne;
+ APInt Mask = APInt::getAllOnesValue(MVT::getSizeInBits(VT));
DAG.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne);
- if (LHSZero) {
+ if (LHSZero.getBoolValue()) {
DAG.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne);
// If all possibly-set bits on the LHS are clear on the RHS, return an OR.
return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, MVT::Flag));
// fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits.
- uint64_t LHSZero, LHSOne;
- uint64_t RHSZero, RHSOne;
- uint64_t Mask = MVT::getIntVTBitMask(VT);
+ APInt LHSZero, LHSOne;
+ APInt RHSZero, RHSOne;
+ APInt Mask = APInt::getAllOnesValue(MVT::getSizeInBits(VT));
DAG.ComputeMaskedBits(N0, Mask, LHSZero, LHSOne);
- if (LHSZero) {
+ if (LHSZero.getBoolValue()) {
DAG.ComputeMaskedBits(N1, Mask, RHSZero, RHSOne);
// If all possibly-set bits on the LHS are clear on the RHS, return an OR.
// If we know the sign bits of both operands are zero, strength reduce to a
// udiv instead. Handles (X&15) /s 4 -> X&15 >> 2
if (!MVT::isVector(VT)) {
- uint64_t SignBit = MVT::getIntVTSignBit(VT);
- if (DAG.MaskedValueIsZero(N1, SignBit) &&
- DAG.MaskedValueIsZero(N0, SignBit))
+ if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::UDIV, N1.getValueType(), N0, N1);
}
// fold (sdiv X, pow2) -> simple ops after legalize
// If we know the sign bits of both operands are zero, strength reduce to a
// urem instead. Handles (X & 0x0FFFFFFF) %s 16 -> X&15
if (!MVT::isVector(VT)) {
- uint64_t SignBit = MVT::getIntVTSignBit(VT);
- if (DAG.MaskedValueIsZero(N1, SignBit) &&
- DAG.MaskedValueIsZero(N0, SignBit))
+ if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::UREM, VT, N0, N1);
}
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
MVT::ValueType VT = N1.getValueType();
+ unsigned BitWidth = MVT::getSizeInBits(VT);
// fold vector ops
if (MVT::isVector(VT)) {
if (N1C && N1C->isAllOnesValue())
return N0;
// if (and x, c) is known to be zero, return 0
- if (N1C && DAG.MaskedValueIsZero(SDOperand(N, 0), MVT::getIntVTBitMask(VT)))
+ if (N1C && DAG.MaskedValueIsZero(SDOperand(N, 0),
+ APInt::getAllOnesValue(BitWidth)))
return DAG.getConstant(0, VT);
// reassociate and
SDOperand RAND = ReassociateOps(ISD::AND, N0, N1);
return N1;
// fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits.
if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
- unsigned InMask = MVT::getIntVTBitMask(N0.getOperand(0).getValueType());
- if (DAG.MaskedValueIsZero(N0.getOperand(0),
- ~N1C->getValue() & InMask)) {
+ SDOperand N0Op0 = N0.getOperand(0);
+ APInt Mask = ~N1C->getAPIntValue();
+ Mask.trunc(N0Op0.getValueSizeInBits());
+ if (DAG.MaskedValueIsZero(N0Op0, Mask)) {
SDOperand Zext = DAG.getNode(ISD::ZERO_EXTEND, N0.getValueType(),
- N0.getOperand(0));
+ N0Op0);
// Replace uses of the AND with uses of the Zero extend node.
CombineTo(N, Zext);
MVT::ValueType EVT = LN0->getMemoryVT();
// If we zero all the possible extended bits, then we can turn this into
// a zextload if we are running before legalize or the operation is legal.
- if (DAG.MaskedValueIsZero(N1, ~0ULL << MVT::getSizeInBits(EVT)) &&
+ unsigned BitWidth = N1.getValueSizeInBits();
+ if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
+ BitWidth - MVT::getSizeInBits(EVT))) &&
(!AfterLegalize || TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) {
SDOperand ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
MVT::ValueType EVT = LN0->getMemoryVT();
// If we zero all the possible extended bits, then we can turn this into
// a zextload if we are running before legalize or the operation is legal.
- if (DAG.MaskedValueIsZero(N1, ~0ULL << MVT::getSizeInBits(EVT)) &&
+ unsigned BitWidth = N1.getValueSizeInBits();
+ if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
+ BitWidth - MVT::getSizeInBits(EVT))) &&
(!AfterLegalize || TLI.isLoadXLegal(ISD::ZEXTLOAD, EVT))) {
SDOperand ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, VT, LN0->getChain(),
LN0->getBasePtr(), LN0->getSrcValue(),
ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
MVT::ValueType VT = N1.getValueType();
- unsigned OpSizeInBits = MVT::getSizeInBits(VT);
// fold vector ops
if (MVT::isVector(VT)) {
if (N1C && N1C->isAllOnesValue())
return N1;
// fold (or x, c) -> c iff (x & ~c) == 0
- if (N1C &&
- DAG.MaskedValueIsZero(N0,~N1C->getValue() & (~0ULL>>(64-OpSizeInBits))))
+ if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue()))
return N1;
// reassociate or
SDOperand ROR = ReassociateOps(ISD::OR, N0, N1);
(N0.Val->hasOneUse() || N1.Val->hasOneUse())) {
// We can only do this xform if we know that bits from X that are set in C2
// but not in C1 are already zero. Likewise for Y.
- uint64_t LHSMask = cast<ConstantSDNode>(N0.getOperand(1))->getValue();
- uint64_t RHSMask = cast<ConstantSDNode>(N1.getOperand(1))->getValue();
+ const APInt &LHSMask =
+ cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
+ const APInt &RHSMask =
+ cast<ConstantSDNode>(N1.getOperand(1))->getAPIntValue();
if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) &&
DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) {
// If there is an AND of either shifted operand, apply it to the result.
if (LHSMask.Val || RHSMask.Val) {
- uint64_t Mask = MVT::getIntVTBitMask(VT);
+ APInt Mask = APInt::getAllOnesValue(OpSizeInBits);
if (LHSMask.Val) {
- uint64_t RHSBits = (1ULL << LShVal)-1;
- Mask &= cast<ConstantSDNode>(LHSMask)->getValue() | RHSBits;
+ APInt RHSBits = APInt::getLowBitsSet(OpSizeInBits, LShVal);
+ Mask &= cast<ConstantSDNode>(LHSMask)->getAPIntValue() | RHSBits;
}
if (RHSMask.Val) {
- uint64_t LHSBits = ~((1ULL << (OpSizeInBits-RShVal))-1);
- Mask &= cast<ConstantSDNode>(RHSMask)->getValue() | LHSBits;
+ APInt LHSBits = APInt::getHighBitsSet(OpSizeInBits, RShVal);
+ Mask &= cast<ConstantSDNode>(RHSMask)->getAPIntValue() | LHSBits;
}
Rot = DAG.getNode(ISD::AND, VT, Rot, DAG.getConstant(Mask, VT));
LHSShiftAmt == RHSShiftAmt.getOperand(1)) {
if (ConstantSDNode *SUBC =
dyn_cast<ConstantSDNode>(RHSShiftAmt.getOperand(0))) {
- if (SUBC->getValue() == OpSizeInBits)
+ if (SUBC->getValue() == OpSizeInBits) {
if (HasROTL)
return DAG.getNode(ISD::ROTL, VT, LHSShiftArg, LHSShiftAmt).Val;
else
return DAG.getNode(ISD::ROTR, VT, LHSShiftArg, RHSShiftAmt).Val;
+ }
}
}
RHSShiftAmt == LHSShiftAmt.getOperand(1)) {
if (ConstantSDNode *SUBC =
dyn_cast<ConstantSDNode>(LHSShiftAmt.getOperand(0))) {
- if (SUBC->getValue() == OpSizeInBits)
+ if (SUBC->getValue() == OpSizeInBits) {
if (HasROTL)
return DAG.getNode(ISD::ROTL, VT, LHSShiftArg, LHSShiftAmt).Val;
else
return DAG.getNode(ISD::ROTR, VT, LHSShiftArg, RHSShiftAmt).Val;
+ }
}
}
// the constant which would cause it to be modified for this
// operation.
if (N->getOpcode() == ISD::SRA) {
- uint64_t BinOpRHSSign = BinOpCst->getValue() >> MVT::getSizeInBits(VT)-1;
- if ((bool)BinOpRHSSign != HighBitSet)
+ bool BinOpRHSSignSet = BinOpCst->getAPIntValue().isNegative();
+ if (BinOpRHSSignSet != HighBitSet)
return SDOperand();
}
if (N1C && N1C->isNullValue())
return N0;
// if (shl x, c) is known to be zero, return 0
- if (DAG.MaskedValueIsZero(SDOperand(N, 0), MVT::getIntVTBitMask(VT)))
+ if (DAG.MaskedValueIsZero(SDOperand(N, 0),
+ APInt::getAllOnesValue(MVT::getSizeInBits(VT))))
return DAG.getConstant(0, VT);
if (N1C && SimplifyDemandedBits(SDOperand(N, 0)))
return SDOperand(N, 0);
// If the sign bit is known to be zero, switch this to a SRL.
- if (DAG.MaskedValueIsZero(N0, MVT::getIntVTSignBit(VT)))
+ if (DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::SRL, VT, N0, N1);
return N1C ? visitShiftByConstant(N, N1C->getValue()) : SDOperand();
if (N1C && N1C->isNullValue())
return N0;
// if (srl x, c) is known to be zero, return 0
- if (N1C && DAG.MaskedValueIsZero(SDOperand(N, 0), ~0ULL >> (64-OpSizeInBits)))
+ if (N1C && DAG.MaskedValueIsZero(SDOperand(N, 0),
+ APInt::getAllOnesValue(OpSizeInBits)))
return DAG.getConstant(0, VT);
// fold (srl (srl x, c1), c2) -> 0 or (srl x, c1+c2)
// fold (srl (ctlz x), "5") -> x iff x has one bit set (the low bit).
if (N1C && N0.getOpcode() == ISD::CTLZ &&
N1C->getValue() == Log2_32(MVT::getSizeInBits(VT))) {
- uint64_t KnownZero, KnownOne, Mask = MVT::getIntVTBitMask(VT);
+ APInt KnownZero, KnownOne;
+ APInt Mask = APInt::getAllOnesValue(MVT::getSizeInBits(VT));
DAG.ComputeMaskedBits(N0.getOperand(0), Mask, KnownZero, KnownOne);
// If any of the input bits are KnownOne, then the input couldn't be all
// zeros, thus the result of the srl will always be zero.
- if (KnownOne) return DAG.getConstant(0, VT);
+ if (KnownOne.getBoolValue()) return DAG.getConstant(0, VT);
    // If all of the bits input to the ctlz node are known to be zero, then
// the result of the ctlz is "32" and the result of the shift is one.
- uint64_t UnknownBits = ~KnownZero & Mask;
+ APInt UnknownBits = ~KnownZero & Mask;
if (UnknownBits == 0) return DAG.getConstant(1, VT);
// Otherwise, check to see if there is exactly one bit input to the ctlz.
// could be set on input to the CTLZ node. If this bit is set, the SRL
// will return 0, if it is clear, it returns 1. Change the CTLZ/SRL pair
// to an SRL,XOR pair, which is likely to simplify more.
- unsigned ShAmt = CountTrailingZeros_64(UnknownBits);
+ unsigned ShAmt = UnknownBits.countTrailingZeros();
SDOperand Op = N0.getOperand(0);
if (ShAmt) {
Op = DAG.getNode(ISD::SRL, VT, Op,
return SDOperand(N, 0); // Don't revisit N.
// fold selects based on a setcc into other things, such as min/max/abs
- if (N0.getOpcode() == ISD::SETCC)
+ if (N0.getOpcode() == ISD::SETCC) {
// FIXME:
// Check against MVT::Other for SELECT_CC, which is a workaround for targets
// having to say they don't support SELECT_CC on every type the DAG knows
N1, N2, N0.getOperand(2));
else
return SimplifySelect(N0, N1, N2);
+ }
return SDOperand();
}
} else if (X.getValueType() > VT) {
X = DAG.getNode(ISD::TRUNCATE, VT, X);
}
- uint64_t Mask = cast<ConstantSDNode>(N0.getOperand(1))->getValue();
+ APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
+ Mask.zext(MVT::getSizeInBits(VT));
return DAG.getNode(ISD::AND, VT, X, DAG.getConstant(Mask, VT));
}
} else if (X.getValueType() > VT) {
X = DAG.getNode(ISD::TRUNCATE, VT, X);
}
- uint64_t Mask = cast<ConstantSDNode>(N0.getOperand(1))->getValue();
+ APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
+ Mask.zext(MVT::getSizeInBits(VT));
return DAG.getNode(ISD::AND, VT, X, DAG.getConstant(Mask, VT));
}
/// GetDemandedBits - See if the specified operand can be simplified with the
/// knowledge that only the bits specified by Mask are used. If so, return the
/// simpler operand, otherwise return a null SDOperand.
-SDOperand DAGCombiner::GetDemandedBits(SDOperand V, uint64_t Mask) {
+SDOperand DAGCombiner::GetDemandedBits(SDOperand V, const APInt &Mask) {
switch (V.getOpcode()) {
default: break;
case ISD::OR:
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
// See if we can recursively simplify the LHS.
unsigned Amt = RHSC->getValue();
- Mask = (Mask << Amt) & MVT::getIntVTBitMask(V.getValueType());
- SDOperand SimplifyLHS = GetDemandedBits(V.getOperand(0), Mask);
+ APInt NewMask = Mask << Amt;
+ SDOperand SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask);
if (SimplifyLHS.Val) {
return DAG.getNode(ISD::SRL, V.getValueType(),
SimplifyLHS, V.getOperand(1));
SDOperand N1 = N->getOperand(1);
MVT::ValueType VT = N->getValueType(0);
MVT::ValueType EVT = cast<VTSDNode>(N1)->getVT();
+ unsigned VTBits = MVT::getSizeInBits(VT);
unsigned EVTBits = MVT::getSizeInBits(EVT);
// fold (sext_in_reg c1) -> c1
}
// fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero.
- if (DAG.MaskedValueIsZero(N0, 1ULL << (EVTBits-1)))
+ if (DAG.MaskedValueIsZero(N0, APInt::getBitsSet(VTBits, EVTBits-1, EVTBits)))
return DAG.getZeroExtendInReg(N0, EVT);
// fold operands of sext_in_reg based on knowledge that the top bits are not
// See if we can simplify the input to this truncate through knowledge that
// only the low bits are being used. For example "trunc (or (shl x, 8), y)"
// -> trunc y
- SDOperand Shorter = GetDemandedBits(N0, MVT::getIntVTBitMask(VT));
+ SDOperand Shorter =
+ GetDemandedBits(N0, APInt::getLowBitsSet(N0.getValueSizeInBits(),
+ MVT::getSizeInBits(VT)));
if (Shorter.Val)
return DAG.getNode(ISD::TRUNCATE, VT, Shorter);
SDOperand NewConv = DAG.getNode(ISD::BIT_CONVERT, VT, N0.getOperand(0));
AddToWorkList(NewConv.Val);
- uint64_t SignBit = MVT::getIntVTSignBit(VT);
+ APInt SignBit = APInt::getSignBit(MVT::getSizeInBits(VT));
if (N0.getOpcode() == ISD::FNEG)
return DAG.getNode(ISD::XOR, VT, NewConv, DAG.getConstant(SignBit, VT));
assert(N0.getOpcode() == ISD::FABS);
AddToWorkList(X.Val);
}
- uint64_t SignBit = MVT::getIntVTSignBit(VT);
+ APInt SignBit = APInt::getSignBit(MVT::getSizeInBits(VT));
X = DAG.getNode(ISD::AND, VT, X, DAG.getConstant(SignBit, VT));
AddToWorkList(X.Val);
for (unsigned i = 0, e = BV->getNumOperands(); i != e;
i += NumInputsPerOutput) {
bool isLE = TLI.isLittleEndian();
- uint64_t NewBits = 0;
+ APInt NewBits = APInt(DstBitSize, 0);
bool EltIsUndef = true;
for (unsigned j = 0; j != NumInputsPerOutput; ++j) {
// Shift the previously computed bits over.
if (Op.getOpcode() == ISD::UNDEF) continue;
EltIsUndef = false;
- NewBits |= cast<ConstantSDNode>(Op)->getValue();
+ NewBits |=
+ APInt(cast<ConstantSDNode>(Op)->getAPIntValue()).zext(DstBitSize);
}
if (EltIsUndef)
Ops.push_back(DAG.getNode(ISD::UNDEF, DstEltVT));
continue;
}
- uint64_t OpVal = cast<ConstantSDNode>(BV->getOperand(i))->getValue();
+ APInt OpVal = cast<ConstantSDNode>(BV->getOperand(i))->getAPIntValue();
for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
- unsigned ThisVal = OpVal & ((1ULL << DstBitSize)-1);
+ APInt ThisVal = APInt(OpVal).trunc(DstBitSize);
Ops.push_back(DAG.getConstant(ThisVal, DstEltVT));
- if (isS2V && i == 0 && j == 0 && ThisVal == OpVal)
+ if (isS2V && i == 0 && j == 0 && APInt(ThisVal).zext(SrcBitSize) == OpVal)
// Simply turn this into a SCALAR_TO_VECTOR of the new type.
return DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Ops[0]);
- OpVal >>= DstBitSize;
+ OpVal = OpVal.lshr(DstBitSize);
}
// For big endian targets, swap the order of the pieces of each element.
if (N0CFP && !N1CFP)
return DAG.getNode(ISD::FADD, VT, N1, N0);
// fold (A + (-B)) -> A-B
- if (isNegatibleForFree(N1) == 2)
- return DAG.getNode(ISD::FSUB, VT, N0, GetNegatedExpression(N1, DAG));
+ if (isNegatibleForFree(N1, AfterLegalize) == 2)
+ return DAG.getNode(ISD::FSUB, VT, N0,
+ GetNegatedExpression(N1, DAG, AfterLegalize));
// fold ((-A) + B) -> B-A
- if (isNegatibleForFree(N0) == 2)
- return DAG.getNode(ISD::FSUB, VT, N1, GetNegatedExpression(N0, DAG));
+ if (isNegatibleForFree(N0, AfterLegalize) == 2)
+ return DAG.getNode(ISD::FSUB, VT, N1,
+ GetNegatedExpression(N0, DAG, AfterLegalize));
// If allowed, fold (fadd (fadd x, c1), c2) -> (fadd x, (fadd c1, c2))
if (UnsafeFPMath && N1CFP && N0.getOpcode() == ISD::FADD &&
return DAG.getNode(ISD::FSUB, VT, N0, N1);
// fold (0-B) -> -B
if (UnsafeFPMath && N0CFP && N0CFP->getValueAPF().isZero()) {
- if (isNegatibleForFree(N1))
- return GetNegatedExpression(N1, DAG);
+ if (isNegatibleForFree(N1, AfterLegalize))
+ return GetNegatedExpression(N1, DAG, AfterLegalize);
return DAG.getNode(ISD::FNEG, VT, N1);
}
// fold (A-(-B)) -> A+B
- if (isNegatibleForFree(N1))
- return DAG.getNode(ISD::FADD, VT, N0, GetNegatedExpression(N1, DAG));
+ if (isNegatibleForFree(N1, AfterLegalize))
+ return DAG.getNode(ISD::FADD, VT, N0,
+ GetNegatedExpression(N1, DAG, AfterLegalize));
return SDOperand();
}
return DAG.getNode(ISD::FNEG, VT, N0);
// -X * -Y -> X*Y
- if (char LHSNeg = isNegatibleForFree(N0)) {
- if (char RHSNeg = isNegatibleForFree(N1)) {
+ if (char LHSNeg = isNegatibleForFree(N0, AfterLegalize)) {
+ if (char RHSNeg = isNegatibleForFree(N1, AfterLegalize)) {
// Both can be negated for free, check to see if at least one is cheaper
// negated.
if (LHSNeg == 2 || RHSNeg == 2)
- return DAG.getNode(ISD::FMUL, VT, GetNegatedExpression(N0, DAG),
- GetNegatedExpression(N1, DAG));
+ return DAG.getNode(ISD::FMUL, VT,
+ GetNegatedExpression(N0, DAG, AfterLegalize),
+ GetNegatedExpression(N1, DAG, AfterLegalize));
}
}
  // -X / -Y -> X/Y
- if (char LHSNeg = isNegatibleForFree(N0)) {
- if (char RHSNeg = isNegatibleForFree(N1)) {
+ if (char LHSNeg = isNegatibleForFree(N0, AfterLegalize)) {
+ if (char RHSNeg = isNegatibleForFree(N1, AfterLegalize)) {
// Both can be negated for free, check to see if at least one is cheaper
// negated.
if (LHSNeg == 2 || RHSNeg == 2)
- return DAG.getNode(ISD::FDIV, VT, GetNegatedExpression(N0, DAG),
- GetNegatedExpression(N1, DAG));
+ return DAG.getNode(ISD::FDIV, VT,
+ GetNegatedExpression(N0, DAG, AfterLegalize),
+ GetNegatedExpression(N1, DAG, AfterLegalize));
}
}
SDOperand DAGCombiner::visitFNEG(SDNode *N) {
SDOperand N0 = N->getOperand(0);
- if (isNegatibleForFree(N0))
- return GetNegatedExpression(N0, DAG);
+ if (isNegatibleForFree(N0, AfterLegalize))
+ return GetNegatedExpression(N0, DAG, AfterLegalize);
// Transform fneg(bitconvert(x)) -> bitconvert(x^sign) to avoid loading
// constant pool values.
// Check #2.
if (!isLoad) {
SDOperand Val = cast<StoreSDNode>(N)->getValue();
- if (Val == BasePtr || BasePtr.Val->isPredecessor(Val.Val))
+ if (Val == BasePtr || BasePtr.Val->isPredecessorOf(Val.Val))
return false;
}
SDNode *Use = *I;
if (Use == N)
continue;
- if (Use->isPredecessor(N))
+ if (Use->isPredecessorOf(N))
return false;
if (!((Use->getOpcode() == ISD::LOAD &&
cast<LoadSDNode>(Use)->getBasePtr() == Ptr) ||
- (Use->getOpcode() == ISD::STORE) &&
- cast<StoreSDNode>(Use)->getBasePtr() == Ptr))
+ (Use->getOpcode() == ISD::STORE &&
+ cast<StoreSDNode>(Use)->getBasePtr() == Ptr)))
RealUse = true;
}
if (!RealUse)
SDNode *UseUse = *III;
if (!((UseUse->getOpcode() == ISD::LOAD &&
cast<LoadSDNode>(UseUse)->getBasePtr().Val == Use) ||
- (UseUse->getOpcode() == ISD::STORE) &&
- cast<StoreSDNode>(UseUse)->getBasePtr().Val == Use))
+ (UseUse->getOpcode() == ISD::STORE &&
+ cast<StoreSDNode>(UseUse)->getBasePtr().Val == Use)))
RealUse = true;
}
continue;
// Check for #2
- if (!Op->isPredecessor(N) && !N->isPredecessor(Op)) {
+ if (!Op->isPredecessorOf(N) && !N->isPredecessorOf(Op)) {
SDOperand Result = isLoad
? DAG.getIndexedLoad(SDOperand(N,0), BasePtr, Offset, AM)
: DAG.getIndexedStore(SDOperand(N,0), BasePtr, Offset, AM);
// only the low bits are being used. For example:
// "truncstore (or (shl x, 8), y), i8" -> "truncstore y, i8"
SDOperand Shorter =
- GetDemandedBits(Value, MVT::getIntVTBitMask(ST->getMemoryVT()));
+ GetDemandedBits(Value,
+ APInt::getLowBitsSet(Value.getValueSizeInBits(),
+ MVT::getSizeInBits(ST->getMemoryVT())));
AddToWorkList(Value.Val);
if (Shorter.Val)
return DAG.getTruncStore(Chain, Shorter, Ptr, ST->getSrcValue(),
// Otherwise, see if we can simplify the operation with
// SimplifyDemandedBits, which only works if the value has a single use.
- if (SimplifyDemandedBits(Value, MVT::getIntVTBitMask(ST->getMemoryVT())))
+ if (SimplifyDemandedBits(Value,
+ APInt::getLowBitsSet(
+ Value.getValueSizeInBits(),
+ MVT::getSizeInBits(ST->getMemoryVT()))))
return SDOperand(N, 0);
}
if (TheSelect->getOpcode() == ISD::SELECT) {
// Check that the condition doesn't reach either load. If so, folding
// this will induce a cycle into the DAG.
- if (!LLD->isPredecessor(TheSelect->getOperand(0).Val) &&
- !RLD->isPredecessor(TheSelect->getOperand(0).Val)) {
+ if (!LLD->isPredecessorOf(TheSelect->getOperand(0).Val) &&
+ !RLD->isPredecessorOf(TheSelect->getOperand(0).Val)) {
Addr = DAG.getNode(ISD::SELECT, LLD->getBasePtr().getValueType(),
TheSelect->getOperand(0), LLD->getBasePtr(),
RLD->getBasePtr());
} else {
// Check that the condition doesn't reach either load. If so, folding
// this will induce a cycle into the DAG.
- if (!LLD->isPredecessor(TheSelect->getOperand(0).Val) &&
- !RLD->isPredecessor(TheSelect->getOperand(0).Val) &&
- !LLD->isPredecessor(TheSelect->getOperand(1).Val) &&
- !RLD->isPredecessor(TheSelect->getOperand(1).Val)) {
+ if (!LLD->isPredecessorOf(TheSelect->getOperand(0).Val) &&
+ !RLD->isPredecessorOf(TheSelect->getOperand(0).Val) &&
+ !LLD->isPredecessorOf(TheSelect->getOperand(1).Val) &&
+ !RLD->isPredecessorOf(TheSelect->getOperand(1).Val)) {
Addr = DAG.getNode(ISD::SELECT_CC, LLD->getBasePtr().getValueType(),
TheSelect->getOperand(0),
TheSelect->getOperand(1),