SDValue visitUMULO(SDNode *N);
SDValue visitSDIVREM(SDNode *N);
SDValue visitUDIVREM(SDNode *N);
+ SDValue visitIMINMAX(SDNode *N);
SDValue visitAND(SDNode *N);
SDValue visitANDLike(SDValue N0, SDValue N1, SDNode *LocReference);
SDValue visitOR(SDNode *N);
SDValue visitSRA(SDNode *N);
SDValue visitSRL(SDNode *N);
SDValue visitRotate(SDNode *N);
+ SDValue visitBSWAP(SDNode *N);
SDValue visitCTLZ(SDNode *N);
SDValue visitCTLZ_ZERO_UNDEF(SDNode *N);
SDValue visitCTTZ(SDNode *N);
SDValue visitZERO_EXTEND(SDNode *N);
SDValue visitANY_EXTEND(SDNode *N);
SDValue visitSIGN_EXTEND_INREG(SDNode *N);
+ SDValue visitSIGN_EXTEND_VECTOR_INREG(SDNode *N);
SDValue visitTRUNCATE(SDNode *N);
SDValue visitBITCAST(SDNode *N);
SDValue visitBUILD_PAIR(SDNode *N);
unsigned HiOp);
SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
SDValue CombineExtLoad(SDNode *N);
+ SDValue combineRepeatedFPDivisors(SDNode *N);
SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT);
SDValue BuildSDIV(SDNode *N);
SDValue BuildSDIVPow2(SDNode *N);
unsigned SequenceNum;
};
+ /// This is a helper function for MergeStoresOfConstantsOrVecElts. Returns a
+ /// constant build_vector of the stored constant values in Stores.
+ SDValue getMergedConstantVectorStore(SelectionDAG &DAG,
+ SDLoc SL,
+ ArrayRef<MemOpLink> Stores,
+ EVT Ty) const;
+
/// This is a helper function for MergeConsecutiveStores. When the source
/// elements of the consecutive stores are all constants or all extracted
/// vector elements, try to merge them into one larger store.
EVT MemVT, unsigned NumElem,
bool IsConstantSrc, bool UseVector);
+ /// This is a helper function for MergeConsecutiveStores.
+ /// Stores that may be merged are placed in StoreNodes.
+ /// Loads that may alias with those stores are placed in AliasLoadNodes.
+ void getStoreMergeAndAliasCandidates(
+ StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes,
+ SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes);
+
/// Merge consecutive store operations into a wide store.
/// This optimization uses wide integers or vectors when possible.
/// \return True if some memory operations were changed.
DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL)
: DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes),
OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) {
- auto *F = DAG.getMachineFunction().getFunction();
- ForCodeSize = F->hasFnAttribute(Attribute::OptimizeForSize) ||
- F->hasFnAttribute(Attribute::MinSize);
+ ForCodeSize = DAG.getMachineFunction().getFunction()->optForSize();
}
/// Runs the dag combiner on all nodes in the work list
assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
if (LHSTy.isVector())
return LHSTy;
- return LegalTypes ? TLI.getScalarShiftAmountTy(LHSTy)
- : TLI.getPointerTy();
+ auto &DL = DAG.getDataLayout();
+ return LegalTypes ? TLI.getScalarShiftAmountTy(DL, LHSTy)
+ : TLI.getPointerTy(DL);
}
/// This method returns true if we are running before type legalization or
/// Convenience wrapper around TargetLowering::getSetCCResultType
EVT getSetCCResultType(EVT VT) const {
- return TLI.getSetCCResultType(*DAG.getContext(), VT);
+ return TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
}
};
}
// fold (fneg (fsub 0, B)) -> B
if (ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(Op.getOperand(0)))
- if (N0CFP->getValueAPF().isZero())
+ if (N0CFP->isZero())
return Op.getOperand(1);
// fold (fneg (fsub A, B)) -> (fsub B, A)
continue;
if (N->use_empty()) {
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- Nodes.insert(N->getOperand(i).getNode());
+ for (const SDValue &ChildN : N->op_values())
+ Nodes.insert(ChildN.getNode());
removeFromWorklist(N);
DAG.DeleteNode(N);
LegalTypes = Level >= AfterLegalizeTypes;
// Add all the dag nodes to the worklist.
- for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
- E = DAG.allnodes_end(); I != E; ++I)
- AddToWorklist(I);
+ for (SDNode &Node : DAG.allnodes())
+ AddToWorklist(&Node);
// Create a dummy node (which is not added to allnodes), that adds a reference
// to the root node, preventing it from being deleted, and tracking any
// worklist as well. Because the worklist uniques things already, this
// won't repeatedly process the same operand.
CombinedNodes.insert(N);
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- if (!CombinedNodes.count(N->getOperand(i).getNode()))
- AddToWorklist(N->getOperand(i).getNode());
+ for (const SDValue &ChildN : N->op_values())
+ if (!CombinedNodes.count(ChildN.getNode()))
+ AddToWorklist(ChildN.getNode());
SDValue RV = combine(N);
case ISD::UMULO: return visitUMULO(N);
case ISD::SDIVREM: return visitSDIVREM(N);
case ISD::UDIVREM: return visitUDIVREM(N);
+ case ISD::SMIN:
+ case ISD::SMAX:
+ case ISD::UMIN:
+ case ISD::UMAX: return visitIMINMAX(N);
case ISD::AND: return visitAND(N);
case ISD::OR: return visitOR(N);
case ISD::XOR: return visitXOR(N);
case ISD::SRL: return visitSRL(N);
case ISD::ROTR:
case ISD::ROTL: return visitRotate(N);
+ case ISD::BSWAP: return visitBSWAP(N);
case ISD::CTLZ: return visitCTLZ(N);
case ISD::CTLZ_ZERO_UNDEF: return visitCTLZ_ZERO_UNDEF(N);
case ISD::CTTZ: return visitCTTZ(N);
case ISD::ZERO_EXTEND: return visitZERO_EXTEND(N);
case ISD::ANY_EXTEND: return visitANY_EXTEND(N);
case ISD::SIGN_EXTEND_INREG: return visitSIGN_EXTEND_INREG(N);
+ case ISD::SIGN_EXTEND_VECTOR_INREG: return visitSIGN_EXTEND_VECTOR_INREG(N);
case ISD::TRUNCATE: return visitTRUNCATE(N);
case ISD::BITCAST: return visitBITCAST(N);
case ISD::BUILD_PAIR: return visitBUILD_PAIR(N);
if (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1)) {
SDValue Ops[] = {N1, N0};
SDNode *CSENode;
- if (const BinaryWithFlagsSDNode *BinNode =
- dyn_cast<BinaryWithFlagsSDNode>(N)) {
+ if (const auto *BinNode = dyn_cast<BinaryWithFlagsSDNode>(N)) {
CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops,
- BinNode->Flags.hasNoUnsignedWrap(),
- BinNode->Flags.hasNoSignedWrap(),
- BinNode->Flags.hasExact());
+ &BinNode->Flags);
} else {
CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops);
}
SDNode *TF = TFs[i];
// Check each of the operands.
- for (unsigned i = 0, ie = TF->getNumOperands(); i != ie; ++i) {
- SDValue Op = TF->getOperand(i);
+ for (const SDValue &Op : TF->op_values()) {
switch (Op.getOpcode()) {
case ISD::EntryToken:
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
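+/// Returns true if \p V is a ConstantSDNode whose value is zero.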
+static bool isNullConstant(SDValue V) {
+ ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
+ return Const != nullptr && Const->isNullValue();
+}
+
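+/// Returns true if \p V is a ConstantFPSDNode with value +0.0.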
+static bool isNullFPConstant(SDValue V) {
+ ConstantFPSDNode *Const = dyn_cast<ConstantFPSDNode>(V);
+ return Const != nullptr && Const->isZero() && !Const->isNegative();
+}
+
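+/// Returns true if \p V is a ConstantSDNode with all bits set (i.e. -1).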
+static bool isAllOnesConstant(SDValue V) {
+ ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
+ return Const != nullptr && Const->isAllOnesValue();
+}
+
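+/// Returns true if \p V is a ConstantSDNode whose value is one.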
+static bool isOneConstant(SDValue V) {
+ ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
+ return Const != nullptr && Const->isOne();
+}
+
+/// If \p N is a ConstantSDNode with isOpaque() == false, return it cast to a
+/// ConstantSDNode pointer; otherwise return nullptr.
+static ConstantSDNode *getAsNonOpaqueConstant(SDValue N) {
+ ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N);
+ return Const != nullptr && !Const->isOpaque() ? Const : nullptr;
+}
+
SDValue DAGCombiner::visitADD(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
if (N1.getOpcode() == ISD::UNDEF)
return N1;
// fold (add c1, c2) -> c1+c2
- ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
+ ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
+ ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
if (N0C && N1C)
return DAG.FoldConstantArithmetic(ISD::ADD, SDLoc(N), VT, N0C, N1C);
// canonicalize constant to RHS
!isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::ADD, SDLoc(N), VT, N1, N0);
// fold (add x, 0) -> x
- if (N1C && N1C->isNullValue())
+ if (isNullConstant(N1))
return N0;
// fold (add Sym, c) -> Sym+c
if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0))
(uint64_t)N1C->getSExtValue());
// fold ((c1-A)+c2) -> (c1+c2)-A
if (N1C && N0.getOpcode() == ISD::SUB)
- if (ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
+ if (ConstantSDNode *N0C = getAsNonOpaqueConstant(N0.getOperand(0))) {
SDLoc DL(N);
return DAG.getNode(ISD::SUB, DL, VT,
DAG.getConstant(N1C->getAPIntValue()+
if (SDValue RADD = ReassociateOps(ISD::ADD, SDLoc(N), N0, N1))
return RADD;
// fold ((0-A) + B) -> B-A
- if (N0.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N0.getOperand(0)) &&
- cast<ConstantSDNode>(N0.getOperand(0))->isNullValue())
+ if (N0.getOpcode() == ISD::SUB && isNullConstant(N0.getOperand(0)))
return DAG.getNode(ISD::SUB, SDLoc(N), VT, N1, N0.getOperand(1));
// fold (A + (0-B)) -> A-B
- if (N1.getOpcode() == ISD::SUB && isa<ConstantSDNode>(N1.getOperand(0)) &&
- cast<ConstantSDNode>(N1.getOperand(0))->isNullValue())
+ if (N1.getOpcode() == ISD::SUB && isNullConstant(N1.getOperand(0)))
return DAG.getNode(ISD::SUB, SDLoc(N), VT, N0, N1.getOperand(1));
// fold (A+(B-A)) -> B
if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1))
}
// fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n))
- if (N1.getOpcode() == ISD::SHL &&
- N1.getOperand(0).getOpcode() == ISD::SUB)
- if (ConstantSDNode *C =
- dyn_cast<ConstantSDNode>(N1.getOperand(0).getOperand(0)))
- if (C->getAPIntValue() == 0)
- return DAG.getNode(ISD::SUB, SDLoc(N), VT, N0,
- DAG.getNode(ISD::SHL, SDLoc(N), VT,
- N1.getOperand(0).getOperand(1),
- N1.getOperand(1)));
- if (N0.getOpcode() == ISD::SHL &&
- N0.getOperand(0).getOpcode() == ISD::SUB)
- if (ConstantSDNode *C =
- dyn_cast<ConstantSDNode>(N0.getOperand(0).getOperand(0)))
- if (C->getAPIntValue() == 0)
- return DAG.getNode(ISD::SUB, SDLoc(N), VT, N1,
- DAG.getNode(ISD::SHL, SDLoc(N), VT,
- N0.getOperand(0).getOperand(1),
- N0.getOperand(1)));
+ if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::SUB &&
+ isNullConstant(N1.getOperand(0).getOperand(0)))
+ return DAG.getNode(ISD::SUB, SDLoc(N), VT, N0,
+ DAG.getNode(ISD::SHL, SDLoc(N), VT,
+ N1.getOperand(0).getOperand(1),
+ N1.getOperand(1)));
+ if (N0.getOpcode() == ISD::SHL && N0.getOperand(0).getOpcode() == ISD::SUB &&
+ isNullConstant(N0.getOperand(0).getOperand(0)))
+ return DAG.getNode(ISD::SUB, SDLoc(N), VT, N1,
+ DAG.getNode(ISD::SHL, SDLoc(N), VT,
+ N0.getOperand(0).getOperand(1),
+ N0.getOperand(1)));
if (N1.getOpcode() == ISD::AND) {
SDValue AndOp0 = N1.getOperand(0);
- ConstantSDNode *AndOp1 = dyn_cast<ConstantSDNode>(N1->getOperand(1));
unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0);
unsigned DestBits = VT.getScalarType().getSizeInBits();
// (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x))
// and similar xforms where the inner op is either ~0 or 0.
- if (NumSignBits == DestBits && AndOp1 && AndOp1->isOne()) {
+ if (NumSignBits == DestBits && isOneConstant(N1->getOperand(1))) {
SDLoc DL(N);
return DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), AndOp0);
}
return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N1, N0);
// fold (addc x, 0) -> x + no carry out
- if (N1C && N1C->isNullValue())
+ if (isNullConstant(N1))
return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE,
SDLoc(N), MVT::Glue));
if (N0 == N1)
return tryFoldToZero(SDLoc(N), TLI, VT, DAG, LegalOperations, LegalTypes);
// fold (sub c1, c2) -> c1-c2
- ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0.getNode());
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
+ ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
+ ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
if (N0C && N1C)
return DAG.FoldConstantArithmetic(ISD::SUB, SDLoc(N), VT, N0C, N1C);
// fold (sub x, c) -> (add x, -c)
DAG.getConstant(-N1C->getAPIntValue(), DL, VT));
}
// Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1)
- if (N0C && N0C->isAllOnesValue())
+ if (isAllOnesConstant(N0))
return DAG.getNode(ISD::XOR, SDLoc(N), VT, N1, N0);
// fold A-(A-B) -> B
if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(0))
}
// fold (subc x, 0) -> x + no borrow
- ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- if (N1C && N1C->isNullValue())
+ if (isNullConstant(N1))
return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, SDLoc(N),
MVT::Glue));
// Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) + no borrow
- if (N0C && N0C->isAllOnesValue())
+ if (isAllOnesConstant(N0))
return CombineTo(N, DAG.getNode(ISD::XOR, SDLoc(N), VT, N1, N0),
DAG.getNode(ISD::CARRY_FALSE, SDLoc(N),
MVT::Glue));
bool N0IsConst = false;
bool N1IsConst = false;
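+  // Opaque constants must not be folded, so also track whether each operand
+  // is an opaque constant.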
+ bool N1IsOpaqueConst = false;
+ bool N0IsOpaqueConst = false;
APInt ConstValue0, ConstValue1;
// fold vector ops
if (VT.isVector()) {
N1IsConst = isConstantSplatVector(N1.getNode(), ConstValue1);
} else {
N0IsConst = isa<ConstantSDNode>(N0);
- if (N0IsConst)
+ if (N0IsConst) {
ConstValue0 = cast<ConstantSDNode>(N0)->getAPIntValue();
+ N0IsOpaqueConst = cast<ConstantSDNode>(N0)->isOpaque();
+ }
N1IsConst = isa<ConstantSDNode>(N1);
- if (N1IsConst)
+ if (N1IsConst) {
ConstValue1 = cast<ConstantSDNode>(N1)->getAPIntValue();
+ N1IsOpaqueConst = cast<ConstantSDNode>(N1)->isOpaque();
+ }
}
// fold (mul c1, c2) -> c1*c2
- if (N0IsConst && N1IsConst)
+ if (N0IsConst && N1IsConst && !N0IsOpaqueConst && !N1IsOpaqueConst)
return DAG.FoldConstantArithmetic(ISD::MUL, SDLoc(N), VT,
N0.getNode(), N1.getNode());
DAG.getConstant(0, DL, VT), N0);
}
// fold (mul x, (1 << c)) -> x << c
- if (N1IsConst && ConstValue1.isPowerOf2() && IsFullSplat) {
+ if (N1IsConst && !N1IsOpaqueConst && ConstValue1.isPowerOf2() &&
+ IsFullSplat) {
SDLoc DL(N);
return DAG.getNode(ISD::SHL, DL, VT, N0,
DAG.getConstant(ConstValue1.logBase2(), DL,
getShiftAmountTy(N0.getValueType())));
}
// fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c
- if (N1IsConst && (-ConstValue1).isPowerOf2() && IsFullSplat) {
+ if (N1IsConst && !N1IsOpaqueConst && (-ConstValue1).isPowerOf2() &&
+ IsFullSplat) {
unsigned Log2Val = (-ConstValue1).logBase2();
SDLoc DL(N);
// FIXME: If the input is something that is easily negated (e.g. a
// fold (sdiv c1, c2) -> c1/c2
ConstantSDNode *N0C = isConstOrConstSplat(N0);
ConstantSDNode *N1C = isConstOrConstSplat(N1);
- if (N0C && N1C && !N1C->isNullValue())
+ if (N0C && N1C && !N0C->isOpaque() && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::SDIV, SDLoc(N), VT, N0C, N1C);
// fold (sdiv X, 1) -> X
- if (N1C && N1C->getAPIntValue() == 1LL)
+ if (N1C && N1C->isOne())
return N0;
// fold (sdiv X, -1) -> 0-X
if (N1C && N1C->isAllOnesValue()) {
N0, N1);
}
+ bool MinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
// fold (sdiv X, pow2) -> simple ops after legalize
- if (N1C && !N1C->isNullValue() && (N1C->getAPIntValue().isPowerOf2() ||
- (-N1C->getAPIntValue()).isPowerOf2())) {
- // If dividing by powers of two is cheap, then don't perform the following
- // fold.
- if (TLI.isPow2SDivCheap())
+ // FIXME: We check for the exact bit here because the generic lowering gives
+ // better results in that case. The target-specific lowering should learn how
+ // to handle exact sdivs efficiently.
+ if (N1C && !N1C->isNullValue() && !N1C->isOpaque() &&
+ !cast<BinaryWithFlagsSDNode>(N)->Flags.hasExact() &&
+ (N1C->getAPIntValue().isPowerOf2() ||
+ (-N1C->getAPIntValue()).isPowerOf2())) {
+ // If integer division is cheap, then don't perform the following fold.
+ if (TLI.isIntDivCheap(N->getValueType(0), MinSize))
return SDValue();
// Target-specific implementation of sdiv x, pow2.
- SDValue Res = BuildSDIVPow2(N);
- if (Res.getNode())
+ if (SDValue Res = BuildSDIVPow2(N))
return Res;
unsigned lg2 = N1C->getAPIntValue().countTrailingZeros();
// If integer divide is expensive and we satisfy the requirements, emit an
// alternate sequence.
- if (N1C && !TLI.isIntDivCheap()) {
- SDValue Op = BuildSDIV(N);
- if (Op.getNode()) return Op;
- }
+ if (N1C && !TLI.isIntDivCheap(N->getValueType(0), MinSize))
+ if (SDValue Op = BuildSDIV(N))
+ return Op;
// undef / X -> 0
if (N0.getOpcode() == ISD::UNDEF)
// fold (udiv c1, c2) -> c1/c2
ConstantSDNode *N0C = isConstOrConstSplat(N0);
ConstantSDNode *N1C = isConstOrConstSplat(N1);
- if (N0C && N1C && !N1C->isNullValue())
- return DAG.FoldConstantArithmetic(ISD::UDIV, SDLoc(N), VT, N0C, N1C);
+ if (N0C && N1C)
+ if (SDValue Folded = DAG.FoldConstantArithmetic(ISD::UDIV, SDLoc(N), VT,
+ N0C, N1C))
+ return Folded;
// fold (udiv x, (1 << c)) -> x >>u c
- if (N1C && N1C->getAPIntValue().isPowerOf2()) {
+ if (N1C && !N1C->isOpaque() && N1C->getAPIntValue().isPowerOf2()) {
SDLoc DL(N);
return DAG.getNode(ISD::SRL, DL, VT, N0,
DAG.getConstant(N1C->getAPIntValue().logBase2(), DL,
}
// fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
if (N1.getOpcode() == ISD::SHL) {
- if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
+ if (ConstantSDNode *SHC = getAsNonOpaqueConstant(N1.getOperand(0))) {
if (SHC->getAPIntValue().isPowerOf2()) {
EVT ADDVT = N1.getOperand(1).getValueType();
SDLoc DL(N);
}
}
}
+
// fold (udiv x, c) -> alternate
- if (N1C && !TLI.isIntDivCheap()) {
- SDValue Op = BuildUDIV(N);
- if (Op.getNode()) return Op;
- }
+ bool MinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
+ if (N1C && !TLI.isIntDivCheap(N->getValueType(0), MinSize))
+ if (SDValue Op = BuildUDIV(N))
+ return Op;
// undef / X -> 0
if (N0.getOpcode() == ISD::UNDEF)
// fold (srem c1, c2) -> c1%c2
ConstantSDNode *N0C = isConstOrConstSplat(N0);
ConstantSDNode *N1C = isConstOrConstSplat(N1);
- if (N0C && N1C && !N1C->isNullValue())
- return DAG.FoldConstantArithmetic(ISD::SREM, SDLoc(N), VT, N0C, N1C);
+ if (N0C && N1C)
+ if (SDValue Folded = DAG.FoldConstantArithmetic(ISD::SREM, SDLoc(N), VT,
+ N0C, N1C))
+ return Folded;
// If we know the sign bits of both operands are zero, strength reduce to a
// urem instead. Handles (X & 0x0FFFFFFF) %s 16 -> X&15
if (!VT.isVector()) {
// fold (urem c1, c2) -> c1%c2
ConstantSDNode *N0C = isConstOrConstSplat(N0);
ConstantSDNode *N1C = isConstOrConstSplat(N1);
- if (N0C && N1C && !N1C->isNullValue())
- return DAG.FoldConstantArithmetic(ISD::UREM, SDLoc(N), VT, N0C, N1C);
+ if (N0C && N1C)
+ if (SDValue Folded = DAG.FoldConstantArithmetic(ISD::UREM, SDLoc(N), VT,
+ N0C, N1C))
+ return Folded;
// fold (urem x, pow2) -> (and x, pow2-1)
- if (N1C && !N1C->isNullValue() && N1C->getAPIntValue().isPowerOf2()) {
+ if (N1C && !N1C->isNullValue() && !N1C->isOpaque() &&
+ N1C->getAPIntValue().isPowerOf2()) {
SDLoc DL(N);
return DAG.getNode(ISD::AND, DL, VT, N0,
DAG.getConstant(N1C->getAPIntValue() - 1, DL, VT));
}
// fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
if (N1.getOpcode() == ISD::SHL) {
- if (ConstantSDNode *SHC = dyn_cast<ConstantSDNode>(N1.getOperand(0))) {
+ if (ConstantSDNode *SHC = getAsNonOpaqueConstant(N1.getOperand(0))) {
if (SHC->getAPIntValue().isPowerOf2()) {
SDLoc DL(N);
SDValue Add =
SDValue DAGCombiner::visitMULHS(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
// fold (mulhs x, 0) -> 0
- if (N1C && N1C->isNullValue())
+ if (isNullConstant(N1))
return N1;
// fold (mulhs x, 1) -> (sra x, size(x)-1)
- if (N1C && N1C->getAPIntValue() == 1) {
+ if (isOneConstant(N1)) {
SDLoc DL(N);
return DAG.getNode(ISD::SRA, DL, N0.getValueType(), N0,
DAG.getConstant(N0.getValueType().getSizeInBits() - 1,
SDValue DAGCombiner::visitMULHU(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
// fold (mulhu x, 0) -> 0
- if (N1C && N1C->isNullValue())
+ if (isNullConstant(N1))
return N1;
// fold (mulhu x, 1) -> 0
- if (N1C && N1C->getAPIntValue() == 1)
+ if (isOneConstant(N1))
return DAG.getConstant(0, DL, N0.getValueType());
// fold (mulhu x, undef) -> 0
if (N0.getOpcode() == ISD::UNDEF || N1.getOpcode() == ISD::UNDEF)
}
SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
- SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS);
- if (Res.getNode()) return Res;
+ if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS))
+ return Res;
EVT VT = N->getValueType(0);
SDLoc DL(N);
}
SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
- SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU);
- if (Res.getNode()) return Res;
+ if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU))
+ return Res;
EVT VT = N->getValueType(0);
SDLoc DL(N);
}
SDValue DAGCombiner::visitSDIVREM(SDNode *N) {
- SDValue Res = SimplifyNodeWithTwoResults(N, ISD::SDIV, ISD::SREM);
- if (Res.getNode()) return Res;
+ if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::SDIV, ISD::SREM))
+ return Res;
return SDValue();
}
SDValue DAGCombiner::visitUDIVREM(SDNode *N) {
- SDValue Res = SimplifyNodeWithTwoResults(N, ISD::UDIV, ISD::UREM);
- if (Res.getNode()) return Res;
+ if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::UDIV, ISD::UREM))
+ return Res;
+
+ return SDValue();
+}
+
+SDValue DAGCombiner::visitIMINMAX(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ EVT VT = N0.getValueType();
+
+ // fold vector ops
+ if (VT.isVector())
+ if (SDValue FoldedVOp = SimplifyVBinOp(N))
+ return FoldedVOp;
+
+  // fold operation with constant operands
+ ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
+ ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
+ if (N0C && N1C)
+ return DAG.FoldConstantArithmetic(N->getOpcode(), SDLoc(N), VT, N0C, N1C);
+
+ // canonicalize constant to RHS
+ if (isConstantIntBuildVectorOrConstantInt(N0) &&
+ !isConstantIntBuildVectorOrConstantInt(N1))
+ return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N1, N0);
return SDValue();
}
if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 &&
LL.getValueType().isInteger()) {
// fold (and (seteq X, 0), (seteq Y, 0)) -> (seteq (or X, Y), 0)
- if (cast<ConstantSDNode>(LR)->isNullValue() && Op1 == ISD::SETEQ) {
+ if (isNullConstant(LR) && Op1 == ISD::SETEQ) {
SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(N0),
LR.getValueType(), LL, RL);
AddToWorklist(ORNode.getNode());
return DAG.getSetCC(SDLoc(LocReference), VT, ORNode, LR, Op1);
}
- // fold (and (seteq X, -1), (seteq Y, -1)) -> (seteq (and X, Y), -1)
- if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETEQ) {
- SDValue ANDNode = DAG.getNode(ISD::AND, SDLoc(N0),
- LR.getValueType(), LL, RL);
- AddToWorklist(ANDNode.getNode());
- return DAG.getSetCC(SDLoc(LocReference), VT, ANDNode, LR, Op1);
- }
- // fold (and (setgt X, -1), (setgt Y, -1)) -> (setgt (or X, Y), -1)
- if (cast<ConstantSDNode>(LR)->isAllOnesValue() && Op1 == ISD::SETGT) {
- SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(N0),
- LR.getValueType(), LL, RL);
- AddToWorklist(ORNode.getNode());
- return DAG.getSetCC(SDLoc(LocReference), VT, ORNode, LR, Op1);
+ if (isAllOnesConstant(LR)) {
+ // fold (and (seteq X, -1), (seteq Y, -1)) -> (seteq (and X, Y), -1)
+ if (Op1 == ISD::SETEQ) {
+ SDValue ANDNode = DAG.getNode(ISD::AND, SDLoc(N0),
+ LR.getValueType(), LL, RL);
+ AddToWorklist(ANDNode.getNode());
+ return DAG.getSetCC(SDLoc(LocReference), VT, ANDNode, LR, Op1);
+ }
+ // fold (and (setgt X, -1), (setgt Y, -1)) -> (setgt (or X, Y), -1)
+ if (Op1 == ISD::SETGT) {
+ SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(N0),
+ LR.getValueType(), LL, RL);
+ AddToWorklist(ORNode.getNode());
+ return DAG.getSetCC(SDLoc(LocReference), VT, ORNode, LR, Op1);
+ }
}
}
// Simplify (and (setne X, 0), (setne X, -1)) -> (setuge (add X, 1), 2)
if (LL == RL && isa<ConstantSDNode>(LR) && isa<ConstantSDNode>(RR) &&
Op0 == Op1 && LL.getValueType().isInteger() &&
- Op0 == ISD::SETNE && ((cast<ConstantSDNode>(LR)->isNullValue() &&
- cast<ConstantSDNode>(RR)->isAllOnesValue()) ||
- (cast<ConstantSDNode>(LR)->isAllOnesValue() &&
- cast<ConstantSDNode>(RR)->isNullValue()))) {
+ Op0 == ISD::SETNE && ((isNullConstant(LR) && isAllOnesConstant(RR)) ||
+ (isAllOnesConstant(LR) && isNullConstant(RR)))) {
SDLoc DL(N0);
SDValue ADDNode = DAG.getNode(ISD::ADD, DL, LL.getValueType(),
LL, DAG.getConstant(1, DL,
}
// fold (and c1, c2) -> c1&c2
- ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
+ ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- if (N0C && N1C)
+ if (N0C && N1C && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::AND, SDLoc(N), VT, N0C, N1C);
// canonicalize constant to RHS
if (isConstantIntBuildVectorOrConstantInt(N0) &&
!isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::AND, SDLoc(N), VT, N1, N0);
// fold (and x, -1) -> x
- if (N1C && N1C->isAllOnesValue())
+ if (isAllOnesConstant(N1))
return N0;
// if (and x, c) is known to be zero, return 0
unsigned BitWidth = VT.getScalarType().getSizeInBits();
// For big endian targets, we need to add an offset to the pointer
// to load the correct bytes. For little endian systems, we merely
// need to read fewer bytes from the same pointer.
- if (TLI.isBigEndian()) {
+ if (DAG.getDataLayout().isBigEndian()) {
unsigned LVTStoreBytes = LoadedVT.getStoreSize();
unsigned EVTStoreBytes = ExtVT.getStoreSize();
unsigned PtrOff = LVTStoreBytes - EVTStoreBytes;
return Combined;
// Simplify: (and (op x...), (op y...)) -> (op (and x, y))
- if (N0.getOpcode() == N1.getOpcode()) {
- SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
- if (Tmp.getNode()) return Tmp;
- }
+ if (N0.getOpcode() == N1.getOpcode())
+ if (SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N))
+ return Tmp;
// fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1)
// fold (and (sra)) -> (and (srl)) when possible.
ISD::CondCode Op0 = cast<CondCodeSDNode>(CC0)->get();
ISD::CondCode Op1 = cast<CondCodeSDNode>(CC1)->get();
- if (LR == RR && isa<ConstantSDNode>(LR) && Op0 == Op1 &&
- LL.getValueType().isInteger()) {
+ if (LR == RR && Op0 == Op1 && LL.getValueType().isInteger()) {
// fold (or (setne X, 0), (setne Y, 0)) -> (setne (or X, Y), 0)
// fold (or (setlt X, 0), (setlt Y, 0)) -> (setne (or X, Y), 0)
- if (cast<ConstantSDNode>(LR)->isNullValue() &&
- (Op1 == ISD::SETNE || Op1 == ISD::SETLT)) {
+ if (isNullConstant(LR) && (Op1 == ISD::SETNE || Op1 == ISD::SETLT)) {
SDValue ORNode = DAG.getNode(ISD::OR, SDLoc(LR),
LR.getValueType(), LL, RL);
AddToWorklist(ORNode.getNode());
}
// fold (or (setne X, -1), (setne Y, -1)) -> (setne (and X, Y), -1)
// fold (or (setgt X, -1), (setgt Y -1)) -> (setgt (and X, Y), -1)
- if (cast<ConstantSDNode>(LR)->isAllOnesValue() &&
- (Op1 == ISD::SETNE || Op1 == ISD::SETGT)) {
+ if (isAllOnesConstant(LR) && (Op1 == ISD::SETNE || Op1 == ISD::SETGT)) {
SDValue ANDNode = DAG.getNode(ISD::AND, SDLoc(LR),
LR.getValueType(), LL, RL);
AddToWorklist(ANDNode.getNode());
}
// (or (and X, C1), (and Y, C2)) -> (and (or X, Y), C3) if possible.
- if (N0.getOpcode() == ISD::AND &&
- N1.getOpcode() == ISD::AND &&
- N0.getOperand(1).getOpcode() == ISD::Constant &&
- N1.getOperand(1).getOpcode() == ISD::Constant &&
+ if (N0.getOpcode() == ISD::AND && N1.getOpcode() == ISD::AND &&
// Don't increase # computations.
(N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) {
// We can only do this xform if we know that bits from X that are set in C2
// but not in C1 are already zero. Likewise for Y.
- const APInt &LHSMask =
- cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
- const APInt &RHSMask =
- cast<ConstantSDNode>(N1.getOperand(1))->getAPIntValue();
-
- if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) &&
- DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) {
- SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
- N0.getOperand(0), N1.getOperand(0));
- SDLoc DL(LocReference);
- return DAG.getNode(ISD::AND, DL, VT, X,
- DAG.getConstant(LHSMask | RHSMask, DL, VT));
+ if (const ConstantSDNode *N0O1C =
+ getAsNonOpaqueConstant(N0.getOperand(1))) {
+ if (const ConstantSDNode *N1O1C =
+ getAsNonOpaqueConstant(N1.getOperand(1))) {
+ // We can only do this xform if we know that bits from X that are set in
+ // C2 but not in C1 are already zero. Likewise for Y.
+ const APInt &LHSMask = N0O1C->getAPIntValue();
+ const APInt &RHSMask = N1O1C->getAPIntValue();
+
+ if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) &&
+ DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) {
+ SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT,
+ N0.getOperand(0), N1.getOperand(0));
+ SDLoc DL(LocReference);
+ return DAG.getNode(ISD::AND, DL, VT, X,
+ DAG.getConstant(LHSMask | RHSMask, DL, VT));
+ }
+ }
}
}
}
// fold (or c1, c2) -> c1|c2
- ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
+ ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- if (N0C && N1C)
+ if (N0C && N1C && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N), VT, N0C, N1C);
// canonicalize constant to RHS
if (isConstantIntBuildVectorOrConstantInt(N0) &&
!isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::OR, SDLoc(N), VT, N1, N0);
// fold (or x, 0) -> x
- if (N1C && N1C->isNullValue())
+ if (isNullConstant(N1))
return N0;
// fold (or x, -1) -> -1
- if (N1C && N1C->isAllOnesValue())
+ if (isAllOnesConstant(N1))
return N1;
// fold (or x, c) -> c iff (x & ~c) == 0
if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue()))
return Combined;
// Recognize halfword bswaps as (bswap + rotl 16) or (bswap + shl 16)
- SDValue BSwap = MatchBSwapHWord(N, N0, N1);
- if (BSwap.getNode())
+ if (SDValue BSwap = MatchBSwapHWord(N, N0, N1))
return BSwap;
- BSwap = MatchBSwapHWordLow(N, N0, N1);
- if (BSwap.getNode())
+ if (SDValue BSwap = MatchBSwapHWordLow(N, N0, N1))
return BSwap;
// reassociate or
}
}
// Simplify: (or (op x...), (op y...)) -> (op (or x, y))
- if (N0.getOpcode() == N1.getOpcode()) {
- SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
- if (Tmp.getNode()) return Tmp;
- }
+ if (N0.getOpcode() == N1.getOpcode())
+ if (SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N))
+ return Tmp;
// See if this is some rotate idiom.
if (SDNode *Rot = MatchRotate(N0, N1, SDLoc(N)))
if (N1.getOpcode() == ISD::UNDEF)
return N1;
// fold (xor c1, c2) -> c1^c2
- ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
+ ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
+ ConstantSDNode *N1C = getAsNonOpaqueConstant(N1);
if (N0C && N1C)
return DAG.FoldConstantArithmetic(ISD::XOR, SDLoc(N), VT, N0C, N1C);
// canonicalize constant to RHS
!isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(ISD::XOR, SDLoc(N), VT, N1, N0);
// fold (xor x, 0) -> x
- if (N1C && N1C->isNullValue())
+ if (isNullConstant(N1))
return N0;
// reassociate xor
if (SDValue RXOR = ReassociateOps(ISD::XOR, SDLoc(N), N0, N1))
}
// fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y)))
- if (N1C && N1C->getAPIntValue() == 1 && N0.getOpcode() == ISD::ZERO_EXTEND &&
+ if (isOneConstant(N1) && N0.getOpcode() == ISD::ZERO_EXTEND &&
N0.getNode()->hasOneUse() &&
isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){
SDValue V = N0.getOperand(0);
}
// fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc
- if (N1C && N1C->getAPIntValue() == 1 && VT == MVT::i1 &&
+ if (isOneConstant(N1) && VT == MVT::i1 &&
(N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
if (isOneUseSetCC(RHS) || isOneUseSetCC(LHS)) {
}
}
// fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are constants
- if (N1C && N1C->isAllOnesValue() &&
+ if (isAllOnesConstant(N1) &&
(N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
if (isa<ConstantSDNode>(RHS) || isa<ConstantSDNode>(LHS)) {
}
// fold (xor (xor x, c1), c2) -> (xor x, (xor c1, c2))
if (N1C && N0.getOpcode() == ISD::XOR) {
- ConstantSDNode *N00C = dyn_cast<ConstantSDNode>(N0.getOperand(0));
- ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
- if (N00C) {
+ if (const ConstantSDNode *N00C = getAsNonOpaqueConstant(N0.getOperand(0))) {
SDLoc DL(N);
return DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(1),
DAG.getConstant(N1C->getAPIntValue() ^
N00C->getAPIntValue(), DL, VT));
}
- if (N01C) {
+ if (const ConstantSDNode *N01C = getAsNonOpaqueConstant(N0.getOperand(1))) {
SDLoc DL(N);
return DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(0),
DAG.getConstant(N1C->getAPIntValue() ^
// consistent result.
// - Pushing the zero left requires shifting one bits in from the right.
// A rotate left of ~1 is a nice way of achieving the desired result.
- if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT))
- if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode()))
- if (N0.getOpcode() == ISD::SHL)
- if (auto *ShlLHS = dyn_cast<ConstantSDNode>(N0.getOperand(0)))
- if (N1C->isAllOnesValue() && ShlLHS->isOne()) {
- SDLoc DL(N);
- return DAG.getNode(ISD::ROTL, DL, VT, DAG.getConstant(~1, DL, VT),
- N0.getOperand(1));
- }
+  if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT) &&
+      N0.getOpcode() == ISD::SHL && isAllOnesConstant(N1) &&
+      isOneConstant(N0.getOperand(0))) {
+ SDLoc DL(N);
+ return DAG.getNode(ISD::ROTL, DL, VT, DAG.getConstant(~1, DL, VT),
+ N0.getOperand(1));
+ }
// Simplify: xor (op x...), (op y...) -> (op (xor x, y))
- if (N0.getOpcode() == N1.getOpcode()) {
- SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N);
- if (Tmp.getNode()) return Tmp;
- }
+ if (N0.getOpcode() == N1.getOpcode())
+ if (SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N))
+ return Tmp;
// Simplify the expression using non-local knowledge.
if (!VT.isVector() &&
/// Handle transforms common to the three shifts, when the shift amount is a
/// constant.
SDValue DAGCombiner::visitShiftByConstant(SDNode *N, ConstantSDNode *Amt) {
- // We can't and shouldn't fold opaque constants.
- if (Amt->isOpaque())
- return SDValue();
-
SDNode *LHS = N->getOperand(0).getNode();
if (!LHS->hasOneUse()) return SDValue();
}
// We require the RHS of the binop to be a constant and not opaque as well.
- ConstantSDNode *BinOpCst = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
- if (!BinOpCst || BinOpCst->isOpaque()) return SDValue();
+ ConstantSDNode *BinOpCst = getAsNonOpaqueConstant(LHS->getOperand(1));
+ if (!BinOpCst) return SDValue();
// FIXME: disable this unless the input to the binop is a shift by a constant.
// If it is not a shift, it pessimizes some common cases like:
SDValue N01 = N->getOperand(0).getOperand(1);
if (ConstantSDNode *N01C = isConstOrConstSplat(N01)) {
- EVT TruncVT = N->getValueType(0);
- SDValue N00 = N->getOperand(0).getOperand(0);
- APInt TruncC = N01C->getAPIntValue();
- TruncC = TruncC.trunc(TruncVT.getScalarSizeInBits());
- SDLoc DL(N);
+ if (!N01C->isOpaque()) {
+ EVT TruncVT = N->getValueType(0);
+ SDValue N00 = N->getOperand(0).getOperand(0);
+ APInt TruncC = N01C->getAPIntValue();
+ TruncC = TruncC.trunc(TruncVT.getScalarSizeInBits());
+ SDLoc DL(N);
- return DAG.getNode(ISD::AND, DL, TruncVT,
- DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N00),
- DAG.getConstant(TruncC, DL, TruncVT));
+ return DAG.getNode(ISD::AND, DL, TruncVT,
+ DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N00),
+ DAG.getConstant(TruncC, DL, TruncVT));
+ }
}
}
}
// fold (shl c1, c2) -> c1<<c2
- ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
- if (N0C && N1C)
+ ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
+ if (N0C && N1C && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT, N0C, N1C);
// fold (shl 0, x) -> 0
- if (N0C && N0C->isNullValue())
+ if (isNullConstant(N0))
return N0;
// fold (shl x, c >= size(x)) -> undef
- if (N1C && N1C->getZExtValue() >= OpSizeInBits)
+ if (N1C && N1C->getAPIntValue().uge(OpSizeInBits))
return DAG.getUNDEF(VT);
// fold (shl x, 0) -> x
if (N1C && N1C->isNullValue())
}
}
+ // fold (shl (sr[la] exact X, C1), C2) -> (shl X, (C2-C1)) if C1 <= C2
+ // fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C2-C1)) if C1 > C2
+ if (N1C && (N0.getOpcode() == ISD::SRL || N0.getOpcode() == ISD::SRA) &&
+ cast<BinaryWithFlagsSDNode>(N0)->Flags.hasExact()) {
+ if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
+ uint64_t C1 = N0C1->getZExtValue();
+ uint64_t C2 = N1C->getZExtValue();
+ SDLoc DL(N);
+ if (C1 <= C2)
+ return DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
+ DAG.getConstant(C2 - C1, DL, N1.getValueType()));
+ return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0),
+ DAG.getConstant(C1 - C2, DL, N1.getValueType()));
+ }
+ }
+
// fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1), MASK) or
// (and (srl x, (sub c1, c2), MASK)
// Only fold this if the inner shift has no other uses -- if it does, folding
return DAG.getNode(ISD::ADD, SDLoc(N), VT, Shl0, Shl1);
}
- if (N1C) {
- SDValue NewSHL = visitShiftByConstant(N, N1C);
- if (NewSHL.getNode())
- return NewSHL;
+ // fold (shl (mul x, c1), c2) -> (mul x, c1 << c2)
+ if (N1C && N0.getOpcode() == ISD::MUL && N0.getNode()->hasOneUse()) {
+ if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
+      if (SDValue Folded =
+              DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N1), VT, N0C1, N1C))
+        return DAG.getNode(ISD::MUL, SDLoc(N), VT, N0.getOperand(0), Folded);
+ }
}
+ if (N1C && !N1C->isOpaque())
+ if (SDValue NewSHL = visitShiftByConstant(N, N1C))
+ return NewSHL;
+
return SDValue();
}
}
// fold (sra c1, c2) -> (sra c1, c2)
- ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
- if (N0C && N1C)
+ ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
+ if (N0C && N1C && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::SRA, SDLoc(N), VT, N0C, N1C);
// fold (sra 0, x) -> 0
- if (N0C && N0C->isNullValue())
+ if (isNullConstant(N0))
return N0;
// fold (sra -1, x) -> -1
- if (N0C && N0C->isAllOnesValue())
+ if (isAllOnesConstant(N0))
return N0;
// fold (sra x, (setge c, size(x))) -> undef
if (N1C && N1C->getZExtValue() >= OpSizeInBits)
if (DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, N1);
- if (N1C) {
- SDValue NewSRA = visitShiftByConstant(N, N1C);
- if (NewSRA.getNode())
+ if (N1C && !N1C->isOpaque())
+ if (SDValue NewSRA = visitShiftByConstant(N, N1C))
return NewSRA;
- }
return SDValue();
}
}
// fold (srl c1, c2) -> c1 >>u c2
- ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
- if (N0C && N1C)
+ ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
+ if (N0C && N1C && !N1C->isOpaque())
return DAG.FoldConstantArithmetic(ISD::SRL, SDLoc(N), VT, N0C, N1C);
// fold (srl 0, x) -> 0
- if (N0C && N0C->isNullValue())
+ if (isNullConstant(N0))
return N0;
// fold (srl x, c >= size(x)) -> undef
if (N1C && N1C->getZExtValue() >= OpSizeInBits)
// fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
if (N1.getOpcode() == ISD::TRUNCATE &&
N1.getOperand(0).getOpcode() == ISD::AND) {
- SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode());
- if (NewOp1.getNode())
+ if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, NewOp1);
}
if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
return SDValue(N, 0);
- if (N1C) {
- SDValue NewSRL = visitShiftByConstant(N, N1C);
- if (NewSRL.getNode())
+ if (N1C && !N1C->isOpaque())
+ if (SDValue NewSRL = visitShiftByConstant(N, N1C))
return NewSRL;
- }
// Attempt to convert a srl of a load into a narrower zero-extending load.
- SDValue NarrowLoad = ReduceLoadWidth(N);
- if (NarrowLoad.getNode())
+ if (SDValue NarrowLoad = ReduceLoadWidth(N))
return NarrowLoad;
// Here is a common situation. We want to optimize:
return SDValue();
}
+SDValue DAGCombiner::visitBSWAP(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+
+ // fold (bswap c1) -> c2
+ if (isConstantIntBuildVectorOrConstantInt(N0))
+ return DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N0);
+ // fold (bswap (bswap x)) -> x
+ if (N0.getOpcode() == ISD::BSWAP)
+ return N0->getOperand(0);
+ return SDValue();
+}
+
SDValue DAGCombiner::visitCTLZ(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
// fold (ctlz c1) -> c2
- if (isa<ConstantSDNode>(N0))
+ if (isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::CTLZ, SDLoc(N), VT, N0);
return SDValue();
}
EVT VT = N->getValueType(0);
// fold (ctlz_zero_undef c1) -> c2
- if (isa<ConstantSDNode>(N0))
+ if (isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SDLoc(N), VT, N0);
return SDValue();
}
EVT VT = N->getValueType(0);
// fold (cttz c1) -> c2
- if (isa<ConstantSDNode>(N0))
+ if (isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::CTTZ, SDLoc(N), VT, N0);
return SDValue();
}
EVT VT = N->getValueType(0);
// fold (cttz_zero_undef c1) -> c2
- if (isa<ConstantSDNode>(N0))
+ if (isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, SDLoc(N), VT, N0);
return SDValue();
}
EVT VT = N->getValueType(0);
// fold (ctpop c1) -> c2
- if (isa<ConstantSDNode>(N0))
+ if (isConstantIntBuildVectorOrConstantInt(N0))
return DAG.getNode(ISD::CTPOP, SDLoc(N), VT, N0);
return SDValue();
}
// fold (select C, X, X) -> X
if (N1 == N2)
return N1;
- // fold (select true, X, Y) -> X
- ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
- if (N0C && !N0C->isNullValue())
- return N1;
- // fold (select false, X, Y) -> Y
- if (N0C && N0C->isNullValue())
- return N2;
+ if (const ConstantSDNode *N0C = dyn_cast<const ConstantSDNode>(N0)) {
+ // fold (select true, X, Y) -> X
+ // fold (select false, X, Y) -> Y
+ return !N0C->isNullValue() ? N1 : N2;
+ }
// fold (select C, 1, X) -> (or C, X)
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
- if (VT == MVT::i1 && N1C && N1C->getAPIntValue() == 1)
+ if (VT == MVT::i1 && isOneConstant(N1))
return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N2);
// fold (select C, 0, 1) -> (xor C, 1)
// We can't do this reliably if integer based booleans have different contents
// undiscoverable (or not reasonably discoverable). For example, it could be
// in another basic block or it could require searching a complicated
// expression.
- ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2);
if (VT.isInteger() &&
(VT0 == MVT::i1 || (VT0.isInteger() &&
TLI.getBooleanContents(false, false) ==
TLI.getBooleanContents(false, true) &&
TLI.getBooleanContents(false, false) ==
TargetLowering::ZeroOrOneBooleanContent)) &&
- N1C && N2C && N1C->isNullValue() && N2C->getAPIntValue() == 1) {
+ isNullConstant(N1) && isOneConstant(N2)) {
SDValue XORNode;
if (VT == VT0) {
SDLoc DL(N);
return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, XORNode);
}
// fold (select C, 0, X) -> (and (not C), X)
- if (VT == VT0 && VT == MVT::i1 && N1C && N1C->isNullValue()) {
+ if (VT == VT0 && VT == MVT::i1 && isNullConstant(N1)) {
SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
AddToWorklist(NOTNode.getNode());
return DAG.getNode(ISD::AND, SDLoc(N), VT, NOTNode, N2);
}
// fold (select C, X, 1) -> (or (not C), X)
- if (VT == VT0 && VT == MVT::i1 && N2C && N2C->getAPIntValue() == 1) {
+ if (VT == VT0 && VT == MVT::i1 && isOneConstant(N2)) {
SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT);
AddToWorklist(NOTNode.getNode());
return DAG.getNode(ISD::OR, SDLoc(N), VT, NOTNode, N1);
}
// fold (select C, X, 0) -> (and C, X)
- if (VT == MVT::i1 && N2C && N2C->isNullValue())
+ if (VT == MVT::i1 && isNullConstant(N2))
return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, N1);
// fold (select X, X, Y) -> (or X, Y)
// fold (select X, 1, Y) -> (or X, Y)
- if (VT == MVT::i1 && (N0 == N1 || (N1C && N1C->getAPIntValue() == 1)))
+ if (VT == MVT::i1 && (N0 == N1 || isOneConstant(N1)))
return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N2);
// fold (select X, Y, X) -> (and X, Y)
// fold (select X, Y, 0) -> (and X, Y)
- if (VT == MVT::i1 && (N0 == N2 || (N2C && N2C->getAPIntValue() == 0)))
+ if (VT == MVT::i1 && (N0 == N2 || isNullConstant(N2)))
return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, N1);
// If we can fold this based on the true/false value, do so.
if (SimplifySelectOps(N, N1, N2))
return SDValue(N, 0); // Don't revisit N.
- // fold selects based on a setcc into other things, such as min/max/abs
- if (N0.getOpcode() == ISD::SETCC) {
- // select x, y (fcmp lt x, y) -> fminnum x, y
- // select x, y (fcmp gt x, y) -> fmaxnum x, y
- //
- // This is OK if we don't care about what happens if either operand is a
- // NaN.
- //
-
- // FIXME: Instead of testing for UnsafeFPMath, this should be checking for
- // no signed zeros as well as no nans.
- const TargetOptions &Options = DAG.getTarget().Options;
- if (Options.UnsafeFPMath &&
- VT.isFloatingPoint() && N0.hasOneUse() &&
- DAG.isKnownNeverNaN(N1) && DAG.isKnownNeverNaN(N2)) {
- ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
-
- SDValue FMinMax =
- combineMinNumMaxNum(SDLoc(N), VT, N0.getOperand(0), N0.getOperand(1),
- N1, N2, CC, TLI, DAG);
- if (FMinMax)
- return FMinMax;
- }
-
- if ((!LegalOperations &&
- TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT)) ||
- TLI.isOperationLegal(ISD::SELECT_CC, VT))
- return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT,
- N0.getOperand(0), N0.getOperand(1),
- N1, N2, N0.getOperand(2));
- return SimplifySelect(SDLoc(N), N0, N1, N2);
- }
-
if (VT0 == MVT::i1) {
- if (TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT)) {
- // select (and Cond0, Cond1), X, Y
- // -> select Cond0, (select Cond1, X, Y), Y
- if (N0->getOpcode() == ISD::AND && N0->hasOneUse()) {
- SDValue Cond0 = N0->getOperand(0);
- SDValue Cond1 = N0->getOperand(1);
- SDValue InnerSelect = DAG.getNode(ISD::SELECT, SDLoc(N),
- N1.getValueType(), Cond1, N1, N2);
+    // The code in this block deals with the following 2 equivalences:
+    // select(C0|C1, x, y) <=> select(C0, x, select(C1, x, y))
+    // select(C0&C1, x, y) <=> select(C0, select(C1, x, y), y)
+    // The target can specify its preferred form with the
+    // shouldNormalizeToSelectSequence() callback. However, we always transform
+    // to the right-hand form if the inner select already exists in the DAG,
+    // and we always transform to the left-hand form if we know that we can
+    // further optimize the combination of the conditions.
+    bool normalizeToSequence =
+      TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT);
+ // select (and Cond0, Cond1), X, Y
+ // -> select Cond0, (select Cond1, X, Y), Y
+ if (N0->getOpcode() == ISD::AND && N0->hasOneUse()) {
+ SDValue Cond0 = N0->getOperand(0);
+ SDValue Cond1 = N0->getOperand(1);
+ SDValue InnerSelect = DAG.getNode(ISD::SELECT, SDLoc(N),
+ N1.getValueType(), Cond1, N1, N2);
+ if (normalizeToSequence || !InnerSelect.use_empty())
return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Cond0,
InnerSelect, N2);
- }
- // select (or Cond0, Cond1), X, Y -> select Cond0, X, (select Cond1, X, Y)
- if (N0->getOpcode() == ISD::OR && N0->hasOneUse()) {
- SDValue Cond0 = N0->getOperand(0);
- SDValue Cond1 = N0->getOperand(1);
- SDValue InnerSelect = DAG.getNode(ISD::SELECT, SDLoc(N),
- N1.getValueType(), Cond1, N1, N2);
+ }
+ // select (or Cond0, Cond1), X, Y -> select Cond0, X, (select Cond1, X, Y)
+ if (N0->getOpcode() == ISD::OR && N0->hasOneUse()) {
+ SDValue Cond0 = N0->getOperand(0);
+ SDValue Cond1 = N0->getOperand(1);
+ SDValue InnerSelect = DAG.getNode(ISD::SELECT, SDLoc(N),
+ N1.getValueType(), Cond1, N1, N2);
+ if (normalizeToSequence || !InnerSelect.use_empty())
return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Cond0, N1,
InnerSelect);
- }
}
// select Cond0, (select Cond1, X, Y), Y -> select (and Cond0, Cond1), X, Y
- if (N1->getOpcode() == ISD::SELECT) {
+ if (N1->getOpcode() == ISD::SELECT && N1->hasOneUse()) {
SDValue N1_0 = N1->getOperand(0);
SDValue N1_1 = N1->getOperand(1);
SDValue N1_2 = N1->getOperand(2);
if (N1_2 == N2 && N0.getValueType() == N1_0.getValueType()) {
// Create the actual and node if we can generate good code for it.
- if (!TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT)) {
+ if (!normalizeToSequence) {
SDValue And = DAG.getNode(ISD::AND, SDLoc(N), N0.getValueType(),
N0, N1_0);
return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), And,
}
}
// select Cond0, X, (select Cond1, X, Y) -> select (or Cond0, Cond1), X, Y
- if (N2->getOpcode() == ISD::SELECT) {
+ if (N2->getOpcode() == ISD::SELECT && N2->hasOneUse()) {
SDValue N2_0 = N2->getOperand(0);
SDValue N2_1 = N2->getOperand(1);
SDValue N2_2 = N2->getOperand(2);
if (N2_1 == N1 && N0.getValueType() == N2_0.getValueType()) {
// Create the actual or node if we can generate good code for it.
- if (!TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT)) {
+ if (!normalizeToSequence) {
SDValue Or = DAG.getNode(ISD::OR, SDLoc(N), N0.getValueType(),
N0, N2_0);
return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Or,
}
}
+ // fold selects based on a setcc into other things, such as min/max/abs
+ if (N0.getOpcode() == ISD::SETCC) {
+ // select x, y (fcmp lt x, y) -> fminnum x, y
+ // select x, y (fcmp gt x, y) -> fmaxnum x, y
+ //
+ // This is OK if we don't care about what happens if either operand is a
+ // NaN.
+
+ // FIXME: Instead of testing for UnsafeFPMath, this should be checking for
+ // no signed zeros as well as no nans.
+ const TargetOptions &Options = DAG.getTarget().Options;
+ if (Options.UnsafeFPMath &&
+ VT.isFloatingPoint() && N0.hasOneUse() &&
+ DAG.isKnownNeverNaN(N1) && DAG.isKnownNeverNaN(N2)) {
+ ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
+
+ if (SDValue FMinMax = combineMinNumMaxNum(SDLoc(N), VT, N0.getOperand(0),
+ N0.getOperand(1), N1, N2, CC,
+ TLI, DAG))
+ return FMinMax;
+ }
+
+ if ((!LegalOperations &&
+ TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT)) ||
+ TLI.isOperationLegal(ISD::SELECT_CC, VT))
+ return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT,
+ N0.getOperand(0), N0.getOperand(1),
+ N1, N2, N0.getOperand(2));
+ return SimplifySelect(SDLoc(N), N0, N1, N2);
+ }
+
return SDValue();
}
std::tie(IndexLo, IndexHi) = DAG.SplitVector(MSC->getIndex(), DL);
MachineMemOperand *MMO = DAG.getMachineFunction().
- getMachineMemOperand(MSC->getPointerInfo(),
+ getMachineMemOperand(MSC->getPointerInfo(),
MachineMemOperand::MOStore, LoMemVT.getStoreSize(),
Alignment, MSC->getAAInfo(), MSC->getRanges());
std::tie(IndexLo, IndexHi) = DAG.SplitVector(Index, DL);
MachineMemOperand *MMO = DAG.getMachineFunction().
- getMachineMemOperand(MGT->getPointerInfo(),
+ getMachineMemOperand(MGT->getPointerInfo(),
MachineMemOperand::MOLoad, LoMemVT.getStoreSize(),
Alignment, MGT->getAAInfo(), MGT->getRanges());
if (N1.getOpcode() == ISD::CONCAT_VECTORS &&
N2.getOpcode() == ISD::CONCAT_VECTORS &&
ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
- SDValue CV = ConvertSelectToConcatVector(N, DAG);
- if (CV.getNode())
+ if (SDValue CV = ConvertSelectToConcatVector(N, DAG))
return CV;
}
SDLoc(N));
}
-// tryToFoldExtendOfConstant - Try to fold a sext/zext/aext
-// dag node into a ConstantSDNode or a build_vector of constants.
-// This function is called by the DAGCombiner when visiting sext/zext/aext
-// dag nodes (see for example method DAGCombiner::visitSIGN_EXTEND).
-// Vector extends are not folded if operations are legal; this is to
-// avoid introducing illegal build_vector dag nodes.
+/// Try to fold a sext/zext/aext dag node into a ConstantSDNode or
+/// a build_vector of constants.
+/// This function is called by the DAGCombiner when visiting sext/zext/aext
+/// dag nodes (see for example method DAGCombiner::visitSIGN_EXTEND).
+/// Vector extends are not folded if operations are legal; this is to
+/// avoid introducing illegal build_vector dag nodes.
static SDNode *tryToFoldExtendOfConstant(SDNode *N, const TargetLowering &TLI,
SelectionDAG &DAG, bool LegalTypes,
bool LegalOperations) {
EVT VT = N->getValueType(0);
assert((Opcode == ISD::SIGN_EXTEND || Opcode == ISD::ZERO_EXTEND ||
- Opcode == ISD::ANY_EXTEND) && "Expected EXTEND dag node in input!");
+ Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG)
+ && "Expected EXTEND dag node in input!");
// fold (sext c1) -> c1
// fold (zext c1) -> c1
// We can fold this node into a build_vector.
unsigned VTBits = SVT.getSizeInBits();
unsigned EVTBits = N0->getValueType(0).getScalarType().getSizeInBits();
- unsigned ShAmt = VTBits - EVTBits;
SmallVector<SDValue, 8> Elts;
- unsigned NumElts = N0->getNumOperands();
+ unsigned NumElts = VT.getVectorNumElements();
SDLoc DL(N);
for (unsigned i=0; i != NumElts; ++i) {
}
SDLoc DL(Op);
- ConstantSDNode *CurrentND = cast<ConstantSDNode>(Op);
- const APInt &C = APInt(VTBits, CurrentND->getAPIntValue().getZExtValue());
- if (Opcode == ISD::SIGN_EXTEND)
- Elts.push_back(DAG.getConstant(C.shl(ShAmt).ashr(ShAmt).getZExtValue(),
- DL, SVT));
+    // Get the constant value and, if needed, truncate it to the width of the
+    // source element type; a build_vector may hold constants wider than its
+    // scalar element type.
+ APInt C = cast<ConstantSDNode>(Op)->getAPIntValue().zextOrTrunc(EVTBits);
+ if (Opcode == ISD::SIGN_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG)
+ Elts.push_back(DAG.getConstant(C.sext(VTBits), DL, SVT));
else
- Elts.push_back(DAG.getConstant(C.shl(ShAmt).lshr(ShAmt).getZExtValue(),
- DL, SVT));
+ Elts.push_back(DAG.getConstant(C.zext(VTBits), DL, SVT));
}
return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Elts).getNode();
if (N0.getOpcode() == ISD::TRUNCATE) {
// fold (sext (truncate (load x))) -> (sext (smaller load x))
// fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n)))
- SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
- if (NarrowLoad.getNode()) {
+ if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
SDNode* oye = N0.getNode()->getOperand(0).getNode();
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
SDValue Op1 = N->getOperand(1);
assert(Op0.getValueType() == Op1.getValueType());
- ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
- ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
- if (COp0 && COp0->isNullValue())
+ if (isNullConstant(Op0))
Op = Op1;
- else if (COp1 && COp1->isNullValue())
+ else if (isNullConstant(Op1))
Op = Op0;
else
return false;
// fold (zext (truncate (load x))) -> (zext (smaller load x))
// fold (zext (truncate (srl (load x), c))) -> (zext (small load (x+c/n)))
if (N0.getOpcode() == ISD::TRUNCATE) {
- SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
- if (NarrowLoad.getNode()) {
+ if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
SDNode* oye = N0.getNode()->getOperand(0).getNode();
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
}
// fold (zext (truncate x)) -> (and x, mask)
- if (N0.getOpcode() == ISD::TRUNCATE &&
- (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT))) {
-
+ if (N0.getOpcode() == ISD::TRUNCATE) {
// fold (zext (truncate (load x))) -> (zext (smaller load x))
// fold (zext (truncate (srl (load x), c))) -> (zext (smaller load (x+c/n)))
- SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
- if (NarrowLoad.getNode()) {
- SDNode* oye = N0.getNode()->getOperand(0).getNode();
+ if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
+ SDNode *oye = N0.getNode()->getOperand(0).getNode();
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
// CombineTo deleted the truncate, if needed, but not what's under it.
AddToWorklist(oye);
}
- return SDValue(N, 0); // Return N so it doesn't get rechecked!
+ return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
- SDValue Op = N0.getOperand(0);
- if (Op.getValueType().bitsLT(VT)) {
- Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, Op);
- AddToWorklist(Op.getNode());
- } else if (Op.getValueType().bitsGT(VT)) {
- Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op);
- AddToWorklist(Op.getNode());
+ EVT SrcVT = N0.getOperand(0).getValueType();
+ EVT MinVT = N0.getValueType();
+
+    // Try to mask before the extension to avoid having to generate a larger
+    // mask, possibly over several sub-vectors.
+ if (SrcVT.bitsLT(VT)) {
+ if (!LegalOperations || (TLI.isOperationLegal(ISD::AND, SrcVT) &&
+ TLI.isOperationLegal(ISD::ZERO_EXTEND, VT))) {
+ SDValue Op = N0.getOperand(0);
+ Op = DAG.getZeroExtendInReg(Op, SDLoc(N), MinVT.getScalarType());
+ AddToWorklist(Op.getNode());
+ return DAG.getZExtOrTrunc(Op, SDLoc(N), VT);
+ }
+ }
+
+ if (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT)) {
+ SDValue Op = N0.getOperand(0);
+ if (SrcVT.bitsLT(VT)) {
+ Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, Op);
+ AddToWorklist(Op.getNode());
+ } else if (SrcVT.bitsGT(VT)) {
+ Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op);
+ AddToWorklist(Op.getNode());
+ }
+ return DAG.getZeroExtendInReg(Op, SDLoc(N), MinVT.getScalarType());
}
- return DAG.getZeroExtendInReg(Op, SDLoc(N),
- N0.getValueType().getScalarType());
}
// Fold (zext (and (trunc x), cst)) -> (and x, cst),
// fold (aext (truncate (load x))) -> (aext (smaller load x))
// fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n)))
if (N0.getOpcode() == ISD::TRUNCATE) {
- SDValue NarrowLoad = ReduceLoadWidth(N0.getNode());
- if (NarrowLoad.getNode()) {
+ if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) {
SDNode* oye = N0.getNode()->getOperand(0).getNode();
if (NarrowLoad.getNode() != N0.getNode()) {
CombineTo(N0.getNode(), NarrowLoad);
// Only look at single-use SRLs.
if (!V.getNode()->hasOneUse())
break;
- if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(V.getOperand(1))) {
+ if (ConstantSDNode *RHSC = getAsNonOpaqueConstant(V.getOperand(1))) {
// See if we can recursively simplify the LHS.
unsigned Amt = RHSC->getZExtValue();
// Watch out for shift count overflow though.
if (Amt >= Mask.getBitWidth()) break;
APInt NewMask = Mask << Amt;
- SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask);
- if (SimplifyLHS.getNode())
+ if (SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask))
return DAG.getNode(ISD::SRL, SDLoc(V), V.getValueType(),
SimplifyLHS, V.getOperand(1));
}
// For big endian targets, we need to adjust the offset to the pointer to
// load the correct bytes.
- if (TLI.isBigEndian()) {
+ if (DAG.getDataLayout().isBigEndian()) {
unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits();
unsigned EVTStoreBits = ExtVT.getStoreSizeInBits();
ShAmt = LVTStoreBits - EVTStoreBits - ShAmt;
// fold (sext_in_reg (load x)) -> (smaller sextload x)
// fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits))
- SDValue NarrowLoad = ReduceLoadWidth(N);
- if (NarrowLoad.getNode())
+ if (SDValue NarrowLoad = ReduceLoadWidth(N))
return NarrowLoad;
// fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24)
return SDValue();
}
+SDValue DAGCombiner::visitSIGN_EXTEND_VECTOR_INREG(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ EVT VT = N->getValueType(0);
+
+ if (N0.getOpcode() == ISD::UNDEF)
+ return DAG.getUNDEF(VT);
+
+ if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes,
+ LegalOperations))
+ return SDValue(Res, 0);
+
+ return SDValue();
+}
+
SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
- bool isLE = TLI.isLittleEndian();
+ bool isLE = DAG.getDataLayout().isLittleEndian();
// noop truncate
if (N0.getValueType() == N->getValueType(0))
SDValue EltNo = N0->getOperand(1);
if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) {
int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
- EVT IndexTy = TLI.getVectorIdxTy();
+ EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout());
int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1));
SDValue V = DAG.getNode(ISD::BITCAST, SDLoc(N),
// fold (truncate (load x)) -> (smaller load x)
// fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits))
if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) {
- SDValue Reduced = ReduceLoadWidth(N);
- if (Reduced.getNode())
+ if (SDValue Reduced = ReduceLoadWidth(N))
return Reduced;
+
// Handle the case where the load remains an extending load even
// after truncation.
if (N0.hasOneUse() && ISD::isUNINDEXEDLoad(N0.getNode())) {
!LD2->isVolatile() &&
DAG.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1)) {
unsigned Align = LD1->getAlignment();
- unsigned NewAlign = TLI.getDataLayout()->
- getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
+ unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
+ VT.getTypeForEVT(*DAG.getContext()));
if (NewAlign <= Align &&
(!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)))
// Do not change the width of a volatile load.
!cast<LoadSDNode>(N0)->isVolatile() &&
// Do not remove the cast if the types differ in endian layout.
- TLI.hasBigEndianPartOrdering(N0.getValueType()) ==
- TLI.hasBigEndianPartOrdering(VT) &&
+ TLI.hasBigEndianPartOrdering(N0.getValueType(), DAG.getDataLayout()) ==
+ TLI.hasBigEndianPartOrdering(VT, DAG.getDataLayout()) &&
(!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)) &&
TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
- unsigned Align = TLI.getDataLayout()->
- getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
+ unsigned Align = DAG.getDataLayout().getABITypeAlignment(
+ VT.getTypeForEVT(*DAG.getContext()));
unsigned OrigAlign = LN0->getAlignment();
if (Align <= OrigAlign) {
}
// bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive.
- if (N0.getOpcode() == ISD::BUILD_PAIR) {
- SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT);
- if (CombineLD.getNode())
+ if (N0.getOpcode() == ISD::BUILD_PAIR)
+ if (SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT))
return CombineLD;
- }
// Remove double bitcasts from shuffles - this is often a legacy of
// XformToShuffleWithZero being used to combine bitmaskings (of
ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N0);
// If operands are a bitcast, peek through if it casts the original VT.
- // If operands are a UNDEF or constant, just bitcast back to original VT.
+ // If operands are constants, just bitcast back to original VT.
auto PeekThroughBitcast = [&](SDValue Op) {
if (Op.getOpcode() == ISD::BITCAST &&
- Op.getOperand(0)->getValueType(0) == VT)
+ Op.getOperand(0).getValueType() == VT)
return SDValue(Op.getOperand(0));
if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()))
DstEltVT, BV->getOperand(0)));
SmallVector<SDValue, 8> Ops;
- for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
- SDValue Op = BV->getOperand(i);
+ for (SDValue Op : BV->op_values()) {
// If the vector element type is not legal, the BUILD_VECTOR operands
// are promoted and implicitly truncated. Make that explicit here.
if (Op.getValueType() != SrcEltVT)
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0, e = BV->getNumOperands(); i != e;
i += NumInputsPerOutput) {
- bool isLE = TLI.isLittleEndian();
+ bool isLE = DAG.getDataLayout().isLittleEndian();
APInt NewBits = APInt(DstBitSize, 0);
bool EltIsUndef = true;
for (unsigned j = 0; j != NumInputsPerOutput; ++j) {
NumOutputsPerInput*BV->getNumOperands());
SmallVector<SDValue, 8> Ops;
- for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
- if (BV->getOperand(i).getOpcode() == ISD::UNDEF) {
+ for (const SDValue &Op : BV->op_values()) {
+ if (Op.getOpcode() == ISD::UNDEF) {
Ops.append(NumOutputsPerInput, DAG.getUNDEF(DstEltVT));
continue;
}
- APInt OpVal = cast<ConstantSDNode>(BV->getOperand(i))->
+ APInt OpVal = cast<ConstantSDNode>(Op)->
getAPIntValue().zextOrTrunc(SrcBitSize);
for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
}
// For big endian targets, swap the order of the pieces of each element.
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
std::reverse(Ops.end()-NumOutputsPerInput, Ops.end());
}
bool Aggressive = TLI.enableAggressiveFMAFusion(VT);
bool LookThroughFPExt = TLI.isFPExtFree(VT);
+ // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)),
+ // prefer to fold the multiply with fewer uses.
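+ // E.g. if (fmul x, y) has additional users, folding it into the FMA would
+ // keep that multiply alive, so prefer folding the single-use (fmul u, v).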
+ if (Aggressive && N0.getOpcode() == ISD::FMUL &&
+ N1.getOpcode() == ISD::FMUL) {
+ if (N0.getNode()->use_size() > N1.getNode()->use_size())
+ std::swap(N0, N1);
+ }
+
// fold (fadd (fmul x, y), z) -> (fma x, y, z)
if (N0.getOpcode() == ISD::FMUL &&
(Aggressive || N0->hasOneUse())) {
bool AllowNewConst = (Level < AfterLegalizeDAG);
// fold (fadd A, 0) -> A
- if (N1CFP && N1CFP->getValueAPF().isZero())
+ if (N1CFP && N1CFP->isZero())
return N0;
// fold (fadd (fadd x, c1), c2) -> (fadd x, (fadd c1, c2))
} // enable-unsafe-fp-math
// FADD -> FMA combines:
- SDValue Fused = visitFADDForFMACombine(N);
- if (Fused) {
+ if (SDValue Fused = visitFADDForFMACombine(N)) {
AddToWorklist(Fused.getNode());
return Fused;
}
// If 'unsafe math' is enabled, fold lots of things.
if (Options.UnsafeFPMath) {
// (fsub A, 0) -> A
- if (N1CFP && N1CFP->getValueAPF().isZero())
+ if (N1CFP && N1CFP->isZero())
return N0;
// (fsub 0, B) -> -B
- if (N0CFP && N0CFP->getValueAPF().isZero()) {
+ if (N0CFP && N0CFP->isZero()) {
if (isNegatibleForFree(N1, LegalOperations, TLI, &Options))
return GetNegatedExpression(N1, DAG, LegalOperations);
if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))
}
// FSUB -> FMA combines:
- SDValue Fused = visitFSUBForFMACombine(N);
- if (Fused) {
+ if (SDValue Fused = visitFSUBForFMACombine(N)) {
AddToWorklist(Fused.getNode());
return Fused;
}
if (Options.UnsafeFPMath) {
// fold (fmul A, 0) -> 0
- if (N1CFP && N1CFP->getValueAPF().isZero())
+ if (N1CFP && N1CFP->isZero())
return N1;
// fold (fmul (fmul x, c1), c2) -> (fmul x, (fmul c1, c2))
auto *BV1 = dyn_cast<BuildVectorSDNode>(N1);
auto *BV00 = dyn_cast<BuildVectorSDNode>(N00);
auto *BV01 = dyn_cast<BuildVectorSDNode>(N01);
-
+
// Check 1: Make sure that the first operand of the inner multiply is NOT
// a constant. Otherwise, we may induce infinite looping.
if (!(isConstOrConstSplatFP(N00) || (BV00 && BV00->isConstant()))) {
// Undo the fmul 2.0, x -> fadd x, x transformation, since if it occurs
// during an early run of DAGCombiner can prevent folding with fmuls
// inserted during lowering.
- if (N0.getOpcode() == ISD::FADD && N0.getOperand(0) == N0.getOperand(1)) {
+ if (N0.getOpcode() == ISD::FADD &&
+ (N0.getOperand(0) == N0.getOperand(1)) &&
+ N0.hasOneUse()) {
const SDValue Two = DAG.getConstantFP(2.0, DL, VT);
SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, Two, N1);
return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), MulConsts);
return SDValue();
}
+// Combine multiple FDIVs with the same divisor into multiple FMULs by the
+// reciprocal.
+// E.g., (a / D; b / D;) -> (recip = 1.0 / D; a * recip; b * recip)
+// Notice that this is not always beneficial. One reason is that different
+// targets may have different costs for FDIV and FMUL, so sometimes the cost
+// of two FDIVs may be lower than the cost of one FDIV and two FMULs. Another
+// reason is that the critical path is increased from "one FDIV" to
+// "one FDIV + one FMUL".
+SDValue DAGCombiner::combineRepeatedFPDivisors(SDNode *N) {
+ if (!DAG.getTarget().Options.UnsafeFPMath)
+ return SDValue();
+
+ // Skip if current node is a reciprocal.
+ SDValue N0 = N->getOperand(0);
+ ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0);
+ if (N0CFP && N0CFP->isExactlyValue(1.0))
+ return SDValue();
+
+ // Exit early if the target does not want this transform or if there can't
+ // possibly be enough uses of the divisor to make the transform worthwhile.
+ SDValue N1 = N->getOperand(1);
+ unsigned MinUses = TLI.combineRepeatedFPDivisors();
+ if (!MinUses || N1->use_size() < MinUses)
+ return SDValue();
+
+ // Find all FDIV users of the same divisor.
+ // Use a set because duplicates may be present in the user list.
+ SetVector<SDNode *> Users;
+ for (auto *U : N1->uses())
+ if (U->getOpcode() == ISD::FDIV && U->getOperand(1) == N1)
+ Users.insert(U);
+
+ // Now that we have the actual number of divisor uses, make sure it meets
+ // the minimum threshold specified by the target.
+ if (Users.size() < MinUses)
+ return SDValue();
+
+ EVT VT = N->getValueType(0);
+ SDLoc DL(N);
+ SDValue FPOne = DAG.getConstantFP(1.0, DL, VT);
+ // FIXME: This optimization requires some level of fast-math, so the
+ // created reciprocal node should at least have the 'allowReciprocal'
+ // fast-math-flag set.
+ SDValue Reciprocal = DAG.getNode(ISD::FDIV, DL, VT, FPOne, N1);
+
+ // Dividend / Divisor -> Dividend * Reciprocal
+ for (auto *U : Users) {
+ SDValue Dividend = U->getOperand(0);
+ if (Dividend != FPOne) {
+ SDValue NewNode = DAG.getNode(ISD::FMUL, SDLoc(U), VT, Dividend,
+ Reciprocal);
+ CombineTo(U, NewNode);
+ } else if (U != Reciprocal.getNode()) {
+ // In the absence of fast-math-flags, this user node is always the
+ // same node as Reciprocal, but with FMF they may be different nodes.
+ CombineTo(U, Reciprocal);
+ }
+ }
+ return SDValue(N, 0); // N was replaced.
+}
+
SDValue DAGCombiner::visitFDIV(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
}
}
- // Combine multiple FDIVs with the same divisor into multiple FMULs by the
- // reciprocal.
- // E.g., (a / D; b / D;) -> (recip = 1.0 / D; a * recip; b * recip)
- // Notice that this is not always beneficial. One reason is different target
- // may have different costs for FDIV and FMUL, so sometimes the cost of two
- // FDIVs may be lower than the cost of one FDIV and two FMULs. Another reason
- // is the critical path is increased from "one FDIV" to "one FDIV + one FMUL".
- if (Options.UnsafeFPMath) {
- // Skip if current node is a reciprocal.
- if (N0CFP && N0CFP->isExactlyValue(1.0))
- return SDValue();
-
- SmallVector<SDNode *, 4> Users;
- // Find all FDIV users of the same divisor.
- for (SDNode::use_iterator UI = N1.getNode()->use_begin(),
- UE = N1.getNode()->use_end();
- UI != UE; ++UI) {
- SDNode *User = UI.getUse().getUser();
- if (User->getOpcode() == ISD::FDIV && User->getOperand(1) == N1)
- Users.push_back(User);
- }
-
- if (TLI.combineRepeatedFPDivisors(Users.size())) {
- SDLoc DL(N);
- SDValue FPOne = DAG.getConstantFP(1.0, DL, VT); // floating point 1.0
- SDValue Reciprocal = DAG.getNode(ISD::FDIV, DL, VT, FPOne, N1);
-
- // Dividend / Divisor -> Dividend * Reciprocal
- for (auto I = Users.begin(), E = Users.end(); I != E; ++I) {
- if ((*I)->getOperand(0) != FPOne) {
- SDValue NewNode = DAG.getNode(ISD::FMUL, SDLoc(*I), VT,
- (*I)->getOperand(0), Reciprocal);
- DAG.ReplaceAllUsesWith(*I, NewNode.getNode());
- }
- }
- return SDValue();
- }
- }
+ if (SDValue CombineRepeatedDivisors = combineRepeatedFPDivisors(N))
+ return CombineRepeatedDivisors;
return SDValue();
}
}
SDValue DAGCombiner::visitFSQRT(SDNode *N) {
- if (DAG.getTarget().Options.UnsafeFPMath &&
- !TLI.isFsqrtCheap()) {
- // Compute this as X * (1/sqrt(X)) = X * (X ** -0.5)
- if (SDValue RV = BuildRsqrtEstimate(N->getOperand(0))) {
- EVT VT = RV.getValueType();
- SDLoc DL(N);
- RV = DAG.getNode(ISD::FMUL, DL, VT, N->getOperand(0), RV);
- AddToWorklist(RV.getNode());
+ if (!DAG.getTarget().Options.UnsafeFPMath || TLI.isFsqrtCheap())
+ return SDValue();
- // Unfortunately, RV is now NaN if the input was exactly 0.
- // Select out this case and force the answer to 0.
- SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
- SDValue ZeroCmp =
- DAG.getSetCC(DL, TLI.getSetCCResultType(*DAG.getContext(), VT),
- N->getOperand(0), Zero, ISD::SETEQ);
- AddToWorklist(ZeroCmp.getNode());
- AddToWorklist(RV.getNode());
+ // Compute this as X * (1/sqrt(X)) = X * (X ** -0.5)
+ SDValue RV = BuildRsqrtEstimate(N->getOperand(0));
+ if (!RV)
+ return SDValue();
- RV = DAG.getNode(VT.isVector() ? ISD::VSELECT : ISD::SELECT,
- DL, VT, ZeroCmp, Zero, RV);
- return RV;
- }
- }
- return SDValue();
+ EVT VT = RV.getValueType();
+ SDLoc DL(N);
+ RV = DAG.getNode(ISD::FMUL, DL, VT, N->getOperand(0), RV);
+ AddToWorklist(RV.getNode());
+
+ // Unfortunately, RV is now NaN if the input was exactly 0.
+ // Select out this case and force the answer to 0.
+ SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
+ EVT CCVT = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
+ SDValue ZeroCmp = DAG.getSetCC(DL, CCVT, N->getOperand(0), Zero, ISD::SETEQ);
+ AddToWorklist(ZeroCmp.getNode());
+ AddToWorklist(RV.getNode());
+
+ return DAG.getNode(VT.isVector() ? ISD::VSELECT : ISD::SELECT, DL, VT,
+ ZeroCmp, Zero, RV);
}
SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
}
// (fneg (fmul c, x)) -> (fmul -c, x)
- if (N0.getOpcode() == ISD::FMUL) {
+ if (N0.getOpcode() == ISD::FMUL &&
+ (N0.getNode()->hasOneUse() || !TLI.isFNegFree(VT))) {
ConstantFPSDNode *CFP1 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1));
if (CFP1) {
APFloat CVal = CFP1->getValueAPF();
SDValue Op1 = TheXor->getOperand(1);
if (Op0.getOpcode() == Op1.getOpcode()) {
// Avoid missing important xor optimizations.
- SDValue Tmp = visitXOR(TheXor);
- if (Tmp.getNode()) {
+ if (SDValue Tmp = visitXOR(TheXor)) {
if (Tmp.getNode() != TheXor) {
DEBUG(dbgs() << "\nReplacing.8 ";
TheXor->dump(&DAG);
if (Op0.getOpcode() != ISD::SETCC && Op1.getOpcode() != ISD::SETCC) {
bool Equal = false;
- if (ConstantSDNode *RHSCI = dyn_cast<ConstantSDNode>(Op0))
- if (RHSCI->getAPIntValue() == 1 && Op0.hasOneUse() &&
- Op0.getOpcode() == ISD::XOR) {
- TheXor = Op0.getNode();
- Equal = true;
- }
+ if (isOneConstant(Op0) && Op0.hasOneUse() &&
+ Op0.getOpcode() == ISD::XOR) {
+ TheXor = Op0.getNode();
+ Equal = true;
+ }
EVT SetCCVT = N1.getValueType();
if (LegalTypes)
SelectionDAG &DAG,
const TargetLowering &TLI) {
EVT VT;
+ unsigned AS;
+
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Use)) {
if (LD->isIndexed() || LD->getBasePtr().getNode() != N)
return false;
VT = LD->getMemoryVT();
+ AS = LD->getAddressSpace();
} else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Use)) {
if (ST->isIndexed() || ST->getBasePtr().getNode() != N)
return false;
VT = ST->getMemoryVT();
+ AS = ST->getAddressSpace();
} else
return false;
} else
return false;
- return TLI.isLegalAddressingMode(AM, VT.getTypeForEVT(*DAG.getContext()));
+ return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM,
+ VT.getTypeForEVT(*DAG.getContext()), AS);
}
/// Try turning a load/store into a pre-indexed load/store when the base
}
// Don't create an indexed load / store with zero offset.
- if (isa<ConstantSDNode>(Offset) &&
- cast<ConstantSDNode>(Offset)->isNullValue())
+ if (isNullConstant(Offset))
return false;
// Try turning it into a pre-indexed load / store except when:
// a copy of the original base pointer.
SmallVector<SDNode *, 16> OtherUses;
if (isa<ConstantSDNode>(Offset))
- for (SDNode *Use : BasePtr.getNode()->uses()) {
- if (Use == Ptr.getNode())
+ for (SDNode::use_iterator UI = BasePtr.getNode()->use_begin(),
+ UE = BasePtr.getNode()->use_end();
+ UI != UE; ++UI) {
+ SDUse &Use = UI.getUse();
+ // Skip the use that is Ptr and uses of other results from BasePtr's
+ // node (important for nodes that return multiple results).
+ if (Use.getUser() == Ptr.getNode() || Use != BasePtr)
continue;
- if (Use->isPredecessorOf(N))
+ if (Use.getUser()->isPredecessorOf(N))
continue;
- if (Use->getOpcode() != ISD::ADD && Use->getOpcode() != ISD::SUB) {
+ if (Use.getUser()->getOpcode() != ISD::ADD &&
+ Use.getUser()->getOpcode() != ISD::SUB) {
OtherUses.clear();
break;
}
- SDValue Op0 = Use->getOperand(0), Op1 = Use->getOperand(1);
- if (Op1.getNode() == BasePtr.getNode())
- std::swap(Op0, Op1);
- assert(Op0.getNode() == BasePtr.getNode() &&
- "Use of ADD/SUB but not an operand");
-
+ SDValue Op1 = Use.getUser()->getOperand((UI.getOperandNo() + 1) & 1);
if (!isa<ConstantSDNode>(Op1)) {
OtherUses.clear();
break;
break;
}
- OtherUses.push_back(Use);
+ OtherUses.push_back(Use.getUser());
}
if (Swapped)
ISD::MemIndexedMode AM = ISD::UNINDEXED;
if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) {
// Don't create an indexed load / store with zero offset.
- if (isa<ConstantSDNode>(Offset) &&
- cast<ConstantSDNode>(Offset)->isNullValue())
+ if (isNullConstant(Offset))
continue;
// Try turning it into a post-indexed load / store except when
void addSliceGain(const LoadedSlice &LS) {
// Each slice saves a truncate.
const TargetLowering &TLI = LS.DAG->getTargetLoweringInfo();
- if (!TLI.isTruncateFree(LS.Inst->getValueType(0),
- LS.Inst->getOperand(0).getValueType()))
+ if (!TLI.isTruncateFree(LS.Inst->getOperand(0).getValueType(),
+ LS.Inst->getValueType(0)))
++Truncates;
// If there is a shift amount, this slice gets rid of it.
if (LS.Shift)
/// \pre DAG != nullptr.
uint64_t getOffsetFromBase() const {
assert(DAG && "Missing context.");
- bool IsBigEndian =
- DAG->getTargetLoweringInfo().getDataLayout()->isBigEndian();
+ bool IsBigEndian = DAG->getDataLayout().isBigEndian();
assert(!(Shift & 0x7) && "Shifts not aligned on Bytes are not supported.");
uint64_t Offset = Shift / 8;
unsigned TySizeInBytes = Origin->getValueSizeInBits(0) / 8;
// Check if it will be merged with the load.
// 1. Check the alignment constraint.
- unsigned RequiredAlignment = TLI.getDataLayout()->getABITypeAlignment(
+ unsigned RequiredAlignment = DAG->getDataLayout().getABITypeAlignment(
ResVT.getTypeForEVT(*DAG->getContext()));
if (RequiredAlignment > getAlignment())
return Result; // Fail.
else {
bool isOk = false;
- for (unsigned i = 0, e = Chain->getNumOperands(); i != e; ++i)
- if (Chain->getOperand(i).getNode() == LD) {
+ for (const SDValue &ChainOp : Chain->op_values())
+ if (ChainOp.getNode() == LD) {
isOk = true;
break;
}
unsigned StOffset;
unsigned NewAlign = St->getAlignment();
- if (DAG.getTargetLoweringInfo().isLittleEndian())
+ if (DAG.getDataLayout().isLittleEndian())
StOffset = ByteShift;
else
StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;
uint64_t PtrOff = ShAmt / 8;
// For big endian targets, we need to adjust the offset to the pointer to
// load the correct bytes.
- if (TLI.isBigEndian())
+ if (DAG.getDataLayout().isBigEndian())
PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff;
unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
- if (NewAlign < TLI.getDataLayout()->getABITypeAlignment(NewVTTy))
+ if (NewAlign < DAG.getDataLayout().getABITypeAlignment(NewVTTy))
return SDValue();
SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(LD),
unsigned LDAlign = LD->getAlignment();
unsigned STAlign = ST->getAlignment();
Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
- unsigned ABIAlign = TLI.getDataLayout()->getABITypeAlignment(IntVTTy);
+ unsigned ABIAlign = DAG.getDataLayout().getABITypeAlignment(IntVTTy);
if (LDAlign < ABIAlign || STAlign < ABIAlign)
return SDValue();
};
} // namespace
+SDValue DAGCombiner::getMergedConstantVectorStore(SelectionDAG &DAG,
+ SDLoc SL,
+ ArrayRef<MemOpLink> Stores,
+ EVT Ty) const {
+ SmallVector<SDValue, 8> BuildVector;
+
+ for (unsigned I = 0, E = Ty.getVectorNumElements(); I != E; ++I)
+ BuildVector.push_back(cast<StoreSDNode>(Stores[I].MemNode)->getValue());
+
+ return DAG.getNode(ISD::BUILD_VECTOR, SL, Ty, BuildVector);
+}
+
bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
SmallVectorImpl<MemOpLink> &StoreNodes, EVT MemVT,
unsigned NumElem, bool IsConstantSrc, bool UseVector) {
EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
assert(TLI.isTypeLegal(Ty) && "Illegal vector store");
if (IsConstantSrc) {
- // A vector store with a constant source implies that the constant is
- // zero; we only handle merging stores of constant zeros because the zero
- // can be materialized without a load.
- // It may be beneficial to loosen this restriction to allow non-zero
- // store merging.
- StoredVal = DAG.getConstant(0, DL, Ty);
+ StoredVal = getMergedConstantVectorStore(DAG, DL, StoreNodes, Ty);
} else {
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumElem ; ++i) {
// elements, so this path implies a store of constants.
assert(IsConstantSrc && "Merged vector elements should use vector store");
- unsigned StoreBW = NumElem * ElementSizeBytes * 8;
- APInt StoreInt(StoreBW, 0);
+ unsigned SizeInBits = NumElem * ElementSizeBytes * 8;
+ APInt StoreInt(SizeInBits, 0);
// Construct a single integer constant which is made of the smaller
// constant inputs.
- bool IsLE = TLI.isLittleEndian();
+ bool IsLE = DAG.getDataLayout().isLittleEndian();
for (unsigned i = 0; i < NumElem ; ++i) {
unsigned Idx = IsLE ? (NumElem - 1 - i) : i;
StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode);
SDValue Val = St->getValue();
- StoreInt <<= ElementSizeBytes*8;
+ StoreInt <<= ElementSizeBytes * 8;
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) {
- StoreInt |= C->getAPIntValue().zext(StoreBW);
+ StoreInt |= C->getAPIntValue().zext(SizeInBits);
} else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val)) {
- StoreInt |= C->getValueAPF().bitcastToAPInt().zext(StoreBW);
+ StoreInt |= C->getValueAPF().bitcastToAPInt().zext(SizeInBits);
} else {
llvm_unreachable("Invalid constant element type");
}
}
// Create the new Load and Store operations.
- EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
+ EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
StoredVal = DAG.getConstant(StoreInt, DL, StoreTy);
}
return true;
}
-bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
- if (OptLevel == CodeGenOpt::None)
- return false;
-
- EVT MemVT = St->getMemoryVT();
- int64_t ElementSizeBytes = MemVT.getSizeInBits()/8;
- bool NoVectors = DAG.getMachineFunction().getFunction()->hasFnAttribute(
- Attribute::NoImplicitFloat);
-
- // Don't merge vectors into wider inputs.
- if (MemVT.isVector() || !MemVT.isSimple())
- return false;
-
- // Perform an early exit check. Do not bother looking at stored values that
- // are not constants, loads, or extracted vector elements.
- SDValue StoredVal = St->getValue();
- bool IsLoadSrc = isa<LoadSDNode>(StoredVal);
- bool IsConstantSrc = isa<ConstantSDNode>(StoredVal) ||
- isa<ConstantFPSDNode>(StoredVal);
- bool IsExtractVecEltSrc = (StoredVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT);
-
- if (!IsConstantSrc && !IsLoadSrc && !IsExtractVecEltSrc)
- return false;
-
- // Only look at ends of store sequences.
- SDValue Chain = SDValue(St, 0);
- if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE)
- return false;
-
+void DAGCombiner::getStoreMergeAndAliasCandidates(
+ StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes,
+ SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes) {
// This holds the base pointer, index, and the offset in bytes from the base
// pointer.
BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr());
// We must have a base and an offset.
if (!BasePtr.Base.getNode())
- return false;
+ return;
// Do not handle stores to undef base pointers.
if (BasePtr.Base.getOpcode() == ISD::UNDEF)
- return false;
-
- // Save the LoadSDNodes that we find in the chain.
- // We need to make sure that these nodes do not interfere with
- // any of the store nodes.
- SmallVector<LSBaseSDNode*, 8> AliasLoadNodes;
-
- // Save the StoreSDNodes that we find in the chain.
- SmallVector<MemOpLink, 8> StoreNodes;
+ return;
// Walk up the chain and look for nodes with offsets from the same
// base pointer. Stop when reaching an instruction with a different kind
// or instruction which has a different base pointer.
+ EVT MemVT = St->getMemoryVT();
unsigned Seq = 0;
StoreSDNode *Index = St;
while (Index) {
if (!Ptr.equalBaseIndex(BasePtr))
break;
- // Check that the alignment is the same.
- if (Index->getAlignment() != St->getAlignment())
- break;
-
// The memory operands must not be volatile.
if (Index->isVolatile() || Index->isIndexed())
break;
if (Index->getMemoryVT() != MemVT)
break;
- // We do not allow unaligned stores because we want to prevent overriding
- // stores.
- if (Index->getAlignment()*8 != MemVT.getSizeInBits())
- break;
-
// We found a potential memory operand to merge.
StoreNodes.push_back(MemOpLink(Index, Ptr.Offset, Seq++));
}
}
}
+}
+
+bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
+ if (OptLevel == CodeGenOpt::None)
+ return false;
+
+ EVT MemVT = St->getMemoryVT();
+ int64_t ElementSizeBytes = MemVT.getSizeInBits() / 8;
+ bool NoVectors = DAG.getMachineFunction().getFunction()->hasFnAttribute(
+ Attribute::NoImplicitFloat);
+
+ // This function cannot currently deal with non-byte-sized memory sizes.
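+ // (E.g. an i1 or i33 store, whose size in bits is not a multiple of 8.)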
+ if (ElementSizeBytes * 8 != MemVT.getSizeInBits())
+ return false;
+
+ // Don't merge vectors into wider inputs.
+ if (MemVT.isVector() || !MemVT.isSimple())
+ return false;
+
+ // Perform an early exit check. Do not bother looking at stored values that
+ // are not constants, loads, or extracted vector elements.
+ SDValue StoredVal = St->getValue();
+ bool IsLoadSrc = isa<LoadSDNode>(StoredVal);
+ bool IsConstantSrc = isa<ConstantSDNode>(StoredVal) ||
+ isa<ConstantFPSDNode>(StoredVal);
+ bool IsExtractVecEltSrc = (StoredVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT);
+
+ if (!IsConstantSrc && !IsLoadSrc && !IsExtractVecEltSrc)
+ return false;
+
+ // Only look at ends of store sequences.
+ SDValue Chain = SDValue(St, 0);
+ if (Chain->hasOneUse() && Chain->use_begin()->getOpcode() == ISD::STORE)
+ return false;
+
+ // Save the LoadSDNodes that we find in the chain.
+ // We need to make sure that these nodes do not interfere with
+ // any of the store nodes.
+ SmallVector<LSBaseSDNode*, 8> AliasLoadNodes;
+
+ // Save the StoreSDNodes that we find in the chain.
+ SmallVector<MemOpLink, 8> StoreNodes;
+
+ getStoreMergeAndAliasCandidates(St, StoreNodes, AliasLoadNodes);
// Check if there is anything to merge.
if (StoreNodes.size() < 2)
// The node with the lowest store address.
LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
+ unsigned FirstStoreAS = FirstInChain->getAddressSpace();
+ unsigned FirstStoreAlign = FirstInChain->getAlignment();
+ LLVMContext &Context = *DAG.getContext();
+ const DataLayout &DL = DAG.getDataLayout();
// Store the constants into memory as one consecutive store.
if (IsConstantSrc) {
}
// Find a legal type for the constant store.
- unsigned StoreBW = (i+1) * ElementSizeBytes * 8;
- EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
- if (TLI.isTypeLegal(StoreTy))
+ unsigned SizeInBits = (i+1) * ElementSizeBytes * 8;
+ EVT StoreTy = EVT::getIntegerVT(Context, SizeInBits);
+ if (TLI.isTypeLegal(StoreTy) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
+ FirstStoreAlign)) {
LastLegalType = i+1;
// Or check whether a truncstore is legal.
- else if (TLI.getTypeAction(*DAG.getContext(), StoreTy) ==
- TargetLowering::TypePromoteInteger) {
+ } else if (TLI.getTypeAction(Context, StoreTy) ==
+ TargetLowering::TypePromoteInteger) {
EVT LegalizedStoredValueTy =
- TLI.getTypeToTransformTo(*DAG.getContext(), StoredVal.getValueType());
- if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy))
- LastLegalType = i+1;
+ TLI.getTypeToTransformTo(Context, StoredVal.getValueType());
+ if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) &&
+ TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy,
+ FirstStoreAS, FirstStoreAlign)) {
+ LastLegalType = i + 1;
+ }
}
// Find a legal type for the vector store.
- EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1);
- if (TLI.isTypeLegal(Ty))
+ EVT Ty = EVT::getVectorVT(Context, MemVT, i+1);
+ if (TLI.isTypeLegal(Ty) &&
+ TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS,
+ FirstStoreAlign)) {
LastLegalVectorType = i + 1;
+ }
}
- // We only use vectors if the constant is known to be zero and the
- // function is not marked with the noimplicitfloat attribute.
- if (NonZero || NoVectors)
+
+ // We only use vectors if the constant is known to be zero or the target
+ // allows it and the function is not marked with the noimplicitfloat
+ // attribute.
+ if (NoVectors) {
+ LastLegalVectorType = 0;
+ } else if (NonZero && !TLI.storeOfVectorConstantIsCheap(MemVT,
+ LastLegalVectorType,
+ FirstStoreAS)) {
LastLegalVectorType = 0;
+ }
// Check if we found a legal integer type to store.
if (LastLegalType == 0 && LastLegalVectorType == 0)
return false;
// Find a legal type for the vector store.
- EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1);
- if (TLI.isTypeLegal(Ty))
+ EVT Ty = EVT::getVectorVT(Context, MemVT, i+1);
+ if (TLI.isTypeLegal(Ty) &&
+ TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS,
+ FirstStoreAlign))
NumElem = i + 1;
}
if (!Ld->hasNUsesOfValue(1, 0))
break;
- // Check that the alignment is the same as the stores.
- if (Ld->getAlignment() != St->getAlignment())
- break;
-
// The memory operands must not be volatile.
if (Ld->isVolatile() || Ld->isIndexed())
break;
St->getAlignment() >= RequiredAlignment)
return false;
+ LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode);
+ unsigned FirstLoadAS = FirstLoad->getAddressSpace();
+ unsigned FirstLoadAlign = FirstLoad->getAlignment();
+
// Scan the memory operations on the chain and find the first non-consecutive
// load memory address. These variables hold the index in the store node
// array.
unsigned LastLegalVectorType = 0;
unsigned LastLegalIntegerType = 0;
StartAddress = LoadNodes[0].OffsetFromBase;
- SDValue FirstChain = LoadNodes[0].MemNode->getChain();
+ SDValue FirstChain = FirstLoad->getChain();
for (unsigned i = 1; i < LoadNodes.size(); ++i) {
// All loads must share the same chain.
if (LoadNodes[i].MemNode->getChain() != FirstChain)
LastConsecutiveLoad = i;
// Find a legal type for the vector store.
- EVT StoreTy = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1);
- if (TLI.isTypeLegal(StoreTy))
+ EVT StoreTy = EVT::getVectorVT(Context, MemVT, i+1);
+ if (TLI.isTypeLegal(StoreTy) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
+ FirstStoreAlign) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
+ FirstLoadAlign)) {
LastLegalVectorType = i + 1;
+ }
// Find a legal type for the integer store.
- unsigned StoreBW = (i+1) * ElementSizeBytes * 8;
- StoreTy = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
- if (TLI.isTypeLegal(StoreTy))
+ unsigned SizeInBits = (i+1) * ElementSizeBytes * 8;
+ StoreTy = EVT::getIntegerVT(Context, SizeInBits);
+ if (TLI.isTypeLegal(StoreTy) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
+ FirstStoreAlign) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
+ FirstLoadAlign))
LastLegalIntegerType = i + 1;
// Or check whether a truncstore and extload is legal.
- else if (TLI.getTypeAction(*DAG.getContext(), StoreTy) ==
+ else if (TLI.getTypeAction(Context, StoreTy) ==
TargetLowering::TypePromoteInteger) {
EVT LegalizedStoredValueTy =
- TLI.getTypeToTransformTo(*DAG.getContext(), StoreTy);
+ TLI.getTypeToTransformTo(Context, StoreTy);
if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) &&
TLI.isLoadExtLegal(ISD::ZEXTLOAD, LegalizedStoredValueTy, StoreTy) &&
TLI.isLoadExtLegal(ISD::SEXTLOAD, LegalizedStoredValueTy, StoreTy) &&
- TLI.isLoadExtLegal(ISD::EXTLOAD, LegalizedStoredValueTy, StoreTy))
+ TLI.isLoadExtLegal(ISD::EXTLOAD, LegalizedStoredValueTy, StoreTy) &&
+ TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy,
+ FirstStoreAS, FirstStoreAlign) &&
+ TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy,
+ FirstLoadAS, FirstLoadAlign))
LastLegalIntegerType = i+1;
}
}
// to memory.
EVT JointMemOpVT;
if (UseVectorTy) {
- JointMemOpVT = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
+ JointMemOpVT = EVT::getVectorVT(Context, MemVT, NumElem);
} else {
- unsigned StoreBW = NumElem * ElementSizeBytes * 8;
- JointMemOpVT = EVT::getIntegerVT(*DAG.getContext(), StoreBW);
+ unsigned SizeInBits = NumElem * ElementSizeBytes * 8;
+ JointMemOpVT = EVT::getIntegerVT(Context, SizeInBits);
}
SDLoc LoadDL(LoadNodes[0].MemNode);
SDLoc StoreDL(StoreNodes[0].MemNode);
- LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode);
- SDValue NewLoad = DAG.getLoad(JointMemOpVT, LoadDL,
- FirstLoad->getChain(),
- FirstLoad->getBasePtr(),
- FirstLoad->getPointerInfo(),
- false, false, false,
- FirstLoad->getAlignment());
-
- SDValue NewStore = DAG.getStore(LatestOp->getChain(), StoreDL, NewLoad,
- FirstInChain->getBasePtr(),
- FirstInChain->getPointerInfo(), false, false,
- FirstInChain->getAlignment());
+ SDValue NewLoad = DAG.getLoad(
+ JointMemOpVT, LoadDL, FirstLoad->getChain(), FirstLoad->getBasePtr(),
+ FirstLoad->getPointerInfo(), false, false, false, FirstLoadAlign);
+
+ SDValue NewStore = DAG.getStore(
+ LatestOp->getChain(), StoreDL, NewLoad, FirstInChain->getBasePtr(),
+ FirstInChain->getPointerInfo(), false, false, FirstStoreAlign);
// Replace one of the loads with the new load.
LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[0].MemNode);
ST->isUnindexed()) {
unsigned OrigAlign = ST->getAlignment();
EVT SVT = Value.getOperand(0).getValueType();
- unsigned Align = TLI.getDataLayout()->
- getABITypeAlignment(SVT.getTypeForEVT(*DAG.getContext()));
+ unsigned Align = DAG.getDataLayout().getABITypeAlignment(
+ SVT.getTypeForEVT(*DAG.getContext()));
if (Align <= OrigAlign &&
((!LegalOperations && !ST->isVolatile()) ||
TLI.isOperationLegalOrCustom(ISD::STORE, SVT)))
uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, SDLoc(CFP), MVT::i32);
SDValue Hi = DAG.getConstant(Val >> 32, SDLoc(CFP), MVT::i32);
- if (TLI.isBigEndian()) std::swap(Lo, Hi);
+ if (DAG.getDataLayout().isBigEndian())
+ std::swap(Lo, Hi);
unsigned Alignment = ST->getAlignment();
bool isVolatile = ST->isVolatile();
// Try transforming a pair floating point load / store ops to integer
// load / store ops.
- SDValue NewST = TransformFPLoadStorePair(N);
- if (NewST.getNode())
+ if (SDValue NewST = TransformFPLoadStorePair(N))
return NewST;
bool UseAA = CombinerAA.getNumOccurrences() > 0 ? CombinerAA
EVT ResultVT = EVE->getValueType(0);
EVT VecEltVT = InVecVT.getVectorElementType();
unsigned Align = OriginalLoad->getAlignment();
- unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
+ unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
VecEltVT.getTypeForEVT(*DAG.getContext()));
if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VecEltVT))
// scalar_to_vector here as well.
if (!LegalOperations) {
- EVT IndexTy = TLI.getVectorIdxTy();
+ EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout());
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), NVT, SVInVec,
DAG.getConstant(OrigElt, SDLoc(SVOp), IndexTy));
}
if (!ValidTypes)
return SDValue();
- bool isLE = TLI.isLittleEndian();
+ bool isLE = DAG.getDataLayout().isLittleEndian();
unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits();
assert(ElemRatio > 1 && "Invalid element size ratio");
SDValue Filler = AllAnyExt ? DAG.getUNDEF(SourceType):
if (Op.getOpcode() == ISD::UNDEF) continue;
// See if we can combine this build_vector into a blend with a zero vector.
- if (!VecIn2.getNode() && ((Op.getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(Op.getNode())->isNullValue()) ||
- (Op.getOpcode() == ISD::ConstantFP &&
- cast<ConstantFPSDNode>(Op.getNode())->getValueAPF().isZero()))) {
+ if (!VecIn2.getNode() && (isNullConstant(Op) || isNullFPConstant(Op))) {
UsesZeroVector = true;
continue;
}
// Try to replace VecIn1 with two extract_subvectors
// No need to update the masks, they should still be correct.
- VecIn2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, VecIn1,
- DAG.getConstant(VT.getVectorNumElements(), dl, TLI.getVectorIdxTy()));
- VecIn1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, VecIn1,
- DAG.getConstant(0, dl, TLI.getVectorIdxTy()));
+ VecIn2 = DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, dl, VT, VecIn1,
+ DAG.getConstant(VT.getVectorNumElements(), dl,
+ TLI.getVectorIdxTy(DAG.getDataLayout())));
+ VecIn1 = DAG.getNode(
+ ISD::EXTRACT_SUBVECTOR, dl, VT, VecIn1,
+ DAG.getConstant(0, dl, TLI.getVectorIdxTy(DAG.getDataLayout())));
} else
return SDValue();
}
}
// If any of the operands is a floating point scalar bitcast to a vector,
- // use floating point types throughout, and bitcast everything.
+ // use floating point types throughout, and bitcast everything.
// Replace UNDEFs by another scalar UNDEF node, of the final desired type.
if (AnyFP) {
SVT = EVT::getFloatingPointVT(OpVT.getSizeInBits());
DAG.getNode(ISD::BUILD_VECTOR, DL, VecVT, Ops));
}
-SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
- // TODO: Check to see if this is a CONCAT_VECTORS of a bunch of
- // EXTRACT_SUBVECTOR operations. If so, and if the EXTRACT_SUBVECTOR vector
- // inputs come from at most two distinct vectors, turn this into a shuffle
- // node.
+// Check to see if this is a CONCAT_VECTORS of a bunch of EXTRACT_SUBVECTOR
+// operations. If so, and if the EXTRACT_SUBVECTOR vector inputs come from at
+// most two distinct vectors the same size as the result, attempt to turn this
+// into a legal shuffle.
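+// E.g. (v8i32 concat (v4i32 extract_subvector X:v8i32, 4),
+//      (v4i32 extract_subvector Y:v8i32, 0))
+//      -> (v8i32 vector_shuffle X, Y, <4,5,6,7,8,9,10,11>)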
+static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) {
+ EVT VT = N->getValueType(0);
+ EVT OpVT = N->getOperand(0).getValueType();
+ int NumElts = VT.getVectorNumElements();
+ int NumOpElts = OpVT.getVectorNumElements();
+
+ SDValue SV0 = DAG.getUNDEF(VT), SV1 = DAG.getUNDEF(VT);
+ SmallVector<int, 8> Mask;
+
+ for (SDValue Op : N->ops()) {
+ // Peek through any bitcast.
+ while (Op.getOpcode() == ISD::BITCAST)
+ Op = Op.getOperand(0);
+ // UNDEF nodes convert to UNDEF shuffle mask values.
+ if (Op.getOpcode() == ISD::UNDEF) {
+ Mask.append((unsigned)NumOpElts, -1);
+ continue;
+ }
+
+ if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
+ return SDValue();
+
+ // What vector are we extracting the subvector from and at what index?
+ SDValue ExtVec = Op.getOperand(0);
+ if (ExtVec.getOpcode() == ISD::UNDEF) {
+ Mask.append((unsigned)NumOpElts, -1);
+ continue;
+ }
+
+ EVT ExtVT = ExtVec.getValueType();
+ if (!isa<ConstantSDNode>(Op.getOperand(1)))
+ return SDValue();
+ int ExtIdx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+
+ // Ensure that we are extracting a subvector from a vector the same
+ // size as the result.
+ if (ExtVT.getSizeInBits() != VT.getSizeInBits())
+ return SDValue();
+
+ // Scale the subvector index to account for any bitcast.
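+ // E.g. an extract at index 2 from a v4i64 source, feeding a v8i32 result,
+ // becomes index 4 once rescaled to i32 elements.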
+ int NumExtElts = ExtVT.getVectorNumElements();
+ if (0 == (NumExtElts % NumElts))
+ ExtIdx /= (NumExtElts / NumElts);
+ else if (0 == (NumElts % NumExtElts))
+ ExtIdx *= (NumElts / NumExtElts);
+ else
+ return SDValue();
+
+ // At most we can reference 2 inputs in the final shuffle.
+ if (SV0.getOpcode() == ISD::UNDEF || SV0 == ExtVec) {
+ SV0 = ExtVec;
+ for (int i = 0; i != NumOpElts; ++i)
+ Mask.push_back(i + ExtIdx);
+ } else if (SV1.getOpcode() == ISD::UNDEF || SV1 == ExtVec) {
+ SV1 = ExtVec;
+ for (int i = 0; i != NumOpElts; ++i)
+ Mask.push_back(i + ExtIdx + NumElts);
+ } else {
+ return SDValue();
+ }
+ }
+
+ if (!DAG.getTargetLoweringInfo().isShuffleMaskLegal(Mask, VT))
+ return SDValue();
+
+ return DAG.getVectorShuffle(VT, SDLoc(N), DAG.getBitcast(VT, SV0),
+ DAG.getBitcast(VT, SV1), Mask);
+}
+
+SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) {
// If we only have one input vector, we don't need to do any concatenation.
if (N->getNumOperands() == 1)
return N->getOperand(0);
if (SDValue V = combineConcatVectorOfScalars(N, DAG))
return V;
+ // Fold CONCAT_VECTORS of EXTRACT_SUBVECTOR (or undef) to VECTOR_SHUFFLE.
+ if (Level < AfterLegalizeVectorOps && TLI.isTypeLegal(VT))
+ if (SDValue V = combineConcatVectorOfExtracts(N, DAG))
+ return V;
+
// Type legalization of vectors and DAG canonicalization of SHUFFLE_VECTOR
// nodes often generate nop CONCAT_VECTOR nodes.
// Scan the CONCAT_VECTOR operands and look for a CONCAT operations that
SDValue RHS = N->getOperand(1);
SDLoc dl(N);
- // Make sure we're not running after operation legalization where it
+ // Make sure we're not running after operation legalization where it
// may have custom lowered the vector shuffles.
if (LegalOperations)
return SDValue();
if (RHS.getOpcode() == ISD::BITCAST)
RHS = RHS.getOperand(0);
- if (RHS.getOpcode() == ISD::BUILD_VECTOR) {
+ if (RHS.getOpcode() != ISD::BUILD_VECTOR)
+ return SDValue();
+
+ EVT RVT = RHS.getValueType();
+ unsigned NumElts = RHS.getNumOperands();
+
+ // Attempt to create a valid clear mask, splitting the mask into
+ // sub elements and checking to see if each is all zeros or all ones -
+ // suitable for shuffle masking.
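+ // E.g. an i64 element 0x00000000FFFFFFFF is neither all zeros nor all
+ // ones, but its two i32 halves (-1 and 0) are, so an i32 clear mask works.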
+ auto BuildClearMask = [&](int Split) {
+ int NumSubElts = NumElts * Split;
+ int NumSubBits = RVT.getScalarSizeInBits() / Split;
+
SmallVector<int, 8> Indices;
- unsigned NumElts = RHS.getNumOperands();
+ for (int i = 0; i != NumSubElts; ++i) {
+ int EltIdx = i / Split;
+ int SubIdx = i % Split;
+ SDValue Elt = RHS.getOperand(EltIdx);
+ if (Elt.getOpcode() == ISD::UNDEF) {
+ Indices.push_back(-1);
+ continue;
+ }
- for (unsigned i = 0; i != NumElts; ++i) {
- SDValue Elt = RHS.getOperand(i);
- if (!isa<ConstantSDNode>(Elt))
+ APInt Bits;
+ if (isa<ConstantSDNode>(Elt))
+ Bits = cast<ConstantSDNode>(Elt)->getAPIntValue();
+ else if (isa<ConstantFPSDNode>(Elt))
+ Bits = cast<ConstantFPSDNode>(Elt)->getValueAPF().bitcastToAPInt();
+ else
return SDValue();
- if (cast<ConstantSDNode>(Elt)->isAllOnesValue())
+ // Extract the sub element from the constant bit mask.
+ if (DAG.getDataLayout().isBigEndian()) {
+ Bits = Bits.lshr((Split - SubIdx - 1) * NumSubBits);
+ } else {
+ Bits = Bits.lshr(SubIdx * NumSubBits);
+ }
+
+ if (Split > 1)
+ Bits = Bits.trunc(NumSubBits);
+
+ if (Bits.isAllOnesValue())
Indices.push_back(i);
- else if (cast<ConstantSDNode>(Elt)->isNullValue())
- Indices.push_back(NumElts+i);
+ else if (Bits == 0)
+ Indices.push_back(i + NumSubElts);
else
return SDValue();
}
// Let's see if the target supports this vector_shuffle.
- EVT RVT = RHS.getValueType();
- if (!TLI.isVectorClearMaskLegal(Indices, RVT))
+ EVT ClearSVT = EVT::getIntegerVT(*DAG.getContext(), NumSubBits);
+ EVT ClearVT = EVT::getVectorVT(*DAG.getContext(), ClearSVT, NumSubElts);
+ if (!TLI.isVectorClearMaskLegal(Indices, ClearVT))
return SDValue();
- // Return the new VECTOR_SHUFFLE node.
- EVT EltVT = RVT.getVectorElementType();
- SmallVector<SDValue,8> ZeroOps(RVT.getVectorNumElements(),
- DAG.getConstant(0, dl, EltVT));
- SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, dl, RVT, ZeroOps);
- LHS = DAG.getNode(ISD::BITCAST, dl, RVT, LHS);
- SDValue Shuf = DAG.getVectorShuffle(RVT, dl, LHS, Zero, &Indices[0]);
- return DAG.getNode(ISD::BITCAST, dl, VT, Shuf);
- }
+ SDValue Zero = DAG.getConstant(0, dl, ClearVT);
+ return DAG.getBitcast(VT, DAG.getVectorShuffle(ClearVT, dl,
+ DAG.getBitcast(ClearVT, LHS),
+ Zero, &Indices[0]));
+ };
+
+ // Determine maximum split level (byte level masking).
+ int MaxSplit = 1;
+ if (RVT.getScalarSizeInBits() % 8 == 0)
+ MaxSplit = RVT.getScalarSizeInBits() / 8;
+
+ for (int Split = 1; Split <= MaxSplit; ++Split)
+ if (RVT.getScalarSizeInBits() % Split == 0)
+ if (SDValue S = BuildClearMask(Split))
+ return S;
return SDValue();
}
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
- if (SDValue Shuffle = XformToShuffleWithZero(N))
- return Shuffle;
-
// If the LHS and RHS are BUILD_VECTOR nodes, see if we can constant fold
// this operation.
if (LHS.getOpcode() == ISD::BUILD_VECTOR &&
// Can't fold divide by zero.
if (N->getOpcode() == ISD::SDIV || N->getOpcode() == ISD::UDIV ||
N->getOpcode() == ISD::FDIV) {
- if ((RHSOp.getOpcode() == ISD::Constant &&
- cast<ConstantSDNode>(RHSOp.getNode())->isNullValue()) ||
- (RHSOp.getOpcode() == ISD::ConstantFP &&
- cast<ConstantFPSDNode>(RHSOp.getNode())->getValueAPF().isZero()))
+ if (isNullConstant(RHSOp) || (RHSOp.getOpcode() == ISD::ConstantFP &&
+ cast<ConstantFPSDNode>(RHSOp.getNode())->isZero()))
break;
}
return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(N), LHS.getValueType(), Ops);
}
+ // Try to convert a constant mask AND into a shuffle clear mask.
+ if (SDValue Shuffle = XformToShuffleWithZero(N))
+ return Shuffle;
+
// Type legalization might introduce new shuffles in the DAG.
// Fold (VBinOp (shuffle (A, Undef, Mask)), (shuffle (B, Undef, Mask)))
// -> (shuffle (VBinOp (A, B)), Undef, Mask).
EVT VT = N2.getValueType();
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
- ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N3.getNode());
// Determine if the condition we're dealing with is constant
SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()),
N0, N1, CC, DL, false);
if (SCC.getNode()) AddToWorklist(SCC.getNode());
- ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode());
- // fold select_cc true, x, y -> x
- if (SCCC && !SCCC->isNullValue())
- return N2;
- // fold select_cc false, x, y -> y
- if (SCCC && SCCC->isNullValue())
- return N3;
+ if (ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode())) {
+ // fold select_cc true, x, y -> x
+ // fold select_cc false, x, y -> y
+ return !SCCC->isNullValue() ? N2 : N3;
+ }
// Check to see if we can simplify the select into an fabs node
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1)) {
// Allow either -0.0 or 0.0
- if (CFP->getValueAPF().isZero()) {
+ if (CFP->isZero()) {
// select (setg[te] X, +/-0.0), X, fneg(X) -> fabs
if ((CC == ISD::SETGE || CC == ISD::SETGT) &&
N0 == N2 && N3.getOpcode() == ISD::FNEG &&
const_cast<ConstantFP*>(TV->getConstantFPValue())
};
Type *FPTy = Elts[0]->getType();
- const DataLayout &TD = *TLI.getDataLayout();
+ const DataLayout &TD = DAG.getDataLayout();
// Create a ConstantArray of the two constants.
Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts);
- SDValue CPIdx = DAG.getConstantPool(CA, TLI.getPointerTy(),
- TD.getPrefTypeAlignment(FPTy));
+ SDValue CPIdx =
+ DAG.getConstantPool(CA, TLI.getPointerTy(DAG.getDataLayout()),
+ TD.getPrefTypeAlignment(FPTy));
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
// Get the offsets to the 0 and 1 element of the array so that we can
CPIdx = DAG.getNode(ISD::ADD, DL, CPIdx.getValueType(), CPIdx,
CstOffset);
AddToWorklist(CPIdx.getNode());
- return DAG.getLoad(TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
- MachinePointerInfo::getConstantPool(), false,
- false, false, Alignment);
+ return DAG.getLoad(
+ TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx,
+ MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
+ false, false, false, Alignment);
}
}
// Check to see if we can perform the "gzip trick", transforming
// (select_cc setlt X, 0, A, 0) -> (and (sra X, (sub size(X), 1), A)
- if (N1C && N3C && N3C->isNullValue() && CC == ISD::SETLT &&
- (N1C->isNullValue() || // (a < 0) ? b : 0
- (N1C->getAPIntValue() == 1 && N0 == N2))) { // (a < 1) ? a : 0
+ if (isNullConstant(N3) && CC == ISD::SETLT &&
+ (isNullConstant(N1) || // (a < 0) ? b : 0
+ (isOneConstant(N1) && N0 == N2))) { // (a < 1) ? a : 0
EVT XType = N0.getValueType();
EVT AType = N2.getValueType();
if (XType.bitsGE(AType)) {
// single bit-test can be materialized as an all-ones register with
// shift-left and shift-right-arith.
if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND &&
- N0->getValueType(0) == VT &&
- N1C && N1C->isNullValue() &&
- N2C && N2C->isNullValue()) {
+ N0->getValueType(0) == VT && isNullConstant(N1) && isNullConstant(N2)) {
SDValue AndLHS = N0->getOperand(0);
ConstantSDNode *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1));
if (ConstAndRHS && ConstAndRHS->getAPIntValue().countPopulation() == 1) {
}
// fold select C, 16, 0 -> shl C, 4
- if (N2C && N3C && N3C->isNullValue() && N2C->getAPIntValue().isPowerOf2() &&
+ if (N2C && isNullConstant(N3) && N2C->getAPIntValue().isPowerOf2() &&
TLI.getBooleanContents(N0.getValueType()) ==
TargetLowering::ZeroOrOneBooleanContent) {
// If the caller doesn't want us to simplify this into a zext of a compare,
// don't do it.
- if (NotExtCompare && N2C->getAPIntValue() == 1)
+ if (NotExtCompare && N2C->isOne())
return SDValue();
// Get a SetCC of the condition
AddToWorklist(SCC.getNode());
AddToWorklist(Temp.getNode());
- if (N2C->getAPIntValue() == 1)
+ if (N2C->isOne())
return Temp;
// shl setcc result by log2 n2c
// Check to see if this is the equivalent of setcc
// FIXME: Turn all of these into setcc if setcc is legal;
// otherwise, go ahead with the folds.
- if (0 && N3C && N3C->isNullValue() && N2C && (N2C->getAPIntValue() == 1ULL)) {
+ if (0 && isNullConstant(N3) && isOneConstant(N2)) {
EVT XType = N0.getValueType();
if (!LegalOperations ||
TLI.isOperationLegal(ISD::SETCC, getSetCCResultType(XType))) {
}
// fold (seteq X, 0) -> (srl (ctlz X, log2(size(X))))
- if (N1C && N1C->isNullValue() && CC == ISD::SETEQ &&
+ if (isNullConstant(N1) && CC == ISD::SETEQ &&
(!LegalOperations ||
TLI.isOperationLegal(ISD::CTLZ, XType))) {
SDValue Ctlz = DAG.getNode(ISD::CTLZ, SDLoc(N0), XType, N0);
getShiftAmountTy(Ctlz.getValueType())));
}
// fold (setgt X, 0) -> (srl (and (-X, ~X), size(X)-1))
- if (N1C && N1C->isNullValue() && CC == ISD::SETGT) {
+ if (isNullConstant(N1) && CC == ISD::SETGT) {
SDLoc DL(N0);
SDValue NegN0 = DAG.getNode(ISD::SUB, DL,
XType, DAG.getConstant(0, DL, XType), N0);
getShiftAmountTy(XType)));
}
// fold (setgt X, -1) -> (xor (srl (X, size(X)-1), 1))
- if (N1C && N1C->isAllOnesValue() && CC == ISD::SETGT) {
+ if (isAllOnesConstant(N1) && CC == ISD::SETGT) {
SDLoc DL(N0);
SDValue Sign = DAG.getNode(ISD::SRL, DL, XType, N0,
DAG.getConstant(XType.getSizeInBits() - 1, DL,
return SDValue();
// Avoid division by zero.
- if (!C->getAPIntValue())
+ if (C->isNullValue())
return SDValue();
std::vector<SDNode*> Built;
return SDValue();
// Avoid division by zero.
- if (!C->getAPIntValue())
+ if (C->isNullValue())
return SDValue();
std::vector<SDNode *> Built;
return SDValue();
// Avoid division by zero.
- if (!C->getAPIntValue())
+ if (C->isNullValue())
return SDValue();
std::vector<SDNode*> Built;
// If they are both volatile then they cannot be reordered.
if (Op0->isVolatile() && Op1->isVolatile()) return true;
+ // If one operation reads from invariant memory, and the other may store, they
+ // cannot alias. These should really be checking the equivalent of mayWrite,
+ // but it only matters for memory nodes other than load / store.
+ if (Op0->isInvariant() && Op1->writeMem())
+ return false;
+
+ if (Op1->isInvariant() && Op0->writeMem())
+ return false;
+
// Gather base node and offset information.
SDValue Base1, Base2;
int64_t Offset1, Offset2;
Op0->getSrcValueOffset() - MinOffset;
int64_t Overlap2 = (Op1->getMemoryVT().getSizeInBits() >> 3) +
Op1->getSrcValueOffset() - MinOffset;
- AliasAnalysis::AliasResult AAResult =
- AA.alias(AliasAnalysis::Location(Op0->getMemOperand()->getValue(),
- Overlap1,
- UseTBAA ? Op0->getAAInfo() : AAMDNodes()),
- AliasAnalysis::Location(Op1->getMemOperand()->getValue(),
- Overlap2,
- UseTBAA ? Op1->getAAInfo() : AAMDNodes()));
- if (AAResult == AliasAnalysis::NoAlias)
+ AliasResult AAResult =
+ AA.alias(MemoryLocation(Op0->getMemOperand()->getValue(), Overlap1,
+ UseTBAA ? Op0->getAAInfo() : AAMDNodes()),
+ MemoryLocation(Op1->getMemOperand()->getValue(), Overlap2,
+ UseTBAA ? Op1->getAAInfo() : AAMDNodes()));
+ if (AAResult == NoAlias)
return false;
}
// aliases list. If not, then continue up the chain looking for the next
// candidate.
while (!Chains.empty()) {
- SDValue Chain = Chains.back();
- Chains.pop_back();
+ SDValue Chain = Chains.pop_back_val();
// For TokenFactor nodes, look at each operand and only continue up the
// chain until we find two aliases. If we've seen two aliases, assume we'll
UIE = M->use_end(); UI != UIE; ++UI)
if (UI.getUse().getValueType() == MVT::Other &&
Visited.insert(*UI).second) {
- if (isa<MemIntrinsicSDNode>(*UI) || isa<MemSDNode>(*UI)) {
+ if (isa<MemSDNode>(*UI)) {
// We've not visited this use, and we care about it (it could have an
// ordering dependency with the original node).
Aliases.clear();