//
// The LLVM Compiler Infrastructure
//
-// This file was developed by the LLVM research group and is distributed under
-// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
+#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/MRegisterInfo.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/CallingConv.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/MathExtras.h"
-#include "llvm/Target/TargetAsmInfo.h"
using namespace llvm;
/// InitLibcallNames - Set default libcall names.
Names[RTLIB::NEG_I64] = "__negdi2";
Names[RTLIB::ADD_F32] = "__addsf3";
Names[RTLIB::ADD_F64] = "__adddf3";
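+ // "xf" is libgcc's 80-bit extended-precision (XFmode) suffix.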
+ Names[RTLIB::ADD_F80] = "__addxf3";
Names[RTLIB::ADD_PPCF128] = "__gcc_qadd";
Names[RTLIB::SUB_F32] = "__subsf3";
Names[RTLIB::SUB_F64] = "__subdf3";
+ Names[RTLIB::SUB_F80] = "__subxf3";
Names[RTLIB::SUB_PPCF128] = "__gcc_qsub";
Names[RTLIB::MUL_F32] = "__mulsf3";
Names[RTLIB::MUL_F64] = "__muldf3";
+ Names[RTLIB::MUL_F80] = "__mulxf3";
Names[RTLIB::MUL_PPCF128] = "__gcc_qmul";
Names[RTLIB::DIV_F32] = "__divsf3";
Names[RTLIB::DIV_F64] = "__divdf3";
+ Names[RTLIB::DIV_F80] = "__divxf3";
Names[RTLIB::DIV_PPCF128] = "__gcc_qdiv";
Names[RTLIB::REM_F32] = "fmodf";
Names[RTLIB::REM_F64] = "fmod";
+ Names[RTLIB::REM_F80] = "fmodl";
Names[RTLIB::REM_PPCF128] = "fmodl";
- Names[RTLIB::NEG_F32] = "__negsf2";
- Names[RTLIB::NEG_F64] = "__negdf2";
Names[RTLIB::POWI_F32] = "__powisf2";
Names[RTLIB::POWI_F64] = "__powidf2";
Names[RTLIB::POWI_F80] = "__powixf2";
Names[RTLIB::SQRT_PPCF128] = "sqrtl";
Names[RTLIB::SIN_F32] = "sinf";
Names[RTLIB::SIN_F64] = "sin";
+ Names[RTLIB::SIN_F80] = "sinl";
+ Names[RTLIB::SIN_PPCF128] = "sinl";
Names[RTLIB::COS_F32] = "cosf";
Names[RTLIB::COS_F64] = "cos";
+ Names[RTLIB::COS_F80] = "cosl";
+ Names[RTLIB::COS_PPCF128] = "cosl";
+ Names[RTLIB::POW_F32] = "powf";
+ Names[RTLIB::POW_F64] = "pow";
+ Names[RTLIB::POW_F80] = "powl";
+ Names[RTLIB::POW_PPCF128] = "powl";
Names[RTLIB::FPEXT_F32_F64] = "__extendsfdf2";
Names[RTLIB::FPROUND_F64_F32] = "__truncdfsf2";
Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
// All operations default to being supported.
memset(OpActions, 0, sizeof(OpActions));
memset(LoadXActions, 0, sizeof(LoadXActions));
- memset(&StoreXActions, 0, sizeof(StoreXActions));
- memset(&IndexedModeActions, 0, sizeof(IndexedModeActions));
- memset(&ConvertActions, 0, sizeof(ConvertActions));
+ memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
+ memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
+ memset(ConvertActions, 0, sizeof(ConvertActions));
- // Set all indexed load / store to expand.
+ // Set default actions for various operations.
for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
+ // Default all indexed load / store to expand.
for (unsigned IM = (unsigned)ISD::PRE_INC;
IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
setIndexedLoadAction(IM, (MVT::ValueType)VT, Expand);
setIndexedStoreAction(IM, (MVT::ValueType)VT, Expand);
}
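+ // (Targets with auto-increment addressing, such as PowerPC, later mark
+ // the indexed modes they support as Legal.)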
+
+ // These operations default to expand.
+ setOperationAction(ISD::FGETSIGN, (MVT::ValueType)VT, Expand);
}
-
+
+ // ConstantFP nodes default to expand. Targets can either change this to
+ // Legal, in which case all fp constants are legal, or use addLegalFPImmediate
+ // to optimize expansions for certain constants.
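+ // (For example, an x86-style target might call
+ // addLegalFPImmediate(APFloat(+0.0)) so that +0.0 is materialized
+ // directly, e.g. with fldz.)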
+ setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
+ setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
+ setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
+
+ // Default ISD::TRAP to expand (which turns it into abort).
+ setOperationAction(ISD::TRAP, MVT::Other, Expand);
+
IsLittleEndian = TD->isLittleEndian();
UsesGlobalOffsetTable = false;
ShiftAmountTy = SetCCResultTy = PointerTy = getValueType(TD->getIntPtrType());
TargetLowering::~TargetLowering() {}
+
+SDOperand TargetLowering::LowerMEMCPY(SDOperand Op, SelectionDAG &DAG) {
+ assert(getSubtarget() && "Subtarget not defined");
+ SDOperand ChainOp = Op.getOperand(0);
+ SDOperand DestOp = Op.getOperand(1);
+ SDOperand SourceOp = Op.getOperand(2);
+ SDOperand CountOp = Op.getOperand(3);
+ SDOperand AlignOp = Op.getOperand(4);
+ SDOperand AlwaysInlineOp = Op.getOperand(5);
+
+ bool AlwaysInline = (bool)cast<ConstantSDNode>(AlwaysInlineOp)->getValue();
+ unsigned Align = (unsigned)cast<ConstantSDNode>(AlignOp)->getValue();
+ if (Align == 0) Align = 1;
+
+ // If size is unknown, call memcpy.
+ ConstantSDNode *I = dyn_cast<ConstantSDNode>(CountOp);
+ if (!I) {
+ assert(!AlwaysInline && "Cannot inline copy of unknown size");
+ return LowerMEMCPYCall(ChainOp, DestOp, SourceOp, CountOp, DAG);
+ }
+
+ // If not DWORD aligned, or if the size is more than the threshold, call
+ // memcpy. The libc version is likely to be faster for these cases: it can
+ // use the address value and run-time information about the CPU. (With
+ // glibc 2.6.1 on a Core 2, copying an array of 100M longs was 30% faster
+ // using the libc call.)
+ unsigned Size = I->getValue();
+ if (AlwaysInline ||
+ (Size <= getSubtarget()->getMaxInlineSizeThreshold() &&
+ (Align & 3) == 0))
+ return LowerMEMCPYInline(ChainOp, DestOp, SourceOp, Size, Align, DAG);
+ return LowerMEMCPYCall(ChainOp, DestOp, SourceOp, CountOp, DAG);
+}
+
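+// Usage sketch (hypothetical target code): a target opting into this
+// expansion can forward the node from its LowerOperation hook, e.g.
+//   case ISD::MEMCPY: return LowerMEMCPY(Op, DAG);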
+
+SDOperand TargetLowering::LowerMEMCPYCall(SDOperand Chain,
+ SDOperand Dest,
+ SDOperand Source,
+ SDOperand Count,
+ SelectionDAG &DAG) {
+ MVT::ValueType IntPtr = getPointerTy();
+ TargetLowering::ArgListTy Args;
+ TargetLowering::ArgListEntry Entry;
+ Entry.Ty = getTargetData()->getIntPtrType();
+ Entry.Node = Dest; Args.push_back(Entry);
+ Entry.Node = Source; Args.push_back(Entry);
+ Entry.Node = Count; Args.push_back(Entry);
+ std::pair<SDOperand,SDOperand> CallResult =
+ LowerCallTo(Chain, Type::VoidTy, false, false, false, CallingConv::C,
+ false, DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG);
+ return CallResult.second;
+}
+
+
/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLowering::computeRegisterProperties() {
unsigned NumVectorRegs = 1;
+ // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally we
+ // could break down into LHS/RHS like LegalizeDAG does.
+ if (!isPowerOf2_32(NumElts)) {
+ NumVectorRegs = NumElts;
+ NumElts = 1;
+ }
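+ // (So, for example, a 3-element vector is handled as three separate
+ // parts of the element type rather than being split in half.)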
+
// Divide the input until we get to a supported size. This will always
// end with a scalar if the target doesn't support vectors.
while (NumElts > 1 &&
return 1;
}
+/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
+/// function arguments in the caller parameter area.
+unsigned TargetLowering::getByValTypeAlignment(const Type *Ty) const {
+ return Log2_32(TD->getCallFrameTypeAlignment(Ty));
+}
+
+SDOperand TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
+ SelectionDAG &DAG) const {
+ if (usesGlobalOffsetTable())
+ return DAG.getNode(ISD::GLOBAL_OFFSET_TABLE, getPointerTy());
+ return Table;
+}
+
//===----------------------------------------------------------------------===//
// Optimization Methods
//===----------------------------------------------------------------------===//
/// are any bits set in the constant that are not demanded. If so, shrink the
/// constant and return true.
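+/// For example, with Demanded = 1 and Op = (or X, 255), the constant can
+/// be shrunk to 1, since its other bits are never used.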
bool TargetLowering::TargetLoweringOpt::ShrinkDemandedConstant(SDOperand Op,
- uint64_t Demanded) {
+ const APInt &Demanded) {
// FIXME: ISD::SELECT, ISD::SELECT_CC
switch(Op.getOpcode()) {
default: break;
case ISD::OR:
case ISD::XOR:
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
- if ((~Demanded & C->getValue()) != 0) {
+ if (C->getAPIntValue().intersects(~Demanded)) {
MVT::ValueType VT = Op.getValueType();
SDOperand New = DAG.getNode(Op.getOpcode(), VT, Op.getOperand(0),
- DAG.getConstant(Demanded & C->getValue(),
+ DAG.getConstant(Demanded &
+ C->getAPIntValue(),
VT));
return CombineTo(Op, New);
}
/// analyze the expression and return a mask of KnownOne and KnownZero bits for
/// the expression (used to simplify the caller). The KnownZero/One bits may
/// only be accurate for those bits in the DemandedMask.
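+/// For example, if Op is (and X, 0xF0), bits 0-3 of KnownZero will be set
+/// regardless of X.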
-bool TargetLowering::SimplifyDemandedBits(SDOperand Op, uint64_t DemandedMask,
- uint64_t &KnownZero,
- uint64_t &KnownOne,
+bool TargetLowering::SimplifyDemandedBits(SDOperand Op,
+ const APInt &DemandedMask,
+ APInt &KnownZero,
+ APInt &KnownOne,
TargetLoweringOpt &TLO,
unsigned Depth) const {
- KnownZero = KnownOne = 0; // Don't know anything.
+ unsigned BitWidth = DemandedMask.getBitWidth();
+ assert(Op.getValueSizeInBits() == BitWidth &&
+ "Mask size mismatches value type size!");
+ APInt NewMask = DemandedMask;
+
+ // Don't know anything.
+ KnownZero = KnownOne = APInt(BitWidth, 0);
- // The masks are not wide enough to represent this type! Should use APInt.
- if (Op.getValueType() == MVT::i128)
- return false;
-
// Other users may use these bits.
if (!Op.Val->hasOneUse()) {
if (Depth != 0) {
return false;
}
// If this is the root being simplified, allow it to have multiple uses,
- // just set the DemandedMask to all bits.
- DemandedMask = MVT::getIntVTBitMask(Op.getValueType());
+ // just set the NewMask to all bits.
+ NewMask = APInt::getAllOnesValue(BitWidth);
} else if (DemandedMask == 0) {
// Not demanding any bits from Op.
if (Op.getOpcode() != ISD::UNDEF)
return false;
}
- uint64_t KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
+ APInt KnownZero2, KnownOne2, KnownZeroOut, KnownOneOut;
switch (Op.getOpcode()) {
case ISD::Constant:
// We know all of the bits for a constant!
- KnownOne = cast<ConstantSDNode>(Op)->getValue() & DemandedMask;
- KnownZero = ~KnownOne & DemandedMask;
+ KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue() & NewMask;
+ KnownZero = ~KnownOne & NewMask;
return false; // Don't fall through, will infinitely loop.
case ISD::AND:
// If the RHS is a constant, check to see if the LHS would be zero without
// simplify the LHS, here we're using information from the LHS to simplify
// the RHS.
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
- uint64_t LHSZero, LHSOne;
- TLO.DAG.ComputeMaskedBits(Op.getOperand(0), DemandedMask,
+ APInt LHSZero, LHSOne;
+ TLO.DAG.ComputeMaskedBits(Op.getOperand(0), NewMask,
LHSZero, LHSOne, Depth+1);
// If the LHS already has zeros where RHSC does, this and is dead.
- if ((LHSZero & DemandedMask) == (~RHSC->getValue() & DemandedMask))
+ if ((LHSZero & NewMask) == (~RHSC->getAPIntValue() & NewMask))
return TLO.CombineTo(Op, Op.getOperand(0));
// If any of the set bits in the RHS are known zero on the LHS, shrink
// the constant.
- if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & DemandedMask))
+ if (TLO.ShrinkDemandedConstant(Op, ~LHSZero & NewMask))
return true;
}
- if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
+ if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & ~KnownZero,
+ if (SimplifyDemandedBits(Op.getOperand(0), ~KnownZero & NewMask,
KnownZero2, KnownOne2, TLO, Depth+1))
return true;
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
// If all of the demanded bits are known one on one side, return the other.
// These bits cannot contribute to the result of the 'and'.
- if ((DemandedMask & ~KnownZero2 & KnownOne)==(DemandedMask & ~KnownZero2))
+ if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
return TLO.CombineTo(Op, Op.getOperand(0));
- if ((DemandedMask & ~KnownZero & KnownOne2)==(DemandedMask & ~KnownZero))
+ if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
return TLO.CombineTo(Op, Op.getOperand(1));
// If all of the demanded bits in the inputs are known zeros, return zero.
- if ((DemandedMask & (KnownZero|KnownZero2)) == DemandedMask)
+ if ((NewMask & (KnownZero|KnownZero2)) == NewMask)
return TLO.CombineTo(Op, TLO.DAG.getConstant(0, Op.getValueType()));
// If the RHS is a constant, see if we can simplify it.
- if (TLO.ShrinkDemandedConstant(Op, DemandedMask & ~KnownZero2))
+ if (TLO.ShrinkDemandedConstant(Op, ~KnownZero2 & NewMask))
return true;
// Output known-1 bits are only known if set in both the LHS & RHS.
KnownZero |= KnownZero2;
break;
case ISD::OR:
- if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
+ if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & ~KnownOne,
+ if (SimplifyDemandedBits(Op.getOperand(0), ~KnownOne & NewMask,
KnownZero2, KnownOne2, TLO, Depth+1))
return true;
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
// If all of the demanded bits are known zero on one side, return the other.
// These bits cannot contribute to the result of the 'or'.
- if ((DemandedMask & ~KnownOne2 & KnownZero) == (DemandedMask & ~KnownOne2))
+ if ((NewMask & ~KnownOne2 & KnownZero) == (~KnownOne2 & NewMask))
return TLO.CombineTo(Op, Op.getOperand(0));
- if ((DemandedMask & ~KnownOne & KnownZero2) == (DemandedMask & ~KnownOne))
+ if ((NewMask & ~KnownOne & KnownZero2) == (~KnownOne & NewMask))
return TLO.CombineTo(Op, Op.getOperand(1));
// If all of the potentially set bits on one side are known to be set on
// the other side, just use the 'other' side.
- if ((DemandedMask & (~KnownZero) & KnownOne2) ==
- (DemandedMask & (~KnownZero)))
+ if ((NewMask & ~KnownZero & KnownOne2) == (~KnownZero & NewMask))
return TLO.CombineTo(Op, Op.getOperand(0));
- if ((DemandedMask & (~KnownZero2) & KnownOne) ==
- (DemandedMask & (~KnownZero2)))
+ if ((NewMask & ~KnownZero2 & KnownOne) == (~KnownZero2 & NewMask))
return TLO.CombineTo(Op, Op.getOperand(1));
// If the RHS is a constant, see if we can simplify it.
- if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
+ if (TLO.ShrinkDemandedConstant(Op, NewMask))
return true;
// Output known-0 bits are only known if clear in both the LHS & RHS.
KnownOne |= KnownOne2;
break;
case ISD::XOR:
- if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero,
+ if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero,
KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask, KnownZero2,
+ if (SimplifyDemandedBits(Op.getOperand(0), NewMask, KnownZero2,
KnownOne2, TLO, Depth+1))
return true;
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
// If all of the demanded bits are known zero on one side, return the other.
// These bits cannot contribute to the result of the 'xor'.
- if ((DemandedMask & KnownZero) == DemandedMask)
+ if ((KnownZero & NewMask) == NewMask)
return TLO.CombineTo(Op, Op.getOperand(0));
- if ((DemandedMask & KnownZero2) == DemandedMask)
+ if ((KnownZero2 & NewMask) == NewMask)
return TLO.CombineTo(Op, Op.getOperand(1));
// If all of the unknown bits are known to be zero on one side or the other
// (but not both) turn this into an *inclusive* or.
// e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
- if ((DemandedMask & ~KnownZero & ~KnownZero2) == 0)
+ if ((NewMask & ~KnownZero & ~KnownZero2) == 0)
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, Op.getValueType(),
Op.getOperand(0),
Op.getOperand(1)));
// bits on that side are also known to be set on the other side, turn this
// into an AND, as we know the bits will be cleared.
// e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
- if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask) { // all known
+ if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known
if ((KnownOne & KnownOne2) == KnownOne) {
MVT::ValueType VT = Op.getValueType();
- SDOperand ANDC = TLO.DAG.getConstant(~KnownOne & DemandedMask, VT);
+ SDOperand ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT);
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, VT, Op.getOperand(0),
ANDC));
}
// If the RHS is a constant, see if we can simplify it.
// FIXME: for XOR, we prefer to force bits to 1 if they will make a -1.
- if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
+ if (TLO.ShrinkDemandedConstant(Op, NewMask))
return true;
KnownZero = KnownZeroOut;
KnownOne = KnownOneOut;
break;
- case ISD::SETCC:
- // If we know the result of a setcc has the top bits zero, use this info.
- if (getSetCCResultContents() == TargetLowering::ZeroOrOneSetCCResult)
- KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
- break;
case ISD::SELECT:
- if (SimplifyDemandedBits(Op.getOperand(2), DemandedMask, KnownZero,
+ if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero,
KnownOne, TLO, Depth+1))
return true;
- if (SimplifyDemandedBits(Op.getOperand(1), DemandedMask, KnownZero2,
+ if (SimplifyDemandedBits(Op.getOperand(1), NewMask, KnownZero2,
KnownOne2, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
// If the operands are constants, see if we can simplify them.
- if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
+ if (TLO.ShrinkDemandedConstant(Op, NewMask))
return true;
// Only known if known in both the LHS and RHS.
KnownZero &= KnownZero2;
break;
case ISD::SELECT_CC:
- if (SimplifyDemandedBits(Op.getOperand(3), DemandedMask, KnownZero,
+ if (SimplifyDemandedBits(Op.getOperand(3), NewMask, KnownZero,
KnownOne, TLO, Depth+1))
return true;
- if (SimplifyDemandedBits(Op.getOperand(2), DemandedMask, KnownZero2,
+ if (SimplifyDemandedBits(Op.getOperand(2), NewMask, KnownZero2,
KnownOne2, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
// If the operands are constants, see if we can simplify them.
- if (TLO.ShrinkDemandedConstant(Op, DemandedMask))
+ if (TLO.ShrinkDemandedConstant(Op, NewMask))
return true;
// Only known if known in both the LHS and RHS.
unsigned ShAmt = SA->getValue();
SDOperand InOp = Op.getOperand(0);
+ // If the shift count is an invalid immediate, don't do anything.
+ if (ShAmt >= BitWidth)
+ break;
+
// If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
// single shift. We can do this if the bottom bits (which are shifted
// out) are never demanded.
if (InOp.getOpcode() == ISD::SRL &&
isa<ConstantSDNode>(InOp.getOperand(1))) {
- if (ShAmt && (DemandedMask & ((1ULL << ShAmt)-1)) == 0) {
+ if (ShAmt && (NewMask & APInt::getLowBitsSet(BitWidth, ShAmt)) == 0) {
unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getValue();
unsigned Opc = ISD::SHL;
int Diff = ShAmt-C1;
}
}
- if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask >> ShAmt,
+ if (SimplifyDemandedBits(Op.getOperand(0), NewMask.lshr(ShAmt),
KnownZero, KnownOne, TLO, Depth+1))
return true;
KnownZero <<= SA->getValue();
KnownOne <<= SA->getValue();
- KnownZero |= (1ULL << SA->getValue())-1; // low bits known zero.
+ // low bits known zero.
+ KnownZero |= APInt::getLowBitsSet(BitWidth, SA->getValue());
}
break;
case ISD::SRL:
if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
MVT::ValueType VT = Op.getValueType();
unsigned ShAmt = SA->getValue();
- uint64_t TypeMask = MVT::getIntVTBitMask(VT);
unsigned VTSize = MVT::getSizeInBits(VT);
SDOperand InOp = Op.getOperand(0);
+ // If the shift count is an invalid immediate, don't do anything.
+ if (ShAmt >= BitWidth)
+ break;
+
// If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
// single shift. We can do this if the top bits (which are shifted out)
// are never demanded.
if (InOp.getOpcode() == ISD::SHL &&
isa<ConstantSDNode>(InOp.getOperand(1))) {
- if (ShAmt && (DemandedMask & (~0ULL << (VTSize-ShAmt))) == 0) {
+ if (ShAmt && (NewMask & APInt::getHighBitsSet(VTSize, ShAmt)) == 0) {
unsigned C1 = cast<ConstantSDNode>(InOp.getOperand(1))->getValue();
unsigned Opc = ISD::SRL;
int Diff = ShAmt-C1;
}
// Compute the new bits that are at the top now.
- if (SimplifyDemandedBits(InOp, (DemandedMask << ShAmt) & TypeMask,
+ if (SimplifyDemandedBits(InOp, (NewMask << ShAmt),
KnownZero, KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- KnownZero &= TypeMask;
- KnownOne &= TypeMask;
- KnownZero >>= ShAmt;
- KnownOne >>= ShAmt;
+ KnownZero = KnownZero.lshr(ShAmt);
+ KnownOne = KnownOne.lshr(ShAmt);
- uint64_t HighBits = (1ULL << ShAmt)-1;
- HighBits <<= VTSize - ShAmt;
+ APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
KnownZero |= HighBits; // High bits known zero.
}
break;
MVT::ValueType VT = Op.getValueType();
unsigned ShAmt = SA->getValue();
- // Compute the new bits that are at the top now.
- uint64_t TypeMask = MVT::getIntVTBitMask(VT);
-
- uint64_t InDemandedMask = (DemandedMask << ShAmt) & TypeMask;
+ // If the shift count is an invalid immediate, don't do anything.
+ if (ShAmt >= BitWidth)
+ break;
+
+ APInt InDemandedMask = (NewMask << ShAmt);
// If any of the demanded bits are produced by the sign extension, we also
// demand the input sign bit.
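+ // (e.g. for an i32 (sra X, 4), demanding any of the top four result bits
+ // demands bit 31 of X.)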
- uint64_t HighBits = (1ULL << ShAmt)-1;
- HighBits <<= MVT::getSizeInBits(VT) - ShAmt;
- if (HighBits & DemandedMask)
- InDemandedMask |= MVT::getIntVTSignBit(VT);
+ APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
+ if (HighBits.intersects(NewMask))
+ InDemandedMask |= APInt::getSignBit(MVT::getSizeInBits(VT));
if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- KnownZero &= TypeMask;
- KnownOne &= TypeMask;
- KnownZero >>= ShAmt;
- KnownOne >>= ShAmt;
+ KnownZero = KnownZero.lshr(ShAmt);
+ KnownOne = KnownOne.lshr(ShAmt);
- // Handle the sign bits.
- uint64_t SignBit = MVT::getIntVTSignBit(VT);
- SignBit >>= ShAmt; // Adjust to where it is now in the mask.
+ // Handle the sign bit, adjusted to where it is now in the mask.
+ APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt);
// If the input sign bit is known to be zero, or if none of the top bits
// are demanded, turn this into an unsigned shift right.
- if ((KnownZero & SignBit) || (HighBits & ~DemandedMask) == HighBits) {
+ if (KnownZero.intersects(SignBit) || (HighBits & ~NewMask) == HighBits) {
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, VT, Op.getOperand(0),
Op.getOperand(1)));
- } else if (KnownOne & SignBit) { // New bits are known one.
+ } else if (KnownOne.intersects(SignBit)) { // New bits are known one.
KnownOne |= HighBits;
}
}
// Sign extension. Compute the demanded bits in the result that are not
// present in the input.
- uint64_t NewBits = ~MVT::getIntVTBitMask(EVT) & DemandedMask;
+ APInt NewBits = APInt::getHighBitsSet(BitWidth,
+ BitWidth - MVT::getSizeInBits(EVT)) &
+ NewMask;
// If none of the extended bits are demanded, eliminate the sextinreg.
if (NewBits == 0)
return TLO.CombineTo(Op, Op.getOperand(0));
- uint64_t InSignBit = MVT::getIntVTSignBit(EVT);
- int64_t InputDemandedBits = DemandedMask & MVT::getIntVTBitMask(EVT);
+ APInt InSignBit = APInt::getSignBit(MVT::getSizeInBits(EVT));
+ InSignBit.zext(BitWidth);
+ APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth,
+ MVT::getSizeInBits(EVT)) &
+ NewMask;
// Since the sign extended bits are demanded, we know that the sign
// bit is demanded.
// top bits of the result.
// If the input sign bit is known zero, convert this into a zero extension.
- if (KnownZero & InSignBit)
+ if (KnownZero.intersects(InSignBit))
return TLO.CombineTo(Op,
TLO.DAG.getZeroExtendInReg(Op.getOperand(0), EVT));
- if (KnownOne & InSignBit) { // Input sign bit known set
+ if (KnownOne.intersects(InSignBit)) { // Input sign bit known set
KnownOne |= NewBits;
KnownZero &= ~NewBits;
} else { // Input sign bit unknown
}
break;
}
- case ISD::CTTZ:
- case ISD::CTLZ:
- case ISD::CTPOP: {
- MVT::ValueType VT = Op.getValueType();
- unsigned LowBits = Log2_32(MVT::getSizeInBits(VT))+1;
- KnownZero = ~((1ULL << LowBits)-1) & MVT::getIntVTBitMask(VT);
- KnownOne = 0;
- break;
- }
- case ISD::LOAD: {
- if (ISD::isZEXTLoad(Op.Val)) {
- LoadSDNode *LD = cast<LoadSDNode>(Op);
- MVT::ValueType VT = LD->getLoadedVT();
- KnownZero |= ~MVT::getIntVTBitMask(VT) & DemandedMask;
- }
- break;
- }
case ISD::ZERO_EXTEND: {
- uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType());
+ unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits();
+ APInt InMask = NewMask;
+ InMask.trunc(OperandBitWidth);
// If none of the top bits are demanded, convert this into an any_extend.
- uint64_t NewBits = (~InMask) & DemandedMask;
- if (NewBits == 0)
+ APInt NewBits =
+ APInt::getHighBitsSet(BitWidth, BitWidth - OperandBitWidth) & NewMask;
+ if (!NewBits.intersects(NewMask))
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ANY_EXTEND,
Op.getValueType(),
Op.getOperand(0)));
- if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
+ if (SimplifyDemandedBits(Op.getOperand(0), InMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ KnownZero.zext(BitWidth);
+ KnownOne.zext(BitWidth);
KnownZero |= NewBits;
break;
}
case ISD::SIGN_EXTEND: {
MVT::ValueType InVT = Op.getOperand(0).getValueType();
- uint64_t InMask = MVT::getIntVTBitMask(InVT);
- uint64_t InSignBit = MVT::getIntVTSignBit(InVT);
- uint64_t NewBits = (~InMask) & DemandedMask;
+ unsigned InBits = MVT::getSizeInBits(InVT);
+ APInt InMask = APInt::getLowBitsSet(BitWidth, InBits);
+ APInt InSignBit = APInt::getSignBit(InBits);
+ InSignBit.zext(BitWidth); // The sign bit of InVT, widened to BitWidth.
+ APInt NewBits = ~InMask & NewMask;
// If none of the top bits are demanded, convert this into an any_extend.
if (NewBits == 0)
// Since some of the sign extended bits are demanded, we know that the sign
// bit is demanded.
- uint64_t InDemandedBits = DemandedMask & InMask;
+ APInt InDemandedBits = InMask & NewMask;
InDemandedBits |= InSignBit;
+ InDemandedBits.trunc(InBits);
if (SimplifyDemandedBits(Op.getOperand(0), InDemandedBits, KnownZero,
KnownOne, TLO, Depth+1))
return true;
+ KnownZero.zext(BitWidth);
+ KnownOne.zext(BitWidth);
// If the sign bit is known zero, convert this to a zero extend.
- if (KnownZero & InSignBit)
+ if (KnownZero.intersects(InSignBit))
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::ZERO_EXTEND,
Op.getValueType(),
Op.getOperand(0)));
// If the sign bit is known one, the top bits match.
- if (KnownOne & InSignBit) {
+ if (KnownOne.intersects(InSignBit)) {
KnownOne |= NewBits;
KnownZero &= ~NewBits;
} else { // Otherwise, top bits aren't known.
break;
}
case ISD::ANY_EXTEND: {
- uint64_t InMask = MVT::getIntVTBitMask(Op.getOperand(0).getValueType());
- if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
+ unsigned OperandBitWidth = Op.getOperand(0).getValueSizeInBits();
+ APInt InMask = NewMask;
+ InMask.trunc(OperandBitWidth);
+ if (SimplifyDemandedBits(Op.getOperand(0), InMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
+ KnownZero.zext(BitWidth);
+ KnownOne.zext(BitWidth);
break;
}
case ISD::TRUNCATE: {
// Simplify the input, using demanded bit information, and compute the known
// zero/one bits live out.
- if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
+ APInt TruncMask = NewMask;
+ TruncMask.zext(Op.getOperand(0).getValueSizeInBits());
+ if (SimplifyDemandedBits(Op.getOperand(0), TruncMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
+ KnownZero.trunc(BitWidth);
+ KnownOne.trunc(BitWidth);
// If the input is only used by this truncate, see if we can shrink it based
// on the known demanded bits.
if (Op.getOperand(0).Val->hasOneUse()) {
SDOperand In = Op.getOperand(0);
+ unsigned InBitWidth = In.getValueSizeInBits();
switch (In.getOpcode()) {
default: break;
case ISD::SRL:
// Shrink SRL by a constant if none of the high bits shifted in are
// demanded.
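+ // (e.g. when truncating (srl X:i64, 16) to i32, only result bits 16-31
+ // come from X's upper 32 bits, so if those bits are not demanded the
+ // shift can be performed at i32.)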
if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(In.getOperand(1))){
- uint64_t HighBits = MVT::getIntVTBitMask(In.getValueType());
- HighBits &= ~MVT::getIntVTBitMask(Op.getValueType());
- HighBits >>= ShAmt->getValue();
+ APInt HighBits = APInt::getHighBitsSet(InBitWidth,
+ InBitWidth - BitWidth);
+ HighBits = HighBits.lshr(ShAmt->getValue());
+ HighBits.trunc(BitWidth);
- if (ShAmt->getValue() < MVT::getSizeInBits(Op.getValueType()) &&
- (DemandedMask & HighBits) == 0) {
+ if (ShAmt->getValue() < BitWidth && !(HighBits & NewMask)) {
// None of the shifted in bits are needed. Add a truncate of the
// shift input, then shift it.
SDOperand NewTrunc = TLO.DAG.getNode(ISD::TRUNCATE,
}
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- uint64_t OutMask = MVT::getIntVTBitMask(Op.getValueType());
- KnownZero &= OutMask;
- KnownOne &= OutMask;
break;
}
case ISD::AssertZext: {
MVT::ValueType VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
- uint64_t InMask = MVT::getIntVTBitMask(VT);
- if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask & InMask,
+ APInt InMask = APInt::getLowBitsSet(BitWidth,
+ MVT::getSizeInBits(VT));
+ if (SimplifyDemandedBits(Op.getOperand(0), InMask & NewMask,
KnownZero, KnownOne, TLO, Depth+1))
return true;
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
- KnownZero |= ~InMask & DemandedMask;
+ KnownZero |= ~InMask & NewMask;
break;
}
+ case ISD::BIT_CONVERT:
+#if 0
+ // If this is an FP->Int bitcast and if the sign bit is the only thing that
+ // is demanded, turn this into a FGETSIGN.
+ if (NewMask == MVT::getIntVTSignBit(Op.getValueType()) &&
+ MVT::isFloatingPoint(Op.getOperand(0).getValueType()) &&
+ !MVT::isVector(Op.getOperand(0).getValueType())) {
+ // Only do this xform if FGETSIGN is valid or if before legalize.
+ if (!TLO.AfterLegalize ||
+ isOperationLegal(ISD::FGETSIGN, Op.getValueType())) {
+ // Make a FGETSIGN + SHL to move the sign bit into the appropriate
+ // place. We expect the SHL to be eliminated by other optimizations.
+ SDOperand Sign = TLO.DAG.getNode(ISD::FGETSIGN, Op.getValueType(),
+ Op.getOperand(0));
+ unsigned ShVal = MVT::getSizeInBits(Op.getValueType())-1;
+ SDOperand ShAmt = TLO.DAG.getConstant(ShVal, getShiftAmountTy());
+ return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SHL, Op.getValueType(),
+ Sign, ShAmt));
+ }
+ }
+#endif
+ break;
case ISD::ADD:
case ISD::SUB:
case ISD::INTRINSIC_WO_CHAIN:
case ISD::INTRINSIC_W_CHAIN:
case ISD::INTRINSIC_VOID:
+ case ISD::CTTZ:
+ case ISD::CTLZ:
+ case ISD::CTPOP:
+ case ISD::LOAD:
+ case ISD::SETCC:
+ case ISD::FGETSIGN:
// Just use ComputeMaskedBits to compute output bits.
- TLO.DAG.ComputeMaskedBits(Op, DemandedMask, KnownZero, KnownOne, Depth);
+ TLO.DAG.ComputeMaskedBits(Op, NewMask, KnownZero, KnownOne, Depth);
break;
}
// If we know the value of all of the demanded bits, return this as a
// constant.
- if ((DemandedMask & (KnownZero|KnownOne)) == DemandedMask)
+ if ((NewMask & (KnownZero|KnownOne)) == NewMask)
return TLO.CombineTo(Op, TLO.DAG.getConstant(KnownOne, Op.getValueType()));
return false;
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
void TargetLowering::computeMaskedBitsForTargetNode(const SDOperand Op,
- uint64_t Mask,
- uint64_t &KnownZero,
- uint64_t &KnownOne,
+ const APInt &Mask,
+ APInt &KnownZero,
+ APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth) const {
assert((Op.getOpcode() >= ISD::BUILTIN_OP_END ||
Op.getOpcode() == ISD::INTRINSIC_VOID) &&
"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");
- KnownZero = 0;
- KnownOne = 0;
+ KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
}
/// ComputeNumSignBitsForTargetNode - This method can be implemented by
cast<ConstantSDNode>(N0.getOperand(1))->getValue() == 1) {
// If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We
// can only do this if the top bits are known zero.
+ unsigned BitWidth = N0.getValueSizeInBits();
if (DAG.MaskedValueIsZero(N0,
- MVT::getIntVTBitMask(N0.getValueType())-1)){
+ APInt::getHighBitsSet(BitWidth,
+ BitWidth-1))) {
// Okay, get the un-inverted input value.
SDOperand Val;
if (N0.getOpcode() == ISD::XOR)
// Constant fold or commute setcc.
SDOperand O = DAG.FoldSetCC(VT, N0, N1, Cond);
if (O.Val) return O;
+ } else if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1.Val)) {
+ // If the RHS of an FP comparison is a constant, simplify it away in
+ // some cases.
+ if (CFP->getValueAPF().isNaN()) {
+ // If an operand is known to be a NaN, we can fold it.
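+ // (e.g. setolt(X, NaN) is known false and setult(X, NaN) is known true:
+ // an ordered comparison with a NaN operand is false, an unordered one
+ // true.)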
+ switch (ISD::getUnorderedFlavor(Cond)) {
+ default: assert(0 && "Unknown flavor!");
+ case 0: // Known false.
+ return DAG.getConstant(0, VT);
+ case 1: // Known true.
+ return DAG.getConstant(1, VT);
+ case 2: // Undefined.
+ return DAG.getNode(ISD::UNDEF, VT);
+ }
+ }
+
+ // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the
+ // constant if knowing that the operand is non-NaN is enough. We prefer to
+ // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to
+ // materialize 0.0.
+ if (Cond == ISD::SETO || Cond == ISD::SETUO)
+ return DAG.getSetCC(VT, N0, N0, Cond);
}
if (N0 == N1) {
if (N0.getOpcode() == ISD::XOR)
// If we know that all of the inverted bits are zero, don't bother
// performing the inversion.
- if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getValue()))
- return DAG.getSetCC(VT, N0.getOperand(0),
- DAG.getConstant(LHSR->getValue()^RHSC->getValue(),
- N0.getValueType()), Cond);
+ if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue()))
+ return
+ DAG.getSetCC(VT, N0.getOperand(0),
+ DAG.getConstant(LHSR->getAPIntValue() ^
+ RHSC->getAPIntValue(),
+ N0.getValueType()),
+ Cond);
}
// Turn (C1-X) == C2 --> X == C1-C2
if (ConstantSDNode *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
if (N0.getOpcode() == ISD::SUB && N0.Val->hasOneUse()) {
- return DAG.getSetCC(VT, N0.getOperand(1),
- DAG.getConstant(SUBC->getValue()-RHSC->getValue(),
- N0.getValueType()), Cond);
+ return
+ DAG.getSetCC(VT, N0.getOperand(1),
+ DAG.getConstant(SUBC->getAPIntValue() -
+ RHSC->getAPIntValue(),
+ N0.getValueType()),
+ Cond);
}
}
}
return C_Unknown;
}
+/// lowerXConstraint - Try to replace an X constraint, which matches anything,
+/// with another that has more specific requirements based on the type of the
+/// corresponding operand.
+void TargetLowering::lowerXConstraint(MVT::ValueType ConstraintVT,
+ std::string& s) const {
+ if (MVT::isInteger(ConstraintVT))
+ s = "r";
+ else if (MVT::isFloatingPoint(ConstraintVT))
+ s = "f"; // works for many targets
+ else
+ s = "";
+}
+
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void TargetLowering::LowerAsmOperandForConstraint(SDOperand Op,
SelectionDAG &DAG) {
switch (ConstraintLetter) {
default: break;
+ case 'X': // Allows any operand; labels (basic block) use this.
+ if (Op.getOpcode() == ISD::BasicBlock) {
+ Ops.push_back(Op);
+ return;
+ }
+ // fall through
case 'i': // Simple Integer or Relocatable Constant
case 'n': // Simple Integer
- case 's': // Relocatable Constant
- case 'X': { // Allows any operand.
+ case 's': { // Relocatable Constant
// These operands are interested in values of the form (GV+C), where C may
// be folded in as an offset of GV, or it may be explicitly added. Also, it
// is possible and fine if either GV or C are missing.
std::string RegName(Constraint.begin()+1, Constraint.end()-1);
// Figure out which register class contains this reg.
- const MRegisterInfo *RI = TM.getRegisterInfo();
- for (MRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
+ const TargetRegisterInfo *RI = TM.getRegisterInfo();
+ for (TargetRegisterInfo::regclass_iterator RCI = RI->regclass_begin(),
E = RI->regclass_end(); RCI != E; ++RCI) {
const TargetRegisterClass *RC = *RCI;
for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
I != E; ++I) {
- if (StringsEqualNoCase(RegName, RI->get(*I).Name))
+ if (StringsEqualNoCase(RegName, RI->get(*I).AsmName))
return std::make_pair(*I, RC);
}
}
// Check to see if we can do this.
if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
return SDOperand(); // BuildSDIV only operates on i32 or i64
- if (!isOperationLegal(ISD::MULHS, VT))
- return SDOperand(); // Make sure the target supports MULHS.
int64_t d = cast<ConstantSDNode>(N->getOperand(1))->getSignExtended();
ms magics = (VT == MVT::i32) ? magic32(d) : magic64(d);
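+ // magic32/magic64 use the magic-number algorithm from Hacker's Delight:
+ // N/d is rewritten as a multiply-high by magics.m plus the sign fixups
+ // emitted below.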
// Multiply the numerator (operand 0) by the magic value
- SDOperand Q = DAG.getNode(ISD::MULHS, VT, N->getOperand(0),
- DAG.getConstant(magics.m, VT));
+ SDOperand Q;
+ if (isOperationLegal(ISD::MULHS, VT))
+ Q = DAG.getNode(ISD::MULHS, VT, N->getOperand(0),
+ DAG.getConstant(magics.m, VT));
+ else if (isOperationLegal(ISD::SMUL_LOHI, VT))
+ Q = SDOperand(DAG.getNode(ISD::SMUL_LOHI, DAG.getVTList(VT, VT),
+ N->getOperand(0),
+ DAG.getConstant(magics.m, VT)).Val, 1);
+ else
+ return SDOperand(); // No mulhs or equivalent.
// If d > 0 and m < 0, add the numerator
if (d > 0 && magics.m < 0) {
Q = DAG.getNode(ISD::ADD, VT, Q, N->getOperand(0));
// Check to see if we can do this.
if (!isTypeLegal(VT) || (VT != MVT::i32 && VT != MVT::i64))
return SDOperand(); // BuildUDIV only operates on i32 or i64
- if (!isOperationLegal(ISD::MULHU, VT))
- return SDOperand(); // Make sure the target supports MULHU.
uint64_t d = cast<ConstantSDNode>(N->getOperand(1))->getValue();
mu magics = (VT == MVT::i32) ? magicu32(d) : magicu64(d);
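+ // magicu32/magicu64 are the unsigned variant of the magic-number
+ // technique used in BuildSDIV above.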
// Multiply the numerator (operand 0) by the magic value
- SDOperand Q = DAG.getNode(ISD::MULHU, VT, N->getOperand(0),
- DAG.getConstant(magics.m, VT));
+ SDOperand Q;
+ if (isOperationLegal(ISD::MULHU, VT))
+ Q = DAG.getNode(ISD::MULHU, VT, N->getOperand(0),
+ DAG.getConstant(magics.m, VT));
+ else if (isOperationLegal(ISD::UMUL_LOHI, VT))
+ Q = SDOperand(DAG.getNode(ISD::UMUL_LOHI, DAG.getVTList(VT, VT),
+ N->getOperand(0),
+ DAG.getConstant(magics.m, VT)).Val, 1);
+ else
+ return SDOperand(); // No mulhu or equivalent.
if (Created)
Created->push_back(Q.Val);