setTargetDAGCombine(ISD::XOR);
}
- if (!Subtarget.abiUsesSoftFloat()) {
+ if (!Subtarget.useSoftFloat()) {
addRegisterClass(MVT::f32, &Mips::FGR32RegClass);
// When dealing with single precision only, use libcalls
setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
}
- computeRegisterProperties();
+ computeRegisterProperties(Subtarget.getRegisterInfo());
}
const MipsTargetLowering *
// Return 0.
if (C == 0)
- return DAG.getConstant(0, VT);
+ return DAG.getConstant(0, DL, VT);
// Return x.
if (C == 1)
// If c is power of 2, return (shl x, log2(c)).
if (isPowerOf2_64(C))
return DAG.getNode(ISD::SHL, DL, VT, X,
- DAG.getConstant(Log2_64(C), ShiftTy));
+ DAG.getConstant(Log2_64(C), DL, ShiftTy));
unsigned Log2Ceil = Log2_64_Ceil(C);
uint64_t Floor = 1LL << Log2_64(C);
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)))
if (!VT.isVector())
- return genConstMult(N->getOperand(0), C->getZExtValue(), SDLoc(N),
- VT, TL->getScalarShiftAmountTy(VT), DAG);
+ return genConstMult(N->getOperand(0), C->getZExtValue(), SDLoc(N), VT,
+ TL->getScalarShiftAmountTy(DAG.getDataLayout(), VT),
+ DAG);
return SDValue(N, 0);
}
(SplatValue.getZExtValue() >= EltSize))
return SDValue();
- return DAG.getNode(Opc, SDLoc(N), Ty, N->getOperand(0),
- DAG.getConstant(SplatValue.getZExtValue(), MVT::i32));
+ SDLoc DL(N);
+ return DAG.getNode(Opc, DL, Ty, N->getOperand(0),
+ DAG.getConstant(SplatValue.getZExtValue(), DL, MVT::i32));
}
static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
case Mips::INSERT_FD_PSEUDO:
return emitINSERT_FD(MI, BB);
case Mips::INSERT_B_VIDX_PSEUDO:
+ case Mips::INSERT_B_VIDX64_PSEUDO:
return emitINSERT_DF_VIDX(MI, BB, 1, false);
case Mips::INSERT_H_VIDX_PSEUDO:
+ case Mips::INSERT_H_VIDX64_PSEUDO:
return emitINSERT_DF_VIDX(MI, BB, 2, false);
case Mips::INSERT_W_VIDX_PSEUDO:
+ case Mips::INSERT_W_VIDX64_PSEUDO:
return emitINSERT_DF_VIDX(MI, BB, 4, false);
case Mips::INSERT_D_VIDX_PSEUDO:
+ case Mips::INSERT_D_VIDX64_PSEUDO:
return emitINSERT_DF_VIDX(MI, BB, 8, false);
case Mips::INSERT_FW_VIDX_PSEUDO:
+ case Mips::INSERT_FW_VIDX64_PSEUDO:
return emitINSERT_DF_VIDX(MI, BB, 4, true);
case Mips::INSERT_FD_VIDX_PSEUDO:
+ case Mips::INSERT_FD_VIDX64_PSEUDO:
return emitINSERT_DF_VIDX(MI, BB, 8, true);
case Mips::FILL_FW_PSEUDO:
return emitFILL_FW(MI, BB);
Nd.getAlignment());
// i32 load from higher address.
- Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT));
+ Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, DL, PtrVT));
SDValue Hi = DAG.getLoad(MVT::i32, DL, Lo.getValue(1), Ptr,
MachinePointerInfo(), Nd.isVolatile(),
Nd.isNonTemporal(), Nd.isInvariant(),
SDValue Val = Nd.getValue(), Ptr = Nd.getBasePtr(), Chain = Nd.getChain();
EVT PtrVT = Ptr.getValueType();
SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
- Val, DAG.getConstant(0, MVT::i32));
+ Val, DAG.getConstant(0, DL, MVT::i32));
SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
- Val, DAG.getConstant(1, MVT::i32));
+ Val, DAG.getConstant(1, DL, MVT::i32));
if (!Subtarget.isLittle())
std::swap(Lo, Hi);
Nd.getAAInfo());
// i32 store to higher address.
- Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, PtrVT));
+ Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr, DAG.getConstant(4, DL, PtrVT));
return DAG.getStore(Chain, DL, Hi, Ptr, MachinePointerInfo(),
Nd.isVolatile(), Nd.isNonTemporal(),
std::min(Nd.getAlignment(), 4U), Nd.getAAInfo());
+// Initialize an accumulator value: extract the two i32 halves of the 64-bit
+// value In and combine them into an untyped value with a MipsISD::MTLOHI node.
static SDValue initAccumulator(SDValue In, SDLoc DL, SelectionDAG &DAG) {
  SDValue InLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
-                            DAG.getConstant(0, MVT::i32));
+                            DAG.getConstant(0, DL, MVT::i32));
  SDValue InHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, In,
-                            DAG.getConstant(1, MVT::i32));
+                            DAG.getConstant(1, DL, MVT::i32));
  return DAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped, InLo, InHi);
}
SDValue LaneB = Op->getOperand(2);
if (ResVecTy == MVT::v2i64) {
- LaneA = DAG.getConstant(0, MVT::i32);
+ LaneA = DAG.getConstant(0, DL, MVT::i32);
ViaVecTy = MVT::v4i32;
} else
LaneA = LaneB;
}
+// Lower the immediate at operand index ImmOp to a constant of the node's
+// result type. For vector result types getConstant produces a splat of the
+// value (see the "getConstant will promote SplatValue" note elsewhere in this
+// file).
static SDValue lowerMSASplatImm(SDValue Op, unsigned ImmOp, SelectionDAG &DAG) {
-  return DAG.getConstant(Op->getConstantOperandVal(ImmOp), Op->getValueType(0));
+  return DAG.getConstant(Op->getConstantOperandVal(ImmOp), SDLoc(Op),
+                         Op->getValueType(0));
}
static SDValue getBuildVectorSplat(EVT VecTy, SDValue SplatValue,
SplatValueA = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, SplatValue);
SplatValueB = DAG.getNode(ISD::SRL, DL, MVT::i64, SplatValue,
- DAG.getConstant(32, MVT::i32));
+ DAG.getConstant(32, DL, MVT::i32));
SplatValueB = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, SplatValueB);
}
if (ConstantSDNode *CImm = dyn_cast<ConstantSDNode>(Imm)) {
APInt BitImm = APInt(64, 1) << CImm->getAPIntValue();
- SDValue BitImmHiOp = DAG.getConstant(BitImm.lshr(32).trunc(32), MVT::i32);
- SDValue BitImmLoOp = DAG.getConstant(BitImm.trunc(32), MVT::i32);
+ SDValue BitImmHiOp = DAG.getConstant(BitImm.lshr(32).trunc(32), DL,
+ MVT::i32);
+ SDValue BitImmLoOp = DAG.getConstant(BitImm.trunc(32), DL, MVT::i32);
if (BigEndian)
std::swap(BitImmLoOp, BitImmHiOp);
Exp2Imm = getBuildVectorSplat(VecTy, Imm, BigEndian, DAG);
- Exp2Imm =
- DAG.getNode(ISD::SHL, DL, VecTy, DAG.getConstant(1, VecTy), Exp2Imm);
+ Exp2Imm = DAG.getNode(ISD::SHL, DL, VecTy, DAG.getConstant(1, DL, VecTy),
+ Exp2Imm);
}
return DAG.getNode(Opc, DL, VecTy, Op->getOperand(1), Exp2Imm);
static SDValue lowerMSABitClear(SDValue Op, SelectionDAG &DAG) {
EVT ResTy = Op->getValueType(0);
SDLoc DL(Op);
- SDValue One = DAG.getConstant(1, ResTy);
+ SDValue One = DAG.getConstant(1, DL, ResTy);
SDValue Bit = DAG.getNode(ISD::SHL, DL, ResTy, One, Op->getOperand(2));
return DAG.getNode(ISD::AND, DL, ResTy, Op->getOperand(1),
EVT ResTy = Op->getValueType(0);
APInt BitImm = APInt(ResTy.getVectorElementType().getSizeInBits(), 1)
<< cast<ConstantSDNode>(Op->getOperand(2))->getAPIntValue();
- SDValue BitMask = DAG.getConstant(~BitImm, ResTy);
+ SDValue BitMask = DAG.getConstant(~BitImm, DL, ResTy);
return DAG.getNode(ISD::AND, DL, ResTy, Op->getOperand(1), BitMask);
}
APInt Mask = APInt::getHighBitsSet(EltTy.getSizeInBits(),
Op->getConstantOperandVal(3));
return DAG.getNode(ISD::VSELECT, DL, VecTy,
- DAG.getConstant(Mask, VecTy, true), Op->getOperand(2),
- Op->getOperand(1));
+ DAG.getConstant(Mask, DL, VecTy, true),
+ Op->getOperand(2), Op->getOperand(1));
}
case Intrinsic::mips_binsri_b:
case Intrinsic::mips_binsri_h:
APInt Mask = APInt::getLowBitsSet(EltTy.getSizeInBits(),
Op->getConstantOperandVal(3));
return DAG.getNode(ISD::VSELECT, DL, VecTy,
- DAG.getConstant(Mask, VecTy, true), Op->getOperand(2),
- Op->getOperand(1));
+ DAG.getConstant(Mask, DL, VecTy, true),
+ Op->getOperand(2), Op->getOperand(1));
}
case Intrinsic::mips_bmnz_v:
return DAG.getNode(ISD::VSELECT, DL, Op->getValueType(0), Op->getOperand(3),
case Intrinsic::mips_bneg_w:
case Intrinsic::mips_bneg_d: {
EVT VecTy = Op->getValueType(0);
- SDValue One = DAG.getConstant(1, VecTy);
+ SDValue One = DAG.getConstant(1, DL, VecTy);
return DAG.getNode(ISD::XOR, DL, VecTy, Op->getOperand(1),
DAG.getNode(ISD::SHL, DL, VecTy, One,
case Intrinsic::mips_bset_w:
case Intrinsic::mips_bset_d: {
EVT VecTy = Op->getValueType(0);
- SDValue One = DAG.getConstant(1, VecTy);
+ SDValue One = DAG.getConstant(1, DL, VecTy);
return DAG.getNode(ISD::OR, DL, VecTy, Op->getOperand(1),
DAG.getNode(ISD::SHL, DL, VecTy, One,
return DAG.getNode(ISD::UDIV, DL, Op->getValueType(0), Op->getOperand(1),
Op->getOperand(2));
case Intrinsic::mips_fadd_w:
- case Intrinsic::mips_fadd_d:
+ case Intrinsic::mips_fadd_d: {
+ // TODO: If intrinsics have fast-math-flags, propagate them.
return DAG.getNode(ISD::FADD, DL, Op->getValueType(0), Op->getOperand(1),
Op->getOperand(2));
+ }
// Don't lower mips_fcaf_[wd] since LLVM folds SETFALSE condcodes away
case Intrinsic::mips_fceq_w:
case Intrinsic::mips_fceq_d:
return DAG.getSetCC(DL, Op->getValueType(0), Op->getOperand(1),
Op->getOperand(2), ISD::SETUNE);
case Intrinsic::mips_fdiv_w:
- case Intrinsic::mips_fdiv_d:
+ case Intrinsic::mips_fdiv_d: {
+ // TODO: If intrinsics have fast-math-flags, propagate them.
return DAG.getNode(ISD::FDIV, DL, Op->getValueType(0), Op->getOperand(1),
Op->getOperand(2));
+ }
case Intrinsic::mips_ffint_u_w:
case Intrinsic::mips_ffint_u_d:
return DAG.getNode(ISD::UINT_TO_FP, DL, Op->getValueType(0),
case Intrinsic::mips_fill_h:
case Intrinsic::mips_fill_w:
case Intrinsic::mips_fill_d: {
- SmallVector<SDValue, 16> Ops;
EVT ResTy = Op->getValueType(0);
-
- for (unsigned i = 0; i < ResTy.getVectorNumElements(); ++i)
- Ops.push_back(Op->getOperand(1));
+ SmallVector<SDValue, 16> Ops(ResTy.getVectorNumElements(),
+ Op->getOperand(1));
// If ResTy is v2i64 then the type legalizer will break this node down into
// an equivalent v4i32.
}
case Intrinsic::mips_fexp2_w:
case Intrinsic::mips_fexp2_d: {
+ // TODO: If intrinsics have fast-math-flags, propagate them.
EVT ResTy = Op->getValueType(0);
return DAG.getNode(
ISD::FMUL, SDLoc(Op), ResTy, Op->getOperand(1),
return DAG.getNode(ISD::FMA, SDLoc(Op), Op->getValueType(0),
Op->getOperand(1), Op->getOperand(2), Op->getOperand(3));
case Intrinsic::mips_fmul_w:
- case Intrinsic::mips_fmul_d:
+ case Intrinsic::mips_fmul_d: {
+ // TODO: If intrinsics have fast-math-flags, propagate them.
return DAG.getNode(ISD::FMUL, DL, Op->getValueType(0), Op->getOperand(1),
Op->getOperand(2));
+ }
case Intrinsic::mips_fmsub_w:
case Intrinsic::mips_fmsub_d: {
+ // TODO: If intrinsics have fast-math-flags, propagate them.
EVT ResTy = Op->getValueType(0);
return DAG.getNode(ISD::FSUB, SDLoc(Op), ResTy, Op->getOperand(1),
DAG.getNode(ISD::FMUL, SDLoc(Op), ResTy,
case Intrinsic::mips_fsqrt_d:
return DAG.getNode(ISD::FSQRT, DL, Op->getValueType(0), Op->getOperand(1));
case Intrinsic::mips_fsub_w:
- case Intrinsic::mips_fsub_d:
+ case Intrinsic::mips_fsub_d: {
+ // TODO: If intrinsics have fast-math-flags, propagate them.
return DAG.getNode(ISD::FSUB, DL, Op->getValueType(0), Op->getOperand(1),
Op->getOperand(2));
+ }
case Intrinsic::mips_ftrunc_u_w:
case Intrinsic::mips_ftrunc_u_d:
return DAG.getNode(ISD::FP_TO_UINT, DL, Op->getValueType(0),
case Intrinsic::mips_insve_d:
return DAG.getNode(MipsISD::INSVE, DL, Op->getValueType(0),
Op->getOperand(1), Op->getOperand(2), Op->getOperand(3),
- DAG.getConstant(0, MVT::i32));
+ DAG.getConstant(0, DL, MVT::i32));
case Intrinsic::mips_ldi_b:
case Intrinsic::mips_ldi_h:
case Intrinsic::mips_ldi_w:
}
// SelectionDAG::getConstant will promote SplatValue appropriately.
- SDValue Result = DAG.getConstant(SplatValue, ViaVecTy);
+ SDValue Result = DAG.getConstant(SplatValue, DL, ViaVecTy);
// Bitcast to the type we originally wanted
if (ViaVecTy != ResTy)
for (unsigned i = 0; i < NumElts; ++i) {
Vector = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, ResTy, Vector,
Node->getOperand(i),
- DAG.getConstant(i, MVT::i32));
+ DAG.getConstant(i, DL, MVT::i32));
}
return Vector;
}
// It is therefore possible to lower into SHF when the mask takes the form:
// <a, b, c, d, a+4, b+4, c+4, d+4, a+8, b+8, c+8, d+8, ...>
// When undef's appear they are treated as if they were whatever value is
-// necessary in order to fit the above form.
+// necessary in order to fit the above forms.
//
// For example:
// %2 = shufflevector <8 x i16> %0, <8 x i16> undef,
Imm |= Idx & 0x3;
}
- return DAG.getNode(MipsISD::SHF, SDLoc(Op), ResTy,
- DAG.getConstant(Imm, MVT::i32), Op->getOperand(0));
+ SDLoc DL(Op);
+ return DAG.getNode(MipsISD::SHF, DL, ResTy,
+ DAG.getConstant(Imm, DL, MVT::i32), Op->getOperand(0));
+}
+
+/// Determine whether a range fits a regular pattern of values.
+/// This function accounts for the possibility of jumping over the End iterator.
+///
+/// \param Begin        Start of the range to check.
+/// \param CheckStride  Distance between the elements that are checked; the
+///                     elements in between are skipped entirely.
+/// \param End          One past the last element of the range.
+/// \param ExpectedIndex Value the first checked element must have. Elements
+///                     equal to -1 act as wildcards and match anything.
+/// \param ExpectedIndexStride Amount the expected value grows per checked
+///                     element.
+template <typename ValType>
+static bool
+fitsRegularPattern(typename SmallVectorImpl<ValType>::const_iterator Begin,
+                   unsigned CheckStride,
+                   typename SmallVectorImpl<ValType>::const_iterator End,
+                   ValType ExpectedIndex, unsigned ExpectedIndexStride) {
+  // I aliases the by-value parameter Begin, so advancing it is local to this
+  // call.
+  auto &I = Begin;
+
+  while (I != End) {
+    // -1 entries (undef) match any expected value.
+    if (*I != -1 && *I != ExpectedIndex)
+      return false;
+    ExpectedIndex += ExpectedIndexStride;
+
+    // Incrementing past End is undefined behaviour so we must increment one
+    // step at a time and check for End at each step.
+    for (unsigned n = 0; n < CheckStride && I != End; ++n, ++I)
+      ; // Empty loop body.
+  }
+  return true;
+}
+
+// Determine whether VECTOR_SHUFFLE is a SPLATI.
+//
+// It is a SPLATI when the mask is:
+//   <x, x, x, ...>
+// where x is any valid index.
+//
+// When undef's appear in the mask they are treated as if they were whatever
+// value is necessary in order to fit the above form.
+static bool isVECTOR_SHUFFLE_SPLATI(SDValue Op, EVT ResTy,
+                                    SmallVector<int, 16> Indices,
+                                    SelectionDAG &DAG) {
+  assert((Indices.size() % 2) == 0);
+
+  // Find the first defined (non-undef) index; every other defined element must
+  // replicate it. If the mask is all-undef, SplatIndex stays -1 and the
+  // pattern check below trivially succeeds.
+  int SplatIndex = -1;
+  for (const auto &V : Indices) {
+    if (V != -1) {
+      SplatIndex = V;
+      break;
+    }
+  }
+
+  // Check every element (stride 1) against SplatIndex; the expected value
+  // does not advance (stride 0).
+  return fitsRegularPattern<int>(Indices.begin(), 1, Indices.end(), SplatIndex,
+                                 0);
}
// Lower VECTOR_SHUFFLE into ILVEV (if possible).
//
// ILVEV interleaves the even elements from each vector.
//
-// It is possible to lower into ILVEV when the mask takes the form:
-// <0, n, 2, n+2, 4, n+4, ...>
+// It is possible to lower into ILVEV when the mask consists of two of the
+// following forms interleaved:
+// <0, 2, 4, ...>
+// <n, n+2, n+4, ...>
// where n is the number of elements in the vector.
+// For example:
+// <0, 0, 2, 2, 4, 4, ...>
+// <0, n, 2, n+2, 4, n+4, ...>
//
// When undef's appear in the mask they are treated as if they were whatever
-// value is necessary in order to fit the above form.
+// value is necessary in order to fit the above forms.
static SDValue lowerVECTOR_SHUFFLE_ILVEV(SDValue Op, EVT ResTy,
                                         SmallVector<int, 16> Indices,
                                         SelectionDAG &DAG) {
-  assert ((Indices.size() % 2) == 0);
-  int WsIdx = 0;
-  int WtIdx = ResTy.getVectorNumElements();
+  assert((Indices.size() % 2) == 0);
+
+  SDValue Wt;
+  SDValue Ws;
+  const auto &Begin = Indices.begin();
+  const auto &End = Indices.end();
+
+  // Check even elements are taken from the even elements of one half or the
+  // other and pick an operand accordingly.
+  if (fitsRegularPattern<int>(Begin, 2, End, 0, 2))
+    Wt = Op->getOperand(0);
+  else if (fitsRegularPattern<int>(Begin, 2, End, Indices.size(), 2))
+    Wt = Op->getOperand(1);
+  else
+    return SDValue();
-  for (unsigned i = 0; i < Indices.size(); i += 2) {
-    if (Indices[i] != -1 && Indices[i] != WsIdx)
-      return SDValue();
-    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
-      return SDValue();
-    WsIdx += 2;
-    WtIdx += 2;
-  }
+  // Check odd elements are taken from the even elements of one half or the
+  // other and pick an operand accordingly.
+  if (fitsRegularPattern<int>(Begin + 1, 2, End, 0, 2))
+    Ws = Op->getOperand(0);
+  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Indices.size(), 2))
+    Ws = Op->getOperand(1);
+  else
+    return SDValue();
-  return DAG.getNode(MipsISD::ILVEV, SDLoc(Op), ResTy, Op->getOperand(0),
-                     Op->getOperand(1));
+  // Per the checks above: even-numbered result elements come from Wt,
+  // odd-numbered result elements from Ws.
+  return DAG.getNode(MipsISD::ILVEV, SDLoc(Op), ResTy, Ws, Wt);
}
// Lower VECTOR_SHUFFLE into ILVOD (if possible).
//
// ILVOD interleaves the odd elements from each vector.
//
-// It is possible to lower into ILVOD when the mask takes the form:
-// <1, n+1, 3, n+3, 5, n+5, ...>
+// It is possible to lower into ILVOD when the mask consists of two of the
+// following forms interleaved:
+// <1, 3, 5, ...>
+// <n+1, n+3, n+5, ...>
// where n is the number of elements in the vector.
+// For example:
+// <1, 1, 3, 3, 5, 5, ...>
+// <1, n+1, 3, n+3, 5, n+5, ...>
//
// When undef's appear in the mask they are treated as if they were whatever
-// value is necessary in order to fit the above form.
+// value is necessary in order to fit the above forms.
static SDValue lowerVECTOR_SHUFFLE_ILVOD(SDValue Op, EVT ResTy,
                                         SmallVector<int, 16> Indices,
                                         SelectionDAG &DAG) {
-  assert ((Indices.size() % 2) == 0);
-  int WsIdx = 1;
-  int WtIdx = ResTy.getVectorNumElements() + 1;
+  assert((Indices.size() % 2) == 0);
+
+  SDValue Wt;
+  SDValue Ws;
+  const auto &Begin = Indices.begin();
+  const auto &End = Indices.end();
+
+  // Check even elements are taken from the odd elements of one half or the
+  // other and pick an operand accordingly.
+  if (fitsRegularPattern<int>(Begin, 2, End, 1, 2))
+    Wt = Op->getOperand(0);
+  else if (fitsRegularPattern<int>(Begin, 2, End, Indices.size() + 1, 2))
+    Wt = Op->getOperand(1);
+  else
+    return SDValue();
-  for (unsigned i = 0; i < Indices.size(); i += 2) {
-    if (Indices[i] != -1 && Indices[i] != WsIdx)
-      return SDValue();
-    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
-      return SDValue();
-    WsIdx += 2;
-    WtIdx += 2;
-  }
+  // Check odd elements are taken from the odd elements of one half or the
+  // other and pick an operand accordingly.
+  if (fitsRegularPattern<int>(Begin + 1, 2, End, 1, 2))
+    Ws = Op->getOperand(0);
+  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Indices.size() + 1, 2))
+    Ws = Op->getOperand(1);
+  else
+    return SDValue();
-  return DAG.getNode(MipsISD::ILVOD, SDLoc(Op), ResTy, Op->getOperand(0),
-                     Op->getOperand(1));
+  // NOTE(review): the operand order (Wt, Ws) here is deliberately the reverse
+  // of the ILVEV case above — confirm against the MipsISD::ILVOD operand
+  // convention.
+  return DAG.getNode(MipsISD::ILVOD, SDLoc(Op), ResTy, Wt, Ws);
}
-// Lower VECTOR_SHUFFLE into ILVL (if possible).
+// Lower VECTOR_SHUFFLE into ILVR (if possible).
//
-// ILVL interleaves consecutive elements from the left half of each vector.
+// ILVR interleaves consecutive elements from the right (lowest-indexed) half of
+// each vector.
//
-// It is possible to lower into ILVL when the mask takes the form:
-// <0, n, 1, n+1, 2, n+2, ...>
+// It is possible to lower into ILVR when the mask consists of two of the
+// following forms interleaved:
+// <0, 1, 2, ...>
+// <n, n+1, n+2, ...>
// where n is the number of elements in the vector.
+// For example:
+// <0, 0, 1, 1, 2, 2, ...>
+// <0, n, 1, n+1, 2, n+2, ...>
//
// When undef's appear in the mask they are treated as if they were whatever
-// value is necessary in order to fit the above form.
-static SDValue lowerVECTOR_SHUFFLE_ILVL(SDValue Op, EVT ResTy,
+// value is necessary in order to fit the above forms.
+static SDValue lowerVECTOR_SHUFFLE_ILVR(SDValue Op, EVT ResTy,
                                        SmallVector<int, 16> Indices,
                                        SelectionDAG &DAG) {
-  assert ((Indices.size() % 2) == 0);
-  int WsIdx = 0;
-  int WtIdx = ResTy.getVectorNumElements();
+  assert((Indices.size() % 2) == 0);
+
+  SDValue Wt;
+  SDValue Ws;
+  const auto &Begin = Indices.begin();
+  const auto &End = Indices.end();
+
+  // Check even elements are taken from the right (lowest-indexed) elements of
+  // one half or the other and pick an operand accordingly.
+  if (fitsRegularPattern<int>(Begin, 2, End, 0, 1))
+    Wt = Op->getOperand(0);
+  else if (fitsRegularPattern<int>(Begin, 2, End, Indices.size(), 1))
+    Wt = Op->getOperand(1);
+  else
+    return SDValue();
-  for (unsigned i = 0; i < Indices.size(); i += 2) {
-    if (Indices[i] != -1 && Indices[i] != WsIdx)
-      return SDValue();
-    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
-      return SDValue();
-    WsIdx ++;
-    WtIdx ++;
-  }
+  // Check odd elements are taken from the right (lowest-indexed) elements of
+  // one half or the other and pick an operand accordingly.
+  if (fitsRegularPattern<int>(Begin + 1, 2, End, 0, 1))
+    Ws = Op->getOperand(0);
+  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Indices.size(), 1))
+    Ws = Op->getOperand(1);
+  else
+    return SDValue();
-  return DAG.getNode(MipsISD::ILVL, SDLoc(Op), ResTy, Op->getOperand(0),
-                     Op->getOperand(1));
+  return DAG.getNode(MipsISD::ILVR, SDLoc(Op), ResTy, Ws, Wt);
}
-// Lower VECTOR_SHUFFLE into ILVR (if possible).
+// Lower VECTOR_SHUFFLE into ILVL (if possible).
//
-// ILVR interleaves consecutive elements from the right half of each vector.
+// ILVL interleaves consecutive elements from the left (highest-indexed) half
+// of each vector.
//
-// It is possible to lower into ILVR when the mask takes the form:
-// <x, n+x, x+1, n+x+1, x+2, n+x+2, ...>
+// It is possible to lower into ILVL when the mask consists of two of the
+// following forms interleaved:
+// <x, x+1, x+2, ...>
+// <n+x, n+x+1, n+x+2, ...>
// where n is the number of elements in the vector and x is half n.
+// For example:
+// <x, x, x+1, x+1, x+2, x+2, ...>
+// <x, n+x, x+1, n+x+1, x+2, n+x+2, ...>
//
// When undef's appear in the mask they are treated as if they were whatever
-// value is necessary in order to fit the above form.
-static SDValue lowerVECTOR_SHUFFLE_ILVR(SDValue Op, EVT ResTy,
+// value is necessary in order to fit the above forms.
+static SDValue lowerVECTOR_SHUFFLE_ILVL(SDValue Op, EVT ResTy,
                                        SmallVector<int, 16> Indices,
                                        SelectionDAG &DAG) {
-  assert ((Indices.size() % 2) == 0);
-  unsigned NumElts = ResTy.getVectorNumElements();
-  int WsIdx = NumElts / 2;
-  int WtIdx = NumElts + NumElts / 2;
+  assert((Indices.size() % 2) == 0);
+
+  // The "left" half of each source vector starts at element HalfSize.
+  unsigned HalfSize = Indices.size() / 2;
+  SDValue Wt;
+  SDValue Ws;
+  const auto &Begin = Indices.begin();
+  const auto &End = Indices.end();
+
+  // Check even elements are taken from the left (highest-indexed) elements of
+  // one half or the other and pick an operand accordingly.
+  if (fitsRegularPattern<int>(Begin, 2, End, HalfSize, 1))
+    Wt = Op->getOperand(0);
+  else if (fitsRegularPattern<int>(Begin, 2, End, Indices.size() + HalfSize, 1))
+    Wt = Op->getOperand(1);
+  else
+    return SDValue();
-  for (unsigned i = 0; i < Indices.size(); i += 2) {
-    if (Indices[i] != -1 && Indices[i] != WsIdx)
-      return SDValue();
-    if (Indices[i+1] != -1 && Indices[i+1] != WtIdx)
-      return SDValue();
-    WsIdx ++;
-    WtIdx ++;
-  }
+  // Check odd elements are taken from the left (highest-indexed) elements of
+  // one half or the other and pick an operand accordingly.
+  if (fitsRegularPattern<int>(Begin + 1, 2, End, HalfSize, 1))
+    Ws = Op->getOperand(0);
+  else if (fitsRegularPattern<int>(Begin + 1, 2, End, Indices.size() + HalfSize,
+                                   1))
+    Ws = Op->getOperand(1);
+  else
+    return SDValue();
-  return DAG.getNode(MipsISD::ILVL, SDLoc(Op), ResTy, Op->getOperand(0),
-                     Op->getOperand(1));
+  return DAG.getNode(MipsISD::ILVL, SDLoc(Op), ResTy, Ws, Wt);
}
// Lower VECTOR_SHUFFLE into PCKEV (if possible).
//
// PCKEV copies the even elements of each vector into the result vector.
//
-// It is possible to lower into PCKEV when the mask takes the form:
-// <0, 2, 4, ..., n, n+2, n+4, ...>
+// It is possible to lower into PCKEV when the mask consists of two of the
+// following forms concatenated:
+// <0, 2, 4, ...>
+// <n, n+2, n+4, ...>
// where n is the number of elements in the vector.
+// For example:
+// <0, 2, 4, ..., 0, 2, 4, ...>
+// <0, 2, 4, ..., n, n+2, n+4, ...>
//
// When undef's appear in the mask they are treated as if they were whatever
-// value is necessary in order to fit the above form.
+// value is necessary in order to fit the above forms.
static SDValue lowerVECTOR_SHUFFLE_PCKEV(SDValue Op, EVT ResTy,
                                         SmallVector<int, 16> Indices,
                                         SelectionDAG &DAG) {
-  assert ((Indices.size() % 2) == 0);
-  int Idx = 0;
+  assert((Indices.size() % 2) == 0);
+
+  SDValue Wt;
+  SDValue Ws;
+  const auto &Begin = Indices.begin();
+  const auto &Mid = Indices.begin() + Indices.size() / 2;
+  const auto &End = Indices.end();
+
+  // Check the first half of the result is taken from the even elements of one
+  // operand or the other and pick it as Wt.
+  if (fitsRegularPattern<int>(Begin, 1, Mid, 0, 2))
+    Wt = Op->getOperand(0);
+  else if (fitsRegularPattern<int>(Begin, 1, Mid, Indices.size(), 2))
+    Wt = Op->getOperand(1);
+  else
+    return SDValue();
-  for (unsigned i = 0; i < Indices.size(); ++i) {
-    if (Indices[i] != -1 && Indices[i] != Idx)
-      return SDValue();
-    Idx += 2;
-  }
+  // Check the second half of the result is taken from the even elements of one
+  // operand or the other and pick it as Ws.
+  if (fitsRegularPattern<int>(Mid, 1, End, 0, 2))
+    Ws = Op->getOperand(0);
+  else if (fitsRegularPattern<int>(Mid, 1, End, Indices.size(), 2))
+    Ws = Op->getOperand(1);
+  else
+    return SDValue();
-  return DAG.getNode(MipsISD::PCKEV, SDLoc(Op), ResTy, Op->getOperand(0),
-                     Op->getOperand(1));
+  return DAG.getNode(MipsISD::PCKEV, SDLoc(Op), ResTy, Ws, Wt);
}
// Lower VECTOR_SHUFFLE into PCKOD (if possible).
//
// PCKOD copies the odd elements of each vector into the result vector.
//
-// It is possible to lower into PCKOD when the mask takes the form:
-// <1, 3, 5, ..., n+1, n+3, n+5, ...>
+// It is possible to lower into PCKOD when the mask consists of two of the
+// following forms concatenated:
+// <1, 3, 5, ...>
+// <n+1, n+3, n+5, ...>
// where n is the number of elements in the vector.
+// For example:
+// <1, 3, 5, ..., 1, 3, 5, ...>
+// <1, 3, 5, ..., n+1, n+3, n+5, ...>
//
// When undef's appear in the mask they are treated as if they were whatever
-// value is necessary in order to fit the above form.
+// value is necessary in order to fit the above forms.
static SDValue lowerVECTOR_SHUFFLE_PCKOD(SDValue Op, EVT ResTy,
                                         SmallVector<int, 16> Indices,
                                         SelectionDAG &DAG) {
-  assert ((Indices.size() % 2) == 0);
-  int Idx = 1;
+  assert((Indices.size() % 2) == 0);
+
+  SDValue Wt;
+  SDValue Ws;
+  const auto &Begin = Indices.begin();
+  const auto &Mid = Indices.begin() + Indices.size() / 2;
+  const auto &End = Indices.end();
+
+  // Check the first half of the result is taken from the odd elements of one
+  // operand or the other and pick it as Wt.
+  if (fitsRegularPattern<int>(Begin, 1, Mid, 1, 2))
+    Wt = Op->getOperand(0);
+  else if (fitsRegularPattern<int>(Begin, 1, Mid, Indices.size() + 1, 2))
+    Wt = Op->getOperand(1);
+  else
+    return SDValue();
-  for (unsigned i = 0; i < Indices.size(); ++i) {
-    if (Indices[i] != -1 && Indices[i] != Idx)
-      return SDValue();
-    Idx += 2;
-  }
+  // Check the second half of the result is taken from the odd elements of one
+  // operand or the other and pick it as Ws.
+  if (fitsRegularPattern<int>(Mid, 1, End, 1, 2))
+    Ws = Op->getOperand(0);
+  else if (fitsRegularPattern<int>(Mid, 1, End, Indices.size() + 1, 2))
+    Ws = Op->getOperand(1);
+  else
+    return SDValue();
-  return DAG.getNode(MipsISD::PCKOD, SDLoc(Op), ResTy, Op->getOperand(0),
-                     Op->getOperand(1));
+  return DAG.getNode(MipsISD::PCKOD, SDLoc(Op), ResTy, Ws, Wt);
}
// Lower VECTOR_SHUFFLE into VSHF.
for (SmallVector<int, 16>::iterator I = Indices.begin(); I != Indices.end();
++I)
- Ops.push_back(DAG.getTargetConstant(*I, MaskEltTy));
+ Ops.push_back(DAG.getTargetConstant(*I, DL, MaskEltTy));
SDValue MaskVec = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskVecTy, Ops);
for (int i = 0; i < ResTyNumElts; ++i)
Indices.push_back(Node->getMaskElt(i));
- SDValue Result = lowerVECTOR_SHUFFLE_SHF(Op, ResTy, Indices, DAG);
- if (Result.getNode())
- return Result;
- Result = lowerVECTOR_SHUFFLE_ILVEV(Op, ResTy, Indices, DAG);
+ // splati.[bhwd] is preferable to the others but is matched from
+ // MipsISD::VSHF.
+ if (isVECTOR_SHUFFLE_SPLATI(Op, ResTy, Indices, DAG))
+ return lowerVECTOR_SHUFFLE_VSHF(Op, ResTy, Indices, DAG);
+ SDValue Result = lowerVECTOR_SHUFFLE_ILVEV(Op, ResTy, Indices, DAG);
if (Result.getNode())
return Result;
Result = lowerVECTOR_SHUFFLE_ILVOD(Op, ResTy, Indices, DAG);
if (Result.getNode())
return Result;
Result = lowerVECTOR_SHUFFLE_PCKOD(Op, ResTy, Indices, DAG);
+ if (Result.getNode())
+ return Result;
+ Result = lowerVECTOR_SHUFFLE_SHF(Op, ResTy, Indices, DAG);
if (Result.getNode())
return Result;
return lowerVECTOR_SHUFFLE_VSHF(Op, ResTy, Indices, DAG);
unsigned Ws = MI->getOperand(1).getReg();
unsigned Lane = MI->getOperand(2).getImm();
- if (Lane == 0)
- BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Ws, 0, Mips::sub_lo);
- else {
- unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
+ if (Lane == 0) {
+ unsigned Wt = Ws;
+ if (!Subtarget.useOddSPReg()) {
+ // We must copy to an even-numbered MSA register so that the
+ // single-precision sub-register is also guaranteed to be even-numbered.
+ Wt = RegInfo.createVirtualRegister(&Mips::MSA128WEvensRegClass);
+
+ BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Wt).addReg(Ws);
+ }
+
+ BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_lo);
+ } else {
+ unsigned Wt = RegInfo.createVirtualRegister(
+ Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass :
+ &Mips::MSA128WEvensRegClass);
BuildMI(*BB, MI, DL, TII->get(Mips::SPLATI_W), Wt).addReg(Ws).addImm(Lane);
BuildMI(*BB, MI, DL, TII->get(Mips::COPY), Fd).addReg(Wt, 0, Mips::sub_lo);
unsigned Wd_in = MI->getOperand(1).getReg();
unsigned Lane = MI->getOperand(2).getImm();
unsigned Fs = MI->getOperand(3).getReg();
- unsigned Wt = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
+ unsigned Wt = RegInfo.createVirtualRegister(
+ Subtarget.useOddSPReg() ? &Mips::MSA128WRegClass :
+ &Mips::MSA128WEvensRegClass);
BuildMI(*BB, MI, DL, TII->get(Mips::SUBREG_TO_REG), Wt)
.addImm(0)
const TargetRegisterClass *VecRC = nullptr;
const TargetRegisterClass *GPRRC =
- Subtarget.isGP64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
+ Subtarget.isABI_N64() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
unsigned EltLog2Size;
unsigned InsertOp = 0;
unsigned InsveOp = 0;
// sld.df inteprets $rt modulo the number of columns so we only need to negate
// the lane index to do this.
unsigned LaneTmp2 = RegInfo.createVirtualRegister(GPRRC);
- BuildMI(*BB, MI, DL, TII->get(Mips::SUB), LaneTmp2)
- .addReg(Mips::ZERO)
+ BuildMI(*BB, MI, DL, TII->get(Subtarget.isABI_N64() ? Mips::DSUB : Mips::SUB),
+ LaneTmp2)
+ .addReg(Subtarget.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO)
.addReg(LaneReg);
BuildMI(*BB, MI, DL, TII->get(Mips::SLD_B), Wd)
.addReg(WdTmp2)