}
// NVPTXTargetLowering Constructor.
-NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM)
- : TargetLowering(TM), nvTM(&TM),
- nvptxSubtarget(TM.getSubtarget<NVPTXSubtarget>()) {
+NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
+ const NVPTXSubtarget &STI)
+ : TargetLowering(TM), nvTM(&TM), STI(STI) {
// Always lower memset, memcpy, and memmove intrinsics to load/store
// instructions, rather than generating calls to memset, memcpy, or memmove.
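// A hedged sketch (editor's illustration, not lines from this patch): LLVM
// targets force that inline expansion by raising the TargetLoweringBase
// store-count thresholds, along these lines:
//   MaxStoresPerMemset  = (unsigned) 0xFFFFFFFF;
//   MaxStoresPerMemcpy  = (unsigned) 0xFFFFFFFF;
//   MaxStoresPerMemmove = (unsigned) 0xFFFFFFFF;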
setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
- if (nvptxSubtarget.hasROT64()) {
+ if (STI.hasROT64()) {
setOperationAction(ISD::ROTL, MVT::i64, Legal);
setOperationAction(ISD::ROTR, MVT::i64, Legal);
} else {
setOperationAction(ISD::ROTL, MVT::i64, Expand);
setOperationAction(ISD::ROTR, MVT::i64, Expand);
}
- if (nvptxSubtarget.hasROT32()) {
+ if (STI.hasROT32()) {
setOperationAction(ISD::ROTL, MVT::i32, Legal);
setOperationAction(ISD::ROTR, MVT::i32, Legal);
} else {
// Now deduce the information based on the above-mentioned actions.
- computeRegisterProperties();
+ computeRegisterProperties(STI.getRegisterInfo());
}
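// A hedged sketch of the call site the new signature implies: the subtarget
// (assumed here to own the lowering as a TLInfo member) passes itself in, so
// the lowering no longer has to reach back through the TargetMachine:
//   NVPTXSubtarget::NVPTXSubtarget(/*...,*/ const NVPTXTargetMachine &TM)
//       : /*...,*/ TLInfo(TM, *this) {}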
const char *NVPTXTargetLowering::getTargetNodeName(unsigned Opcode) const {
unsigned retAlignment,
const ImmutableCallSite *CS) const {
- bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+ bool isABI = (STI.getSmVersion() >= 20);
assert(isABI && "Non-ABI compilation is not supported");
if (!isABI)
return "";
Type *retTy = CLI.RetTy;
ImmutableCallSite *CS = CLI.CS;
- bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+ bool isABI = (STI.getSmVersion() >= 20);
assert(isABI && "Non-ABI compilation is not supported");
if (!isABI)
return Chain;
EVT ObjectVT = getValueType(retTy);
unsigned NumElts = ObjectVT.getVectorNumElements();
EVT EltVT = ObjectVT.getVectorElementType();
- assert(nvTM->getSubtargetImpl()->getTargetLowering()->getNumRegisters(
- F->getContext(), ObjectVT) == NumElts &&
+ assert(STI.getTargetLowering()->getNumRegisters(F->getContext(),
+ ObjectVT) == NumElts &&
"Vector was not scalarized");
unsigned sz = EltVT.getSizeInBits();
bool needTruncate = sz < 8;
LoadRetVTs.push_back(EltVT);
LoadRetVTs.push_back(MVT::Other);
LoadRetVTs.push_back(MVT::Glue);
- SmallVector<SDValue, 4> LoadRetOps;
- LoadRetOps.push_back(Chain);
- LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
- LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
- LoadRetOps.push_back(InFlag);
+ SDValue LoadRetOps[] = {Chain, DAG.getConstant(1, MVT::i32),
+ DAG.getConstant(0, MVT::i32), InFlag};
SDValue retval = DAG.getMemIntrinsicNode(
NVPTXISD::LoadParam, dl,
DAG.getVTList(LoadRetVTs), LoadRetOps, EltVT, MachinePointerInfo());
}
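// Why the braced-array form above works (illustrative note): these
// SelectionDAG builders take their operands as ArrayRef<SDValue>, and an
// ArrayRef binds directly to a local C array with no copy, so the
// SmallVector was adding only churn:
//   SDValue Ops[] = {A, B};       // plain stack array
//   ArrayRef<SDValue> R(Ops);     // views the same elements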
LoadRetVTs.push_back(MVT::Other);
LoadRetVTs.push_back(MVT::Glue);
- SmallVector<SDValue, 4> LoadRetOps;
- LoadRetOps.push_back(Chain);
- LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
- LoadRetOps.push_back(DAG.getConstant(0, MVT::i32));
- LoadRetOps.push_back(InFlag);
+ SDValue LoadRetOps[] = {Chain, DAG.getConstant(1, MVT::i32),
+ DAG.getConstant(0, MVT::i32), InFlag};
SDValue retval = DAG.getMemIntrinsicNode(
NVPTXISD::LoadParamV2, dl,
DAG.getVTList(LoadRetVTs), LoadRetOps, EltVT, MachinePointerInfo());
}
LoadRetVTs.push_back(MVT::Other);
LoadRetVTs.push_back(MVT::Glue);
- SmallVector<SDValue, 4> LoadRetOps;
- LoadRetOps.push_back(Chain);
- LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
- LoadRetOps.push_back(DAG.getConstant(Ofst, MVT::i32));
- LoadRetOps.push_back(InFlag);
+ SDValue LoadRetOps[] = {Chain, DAG.getConstant(1, MVT::i32),
+ DAG.getConstant(Ofst, MVT::i32), InFlag};
SDValue retval = DAG.getMemIntrinsicNode(
Opc, dl, DAG.getVTList(LoadRetVTs),
LoadRetOps, EltVT, MachinePointerInfo());
LoadRetVTs.push_back(MVT::Other);
LoadRetVTs.push_back(MVT::Glue);
- SmallVector<SDValue, 4> LoadRetOps;
- LoadRetOps.push_back(Chain);
- LoadRetOps.push_back(DAG.getConstant(1, MVT::i32));
- LoadRetOps.push_back(DAG.getConstant(Offsets[i], MVT::i32));
- LoadRetOps.push_back(InFlag);
+ SDValue LoadRetOps[] = {Chain, DAG.getConstant(1, MVT::i32),
+ DAG.getConstant(Offsets[i], MVT::i32), InFlag};
SDValue retval = DAG.getMemIntrinsicNode(
NVPTXISD::LoadParam, dl,
DAG.getVTList(LoadRetVTs), LoadRetOps,
SDValue ShAmt = Op.getOperand(2);
unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
- if (VTBits == 32 && nvptxSubtarget.getSmVersion() >= 35) {
+ if (VTBits == 32 && STI.getSmVersion() >= 35) {
// For 32-bit shifts on sm_35 and later, we can use the funnel shift 'shf' instruction.
// {dHi, dLo} = {aHi, aLo} >> Amt
SDValue ShOpHi = Op.getOperand(1);
SDValue ShAmt = Op.getOperand(2);
- if (VTBits == 32 && nvptxSubtarget.getSmVersion() >= 35) {
+ if (VTBits == 32 && STI.getSmVersion() >= 35) {
// For 32-bit shifts on sm_35 and later, we can use the funnel shift 'shf' instruction.
// {dHi, dLo} = {aHi, aLo} << Amt
}
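// A worked sketch of the right-shift decomposition above, in plain C++
// (illustrative; the left-shift case mirrors it). On sm_35 a single 'shf'
// instruction computes the combined low-word expression, handling the
// amount in wrap or clamp mode:
static uint32_t FunnelShiftRightLo(uint32_t Hi, uint32_t Lo, uint32_t Amt) {
  Amt &= 31;                  // mirror the wrap-mode treatment of the amount
  if (Amt == 0)
    return Lo;                // avoid the undefined 32-bit shift in C++
  return (Lo >> Amt) | (Hi << (32 - Amt)); // low word of {Hi,Lo} >> Amt
}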
// Then any remaining arguments
- for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i) {
- Ops.push_back(N->getOperand(i));
- }
+ Ops.append(N->op_begin() + 2, N->op_end());
SDValue NewSt = DAG.getMemIntrinsicNode(
Opcode, DL, DAG.getVTList(MVT::Other), Ops,
const Function *F = MF.getFunction();
const AttributeSet &PAL = F->getAttributes();
- const TargetLowering *TLI = DAG.getSubtarget().getTargetLowering();
+ const TargetLowering *TLI = STI.getTargetLowering();
SDValue Root = DAG.getRoot();
std::vector<SDValue> OutChains;
bool isKernel = llvm::isKernelFunction(*F);
- bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+ bool isABI = (STI.getSmVersion() >= 20);
assert(isABI && "Non-ABI compilation is not supported");
if (!isABI)
return Chain;
Type *RetTy = F->getReturnType();
const DataLayout *TD = getDataLayout();
- bool isABI = (nvptxSubtarget.getSmVersion() >= 20);
+ bool isABI = (STI.getSmVersion() >= 20);
assert(isABI && "Non-ABI compilation is not supported");
if (!isABI)
return Chain;
}
std::pair<unsigned, const TargetRegisterClass *>
-NVPTXTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
+NVPTXTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ const std::string &Constraint,
MVT VT) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
return std::make_pair(0U, &NVPTX::Float64RegsRegClass);
}
}
- return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}
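// For orientation (illustrative, not from the patch): this hook resolves the
// single-letter register constraints NVPTX inline asm uses; the "d" case
// above yields Float64Regs, and a source-level use such as
//   asm("add.s32 %0, %1, %2;" : "=r"(res) : "r"(a), "r"(b));
// would resolve "r" to the 32-bit integer register class the same way.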
/// getFunctionAlignment - Return the Log2 alignment of this function.
default: break;
case ISD::ADD:
case ISD::FADD:
- return PerformADDCombine(N, DCI, nvptxSubtarget, OptLevel);
+ return PerformADDCombine(N, DCI, STI, OptLevel);
case ISD::MUL:
return PerformMULCombine(N, DCI, OptLevel);
case ISD::SHL:
}
}
- SmallVector<SDValue, 8> OtherOps;
-
// Copy regular operands
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- OtherOps.push_back(N->getOperand(i));
+ SmallVector<SDValue, 8> OtherOps(N->op_begin(), N->op_end());
// The select routine does not have access to the LoadSDNode instance, so
// pass along the extension information
OtherOps.push_back(Chain); // Chain
// Skip operand 1 (intrinsic ID)
// Others
- for (unsigned i = 2, e = N->getNumOperands(); i != e; ++i)
- OtherOps.push_back(N->getOperand(i));
+ OtherOps.append(N->op_begin() + 2, N->op_end());
MemIntrinsicSDNode *MemSD = cast<MemIntrinsicSDNode>(N);
"Custom handling of non-i8 ldu/ldg?");
// Just copy all operands as-is
- SmallVector<SDValue, 4> Ops;
- for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
- Ops.push_back(N->getOperand(i));
+ SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
// Force the output to i16: PTX has no 8-bit registers, so an i8 ldu/ldg
// is emitted as a 16-bit load and truncated back down afterwards.
SDVTList LdResVTs = DAG.getVTList(MVT::i16, MVT::Other);
delete DwarfLocSection;
delete DwarfARangesSection;
delete DwarfRangesSection;
- delete DwarfMacroInfoSection;
}
const MCSection *