diff --git a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
index 696910911f9..8a9340289d8 100644
--- a/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
+++ b/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
@@ -14,17 +14,18 @@
 #include "AMDGPUInstrInfo.h"
 #include "AMDGPUISelLowering.h" // For AMDGPUISD
 #include "AMDGPURegisterInfo.h"
+#include "AMDGPUSubtarget.h"
 #include "R600InstrInfo.h"
+#include "SIDefines.h"
 #include "SIISelLowering.h"
-#include "llvm/ADT/ValueMap.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "SIMachineFunctionInfo.h"
+#include "llvm/CodeGen/FunctionLoweringInfo.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/SelectionDAG.h"
 #include "llvm/CodeGen/SelectionDAGISel.h"
-#include "llvm/Support/Compiler.h"
-#include <list>
-#include <queue>
+#include "llvm/IR/Function.h"
 
 using namespace llvm;
 
@@ -43,11 +44,12 @@ public:
   AMDGPUDAGToDAGISel(TargetMachine &TM);
   virtual ~AMDGPUDAGToDAGISel();
 
-  SDNode *Select(SDNode *N);
-  virtual const char *getPassName() const;
-  virtual void PostprocessISelDAG();
+  SDNode *Select(SDNode *N) override;
+  const char *getPassName() const override;
+  void PostprocessISelDAG() override;
 
 private:
+  bool isInlineImmediate(SDNode *N) const;
   inline SDValue getSmallIPtrImm(unsigned Imm);
   bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                    const R600InstrInfo *TII);
@@ -58,13 +60,12 @@ private:
   bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
   bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
   bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
-  SDValue SimplifyI24(SDValue &Op);
-  bool SelectI24(SDValue Addr, SDValue &Op);
-  bool SelectU24(SDValue Addr, SDValue &Op);
 
   static bool checkType(const Value *ptr, unsigned int addrspace);
+  static bool checkPrivateAddress(const MachineMemOperand *Op);
 
   static bool isGlobalStore(const StoreSDNode *N);
+  static bool isFlatStore(const StoreSDNode *N);
   static bool isPrivateStore(const StoreSDNode *N);
   static bool isLocalStore(const StoreSDNode *N);
   static bool isRegionStore(const StoreSDNode *N);
@@ -72,16 +73,46 @@ private:
   bool isCPLoad(const LoadSDNode *N) const;
   bool isConstantLoad(const LoadSDNode *N, int cbID) const;
   bool isGlobalLoad(const LoadSDNode *N) const;
+  bool isFlatLoad(const LoadSDNode *N) const;
   bool isParamLoad(const LoadSDNode *N) const;
   bool isPrivateLoad(const LoadSDNode *N) const;
   bool isLocalLoad(const LoadSDNode *N) const;
   bool isRegionLoad(const LoadSDNode *N) const;
+  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
   bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
-  bool SelectGlobalValueVariableOffset(SDValue Addr,
-      SDValue &BaseReg, SDValue& Offset);
+  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
+                                       SDValue& Offset);
   bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
   bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
 
+  bool isDSOffsetLegal(const SDValue &Base, unsigned Offset,
+                       unsigned OffsetBits) const;
+  bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base,
+                            SDValue &Offset) const;
+  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
+                                 SDValue &Offset1) const;
+  void SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
+                   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
+                   SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
+                   SDValue &TFE) const;
+  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
+                         SDValue &Offset) const;
+  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
+                         SDValue &VAddr, SDValue &Offset,
+                         SDValue &SLC) const;
+  bool SelectMUBUFScratch(SDValue Addr, SDValue &RSrc, SDValue &VAddr,
+                          SDValue &SOffset, SDValue &ImmOffset) const;
+  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
+                         SDValue &Offset, SDValue &GLC, SDValue &SLC,
+                         SDValue &TFE) const;
+  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
+                         SDValue &Offset, SDValue &GLC) const;
+  SDNode *SelectAddrSpaceCast(SDNode *N);
+  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
+  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
+                       SDValue &Clamp, SDValue &Omod) const;
+
+  SDNode *SelectADD_SUB_I64(SDNode *N);
+  SDNode *SelectDIV_SCALE(SDNode *N);
 
   // Include the pieces autogenerated from the target description.
 #include "AMDGPUGenDAGISel.inc"
@@ -90,8 +121,7 @@ private:
 
 /// \brief This pass converts a legalized DAG into an AMDGPU-specific
 // DAG, ready for instruction scheduling.
-FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM
-                                        ) {
+FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) {
   return new AMDGPUDAGToDAGISel(TM);
 }
 
@@ -102,12 +132,53 @@ AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
 AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
 }
 
+bool AMDGPUDAGToDAGISel::isInlineImmediate(SDNode *N) const {
+  const SITargetLowering *TL
+      = static_cast<const SITargetLowering *>(getTargetLowering());
+  return TL->analyzeImmediate(N) == 0;
+}
+
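For orientation, since the rest of the patch leans on this predicate: a hedged sketch of what analyzeImmediate(N) == 0 is testing. The helper below is illustrative only and covers just the integer subset of SI's inline constants; it is not part of the patch.

// Illustrative sketch: SI encodes small operands directly in the instruction
// word. Integers in [-16, 64] (plus a handful of float constants such as 0.0,
// +/-0.5, +/-1.0, +/-2.0 and +/-4.0) need no extra literal dword.
static bool isSIInlineIntegerImm(int64_t V) {
  return V >= -16 && V <= 64;
}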
+/// \brief Determine the register class for \p OpNo
+/// \returns The register class of the virtual register that will be used for
+/// the given operand number \p OpNo or NULL if the register class cannot be
+/// determined.
+const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
+                                                          unsigned OpNo) const {
+  if (!N->isMachineOpcode())
+    return nullptr;
+
+  switch (N->getMachineOpcode()) {
+  default: {
+    const MCInstrDesc &Desc =
+        TM.getSubtargetImpl()->getInstrInfo()->get(N->getMachineOpcode());
+    unsigned OpIdx = Desc.getNumDefs() + OpNo;
+    if (OpIdx >= Desc.getNumOperands())
+      return nullptr;
+    int RegClass = Desc.OpInfo[OpIdx].RegClass;
+    if (RegClass == -1)
+      return nullptr;
+
+    return TM.getSubtargetImpl()->getRegisterInfo()->getRegClass(RegClass);
+  }
+  case AMDGPU::REG_SEQUENCE: {
+    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
+    const TargetRegisterClass *SuperRC =
+        TM.getSubtargetImpl()->getRegisterInfo()->getRegClass(RCID);
+
+    SDValue SubRegOp = N->getOperand(OpNo + 1);
+    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
+    return TM.getSubtargetImpl()->getRegisterInfo()->getSubClassWithSubReg(
+        SuperRC, SubRegIdx);
+  }
+  }
+}
+
 SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) {
   return CurDAG->getTargetConstant(Imm, MVT::i32);
 }
 
 bool AMDGPUDAGToDAGISel::SelectADDRParam(
-  SDValue Addr, SDValue& R1, SDValue& R2) {
+    SDValue Addr, SDValue& R1, SDValue& R2) {
 
   if (Addr.getOpcode() == ISD::FrameIndex) {
     if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
@@ -161,132 +232,136 @@ bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) {
 }
 
 SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
-  const R600InstrInfo *TII =
-      static_cast<const R600InstrInfo*>(TM.getInstrInfo());
   unsigned int Opc = N->getOpcode();
   if (N->isMachineOpcode()) {
-    return NULL;   // Already selected.
+    N->setNodeId(-1);
+    return nullptr;   // Already selected.
   }
+
+  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
   switch (Opc) {
   default: break;
-  case AMDGPUISD::CONST_ADDRESS: {
-    for (SDNode::use_iterator I = N->use_begin(), Next = llvm::next(I);
-         I != SDNode::use_end(); I = Next) {
-      Next = llvm::next(I);
-      if (!I->isMachineOpcode()) {
-        continue;
-      }
-      unsigned Opcode = I->getMachineOpcode();
-      bool HasDst = TII->getOperandIdx(Opcode, AMDGPU::OpName::dst) > -1;
-      int SrcIdx = I.getOperandNo();
-      int SelIdx;
-      // Unlike MachineInstrs, SDNodes do not have results in their operand
-      // list, so we need to increment the SrcIdx, since
-      // R600InstrInfo::getOperandIdx is based on the MachineInstr indices.
-      if (HasDst) {
-        SrcIdx++;
-      }
-
-      SelIdx = TII->getSelIdx(I->getMachineOpcode(), SrcIdx);
-      if (SelIdx < 0) {
-        continue;
-      }
-
-      SDValue CstOffset;
-      if (N->getValueType(0).isVector() ||
-          !SelectGlobalValueConstantOffset(N->getOperand(0), CstOffset))
-        continue;
+  // We are selecting i64 ADD here instead of custom-lowering it during
+  // DAG legalization, so we can fold some i64 ADDs used for address
+  // calculation into the LOAD and STORE instructions.
+  case ISD::ADD:
+  case ISD::SUB: {
+    if (N->getValueType(0) != MVT::i64 ||
+        ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
+      break;
 
-      // Gather constant values
-      int SrcIndices[] = {
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src2),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
-        TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
-      };
-      std::vector<unsigned> Consts;
-      for (unsigned i = 0; i < sizeof(SrcIndices) / sizeof(int); i++) {
-        int OtherSrcIdx = SrcIndices[i];
-        int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx);
-        if (OtherSrcIdx < 0 || OtherSelIdx < 0) {
+    return SelectADD_SUB_I64(N);
+  }
+  case ISD::SCALAR_TO_VECTOR:
+  case AMDGPUISD::BUILD_VERTICAL_VECTOR:
+  case ISD::BUILD_VECTOR: {
+    unsigned RegClassID;
+    const AMDGPURegisterInfo *TRI = static_cast<const AMDGPURegisterInfo *>(
+        TM.getSubtargetImpl()->getRegisterInfo());
+    const SIRegisterInfo *SIRI = static_cast<const SIRegisterInfo *>(
+        TM.getSubtargetImpl()->getRegisterInfo());
+    EVT VT = N->getValueType(0);
+    unsigned NumVectorElts = VT.getVectorNumElements();
+    EVT EltVT = VT.getVectorElementType();
+    assert(EltVT.bitsEq(MVT::i32));
+    if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
+      bool UseVReg = true;
+      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
+           U != E; ++U) {
+        if (!U->isMachineOpcode()) {
           continue;
         }
-        if (HasDst) {
-          OtherSrcIdx--;
-          OtherSelIdx--;
+        const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
+        if (!RC) {
+          continue;
         }
-        if (RegisterSDNode *Reg =
-            dyn_cast<RegisterSDNode>(I->getOperand(OtherSrcIdx))) {
-          if (Reg->getReg() == AMDGPU::ALU_CONST) {
-            ConstantSDNode *Cst =
-                dyn_cast<ConstantSDNode>(I->getOperand(OtherSelIdx));
-            Consts.push_back(Cst->getZExtValue());
-          }
+        if (SIRI->isSGPRClass(RC)) {
+          UseVReg = false;
         }
       }
-
-      ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(CstOffset);
-      Consts.push_back(Cst->getZExtValue());
-      if (!TII->fitsConstReadLimitations(Consts))
-        continue;
-
-      // Convert back to SDNode indices
-      if (HasDst) {
-        SrcIdx--;
-        SelIdx--;
+      switch(NumVectorElts) {
+      case 1: RegClassID = UseVReg ? AMDGPU::VReg_32RegClassID :
+                                     AMDGPU::SReg_32RegClassID;
+        break;
+      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
+                                     AMDGPU::SReg_64RegClassID;
+        break;
+      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
+                                     AMDGPU::SReg_128RegClassID;
+        break;
+      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
+                                     AMDGPU::SReg_256RegClassID;
+        break;
+      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
+                                      AMDGPU::SReg_512RegClassID;
+        break;
+      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
       }
-      std::vector<SDValue> Ops;
-      for (int i = 0, e = I->getNumOperands(); i != e; ++i) {
-        if (i == SrcIdx) {
-          Ops.push_back(CurDAG->getRegister(AMDGPU::ALU_CONST, MVT::f32));
-        } else if (i == SelIdx) {
-          Ops.push_back(CstOffset);
-        } else {
-          Ops.push_back(I->getOperand(i));
-        }
+    } else {
+      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
+      // that adds a 128-bit register copy when going through the
+      // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
+      // as possible because they can't be bundled by our scheduler.
+      switch(NumVectorElts) {
+      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
+      case 4:
+        if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
+          RegClassID = AMDGPU::R600_Reg128VerticalRegClassID;
+        else
+          RegClassID = AMDGPU::R600_Reg128RegClassID;
+        break;
+      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
       }
-      CurDAG->UpdateNodeOperands(*I, Ops.data(), Ops.size());
     }
-    break;
-  }
-  case ISD::BUILD_VECTOR: {
-    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
-    if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
-      break;
+
+    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);
+
+    if (NumVectorElts == 1) {
+      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
+                                  N->getOperand(0), RegClass);
     }
-    // BUILD_VECTOR is usually lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
-    // that adds a 128 bits reg copy when going through TwoAddressInstructions
-    // pass. We want to avoid 128 bits copies as much as possible because they
-    // can't be bundled by our scheduler.
-    SDValue RegSeqArgs[9] = {
-      CurDAG->getTargetConstant(AMDGPU::R600_Reg128RegClassID, MVT::i32),
-      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
-      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32),
-      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub2, MVT::i32),
-      SDValue(), CurDAG->getTargetConstant(AMDGPU::sub3, MVT::i32)
-    };
+
+    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
+                                  "supported yet");
+    // 16 = Max Num Vector Elements
+    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
+    // 1 = Vector Register Class
+    SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);
+
+    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
     bool IsRegSeq = true;
-    for (unsigned i = 0; i < N->getNumOperands(); i++) {
+    unsigned NOps = N->getNumOperands();
+    for (unsigned i = 0; i < NOps; i++) {
+      // XXX: Why is this here?
       if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
         IsRegSeq = false;
         break;
       }
-      RegSeqArgs[2 * i + 1] = N->getOperand(i);
+      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
+      RegSeqArgs[1 + (2 * i) + 1] =
+          CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
+    }
+
+    if (NOps != NumVectorElts) {
+      // Fill in the missing undef elements if this was a scalar_to_vector.
+      assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
+
+      MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
+                                                     SDLoc(N), EltVT);
+      for (unsigned i = NOps; i < NumVectorElts; ++i) {
+        RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
+        RegSeqArgs[1 + (2 * i) + 1] =
+            CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
+      }
     }
+
     if (!IsRegSeq)
       break;
     return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
-                                RegSeqArgs, 2 * N->getNumOperands() + 1);
+                                RegSeqArgs);
   }
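As a reading aid for the REG_SEQUENCE construction in the case above, here is the operand layout it builds for a four-element vector. This is a sketch; the element names are placeholders, everything else mirrors the code above.

// RegSeqArgs for a v4i32 BUILD_VECTOR, conceptually:
//   { RegClassID,          // e.g. AMDGPU::VReg_128RegClassID
//     Elt0, AMDGPU::sub0,  // value, then its subregister index
//     Elt1, AMDGPU::sub1,
//     Elt2, AMDGPU::sub2,
//     Elt3, AMDGPU::sub3 } // NumVectorElts * 2 + 1 operands in total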
   case ISD::BUILD_PAIR: {
     SDValue RC, SubReg0, SubReg1;
-    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
     if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
       break;
     }
@@ -307,340 +382,206 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
                                   SDLoc(N), N->getValueType(0), Ops);
   }
 
-  case ISD::ConstantFP:
-  case ISD::Constant: {
+  case ISD::Constant:
+  case ISD::ConstantFP: {
     const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
-    // XXX: Custom immediate lowering not implemented yet.  Instead we use
-    // pseudo instructions defined in SIInstructions.td
-    if (ST.getGeneration() > AMDGPUSubtarget::NORTHERN_ISLANDS) {
+    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
+        N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
       break;
-    }
 
-    uint64_t ImmValue = 0;
-    unsigned ImmReg = AMDGPU::ALU_LITERAL_X;
-
-    if (N->getOpcode() == ISD::ConstantFP) {
-      // XXX: 64-bit Immediates not supported yet
-      assert(N->getValueType(0) != MVT::f64);
-
-      ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N);
-      APFloat Value = C->getValueAPF();
-      float FloatValue = Value.convertToFloat();
-      if (FloatValue == 0.0) {
-        ImmReg = AMDGPU::ZERO;
-      } else if (FloatValue == 0.5) {
-        ImmReg = AMDGPU::HALF;
-      } else if (FloatValue == 1.0) {
-        ImmReg = AMDGPU::ONE;
-      } else {
-        ImmValue = Value.bitcastToAPInt().getZExtValue();
-      }
-    } else {
-      // XXX: 64-bit Immediates not supported yet
-      assert(N->getValueType(0) != MVT::i64);
-
-      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N);
-      if (C->getZExtValue() == 0) {
-        ImmReg = AMDGPU::ZERO;
-      } else if (C->getZExtValue() == 1) {
-        ImmReg = AMDGPU::ONE_INT;
-      } else {
-        ImmValue = C->getZExtValue();
-      }
+    uint64_t Imm;
+    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
+      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
+    else {
+      ConstantSDNode *C = cast<ConstantSDNode>(N);
+      Imm = C->getZExtValue();
     }
 
-    for (SDNode::use_iterator Use = N->use_begin(), Next = llvm::next(Use);
-         Use != SDNode::use_end(); Use = Next) {
-      Next = llvm::next(Use);
-      std::vector<SDValue> Ops;
-      for (unsigned i = 0; i < Use->getNumOperands(); ++i) {
-        Ops.push_back(Use->getOperand(i));
-      }
+    SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
+                             CurDAG->getConstant(Imm & 0xFFFFFFFF, MVT::i32));
+    SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
+                             CurDAG->getConstant(Imm >> 32, MVT::i32));
+    const SDValue Ops[] = {
+      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
+      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
+      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
+    };
 
-      if (!Use->isMachineOpcode()) {
-          if (ImmReg == AMDGPU::ALU_LITERAL_X) {
-            // We can only use literal constants (e.g. AMDGPU::ZERO,
-            // AMDGPU::ONE, etc) in machine opcodes.
-            continue;
-          }
-      } else {
-        if (!TII->isALUInstr(Use->getMachineOpcode()) ||
-            (TII->get(Use->getMachineOpcode()).TSFlags &
-            R600_InstFlag::VECTOR)) {
-          continue;
-        }
+    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SDLoc(N),
+                                  N->getValueType(0), Ops);
+  }
 
-        int ImmIdx = TII->getOperandIdx(Use->getMachineOpcode(),
-                                        AMDGPU::OpName::literal);
-        if (ImmIdx == -1) {
-          continue;
-        }
+  case AMDGPUISD::REGISTER_LOAD: {
+    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
+      break;
+    SDValue Addr, Offset;
+
+    SelectADDRIndirect(N->getOperand(1), Addr, Offset);
+    const SDValue Ops[] = {
+      Addr,
+      Offset,
+      CurDAG->getTargetConstant(0, MVT::i32),
+      N->getOperand(0),
+    };
+    return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, SDLoc(N),
+                                  CurDAG->getVTList(MVT::i32, MVT::i64,
+                                                    MVT::Other),
+                                  Ops);
+  }
+  case AMDGPUISD::REGISTER_STORE: {
+    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
+      break;
+    SDValue Addr, Offset;
+    SelectADDRIndirect(N->getOperand(2), Addr, Offset);
+    const SDValue Ops[] = {
+      N->getOperand(1),
+      Addr,
+      Offset,
+      CurDAG->getTargetConstant(0, MVT::i32),
+      N->getOperand(0),
+    };
+    return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, SDLoc(N),
+                                  CurDAG->getVTList(MVT::Other),
+                                  Ops);
+  }
 
-        if (TII->getOperandIdx(Use->getMachineOpcode(),
-                               AMDGPU::OpName::dst) != -1) {
-          // subtract one from ImmIdx, because the DST operand is usually index
-          // 0 for MachineInstrs, but we have no DST in the Ops vector.
-          ImmIdx--;
-        }
+  case AMDGPUISD::BFE_I32:
+  case AMDGPUISD::BFE_U32: {
+    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
+      break;
 
-        // Check that we aren't already using an immediate.
-        // XXX: It's possible for an instruction to have more than one
-        // immediate operand, but this is not supported yet.
-        if (ImmReg == AMDGPU::ALU_LITERAL_X) {
-          ConstantSDNode *C = dyn_cast<ConstantSDNode>(Use->getOperand(ImmIdx));
-          assert(C);
+    // There is a scalar version available, but unlike the vector version which
+    // has a separate operand for the offset and width, the scalar version packs
+    // the width and offset into a single operand.  Try to move to the scalar
+    // version if the offsets are constant, so that we can try to keep extended
+    // loads of kernel arguments in SGPRs.
 
-          if (C->getZExtValue() != 0) {
-            // This instruction is already using an immediate.
-            continue;
-          }
+    // TODO: Technically we could try to pattern match scalar bitshifts of
+    // dynamic values, but it's probably not useful.
+    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
+    if (!Offset)
+      break;
 
-          // Set the immediate value
-          Ops[ImmIdx] = CurDAG->getTargetConstant(ImmValue, MVT::i32);
-        }
-      }
-      // Set the immediate register
-      Ops[Use.getOperandNo()] = CurDAG->getRegister(ImmReg, MVT::i32);
+    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
+    if (!Width)
+      break;
 
-      CurDAG->UpdateNodeOperands(*Use, Ops.data(), Use->getNumOperands());
-    }
-    break;
-  }
-  }
-  SDNode *Result = SelectCode(N);
+    bool Signed = Opc == AMDGPUISD::BFE_I32;
 
-  // Fold operands of selected node
+    // Pack the offset and width of a BFE into the format expected by
+    // S_BFE_I32 / S_BFE_U32: in the second source, bits [5:0] contain the
+    // offset and bits [22:16] the width.
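A worked example of the packing just described (values chosen for illustration):

// offset = 8, width = 16:
//   PackedVal = 8 | (16 << 16) = 0x00100008
// so S_BFE_U32 dst, src, 0x00100008 extracts bits [23:8] of src,
// zero-extended (S_BFE_I32 would sign-extend instead).
uint32_t packBFEOperand(uint32_t OffsetVal, uint32_t WidthVal) {
  return OffsetVal | (WidthVal << 16); // bits [5:0] offset, [22:16] width
}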
-  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
-  if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
-    const R600InstrInfo *TII =
-        static_cast<const R600InstrInfo*>(TM.getInstrInfo());
-    if (Result && Result->isMachineOpcode() &&
-        Result->getMachineOpcode() == AMDGPU::DOT_4) {
-      bool IsModified = false;
-      do {
-        std::vector<SDValue> Ops;
-        for (SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
-             I != E; ++I)
-          Ops.push_back(*I);
-        IsModified = FoldDotOperands(Result->getMachineOpcode(), TII, Ops);
-        if (IsModified) {
-          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
-        }
-      } while (IsModified);
+    uint32_t OffsetVal = Offset->getZExtValue();
+    uint32_t WidthVal = Width->getZExtValue();
 
-    }
-    if (Result && Result->isMachineOpcode() &&
-        !(TII->get(Result->getMachineOpcode()).TSFlags & R600_InstFlag::VECTOR)
-        && TII->hasInstrModifiers(Result->getMachineOpcode())) {
-      // Fold FNEG/FABS
-      // TODO: Isel can generate multiple MachineInst, we need to recursively
-      // parse Result
-      bool IsModified = false;
-      do {
-        std::vector<SDValue> Ops;
-        for (SDNode::op_iterator I = Result->op_begin(), E = Result->op_end();
-             I != E; ++I)
-          Ops.push_back(*I);
-        IsModified = FoldOperands(Result->getMachineOpcode(), TII, Ops);
-        if (IsModified) {
-          Result = CurDAG->UpdateNodeOperands(Result, Ops.data(), Ops.size());
-        }
-      } while (IsModified);
-
-      // If node has a single use which is CLAMP_R600, folds it
-      if (Result->hasOneUse() && Result->isMachineOpcode()) {
-        SDNode *PotentialClamp = *Result->use_begin();
-        if (PotentialClamp->isMachineOpcode() &&
-            PotentialClamp->getMachineOpcode() == AMDGPU::CLAMP_R600) {
-          unsigned ClampIdx =
-              TII->getOperandIdx(Result->getMachineOpcode(),
-                                 AMDGPU::OpName::clamp);
-          std::vector<SDValue> Ops;
-          unsigned NumOp = Result->getNumOperands();
-          for (unsigned i = 0; i < NumOp; ++i) {
-            Ops.push_back(Result->getOperand(i));
-          }
-          Ops[ClampIdx - 1] = CurDAG->getTargetConstant(1, MVT::i32);
-          Result = CurDAG->SelectNodeTo(PotentialClamp,
-              Result->getMachineOpcode(), PotentialClamp->getVTList(),
-              Ops.data(), NumOp);
-        }
-      }
-    }
-  }
+    uint32_t PackedVal = OffsetVal | WidthVal << 16;
 
-  return Result;
-}
+    SDValue PackedOffsetWidth = CurDAG->getTargetConstant(PackedVal, MVT::i32);
+    return CurDAG->getMachineNode(Signed ? AMDGPU::S_BFE_I32
+                                         : AMDGPU::S_BFE_U32,
+                                  SDLoc(N),
+                                  MVT::i32,
+                                  N->getOperand(0),
+                                  PackedOffsetWidth);
 
-bool AMDGPUDAGToDAGISel::FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg,
-                                     SDValue &Abs, const R600InstrInfo *TII) {
-  switch (Src.getOpcode()) {
-  case ISD::FNEG:
-    Src = Src.getOperand(0);
-    Neg = CurDAG->getTargetConstant(1, MVT::i32);
-    return true;
-  case ISD::FABS:
-    if (!Abs.getNode())
-      return false;
-    Src = Src.getOperand(0);
-    Abs = CurDAG->getTargetConstant(1, MVT::i32);
-    return true;
-  case ISD::BITCAST:
-    Src = Src.getOperand(0);
-    return true;
-  default:
-    return false;
   }
+  case AMDGPUISD::DIV_SCALE: {
+    return SelectDIV_SCALE(N);
+  }
+  case ISD::CopyToReg: {
+    const SITargetLowering& Lowering =
+        *static_cast<const SITargetLowering *>(getTargetLowering());
+    Lowering.legalizeTargetIndependentNode(N, *CurDAG);
+    break;
+  }
+  case ISD::ADDRSPACECAST:
+    return SelectAddrSpaceCast(N);
+  }
+
+  return SelectCode(N);
 }
 
-bool AMDGPUDAGToDAGISel::FoldOperands(unsigned Opcode,
-                                      const R600InstrInfo *TII,
-                                      std::vector<SDValue> &Ops) {
-  int OperandIdx[] = {
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2)
-  };
-  int SelIdx[] = {
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_sel)
-  };
-  int NegIdx[] = {
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src2_neg)
-  };
-  int AbsIdx[] = {
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs),
-    -1
-  };
+bool AMDGPUDAGToDAGISel::checkType(const Value *Ptr, unsigned AS) {
+  assert(AS != 0 && "Use checkPrivateAddress instead.");
+  if (!Ptr)
+    return false;
 
-  for (unsigned i = 0; i < 3; i++) {
-    if (OperandIdx[i] < 0)
-      return false;
-    SDValue &Src = Ops[OperandIdx[i] - 1];
-    SDValue &Sel = Ops[SelIdx[i] - 1];
-    SDValue &Neg = Ops[NegIdx[i] - 1];
-    SDValue FakeAbs;
-    SDValue &Abs = (AbsIdx[i] > -1) ? Ops[AbsIdx[i] - 1] : FakeAbs;
-    if (FoldOperand(Src, Sel, Neg, Abs, TII))
-      return true;
-  }
-  return false;
+  return Ptr->getType()->getPointerAddressSpace() == AS;
 }
 
-bool AMDGPUDAGToDAGISel::FoldDotOperands(unsigned Opcode,
-                                         const R600InstrInfo *TII,
-                                         std::vector<SDValue> &Ops) {
-  int OperandIdx[] = {
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_X),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Y),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_Z),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_W),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_X),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Y),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_Z),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_W)
-  };
-  int SelIdx[] = {
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_X),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Y),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_Z),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_sel_W),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_X),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Y),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_Z),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_sel_W)
-  };
-  int NegIdx[] = {
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_X),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Y),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_Z),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_neg_W),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_X),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Y),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_Z),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_neg_W)
-  };
-  int AbsIdx[] = {
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_X),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Y),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_Z),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src0_abs_W),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_X),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Y),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_Z),
-    TII->getOperandIdx(Opcode, AMDGPU::OpName::src1_abs_W)
-  };
+bool AMDGPUDAGToDAGISel::checkPrivateAddress(const MachineMemOperand *Op) {
+  if (Op->getPseudoValue())
+    return true;
 
-  for (unsigned i = 0; i < 8; i++) {
-    if (OperandIdx[i] < 0)
-      return false;
-    SDValue &Src = Ops[OperandIdx[i] - 1];
-    SDValue &Sel = Ops[SelIdx[i] - 1];
-    SDValue &Neg = Ops[NegIdx[i] - 1];
-    SDValue &Abs = Ops[AbsIdx[i] - 1];
-    if (FoldOperand(Src, Sel, Neg, Abs, TII))
-      return true;
-  }
-  return false;
-}
+  if (PointerType *PT = dyn_cast<PointerType>(Op->getValue()->getType()))
+    return PT->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
 
-bool AMDGPUDAGToDAGISel::checkType(const Value *ptr, unsigned int addrspace) {
-  if (!ptr) {
-    return false;
-  }
-  Type *ptrType = ptr->getType();
-  return dyn_cast<PointerType>(ptrType)->getAddressSpace() == addrspace;
+  return false;
 }
 
 bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) {
-  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
+  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
 }
 
 bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) {
-  return (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
-      && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
-      && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS));
+  const Value *MemVal = N->getMemOperand()->getValue();
+  return (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
+          !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
+          !checkType(MemVal, AMDGPUAS::REGION_ADDRESS));
 }
 
 bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) {
-  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
+  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isFlatStore(const StoreSDNode *N) {
+  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::FLAT_ADDRESS);
 }
 
 bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) {
-  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
+  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
 }
 
 bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const {
-  if (CbId == -1) {
-    return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS);
-  }
-  return checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
+  const Value *MemVal = N->getMemOperand()->getValue();
+  if (CbId == -1)
+    return checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS);
+
+  return checkType(MemVal, AMDGPUAS::CONSTANT_BUFFER_0 + CbId);
 }
 
 bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const {
-  return checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS);
+  if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) {
+    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
+        N->getMemoryVT().bitsLT(MVT::i32)) {
+      return true;
+    }
+  }
+  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS);
 }
 
 bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const {
-  return checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS);
+  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::PARAM_I_ADDRESS);
 }
 
 bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const {
-  return checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS);
+  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS);
+}
+
+bool AMDGPUDAGToDAGISel::isFlatLoad(const LoadSDNode *N) const {
+  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::FLAT_ADDRESS);
 }
 
 bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const {
-  return checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS);
+  return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS);
 }
 
 bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
   MachineMemOperand *MMO = N->getMemOperand();
-  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
+  if (checkPrivateAddress(N->getMemOperand())) {
    if (MMO) {
-      const Value *V = MMO->getValue();
-      const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V);
+      const PseudoSourceValue *PSV = MMO->getPseudoValue();
      if (PSV && PSV == PseudoSourceValue::getConstantPool()) {
        return true;
      }
@@ -650,19 +591,22 @@ bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const {
 }
 
 bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const {
-  if (checkType(N->getSrcValue(), AMDGPUAS::PRIVATE_ADDRESS)) {
+  if (checkPrivateAddress(N->getMemOperand())) {
     // Check to make sure we are not a constant pool load or a constant load
     // that is marked as a private load
     if (isCPLoad(N) || isConstantLoad(N, -1)) {
       return false;
     }
   }
-  if (!checkType(N->getSrcValue(), AMDGPUAS::LOCAL_ADDRESS)
-      && !checkType(N->getSrcValue(), AMDGPUAS::GLOBAL_ADDRESS)
-      && !checkType(N->getSrcValue(), AMDGPUAS::REGION_ADDRESS)
-      && !checkType(N->getSrcValue(), AMDGPUAS::CONSTANT_ADDRESS)
-      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_D_ADDRESS)
-      && !checkType(N->getSrcValue(), AMDGPUAS::PARAM_I_ADDRESS)) {
+
+  const Value *MemVal = N->getMemOperand()->getValue();
+  if (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) &&
+      !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) &&
+      !checkType(MemVal, AMDGPUAS::FLAT_ADDRESS) &&
+      !checkType(MemVal, AMDGPUAS::REGION_ADDRESS) &&
+      !checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS) &&
+      !checkType(MemVal, AMDGPUAS::PARAM_D_ADDRESS) &&
+      !checkType(MemVal, AMDGPUAS::PARAM_I_ADDRESS)) {
     return true;
   }
   return false;
@@ -682,7 +626,7 @@ const char *AMDGPUDAGToDAGISel::getPassName() const {
 //===----------------------------------------------------------------------===//
 
 bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
-    SDValue& IntPtr) {
+                                                         SDValue& IntPtr) {
   if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
     IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true);
     return true;
@@ -692,7 +636,7 @@ bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
 
 bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
     SDValue& BaseReg, SDValue &Offset) {
-  if (!dyn_cast<ConstantSDNode>(Addr)) {
+  if (!isa<ConstantSDNode>(Addr)) {
    BaseReg = Addr;
    Offset = CurDAG->getIntPtrConstant(0, true);
    return true;
@@ -702,7 +646,7 @@ bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
 
 bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                             SDValue &Offset) {
-  ConstantSDNode * IMMOffset;
+  ConstantSDNode *IMMOffset;
 
   if (Addr.getOpcode() == ISD::ADD
      && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
@@ -746,92 +690,509 @@ bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
   return true;
 }
 
-SDValue AMDGPUDAGToDAGISel::SimplifyI24(SDValue &Op) {
-  APInt Demanded = APInt(32, 0x00FFFFFF);
-  APInt KnownZero, KnownOne;
-  TargetLowering::TargetLoweringOpt TLO(*CurDAG, true, true);
-  const TargetLowering *TLI = getTargetLowering();
-  if (TLI->SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) {
-    CurDAG->ReplaceAllUsesWith(Op, TLO.New);
-    CurDAG->RepositionNode(Op.getNode(), TLO.New.getNode());
-    return SimplifyI24(TLO.New);
-  } else {
-    return Op;
-  }
-}
+SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
+  SDLoc DL(N);
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+
+  bool IsAdd = (N->getOpcode() == ISD::ADD);
+
+  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
+  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
+
+  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
+                                       DL, MVT::i32, LHS, Sub0);
+  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
+                                       DL, MVT::i32, LHS, Sub1);
+
+  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
+                                       DL, MVT::i32, RHS, Sub0);
+  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
+                                       DL, MVT::i32, RHS, Sub1);
+
+  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
+  SDValue AddLoArgs[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };
+
+  unsigned Opc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
+  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
+
+  SDNode *AddLo = CurDAG->getMachineNode(Opc, DL, VTList, AddLoArgs);
+  SDValue Carry(AddLo, 1);
+  SDNode *AddHi
+      = CurDAG->getMachineNode(CarryOpc, DL, MVT::i32,
+                               SDValue(Hi0, 0), SDValue(Hi1, 0), Carry);
+
+  SDValue Args[5] = {
+    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
+    SDValue(AddLo, 0),
+    Sub0,
+    SDValue(AddHi, 0),
+    Sub1,
+  };
+  return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
+}
+
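The function above builds the standard split add with carry. Sketched as the scalar machine code it ultimately becomes (register names are illustrative):

// {Hi0,Lo0} + {Hi1,Lo1}:
//   s_add_u32  lo, lo0, lo1   ; sets SCC to the carry-out
//   s_addc_u32 hi, hi0, hi1   ; consumes SCC as the carry-in
// The MVT::Glue value on AddLo is what keeps the SCC def and its use
// adjacent; the REG_SEQUENCE then reassembles the 64-bit result.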
+SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
+  SDLoc SL(N);
+  EVT VT = N->getValueType(0);
+
+  assert(VT == MVT::f32 || VT == MVT::f64);
+
+  unsigned Opc
+      = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32;
+
+  const SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);
+  const SDValue False = CurDAG->getTargetConstant(0, MVT::i1);
+  SDValue Ops[] = {
+    Zero,             // src0_modifiers
+    N->getOperand(0), // src0
+    Zero,             // src1_modifiers
+    N->getOperand(1), // src1
+    Zero,             // src2_modifiers
+    N->getOperand(2), // src2
+    False,            // clamp
+    Zero              // omod
+  };
+
+  return CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops);
+}
+
+bool AMDGPUDAGToDAGISel::isDSOffsetLegal(const SDValue &Base, unsigned Offset,
+                                         unsigned OffsetBits) const {
+  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
+  if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
+      (OffsetBits == 8 && !isUInt<8>(Offset)))
+    return false;
+
+  if (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS)
+    return true;
+
+  // On Southern Islands, instructions with a negative base value and an
+  // offset don't seem to work.
+  return CurDAG->SignBitIsZero(Base);
+}
+
+bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
+                                              SDValue &Offset) const {
+  if (CurDAG->isBaseWithConstantOffset(Addr)) {
+    SDValue N0 = Addr.getOperand(0);
+    SDValue N1 = Addr.getOperand(1);
+    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
+    if (isDSOffsetLegal(N0, C1->getSExtValue(), 16)) {
+      // (add n0, c0)
+      Base = N0;
+      Offset = N1;
+      return true;
+    }
+  }
+
+  // If we have a constant address, prefer to put the constant into the
+  // offset. This can save moves to load the constant address since multiple
+  // operations can share the zero base address register, and enables merging
+  // into read2 / write2 instructions.
+  if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
+    if (isUInt<16>(CAddr->getZExtValue())) {
+      SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);
+      MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
+                                                      SDLoc(Addr), MVT::i32,
+                                                      Zero);
+      Base = SDValue(MovZero, 0);
+      Offset = Addr;
+      return true;
+    }
+  }
+
+  // default case
+  Base = Addr;
+  Offset = CurDAG->getTargetConstant(0, MVT::i16);
+  return true;
+}
+
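To illustrate the two paths above (offsets made up):

// Base in a VGPR plus a small constant:
//   (add %ptr, 48)   ->  Base = %ptr, Offset = 48      // fits in 16 bits
// Purely constant address, e.g. LDS offset 400:
//   v_mov_b32  v0, 0                                   // shared zero base
//   ds_read_b32 dst, v0 offset:400
// Reusing the zeroed base register across accesses is what later allows
// merging neighboring loads into ds_read2_b32.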
+bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
+                                                   SDValue &Offset0,
+                                                   SDValue &Offset1) const {
+  if (CurDAG->isBaseWithConstantOffset(Addr)) {
+    SDValue N0 = Addr.getOperand(0);
+    SDValue N1 = Addr.getOperand(1);
+    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
+    unsigned DWordOffset0 = C1->getZExtValue() / 4;
+    unsigned DWordOffset1 = DWordOffset0 + 1;
+    // (add n0, c0)
+    if (isDSOffsetLegal(N0, DWordOffset1, 8)) {
+      Base = N0;
+      Offset0 = CurDAG->getTargetConstant(DWordOffset0, MVT::i8);
+      Offset1 = CurDAG->getTargetConstant(DWordOffset1, MVT::i8);
+      return true;
+    }
+  }
+
+  if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
+    unsigned DWordOffset0 = CAddr->getZExtValue() / 4;
+    unsigned DWordOffset1 = DWordOffset0 + 1;
+    assert(4 * DWordOffset0 == CAddr->getZExtValue());
+
+    if (isUInt<8>(DWordOffset0) && isUInt<8>(DWordOffset1)) {
+      SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);
+      MachineSDNode *MovZero
+          = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
+                                   SDLoc(Addr), MVT::i32, Zero);
+      Base = SDValue(MovZero, 0);
+      Offset0 = CurDAG->getTargetConstant(DWordOffset0, MVT::i8);
+      Offset1 = CurDAG->getTargetConstant(DWordOffset1, MVT::i8);
+      return true;
+    }
+  }
+
+  // default case
+  Base = Addr;
+  Offset0 = CurDAG->getTargetConstant(0, MVT::i8);
+  Offset1 = CurDAG->getTargetConstant(1, MVT::i8);
+  return true;
+}
+
+static SDValue wrapAddr64Rsrc(SelectionDAG *DAG, SDLoc DL, SDValue Ptr) {
+  return SDValue(DAG->getMachineNode(AMDGPU::SI_ADDR64_RSRC, DL, MVT::v4i32,
+                                     Ptr), 0);
+}
+
+static bool isLegalMUBUFImmOffset(const ConstantSDNode *Imm) {
+  return isUInt<12>(Imm->getZExtValue());
+}
+
+void AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
+                                     SDValue &VAddr, SDValue &SOffset,
+                                     SDValue &Offset, SDValue &Offen,
+                                     SDValue &Idxen, SDValue &Addr64,
+                                     SDValue &GLC, SDValue &SLC,
+                                     SDValue &TFE) const {
+  SDLoc DL(Addr);
+
+  GLC = CurDAG->getTargetConstant(0, MVT::i1);
+  SLC = CurDAG->getTargetConstant(0, MVT::i1);
+  TFE = CurDAG->getTargetConstant(0, MVT::i1);
+
+  Idxen = CurDAG->getTargetConstant(0, MVT::i1);
+  Offen = CurDAG->getTargetConstant(0, MVT::i1);
+  Addr64 = CurDAG->getTargetConstant(0, MVT::i1);
+  SOffset = CurDAG->getTargetConstant(0, MVT::i32);
+
+  if (CurDAG->isBaseWithConstantOffset(Addr)) {
+    SDValue N0 = Addr.getOperand(0);
+    SDValue N1 = Addr.getOperand(1);
+    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
+
+    if (isLegalMUBUFImmOffset(C1)) {
+
+      if (N0.getOpcode() == ISD::ADD) {
+        // (add (add N2, N3), C1) -> addr64
+        SDValue N2 = N0.getOperand(0);
+        SDValue N3 = N0.getOperand(1);
+        Addr64 = CurDAG->getTargetConstant(1, MVT::i1);
+        Ptr = N2;
+        VAddr = N3;
+        Offset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
+        return;
+      }
+
+      // (add N0, C1) -> offset
+      VAddr = CurDAG->getTargetConstant(0, MVT::i32);
+      Ptr = N0;
+      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
+      return;
+    }
+  }
+  if (Addr.getOpcode() == ISD::ADD) {
+    // (add N0, N1) -> addr64
+    SDValue N0 = Addr.getOperand(0);
+    SDValue N1 = Addr.getOperand(1);
+    Addr64 = CurDAG->getTargetConstant(1, MVT::i1);
+    Ptr = N0;
+    VAddr = N1;
+    Offset = CurDAG->getTargetConstant(0, MVT::i16);
+    return;
+  }
+
+  // default case -> offset
+  VAddr = CurDAG->getTargetConstant(0, MVT::i32);
+  Ptr = Addr;
+  Offset = CurDAG->getTargetConstant(0, MVT::i16);
+}
 
-bool AMDGPUDAGToDAGISel::SelectI24(SDValue Op, SDValue &I24) {
+bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
+                                           SDValue &VAddr,
+                                           SDValue &Offset) const {
+  SDValue Ptr, SOffset, Offen, Idxen, Addr64, GLC, SLC, TFE;
 
-  assert(Op.getValueType() == MVT::i32);
+  SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
+              GLC, SLC, TFE);
 
-  if (CurDAG->ComputeNumSignBits(Op) == 9) {
-    I24 = SimplifyI24(Op);
+  ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
+  if (C->getSExtValue()) {
+    SDLoc DL(Addr);
+    SRsrc = wrapAddr64Rsrc(CurDAG, DL, Ptr);
     return true;
   }
   return false;
 }
 
-bool AMDGPUDAGToDAGISel::SelectU24(SDValue Op, SDValue &U24) {
-  APInt KnownZero;
-  APInt KnownOne;
-  CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne);
+bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
+                                           SDValue &VAddr, SDValue &Offset,
+                                           SDValue &SLC) const {
+  SLC = CurDAG->getTargetConstant(0, MVT::i1);
+
+  return SelectMUBUFAddr64(Addr, SRsrc, VAddr, Offset);
+}
 
-  assert (Op.getValueType() == MVT::i32);
+static SDValue buildSMovImm32(SelectionDAG *DAG, SDLoc DL, uint64_t Val) {
+  SDValue K = DAG->getTargetConstant(Val, MVT::i32);
+  return SDValue(DAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, K), 0);
+}
 
-  // ANY_EXTEND and EXTLOAD operations can only be done on types smaller than
-  // i32.  These smaller types are legal to use with the i24 instructions.
-  if ((KnownZero & APInt(KnownZero.getBitWidth(), 0xFF000000)) == 0xFF000000 ||
-      Op.getOpcode() == ISD::ANY_EXTEND ||
-      ISD::isEXTLoad(Op.getNode())) {
-    U24 = SimplifyI24(Op);
-    return true;
-  }
-  return false;
-}
+static SDValue buildRSRC(SelectionDAG *DAG, SDLoc DL, SDValue Ptr,
+                         uint32_t RsrcDword1, uint64_t RsrcDword2And3) {
+
+  SDValue PtrLo = DAG->getTargetExtractSubreg(AMDGPU::sub0, DL, MVT::i32, Ptr);
+  SDValue PtrHi = DAG->getTargetExtractSubreg(AMDGPU::sub1, DL, MVT::i32, Ptr);
+  if (RsrcDword1) {
+    PtrHi = SDValue(DAG->getMachineNode(AMDGPU::S_OR_B32, DL, MVT::i32, PtrHi,
+                                        DAG->getConstant(RsrcDword1, MVT::i32)),
+                    0);
+  }
+
+  SDValue DataLo = buildSMovImm32(DAG, DL,
+                                  RsrcDword2And3 & UINT64_C(0xFFFFFFFF));
+  SDValue DataHi = buildSMovImm32(DAG, DL, RsrcDword2And3 >> 32);
+
+  const SDValue Ops[] = {
+    DAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32),
+    PtrLo,
+    DAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
+    PtrHi,
+    DAG->getTargetConstant(AMDGPU::sub1, MVT::i32),
+    DataLo,
+    DAG->getTargetConstant(AMDGPU::sub2, MVT::i32),
+    DataHi,
+    DAG->getTargetConstant(AMDGPU::sub3, MVT::i32)
+  };
+
+  return SDValue(DAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
+                                     MVT::v4i32, Ops), 0);
+}
+
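The four dwords assembled by buildRSRC form a buffer resource descriptor. A simplified sketch of the mapping (field naming follows the SI documentation; treat it as an assumption, not normative):

// v4i32 result of buildRSRC:
//   sub0 = PtrLo                        // base address, bits [31:0]
//   sub1 = PtrHi | RsrcDword1           // base address hi + stride fields
//   sub2 = RsrcDword2And3 & 0xFFFFFFFF  // NUM_RECORDS (the 'Size' above)
//   sub3 = RsrcDword2And3 >> 32         // data format / add-TID flags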
+/// \brief Return a resource descriptor with the 'Add TID' bit enabled.
+/// The TID (Thread ID) is multiplied by the stride value (bits [61:48]
+/// of the resource descriptor) to create an offset, which is added to the
+/// resource pointer.
+static SDValue buildScratchRSRC(SelectionDAG *DAG, SDLoc DL, SDValue Ptr) {
+
+  uint64_t Rsrc = AMDGPU::RSRC_DATA_FORMAT | AMDGPU::RSRC_TID_ENABLE |
+                  0xffffffff; // Size
+
+  return buildRSRC(DAG, DL, Ptr, 0, Rsrc);
+}
+
+bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
+                                            SDValue &VAddr, SDValue &SOffset,
+                                            SDValue &ImmOffset) const {
+
+  SDLoc DL(Addr);
+  MachineFunction &MF = CurDAG->getMachineFunction();
+  const SIRegisterInfo *TRI =
+      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  const SITargetLowering& Lowering =
+      *static_cast<const SITargetLowering *>(getTargetLowering());
+
+  unsigned ScratchPtrReg =
+      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_PTR);
+  unsigned ScratchOffsetReg =
+      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_WAVE_OFFSET);
+  Lowering.CreateLiveInRegister(*CurDAG, &AMDGPU::SReg_32RegClass,
+                                ScratchOffsetReg, MVT::i32);
+
+  Rsrc = buildScratchRSRC(CurDAG, DL,
+      CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
+                             MRI.getLiveInVirtReg(ScratchPtrReg), MVT::i64));
+  SOffset = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
+      MRI.getLiveInVirtReg(ScratchOffsetReg), MVT::i32);
+
+  // (add n0, c1)
+  if (CurDAG->isBaseWithConstantOffset(Addr)) {
+    SDValue N1 = Addr.getOperand(1);
+    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
+
+    if (isLegalMUBUFImmOffset(C1)) {
+      VAddr = Addr.getOperand(0);
+      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
+      return true;
+    }
+  }
+
+  // (add FI, n0)
+  if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
+      isa<FrameIndexSDNode>(Addr.getOperand(0))) {
+    VAddr = Addr.getOperand(1);
+    ImmOffset = Addr.getOperand(0);
+    return true;
+  }
+
+  // (FI)
+  if (isa<FrameIndexSDNode>(Addr)) {
+    VAddr = SDValue(CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32,
+                                           CurDAG->getConstant(0, MVT::i32)),
+                    0);
+    ImmOffset = Addr;
+    return true;
+  }
+
+  // (node)
+  VAddr = Addr;
+  ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
+  return true;
+}
+
+bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
+                                           SDValue &SOffset, SDValue &Offset,
+                                           SDValue &GLC, SDValue &SLC,
+                                           SDValue &TFE) const {
+  SDValue Ptr, VAddr, Offen, Idxen, Addr64;
+
+  SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
+              GLC, SLC, TFE);
+
+  if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
+      !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
+      !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
+    uint64_t Rsrc = AMDGPU::RSRC_DATA_FORMAT |
+                    APInt::getAllOnesValue(32).getZExtValue(); // Size
+    SDLoc DL(Addr);
+    SRsrc = buildRSRC(CurDAG, DL, Ptr, 0, Rsrc);
+    return true;
+  }
+  return false;
+}
 
-void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
+bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
+                                           SDValue &Soffset, SDValue &Offset,
+                                           SDValue &GLC) const {
+  SDValue SLC, TFE;
 
-  if (Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) {
-    return;
+  return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE);
+}
+
+// FIXME: This is incorrect and only enough to be able to compile.
+SDNode *AMDGPUDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
+  AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(N);
+  SDLoc DL(N);
+
+  assert(Subtarget.hasFlatAddressSpace() &&
+         "addrspacecast only supported with flat address space!");
+
+  assert((ASC->getSrcAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS &&
+          ASC->getDestAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS) &&
+         "Cannot cast address space to / from constant address!");
+
+  assert((ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
+          ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) &&
+         "Can only cast to / from flat address space!");
+
+  // The flat instructions read the address as the index of the VGPR holding
+  // the address, so casting should just be reinterpreting the base VGPR, so
+  // just insert trunc / bitcast / zext.
+
+  SDValue Src = ASC->getOperand(0);
+  EVT DestVT = ASC->getValueType(0);
+  EVT SrcVT = Src.getValueType();
+
+  unsigned SrcSize = SrcVT.getSizeInBits();
+  unsigned DestSize = DestVT.getSizeInBits();
+
+  if (SrcSize > DestSize) {
+    assert(SrcSize == 64 && DestSize == 32);
+    return CurDAG->getMachineNode(
+        TargetOpcode::EXTRACT_SUBREG,
+        DL,
+        DestVT,
+        Src,
+        CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32));
   }
 
-  // Go over all selected nodes and try to fold them a bit more
-  const AMDGPUTargetLowering& Lowering =
-      (*(const AMDGPUTargetLowering*)getTargetLowering());
-  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
-       E = CurDAG->allnodes_end(); I != E; ++I) {
-
-    SDNode *Node = I;
-    switch (Node->getOpcode()) {
-    // Fix the register class in copy to CopyToReg nodes - ISel will always
-    // use SReg classes for 64-bit copies, but this is not always what we want.
-    case ISD::CopyToReg: {
-      unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
-      SDValue Val = Node->getOperand(2);
-      const TargetRegisterClass *RC = RegInfo->getRegClass(Reg);
-      if (RC != &AMDGPU::SReg_64RegClass) {
-        continue;
-      }
-      if (!Val.getNode()->isMachineOpcode() ||
-          Val.getNode()->getMachineOpcode() == AMDGPU::IMPLICIT_DEF) {
-        continue;
-      }
+  if (DestSize > SrcSize) {
+    assert(SrcSize == 32 && DestSize == 64);
+
+    SDValue RC = CurDAG->getTargetConstant(AMDGPU::VSrc_64RegClassID, MVT::i32);
+
+    const SDValue Ops[] = {
+      RC,
+      Src,
+      CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
+      SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
+                                     CurDAG->getConstant(0, MVT::i32)), 0),
+      CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
+    };
 
-      const MCInstrDesc Desc =
-          TM.getInstrInfo()->get(Val.getNode()->getMachineOpcode());
-      const TargetRegisterInfo *TRI = TM.getRegisterInfo();
-      RegInfo->setRegClass(Reg, TRI->getRegClass(Desc.OpInfo[0].RegClass));
-      continue;
-    }
-    }
+    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
+                                  SDLoc(N), N->getValueType(0), Ops);
+  }
 
-    MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
-    if (!MachineNode)
-      continue;
+  assert(SrcSize == 64 && DestSize == 64);
+  return CurDAG->getNode(ISD::BITCAST, DL, DestVT, Src).getNode();
+}
 
-    SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
-    if (ResNode != Node) {
-      ReplaceUses(Node, ResNode);
-    }
-  }
-}
+bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
+                                        SDValue &SrcMods) const {
+
+  unsigned Mods = 0;
+
+  Src = In;
+
+  if (Src.getOpcode() == ISD::FNEG) {
+    Mods |= SISrcMods::NEG;
+    Src = Src.getOperand(0);
+  }
+
+  if (Src.getOpcode() == ISD::FABS) {
+    Mods |= SISrcMods::ABS;
+    Src = Src.getOperand(0);
+  }
+
+  SrcMods = CurDAG->getTargetConstant(Mods, MVT::i32);
+
+  return true;
+}
+
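SelectVOP3Mods folds fneg/fabs into VOP3 source-modifier bits instead of leaving them as separate operations. The effect, sketched (the assembly is illustrative):

// (fmul x, (fneg y))  ->  v_mul_f32 dst, x, -y    ; SISrcMods::NEG set
// (fmul (fabs x), y)  ->  v_mul_f32 dst, |x|, y   ; SISrcMods::ABS set
// No explicit v_xor_b32 / v_and_b32 on the sign bit is emitted.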
+bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
+                                         SDValue &SrcMods, SDValue &Clamp,
+                                         SDValue &Omod) const {
+  // FIXME: Handle Clamp and Omod
+  Clamp = CurDAG->getTargetConstant(0, MVT::i32);
+  Omod = CurDAG->getTargetConstant(0, MVT::i32);
+
+  return SelectVOP3Mods(In, Src, SrcMods);
+}
+
 void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
+  const AMDGPUTargetLowering& Lowering =
+      *static_cast<const AMDGPUTargetLowering *>(getTargetLowering());
+  bool IsModified = false;
+  do {
+    IsModified = false;
+    // Go over all selected nodes and try to fold them a bit more
+    for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
+         E = CurDAG->allnodes_end(); I != E; ++I) {
+
+      SDNode *Node = I;
+
+      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
+      if (!MachineNode)
+        continue;
+
+      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
+      if (ResNode != Node) {
+        ReplaceUses(Node, ResNode);
+        IsModified = true;
+      }
+    }
+    CurDAG->RemoveDeadNodes();
+  } while (IsModified);
+}
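A closing note on the rewritten PostprocessISelDAG: PostISelFolding can create machine nodes that are themselves foldable, so the loop now iterates to a fixed point, running RemoveDeadNodes() between passes so replaced nodes do not pin their operands. The generic shape of that pattern, as a sketch (Items and tryFold are placeholders, not real APIs):

bool Changed;
do {
  Changed = false;
  for (auto &Item : Items)        // one full pass over the work set
    Changed |= tryFold(Item);     // a fold may enable further folds
} while (Changed);                // stop once a pass changes nothing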