X-Git-Url: http://plrg.eecs.uci.edu/git/?p=oota-llvm.git;a=blobdiff_plain;f=lib%2FTarget%2FAMDGPU%2FSIISelLowering.cpp;h=8ae687c5e827989b26f4f53c767537ad86104f44;hp=40fcc6d049da2fabe9c81d7556a31b071b8ccf02;hb=cac05d9b5837f7d5bc73d9d87b555ca9e562d351;hpb=cdc323b2b7b2920b6c929c7d5169fde82c7a42ac

diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index 40fcc6d049d..8ae687c5e82 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -452,7 +452,12 @@ SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                true, // isNonTemporal
                                true, // isInvariant
                                Align); // Alignment
-    return DAG.getNode(ISD::FP16_TO_FP, SL, VT, Load);
+    SDValue Ops[] = {
+      DAG.getNode(ISD::FP16_TO_FP, SL, VT, Load),
+      Load.getValue(1)
+    };
+
+    return DAG.getMergeValues(Ops, SL);
   }
 
   ISD::LoadExtType ExtTy = Signed ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
@@ -570,6 +575,8 @@ SDValue SITargetLowering::LowerFormalArguments(
 
   AnalyzeFormalArguments(CCInfo, Splits);
 
+  SmallVector<SDValue, 16> Chains;
+
   for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {
 
     const ISD::InputArg &Arg = Ins[i];
@@ -588,8 +595,9 @@ SDValue SITargetLowering::LowerFormalArguments(
                                    VA.getLocMemOffset();
       // The first 36 bytes of the input buffer contains information about
       // thread group and global sizes.
-      SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, DAG.getRoot(),
+      SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, Chain,
                                    Offset, Ins[i].Flags.isSExt());
+      Chains.push_back(Arg.getValue(1));
 
       const PointerType *ParamTy =
         dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
@@ -615,7 +623,8 @@ SDValue SITargetLowering::LowerFormalArguments(
       Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0,
                                      &AMDGPU::SReg_64RegClass);
       Reg = MF.addLiveIn(Reg, &AMDGPU::SReg_64RegClass);
-      InVals.push_back(DAG.getCopyFromReg(Chain, DL, Reg, VT));
+      SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
+      InVals.push_back(Copy);
       continue;
     }
 
@@ -635,7 +644,9 @@ SDValue SITargetLowering::LowerFormalArguments(
     for (unsigned j = 1; j != NumElements; ++j) {
       Reg = ArgLocs[ArgIdx++].getLocReg();
       Reg = MF.addLiveIn(Reg, RC);
-      Regs.push_back(DAG.getCopyFromReg(Chain, DL, Reg, VT));
+
+      SDValue Copy = DAG.getCopyFromReg(Chain, DL, Reg, VT);
+      Regs.push_back(Copy);
     }
 
     // Fill up the missing vector elements
@@ -654,7 +665,11 @@ SDValue SITargetLowering::LowerFormalArguments(
         AMDGPU::SGPR_32RegClass.begin(), AMDGPU::SGPR_32RegClass.getNumRegs()));
     Info->ScratchOffsetReg = AMDGPU::SGPR_32RegClass.getRegister(ScratchIdx);
   }
-  return Chain;
+
+  if (Chains.empty())
+    return Chain;
+
+  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
 }
 
 MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
@@ -797,10 +812,29 @@ static SDNode *findUser(SDValue Value, unsigned Opcode) {
 
 SDValue SITargetLowering::LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const {
 
+  SDLoc SL(Op);
   FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Op);
   unsigned FrameIndex = FINode->getIndex();
 
-  return DAG.getTargetFrameIndex(FrameIndex, MVT::i32);
+  // A FrameIndex node represents a 32-bit offset into scratch memory. If
+  // the high bit of a frame index offset were to be set, this would mean
+  // that it represented an offset of ~2GB * 64 = ~128GB from the start of the
+  // scratch buffer, with 64 being the number of threads per wave.
+  //
+  // If we know the machine uses less than 128GB of scratch, then we can
+  // mark the high bit of the FrameIndex node as known zero,
+  // which is important, because it means in most situations we can
+  // prove that values derived from FrameIndex nodes are non-negative.
+  // This enables us to take advantage of more addressing modes when
+  // accessing scratch buffers, since for scratch reads/writes, the register
+  // offset must always be positive.
+
+  SDValue TFI = DAG.getTargetFrameIndex(FrameIndex, MVT::i32);
+  if (Subtarget->enableHugeScratchBuffer())
+    return TFI;
+
+  return DAG.getNode(ISD::AssertZext, SL, MVT::i32, TFI,
+                     DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), 31)));
 }
 
 /// This transforms the control flow intrinsics to get the branch destination as
@@ -928,6 +962,7 @@ SDValue SITargetLowering::copyToM0(SelectionDAG &DAG, SDValue Chain, SDLoc DL,
 SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                   SelectionDAG &DAG) const {
   MachineFunction &MF = DAG.getMachineFunction();
+  auto MFI = MF.getInfo<SIMachineFunctionInfo>();
   const SIRegisterInfo *TRI =
       static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
 
@@ -966,8 +1001,7 @@ SDValue SITargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
 
   case Intrinsic::AMDGPU_read_workdim:
     return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(),
-                          MF.getInfo<SIMachineFunctionInfo>()->ABIArgOffset,
-                          false);
+                          getImplicitParameterOffset(MFI, GRID_DIM), false);
 
   case Intrinsic::r600_read_tgid_x:
     return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
@@ -2019,6 +2053,13 @@ void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
   }
 }
 
+static bool isFrameIndexOp(SDValue Op) {
+  if (Op.getOpcode() == ISD::AssertZext)
+    Op = Op.getOperand(0);
+
+  return isa<FrameIndexSDNode>(Op);
+}
+
 /// \brief Legalize target independent instructions (e.g. INSERT_SUBREG)
 /// with frame index operands.
 /// LLVM assumes that inputs are to these instructions are registers.
@@ -2027,7 +2068,7 @@ void SITargetLowering::legalizeTargetIndependentNode(SDNode *Node,
 
   SmallVector<SDValue, 8> Ops;
   for (unsigned i = 0; i < Node->getNumOperands(); ++i) {
-    if (!isa<FrameIndexSDNode>(Node->getOperand(i))) {
+    if (!isFrameIndexOp(Node->getOperand(i))) {
       Ops.push_back(Node->getOperand(i));
       continue;
     }
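
The scratch-size arithmetic in the LowerFrameIndex comment above can be sanity-checked outside of LLVM. The standalone C++ sketch below is illustrative only and not part of the patch: it computes the 2GB * 64 = 128GB limit and shows that a 32-bit offset whose bit 31 is known zero stays non-negative as a signed value, which is the property the AssertZext to a 31-bit type exposes to later DAG combines.

// Standalone sketch (not part of the patch): checks the scratch-size
// arithmetic from the LowerFrameIndex comment and the sign property that
// asserting bit 31 is zero implies for a 32-bit frame-index offset.
#include <cstdint>
#include <iostream>

int main() {
  const uint64_t ThreadsPerWave = 64;             // wavefront size on SI
  const uint64_t MaxPerThreadOffset = 1ull << 31; // 2GB: smallest offset with bit 31 set
  const uint64_t ScratchLimit = MaxPerThreadOffset * ThreadsPerWave;

  // 2GB * 64 = 128GB: if total scratch stays below this, bit 31 of any
  // per-thread frame-index offset must be zero.
  std::cout << "scratch limit = " << (ScratchLimit >> 30) << " GB\n"; // prints 128

  // With bit 31 known zero, the offset is non-negative when viewed as a
  // signed 32-bit value, which is what allows the positive register-offset
  // scratch addressing modes.
  uint32_t Offset = 0x7fffffffu; // largest offset with bit 31 clear
  std::cout << "signed view = " << static_cast<int32_t>(Offset) << '\n'; // still positive
  return 0;
}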