//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "SIRegisterInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

SIRegisterInfo::SIRegisterInfo() : AMDGPURegisterInfo() {}

void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved, unsigned Reg) const {
  MCRegAliasIterator R(Reg, this, true);

  // Reserve Reg together with every register that aliases it (the iterator
  // was constructed with IncludeSelf = true, so Reg itself is included).
  for (; R.isValid(); ++R)
    Reserved.set(*R);
}

unsigned SIRegisterInfo::reservedPrivateSegmentBufferReg(
  const MachineFunction &MF) const {
  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
  if (ST.hasSGPRInitBug()) {
    // Leave space for flat_scr, xnack_mask, vcc, and alignment.
    unsigned BaseIdx = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 8 - 4;
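    // For illustration (assuming the fixed allocation is 80 SGPRs): BaseIdx
    // is then 68, so the resource descriptor occupies
    // SGPR68_SGPR69_SGPR70_SGPR71.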
    unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
    return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SReg_128RegClass);
  }

  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    // 96/97 need to be reserved for flat_scr, 98/99 for xnack_mask, and
    // 100/101 for vcc. This is the next sgpr128 down.
    return AMDGPU::SGPR92_SGPR93_SGPR94_SGPR95;
  }

  return AMDGPU::SGPR96_SGPR97_SGPR98_SGPR99;
}

unsigned SIRegisterInfo::reservedPrivateSegmentWaveByteOffsetReg(
  const MachineFunction &MF) const {
  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
  if (ST.hasSGPRInitBug()) {
    unsigned Idx = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 6 - 1;
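    // Idx skips the 6 SGPRs reserved for flat_scr, xnack_mask, and vcc;
    // assuming the fixed allocation is 80 SGPRs, this picks SGPR73.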
    return AMDGPU::SGPR_32RegClass.getRegister(Idx);
  }

  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    // Next register before reservations for flat_scr, xnack_mask, vcc,
    // and scratch resource.
    return AMDGPU::SGPR91;
  }

  return AMDGPU::SGPR95;
}

BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
  // this seems likely to result in bugs, so I'm marking them as reserved.
  reserveRegisterTuples(Reserved, AMDGPU::EXEC);
  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);

  // Reserve the last 2 registers so we will always have at least 2 more that
  // will physically contain VCC.
  reserveRegisterTuples(Reserved, AMDGPU::SGPR102_SGPR103);
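  // (reserveRegisterTuples also reserves every aliasing register, so the
  // 32-bit halves SGPR102 and SGPR103 are covered by the tuple reservation.)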

  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();

  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    // SI/CI have 104 SGPRs. VI has 102. We need to shift down the reservation
    // for VCC/XNACK_MASK/FLAT_SCR.
    //
    // TODO: The SGPRs that alias XNACK_MASK could be used as general-purpose
    // SGPRs when the XNACK feature is not used. This is currently not done
    // because the code that counts SGPRs cannot account for such holes.
    reserveRegisterTuples(Reserved, AMDGPU::SGPR96_SGPR97);
    reserveRegisterTuples(Reserved, AMDGPU::SGPR98_SGPR99);
    reserveRegisterTuples(Reserved, AMDGPU::SGPR100_SGPR101);
  }

  // Tonga and Iceland can only allocate a fixed number of SGPRs due
  // to a hardware bug.
  if (ST.hasSGPRInitBug()) {
    unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
    // Reserve some SGPRs for FLAT_SCRATCH, XNACK_MASK, and VCC (6 SGPRs).
    unsigned Limit = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 6;

    for (unsigned i = Limit; i < NumSGPRs; ++i) {
      unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
      reserveRegisterTuples(Reserved, Reg);
    }
  }

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();
  if (ScratchWaveOffsetReg != AMDGPU::NoRegister) {
    // Reserve 1 SGPR for scratch wave offset in case we need to spill.
    reserveRegisterTuples(Reserved, ScratchWaveOffsetReg);
  }

  unsigned ScratchRSrcReg = MFI->getScratchRSrcReg();
  if (ScratchRSrcReg != AMDGPU::NoRegister) {
    // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we
    // need to spill.
    // TODO: May need to reserve a VGPR if doing LDS spilling.
    reserveRegisterTuples(Reserved, ScratchRSrcReg);
    assert(!isSubRegister(ScratchRSrcReg, ScratchWaveOffsetReg));
  }

  return Reserved;
}

unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
                                                unsigned Idx) const {
  const AMDGPUSubtarget &STI = MF.getSubtarget<AMDGPUSubtarget>();
  // FIXME: We should adjust the max number of waves based on LDS size.
  unsigned SGPRLimit = getNumSGPRsAllowed(STI.getGeneration(),
                                          STI.getMaxWavesPerCU());
  unsigned VGPRLimit = getNumVGPRsAllowed(STI.getMaxWavesPerCU());

  unsigned VSLimit = SGPRLimit + VGPRLimit;

  for (regclass_iterator I = regclass_begin(), E = regclass_end();
       I != E; ++I) {
    const TargetRegisterClass *RC = *I;

    unsigned NumSubRegs = std::max((int)RC->getSize() / 4, 1);
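    // NumSubRegs is the width of the class in 32-bit registers, so wider
    // classes get a proportionally smaller share of the 32-bit budget
    // (e.g. a 64-bit class is capped at half the limit).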
    unsigned Limit;

    if (isPseudoRegClass(RC)) {
      // FIXME: This is a hack. We should never be considering the pressure of
      // these since no virtual register should ever have this class.
      Limit = VSLimit;
    } else if (isSGPRClass(RC)) {
      Limit = SGPRLimit / NumSubRegs;
    } else {
      Limit = VGPRLimit / NumSubRegs;
    }

    const int *Sets = getRegClassPressureSets(RC);
    assert(Sets);
    for (unsigned i = 0; Sets[i] != -1; ++i) {
      if (Sets[i] == (int)Idx)
        return Limit;
    }
  }

  return 256;
}

bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
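  // Frame-index elimination (see eliminateFrameIndex below) may need to
  // scavenge a register for offsets that cannot be encoded directly.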
  return Fn.getFrameInfo()->hasStackObjects();
}

// Number of 32-bit sub-registers covered by a spill pseudo, i.e. the width
// of the spilled value in dwords (a 512-bit spill covers 16 dwords, etc.).
static unsigned getNumSubRegsForSpillOp(unsigned Op) {
  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}

void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
                                           unsigned LoadStoreOp,
                                           unsigned Value,
                                           unsigned ScratchRsrcReg,
                                           unsigned ScratchOffset,
                                           int64_t Offset,
                                           RegScavenger *RS) const {
  MachineBasicBlock *MBB = MI->getParent();
  const MachineFunction *MF = MI->getParent()->getParent();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
  LLVMContext &Ctx = MF->getFunction()->getContext();
  DebugLoc DL = MI->getDebugLoc();
  bool IsLoad = TII->get(LoadStoreOp).mayLoad();

  bool RanOutOfSGPRs = false;
  unsigned SOffset = ScratchOffset;

  unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
  unsigned Size = NumSubRegs * 4;
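
  // The MUBUF immediate offset field is a 12-bit unsigned value. If the last
  // dword of the spill would not fit, fold the whole frame offset into a
  // scavenged SGPR and start from immediate offset 0 instead.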
  if (!isUInt<12>(Offset + Size)) {
    SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
    if (SOffset == AMDGPU::NoRegister) {
      RanOutOfSGPRs = true;
      SOffset = AMDGPU::SGPR0;
    }
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
            .addReg(ScratchOffset)
            .addImm(Offset);
    Offset = 0;
  }

  if (RanOutOfSGPRs)
    Ctx.emitError("Ran out of SGPRs for spilling VGPRs");

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
    unsigned SubReg = NumSubRegs > 1 ?
        getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
        Value;

    BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
      .addReg(SubReg, getDefRegState(IsLoad))
      .addReg(ScratchRsrcReg)
      .addReg(SOffset)
      .addImm(Offset)
      .addImm(0) // glc
      .addImm(0) // slc
      .addImm(0) // tfe
      .addReg(Value, RegState::Implicit | getDefRegState(IsLoad))
      .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  }
}

void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  switch (MI->getOpcode()) {
  // SGPR register spill
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S32_SAVE: {
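    // SGPRs are spilled without touching memory: each 32-bit sub-register is
    // written to a lane of a VGPR with V_WRITELANE_B32, at the (VGPR, lane)
    // position that SIMachineFunctionInfo assigned to this frame index.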
    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
      unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                         &AMDGPU::SGPR_32RegClass, i);
      struct SIMachineFunctionInfo::SpilledReg Spill =
          MFI->getSpilledReg(MF, Index, i);

      BuildMI(*MBB, MI, DL,
              TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
              Spill.VGPR)
              .addReg(SubReg)
              .addImm(Spill.Lane);
    }

    // FIXME: Since this spills to another register instead of an actual
    // frame index, we should delete the frame index when all references to
    // it are fixed.
    MI->eraseFromParent();
    break;
  }

  // SGPR register restore
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_S32_RESTORE: {
    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
      unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                         &AMDGPU::SGPR_32RegClass, i);
      struct SIMachineFunctionInfo::SpilledReg Spill =
          MFI->getSpilledReg(MF, Index, i);

      BuildMI(*MBB, MI, DL,
              TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
              SubReg)
              .addReg(Spill.VGPR)
              .addImm(Spill.Lane)
              .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
    }

    // TODO: Only do this when it is needed.
    switch (MF->getSubtarget<AMDGPUSubtarget>().getGeneration()) {
    case AMDGPUSubtarget::SOUTHERN_ISLANDS:
      // "VALU writes SGPR" -> "SMRD reads that SGPR" needs 4 wait states
      // ("S_NOP 3") on SI.
      TII->insertWaitStates(MI, 4);
      break;
    case AMDGPUSubtarget::SEA_ISLANDS:
      break; // No wait states are needed on CI.
    default: // VOLCANIC_ISLANDS and later
      // "VALU writes SGPR" -> "VMEM reads that SGPR" needs 5 wait states
      // ("S_NOP 4") on VI and later. This also applies to VALUs which write
      // VCC, but we're unlikely to see VMEM use VCC.
      TII->insertWaitStates(MI, 5);
    }

    MI->eraseFromParent();
    break;
  }

  // VGPR register spill
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V32_SAVE:
    buildScratchLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
          TII->getNamedOperand(*MI, AMDGPU::OpName::src)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
          FrameInfo->getObjectOffset(Index), RS);
    MI->eraseFromParent();
    break;

  case AMDGPU::SI_SPILL_V32_RESTORE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
  case AMDGPU::SI_SPILL_V512_RESTORE: {
    buildScratchLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
          TII->getNamedOperand(*MI, AMDGPU::OpName::dst)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
          FrameInfo->getObjectOffset(Index), RS);
    MI->eraseFromParent();
    break;
  }

  default: {
    // Convert the frame index into a byte offset; if the instruction cannot
    // encode it as an immediate, materialize it in a scavenged VGPR instead.
    int64_t Offset = FrameInfo->getObjectOffset(Index);
    FIOp.ChangeToImmediate(Offset);
    if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
      unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, SPAdj);
      BuildMI(*MBB, MI, MI->getDebugLoc(),
              TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
              .addImm(Offset);
      FIOp.ChangeToRegister(TmpReg, false, false, true);
    }
  }
  }
}

unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const {
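  // The hardware register number lives in the low byte of the encoding; the
  // upper bits carry register-file flags, so mask them off.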
  return getEncodingValue(Reg) & 0xff;
}

// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!TargetRegisterInfo::isVirtualRegister(Reg));

  static const TargetRegisterClass *const BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass,
    &AMDGPU::SReg_512RegClass
  };

  // Return the first (i.e. smallest) base class that contains Reg.
  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}

// TODO: It might be helpful to have some target specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  switch (RC->getSize()) {
  case 4:
    return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
  case 8:
    return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
  case 12:
    return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
  case 16:
    return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
  case 32:
    return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
  case 64:
    return getCommonSubClass(&AMDGPU::VReg_512RegClass, RC) != nullptr;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
                                         const TargetRegisterClass *SRC) const {
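  // getSize() is in bytes; map the class to the vector class of the same
  // width (e.g. a 64-bit SReg_64 becomes VReg_64).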
  switch (SRC->getSize()) {
  case 4:
    return &AMDGPU::VGPR_32RegClass;
  case 8:
    return &AMDGPU::VReg_64RegClass;
  case 12:
    return &AMDGPU::VReg_96RegClass;
  case 16:
    return &AMDGPU::VReg_128RegClass;
  case 32:
    return &AMDGPU::VReg_256RegClass;
  case 64:
    return &AMDGPU::VReg_512RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
  const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // If this register has a sub-register, we can safely assume it is a 32-bit
  // register, because all of SI's sub-registers are 32-bit.
  if (isSGPRClass(RC)) {
    return &AMDGPU::SGPR_32RegClass;
  } else {
    return &AMDGPU::VGPR_32RegClass;
  }
}

bool SIRegisterInfo::shouldRewriteCopySrc(
  const TargetRegisterClass *DefRC,
  unsigned DefSubReg,
  const TargetRegisterClass *SrcRC,
  unsigned SrcSubReg) const {
  // We want to prefer the smallest register class possible, so we don't want
  // to stop and rewrite on anything that looks like a subregister
  // extract. Operations mostly don't care about the super register class, so
  // we only want to stop on the most basic of copies between the same
  // register class.
  //
  // e.g. if we have something like
  // vreg0 = ...
  // vreg1 = ...
  // vreg2 = REG_SEQUENCE vreg0, sub0, vreg1, sub1, vreg2, sub2
  // vreg3 = COPY vreg2, sub0
  //
  // We want to look through the COPY to find:
  //  => vreg3 = COPY vreg0

  // Plain copy.
  return getCommonSubClass(DefRC, SrcRC) != nullptr;
}

unsigned SIRegisterInfo::getPhysRegSubReg(unsigned Reg,
                                          const TargetRegisterClass *SubRC,
                                          unsigned Channel) const {
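  // For example (illustrative): getPhysRegSubReg(AMDGPU::SGPR4_SGPR5,
  // &AMDGPU::SGPR_32RegClass, 1) returns AMDGPU::SGPR5. The lo/hi halves of
  // VCC, FLAT_SCR, and EXEC do not fit the generic index computation below,
  // so they are special-cased first.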

  switch (Reg) {
  case AMDGPU::VCC:
    switch (Channel) {
    case 0: return AMDGPU::VCC_LO;
    case 1: return AMDGPU::VCC_HI;
    default: llvm_unreachable("Invalid SubIdx for VCC");
    }

  case AMDGPU::FLAT_SCR:
    switch (Channel) {
    case 0:
      return AMDGPU::FLAT_SCR_LO;
    case 1:
      return AMDGPU::FLAT_SCR_HI;
    default:
      llvm_unreachable("Invalid SubIdx for FLAT_SCR");
    }
    break;

  case AMDGPU::EXEC:
    switch (Channel) {
    case 0:
      return AMDGPU::EXEC_LO;
    case 1:
      return AMDGPU::EXEC_HI;
    default:
      llvm_unreachable("Invalid SubIdx for EXEC");
    }
    break;
  }

  const TargetRegisterClass *RC = getPhysRegClass(Reg);
  // 32-bit registers don't have sub-registers, so we can just return the
  // Reg. We need to have this check here, because the calculation below
  // using getHWRegIndex() will fail with special 32-bit registers like
  // VCC_LO, VCC_HI, EXEC_LO, EXEC_HI and M0.
  if (RC->getSize() == 4) {
    assert(Channel == 0);
    return Reg;
  }

  unsigned Index = getHWRegIndex(Reg);
  return SubRC->getRegister(Index + Channel);
}
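
// A "literal" constant is an arbitrary 32-bit value encoded in the
// instruction stream, while an "inline" constant is one of the small set of
// values (small integers and a handful of floats) the hardware encodes
// directly in the source operand field. Any operand that accepts a literal
// therefore also accepts an inline constant, but not vice versa.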
bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
  return OpType == AMDGPU::OPERAND_REG_IMM32;
}

bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
  if (opCanUseLiteralConstant(OpType))
    return true;

  return OpType == AMDGPU::OPERAND_REG_INLINE_C;
}

// FIXME: Most of these are flexible with HSA and we don't need to reserve them
// as input registers if unused. Whether the dispatch ptr is necessary should
// be easy to detect from used intrinsics. Scratch setup is harder to know.
unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
                                           enum PreloadedValue Value) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();

  switch (Value) {
  case SIRegisterInfo::WORKGROUP_ID_X:
    assert(MFI->hasWorkGroupIDX());
    return MFI->WorkGroupIDXSystemSGPR;
  case SIRegisterInfo::WORKGROUP_ID_Y:
    assert(MFI->hasWorkGroupIDY());
    return MFI->WorkGroupIDYSystemSGPR;
  case SIRegisterInfo::WORKGROUP_ID_Z:
    assert(MFI->hasWorkGroupIDZ());
    return MFI->WorkGroupIDZSystemSGPR;
  case SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET:
    return MFI->PrivateSegmentWaveByteOffsetSystemSGPR;
  case SIRegisterInfo::PRIVATE_SEGMENT_BUFFER:
    assert(ST.isAmdHsaOS() && "Non-HSA ABI currently uses relocations");
    assert(MFI->hasPrivateSegmentBuffer());
    return MFI->PrivateSegmentBufferUserSGPR;
  case SIRegisterInfo::KERNARG_SEGMENT_PTR:
    assert(MFI->hasKernargSegmentPtr());
    return MFI->KernargSegmentPtrUserSGPR;
  case SIRegisterInfo::DISPATCH_PTR:
    assert(MFI->hasDispatchPtr());
    return MFI->DispatchPtrUserSGPR;
  case SIRegisterInfo::QUEUE_PTR:
    llvm_unreachable("not implemented");
  case SIRegisterInfo::WORKITEM_ID_X:
    assert(MFI->hasWorkItemIDX());
    return AMDGPU::VGPR0;
  case SIRegisterInfo::WORKITEM_ID_Y:
    assert(MFI->hasWorkItemIDY());
    return AMDGPU::VGPR1;
  case SIRegisterInfo::WORKITEM_ID_Z:
    assert(MFI->hasWorkItemIDZ());
    return AMDGPU::VGPR2;
  }
  llvm_unreachable("unexpected preloaded value type");
}

/// \brief Returns a register that is not used at any point in the function.
/// If all registers are used, then this function will return
/// AMDGPU::NoRegister.
unsigned SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                            const TargetRegisterClass *RC) const {
  for (unsigned Reg : *RC)
    if (!MRI.isPhysRegUsed(Reg))
      return Reg;
  return AMDGPU::NoRegister;
}

unsigned SIRegisterInfo::getNumVGPRsAllowed(unsigned WaveCount) const {
  // 256 VGPRs are shared by the waves resident on a SIMD and allocated with a
  // granularity of 4: floor(256 / WaveCount) rounded down to a multiple of 4.
  switch (WaveCount) {
  case 10: return 24;
  case 9:  return 28;
  case 8:  return 32;
  case 7:  return 36;
  case 6:  return 40;
  case 5:  return 48;
  case 4:  return 64;
  case 3:  return 84;
  case 2:  return 128;
  default: return 256;
  }
}

unsigned SIRegisterInfo::getNumSGPRsAllowed(AMDGPUSubtarget::Generation gen,
                                            unsigned WaveCount) const {
  if (gen >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {