//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "SIRegisterInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

SIRegisterInfo::SIRegisterInfo() : AMDGPURegisterInfo() {}

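// Reserve Reg along with every register that aliases it, so that no sub- or
// super-register of a reserved tuple remains allocatable.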
void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved, unsigned Reg) const {
  MCRegAliasIterator R(Reg, this, true);

  for (; R.isValid(); ++R)
    Reserved.set(*R);
}

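// Pick the SGPR128 tuple to reserve for the private segment (scratch) buffer
// resource descriptor: the next naturally aligned 4-SGPR tuple below the
// registers reserved for flat_scr, xnack_mask, and vcc.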
unsigned SIRegisterInfo::reservedPrivateSegmentBufferReg(
  const MachineFunction &MF) const {
  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
  if (ST.hasSGPRInitBug()) {
    // Leave space for flat_scr and vcc (4 SGPRs) plus the descriptor itself
    // (4 SGPRs).
    unsigned BaseIdx = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 4 - 4;
    if (ST.isXNACKEnabled())
      BaseIdx -= 4; // Also leave space for xnack_mask, keeping the tuple aligned.

    unsigned BaseReg(AMDGPU::SGPR_32RegClass.getRegister(BaseIdx));
    return getMatchingSuperReg(BaseReg, AMDGPU::sub0, &AMDGPU::SReg_128RegClass);
  }

  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    // 98/99 need to be reserved for flat_scr (or 96/97 for flat_scr plus
    // 98/99 for xnack_mask), and 100/101 for vcc. This is the next sgpr128
    // down.
    return AMDGPU::SGPR92_SGPR93_SGPR94_SGPR95;
  }

  return AMDGPU::SGPR96_SGPR97_SGPR98_SGPR99;
}

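// Pick the SGPR to reserve for the scratch wave byte offset, placed just
// below the other scratch-related reservations.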
unsigned SIRegisterInfo::reservedPrivateSegmentWaveByteOffsetReg(
  const MachineFunction &MF) const {
  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();
  if (ST.hasSGPRInitBug()) {
    unsigned Idx;

    if (!ST.isXNACKEnabled())
      Idx = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 4 - 5;
    else
      Idx = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 6 - 1;

    return AMDGPU::SGPR_32RegClass.getRegister(Idx);
  }

  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    if (!ST.isXNACKEnabled()) {
      // Next register before reservations for flat_scr and vcc.
      return AMDGPU::SGPR97;
    }

    // Next register before reservations for flat_scr, xnack_mask, vcc,
    // and the scratch resource descriptor.
    return AMDGPU::SGPR91;
  }

  return AMDGPU::SGPR95;
}

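// Build the set of registers that must never be allocated: special registers
// like EXEC and FLAT_SCR, the top-of-file SGPRs that physically back VCC, and
// any scratch setup registers chosen for this function.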
BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
  // this seems likely to result in bugs, so I'm marking them as reserved.
  reserveRegisterTuples(Reserved, AMDGPU::EXEC);
  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);

  // Reserve the last 2 registers so we will always have at least 2 more that
  // will physically contain VCC.
  reserveRegisterTuples(Reserved, AMDGPU::SGPR102_SGPR103);

  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();

  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    // SI/CI have 104 SGPRs. VI has 102. We need to shift down the reservation
    // for VCC/FLAT_SCR to the end of the usable range.
    reserveRegisterTuples(Reserved, AMDGPU::SGPR98_SGPR99);
    reserveRegisterTuples(Reserved, AMDGPU::SGPR100_SGPR101);

    if (ST.isXNACKEnabled())
      reserveRegisterTuples(Reserved, AMDGPU::SGPR96_SGPR97);
  }

  // Tonga and Iceland can only allocate a fixed number of SGPRs due
  // to a hw bug.
  if (ST.hasSGPRInitBug()) {
    unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
    // Reserve some SGPRs for FLAT_SCRATCH and VCC (4 SGPRs).
    unsigned Limit = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 4;

    if (ST.isXNACKEnabled())
      Limit -= 2; // Reserve 2 more for xnack_mask.

    for (unsigned i = Limit; i < NumSGPRs; ++i) {
      unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
      reserveRegisterTuples(Reserved, Reg);
    }
  }

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  unsigned ScratchWaveOffsetReg = MFI->getScratchWaveOffsetReg();
  if (ScratchWaveOffsetReg != AMDGPU::NoRegister) {
    // Reserve 1 SGPR for scratch wave offset in case we need to spill.
    reserveRegisterTuples(Reserved, ScratchWaveOffsetReg);
  }

  unsigned ScratchRSrcReg = MFI->getScratchRSrcReg();
  if (ScratchRSrcReg != AMDGPU::NoRegister) {
    // Reserve 4 SGPRs for the scratch buffer resource descriptor in case we
    // need to spill.
    // TODO: May need to reserve a VGPR if doing LDS spilling.
    reserveRegisterTuples(Reserved, ScratchRSrcReg);
    assert(!isSubRegister(ScratchRSrcReg, ScratchWaveOffsetReg));
  }

  return Reserved;
}

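// The limit for a pressure set is the per-wave register budget at maximum
// occupancy, divided by the number of 32-bit sub-registers in the class so
// that wide tuples consume a proportional share.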
unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
                                                unsigned Idx) const {
  const AMDGPUSubtarget &STI = MF.getSubtarget<AMDGPUSubtarget>();
  // FIXME: We should adjust the max number of waves based on LDS size.
  unsigned SGPRLimit = getNumSGPRsAllowed(STI.getGeneration(),
                                          STI.getMaxWavesPerCU());
  unsigned VGPRLimit = getNumVGPRsAllowed(STI.getMaxWavesPerCU());

  unsigned VSLimit = SGPRLimit + VGPRLimit;

  for (regclass_iterator I = regclass_begin(), E = regclass_end();
       I != E; ++I) {
    const TargetRegisterClass *RC = *I;

    unsigned NumSubRegs = std::max((int)RC->getSize() / 4, 1);
    unsigned Limit;

    if (isPseudoRegClass(RC)) {
      // FIXME: This is a hack. We should never be considering the pressure of
      // these since no virtual register should ever have this class.
      Limit = VSLimit;
    } else if (isSGPRClass(RC)) {
      Limit = SGPRLimit / NumSubRegs;
    } else {
      Limit = VGPRLimit / NumSubRegs;
    }

    const int *Sets = getRegClassPressureSets(RC);
    assert(Sets);
    for (unsigned i = 0; Sets[i] != -1; ++i) {
      if (Sets[i] == (int)Idx)
        return Limit;
    }
  }

  return 0;
}

bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  return Fn.getFrameInfo()->hasStackObjects();
}

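// Map a spill pseudo-instruction to the number of 32-bit sub-registers it
// moves, i.e. how many dwords the save or restore covers.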
static unsigned getNumSubRegsForSpillOp(unsigned Op) {

  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}

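// Expand a VGPR spill pseudo into a sequence of single-dword scratch buffer
// loads or stores. If the immediate offset does not fit in the MUBUF 12-bit
// offset field, an SGPR is scavenged to hold the folded offset.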
void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
                                           unsigned LoadStoreOp,
                                           unsigned Value,
                                           unsigned ScratchRsrcReg,
                                           unsigned ScratchOffset,
                                           int64_t Offset,
                                           RegScavenger *RS) const {

  MachineBasicBlock *MBB = MI->getParent();
  const MachineFunction *MF = MI->getParent()->getParent();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
  LLVMContext &Ctx = MF->getFunction()->getContext();
  DebugLoc DL = MI->getDebugLoc();
  bool IsLoad = TII->get(LoadStoreOp).mayLoad();

  bool RanOutOfSGPRs = false;
  unsigned SOffset = ScratchOffset;

  unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
  unsigned Size = NumSubRegs * 4;

  if (!isUInt<12>(Offset + Size)) {
    // The offset does not fit in the immediate field; fold it into a
    // scavenged SGPR instead.
    SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
    if (SOffset == AMDGPU::NoRegister) {
      RanOutOfSGPRs = true;
      SOffset = AMDGPU::SGPR0;
    }
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
        .addReg(ScratchOffset)
        .addImm(Offset);
    Offset = 0;
  }

  if (RanOutOfSGPRs)
    Ctx.emitError("Ran out of SGPRs for spilling VGPRs");

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
    unsigned SubReg = NumSubRegs > 1 ?
        getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
        Value;

    BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
        .addReg(SubReg, getDefRegState(IsLoad))
        .addReg(ScratchRsrcReg)
        .addReg(SOffset)
        .addImm(Offset)
        .addImm(0) // glc
        .addImm(0) // slc
        .addImm(0) // tfe
        .addReg(Value, RegState::Implicit | getDefRegState(IsLoad))
        .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  }
}

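// Rewrite a frame-index operand: SGPR spill pseudos become V_WRITELANE /
// V_READLANE sequences into their assigned spill VGPR lanes, VGPR spill
// pseudos become scratch buffer accesses, and any other frame index is
// folded to an immediate or materialized in a scavenged VGPR.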
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  switch (MI->getOpcode()) {
  // SGPR register spill
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S32_SAVE: {
    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
      unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                         &AMDGPU::SGPR_32RegClass, i);
      SIMachineFunctionInfo::SpilledReg Spill =
          MFI->getSpilledReg(MF, Index, i);

      BuildMI(*MBB, MI, DL,
              TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
              Spill.VGPR)
          .addReg(SubReg)
          .addImm(Spill.Lane);
    }

    // FIXME: Since this spills to another register instead of an actual
    // frame index, we should delete the frame index when all references to
    // it are fixed.
    MI->eraseFromParent();
    break;
  }

  // SGPR register restore
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_S32_RESTORE: {
    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
      unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                         &AMDGPU::SGPR_32RegClass, i);
      SIMachineFunctionInfo::SpilledReg Spill =
          MFI->getSpilledReg(MF, Index, i);

      BuildMI(*MBB, MI, DL,
              TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
              SubReg)
          .addReg(Spill.VGPR)
          .addImm(Spill.Lane)
          .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
    }

    // TODO: only do this when it is needed
    switch (MF->getSubtarget<AMDGPUSubtarget>().getGeneration()) {
    case AMDGPUSubtarget::SOUTHERN_ISLANDS:
      // "VALU writes SGPR" -> "SMRD reads that SGPR" needs 4 wait states
      // ("S_NOP 3") on SI.
      TII->insertWaitStates(MI, 4);
      break;
    case AMDGPUSubtarget::SEA_ISLANDS:
      break;
    default: // VOLCANIC_ISLANDS and later
      // "VALU writes SGPR -> VMEM reads that SGPR" needs 5 wait states
      // ("S_NOP 4") on VI and later. This also applies to VALUs which write
      // VCC, but we're unlikely to see VMEM use VCC.
      TII->insertWaitStates(MI, 5);
    }

    MI->eraseFromParent();
    break;
  }

  // VGPR register spill
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V32_SAVE:
    buildScratchLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
                          TII->getNamedOperand(*MI, AMDGPU::OpName::src)->getReg(),
                          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
                          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
                          FrameInfo->getObjectOffset(Index), RS);
    MI->eraseFromParent();
    break;

  // VGPR register restore
  case AMDGPU::SI_SPILL_V32_RESTORE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
  case AMDGPU::SI_SPILL_V512_RESTORE: {
    buildScratchLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
                          TII->getNamedOperand(*MI, AMDGPU::OpName::dst)->getReg(),
                          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
                          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
                          FrameInfo->getObjectOffset(Index), RS);
    MI->eraseFromParent();
    break;
  }

  default: {
    int64_t Offset = FrameInfo->getObjectOffset(Index);
    FIOp.ChangeToImmediate(Offset);
    if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
      unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, SPAdj);
      BuildMI(*MBB, MI, MI->getDebugLoc(),
              TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
          .addImm(Offset);
      FIOp.ChangeToRegister(TmpReg, false, false, true);
    }
  }
  }
}

unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const {
  return getEncodingValue(Reg) & 0xff;
}

// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!TargetRegisterInfo::isVirtualRegister(Reg));

  static const TargetRegisterClass *const BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass,
    &AMDGPU::SReg_512RegClass
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}

// TODO: It might be helpful to have some target specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  switch (RC->getSize()) {
  case 4:
    return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
  case 8:
    return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
  case 12:
    return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
  case 16:
    return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
  case 32:
    return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
  case 64:
    return getCommonSubClass(&AMDGPU::VReg_512RegClass, RC) != nullptr;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

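// Map a register class to the VGPR class of the same width (getSize() is in
// bytes), e.g. SReg_64 maps to VReg_64.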
const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
                                         const TargetRegisterClass *SRC) const {
  switch (SRC->getSize()) {
  case 4:
    return &AMDGPU::VGPR_32RegClass;
  case 8:
    return &AMDGPU::VReg_64RegClass;
  case 12:
    return &AMDGPU::VReg_96RegClass;
  case 16:
    return &AMDGPU::VReg_128RegClass;
  case 32:
    return &AMDGPU::VReg_256RegClass;
  case 64:
    return &AMDGPU::VReg_512RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
  const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // If this register has a sub-register, we can safely assume it is a 32-bit
  // register, because all of SI's sub-registers are 32-bit.
  if (isSGPRClass(RC)) {
    return &AMDGPU::SGPR_32RegClass;
  } else {
    return &AMDGPU::VGPR_32RegClass;
  }
}

bool SIRegisterInfo::shouldRewriteCopySrc(
  const TargetRegisterClass *DefRC,
  unsigned DefSubReg,
  const TargetRegisterClass *SrcRC,
  unsigned SrcSubReg) const {
  // We want to prefer the smallest register class possible, so we don't want
  // to stop and rewrite on anything that looks like a subregister
  // extract. Operations mostly don't care about the super register class, so
  // we only want to stop on the most basic of copies between the same
  // register class.
  //
  // e.g. if we have something like
  // vreg0 = ...
  // vreg1 = ...
  // vreg2 = REG_SEQUENCE vreg0, sub0, vreg1, sub1, vreg2, sub2
  // vreg3 = COPY vreg2, sub0
  //
  // We want to look through the COPY to find:
  // => vreg3 = COPY vreg0

  // Plain copy.
  return getCommonSubClass(DefRC, SrcRC) != nullptr;
}

unsigned SIRegisterInfo::getPhysRegSubReg(unsigned Reg,
                                          const TargetRegisterClass *SubRC,
                                          unsigned Channel) const {

  switch (Reg) {
  case AMDGPU::VCC:
    switch (Channel) {
    case 0: return AMDGPU::VCC_LO;
    case 1: return AMDGPU::VCC_HI;
    default: llvm_unreachable("Invalid SubIdx for VCC");
    }

  case AMDGPU::FLAT_SCR:
    switch (Channel) {
    case 0: return AMDGPU::FLAT_SCR_LO;
    case 1: return AMDGPU::FLAT_SCR_HI;
    default: llvm_unreachable("Invalid SubIdx for FLAT_SCR");
    }

  case AMDGPU::EXEC:
    switch (Channel) {
    case 0: return AMDGPU::EXEC_LO;
    case 1: return AMDGPU::EXEC_HI;
    default: llvm_unreachable("Invalid SubIdx for EXEC");
    }
  }

  const TargetRegisterClass *RC = getPhysRegClass(Reg);
  // 32-bit registers don't have sub-registers, so we can just return the
  // Reg. We need to have this check here, because the calculation below
  // using getHWRegIndex() will fail with special 32-bit registers like
  // VCC_LO, VCC_HI, EXEC_LO, EXEC_HI and M0.
  if (RC->getSize() == 4) {
    assert(Channel == 0);
    return Reg;
  }

  unsigned Index = getHWRegIndex(Reg);
  return SubRC->getRegister(Index + Channel);
}

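// Immediate operand classification: a 32-bit literal is encoded in an extra
// dword after the instruction, while inline constants are the small values
// the hardware encodes directly in the operand field.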
bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
  return OpType == AMDGPU::OPERAND_REG_IMM32;
}

bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
  if (opCanUseLiteralConstant(OpType))
    return true;

  return OpType == AMDGPU::OPERAND_REG_INLINE_C;
}

// FIXME: Most of these are flexible with HSA and we don't need to reserve them
// as input registers if unused. Whether the dispatch ptr is necessary should be
// easy to detect from used intrinsics. Scratch setup is harder to know.
unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
                                           enum PreloadedValue Value) const {

  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const AMDGPUSubtarget &ST = MF.getSubtarget<AMDGPUSubtarget>();

  switch (Value) {
  case SIRegisterInfo::WORKGROUP_ID_X:
    assert(MFI->hasWorkGroupIDX());
    return MFI->WorkGroupIDXSystemSGPR;
  case SIRegisterInfo::WORKGROUP_ID_Y:
    assert(MFI->hasWorkGroupIDY());
    return MFI->WorkGroupIDYSystemSGPR;
  case SIRegisterInfo::WORKGROUP_ID_Z:
    assert(MFI->hasWorkGroupIDZ());
    return MFI->WorkGroupIDZSystemSGPR;
  case SIRegisterInfo::PRIVATE_SEGMENT_WAVE_BYTE_OFFSET:
    return MFI->PrivateSegmentWaveByteOffsetSystemSGPR;
  case SIRegisterInfo::PRIVATE_SEGMENT_BUFFER:
    assert(ST.isAmdHsaOS() && "Non-HSA ABI currently uses relocations");
    assert(MFI->hasPrivateSegmentBuffer());
    return MFI->PrivateSegmentBufferUserSGPR;
  case SIRegisterInfo::KERNARG_SEGMENT_PTR:
    assert(MFI->hasKernargSegmentPtr());
    return MFI->KernargSegmentPtrUserSGPR;
  case SIRegisterInfo::DISPATCH_PTR:
    assert(MFI->hasDispatchPtr());
    return MFI->DispatchPtrUserSGPR;
  case SIRegisterInfo::QUEUE_PTR:
    llvm_unreachable("not implemented");
  case SIRegisterInfo::WORKITEM_ID_X:
    assert(MFI->hasWorkItemIDX());
    return AMDGPU::VGPR0;
  case SIRegisterInfo::WORKITEM_ID_Y:
    assert(MFI->hasWorkItemIDY());
    return AMDGPU::VGPR1;
  case SIRegisterInfo::WORKITEM_ID_Z:
    assert(MFI->hasWorkItemIDZ());
    return AMDGPU::VGPR2;
  }
  llvm_unreachable("unexpected preloaded value type");
}

/// \brief Returns a register that is not used at any point in the function.
///        If all registers are used, then this function will return
///        AMDGPU::NoRegister.
unsigned SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                            const TargetRegisterClass *RC) const {
  for (unsigned Reg : *RC)
    if (!MRI.isPhysRegUsed(Reg))
      return Reg;
  return AMDGPU::NoRegister;
}

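// Maximum VGPRs usable per wave at a given waves-per-SIMD occupancy. The
// values follow the hardware's 256-register VGPR file and its 4-register
// allocation granularity (e.g. 256 / 10 rounds down to 24).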
unsigned SIRegisterInfo::getNumVGPRsAllowed(unsigned WaveCount) const {
  switch (WaveCount) {
  case 10: return 24;
  case 9:  return 28;
  case 8:  return 32;
  case 7:  return 36;
  case 6:  return 40;
  case 5:  return 48;
  case 4:  return 64;
  case 3:  return 84;
  case 2:  return 128;
  default: return 256;
  }
}

unsigned SIRegisterInfo::getNumSGPRsAllowed(AMDGPUSubtarget::Generation gen,
                                            unsigned WaveCount) const {
  if (gen >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {