//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "SIRegisterInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

SIRegisterInfo::SIRegisterInfo() : AMDGPURegisterInfo() {}
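
// Reserving a register is not enough by itself: every alias, including the
// wider tuples that contain it (e.g. SGPR0_SGPR1 when SGPR0 is reserved),
// must be reserved too, or the allocator could still hand out an overlapping
// register. Walking MCRegAliasIterator with IncludeSelf covers Reg and all
// of its aliases.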
void SIRegisterInfo::reserveRegisterTuples(BitVector &Reserved,
                                           unsigned Reg) const {
  MCRegAliasIterator R(Reg, this, true);

  for (; R.isValid(); ++R)
    Reserved.set(*R);
}

BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers, but
  // this seems likely to result in bugs, so I'm marking them as reserved.
  reserveRegisterTuples(Reserved, AMDGPU::EXEC);
  reserveRegisterTuples(Reserved, AMDGPU::FLAT_SCR);

  // Tonga and Iceland can only allocate a fixed number of SGPRs due
  // to a hardware bug.
  if (MF.getSubtarget<AMDGPUSubtarget>().hasSGPRInitBug()) {
    unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
    // Reserve some SGPRs for FLAT_SCRATCH and VCC (4 SGPRs).
    // Assume XNACK_MASK is unused.
    unsigned Limit = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 4;

    for (unsigned i = Limit; i < NumSGPRs; ++i) {
      unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
      reserveRegisterTuples(Reserved, Reg);
    }
  }

  return Reserved;
}

unsigned SIRegisterInfo::getRegPressureSetLimit(const MachineFunction &MF,
                                                unsigned Idx) const {
  const AMDGPUSubtarget &STI = MF.getSubtarget<AMDGPUSubtarget>();
  // FIXME: We should adjust the max number of waves based on LDS size.
  unsigned SGPRLimit = getNumSGPRsAllowed(STI.getGeneration(),
                                          STI.getMaxWavesPerCU());
  unsigned VGPRLimit = getNumVGPRsAllowed(STI.getMaxWavesPerCU());

  for (regclass_iterator I = regclass_begin(), E = regclass_end();
       I != E; ++I) {
    // Pressure is tracked in 32-bit register units, so a class's limit is
    // the per-wave register budget divided by the class's width in dwords.
    unsigned NumSubRegs = std::max((int)(*I)->getSize() / 4, 1);
    unsigned Limit;

    if (isSGPRClass(*I)) {
      Limit = SGPRLimit / NumSubRegs;
    } else {
      Limit = VGPRLimit / NumSubRegs;
    }

    const int *Sets = getRegClassPressureSets(*I);
    assert(Sets);
    for (unsigned i = 0; Sets[i] != -1; ++i) {
      if (Sets[i] == (int)Idx)
        return Limit;
    }
  }

  return 256;
}

bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  return Fn.getFrameInfo()->hasStackObjects();
}
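
// Map a spill pseudo to the number of 32-bit sub-registers it covers, i.e.
// the spill width in dwords: a 512-bit tuple is 16 dwords, a 64-bit tuple
// is 2, and a single 32-bit register is 1.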
static unsigned getNumSubRegsForSpillOp(unsigned Op) {
  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}
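
/// Expand a VGPR spill pseudo into a series of per-dword scratch buffer
/// loads or stores, addressed through the scratch resource descriptor plus
/// the per-wave scratch offset register. Frame offsets that do not fit in
/// the 12-bit MUBUF immediate are first folded into a scavenged SGPR.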
void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
                                           unsigned LoadStoreOp,
                                           unsigned Value,
                                           unsigned ScratchRsrcReg,
                                           unsigned ScratchOffset,
                                           int64_t Offset,
                                           RegScavenger *RS) const {
  MachineBasicBlock *MBB = MI->getParent();
  const MachineFunction *MF = MI->getParent()->getParent();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
  LLVMContext &Ctx = MF->getFunction()->getContext();
  DebugLoc DL = MI->getDebugLoc();
  bool IsLoad = TII->get(LoadStoreOp).mayLoad();

  bool RanOutOfSGPRs = false;
  unsigned SOffset = ScratchOffset;

  unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
  unsigned Size = NumSubRegs * 4;

  // If the last dword of the access would be outside the 12-bit immediate
  // offset range, fold the whole frame offset into an SGPR and address
  // relative to that register instead.
  if (!isUInt<12>(Offset + Size)) {
    SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
    if (SOffset == AMDGPU::NoRegister) {
      RanOutOfSGPRs = true;
      SOffset = AMDGPU::SGPR0;
    }
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
        .addReg(ScratchOffset)
        .addImm(Offset);
    Offset = 0;
  }

  if (RanOutOfSGPRs)
    Ctx.emitError("Ran out of SGPRs for spilling VGPRs");

  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
    unsigned SubReg = NumSubRegs > 1 ?
        getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
        Value;
    bool IsKill = (i == e - 1);

    BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
        .addReg(SubReg, getDefRegState(IsLoad))
        .addReg(ScratchRsrcReg, getKillRegState(IsKill))
        .addReg(SOffset)
        .addImm(Offset)
        .addImm(0) // glc
        .addImm(0) // slc
        .addImm(0) // tfe
        .addReg(Value, RegState::Implicit | getDefRegState(IsLoad))
        .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  }
}
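
/// Rewrite the frame-index operand of MI to a concrete scratch offset,
/// expanding the SI spill/restore pseudos into V_WRITELANE/V_READLANE
/// sequences for SGPRs and scratch buffer accesses for VGPRs.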
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo *>(MF->getSubtarget().getInstrInfo());
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  switch (MI->getOpcode()) {
  // SGPR register spill
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S32_SAVE: {
    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
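
    // SGPR spills do not touch memory: each 32-bit sub-register is written
    // into the lane of a carrier VGPR that SIMachineFunctionInfo assigned to
    // this frame index, one V_WRITELANE_B32 per dword.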
    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
      unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                         &AMDGPU::SGPR_32RegClass, i);
      struct SIMachineFunctionInfo::SpilledReg Spill =
          MFI->getSpilledReg(MF, Index, i);

      if (Spill.VGPR == AMDGPU::NoRegister) {
        LLVMContext &Ctx = MF->getFunction()->getContext();
        Ctx.emitError("Ran out of VGPRs for spilling SGPR");
      }

      BuildMI(*MBB, MI, DL,
              TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
              Spill.VGPR)
          .addReg(SubReg)
          .addImm(Spill.Lane);
    }
    MI->eraseFromParent();
    break;
  }

  // SGPR register restore
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_S32_RESTORE: {
    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

    // The mirror image of the spill: read each lane back into the SGPR
    // sub-register with V_READLANE_B32.
    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
      unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                         &AMDGPU::SGPR_32RegClass, i);
      struct SIMachineFunctionInfo::SpilledReg Spill =
          MFI->getSpilledReg(MF, Index, i);

      if (Spill.VGPR == AMDGPU::NoRegister) {
        LLVMContext &Ctx = MF->getFunction()->getContext();
        Ctx.emitError("Ran out of VGPRs for spilling SGPR");
      }

      BuildMI(*MBB, MI, DL,
              TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
              SubReg)
          .addReg(Spill.VGPR)
          .addImm(Spill.Lane)
          .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);
    }

    // TODO: only do this when it is needed
    switch (MF->getSubtarget<AMDGPUSubtarget>().getGeneration()) {
    case AMDGPUSubtarget::SOUTHERN_ISLANDS:
      // "VALU writes SGPR" -> "SMRD reads that SGPR" needs "S_NOP 3" on SI
      TII->insertNOPs(MI, 3);
      break;
    case AMDGPUSubtarget::SEA_ISLANDS:
      break;
    default: // VOLCANIC_ISLANDS and later
      // "VALU writes SGPR -> VMEM reads that SGPR" needs "S_NOP 4" on VI
      // and later. This also applies to VALUs which write VCC, but we're
      // unlikely to see VMEM use VCC.
      TII->insertNOPs(MI, 4);
    }

    MI->eraseFromParent();
    break;
  }
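
  // VGPR spills, by contrast, do go through scratch memory; each case below
  // expands into the per-dword buffer sequence built by buildScratchLoadStore.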
  // VGPR register spill
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V32_SAVE:
    buildScratchLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
          TII->getNamedOperand(*MI, AMDGPU::OpName::src)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
          FrameInfo->getObjectOffset(Index), RS);
    MI->eraseFromParent();
    break;
  // VGPR register restore
  case AMDGPU::SI_SPILL_V32_RESTORE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
  case AMDGPU::SI_SPILL_V512_RESTORE: {
    buildScratchLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
          TII->getNamedOperand(*MI, AMDGPU::OpName::dst)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
          FrameInfo->getObjectOffset(Index), RS);
    MI->eraseFromParent();
    break;
  }

  default: {
    int64_t Offset = FrameInfo->getObjectOffset(Index);
    FIOp.ChangeToImmediate(Offset);
    if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
      unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, SPAdj);
      BuildMI(*MBB, MI, MI->getDebugLoc(),
              TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
          .addImm(Offset);
      FIOp.ChangeToRegister(TmpReg, false, false, true);
    }
  }
  }
}

unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const {
  return getEncodingValue(Reg) & 0xff;
}

// FIXME: This is very slow. It might be worth creating a map from physreg to
// register class.
const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!TargetRegisterInfo::isVirtualRegister(Reg));

  static const TargetRegisterClass *const BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass,
    &AMDGPU::SReg_512RegClass
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}

// TODO: It might be helpful to have some target specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  switch (RC->getSize()) {
  case 4:
    return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
  case 8:
    return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
  case 12:
    return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
  case 16:
    return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
  case 32:
    return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
  case 64:
    return getCommonSubClass(&AMDGPU::VReg_512RegClass, RC) != nullptr;
  default:
    llvm_unreachable("Invalid register class size");
  }
}
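
// Maps a register class to the VGPR class of the same bit width, e.g.
// SReg_64 (or any other 64-bit class) to VReg_64; getSize() is in bytes.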
const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
                                         const TargetRegisterClass *SRC) const {
  switch (SRC->getSize()) {
  case 4:
    return &AMDGPU::VGPR_32RegClass;
  case 8:
    return &AMDGPU::VReg_64RegClass;
  case 12:
    return &AMDGPU::VReg_96RegClass;
  case 16:
    return &AMDGPU::VReg_128RegClass;
  case 32:
    return &AMDGPU::VReg_256RegClass;
  case 64:
    return &AMDGPU::VReg_512RegClass;
  default:
    llvm_unreachable("Invalid register class size");
  }
}

const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
                         const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // If this register has a sub-register, we can safely assume it is a 32-bit
  // register, because all of SI's sub-registers are 32-bit.
  if (isSGPRClass(RC)) {
    return &AMDGPU::SGPR_32RegClass;
  }
  return &AMDGPU::VGPR_32RegClass;
}

bool SIRegisterInfo::shouldRewriteCopySrc(const TargetRegisterClass *DefRC,
                                          unsigned DefSubReg,
                                          const TargetRegisterClass *SrcRC,
                                          unsigned SrcSubReg) const {
  // We want to prefer the smallest register class possible, so we don't want
  // to stop and rewrite on anything that looks like a subregister
  // extract. Operations mostly don't care about the super register class, so
  // we only want to stop on the most basic of copies between the same
  // register class.
  //
  // e.g. if we have something like
  // vreg0 = ...
  // vreg1 = ...
  // vreg2 = REG_SEQUENCE vreg0, sub0, vreg1, sub1, vreg2, sub2
  // vreg3 = COPY vreg2, sub0
  //
  // We want to look through the COPY to find:
  //  => vreg3 = COPY vreg0

  // Plain copy.
  return getCommonSubClass(DefRC, SrcRC) != nullptr;
}

unsigned SIRegisterInfo::getPhysRegSubReg(unsigned Reg,
                                          const TargetRegisterClass *SubRC,
                                          unsigned Channel) const {
  // The special 64-bit registers are split into explicit LO/HI halves rather
  // than numbered tuples, so handle them by name.
  switch (Reg) {
  case AMDGPU::VCC:
    switch (Channel) {
    case 0: return AMDGPU::VCC_LO;
    case 1: return AMDGPU::VCC_HI;
    default: llvm_unreachable("Invalid SubIdx for VCC");
    }
  case AMDGPU::FLAT_SCR:
    switch (Channel) {
    case 0: return AMDGPU::FLAT_SCR_LO;
    case 1: return AMDGPU::FLAT_SCR_HI;
    default: llvm_unreachable("Invalid SubIdx for FLAT_SCR");
    }
  case AMDGPU::EXEC:
    switch (Channel) {
    case 0: return AMDGPU::EXEC_LO;
    case 1: return AMDGPU::EXEC_HI;
    default: llvm_unreachable("Invalid SubIdx for EXEC");
    }
  }

  const TargetRegisterClass *RC = getPhysRegClass(Reg);
  // 32-bit registers don't have sub-registers, so we can just return the
  // Reg. We need to have this check here, because the calculation below
  // using getHWRegIndex() will fail with special 32-bit registers like
  // VCC_LO, VCC_HI, EXEC_LO, EXEC_HI and M0.
  if (RC->getSize() == 4) {
    assert(Channel == 0);
    return Reg;
  }

  unsigned Index = getHWRegIndex(Reg);
  return SubRC->getRegister(Index + Channel);
}
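
// Illustrative example (not from the original source): getPhysRegSubReg(
// AMDGPU::SGPR2_SGPR3, &AMDGPU::SGPR_32RegClass, 1) yields SGPR3, since the
// tuple's hardware index is 2 and channel 1 selects the next 32-bit register.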

bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
  return OpType == AMDGPU::OPERAND_REG_IMM32;
}

bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
  if (opCanUseLiteralConstant(OpType))
    return true;

  return OpType == AMDGPU::OPERAND_REG_INLINE_C;
}
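
// Preloaded values are placed in fixed registers when a wave is launched:
// the work-group IDs land in the system SGPRs that follow the user SGPRs,
// and the work-item IDs arrive in VGPR0-2.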
unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
                                           enum PreloadedValue Value) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  switch (Value) {
  case SIRegisterInfo::TGID_X:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 0);
  case SIRegisterInfo::TGID_Y:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 1);
  case SIRegisterInfo::TGID_Z:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 2);
  case SIRegisterInfo::SCRATCH_WAVE_OFFSET:
    if (MFI->getShaderType() != ShaderType::COMPUTE)
      return MFI->ScratchOffsetReg;
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 4);
  case SIRegisterInfo::SCRATCH_PTR:
    return AMDGPU::SGPR2_SGPR3;
  case SIRegisterInfo::INPUT_PTR:
    return AMDGPU::SGPR0_SGPR1;
  case SIRegisterInfo::TIDIG_X:
    return AMDGPU::VGPR0;
  case SIRegisterInfo::TIDIG_Y:
    return AMDGPU::VGPR1;
  case SIRegisterInfo::TIDIG_Z:
    return AMDGPU::VGPR2;
  }
  llvm_unreachable("unexpected preloaded value type");
}

/// \brief Returns a register that is not used at any point in the function.
///        If all registers are used, then this function will return
///        AMDGPU::NoRegister.
unsigned SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                            const TargetRegisterClass *RC) const {
  for (unsigned Reg : *RC)
    if (!MRI.isPhysRegUsed(Reg))
      return Reg;
  return AMDGPU::NoRegister;
}
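
// Occupancy trades register budget for latency hiding: the more waves that
// must be resident on a SIMD, the fewer VGPRs each wave may use. The table
// below follows the hardware's allocation granularity of four VGPRs.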
unsigned SIRegisterInfo::getNumVGPRsAllowed(unsigned WaveCount) const {
  switch (WaveCount) {
  case 10: return 24;
  case 9: return 28;
  case 8: return 32;
  case 7: return 36;
  case 6: return 40;
  case 5: return 48;
  case 4: return 64;
  case 3: return 84;
  case 2: return 128;
  default: return 256;
  }
}

unsigned SIRegisterInfo::getNumSGPRsAllowed(AMDGPUSubtarget::Generation gen,
                                            unsigned WaveCount) const {
  if (gen >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {