//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "SIRegisterInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

SIRegisterInfo::SIRegisterInfo(const AMDGPUSubtarget &st)
    : AMDGPURegisterInfo(st) {}

BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  Reserved.set(AMDGPU::EXEC);

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers,
  // but this seems likely to result in bugs, so mark them as reserved.
  Reserved.set(AMDGPU::EXEC_LO);
  Reserved.set(AMDGPU::EXEC_HI);

  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);
  Reserved.set(AMDGPU::FLAT_SCR);
  Reserved.set(AMDGPU::FLAT_SCR_LO);
  Reserved.set(AMDGPU::FLAT_SCR_HI);

  // Reserve some VGPRs to use as temp registers in case we have to spill VGPRs
  Reserved.set(AMDGPU::VGPR255);
  Reserved.set(AMDGPU::VGPR254);

  // Tonga and Iceland can only allocate a fixed number of SGPRs due
  // to a hw bug.
  if (ST.hasSGPRInitBug()) {
    unsigned NumSGPRs = AMDGPU::SGPR_32RegClass.getNumRegs();
    // Reserve some SGPRs for FLAT_SCRATCH and VCC (4 SGPRs).
    // Assume XNACK_MASK is unused.
    unsigned Limit = AMDGPUSubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG - 4;

    for (unsigned i = Limit; i < NumSGPRs; ++i) {
      unsigned Reg = AMDGPU::SGPR_32RegClass.getRegister(i);
      MCRegAliasIterator R(Reg, this, true);

      // Reserve the SGPR along with every register that aliases it,
      // including the wider tuples that contain it.
      for (; R.isValid(); ++R)
        Reserved.set(*R);
    }
  }

  return Reserved;
}
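
// Note: each pressure-set limit computed below is the per-wave SGPR/VGPR
// budget divided by the width of the register class in 32-bit units, so e.g.
// a budget of 64 VGPRs permits at most 16 concurrently-live VReg_128 values.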
unsigned SIRegisterInfo::getRegPressureSetLimit(unsigned Idx) const {
  // FIXME: We should adjust the max number of waves based on LDS size.
  unsigned SGPRLimit = getNumSGPRsAllowed(ST.getGeneration(),
                                          ST.getMaxWavesPerCU());
  unsigned VGPRLimit = getNumVGPRsAllowed(ST.getMaxWavesPerCU());

  for (regclass_iterator I = regclass_begin(), E = regclass_end();
       I != E; ++I) {
    unsigned NumSubRegs = std::max((int)(*I)->getSize() / 4, 1);
    unsigned Limit;

    if (isSGPRClass(*I)) {
      Limit = SGPRLimit / NumSubRegs;
    } else {
      Limit = VGPRLimit / NumSubRegs;
    }

    const int *Sets = getRegClassPressureSets(*I);
    assert(Sets);
    for (unsigned i = 0; Sets[i] != -1; ++i) {
      if (Sets[i] == (int)Idx)
        return Limit;
    }
  }

  return 256;
}

bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  return Fn.getFrameInfo()->hasStackObjects();
}
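
// Map a SI_SPILL_* pseudo opcode to the width of the spilled register in
// 32-bit sub-registers (dwords): the S512/V512 variants cover sixteen dwords,
// down to one for the 32-bit variants.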
static unsigned getNumSubRegsForSpillOp(unsigned Op) {
  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}

void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
                                           unsigned LoadStoreOp,
                                           unsigned Value,
                                           unsigned ScratchRsrcReg,
                                           unsigned ScratchOffset,
                                           int64_t Offset,
                                           RegScavenger *RS) const {
  const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(ST.getInstrInfo());
  MachineBasicBlock *MBB = MI->getParent();
  const MachineFunction *MF = MI->getParent()->getParent();
  LLVMContext &Ctx = MF->getFunction()->getContext();
  DebugLoc DL = MI->getDebugLoc();
  bool IsLoad = TII->get(LoadStoreOp).mayLoad();

  bool RanOutOfSGPRs = false;
  unsigned SOffset = ScratchOffset;

  unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
  unsigned Size = NumSubRegs * 4;
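
  // The MUBUF immediate offset field is only 12 bits, so if the end of the
  // spilled range is not encodable, fold the frame offset into a scavenged
  // SGPR and address with a zero immediate offset instead.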
  if (!isUInt<12>(Offset + Size)) {
    SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
    if (SOffset == AMDGPU::NoRegister) {
      RanOutOfSGPRs = true;
      SOffset = AMDGPU::SGPR0;
    }
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
            .addReg(ScratchOffset)
            .addImm(Offset);
    Offset = 0;
  }

  if (RanOutOfSGPRs)
    Ctx.emitError("Ran out of SGPRs for spilling VGPRs");

  // Emit one dword load/store per 32-bit sub-register.
  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
    unsigned SubReg = NumSubRegs > 1 ?
        getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
        Value;
    bool IsKill = (i == e - 1);

    BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
            .addReg(SubReg, getDefRegState(IsLoad))
            .addReg(ScratchRsrcReg, getKillRegState(IsKill))
            .addReg(SOffset)
            .addImm(Offset)
            .addImm(0) // glc
            .addImm(0) // slc
            .addImm(0) // tfe
            .addReg(Value, RegState::Implicit | getDefRegState(IsLoad));
  }
}
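
// Lower SI_SPILL_* pseudos into V_WRITELANE/V_READLANE sequences (SGPR
// spills) or scratch buffer loads/stores (VGPR spills); any other frame-index
// operand is folded to an immediate or materialized in a VGPR.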
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(ST.getInstrInfo());
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  switch (MI->getOpcode()) {
    // SGPR register spill
    case AMDGPU::SI_SPILL_S512_SAVE:
    case AMDGPU::SI_SPILL_S256_SAVE:
    case AMDGPU::SI_SPILL_S128_SAVE:
    case AMDGPU::SI_SPILL_S64_SAVE:
    case AMDGPU::SI_SPILL_S32_SAVE: {
      unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

      for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
        unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                           &AMDGPU::SGPR_32RegClass, i);
        SIMachineFunctionInfo::SpilledReg Spill =
            MFI->getSpilledReg(MF, Index, i);

        if (Spill.VGPR == AMDGPU::NoRegister) {
          LLVMContext &Ctx = MF->getFunction()->getContext();
          Ctx.emitError("Ran out of VGPRs for spilling SGPR");
        }

        // V_WRITELANE_B32 copies one SGPR into a single lane of a VGPR, so
        // each 32-bit sub-register gets its own (VGPR, lane) slot.
        BuildMI(*MBB, MI, DL,
                TII->getMCOpcodeFromPseudo(AMDGPU::V_WRITELANE_B32),
                Spill.VGPR)
                .addReg(SubReg)
                .addImm(Spill.Lane);
      }
      MI->eraseFromParent();
      break;
    }

    // SGPR register restore
    case AMDGPU::SI_SPILL_S512_RESTORE:
    case AMDGPU::SI_SPILL_S256_RESTORE:
    case AMDGPU::SI_SPILL_S128_RESTORE:
    case AMDGPU::SI_SPILL_S64_RESTORE:
    case AMDGPU::SI_SPILL_S32_RESTORE: {
      unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

      for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
        unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                           &AMDGPU::SGPR_32RegClass, i);
        bool isM0 = SubReg == AMDGPU::M0;
        SIMachineFunctionInfo::SpilledReg Spill =
            MFI->getSpilledReg(MF, Index, i);

        if (Spill.VGPR == AMDGPU::NoRegister) {
          LLVMContext &Ctx = MF->getFunction()->getContext();
          Ctx.emitError("Ran out of VGPRs for spilling SGPR");
        }

        // M0 cannot be used as the destination of V_READLANE_B32; read into
        // a scavenged SGPR and copy it over with S_MOV_B32 afterwards.
        if (isM0)
          SubReg = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);

        BuildMI(*MBB, MI, DL,
                TII->getMCOpcodeFromPseudo(AMDGPU::V_READLANE_B32),
                SubReg)
                .addReg(Spill.VGPR)
                .addImm(Spill.Lane)
                .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);

        if (isM0) {
          BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
                  .addReg(SubReg);
        }
      }

      // Insert NOPs to avoid the hazard of an instruction reading an SGPR
      // too soon after V_READLANE_B32 writes it.
      TII->insertNOPs(MI, 3);
      MI->eraseFromParent();
      break;
    }

    // VGPR register spill
    case AMDGPU::SI_SPILL_V512_SAVE:
    case AMDGPU::SI_SPILL_V256_SAVE:
    case AMDGPU::SI_SPILL_V128_SAVE:
    case AMDGPU::SI_SPILL_V96_SAVE:
    case AMDGPU::SI_SPILL_V64_SAVE:
    case AMDGPU::SI_SPILL_V32_SAVE:
      buildScratchLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
            TII->getNamedOperand(*MI, AMDGPU::OpName::src)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
            FrameInfo->getObjectOffset(Index), RS);
      MI->eraseFromParent();
      break;

    // VGPR register restore
    case AMDGPU::SI_SPILL_V32_RESTORE:
    case AMDGPU::SI_SPILL_V64_RESTORE:
    case AMDGPU::SI_SPILL_V96_RESTORE:
    case AMDGPU::SI_SPILL_V128_RESTORE:
    case AMDGPU::SI_SPILL_V256_RESTORE:
    case AMDGPU::SI_SPILL_V512_RESTORE: {
      buildScratchLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
            TII->getNamedOperand(*MI, AMDGPU::OpName::dst)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_rsrc)->getReg(),
            TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
            FrameInfo->getObjectOffset(Index), RS);
      MI->eraseFromParent();
      break;
    }

    default: {
      // If the frame offset cannot be encoded directly in the instruction,
      // materialize it in a scavenged VGPR instead.
      int64_t Offset = FrameInfo->getObjectOffset(Index);
      FIOp.ChangeToImmediate(Offset);
      if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
        unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI,
                                               SPAdj);
        BuildMI(*MBB, MI, MI->getDebugLoc(),
                TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
                .addImm(Offset);
        FIOp.ChangeToRegister(TmpReg, false /*isDef*/, false /*isImp*/,
                              true /*isKill*/);
      }
    }
  }
}

const TargetRegisterClass *SIRegisterInfo::getCFGStructurizerRegClass(
                                                                 MVT VT) const {
  switch(VT.SimpleTy) {
    default:
    case MVT::i32: return &AMDGPU::VGPR_32RegClass;
  }
}
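
// The low byte of a register's encoding is its hardware index within the
// SGPR or VGPR file.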
unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const {
  return getEncodingValue(Reg) & 0xff;
}

const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!TargetRegisterInfo::isVirtualRegister(Reg));

  static const TargetRegisterClass *BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}

bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_512RegClass, RC);
}
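
// Return a VGPR class with the same width as \p SRC, for the cases where a
// value held in scalar registers has to be moved into vector registers.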
const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
                                         const TargetRegisterClass *SRC) const {
  if (hasVGPRs(SRC)) {
    return SRC;
  } else if (SRC == &AMDGPU::SCCRegRegClass) {
    return &AMDGPU::VCCRegRegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_32RegClass)) {
    return &AMDGPU::VGPR_32RegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_64RegClass)) {
    return &AMDGPU::VReg_64RegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SReg_128RegClass)) {
    return &AMDGPU::VReg_128RegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SReg_256RegClass)) {
    return &AMDGPU::VReg_256RegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SReg_512RegClass)) {
    return &AMDGPU::VReg_512RegClass;
  }
  return nullptr;
}

const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
                         const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // If this register has a sub-register, we can safely assume it is a 32-bit
  // register, because all of SI's sub-registers are 32-bit.
  if (isSGPRClass(RC)) {
    return &AMDGPU::SGPR_32RegClass;
  } else {
    return &AMDGPU::VGPR_32RegClass;
  }
}
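
// The special registers handled explicitly below (VCC, FLAT_SCR, EXEC) have
// named halves rather than numbered sub-registers, so they cannot go through
// the getHWRegIndex() arithmetic used for ordinary register tuples.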
unsigned SIRegisterInfo::getPhysRegSubReg(unsigned Reg,
                                          const TargetRegisterClass *SubRC,
                                          unsigned Channel) const {
  switch (Reg) {
  case AMDGPU::VCC:
    switch (Channel) {
    case 0: return AMDGPU::VCC_LO;
    case 1: return AMDGPU::VCC_HI;
    default: llvm_unreachable("Invalid SubIdx for VCC");
    }

  case AMDGPU::FLAT_SCR:
    switch (Channel) {
    case 0: return AMDGPU::FLAT_SCR_LO;
    case 1: return AMDGPU::FLAT_SCR_HI;
    default: llvm_unreachable("Invalid SubIdx for FLAT_SCR");
    }

  case AMDGPU::EXEC:
    switch (Channel) {
    case 0: return AMDGPU::EXEC_LO;
    case 1: return AMDGPU::EXEC_HI;
    default: llvm_unreachable("Invalid SubIdx for EXEC");
    }
  }

  const TargetRegisterClass *RC = getPhysRegClass(Reg);
  // 32-bit registers don't have sub-registers, so we can just return the
  // Reg. We need to have this check here, because the calculation below
  // using getHWRegIndex() will fail with special 32-bit registers like
  // VCC_LO, VCC_HI, EXEC_LO, EXEC_HI and M0.
  if (RC->getSize() == 4) {
    assert(Channel == 0);
    return Reg;
  }

  unsigned Index = getHWRegIndex(Reg);
  return SubRC->getRegister(Index + Channel);
}
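
// OPERAND_REG_IMM32 operands can draw a full 32-bit literal from the
// instruction stream; inline-constant operands (OPERAND_REG_INLINE_C) accept
// only the hardware's built-in constants, such as small integers and select
// floating-point values.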
bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
  return OpType == AMDGPU::OPERAND_REG_IMM32;
}

bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
  if (opCanUseLiteralConstant(OpType))
    return true;

  return OpType == AMDGPU::OPERAND_REG_INLINE_C;
}
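
// These values are preloaded by the hardware at wavefront launch: the
// workgroup IDs live in the SGPRs directly after the user SGPRs, and the
// workitem IDs occupy the first three VGPRs.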
unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
                                           enum PreloadedValue Value) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  switch (Value) {
  case SIRegisterInfo::TGID_X:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 0);
  case SIRegisterInfo::TGID_Y:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 1);
  case SIRegisterInfo::TGID_Z:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 2);
  case SIRegisterInfo::SCRATCH_WAVE_OFFSET:
    if (MFI->getShaderType() != ShaderType::COMPUTE)
      return MFI->ScratchOffsetReg;
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 4);
  case SIRegisterInfo::SCRATCH_PTR:
    return AMDGPU::SGPR2_SGPR3;
  case SIRegisterInfo::INPUT_PTR:
    return AMDGPU::SGPR0_SGPR1;
  case SIRegisterInfo::TIDIG_X:
    return AMDGPU::VGPR0;
  case SIRegisterInfo::TIDIG_Y:
    return AMDGPU::VGPR1;
  case SIRegisterInfo::TIDIG_Z:
    return AMDGPU::VGPR2;
  }
  llvm_unreachable("unexpected preloaded value type");
}

/// \brief Returns a register that is not used at any point in the function.
///        If all registers are used, then this function will return
///        AMDGPU::NoRegister.
unsigned SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                           const TargetRegisterClass *RC) const {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    if (!MRI.isPhysRegUsed(*I))
      return *I;
  }
  return AMDGPU::NoRegister;
}
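
// Occupancy-based VGPR budget: all waves resident on a SIMD share 256 VGPRs,
// allocated in granules of four, so the per-wave limit is roughly
// 256 / WaveCount rounded down to a multiple of four.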
unsigned SIRegisterInfo::getNumVGPRsAllowed(unsigned WaveCount) const {
  switch (WaveCount) {
  case 10: return 24;
  case 9: return 28;
  case 8: return 32;
  case 7: return 36;
  case 6: return 40;
  case 5: return 48;
  case 4: return 64;
  case 3: return 84;
  case 2: return 128;
  default: return 256;
  }
}
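
// SGPR budgets differ by generation; the defaults below reflect the maximum
// addressable SGPRs per wave (102 on VOLCANIC_ISLANDS, 103 on SI/CI).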
unsigned SIRegisterInfo::getNumSGPRsAllowed(AMDGPUSubtarget::Generation gen,
                                            unsigned WaveCount) const {
  if (gen >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    switch (WaveCount) {
    case 10: return 80;
    case 9: return 80;
    case 8: return 96;
    default: return 102;
    }
  } else {
    switch (WaveCount) {
    case 10: return 48;
    case 9: return 56;
    case 8: return 64;
    case 7: return 72;
    case 6: return 80;
    case 5: return 96;
    default: return 103;
    }
  }
}