//===-- SIRegisterInfo.cpp - SI Register Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI implementation of the TargetRegisterInfo class.
//
//===----------------------------------------------------------------------===//

#include "SIRegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

SIRegisterInfo::SIRegisterInfo(const AMDGPUSubtarget &st)
  : AMDGPURegisterInfo(st) { }

BitVector SIRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  Reserved.set(AMDGPU::EXEC);

  // EXEC_LO and EXEC_HI could be allocated and used as regular registers,
  // but this seems likely to result in bugs, so I'm marking them as reserved.
  Reserved.set(AMDGPU::EXEC_LO);
  Reserved.set(AMDGPU::EXEC_HI);

  Reserved.set(AMDGPU::INDIRECT_BASE_ADDR);
  Reserved.set(AMDGPU::FLAT_SCR);
  Reserved.set(AMDGPU::FLAT_SCR_LO);
  Reserved.set(AMDGPU::FLAT_SCR_HI);

  // Reserve some VGPRs to use as temp registers in case we have to spill VGPRs.
  Reserved.set(AMDGPU::VGPR255);
  Reserved.set(AMDGPU::VGPR254);

  return Reserved;
}

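// Returning the full class size as the pressure limit tells the scheduler's
// register-pressure trackers that every register in the class may be used,
// i.e. no pressure-based throttling is applied for these classes.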
unsigned SIRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                             MachineFunction &MF) const {
  return RC->getNumRegs();
}

bool SIRegisterInfo::requiresRegisterScavenging(const MachineFunction &Fn) const {
  return Fn.getFrameInfo()->hasStackObjects();
}

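// Spill pseudos are named after the bit width they spill and are expanded
// one 32-bit sub-register at a time, so a 512-bit spill touches 16
// sub-registers, a 256-bit spill 8, and so on down to 1 for 32 bits.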
static unsigned getNumSubRegsForSpillOp(unsigned Op) {
  switch (Op) {
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V512_RESTORE:
    return 16;
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
    return 8;
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
    return 4;
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V96_RESTORE:
    return 3;
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
    return 2;
  case AMDGPU::SI_SPILL_S32_SAVE:
  case AMDGPU::SI_SPILL_S32_RESTORE:
  case AMDGPU::SI_SPILL_V32_SAVE:
  case AMDGPU::SI_SPILL_V32_RESTORE:
    return 1;
  default: llvm_unreachable("Invalid spill opcode");
  }
}

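// Expand a VGPR spill pseudo into real scratch buffer accesses: materialize
// a buffer resource descriptor in a scavenged SReg_128 (falling back to
// SGPR0_SGPR1_SGPR2_SGPR3 if scavenging fails) and emit one
// BUFFER_LOAD/STORE_DWORD per 32-bit sub-register of Value.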
void SIRegisterInfo::buildScratchLoadStore(MachineBasicBlock::iterator MI,
                                           unsigned LoadStoreOp,
                                           unsigned Value,
                                           unsigned ScratchPtr,
                                           unsigned ScratchOffset,
                                           int64_t Offset,
                                           RegScavenger *RS) const {
  const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(ST.getInstrInfo());
  MachineBasicBlock *MBB = MI->getParent();
  const MachineFunction *MF = MI->getParent()->getParent();
  LLVMContext &Ctx = MF->getFunction()->getContext();
  DebugLoc DL = MI->getDebugLoc();
  bool IsLoad = TII->get(LoadStoreOp).mayLoad();

  bool RanOutOfSGPRs = false;
  unsigned SOffset = ScratchOffset;

  unsigned RsrcReg = RS->scavengeRegister(&AMDGPU::SReg_128RegClass, MI, 0);
  if (RsrcReg == AMDGPU::NoRegister) {
    RanOutOfSGPRs = true;
    RsrcReg = AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3;
  }

  unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());
  unsigned Size = NumSubRegs * 4;

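  // Buffer resource descriptor layout: sub0_sub1 holds the 64-bit scratch
  // base pointer, sub2 the num_records field (0xffffffff, effectively
  // unlimited), and sub3 the data-format and TID-enable bits so each lane's
  // accesses are swizzled by thread ID.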
  uint64_t Rsrc = AMDGPU::RSRC_DATA_FORMAT | AMDGPU::RSRC_TID_ENABLE |
                  0xffffffff; // Size

  BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B64),
          getSubReg(RsrcReg, AMDGPU::sub0_sub1))
          .addReg(ScratchPtr)
          .addReg(RsrcReg, RegState::ImplicitDefine);

  BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32),
          getSubReg(RsrcReg, AMDGPU::sub2))
          .addImm(Rsrc & 0xffffffff)
          .addReg(RsrcReg, RegState::ImplicitDefine);

  BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32),
          getSubReg(RsrcReg, AMDGPU::sub3))
          .addImm(Rsrc >> 32)
          .addReg(RsrcReg, RegState::ImplicitDefine);

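  // The buffer instruction's immediate offset field is only 12 bits, so if
  // the frame offset cannot be encoded directly, add it to the scratch wave
  // offset in a scavenged SGPR and use that as soffset instead.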
  if (!isUInt<12>(Offset + Size)) {
    SOffset = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);
    if (SOffset == AMDGPU::NoRegister) {
      RanOutOfSGPRs = true;
      SOffset = AMDGPU::SGPR0;
    }
    BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), SOffset)
            .addReg(ScratchOffset)
            .addImm(Offset);
    Offset = 0;
  }

  if (RanOutOfSGPRs)
    Ctx.emitError("Ran out of SGPRs for spilling VGPRs");

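  // Emit one dword-wide buffer access per 32-bit sub-register, stepping the
  // immediate offset by 4 bytes each iteration.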
  for (unsigned i = 0, e = NumSubRegs; i != e; ++i, Offset += 4) {
    unsigned SubReg = NumSubRegs > 1 ?
        getPhysRegSubReg(Value, &AMDGPU::VGPR_32RegClass, i) :
        Value;
    bool IsKill = (i == e - 1);

    BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
            .addReg(SubReg, getDefRegState(IsLoad))
            .addReg(RsrcReg, getKillRegState(IsKill))
            .addImm(Offset)
            .addReg(SOffset, getKillRegState(IsKill))
            .addImm(0) // glc
            .addImm(0) // slc
            .addImm(0) // tfe
            .addReg(Value, RegState::Implicit | getDefRegState(IsLoad));
  }
}

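// Rewrite the frame-index operand of MI. Spill pseudos are expanded here:
// SGPR spills become V_WRITELANE/V_READLANE into the VGPR lanes reserved by
// SIMachineFunctionInfo, VGPR spills become scratch buffer accesses, and any
// other frame index is folded to its byte offset.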
void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  MachineFunction *MF = MI->getParent()->getParent();
  MachineBasicBlock *MBB = MI->getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  const SIInstrInfo *TII = static_cast<const SIInstrInfo*>(ST.getInstrInfo());
  DebugLoc DL = MI->getDebugLoc();

  MachineOperand &FIOp = MI->getOperand(FIOperandNum);
  int Index = MI->getOperand(FIOperandNum).getIndex();

  switch (MI->getOpcode()) {
  // SGPR register spill
  case AMDGPU::SI_SPILL_S512_SAVE:
  case AMDGPU::SI_SPILL_S256_SAVE:
  case AMDGPU::SI_SPILL_S128_SAVE:
  case AMDGPU::SI_SPILL_S64_SAVE:
  case AMDGPU::SI_SPILL_S32_SAVE: {
    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
      unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                         &AMDGPU::SGPR_32RegClass, i);
      struct SIMachineFunctionInfo::SpilledReg Spill =
          MFI->getSpilledReg(MF, Index, i);

      if (Spill.VGPR == AMDGPU::NoRegister) {
        LLVMContext &Ctx = MF->getFunction()->getContext();
        Ctx.emitError("Ran out of VGPRs for spilling SGPR");
      }

      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_WRITELANE_B32), Spill.VGPR)
              .addReg(SubReg)
              .addImm(Spill.Lane);
    }
    MI->eraseFromParent();
    break;
  }

  // SGPR register restore
  case AMDGPU::SI_SPILL_S512_RESTORE:
  case AMDGPU::SI_SPILL_S256_RESTORE:
  case AMDGPU::SI_SPILL_S128_RESTORE:
  case AMDGPU::SI_SPILL_S64_RESTORE:
  case AMDGPU::SI_SPILL_S32_RESTORE: {
    unsigned NumSubRegs = getNumSubRegsForSpillOp(MI->getOpcode());

    for (unsigned i = 0, e = NumSubRegs; i < e; ++i) {
      unsigned SubReg = getPhysRegSubReg(MI->getOperand(0).getReg(),
                                         &AMDGPU::SGPR_32RegClass, i);
      bool isM0 = SubReg == AMDGPU::M0;
      struct SIMachineFunctionInfo::SpilledReg Spill =
          MFI->getSpilledReg(MF, Index, i);

      if (Spill.VGPR == AMDGPU::NoRegister) {
        LLVMContext &Ctx = MF->getFunction()->getContext();
        Ctx.emitError("Ran out of VGPRs for spilling SGPR");
      }

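      // M0 is special-cased: V_READLANE cannot target it directly, so read
      // into a scavenged SGPR and copy that into M0 with S_MOV_B32 below.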
      if (isM0)
        SubReg = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, MI, 0);

      BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_READLANE_B32), SubReg)
              .addReg(Spill.VGPR)
              .addImm(Spill.Lane)
              .addReg(MI->getOperand(0).getReg(), RegState::ImplicitDefine);

      if (isM0) {
        BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
                .addReg(SubReg);
      }
    }

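    // The V_READLANEs above are VALU writes to SGPRs; insert NOPs so the
    // required wait states elapse before those SGPRs can be read again.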
    TII->insertNOPs(MI, 3);
    MI->eraseFromParent();
    break;
  }

  // VGPR register spill
  case AMDGPU::SI_SPILL_V512_SAVE:
  case AMDGPU::SI_SPILL_V256_SAVE:
  case AMDGPU::SI_SPILL_V128_SAVE:
  case AMDGPU::SI_SPILL_V96_SAVE:
  case AMDGPU::SI_SPILL_V64_SAVE:
  case AMDGPU::SI_SPILL_V32_SAVE:
    buildScratchLoadStore(MI, AMDGPU::BUFFER_STORE_DWORD_OFFSET,
          TII->getNamedOperand(*MI, AMDGPU::OpName::src)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_ptr)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
          FrameInfo->getObjectOffset(Index), RS);
    MI->eraseFromParent();
    break;

  // VGPR register restore
  case AMDGPU::SI_SPILL_V32_RESTORE:
  case AMDGPU::SI_SPILL_V64_RESTORE:
  case AMDGPU::SI_SPILL_V128_RESTORE:
  case AMDGPU::SI_SPILL_V256_RESTORE:
  case AMDGPU::SI_SPILL_V512_RESTORE: {
    buildScratchLoadStore(MI, AMDGPU::BUFFER_LOAD_DWORD_OFFSET,
          TII->getNamedOperand(*MI, AMDGPU::OpName::dst)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_ptr)->getReg(),
          TII->getNamedOperand(*MI, AMDGPU::OpName::scratch_offset)->getReg(),
          FrameInfo->getObjectOffset(Index), RS);
    MI->eraseFromParent();
    break;
  }

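  // All other instructions: fold the frame index to its byte offset; if the
  // resulting immediate is not legal for this operand, materialize it in a
  // scavenged VGPR with V_MOV_B32 instead.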
  default: {
    int64_t Offset = FrameInfo->getObjectOffset(Index);
    FIOp.ChangeToImmediate(Offset);
    if (!TII->isImmOperandLegal(MI, FIOperandNum, FIOp)) {
      unsigned TmpReg = RS->scavengeRegister(&AMDGPU::VGPR_32RegClass, MI, SPAdj);
      BuildMI(*MBB, MI, MI->getDebugLoc(),
              TII->get(AMDGPU::V_MOV_B32_e32), TmpReg)
              .addImm(Offset);
      FIOp.ChangeToRegister(TmpReg, false);
    }
  }
  }
}

const TargetRegisterClass * SIRegisterInfo::getCFGStructurizerRegClass(
                                                                   MVT VT) const {
  switch(VT.SimpleTy) {
    default: return nullptr;
    case MVT::i32: return &AMDGPU::VGPR_32RegClass;
  }
}

unsigned SIRegisterInfo::getHWRegIndex(unsigned Reg) const {
  return getEncodingValue(Reg) & 0xff;
}

const TargetRegisterClass *SIRegisterInfo::getPhysRegClass(unsigned Reg) const {
  assert(!TargetRegisterInfo::isVirtualRegister(Reg));

  static const TargetRegisterClass *BaseClasses[] = {
    &AMDGPU::VGPR_32RegClass,
    &AMDGPU::SReg_32RegClass,
    &AMDGPU::VReg_64RegClass,
    &AMDGPU::SReg_64RegClass,
    &AMDGPU::VReg_96RegClass,
    &AMDGPU::VReg_128RegClass,
    &AMDGPU::SReg_128RegClass,
    &AMDGPU::VReg_256RegClass,
    &AMDGPU::SReg_256RegClass,
    &AMDGPU::VReg_512RegClass
  };

  for (const TargetRegisterClass *BaseClass : BaseClasses) {
    if (BaseClass->contains(Reg)) {
      return BaseClass;
    }
  }
  return nullptr;
}

bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
  return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) ||
         getCommonSubClass(&AMDGPU::VReg_512RegClass, RC);
}

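// Map a scalar register class to a vector register class of the same width,
// e.g. when an SALU instruction has to be rewritten into its VALU form.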
const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
                                         const TargetRegisterClass *SRC) const {
  if (hasVGPRs(SRC)) {
    return SRC;
  } else if (SRC == &AMDGPU::SCCRegRegClass) {
    return &AMDGPU::VCCRegRegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_32RegClass)) {
    return &AMDGPU::VGPR_32RegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SGPR_64RegClass)) {
    return &AMDGPU::VReg_64RegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SReg_128RegClass)) {
    return &AMDGPU::VReg_128RegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SReg_256RegClass)) {
    return &AMDGPU::VReg_256RegClass;
  } else if (getCommonSubClass(SRC, &AMDGPU::SReg_512RegClass)) {
    return &AMDGPU::VReg_512RegClass;
  }
  return nullptr;
}

const TargetRegisterClass *SIRegisterInfo::getSubRegClass(
                         const TargetRegisterClass *RC, unsigned SubIdx) const {
  if (SubIdx == AMDGPU::NoSubRegister)
    return RC;

  // Any non-trivial sub-register index selects a 32-bit register, because
  // all of SI's sub-registers are 32-bit.
  if (isSGPRClass(RC)) {
    return &AMDGPU::SGPR_32RegClass;
  } else {
    return &AMDGPU::VGPR_32RegClass;
  }
}

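// Return the physical register that backs channel \p Channel of \p Reg in
// the 32-bit class \p SubRC. VCC, EXEC and FLAT_SCR are special-cased since
// the hardware-index arithmetic below does not apply to them.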
unsigned SIRegisterInfo::getPhysRegSubReg(unsigned Reg,
                                          const TargetRegisterClass *SubRC,
                                          unsigned Channel) const {
  switch (Reg) {
  case AMDGPU::VCC:
    switch (Channel) {
    case 0: return AMDGPU::VCC_LO;
    case 1: return AMDGPU::VCC_HI;
    default: llvm_unreachable("Invalid SubIdx for VCC");
    }

  case AMDGPU::FLAT_SCR:
    switch (Channel) {
    case 0:
      return AMDGPU::FLAT_SCR_LO;
    case 1:
      return AMDGPU::FLAT_SCR_HI;
    default:
      llvm_unreachable("Invalid SubIdx for FLAT_SCR");
    }
    break;

  case AMDGPU::EXEC:
    switch (Channel) {
    case 0:
      return AMDGPU::EXEC_LO;
    case 1:
      return AMDGPU::EXEC_HI;
    default:
      llvm_unreachable("Invalid SubIdx for EXEC");
    }
    break;
  }

  const TargetRegisterClass *RC = getPhysRegClass(Reg);
  // 32-bit registers don't have sub-registers, so we can just return Reg.
  // We need this check here, because the calculation below using
  // getHWRegIndex() will fail with special 32-bit registers like
  // VCC_LO, VCC_HI, EXEC_LO, EXEC_HI and M0.
  if (RC->getSize() == 4) {
    assert(Channel == 0);
    return Reg;
  }

  unsigned Index = getHWRegIndex(Reg);
  return SubRC->getRegister(Index + Channel);
}

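// Inline constants are encoded for free in the instruction word, while a
// literal constant occupies an extra dword. OPERAND_REG_IMM32 operands
// accept both kinds; OPERAND_REG_INLINE_C operands accept only inline
// constants.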
bool SIRegisterInfo::opCanUseLiteralConstant(unsigned OpType) const {
  return OpType == AMDGPU::OPERAND_REG_IMM32;
}

bool SIRegisterInfo::opCanUseInlineConstant(unsigned OpType) const {
  if (opCanUseLiteralConstant(OpType))
    return true;

  return OpType == AMDGPU::OPERAND_REG_INLINE_C;
}

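// These values are preloaded at fixed locations by the hardware and the
// ABI: workgroup IDs live in the SGPRs directly after the user SGPRs,
// workitem IDs in VGPR0-VGPR2, and the scratch and input pointers in fixed
// user SGPR pairs.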
unsigned SIRegisterInfo::getPreloadedValue(const MachineFunction &MF,
                                           enum PreloadedValue Value) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  switch (Value) {
  case SIRegisterInfo::TGID_X:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 0);
  case SIRegisterInfo::TGID_Y:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 1);
  case SIRegisterInfo::TGID_Z:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 2);
  case SIRegisterInfo::SCRATCH_WAVE_OFFSET:
    return AMDGPU::SReg_32RegClass.getRegister(MFI->NumUserSGPRs + 4);
  case SIRegisterInfo::SCRATCH_PTR:
    return AMDGPU::SGPR2_SGPR3;
  case SIRegisterInfo::INPUT_PTR:
    return AMDGPU::SGPR0_SGPR1;
  case SIRegisterInfo::TIDIG_X:
    return AMDGPU::VGPR0;
  case SIRegisterInfo::TIDIG_Y:
    return AMDGPU::VGPR1;
  case SIRegisterInfo::TIDIG_Z:
    return AMDGPU::VGPR2;
  }
  llvm_unreachable("unexpected preloaded value type");
}

/// \brief Returns a register that is not used at any point in the function.
///        If all registers are used, then this function will return
///        AMDGPU::NoRegister.
unsigned SIRegisterInfo::findUnusedRegister(const MachineRegisterInfo &MRI,
                                            const TargetRegisterClass *RC) const {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    if (!MRI.isPhysRegUsed(*I))
      return *I;
  }
  return AMDGPU::NoRegister;
}