//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//
#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
  : AMDGPUInstrInfo(tm),
    RI(tm) { }

const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const {
  return RI;
}
//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//
void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned DestReg, unsigned SrcReg,
                         bool KillSrc) const {

  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);
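
  // Copies wider than 32 bits have no single machine instruction on SI, so
  // they are expanded into one 32-bit move per sub-register using the
  // zero-terminated index tables below. As an illustrative (hypothetical)
  // example, a 128-bit SGPR copy
  //   s[4:7] = COPY s[0:3]
  // becomes four scalar moves:
  //   s4 = S_MOV_B32 s0; s5 = S_MOV_B32 s1;
  //   s6 = S_MOV_B32 s2; s7 = S_MOV_B32 s3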
  static const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
  };

  static const int16_t Sub0_7[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
  };

  static const int16_t Sub0_3[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
  };

  static const int16_t Sub0_2[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
  };

  static const int16_t Sub0_1[] = {
    AMDGPU::sub0, AMDGPU::sub1, 0
  };

  unsigned Opcode;
  const int16_t *SubIndices;
  if (AMDGPU::M0 == DestReg) {
    // Check if M0 isn't already set to this value
    for (MachineBasicBlock::reverse_iterator E = MBB.rend(),
         I = MachineBasicBlock::reverse_iterator(MI); I != E; ++I) {

      if (!I->definesRegister(AMDGPU::M0))
        continue;

      unsigned Opc = I->getOpcode();
      if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32)
        break;

      if (!I->readsRegister(SrcReg))
        break;

      // The copy isn't necessary
      return;
    }
  }
  if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_15;

  } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
           AMDGPU::SReg_64RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_1;

  } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_2;

  } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
           AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
           AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
           AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_15;

  } else {
    llvm_unreachable("Can't copy register!");
  }
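
  // Emit one 32-bit move per sub-register. Every move except the last also
  // carries an implicit def of the full DestReg, so liveness analysis sees
  // the wide register as defined by the whole sequence rather than only by
  // its first lane.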
  while (unsigned SubIdx = *SubIndices++) {
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));

    if (*SubIndices)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
  }
}
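
// commuteOpcode maps between an opcode and its commuted form using the
// TableGen-generated getCommuteRev/getCommuteOrig tables (e.g. an opcode
// and its "reverse" variant, such as a SUB/SUBREV pair). If the opcode has
// no commuted form, it is returned unchanged.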
unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode
  if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
    return NewOpc;

  // Try to map commuted to original opcode
  if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
    return NewOpc;

  return Opcode;
}
MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
                                              bool NewMI) const {

  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg())
    return 0;

  // Cannot commute VOP2 if src0 is SGPR.
  if (isVOP2(MI->getOpcode()) && MI->getOperand(1).isReg() &&
      RI.isSGPRClass(MRI.getRegClass(MI->getOperand(1).getReg())))
    return 0;

  if (!MI->getOperand(2).isReg()) {
    // XXX: Commute instructions with FPImm operands
    if (NewMI || MI->getOperand(2).isFPImm() ||
        (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))) {
      return 0;
    }

    // XXX: Commute VOP3 instructions with abs and neg set.
    if (isVOP3(MI->getOpcode()) &&
        (MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                        AMDGPU::OpName::abs)).getImm() ||
         MI->getOperand(AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                        AMDGPU::OpName::neg)).getImm()))
      return 0;

    unsigned Reg = MI->getOperand(1).getReg();
    MI->getOperand(1).ChangeToImmediate(MI->getOperand(2).getImm());
    MI->getOperand(2).ChangeToRegister(Reg, false);
  } else {
    MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
  }

  if (MI)
    MI->setDesc(get(commuteOpcode(MI->getOpcode())));

  return MI;
}
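
// Illustrative (hypothetical) example of the immediate path above: given
//   %vgpr2 = V_SUB_F32_e32 %vgpr0, 2   ; src1 holds an immediate
// operands 1 and 2 are swapped and the opcode is flipped to the SUBREV
// form recorded in the commute tables, leaving the immediate in src0 --
// the only VOP2 source slot that may encode a constant -- while SUBREV's
// reversed operand order preserves the original subtraction.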
MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         unsigned SrcReg) const {
  return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
                 DstReg).addReg(SrcReg);
}
bool SIInstrInfo::isMov(unsigned Opcode) const {
  switch(Opcode) {
  default: return false;
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
    return true;
  }
}
bool
SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  return RC != &AMDGPU::EXECRegRegClass;
}
int SIInstrInfo::isMIMG(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::MIMG;
}

int SIInstrInfo::isSMRD(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::SMRD;
}

bool SIInstrInfo::isVOP1(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP1;
}

bool SIInstrInfo::isVOP2(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP2;
}

bool SIInstrInfo::isVOP3(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOP3;
}

bool SIInstrInfo::isVOPC(uint16_t Opcode) const {
  return get(Opcode).TSFlags & SIInstrFlags::VOPC;
}

bool SIInstrInfo::isSALUInstr(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & SIInstrFlags::SALU;
}
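
// Inline constants are the small set of values (integers -16..64 plus a few
// floating-point constants) that the hardware encodes directly in a source
// operand field; they do not occupy the instruction's single 32-bit literal
// slot. Any other immediate must be emitted as a literal constant.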
bool SIInstrInfo::isInlineConstant(const MachineOperand &MO) const {
  if (MO.isImm()) {
    return MO.getImm() >= -16 && MO.getImm() <= 64;
  }

  if (MO.isFPImm()) {
    return MO.getFPImm()->isExactlyValue(0.0) ||
           MO.getFPImm()->isExactlyValue(0.5) ||
           MO.getFPImm()->isExactlyValue(-0.5) ||
           MO.getFPImm()->isExactlyValue(1.0) ||
           MO.getFPImm()->isExactlyValue(-1.0) ||
           MO.getFPImm()->isExactlyValue(2.0) ||
           MO.getFPImm()->isExactlyValue(-2.0) ||
           MO.getFPImm()->isExactlyValue(4.0) ||
           MO.getFPImm()->isExactlyValue(-4.0);
  }

  return false;
}
bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO) const {
  return (MO.isImm() || MO.isFPImm()) && !isInlineConstant(MO);
}
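
// The SI constant bus can feed at most one scalar value (an SGPR, M0, VCC,
// EXEC, or a literal constant) into a VALU instruction, so any VOP*
// instruction that reads more than one such value is unencodable.
// verifyInstruction enforces this, along with the per-encoding operand
// restrictions checked below.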
bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
                                    StringRef &ErrInfo) const {
  uint16_t Opcode = MI->getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

  // Verify VOP*
  if (isVOP1(Opcode) || isVOP2(Opcode) || isVOP3(Opcode) || isVOPC(Opcode)) {
    unsigned ConstantBusCount = 0;
    unsigned SGPRUsed = AMDGPU::NoRegister;
    for (int i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() &&
          !TargetRegisterInfo::isVirtualRegister(MO.getReg())) {

        // EXEC register uses the constant bus.
        if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
          ++ConstantBusCount;

        // SGPRs use the constant bus
        if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
            (!MO.isImplicit() &&
            (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
             AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
          if (SGPRUsed != MO.getReg()) {
            ++ConstantBusCount;
            SGPRUsed = MO.getReg();
          }
        }
      }
      // Literal constants use the constant bus.
      if (isLiteralConstant(MO))
        ++ConstantBusCount;
    }
    if (ConstantBusCount > 1) {
      ErrInfo = "VOP* instruction uses the constant bus more than once";
      return false;
    }
  }

  // Verify SRC1 for VOP2 and VOPC
  if (Src1Idx != -1 && (isVOP2(Opcode) || isVOPC(Opcode))) {
    const MachineOperand &Src1 = MI->getOperand(Src1Idx);
    if (Src1.isImm() || Src1.isFPImm()) {
      ErrInfo = "VOP[2C] src1 cannot be an immediate.";
      return false;
    }
  }

  // Verify VOP3
  if (isVOP3(Opcode)) {
    if (Src0Idx != -1 && isLiteralConstant(MI->getOperand(Src0Idx))) {
      ErrInfo = "VOP3 src0 cannot be a literal constant.";
      return false;
    }
    if (Src1Idx != -1 && isLiteralConstant(MI->getOperand(Src1Idx))) {
      ErrInfo = "VOP3 src1 cannot be a literal constant.";
      return false;
    }
    if (Src2Idx != -1 && isLiteralConstant(MI->getOperand(Src2Idx))) {
      ErrInfo = "VOP3 src2 cannot be a literal constant.";
      return false;
    }
  }
  return true;
}
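
// getVALUOp returns the VALU opcode that computes the same result as the
// given SALU instruction, or INSTRUCTION_LIST_END if no equivalent exists.
// moveToVALU consults this table when an SALU result needs to live in VGPRs.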
unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default: return AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
  case AMDGPU::COPY: return AMDGPU::COPY;
  case AMDGPU::PHI: return AMDGPU::PHI;
  case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
  case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
  case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
  case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
  case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
  case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
  }
}
bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
  return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
}
const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
                                                      unsigned OpNo) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  const MCInstrDesc &Desc = get(MI.getOpcode());
  if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
      Desc.OpInfo[OpNo].RegClass == -1)
    return MRI.getRegClass(MI.getOperand(OpNo).getReg());

  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return RI.getRegClass(RCID);
}
bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::REG_SEQUENCE:
    return RI.hasVGPRs(getOpRegClass(MI, 0));
  default:
    return RI.hasVGPRs(getOpRegClass(MI, OpNo));
  }
}
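
// legalizeOpWithMove makes an illegal operand legal by copying it into a
// fresh virtual register of the class the instruction requires: a plain
// COPY for register operands, S_MOV_B32 for immediates headed to an SGPR
// class, and V_MOV_B32 otherwise.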
void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
  MachineBasicBlock::iterator I = MI;
  MachineOperand &MO = MI->getOperand(OpIdx);
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
  const TargetRegisterClass *RC = RI.getRegClass(RCID);
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (MO.isReg()) {
    Opcode = AMDGPU::COPY;
  } else if (RI.isSGPRClass(RC)) {
    Opcode = AMDGPU::S_MOV_B32;
  }

  unsigned Reg = MRI.createVirtualRegister(RI.getRegClass(RCID));
  BuildMI(*MI->getParent(), I, MI->getParent()->findDebugLoc(I), get(Opcode),
          Reg).addOperand(MO);
  MO.ChangeToRegister(Reg, false);
}
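
// legalizeOperands handles three cases: VOP2 instructions whose src1 is not
// a VGPR (fixed by commuting when possible, otherwise by inserting a move),
// VOP3 instructions that read more than one distinct SGPR, and REG_SEQUENCE
// instructions whose operands disagree with the register class of the
// result.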
void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src2);

  // Legalize VOP2
  if (isVOP2(MI->getOpcode()) && Src1Idx != -1) {
    MachineOperand &Src1 = MI->getOperand(Src1Idx);
    // Legalize VOP2 instructions where src1 is not a VGPR.
    if (Src1.isImm() || Src1.isFPImm() ||
        (Src1.isReg() && RI.isSGPRClass(MRI.getRegClass(Src1.getReg())))) {
      if (MI->isCommutable()) {
        if (commuteInstruction(MI))
          return;
      }
      legalizeOpWithMove(MI, Src1Idx);
    }
  }
  // Legalize VOP3
  if (isVOP3(MI->getOpcode())) {
    int VOP3Idx[3] = {Src0Idx, Src1Idx, Src2Idx};
    unsigned SGPRReg = AMDGPU::NoRegister;
    for (unsigned i = 0; i < 3; ++i) {
      int Idx = VOP3Idx[i];
      if (Idx == -1)
        continue;
      MachineOperand &MO = MI->getOperand(Idx);

      if (MO.isReg()) {
        if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
          continue; // VGPRs are legal

        if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
          SGPRReg = MO.getReg();
          // We can use one SGPR in each VOP3 instruction.
          continue;
        }
      } else if (!isLiteralConstant(MO)) {
        // If it is not a register and not a literal constant, then it must be
        // an inline constant which is always legal.
        continue;
      }

      // If we make it this far, then the operand is not legal and we must
      // legalize it.
      legalizeOpWithMove(MI, Idx);
    }
  }
  // Legalize REG_SEQUENCE
  // The register class of the operands must be the same type as the register
  // class of the output.
  if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
    const TargetRegisterClass *RC = NULL, *SRC = NULL, *VRC = NULL;
    for (unsigned i = 1, e = MI->getNumOperands(); i != e; i += 2) {
      if (!MI->getOperand(i).isReg() ||
          !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
        continue;
      const TargetRegisterClass *OpRC =
          MRI.getRegClass(MI->getOperand(i).getReg());
      if (RI.hasVGPRs(OpRC)) {
        VRC = OpRC;
      } else {
        SRC = OpRC;
      }
    }

    // If any of the operands are VGPR registers, then they all must be,
    // otherwise we will create illegal VGPR->SGPR copies when legalizing
    // them.
    if (VRC || !RI.isSGPRClass(getOpRegClass(*MI, 0))) {
      if (!VRC) {
        assert(SRC);
        VRC = RI.getEquivalentVGPRClass(SRC);
      }
      RC = VRC;
    } else {
      RC = SRC;
    }

    // Update all the operands so they have the same type.
    for (unsigned i = 1, e = MI->getNumOperands(); i != e; i += 2) {
      if (!MI->getOperand(i).isReg() ||
          !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
        continue;
      unsigned DstReg = MRI.createVirtualRegister(RC);
      BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
              get(AMDGPU::COPY), DstReg)
              .addOperand(MI->getOperand(i));
      MI->getOperand(i).setReg(DstReg);
    }
  }
}
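
// moveToVALU rewrites an SALU instruction as its VALU equivalent and then
// iterates: once a result moves from SGPRs to VGPRs, every user that cannot
// read a VGPR in that operand position becomes illegal and is pushed onto
// the worklist, so the SALU-to-VALU conversion propagates through all
// dependent instructions.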
void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
  SmallVector<MachineInstr *, 128> Worklist;
  Worklist.push_back(&TopInst);

  while (!Worklist.empty()) {
    MachineInstr *Inst = Worklist.pop_back_val();
    unsigned NewOpcode = getVALUOp(*Inst);
    if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END)
      continue;

    MachineRegisterInfo &MRI = Inst->getParent()->getParent()->getRegInfo();

    // Use the new VALU Opcode.
    const MCInstrDesc &NewDesc = get(NewOpcode);
    Inst->setDesc(NewDesc);

    // Add the implicit register uses and definitions.
    if (NewDesc.ImplicitUses) {
      for (unsigned i = 0; NewDesc.ImplicitUses[i]; ++i) {
        Inst->addOperand(MachineOperand::CreateReg(NewDesc.ImplicitUses[i],
                                                   false, true));
      }
    }

    if (NewDesc.ImplicitDefs) {
      for (unsigned i = 0; NewDesc.ImplicitDefs[i]; ++i) {
        Inst->addOperand(MachineOperand::CreateReg(NewDesc.ImplicitDefs[i],
                                                   true, true));
      }
    }

    legalizeOperands(Inst);

    // Update the destination register class.
    const TargetRegisterClass *NewDstRC = getOpRegClass(*Inst, 0);

    switch (Inst->getOpcode()) {
      // For target instructions, getOpRegClass just returns the virtual
      // register class associated with the operand, so we need to find an
      // equivalent VGPR register class in order to move the instruction to the
      // VALU.
    case AMDGPU::COPY:
    case AMDGPU::PHI:
    case AMDGPU::REG_SEQUENCE:
      if (RI.hasVGPRs(NewDstRC))
        continue;
      NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
      if (!NewDstRC)
        continue;
      break;
    default:
      break;
    }

    unsigned DstReg = Inst->getOperand(0).getReg();
    unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
    MRI.replaceRegWith(DstReg, NewDstReg);

    for (MachineRegisterInfo::use_iterator I = MRI.use_begin(NewDstReg),
           E = MRI.use_end(); I != E; ++I) {
      MachineInstr &UseMI = *I;
      if (!canReadVGPR(UseMI, I.getOperandNo())) {
        Worklist.push_back(&UseMI);
      }
    }
  }
}
//===----------------------------------------------------------------------===//
// Indirect addressing callbacks
//===----------------------------------------------------------------------===//
unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                               unsigned Channel) const {
  // On SI, registers are not split into channels as on R600.
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::VReg_32RegClass;
}
MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
                                      getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1))
          .addReg(IndirectBaseReg, RegState::Define)
          .addOperand(I->getOperand(0))
          .addReg(IndirectBaseReg)
          .addReg(OffsetReg)
          .addImm(0)
          .addReg(ValueReg);
}
MachineInstrBuilder SIInstrInfo::buildIndirectRead(
                                   MachineBasicBlock *MBB,
                                   MachineBasicBlock::iterator I,
                                   unsigned ValueReg,
                                   unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VReg_32RegClass.getRegister(
                                      getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC))
          .addOperand(I->getOperand(0))
          .addOperand(I->getOperand(1))
          .addReg(IndirectBaseReg)
          .addReg(OffsetReg)
          .addImm(0);
}
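
// reserveIndirectRegisters keeps the register allocator away from the VGPR
// range used for indirect addressing. A register in a wider class covers
// several consecutive 32-bit lanes, so a k-dword register starting up to
// k - 1 slots before Begin still overlaps the reserved range; that is why
// each wider class starts its loop at Begin - (k - 1), clamped to zero.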
void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                           const MachineFunction &MF) const {
  int End = getIndirectIndexEnd(MF);
  int Begin = getIndirectIndexBegin(MF);

  if (End == -1)
    return;

  for (int Index = Begin; Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_32RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 1); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 2); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_96RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 3); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_128RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 7); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_256RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 15); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));
}