1 //===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// \brief SI Implementation of TargetInstrInfo.
13 //===----------------------------------------------------------------------===//
16 #include "SIInstrInfo.h"
17 #include "AMDGPUTargetMachine.h"
18 #include "llvm/CodeGen/MachineInstrBuilder.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/MC/MCInstrDesc.h"
// Construct the SI instruction-info object, delegating common state to the
// shared AMDGPU base class.
// NOTE(review): this listing is elided — the initializer list is cut off
// after the base-class initializer (the remaining member init, presumably
// the SIRegisterInfo member, and the constructor body are not visible here).
25 SIInstrInfo::SIInstrInfo(AMDGPUTargetMachine &tm)
26 : AMDGPUInstrInfo(tm),
// Accessor for the SI register-info object owned by this instruction info.
// NOTE(review): the body (presumably `return RI;`) and closing brace are
// elided from this listing.
30 const SIRegisterInfo &SIInstrInfo::getRegisterInfo() const {
// Lower a physical-register COPY into concrete SI move instructions.
// 32/64-bit scalar (SGPR) and 32-bit vector (VGPR) copies become a single
// S_MOV/V_MOV; wider register classes are decomposed into one 32-bit move
// per subregister, driven by the SubIndices tables below.
// NOTE(review): this listing is elided — the trailing `bool KillSrc`
// parameter, an `unsigned Opcode` declaration, several `SubIndices = ...`
// assignments, `continue`/`break`/`return` statements, and closing braces
// are not visible here.
35 SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
36 MachineBasicBlock::iterator MI, DebugLoc DL,
37 unsigned DestReg, unsigned SrcReg,
40 // If we are trying to copy to or from SCC, there is a bug somewhere else in
41 // the backend. While it may be theoretically possible to do this, it should
42 // never be necessary.
43 assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);
// Zero-terminated subregister-index tables used to split wide copies into
// 32-bit pieces: 512-bit (16 subregs), 256-bit (8), 128-bit (4), 64-bit (2).
45 const int16_t Sub0_15[] = {
46 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
47 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
48 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
49 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
52 const int16_t Sub0_7[] = {
53 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
54 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
57 const int16_t Sub0_3[] = {
58 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
61 const int16_t Sub0_1[] = {
62 AMDGPU::sub0, AMDGPU::sub1, 0
// Points at one of the tables above once the register class is identified.
66 const int16_t *SubIndices;
// Special case for the M0 hardware register: scan backwards from the copy's
// insertion point looking for the most recent definition of M0; if that def
// is a COPY/S_MOV_B32 that already read SrcReg, M0 already holds the value
// and the copy can be skipped.
68 if (AMDGPU::M0 == DestReg) {
69 // Check if M0 isn't already set to this value
70 for (MachineBasicBlock::reverse_iterator E = MBB.rend(),
71 I = MachineBasicBlock::reverse_iterator(MI); I != E; ++I) {
// Skip instructions that do not define M0 (elided `continue` presumed).
73 if (!I->definesRegister(AMDGPU::M0))
// M0 was last written by something other than a plain move — give up on
// the redundancy check (elided `break` presumed).
76 unsigned Opc = I->getOpcode();
77 if (Opc != TargetOpcode::COPY && Opc != AMDGPU::S_MOV_B32)
80 if (!I->readsRegister(SrcReg))
83 // The copy isn't necessary
// Dispatch on the destination register class. 32/64-bit scalar copies are
// emitted directly; wider classes only select Opcode/SubIndices here and
// fall through to the per-subregister loop at the bottom.
88 if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
89 assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
90 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
91 .addReg(SrcReg, getKillRegState(KillSrc));
94 } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
95 assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
96 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
97 .addReg(SrcReg, getKillRegState(KillSrc));
100 } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
101 assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
102 Opcode = AMDGPU::S_MOV_B32;
// NOTE(review): `SubIndices = Sub0_3;` appears elided here.
105 } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
106 assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
107 Opcode = AMDGPU::S_MOV_B32;
// NOTE(review): `SubIndices = Sub0_7;` appears elided here.
110 } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
111 assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
112 Opcode = AMDGPU::S_MOV_B32;
113 SubIndices = Sub0_15;
// Vector destinations: the source may be either a VGPR or an SGPR of the
// same width (V_MOV_B32 can read a scalar source).
115 } else if (AMDGPU::VReg_32RegClass.contains(DestReg)) {
116 assert(AMDGPU::VReg_32RegClass.contains(SrcReg) ||
117 AMDGPU::SReg_32RegClass.contains(SrcReg));
118 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
119 .addReg(SrcReg, getKillRegState(KillSrc));
122 } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
123 assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
124 AMDGPU::SReg_64RegClass.contains(SrcReg));
125 Opcode = AMDGPU::V_MOV_B32_e32;
// NOTE(review): `SubIndices = Sub0_1;` appears elided here.
128 } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
129 assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
130 AMDGPU::SReg_128RegClass.contains(SrcReg));
131 Opcode = AMDGPU::V_MOV_B32_e32;
134 } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
135 assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
136 AMDGPU::SReg_256RegClass.contains(SrcReg));
137 Opcode = AMDGPU::V_MOV_B32_e32;
140 } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
141 assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
142 AMDGPU::SReg_512RegClass.contains(SrcReg));
143 Opcode = AMDGPU::V_MOV_B32_e32;
144 SubIndices = Sub0_15;
// No register class matched: copying this pair is unsupported.
147 llvm_unreachable("Can't copy register!");
// Wide-copy expansion: emit one move per subregister until the table's 0
// terminator. The final implicit def of the full DestReg keeps liveness
// information correct for the super-register.
150 while (unsigned SubIdx = *SubIndices++) {
151 MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
152 get(Opcode), RI.getSubReg(DestReg, SubIdx));
154 Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));
157 Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
// Map an opcode to its operand-commuted counterpart using the generated
// AMDGPU commute tables, trying the reverse mapping first and then the
// forward one.
// NOTE(review): this listing is elided — the `NewOpc` declaration, the
// `return NewOpc;` statements after each successful lookup, and the
// fallthrough return of the original opcode are not visible here.
161 unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const {
165 // Try to map original to commuted opcode
166 if ((NewOpc = AMDGPU::getCommuteRev(Opcode)) != -1)
169 // Try to map commuted to original opcode
170 if ((NewOpc = AMDGPU::getCommuteOrig(Opcode)) != -1)
// Commute the two source operands of MI. Bails out unless operands 1 and 2
// are both registers (immediates etc. cannot be swapped this way); otherwise
// lets the target-independent implementation perform the swap and then
// patches the descriptor to the commuted opcode via commuteOpcode().
// NOTE(review): this listing is elided — the `bool NewMI` parameter line,
// the `return 0;` on the bail-out path, and the final `return MI;` are not
// visible here.
176 MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
179 if (MI->getNumOperands() < 3 || !MI->getOperand(1).isReg() ||
180 !MI->getOperand(2).isReg())
183 MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
// A successful commute may require the reversed-operand opcode variant.
186 MI->setDesc(get(commuteOpcode(MI->getOpcode())));
// Create a free-standing (not yet inserted) V_MOV_B32_e32 that defines
// DstReg, for materializing an immediate.
// NOTE(review): this listing is elided — the immediate parameter in the
// signature, the `MIB.addImm(...)` call, and the `return MI;` are not
// visible here.
191 MachineInstr * SIInstrInfo::getMovImmInstr(MachineFunction *MF, unsigned DstReg,
193 MachineInstr * MI = MF->CreateMachineInstr(get(AMDGPU::V_MOV_B32_e32), DebugLoc());
194 MachineInstrBuilder MIB(*MF, MI);
195 MIB.addReg(DstReg, RegState::Define);
// Return true when Opcode is one of the SI register-move instructions
// (scalar S_MOV_B32/B64 or vector V_MOV_B32 e32/e64 encodings).
// NOTE(review): this listing is elided — the `switch (Opcode) {` header,
// the `return true;` for the listed cases, and the closing braces are not
// visible here.
202 bool SIInstrInfo::isMov(unsigned Opcode) const {
204 default: return false;
205 case AMDGPU::S_MOV_B32:
206 case AMDGPU::S_MOV_B64:
207 case AMDGPU::V_MOV_B32_e32:
208 case AMDGPU::V_MOV_B32_e64:
// Definitions of any register class may be moved freely, except for the
// EXEC register class (EXEC controls lane masking, so its defs must stay
// put).
// NOTE(review): the return type line of this definition is elided from the
// listing.
214 SIInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
215 return RC != &AMDGPU::EXECRegRegClass;
218 //===----------------------------------------------------------------------===//
219 // Indirect addressing callbacks
220 //===----------------------------------------------------------------------===//
// Compute the indirect-addressing index for a register/channel pair. SI
// only supports channel 0 here (enforced by the assert).
// NOTE(review): this listing is elided — the return statement (presumably
// returning RegIndex; confirm against the full source) and closing brace
// are not visible here.
222 unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
223 unsigned Channel) const {
224 assert(Channel == 0);
// Indirect-addressing hook; not implemented for SI — aborts if reached.
// NOTE(review): closing brace elided from this listing.
229 int SIInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
230 llvm_unreachable("Unimplemented");
// Indirect-addressing hook; not implemented for SI — aborts if reached.
// NOTE(review): closing brace elided from this listing.
233 int SIInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
234 llvm_unreachable("Unimplemented");
// Register class used to store through an indirect address; not implemented
// for SI — aborts if reached.
// NOTE(review): closing brace elided from this listing.
237 const TargetRegisterClass *SIInstrInfo::getIndirectAddrStoreRegClass(
238 unsigned SourceReg) const {
239 llvm_unreachable("Unimplemented");
// Register class used to load through an indirect address; not implemented
// for SI — aborts if reached.
// NOTE(review): closing brace elided from this listing.
242 const TargetRegisterClass *SIInstrInfo::getIndirectAddrLoadRegClass() const {
243 llvm_unreachable("Unimplemented");
// Build an indirect-write instruction sequence; not implemented for SI —
// aborts if reached.
// NOTE(review): this listing is elided — a parameter line (presumably the
// value register, original line 249) and the closing brace are not visible
// here.
246 MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
247 MachineBasicBlock *MBB,
248 MachineBasicBlock::iterator I,
250 unsigned Address, unsigned OffsetReg) const {
251 llvm_unreachable("Unimplemented");
// Build an indirect-read instruction sequence; not implemented for SI —
// aborts if reached.
// NOTE(review): this listing is elided — a parameter line (presumably the
// destination register, original line 257) and the closing brace are not
// visible here.
254 MachineInstrBuilder SIInstrInfo::buildIndirectRead(
255 MachineBasicBlock *MBB,
256 MachineBasicBlock::iterator I,
258 unsigned Address, unsigned OffsetReg) const {
259 llvm_unreachable("Unimplemented");
// Super-register class covering indirect-addressable registers; not
// implemented for SI — aborts if reached.
// NOTE(review): the function body after the llvm_unreachable (closing
// brace) is elided from this listing.
262 const TargetRegisterClass *SIInstrInfo::getSuperIndirectRegClass() const {
263 llvm_unreachable("Unimplemented");