//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;
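
// The defines below select which TableGen'd pieces of AMDGPUGenInstrInfo.inc
// get pulled in: the generated constructor/destructor, the named-operand
// helpers such as AMDGPU::getNamedOperandIdx(), and the instruction mapping
// tables used further down in this file.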
#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

// Pin the vtable to this file.
void AMDGPUInstrInfo::anchor() {}

AMDGPUInstrInfo::AMDGPUInstrInfo(const AMDGPUSubtarget &st)
    : AMDGPUGenInstrInfo(-1, -1), RI(st), ST(st) {}

const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}

bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                            unsigned &SrcReg, unsigned &DstReg,
                                            unsigned &SubIdx) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isStoreFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasStoreFromStackSlot(const MachineInstr *MI,
                                            const MachineMemOperand *&MMO,
                                            int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

MachineInstr *
AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                       MachineBasicBlock::iterator &MBBI,
                                       LiveVariables *LV) const {
  // TODO: Implement this function
  return nullptr;
}

bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
                                         MachineBasicBlock &MBB) const {
  while (iter != MBB.end()) {
    switch (iter->getOpcode()) {
    default:
      break;
    case AMDGPU::BRANCH_COND_i32:
    case AMDGPU::BRANCH_COND_f32:
    case AMDGPU::BRANCH:
      return true;
    }
    ++iter;
  }
  return false;
}

void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned SrcReg, bool isKill,
                                     int FrameIndex,
                                     const TargetRegisterClass *RC,
                                     const TargetRegisterInfo *TRI) const {
  llvm_unreachable("Not Implemented");
}

void
AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned DestReg, int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  llvm_unreachable("Not Implemented");
}
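
// Expand the REGISTER_LOAD/REGISTER_STORE pseudo instructions that survive
// until after register allocation. For example (operand layout assumed from
// the named operands below), a register load whose addr operand is
// INDIRECT_BASE_ADDR becomes a plain move from the indirect address register
// selected by calculateIndirectAddress(RegIndex, Channel); any other offset
// register instead produces an indirect read built by buildIndirectRead().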
bool AMDGPUInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  int OffsetOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                               AMDGPU::OpName::addr);
  // addr is a custom operand with multiple MI operands, and only the
  // first MI operand is given a name.
  int RegOpIdx = OffsetOpIdx + 1;
  int ChanOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                             AMDGPU::OpName::chan);

  if (isRegisterLoad(*MI)) {
    int DstOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                              AMDGPU::OpName::dst);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                    getIndirectAddrRegClass()->getRegister(Address));
    } else {
      buildIndirectRead(MBB, MI, MI->getOperand(DstOpIdx).getReg(),
                        Address, OffsetReg);
    }
  } else if (isRegisterStore(*MI)) {
    int ValOpIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                              AMDGPU::OpName::val);
    unsigned RegIndex = MI->getOperand(RegOpIdx).getImm();
    unsigned Channel = MI->getOperand(ChanOpIdx).getImm();
    unsigned Address = calculateIndirectAddress(RegIndex, Channel);
    unsigned OffsetReg = MI->getOperand(OffsetOpIdx).getReg();
    if (OffsetReg == AMDGPU::INDIRECT_BASE_ADDR) {
      buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                    MI->getOperand(ValOpIdx).getReg());
    } else {
      buildIndirectWrite(MBB, MI, MI->getOperand(ValOpIdx).getReg(),
                         calculateIndirectAddress(RegIndex, Channel),
                         OffsetReg);
    }
  } else {
    return false;
  }

  MBB->erase(MI);
  return true;
}
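
// Memory operand folding is not implemented for AMD GPUs; the hooks below are
// conservative stubs that reject every fold and unfold request.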
MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       int FrameIndex) const {
  // TODO: Implement this function
  return nullptr;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return nullptr;
}

bool
AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                     unsigned Reg, bool UnfoldLoad,
                                     bool UnfoldStore,
                                     SmallVectorImpl<MachineInstr*> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                     SmallVectorImpl<SDNode*> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                            bool UnfoldLoad, bool UnfoldStore,
                                            unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::enableClusterLoads() const {
  return true;
}

bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                              int64_t Offset1, int64_t Offset2,
                                              unsigned NumLoads) const {
  assert(Offset2 > Offset1 &&
         "Second offset should be larger than first offset!");
  // If we have fewer than 16 loads in a row, and the offsets are within 16,
  // then schedule together.
  // TODO: Make the loads schedule near if it fits in a cacheline.
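  // Example: neighboring loads at offsets 0 and 8 are scheduled together
  // (8 - 0 < 16), while loads at offsets 0 and 32 are not.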
  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
}

bool
AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
    const {
  // TODO: Implement this function
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                   const SmallVectorImpl<MachineOperand> &Pred2)
    const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                       std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}

bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}
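
// Find the first indirect-address register index not occupied by a function
// live-in: scan the live-ins that belong to the indirect address register
// class and return one past the highest register index found among them.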
int AMDGPUInstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  int Offset = -1;

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
  for (MachineRegisterInfo::livein_iterator LI = MRI.livein_begin(),
                                            LE = MRI.livein_end();
       LI != LE; ++LI) {
    unsigned Reg = LI->first;
    if (TargetRegisterInfo::isVirtualRegister(Reg) ||
        !IndirectRC->contains(Reg))
      continue;

    unsigned RegIndex;
    unsigned RegEnd;
    for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
         ++RegIndex) {
      if (IndirectRC->getRegister(RegIndex) == Reg)
        break;
    }
    Offset = std::max(Offset, (int)RegIndex);
  }

  return Offset + 1;
}

int AMDGPUInstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // Variable sized objects are not supported.
  assert(!MFI->hasVarSizedObjects());

  if (MFI->getNumObjects() == 0) {
    return -1;
  }

  Offset = MF.getTarget().getFrameLowering()->getFrameIndexOffset(MF, -1);

  return getIndirectIndexBegin(MF) + Offset;
}
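
// Map a MIMG opcode to the variant that writes only the requested number of
// channels; an unhandled channel count leaves the opcode unchanged.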
int AMDGPUInstrInfo::getMaskedMIMGOp(uint16_t Opcode, unsigned Channels) const {
  switch (Channels) {
  default: return Opcode;
  case 1: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_1);
  case 2: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_2);
  case 3: return AMDGPU::getMaskedMIMGOp(Opcode, AMDGPU::Channels_3);
  }
}

// Wrapper for the Tablegen'd function. enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
namespace llvm {
namespace AMDGPU {
int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, (enum Subtarget)Gen);
}
} // namespace AMDGPU
} // namespace llvm
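
// Illustrative call site (hypothetical; real callers live in the
// subtarget-specific InstrInfo implementations):
//   int MCOp = AMDGPU::getMCOpcode(MI.getOpcode(), ST.getGeneration());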