1 //===-- R600ControlFlowFinalizer.cpp - Finalize Control Flow Inst----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// This pass turns all control flow pseudo instructions into native ones,
12 /// computing their addresses on the fly; it also sets the STACK_SIZE info.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "r600cf"
16 #include "llvm/Support/Debug.h"
17 #include "llvm/Support/raw_ostream.h"
20 #include "R600Defines.h"
21 #include "R600InstrInfo.h"
22 #include "R600MachineFunctionInfo.h"
23 #include "R600RegisterInfo.h"
24 #include "llvm/CodeGen/MachineFunctionPass.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
// MachineFunction pass that lowers control-flow pseudo instructions to the
// native R600/Evergreen control-flow instructions and records stack usage.
30 class R600ControlFlowFinalizer : public MachineFunctionPass {
// Pseudo control-flow opcodes handled by this pass; each is mapped to a
// generation-specific hardware opcode by getHWInstrDesc().
33 enum ControlFlowInstruction {
// Target instruction info, used to look up MCInstrDescs and build instrs.
46 const R600InstrInfo *TII;
// Maximum number of instructions allowed in a single fetch clause
// (initialized in the constructor from the subtarget generation).
47 unsigned MaxFetchInst;
// Subtarget; queried for the device generation in getHWInstrDesc().
48 const AMDGPUSubtarget &ST;
// Returns true when MI is a texture- or vertex-fetch instruction, i.e. one
// that must be grouped into a fetch clause by MakeFetchClause().
50 bool isFetch(const MachineInstr *MI) const {
51 switch (MI->getOpcode()) {
// Texture-side fetch opcodes.
52 case AMDGPU::TEX_VTX_CONSTBUF:
53 case AMDGPU::TEX_VTX_TEXBUF:
55 case AMDGPU::TEX_GET_TEXTURE_RESINFO:
56 case AMDGPU::TEX_GET_GRADIENTS_H:
57 case AMDGPU::TEX_GET_GRADIENTS_V:
58 case AMDGPU::TEX_SET_GRADIENTS_H:
59 case AMDGPU::TEX_SET_GRADIENTS_V:
60 case AMDGPU::TEX_SAMPLE:
61 case AMDGPU::TEX_SAMPLE_C:
62 case AMDGPU::TEX_SAMPLE_L:
63 case AMDGPU::TEX_SAMPLE_C_L:
64 case AMDGPU::TEX_SAMPLE_LB:
65 case AMDGPU::TEX_SAMPLE_C_LB:
66 case AMDGPU::TEX_SAMPLE_G:
67 case AMDGPU::TEX_SAMPLE_C_G:
69 case AMDGPU::TXD_SHADOW:
// Vertex-fetch opcodes (Evergreen variants).
70 case AMDGPU::VTX_READ_GLOBAL_8_eg:
71 case AMDGPU::VTX_READ_GLOBAL_32_eg:
72 case AMDGPU::VTX_READ_GLOBAL_128_eg:
73 case AMDGPU::VTX_READ_PARAM_8_eg:
74 case AMDGPU::VTX_READ_PARAM_16_eg:
75 case AMDGPU::VTX_READ_PARAM_32_eg:
76 case AMDGPU::VTX_READ_PARAM_128_eg:
// Returns whether MI should be ignored when counting instructions for a
// clause in MakeFetchClause(). NOTE(review): the switch cases that define
// which opcodes are "trivial" are not visible in this view — confirm
// against the full switch body.
83 bool IsTrivialInst(MachineInstr *MI) const {
84 switch (MI->getOpcode()) {
// Maps a ControlFlowInstruction pseudo-opcode to the hardware instruction
// descriptor for the current subtarget generation: R600-family opcodes for
// generations up to HD4XXX, Evergreen (EG) opcodes otherwise.
93 const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
// R600-family (pre-Evergreen) opcodes.
94 if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD4XXX) {
97 return TII->get(AMDGPU::CF_TC_R600);
99 return TII->get(AMDGPU::CF_CALL_FS_R600);
101 return TII->get(AMDGPU::WHILE_LOOP_R600);
103 return TII->get(AMDGPU::END_LOOP_R600);
105 return TII->get(AMDGPU::LOOP_BREAK_R600);
106 case CF_LOOP_CONTINUE:
107 return TII->get(AMDGPU::CF_CONTINUE_R600);
109 return TII->get(AMDGPU::CF_JUMP_R600);
111 return TII->get(AMDGPU::CF_ELSE_R600);
113 return TII->get(AMDGPU::POP_R600);
// Evergreen and later opcodes.
118 return TII->get(AMDGPU::CF_TC_EG);
120 return TII->get(AMDGPU::CF_CALL_FS_EG);
122 return TII->get(AMDGPU::WHILE_LOOP_EG);
124 return TII->get(AMDGPU::END_LOOP_EG);
126 return TII->get(AMDGPU::LOOP_BREAK_EG);
127 case CF_LOOP_CONTINUE:
128 return TII->get(AMDGPU::CF_CONTINUE_EG);
130 return TII->get(AMDGPU::CF_JUMP_EG);
132 return TII->get(AMDGPU::CF_ELSE_EG);
134 return TII->get(AMDGPU::POP_EG);
// Groups the fetch instructions starting at I into a single fetch clause:
// scans forward from I, counting non-trivial instructions, and emits a
// CF_TC (texture clause) instruction before the clause head carrying the
// clause address and instruction count. Returns an iterator past the
// clause (the exact return expression is not visible in this view).
139 MachineBasicBlock::iterator
140 MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
141 unsigned CfAddress) const {
142 MachineBasicBlock::iterator ClauseHead = I;
143 unsigned AluInstCount = 0;
144 for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
// Trivial instructions do not count toward the clause size.
145 if (IsTrivialInst(I))
// Stop growing the clause once the hardware limit is exceeded.
150 if (AluInstCount > MaxFetchInst)
// Emit the clause header before the first fetch instruction.
153 BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
154 getHWInstrDesc(CF_TC))
155 .addImm(CfAddress) // ADDR
156 .addImm(AluInstCount); // COUNT
// Patches the (immediate) address operand of MI by adding Addr to its
// current value — used to fix up forward-referencing CF instructions once
// their target address is known.
159 void CounterPropagateAddr(MachineInstr *MI, unsigned Addr) const {
160 MI->getOperand(0).setImm(Addr + MI->getOperand(0).getImm());
// Applies the address fix-up to every instruction in MIs (e.g. all the
// break/continue instructions collected for a loop).
// NOTE(review): MIs is taken by value, copying the whole std::set on every
// call — should be `const std::set<MachineInstr *> &`. Not changed here
// because the signature's continuation line is not visible in this view.
162 void CounterPropagateAddr(std::set<MachineInstr *> MIs, unsigned Addr)
164 for (std::set<MachineInstr *>::iterator It = MIs.begin(), E = MIs.end();
166 MachineInstr *MI = *It;
167 CounterPropagateAddr(MI, Addr);
// Constructor: caches the target's R600 instruction info and subtarget,
// then sets MaxFetchInst according to the device generation.
// NOTE(review): the local `ST` below shadows the member `ST` initialized
// in the init-list with the exact same value — redundant but harmless;
// not removed here because this is a documentation-only pass.
172 R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID),
173 TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())),
174 ST(tm.getSubtarget<AMDGPUSubtarget>()) {
175 const AMDGPUSubtarget &ST = tm.getSubtarget<AMDGPUSubtarget>();
// Pre-Evergreen devices take a smaller fetch-clause limit (the assigned
// values are on lines not visible in this view — confirm in full source).
176 if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD4XXX)
// Main pass entry point: walks every basic block, replaces control-flow
// pseudo instructions with native CF instructions (via getHWInstrDesc),
// back-patches their target addresses once known, groups fetch
// instructions into clauses, and finally records the maximum stack depth
// via a STACK_SIZE instruction.
182 virtual bool runOnMachineFunction(MachineFunction &MF) {
// Deepest nesting observed across the function; emitted as STACK_SIZE.
183 unsigned MaxStack = 0;
// Current control-flow nesting depth while scanning.
184 unsigned CurrentStack = 0;
185 for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
187 MachineBasicBlock &MBB = *MB;
// Running count of emitted CF instructions; used as the address that
// forward references are later patched with (CounterPropagateAddr).
188 unsigned CfCount = 0;
// Stack of open loops: (address of the WHILE_LOOP instr, set of
// break/continue instrs to back-patch when the loop closes).
189 std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack;
// Stack of pending JUMP/ELSE instructions awaiting their targets.
190 std::vector<MachineInstr * > IfThenElseStack;
191 R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
// ShaderType == 1: prepend a CALL_FS at the very start of the block.
// NOTE(review): 1 presumably denotes a vertex shader — confirm against
// the ShaderType enum.
192 if (MFI->ShaderType == 1) {
193 BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
194 getHWInstrDesc(CF_CALL_FS));
197 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
200 DEBUG(dbgs() << CfCount << ":"; I->dump(););
// Fetch instructions are folded into a fetch clause.
201 I = MakeFetchClause(MBB, I, 0);
206 MachineBasicBlock::iterator MI = I;
208 switch (MI->getOpcode()) {
// ALU clause with push: deepens the CF stack (the increment of
// CurrentStack is on an elided line — confirm in full source).
209 case AMDGPU::CF_ALU_PUSH_BEFORE:
211 MaxStack = std::max(MaxStack, CurrentStack);
// Export / RAT-write instructions: already native, only counted.
213 case AMDGPU::EG_ExportBuf:
214 case AMDGPU::EG_ExportSwz:
215 case AMDGPU::R600_ExportBuf:
216 case AMDGPU::R600_ExportSwz:
217 case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
218 case AMDGPU::RAT_WRITE_CACHELESS_128_eg:
219 DEBUG(dbgs() << CfCount << ":"; MI->dump(););
// Loop header: emit native WHILE_LOOP, remember its address and an
// (initially one-element) set of instructions to patch at ENDLOOP.
222 case AMDGPU::WHILELOOP: {
224 MaxStack = std::max(MaxStack, CurrentStack);
225 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
226 getHWInstrDesc(CF_WHILE_LOOP))
228 std::pair<unsigned, std::set<MachineInstr *> > Pair(CfCount,
229 std::set<MachineInstr *>());
230 Pair.second.insert(MIb);
231 LoopStack.push_back(Pair);
232 MI->eraseFromParent();
// Loop end: back-patch every recorded instruction with the current
// address, then emit END_LOOP pointing just past the loop header.
236 case AMDGPU::ENDLOOP: {
238 std::pair<unsigned, std::set<MachineInstr *> > Pair =
240 LoopStack.pop_back();
241 CounterPropagateAddr(Pair.second, CfCount);
242 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END_LOOP))
243 .addImm(Pair.first + 1);
244 MI->eraseFromParent();
// If: emit a JUMP whose target is patched at the matching ELSE/ENDIF.
248 case AMDGPU::IF_PREDICATE_SET: {
249 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
250 getHWInstrDesc(CF_JUMP))
253 IfThenElseStack.push_back(MIb);
254 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
255 MI->eraseFromParent();
// Else: patch the pending JUMP to land here, then emit an ELSE that
// is itself patched at the matching ENDIF.
260 MachineInstr * JumpInst = IfThenElseStack.back();
261 IfThenElseStack.pop_back();
262 CounterPropagateAddr(JumpInst, CfCount);
263 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
264 getHWInstrDesc(CF_ELSE))
267 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
268 IfThenElseStack.push_back(MIb);
269 MI->eraseFromParent();
// Endif: patch the pending JUMP/ELSE past this point and emit a POP
// to unwind one level of the CF stack.
273 case AMDGPU::ENDIF: {
275 MachineInstr *IfOrElseInst = IfThenElseStack.back();
276 IfThenElseStack.pop_back();
277 CounterPropagateAddr(IfOrElseInst, CfCount + 1);
278 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
279 getHWInstrDesc(CF_POP))
283 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
284 MI->eraseFromParent();
// Predicated break: JUMP + LOOP_BREAK + POP; the LOOP_BREAK's target
// is patched when the enclosing loop's ENDLOOP is reached.
288 case AMDGPU::PREDICATED_BREAK: {
291 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_JUMP))
294 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
295 getHWInstrDesc(CF_LOOP_BREAK))
297 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_POP))
300 LoopStack.back().second.insert(MIb);
301 MI->eraseFromParent();
// Continue: emit LOOP_CONTINUE, patched at the enclosing ENDLOOP.
304 case AMDGPU::CONTINUE: {
305 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
306 getHWInstrDesc(CF_LOOP_CONTINUE))
308 LoopStack.back().second.insert(MIb);
309 MI->eraseFromParent();
// Record the maximum CF stack depth at the top of the block.
317 BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
318 TII->get(AMDGPU::STACK_SIZE))
// Human-readable pass name shown by -debug-pass and similar tooling.
325 const char *getPassName() const {
326 return "R600 Control Flow Finalizer Pass";
// Pass identification: the address of ID uniquely identifies the pass
// (standard LLVM pass-registration idiom); the value itself is unused.
330 char R600ControlFlowFinalizer::ID = 0;
335 llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) {
336 return new R600ControlFlowFinalizer(TM);