1 //===-- R600ControlFlowFinalizer.cpp - Finalize Control Flow Inst----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// This pass turns all control-flow pseudo instructions into native ones,
12 /// computing their address on the fly; it also sets STACK_SIZE info.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "r600cf"
16 #include "llvm/Support/Debug.h"
17 #include "llvm/Support/raw_ostream.h"
20 #include "R600Defines.h"
21 #include "R600InstrInfo.h"
22 #include "R600MachineFunctionInfo.h"
23 #include "R600RegisterInfo.h"
24 #include "llvm/CodeGen/MachineFunctionPass.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
// Lowers R600 control-flow pseudo instructions (IF/ELSE/ENDIF, WHILELOOP/
// ENDLOOP, PREDICATED_BREAK/CONTINUE, RETURN) into native hardware CF
// instructions, computing their addresses, and records the hardware stack
// size on the machine function info.
30 class R600ControlFlowFinalizer : public MachineFunctionPass {
// Generation-independent tags for CF instructions; translated into the
// R600- or Evergreen-specific opcode by getHWInstrDesc().
33 enum ControlFlowInstruction {
47 const R600InstrInfo *TII; // Target instruction info; source of MCInstrDescs.
48 unsigned MaxFetchInst; // Max instructions allowed in one fetch clause (set per generation in the ctor).
49 const AMDGPUSubtarget &ST; // Subtarget; used to select per-generation opcodes.
// Returns whether \p MI belongs in a fetch clause: texture sampling/query
// instructions and vertex/global memory reads, as listed below.
51 bool isFetch(const MachineInstr *MI) const {
52 switch (MI->getOpcode()) {
// Texture-unit fetches.
53 case AMDGPU::TEX_VTX_CONSTBUF:
54 case AMDGPU::TEX_VTX_TEXBUF:
56 case AMDGPU::TEX_GET_TEXTURE_RESINFO:
57 case AMDGPU::TEX_GET_GRADIENTS_H:
58 case AMDGPU::TEX_GET_GRADIENTS_V:
59 case AMDGPU::TEX_SET_GRADIENTS_H:
60 case AMDGPU::TEX_SET_GRADIENTS_V:
61 case AMDGPU::TEX_SAMPLE:
62 case AMDGPU::TEX_SAMPLE_C:
63 case AMDGPU::TEX_SAMPLE_L:
64 case AMDGPU::TEX_SAMPLE_C_L:
65 case AMDGPU::TEX_SAMPLE_LB:
66 case AMDGPU::TEX_SAMPLE_C_LB:
67 case AMDGPU::TEX_SAMPLE_G:
68 case AMDGPU::TEX_SAMPLE_C_G:
70 case AMDGPU::TXD_SHADOW:
// Vertex-cache fetches (Evergreen encodings).
71 case AMDGPU::VTX_READ_GLOBAL_8_eg:
72 case AMDGPU::VTX_READ_GLOBAL_32_eg:
73 case AMDGPU::VTX_READ_GLOBAL_128_eg:
74 case AMDGPU::VTX_READ_PARAM_8_eg:
75 case AMDGPU::VTX_READ_PARAM_16_eg:
76 case AMDGPU::VTX_READ_PARAM_32_eg:
77 case AMDGPU::VTX_READ_PARAM_128_eg:
// Returns whether \p MI should be ignored when counting the instructions of
// a clause. The opcode list is elided from this view — presumably
// no-op-like instructions (e.g. KILL/copies); TODO confirm against the
// full file.
84 bool IsTrivialInst(MachineInstr *MI) const {
85 switch (MI->getOpcode()) {
// Maps the generation-independent CF tag \p CFI to the MCInstrDesc of the
// native opcode for the current subtarget: Evergreen (HD5XXX and later)
// opcodes vs. R600 ones, with a Cayman-specific CF_END.
94 const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
// HD5XXX and newer use the Evergreen ("EG") encodings.
96 bool isEg = (ST.device()->getGeneration() >= AMDGPUDeviceInfo::HD5XXX);
99 Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600;
102 Opcode = isEg ? AMDGPU::CF_CALL_FS_EG : AMDGPU::CF_CALL_FS_R600;
105 Opcode = isEg ? AMDGPU::WHILE_LOOP_EG : AMDGPU::WHILE_LOOP_R600;
108 Opcode = isEg ? AMDGPU::END_LOOP_EG : AMDGPU::END_LOOP_R600;
111 Opcode = isEg ? AMDGPU::LOOP_BREAK_EG : AMDGPU::LOOP_BREAK_R600;
113 case CF_LOOP_CONTINUE:
114 Opcode = isEg ? AMDGPU::CF_CONTINUE_EG : AMDGPU::CF_CONTINUE_R600;
117 Opcode = isEg ? AMDGPU::CF_JUMP_EG : AMDGPU::CF_JUMP_R600;
120 Opcode = isEg ? AMDGPU::CF_ELSE_EG : AMDGPU::CF_ELSE_R600;
123 Opcode = isEg ? AMDGPU::POP_EG : AMDGPU::POP_R600;
// Cayman (HD6XXX) has its own CF_END encoding.
126 if (ST.device()->getGeneration() == AMDGPUDeviceInfo::HD6XXX) {
127 Opcode = AMDGPU::CF_END_CM;
130 Opcode = isEg ? AMDGPU::CF_END_EG : AMDGPU::CF_END_R600;
133 assert (Opcode && "No opcode selected");
134 return TII->get(Opcode);
// Gathers the run of fetch instructions starting at \p I into one clause:
// counts them (skipping trivial instructions), stops once the per-generation
// clause limit is exceeded, then emits a CF_TC clause header before the
// first instruction carrying the clause address and instruction count.
// Returns an iterator past the clause.
137 MachineBasicBlock::iterator
138 MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
139 unsigned CfAddress) const {
140 MachineBasicBlock::iterator ClauseHead = I;
141 unsigned AluInstCount = 0;
142 for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
// Trivial instructions don't count against the clause size.
143 if (IsTrivialInst(I))
// Hardware limits how many fetches fit in one clause.
148 if (AluInstCount > MaxFetchInst)
// Emit the clause header in front of the first fetch instruction.
151 BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
152 getHWInstrDesc(CF_TC))
153 .addImm(CfAddress) // ADDR
154 .addImm(AluInstCount); // COUNT
// Fixes up the address operand (operand 0) of \p MI by adding \p Addr to
// its current immediate, turning a placeholder/relative address into the
// final one.
157 void CounterPropagateAddr(MachineInstr *MI, unsigned Addr) const {
158 MI->getOperand(0).setImm(Addr + MI->getOperand(0).getImm());
// Applies the same address fix-up to every instruction in \p MIs.
// NOTE(review): the set is taken by value, copying it on each call; a
// const reference would do — left as-is in this view.
160 void CounterPropagateAddr(std::set<MachineInstr *> MIs, unsigned Addr)
162 for (std::set<MachineInstr *>::iterator It = MIs.begin(), E = MIs.end();
164 MachineInstr *MI = *It;
165 CounterPropagateAddr(MI, Addr);
// Converts the counted stack sub-entries \p StackSubEntry (and whether any
// push occurred, \p hasPush) into the hardware STACK_SIZE value. The
// per-generation scaling in the case bodies is elided from this view —
// TODO confirm the multipliers against the full file; the final value is
// rounded up to whole entries of four sub-entries.
169 unsigned getHWStackSize(unsigned StackSubEntry, bool hasPush) const {
170 switch (ST.device()->getGeneration()) {
171 case AMDGPUDeviceInfo::HD4XXX:
175 case AMDGPUDeviceInfo::HD5XXX:
178 case AMDGPUDeviceInfo::HD6XXX:
182 return (StackSubEntry + 3)/4; // Need ceil value of StackSubEntry/4
// Constructor: caches the instruction info and subtarget, then selects the
// per-generation fetch-clause instruction limit (MaxFetchInst).
186 R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID),
187 TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())),
188 ST(tm.getSubtarget<AMDGPUSubtarget>()) {
// NOTE(review): this local ST shadows the ST member initialized just
// above; both refer to the same subtarget, but the shadowing is
// confusing and worth cleaning up.
189 const AMDGPUSubtarget &ST = tm.getSubtarget<AMDGPUSubtarget>();
190 if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD4XXX)
// Main pass body: walks every basic block, groups fetch instructions into
// clauses, and replaces each CF pseudo instruction with its native
// equivalent while tracking the CF instruction address (CfCount) and the
// push/pop nesting depth used to size the hardware stack.
196 virtual bool runOnMachineFunction(MachineFunction &MF) {
197 unsigned MaxStack = 0; // Deepest nesting seen; feeds getHWStackSize below.
198 unsigned CurrentStack = 0; // Current push/pop nesting depth.
200 for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
202 MachineBasicBlock &MBB = *MB;
203 unsigned CfCount = 0; // Address of the next CF instruction in this block.
// Each entry pairs a loop's start address with the set of emitted
// instructions (WHILE_LOOP, breaks, continues) whose address operand
// must be patched once the loop's end address is known.
204 std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack;
// Open CF_JUMP/CF_ELSE instructions still awaiting their target address.
205 std::vector<MachineInstr * > IfThenElseStack;
206 R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
// ShaderType 1 (presumably vertex shaders — TODO confirm) begins with
// a CALL_FS to run the fetch shader.
207 if (MFI->ShaderType == 1) {
208 BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
209 getHWInstrDesc(CF_CALL_FS));
212 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
215 DEBUG(dbgs() << CfCount << ":"; I->dump(););
// Fetch instructions get wrapped in a texture clause here.
216 I = MakeFetchClause(MBB, I, 0);
221 MachineBasicBlock::iterator MI = I;
223 switch (MI->getOpcode()) {
// An ALU clause with a push deepens the stack.
224 case AMDGPU::CF_ALU_PUSH_BEFORE:
226 MaxStack = std::max(MaxStack, CurrentStack);
// Exports and RAT writes are already native CF instructions; they
// only consume a CF address.
229 case AMDGPU::EG_ExportBuf:
230 case AMDGPU::EG_ExportSwz:
231 case AMDGPU::R600_ExportBuf:
232 case AMDGPU::R600_ExportSwz:
233 case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
234 case AMDGPU::RAT_WRITE_CACHELESS_128_eg:
235 DEBUG(dbgs() << CfCount << ":"; MI->dump(););
// Loop head: emit native WHILE_LOOP and remember it on LoopStack so
// ENDLOOP can patch its jump target to the loop's end.
238 case AMDGPU::WHILELOOP: {
240 MaxStack = std::max(MaxStack, CurrentStack);
241 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
242 getHWInstrDesc(CF_WHILE_LOOP))
244 std::pair<unsigned, std::set<MachineInstr *> > Pair(CfCount,
245 std::set<MachineInstr *>());
246 Pair.second.insert(MIb);
247 LoopStack.push_back(Pair);
248 MI->eraseFromParent();
// Loop end: pop the loop entry, patch every pending instruction to
// jump here, and emit END_LOOP targeting the instruction after the
// loop start.
252 case AMDGPU::ENDLOOP: {
254 std::pair<unsigned, std::set<MachineInstr *> > Pair =
256 LoopStack.pop_back();
257 CounterPropagateAddr(Pair.second, CfCount);
258 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END_LOOP))
259 .addImm(Pair.first + 1);
260 MI->eraseFromParent();
// If: emit a CF_JUMP whose target is filled in by ELSE/ENDIF.
264 case AMDGPU::IF_PREDICATE_SET: {
265 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
266 getHWInstrDesc(CF_JUMP))
269 IfThenElseStack.push_back(MIb);
270 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
271 MI->eraseFromParent();
// Else: resolve the pending jump to here, then emit CF_ELSE and push
// it so ENDIF can patch its target in turn.
276 MachineInstr * JumpInst = IfThenElseStack.back();
277 IfThenElseStack.pop_back();
278 CounterPropagateAddr(JumpInst, CfCount);
279 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
280 getHWInstrDesc(CF_ELSE))
283 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
284 IfThenElseStack.push_back(MIb);
285 MI->eraseFromParent();
// Endif: resolve the pending jump/else past the POP we emit here.
289 case AMDGPU::ENDIF: {
291 MachineInstr *IfOrElseInst = IfThenElseStack.back();
292 IfThenElseStack.pop_back();
293 CounterPropagateAddr(IfOrElseInst, CfCount + 1);
294 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
295 getHWInstrDesc(CF_POP))
299 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
300 MI->eraseFromParent();
// Conditional break: JUMP over the break, LOOP_BREAK (patched later
// with the loop end via LoopStack), then POP the predicate stack.
304 case AMDGPU::PREDICATED_BREAK: {
307 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_JUMP))
310 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
311 getHWInstrDesc(CF_LOOP_BREAK))
313 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_POP))
316 LoopStack.back().second.insert(MIb);
317 MI->eraseFromParent();
// Continue: emit CF_CONTINUE; its target is patched at ENDLOOP.
320 case AMDGPU::CONTINUE: {
321 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
322 getHWInstrDesc(CF_LOOP_CONTINUE))
324 LoopStack.back().second.insert(MIb);
325 MI->eraseFromParent();
// Return: emit the program-terminating CF_END, plus a PAD.
329 case AMDGPU::RETURN: {
330 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END));
332 MI->eraseFromParent();
334 BuildMI(MBB, I, MBB.findDebugLoc(MI), TII->get(AMDGPU::PAD));
// Record the final hardware stack size for the whole function.
342 MFI->StackSize = getHWStackSize(MaxStack, hasPush);
// Human-readable pass name shown by -debug-pass and friends.
348 const char *getPassName() const {
349 return "R600 Control Flow Finalizer Pass";
// Pass identification: address of ID is the unique pass identifier.
353 char R600ControlFlowFinalizer::ID = 0;
// Factory used by the AMDGPU target's pass pipeline to create this pass.
358 llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) {
359 return new R600ControlFlowFinalizer(TM);