1 //===-- R600ControlFlowFinalizer.cpp - Finalize Control Flow Inst----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// This pass turns all control-flow pseudo instructions into native ones,
12 /// computing their addresses on the fly; it also sets the STACK_SIZE info.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "r600cf"
16 #include "llvm/Support/Debug.h"
17 #include "llvm/Support/raw_ostream.h"
20 #include "R600Defines.h"
21 #include "R600InstrInfo.h"
22 #include "R600MachineFunctionInfo.h"
23 #include "R600RegisterInfo.h"
24 #include "llvm/CodeGen/MachineFunctionPass.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
// Machine-function pass that lowers control-flow pseudo instructions to the
// native R600/Evergreen control-flow instructions and records stack usage.
// NOTE(review): this listing is elided (original line numbers are
// non-contiguous); enum members and some declarations are missing here.
30 class R600ControlFlowFinalizer : public MachineFunctionPass {
// Pseudo control-flow instruction kinds handled by this pass. The
// enumerators (CF_TC, CF_CALL_FS, CF_WHILE_LOOP, ... used below) are in the
// omitted lines of this listing.
33 enum ControlFlowInstruction {
// Target instruction info, used to fetch MCInstrDescs and build instructions.
47 const R600InstrInfo *TII;
// Maximum number of instructions allowed in one fetch clause; apparently
// chosen in the constructor from the device generation — the actual
// assignments are elided in this listing, TODO confirm.
48 unsigned MaxFetchInst;
// Subtarget reference, used to select R600 vs. Evergreen/Cayman opcodes.
49 const AMDGPUSubtarget &ST;
// Returns whether MI is a fetch-clause instruction, by matching its opcode
// against the texture-sample (TEX_*) and vertex-read (VTX_READ_*) opcodes
// listed below.
// NOTE(review): the listing is elided — the `return true;` for the matched
// cases and the `default: return false;` arm are presumably in the omitted
// lines; confirm against the full source.
51 bool isFetch(const MachineInstr *MI) const {
52 switch (MI->getOpcode()) {
// Constant-buffer / texture-buffer reads.
53 case AMDGPU::TEX_VTX_CONSTBUF:
54 case AMDGPU::TEX_VTX_TEXBUF:
// Texture queries and gradient instructions.
56 case AMDGPU::TEX_GET_TEXTURE_RESINFO:
57 case AMDGPU::TEX_GET_GRADIENTS_H:
58 case AMDGPU::TEX_GET_GRADIENTS_V:
59 case AMDGPU::TEX_SET_GRADIENTS_H:
60 case AMDGPU::TEX_SET_GRADIENTS_V:
// Texture sampling variants (C = compare/shadow, L/LB = LOD forms,
// G = gradients), per the opcode suffixes.
61 case AMDGPU::TEX_SAMPLE:
62 case AMDGPU::TEX_SAMPLE_C:
63 case AMDGPU::TEX_SAMPLE_L:
64 case AMDGPU::TEX_SAMPLE_C_L:
65 case AMDGPU::TEX_SAMPLE_LB:
66 case AMDGPU::TEX_SAMPLE_C_LB:
67 case AMDGPU::TEX_SAMPLE_G:
68 case AMDGPU::TEX_SAMPLE_C_G:
70 case AMDGPU::TXD_SHADOW:
// Evergreen vertex reads from global and parameter memory.
71 case AMDGPU::VTX_READ_GLOBAL_8_eg:
72 case AMDGPU::VTX_READ_GLOBAL_32_eg:
73 case AMDGPU::VTX_READ_GLOBAL_128_eg:
74 case AMDGPU::VTX_READ_PARAM_8_eg:
75 case AMDGPU::VTX_READ_PARAM_16_eg:
76 case AMDGPU::VTX_READ_PARAM_32_eg:
77 case AMDGPU::VTX_READ_PARAM_128_eg:
// Predicate on MI's opcode; the case labels and return values are in the
// omitted lines of this listing, so which opcodes count as "trivial"
// (i.e. not counted toward clause limits, per the use in MakeFetchClause)
// cannot be confirmed from here — TODO check full source.
// NOTE(review): naming is inconsistent with `isFetch` above
// (IsTrivialInst vs. isFetch capitalization).
84 bool IsTrivialInst(MachineInstr *MI) const {
85 switch (MI->getOpcode()) {
// Maps a ControlFlowInstruction pseudo kind (CFI) to the generation-specific
// native opcode and returns its MCInstrDesc via TII.
// NOTE(review): the listing is elided — the switch statement, most `case`
// labels, `break`s, and the declaration/zero-initialization of `Opcode` are
// in omitted lines. Comments below tag each assignment with the case it
// presumably belongs to, based on the opcode names; confirm in full source.
94 const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
// Evergreen (HD5XXX) and newer use the *_EG opcode variants.
96 bool isEg = (ST.device()->getGeneration() >= AMDGPUDeviceInfo::HD5XXX);
// CF_TC (texture clause):
99 Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600;
// CF_CALL_FS (fetch-shader call):
102 Opcode = isEg ? AMDGPU::CF_CALL_FS_EG : AMDGPU::CF_CALL_FS_R600;
// CF_WHILE_LOOP:
105 Opcode = isEg ? AMDGPU::WHILE_LOOP_EG : AMDGPU::WHILE_LOOP_R600;
// CF_END_LOOP:
108 Opcode = isEg ? AMDGPU::END_LOOP_EG : AMDGPU::END_LOOP_R600;
// CF_LOOP_BREAK:
111 Opcode = isEg ? AMDGPU::LOOP_BREAK_EG : AMDGPU::LOOP_BREAK_R600;
113 case CF_LOOP_CONTINUE:
114 Opcode = isEg ? AMDGPU::CF_CONTINUE_EG : AMDGPU::CF_CONTINUE_R600;
// CF_JUMP:
117 Opcode = isEg ? AMDGPU::CF_JUMP_EG : AMDGPU::CF_JUMP_R600;
// CF_ELSE:
120 Opcode = isEg ? AMDGPU::CF_ELSE_EG : AMDGPU::CF_ELSE_R600;
// CF_POP:
123 Opcode = isEg ? AMDGPU::POP_EG : AMDGPU::POP_R600;
// CF_END: Cayman (HD6XXX) has its own end-of-program opcode.
126 if (ST.device()->getGeneration() == AMDGPUDeviceInfo::HD6XXX) {
127 Opcode = AMDGPU::CF_END_CM;
130 Opcode = isEg ? AMDGPU::CF_END_EG : AMDGPU::CF_END_R600;
// Opcode must have been set by one of the cases above (relies on opcode 0
// never being a selected value).
133 assert (Opcode && "No opcode selected");
134 return TII->get(Opcode);
// Groups the fetch instructions starting at I into one clause: counts the
// non-trivial instructions (capped at MaxFetchInst) and inserts a native
// CF_TC instruction before the clause head carrying the clause's ADDR and
// COUNT immediates. Returns an iterator (presumably to the last instruction
// consumed — the return statement is elided in this listing; confirm).
// NOTE(review): the loop body lines between the two `if`s (the `continue;`,
// the fetch/non-fetch test, and the count increment) are elided here.
137 MachineBasicBlock::iterator
138 MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
139 unsigned CfAddress) const {
140 MachineBasicBlock::iterator ClauseHead = I;
141 unsigned AluInstCount = 0;
142 for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
// Trivial instructions do not count toward the clause size.
143 if (IsTrivialInst(I))
// Stop extending the clause once the fetch-instruction budget is exceeded.
148 if (AluInstCount > MaxFetchInst)
// Emit the clause header before the first instruction of the clause.
151 BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
152 getHWInstrDesc(CF_TC))
153 .addImm(CfAddress) // ADDR
154 .addImm(AluInstCount); // COUNT
// Adds Addr to MI's operand-0 immediate, i.e. relocates the instruction's
// recorded control-flow address by the given base offset.
157 void CounterPropagateAddr(MachineInstr *MI, unsigned Addr) const {
158 MI->getOperand(0).setImm(Addr + MI->getOperand(0).getImm());
// Same, applied to every instruction in MIs.
// NOTE(review): the set is taken by value, copying it on every call —
// passing `const std::set<MachineInstr *> &` would avoid the copy.
160 void CounterPropagateAddr(std::set<MachineInstr *> MIs, unsigned Addr)
162 for (std::set<MachineInstr *>::iterator It = MIs.begin(), E = MIs.end();
164 MachineInstr *MI = *It;
165 CounterPropagateAddr(MI, Addr);
// Constructor: caches the target's R600InstrInfo and subtarget, and selects
// the per-generation fetch-clause size limit.
// NOTE(review): the local `ST` declared below shadows the member `ST`
// initialized in the init list — the member alone would suffice.
// NOTE(review): the MaxFetchInst assignments (then/else of the generation
// check) are elided in this listing; presumably a smaller limit is used for
// HD4XXX-and-older — TODO confirm against full source.
170 R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID),
171 TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())),
172 ST(tm.getSubtarget<AMDGPUSubtarget>()) {
173 const AMDGPUSubtarget &ST = tm.getSubtarget<AMDGPUSubtarget>();
174 if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD4XXX)
// Main pass body: walks every basic block, replaces control-flow pseudo
// instructions (WHILELOOP, ENDLOOP, IF_PREDICATE_SET, ENDIF,
// PREDICATED_BREAK, CONTINUE, RETURN, ...) with native CF instructions whose
// address immediates are resolved via CfCount, and tracks the maximum
// control-flow stack depth for the STACK_SIZE instruction.
// NOTE(review): this listing is elided — loop increments, `break`s, closing
// braces, address/operand arguments to several BuildMI chains, the ELSE case
// label, and the CfCount updates fall in omitted lines. Comments below are
// limited to what the visible lines establish.
180 virtual bool runOnMachineFunction(MachineFunction &MF) {
181 unsigned MaxStack = 0;
182 unsigned CurrentStack = 0;
183 for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
185 MachineBasicBlock &MBB = *MB;
// Running count of native CF instructions, used as the address base
// propagated into loop/jump targets.
186 unsigned CfCount = 0;
// Per-loop: (address of the loop header, set of instructions whose
// address operand must later be patched to the loop's end).
187 std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack;
// Pending CF_JUMP / CF_ELSE instructions awaiting their target address.
188 std::vector<MachineInstr * > IfThenElseStack;
189 R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
// Shader type 1 gets a fetch-shader call prepended.
// NOTE(review): magic number — presumably a vertex-shader type constant;
// a named constant would be clearer. TODO confirm meaning of `1`.
190 if (MFI->ShaderType == 1) {
191 BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
192 getHWInstrDesc(CF_CALL_FS));
195 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
198 DEBUG(dbgs() << CfCount << ":"; I->dump(););
// Fetch instructions are grouped into a clause first (address 0 here;
// presumably patched later — elided lines, TODO confirm).
199 I = MakeFetchClause(MBB, I, 0);
204 MachineBasicBlock::iterator MI = I;
206 switch (MI->getOpcode()) {
// ALU-with-push grows the CF stack (increment presumably elided).
207 case AMDGPU::CF_ALU_PUSH_BEFORE:
209 MaxStack = std::max(MaxStack, CurrentStack);
// Exports and cacheless RAT writes: counted/dumped but left in place.
211 case AMDGPU::EG_ExportBuf:
212 case AMDGPU::EG_ExportSwz:
213 case AMDGPU::R600_ExportBuf:
214 case AMDGPU::R600_ExportSwz:
215 case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
216 case AMDGPU::RAT_WRITE_CACHELESS_128_eg:
217 DEBUG(dbgs() << CfCount << ":"; MI->dump(););
// Loop entry: emit WHILE_LOOP, remember its address and start a new
// set of instructions to patch when the loop end is known.
220 case AMDGPU::WHILELOOP: {
222 MaxStack = std::max(MaxStack, CurrentStack);
223 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
224 getHWInstrDesc(CF_WHILE_LOOP))
226 std::pair<unsigned, std::set<MachineInstr *> > Pair(CfCount,
227 std::set<MachineInstr *>());
228 Pair.second.insert(MIb);
229 LoopStack.push_back(Pair);
230 MI->eraseFromParent();
// Loop exit: patch all recorded loop instructions with the end
// address, emit END_LOOP targeting header+1, pop the loop.
234 case AMDGPU::ENDLOOP: {
236 std::pair<unsigned, std::set<MachineInstr *> > Pair =
238 LoopStack.pop_back();
239 CounterPropagateAddr(Pair.second, CfCount);
240 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END_LOOP))
241 .addImm(Pair.first + 1);
242 MI->eraseFromParent();
// If: emit CF_JUMP with a placeholder target, to be patched at
// ELSE/ENDIF.
246 case AMDGPU::IF_PREDICATE_SET: {
247 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
248 getHWInstrDesc(CF_JUMP))
251 IfThenElseStack.push_back(MIb);
252 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
253 MI->eraseFromParent();
// (Presumably the ELSE case — its `case` label is elided.) Patch the
// pending jump to here and push the new CF_ELSE for later patching.
// NOTE(review): back()/pop_back() assume a matching IF was seen;
// there is no emptiness check visible.
258 MachineInstr * JumpInst = IfThenElseStack.back();
259 IfThenElseStack.pop_back();
260 CounterPropagateAddr(JumpInst, CfCount);
261 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
262 getHWInstrDesc(CF_ELSE))
265 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
266 IfThenElseStack.push_back(MIb);
267 MI->eraseFromParent();
// End-if: patch the pending IF/ELSE to the instruction after the POP,
// then emit the POP itself.
271 case AMDGPU::ENDIF: {
273 MachineInstr *IfOrElseInst = IfThenElseStack.back();
274 IfThenElseStack.pop_back();
275 CounterPropagateAddr(IfOrElseInst, CfCount + 1);
276 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
277 getHWInstrDesc(CF_POP))
281 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
282 MI->eraseFromParent();
// Predicated break: JUMP + LOOP_BREAK + POP; the LOOP_BREAK's target
// is patched when the enclosing loop ends.
// NOTE(review): LoopStack.back() without an emptiness check — assumes
// breaks only occur inside a loop.
286 case AMDGPU::PREDICATED_BREAK: {
289 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_JUMP))
292 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
293 getHWInstrDesc(CF_LOOP_BREAK))
295 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_POP))
298 LoopStack.back().second.insert(MIb);
299 MI->eraseFromParent();
// Continue: emit CF_CONTINUE, target patched at loop end (same
// LoopStack.back() caveat as above).
302 case AMDGPU::CONTINUE: {
303 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
304 getHWInstrDesc(CF_LOOP_CONTINUE))
306 LoopStack.back().second.insert(MIb);
307 MI->eraseFromParent();
// Return: emit the end-of-program CF instruction, then a PAD
// (presumably for alignment — TODO confirm).
311 case AMDGPU::RETURN: {
312 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END));
314 MI->eraseFromParent();
316 BuildMI(MBB, I, MBB.findDebugLoc(MI), TII->get(AMDGPU::PAD));
// Prepend the STACK_SIZE instruction recording the maximum CF stack
// depth (its immediate operand is in elided lines; presumably MaxStack).
324 BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
325 TII->get(AMDGPU::STACK_SIZE))
// Human-readable pass name shown by the pass manager / -debug-pass output.
332 const char *getPassName() const {
333 return "R600 Control Flow Finalizer Pass";
// Pass identification: LLVM uses the ADDRESS of this static, not its value.
337 char R600ControlFlowFinalizer::ID = 0;
// Factory used by the target's pass-pipeline setup; ownership of the new
// pass transfers to the pass manager that receives it.
342 llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) {
343 return new R600ControlFlowFinalizer(TM);