1 //===-- R600ControlFlowFinalizer.cpp - Finalize Control Flow Inst----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// This pass turns all control flow pseudo instructions into native ones,
12 /// computing their address on the fly; it also sets the STACK_SIZE info.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "r600cf"
16 #include "llvm/Support/Debug.h"
17 #include "llvm/Support/raw_ostream.h"
20 #include "R600Defines.h"
21 #include "R600InstrInfo.h"
22 #include "R600MachineFunctionInfo.h"
23 #include "R600RegisterInfo.h"
24 #include "llvm/CodeGen/MachineFunctionPass.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
// Machine function pass that lowers R600/Evergreen control-flow pseudo
// instructions (WHILELOOP, ENDLOOP, IF_PREDICATE_SET, ENDIF, PREDICATED_BREAK,
// CONTINUE, RETURN, ...) into native CF instructions, patching their jump
// addresses as the CF program is laid out, and records the hardware stack
// size in R600MachineFunctionInfo.
// NOTE(review): this view of the file is elided — several original lines are
// missing between the visible statements, so some bodies below appear
// truncated; do not assume the gaps are empty.
30 class R600ControlFlowFinalizer : public MachineFunctionPass {
// A fetch clause: the CF instruction that heads the clause, paired with the
// fetch (texture or vertex) instructions it covers.
33 typedef std::pair<MachineInstr *, std::vector<MachineInstr *> > ClauseFile;
// Generation-neutral CF opcodes; getHWInstrDesc() maps each to the concrete
// R600-, Evergreen-, or Cayman-specific machine opcode. (Enumerator list is
// elided from this view.)
35 enum ControlFlowInstruction {
50 const R600InstrInfo *TII;
// Upper bound used when packing fetch instructions into a single clause;
// set in the constructor based on the subtarget generation.
51 unsigned MaxFetchInst;
52 const AMDGPUSubtarget &ST;
// Returns true for instructions that need no slot in a fetch clause (the
// exact opcode set is elided from this view — presumably KILL/IMPLICIT_DEF
// style pseudos; verify against the full switch).
54 bool IsTrivialInst(MachineInstr *MI) const {
55 switch (MI->getOpcode()) {
// Maps a generation-neutral CF opcode to the hardware opcode for the current
// subtarget: Evergreen (HD5XXX and up) vs. original R600, with a Cayman
// special case for CF_END.
64 const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
66 bool isEg = (ST.device()->getGeneration() >= AMDGPUDeviceInfo::HD5XXX);
69 Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600;
72 Opcode = isEg ? AMDGPU::CF_VC_EG : AMDGPU::CF_VC_R600;
75 Opcode = isEg ? AMDGPU::CF_CALL_FS_EG : AMDGPU::CF_CALL_FS_R600;
78 Opcode = isEg ? AMDGPU::WHILE_LOOP_EG : AMDGPU::WHILE_LOOP_R600;
81 Opcode = isEg ? AMDGPU::END_LOOP_EG : AMDGPU::END_LOOP_R600;
84 Opcode = isEg ? AMDGPU::LOOP_BREAK_EG : AMDGPU::LOOP_BREAK_R600;
86 case CF_LOOP_CONTINUE:
87 Opcode = isEg ? AMDGPU::CF_CONTINUE_EG : AMDGPU::CF_CONTINUE_R600;
90 Opcode = isEg ? AMDGPU::CF_JUMP_EG : AMDGPU::CF_JUMP_R600;
93 Opcode = isEg ? AMDGPU::CF_ELSE_EG : AMDGPU::CF_ELSE_R600;
96 Opcode = isEg ? AMDGPU::POP_EG : AMDGPU::POP_R600;
// Cayman uses its own end-of-program opcode regardless of the EG default.
99 if (ST.device()->getDeviceFlag() == OCL_DEVICE_CAYMAN) {
100 Opcode = AMDGPU::CF_END_CM;
103 Opcode = isEg ? AMDGPU::CF_END_EG : AMDGPU::CF_END_R600;
// Opcode must have been assigned by one of the cases above; 0 means an
// unhandled ControlFlowInstruction value.
106 assert (Opcode && "No opcode selected");
107 return TII->get(Opcode);
// Collects a run of same-kind fetch instructions (all texture or all vertex,
// per the first instruction at I) into one clause, inserting a CF_TC/CF_VC
// head before them. On return, I points past the last instruction consumed.
111 MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
113 MachineBasicBlock::iterator ClauseHead = I;
114 std::vector<MachineInstr *> ClauseContent;
115 unsigned AluInstCount = 0;
116 bool IsTex = TII->usesTextureCache(ClauseHead);
117 for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
// Trivial instructions don't count against the clause limit.
118 if (IsTrivialInst(I))
// NOTE(review): '>' (not '>=') — confirm the clause cannot exceed
// MaxFetchInst by one; the increment of AluInstCount is elided here.
120 if (AluInstCount > MaxFetchInst)
// Stop the clause when the cache kind changes (tex vs. vertex).
122 if ((IsTex && !TII->usesTextureCache(I)) ||
123 (!IsTex && !TII->usesVertexCache(I)))
126 ClauseContent.push_back(I);
// Emit the clause head; the hardware COUNT field encodes count - 1.
128 MachineInstr *MIb = BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
129 getHWInstrDesc(IsTex?CF_TC:CF_VC))
131 .addImm(AluInstCount - 1); // COUNT
132 return ClauseFile(MIb, ClauseContent);
// Finalizes a previously built fetch clause: patches the clause head's
// address with the current CF counter, emits a FETCH_CLAUSE marker, splices
// the clause body before InsertPos, and advances CfCount by two per fetch
// instruction (each fetch instruction occupies two CF-program slots).
136 EmitFetchClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
138 CounterPropagateAddr(Clause.first, CfCount);
139 MachineBasicBlock *BB = Clause.first->getParent();
140 BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::FETCH_CLAUSE))
142 for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) {
143 BB->splice(InsertPos, BB, Clause.second[i]);
145 CfCount += 2 * Clause.second.size();
// Adds Addr to MI's address immediate (operand 0), turning a relative
// placeholder into a final CF-program address.
148 void CounterPropagateAddr(MachineInstr *MI, unsigned Addr) const {
149 MI->getOperand(0).setImm(Addr + MI->getOperand(0).getImm());
// Same, applied to every instruction in the set (used for all the
// break/continue instructions collected for one loop).
// NOTE(review): the set is taken by value — a copy per call; a const
// reference would avoid it. Left unchanged in this elided view.
151 void CounterPropagateAddr(std::set<MachineInstr *> MIs, unsigned Addr)
153 for (std::set<MachineInstr *>::iterator It = MIs.begin(), E = MIs.end();
155 MachineInstr *MI = *It;
156 CounterPropagateAddr(MI, Addr);
// Converts the counted stack sub-entries into hardware stack entries; the
// per-generation scaling cases are elided, but the visible path rounds up
// to a multiple of four sub-entries per entry.
160 unsigned getHWStackSize(unsigned StackSubEntry, bool hasPush) const {
161 switch (ST.device()->getGeneration()) {
162 case AMDGPUDeviceInfo::HD4XXX:
166 case AMDGPUDeviceInfo::HD5XXX:
169 case AMDGPUDeviceInfo::HD6XXX:
173 return (StackSubEntry + 3)/4; // Need ceil value of StackSubEntry/4
// Constructor: caches the instruction info and subtarget, then picks the
// per-generation fetch-clause limit (assignment to MaxFetchInst is elided).
177 R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID),
178 TII (static_cast<const R600InstrInfo *>(tm.getInstrInfo())),
179 ST(tm.getSubtarget<AMDGPUSubtarget>()) {
// NOTE(review): this local ST shadows the member ST initialized just above
// with the same value — the local is redundant and worth removing once the
// full file is in view.
180 const AMDGPUSubtarget &ST = tm.getSubtarget<AMDGPUSubtarget>();
181 if (ST.device()->getGeneration() <= AMDGPUDeviceInfo::HD4XXX)
// Main driver: walks every basic block, builds fetch clauses, lowers each
// CF pseudo to its native form while tracking the CF instruction counter
// (CfCount) and the push-depth high-water mark (MaxStack).
187 virtual bool runOnMachineFunction(MachineFunction &MF) {
188 unsigned MaxStack = 0;
189 unsigned CurrentStack = 0;
191 for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
193 MachineBasicBlock &MBB = *MB;
194 unsigned CfCount = 0;
// Per-loop state: the loop's CF address plus the set of break/continue
// instructions whose target must be patched at ENDLOOP.
195 std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack;
// Pending CF_JUMP/CF_ELSE instructions awaiting their target address.
196 std::vector<MachineInstr * > IfThenElseStack;
197 R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
// ShaderType == 1: prepend the fetch-shader call (presumably the vertex
// shader type — confirm against R600MachineFunctionInfo).
198 if (MFI->ShaderType == 1) {
199 BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
200 getHWInstrDesc(CF_CALL_FS));
203 std::vector<ClauseFile> FetchClauses;
204 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
// Fetch instructions are grouped into clauses and emitted later.
206 if (TII->usesTextureCache(I) || TII->usesVertexCache(I)) {
207 DEBUG(dbgs() << CfCount << ":"; I->dump(););
208 FetchClauses.push_back(MakeFetchClause(MBB, I));
213 MachineBasicBlock::iterator MI = I;
215 switch (MI->getOpcode()) {
// An ALU clause with push grows the stack; track the high-water mark.
216 case AMDGPU::CF_ALU_PUSH_BEFORE:
218 MaxStack = std::max(MaxStack, CurrentStack);
// Exports and RAT writes are already native; just account for them.
221 case AMDGPU::EG_ExportBuf:
222 case AMDGPU::EG_ExportSwz:
223 case AMDGPU::R600_ExportBuf:
224 case AMDGPU::R600_ExportSwz:
225 case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
226 case AMDGPU::RAT_WRITE_CACHELESS_128_eg:
227 DEBUG(dbgs() << CfCount << ":"; MI->dump(););
// Loop entry: emit WHILE_LOOP, open a new LoopStack entry recording the
// loop's address and the instruction set to patch at loop end.
230 case AMDGPU::WHILELOOP: {
232 MaxStack = std::max(MaxStack, CurrentStack);
233 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
234 getHWInstrDesc(CF_WHILE_LOOP))
236 std::pair<unsigned, std::set<MachineInstr *> > Pair(CfCount,
237 std::set<MachineInstr *>());
238 Pair.second.insert(MIb);
239 LoopStack.push_back(Pair);
240 MI->eraseFromParent();
// Loop exit: patch every collected loop instruction with the end address
// and emit END_LOOP jumping back just past the loop head.
244 case AMDGPU::ENDLOOP: {
246 std::pair<unsigned, std::set<MachineInstr *> > Pair =
248 LoopStack.pop_back();
249 CounterPropagateAddr(Pair.second, CfCount);
250 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END_LOOP))
251 .addImm(Pair.first + 1);
252 MI->eraseFromParent();
// 'if': emit a CF_JUMP whose target is patched at the matching ELSE/ENDIF.
256 case AMDGPU::IF_PREDICATE_SET: {
257 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
258 getHWInstrDesc(CF_JUMP))
261 IfThenElseStack.push_back(MIb);
262 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
263 MI->eraseFromParent();
// 'else' (case label elided): resolve the pending jump to here, then push
// a CF_ELSE to be resolved at ENDIF.
268 MachineInstr * JumpInst = IfThenElseStack.back();
269 IfThenElseStack.pop_back();
270 CounterPropagateAddr(JumpInst, CfCount);
271 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
272 getHWInstrDesc(CF_ELSE))
275 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
276 IfThenElseStack.push_back(MIb);
277 MI->eraseFromParent();
// 'endif': resolve the pending jump/else past the POP (hence CfCount + 1)
// and emit the POP that unwinds the predicate stack.
281 case AMDGPU::ENDIF: {
283 MachineInstr *IfOrElseInst = IfThenElseStack.back();
284 IfThenElseStack.pop_back();
285 CounterPropagateAddr(IfOrElseInst, CfCount + 1);
286 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
287 getHWInstrDesc(CF_POP))
291 DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
292 MI->eraseFromParent();
// Predicated break: JUMP + LOOP_BREAK + POP; the break's target is patched
// when the enclosing loop's ENDLOOP is reached.
296 case AMDGPU::PREDICATED_BREAK: {
299 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_JUMP))
302 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
303 getHWInstrDesc(CF_LOOP_BREAK))
305 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_POP))
308 LoopStack.back().second.insert(MIb);
309 MI->eraseFromParent();
// 'continue': like break, the target address is patched at ENDLOOP.
312 case AMDGPU::CONTINUE: {
313 MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
314 getHWInstrDesc(CF_LOOP_CONTINUE))
316 LoopStack.back().second.insert(MIb);
317 MI->eraseFromParent();
// Function end: emit CF_END (and, per the PAD below, keep the CF program
// aligned), then flush all pending fetch clauses after this point.
321 case AMDGPU::RETURN: {
322 BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END));
324 MI->eraseFromParent();
326 BuildMI(MBB, I, MBB.findDebugLoc(MI), TII->get(AMDGPU::PAD));
329 for (unsigned i = 0, e = FetchClauses.size(); i < e; i++)
330 EmitFetchClause(I, FetchClauses[i], CfCount);
// Publish the final hardware stack size for the shader.
336 MFI->StackSize = getHWStackSize(MaxStack, hasPush);
// Human-readable pass name for -debug-pass and timing reports.
342 const char *getPassName() const {
343 return "R600 Control Flow Finalizer Pass";
// Pass identification: the address of ID serves as this pass's unique
// identifier within the LLVM pass infrastructure.
347 char R600ControlFlowFinalizer::ID = 0;
// Factory function used by the AMDGPU target to insert this pass into the
// codegen pipeline. Caller takes ownership of the returned pass.
352 llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) {
353 return new R600ControlFlowFinalizer(TM);