R600: Recommit 199842: Add work-around for the CF stack entry HW bug
[oota-llvm.git] / lib / Target / R600 / R600ControlFlowFinalizer.cpp
1 //===-- R600ControlFlowFinalizer.cpp - Finalize Control Flow Inst----------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// This pass compute turns all control flow pseudo instructions into native one
12 /// computing their address on the fly ; it also sets STACK_SIZE info.
13 //===----------------------------------------------------------------------===//
14
15 #define DEBUG_TYPE "r600cf"
16 #include "llvm/Support/Debug.h"
17 #include "AMDGPU.h"
18 #include "R600Defines.h"
19 #include "R600InstrInfo.h"
20 #include "R600MachineFunctionInfo.h"
21 #include "R600RegisterInfo.h"
22 #include "llvm/CodeGen/MachineFunctionPass.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/Support/raw_ostream.h"
26
27 using namespace llvm;
28
29 namespace {
30
31 struct CFStack {
32
33   enum StackItem {
34     ENTRY = 0,
35     SUB_ENTRY = 1,
36     FIRST_NON_WQM_PUSH = 2,
37     FIRST_NON_WQM_PUSH_W_FULL_ENTRY = 3
38   };
39
40   const AMDGPUSubtarget &ST;
41   std::vector<StackItem> BranchStack;
42   std::vector<StackItem> LoopStack;
43   unsigned MaxStackSize;
44   unsigned CurrentEntries;
45   unsigned CurrentSubEntries;
46
47   CFStack(const AMDGPUSubtarget &st, unsigned ShaderType) : ST(st),
48       // We need to reserve a stack entry for CALL_FS in vertex shaders.
49       MaxStackSize(ShaderType == ShaderType::VERTEX ? 1 : 0),
50       CurrentEntries(0), CurrentSubEntries(0) { }
51
52   unsigned getLoopDepth();
53   bool branchStackContains(CFStack::StackItem);
54   bool requiresWorkAroundForInst(unsigned Opcode);
55   unsigned getSubEntrySize(CFStack::StackItem Item);
56   void updateMaxStackSize();
57   void pushBranch(unsigned Opcode, bool isWQM = false);
58   void pushLoop();
59   void popBranch();
60   void popLoop();
61 };
62
63 unsigned CFStack::getLoopDepth() {
64   return LoopStack.size();
65 }
66
67 bool CFStack::branchStackContains(CFStack::StackItem Item) {
68   for (std::vector<CFStack::StackItem>::const_iterator I = BranchStack.begin(),
69        E = BranchStack.end(); I != E; ++I) {
70     if (*I == Item)
71       return true;
72   }
73   return false;
74 }
75
76 bool CFStack::requiresWorkAroundForInst(unsigned Opcode) {
77   if (Opcode == AMDGPU::CF_ALU_PUSH_BEFORE && ST.hasCaymanISA() &&
78       getLoopDepth() > 1)
79     return true;
80
81   if (!ST.hasCFAluBug())
82     return false;
83
84   switch(Opcode) {
85   default: return false;
86   case AMDGPU::CF_ALU_PUSH_BEFORE:
87   case AMDGPU::CF_ALU_ELSE_AFTER:
88   case AMDGPU::CF_ALU_BREAK:
89   case AMDGPU::CF_ALU_CONTINUE:
90     if (CurrentSubEntries == 0)
91       return false;
92     if (ST.getWavefrontSize() == 64) {
93       // We are being conservative here.  We only require this work-around if
94       // CurrentSubEntries > 3 &&
95       // (CurrentSubEntries % 4 == 3 || CurrentSubEntries % 4 == 0)
96       //
97       // We have to be conservative, because we don't know for certain that
98       // our stack allocation algorithm for Evergreen/NI is correct.  Applying this
99       // work-around when CurrentSubEntries > 3 allows us to over-allocate stack
100       // resources without any problems.
101       return CurrentSubEntries > 3;
102     } else {
103       assert(ST.getWavefrontSize() == 32);
104       // We are being conservative here.  We only require the work-around if
105       // CurrentSubEntries > 7 &&
106       // (CurrentSubEntries % 8 == 7 || CurrentSubEntries % 8 == 0)
107       // See the comment on the wavefront size == 64 case for why we are
108       // being conservative.
109       return CurrentSubEntries > 7;
110     }
111   }
112 }
113
114 unsigned CFStack::getSubEntrySize(CFStack::StackItem Item) {
115   switch(Item) {
116   default:
117     return 0;
118   case CFStack::FIRST_NON_WQM_PUSH:
119   assert(!ST.hasCaymanISA());
120   if (ST.getGeneration() <= AMDGPUSubtarget::R700) {
121     // +1 For the push operation.
122     // +2 Extra space required.
123     return 3;
124   } else {
125     // Some documentation says that this is not necessary on Evergreen,
126     // but experimentation has show that we need to allocate 1 extra
127     // sub-entry for the first non-WQM push.
128     // +1 For the push operation.
129     // +1 Extra space required.
130     return 2;
131   }
132   case CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY:
133     assert(ST.getGeneration() >= AMDGPUSubtarget::EVERGREEN);
134     // +1 For the push operation.
135     // +1 Extra space required.
136     return 2;
137   case CFStack::SUB_ENTRY:
138     return 1;
139   }
140 }
141
142 void CFStack::updateMaxStackSize() {
143   unsigned CurrentStackSize = CurrentEntries +
144                               (RoundUpToAlignment(CurrentSubEntries, 4) / 4);
145   MaxStackSize = std::max(CurrentStackSize, MaxStackSize);
146 }
147
148 void CFStack::pushBranch(unsigned Opcode, bool isWQM) {
149   CFStack::StackItem Item = CFStack::ENTRY;
150   switch(Opcode) {
151   case AMDGPU::CF_PUSH_EG:
152   case AMDGPU::CF_ALU_PUSH_BEFORE:
153     if (!isWQM) {
154       if (!ST.hasCaymanISA() && !branchStackContains(CFStack::FIRST_NON_WQM_PUSH))
155         Item = CFStack::FIRST_NON_WQM_PUSH;  // May not be required on Evergreen/NI
156                                              // See comment in
157                                              // CFStack::getSubEntrySize()
158       else if (CurrentEntries > 0 &&
159                ST.getGeneration() > AMDGPUSubtarget::EVERGREEN &&
160                !ST.hasCaymanISA() &&
161                !branchStackContains(CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY))
162         Item = CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY;
163       else
164         Item = CFStack::SUB_ENTRY;
165     } else
166       Item = CFStack::ENTRY;
167     break;
168   }
169   BranchStack.push_back(Item);
170   if (Item == CFStack::ENTRY)
171     CurrentEntries++;
172   else
173     CurrentSubEntries += getSubEntrySize(Item);
174   updateMaxStackSize();
175 }
176
177 void CFStack::pushLoop() {
178   LoopStack.push_back(CFStack::ENTRY);
179   CurrentEntries++;
180   updateMaxStackSize();
181 }
182
183 void CFStack::popBranch() {
184   CFStack::StackItem Top = BranchStack.back();
185   if (Top == CFStack::ENTRY)
186     CurrentEntries--;
187   else
188     CurrentSubEntries-= getSubEntrySize(Top);
189   BranchStack.pop_back();
190 }
191
192 void CFStack::popLoop() {
193   CurrentEntries--;
194   LoopStack.pop_back();
195 }
196
197 class R600ControlFlowFinalizer : public MachineFunctionPass {
198
199 private:
200   typedef std::pair<MachineInstr *, std::vector<MachineInstr *> > ClauseFile;
201
202   enum ControlFlowInstruction {
203     CF_TC,
204     CF_VC,
205     CF_CALL_FS,
206     CF_WHILE_LOOP,
207     CF_END_LOOP,
208     CF_LOOP_BREAK,
209     CF_LOOP_CONTINUE,
210     CF_JUMP,
211     CF_ELSE,
212     CF_POP,
213     CF_END
214   };
215
216   static char ID;
217   const R600InstrInfo *TII;
218   const R600RegisterInfo *TRI;
219   unsigned MaxFetchInst;
220   const AMDGPUSubtarget &ST;
221
222   bool IsTrivialInst(MachineInstr *MI) const {
223     switch (MI->getOpcode()) {
224     case AMDGPU::KILL:
225     case AMDGPU::RETURN:
226       return true;
227     default:
228       return false;
229     }
230   }
231
232   const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
233     unsigned Opcode = 0;
234     bool isEg = (ST.getGeneration() >= AMDGPUSubtarget::EVERGREEN);
235     switch (CFI) {
236     case CF_TC:
237       Opcode = isEg ? AMDGPU::CF_TC_EG : AMDGPU::CF_TC_R600;
238       break;
239     case CF_VC:
240       Opcode = isEg ? AMDGPU::CF_VC_EG : AMDGPU::CF_VC_R600;
241       break;
242     case CF_CALL_FS:
243       Opcode = isEg ? AMDGPU::CF_CALL_FS_EG : AMDGPU::CF_CALL_FS_R600;
244       break;
245     case CF_WHILE_LOOP:
246       Opcode = isEg ? AMDGPU::WHILE_LOOP_EG : AMDGPU::WHILE_LOOP_R600;
247       break;
248     case CF_END_LOOP:
249       Opcode = isEg ? AMDGPU::END_LOOP_EG : AMDGPU::END_LOOP_R600;
250       break;
251     case CF_LOOP_BREAK:
252       Opcode = isEg ? AMDGPU::LOOP_BREAK_EG : AMDGPU::LOOP_BREAK_R600;
253       break;
254     case CF_LOOP_CONTINUE:
255       Opcode = isEg ? AMDGPU::CF_CONTINUE_EG : AMDGPU::CF_CONTINUE_R600;
256       break;
257     case CF_JUMP:
258       Opcode = isEg ? AMDGPU::CF_JUMP_EG : AMDGPU::CF_JUMP_R600;
259       break;
260     case CF_ELSE:
261       Opcode = isEg ? AMDGPU::CF_ELSE_EG : AMDGPU::CF_ELSE_R600;
262       break;
263     case CF_POP:
264       Opcode = isEg ? AMDGPU::POP_EG : AMDGPU::POP_R600;
265       break;
266     case CF_END:
267       if (ST.hasCaymanISA()) {
268         Opcode = AMDGPU::CF_END_CM;
269         break;
270       }
271       Opcode = isEg ? AMDGPU::CF_END_EG : AMDGPU::CF_END_R600;
272       break;
273     }
274     assert (Opcode && "No opcode selected");
275     return TII->get(Opcode);
276   }
277
278   bool isCompatibleWithClause(const MachineInstr *MI,
279       std::set<unsigned> &DstRegs) const {
280     unsigned DstMI, SrcMI;
281     for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
282         E = MI->operands_end(); I != E; ++I) {
283       const MachineOperand &MO = *I;
284       if (!MO.isReg())
285         continue;
286       if (MO.isDef()) {
287         unsigned Reg = MO.getReg();
288         if (AMDGPU::R600_Reg128RegClass.contains(Reg))
289           DstMI = Reg;
290         else
291           DstMI = TRI->getMatchingSuperReg(Reg,
292               TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
293               &AMDGPU::R600_Reg128RegClass);
294       }
295       if (MO.isUse()) {
296         unsigned Reg = MO.getReg();
297         if (AMDGPU::R600_Reg128RegClass.contains(Reg))
298           SrcMI = Reg;
299         else
300           SrcMI = TRI->getMatchingSuperReg(Reg,
301               TRI->getSubRegFromChannel(TRI->getHWRegChan(Reg)),
302               &AMDGPU::R600_Reg128RegClass);
303       }
304     }
305     if ((DstRegs.find(SrcMI) == DstRegs.end())) {
306       DstRegs.insert(DstMI);
307       return true;
308     } else
309       return false;
310   }
311
312   ClauseFile
313   MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
314       const {
315     MachineBasicBlock::iterator ClauseHead = I;
316     std::vector<MachineInstr *> ClauseContent;
317     unsigned AluInstCount = 0;
318     bool IsTex = TII->usesTextureCache(ClauseHead);
319     std::set<unsigned> DstRegs;
320     for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
321       if (IsTrivialInst(I))
322         continue;
323       if (AluInstCount >= MaxFetchInst)
324         break;
325       if ((IsTex && !TII->usesTextureCache(I)) ||
326           (!IsTex && !TII->usesVertexCache(I)))
327         break;
328       if (!isCompatibleWithClause(I, DstRegs))
329         break;
330       AluInstCount ++;
331       ClauseContent.push_back(I);
332     }
333     MachineInstr *MIb = BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
334         getHWInstrDesc(IsTex?CF_TC:CF_VC))
335         .addImm(0) // ADDR
336         .addImm(AluInstCount - 1); // COUNT
337     return ClauseFile(MIb, ClauseContent);
338   }
339
340   void getLiteral(MachineInstr *MI, std::vector<int64_t> &Lits) const {
341     static const unsigned LiteralRegs[] = {
342       AMDGPU::ALU_LITERAL_X,
343       AMDGPU::ALU_LITERAL_Y,
344       AMDGPU::ALU_LITERAL_Z,
345       AMDGPU::ALU_LITERAL_W
346     };
347     const SmallVector<std::pair<MachineOperand *, int64_t>, 3 > Srcs =
348         TII->getSrcs(MI);
349     for (unsigned i = 0, e = Srcs.size(); i < e; ++i) {
350       if (Srcs[i].first->getReg() != AMDGPU::ALU_LITERAL_X)
351         continue;
352       int64_t Imm = Srcs[i].second;
353       std::vector<int64_t>::iterator It =
354           std::find(Lits.begin(), Lits.end(), Imm);
355       if (It != Lits.end()) {
356         unsigned Index = It - Lits.begin();
357         Srcs[i].first->setReg(LiteralRegs[Index]);
358       } else {
359         assert(Lits.size() < 4 && "Too many literals in Instruction Group");
360         Srcs[i].first->setReg(LiteralRegs[Lits.size()]);
361         Lits.push_back(Imm);
362       }
363     }
364   }
365
366   MachineBasicBlock::iterator insertLiterals(
367       MachineBasicBlock::iterator InsertPos,
368       const std::vector<unsigned> &Literals) const {
369     MachineBasicBlock *MBB = InsertPos->getParent();
370     for (unsigned i = 0, e = Literals.size(); i < e; i+=2) {
371       unsigned LiteralPair0 = Literals[i];
372       unsigned LiteralPair1 = (i + 1 < e)?Literals[i + 1]:0;
373       InsertPos = BuildMI(MBB, InsertPos->getDebugLoc(),
374           TII->get(AMDGPU::LITERALS))
375           .addImm(LiteralPair0)
376           .addImm(LiteralPair1);
377     }
378     return InsertPos;
379   }
380
381   ClauseFile
382   MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
383       const {
384     MachineBasicBlock::iterator ClauseHead = I;
385     std::vector<MachineInstr *> ClauseContent;
386     I++;
387     for (MachineBasicBlock::instr_iterator E = MBB.instr_end(); I != E;) {
388       if (IsTrivialInst(I)) {
389         ++I;
390         continue;
391       }
392       if (!I->isBundle() && !TII->isALUInstr(I->getOpcode()))
393         break;
394       std::vector<int64_t> Literals;
395       if (I->isBundle()) {
396         MachineInstr *DeleteMI = I;
397         MachineBasicBlock::instr_iterator BI = I.getInstrIterator();
398         while (++BI != E && BI->isBundledWithPred()) {
399           BI->unbundleFromPred();
400           for (unsigned i = 0, e = BI->getNumOperands(); i != e; ++i) {
401             MachineOperand &MO = BI->getOperand(i);
402             if (MO.isReg() && MO.isInternalRead())
403               MO.setIsInternalRead(false);
404           }
405           getLiteral(BI, Literals);
406           ClauseContent.push_back(BI);
407         }
408         I = BI;
409         DeleteMI->eraseFromParent();
410       } else {
411         getLiteral(I, Literals);
412         ClauseContent.push_back(I);
413         I++;
414       }
415       for (unsigned i = 0, e = Literals.size(); i < e; i+=2) {
416         unsigned literal0 = Literals[i];
417         unsigned literal2 = (i + 1 < e)?Literals[i + 1]:0;
418         MachineInstr *MILit = BuildMI(MBB, I, I->getDebugLoc(),
419             TII->get(AMDGPU::LITERALS))
420             .addImm(literal0)
421             .addImm(literal2);
422         ClauseContent.push_back(MILit);
423       }
424     }
425     assert(ClauseContent.size() < 128 && "ALU clause is too big");
426     ClauseHead->getOperand(7).setImm(ClauseContent.size() - 1);
427     return ClauseFile(ClauseHead, ClauseContent);
428   }
429
430   void
431   EmitFetchClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
432       unsigned &CfCount) {
433     CounterPropagateAddr(Clause.first, CfCount);
434     MachineBasicBlock *BB = Clause.first->getParent();
435     BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::FETCH_CLAUSE))
436         .addImm(CfCount);
437     for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) {
438       BB->splice(InsertPos, BB, Clause.second[i]);
439     }
440     CfCount += 2 * Clause.second.size();
441   }
442
443   void
444   EmitALUClause(MachineBasicBlock::iterator InsertPos, ClauseFile &Clause,
445       unsigned &CfCount) {
446     Clause.first->getOperand(0).setImm(0);
447     CounterPropagateAddr(Clause.first, CfCount);
448     MachineBasicBlock *BB = Clause.first->getParent();
449     BuildMI(BB, InsertPos->getDebugLoc(), TII->get(AMDGPU::ALU_CLAUSE))
450         .addImm(CfCount);
451     for (unsigned i = 0, e = Clause.second.size(); i < e; ++i) {
452       BB->splice(InsertPos, BB, Clause.second[i]);
453     }
454     CfCount += Clause.second.size();
455   }
456
457   void CounterPropagateAddr(MachineInstr *MI, unsigned Addr) const {
458     MI->getOperand(0).setImm(Addr + MI->getOperand(0).getImm());
459   }
460   void CounterPropagateAddr(std::set<MachineInstr *> MIs, unsigned Addr)
461       const {
462     for (std::set<MachineInstr *>::iterator It = MIs.begin(), E = MIs.end();
463         It != E; ++It) {
464       MachineInstr *MI = *It;
465       CounterPropagateAddr(MI, Addr);
466     }
467   }
468
469 public:
470   R600ControlFlowFinalizer(TargetMachine &tm) : MachineFunctionPass(ID),
471     TII (0), TRI(0),
472     ST(tm.getSubtarget<AMDGPUSubtarget>()) {
473       const AMDGPUSubtarget &ST = tm.getSubtarget<AMDGPUSubtarget>();
474       MaxFetchInst = ST.getTexVTXClauseSize();
475   }
476
477   virtual bool runOnMachineFunction(MachineFunction &MF) {
478     TII=static_cast<const R600InstrInfo *>(MF.getTarget().getInstrInfo());
479     TRI=static_cast<const R600RegisterInfo *>(MF.getTarget().getRegisterInfo());
480     R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
481
482     CFStack CFStack(ST, MFI->ShaderType);
483     for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
484         ++MB) {
485       MachineBasicBlock &MBB = *MB;
486       unsigned CfCount = 0;
487       std::vector<std::pair<unsigned, std::set<MachineInstr *> > > LoopStack;
488       std::vector<MachineInstr * > IfThenElseStack;
489       if (MFI->ShaderType == 1) {
490         BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
491             getHWInstrDesc(CF_CALL_FS));
492         CfCount++;
493       }
494       std::vector<ClauseFile> FetchClauses, AluClauses;
495       std::vector<MachineInstr *> LastAlu(1);
496       std::vector<MachineInstr *> ToPopAfter;
497       
498       for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
499           I != E;) {
500         if (TII->usesTextureCache(I) || TII->usesVertexCache(I)) {
501           DEBUG(dbgs() << CfCount << ":"; I->dump(););
502           FetchClauses.push_back(MakeFetchClause(MBB, I));
503           CfCount++;
504           continue;
505         }
506
507         MachineBasicBlock::iterator MI = I;
508         if (MI->getOpcode() != AMDGPU::ENDIF)
509           LastAlu.back() = 0;
510         if (MI->getOpcode() == AMDGPU::CF_ALU)
511           LastAlu.back() = MI;
512         I++;
513         bool RequiresWorkAround =
514             CFStack.requiresWorkAroundForInst(MI->getOpcode());
515         switch (MI->getOpcode()) {
516         case AMDGPU::CF_ALU_PUSH_BEFORE:
517           if (RequiresWorkAround) {
518             DEBUG(dbgs() << "Applying bug work-around for ALU_PUSH_BEFORE\n");
519             BuildMI(MBB, MI, MBB.findDebugLoc(MI), TII->get(AMDGPU::CF_PUSH_EG))
520                 .addImm(CfCount + 1)
521                 .addImm(1);
522             MI->setDesc(TII->get(AMDGPU::CF_ALU));
523             CfCount++;
524             CFStack.pushBranch(AMDGPU::CF_PUSH_EG);
525           } else
526             CFStack.pushBranch(AMDGPU::CF_ALU_PUSH_BEFORE);
527
528         case AMDGPU::CF_ALU:
529           I = MI;
530           AluClauses.push_back(MakeALUClause(MBB, I));
531           DEBUG(dbgs() << CfCount << ":"; MI->dump(););
532           CfCount++;
533           break;
534         case AMDGPU::WHILELOOP: {
535           CFStack.pushLoop();
536           MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
537               getHWInstrDesc(CF_WHILE_LOOP))
538               .addImm(1);
539           std::pair<unsigned, std::set<MachineInstr *> > Pair(CfCount,
540               std::set<MachineInstr *>());
541           Pair.second.insert(MIb);
542           LoopStack.push_back(Pair);
543           MI->eraseFromParent();
544           CfCount++;
545           break;
546         }
547         case AMDGPU::ENDLOOP: {
548           CFStack.popLoop();
549           std::pair<unsigned, std::set<MachineInstr *> > Pair =
550               LoopStack.back();
551           LoopStack.pop_back();
552           CounterPropagateAddr(Pair.second, CfCount);
553           BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END_LOOP))
554               .addImm(Pair.first + 1);
555           MI->eraseFromParent();
556           CfCount++;
557           break;
558         }
559         case AMDGPU::IF_PREDICATE_SET: {
560           LastAlu.push_back(0);
561           MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
562               getHWInstrDesc(CF_JUMP))
563               .addImm(0)
564               .addImm(0);
565           IfThenElseStack.push_back(MIb);
566           DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
567           MI->eraseFromParent();
568           CfCount++;
569           break;
570         }
571         case AMDGPU::ELSE: {
572           MachineInstr * JumpInst = IfThenElseStack.back();
573           IfThenElseStack.pop_back();
574           CounterPropagateAddr(JumpInst, CfCount);
575           MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
576               getHWInstrDesc(CF_ELSE))
577               .addImm(0)
578               .addImm(0);
579           DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
580           IfThenElseStack.push_back(MIb);
581           MI->eraseFromParent();
582           CfCount++;
583           break;
584         }
585         case AMDGPU::ENDIF: {
586           CFStack.popBranch();
587           if (LastAlu.back()) {
588             ToPopAfter.push_back(LastAlu.back());
589           } else {
590             MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
591                 getHWInstrDesc(CF_POP))
592                 .addImm(CfCount + 1)
593                 .addImm(1);
594             (void)MIb;
595             DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
596             CfCount++;
597           }
598           
599           MachineInstr *IfOrElseInst = IfThenElseStack.back();
600           IfThenElseStack.pop_back();
601           CounterPropagateAddr(IfOrElseInst, CfCount);
602           IfOrElseInst->getOperand(1).setImm(1);
603           LastAlu.pop_back();
604           MI->eraseFromParent();
605           break;
606         }
607         case AMDGPU::BREAK: {
608           CfCount ++;
609           MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
610               getHWInstrDesc(CF_LOOP_BREAK))
611               .addImm(0);
612           LoopStack.back().second.insert(MIb);
613           MI->eraseFromParent();
614           break;
615         }
616         case AMDGPU::CONTINUE: {
617           MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
618               getHWInstrDesc(CF_LOOP_CONTINUE))
619               .addImm(0);
620           LoopStack.back().second.insert(MIb);
621           MI->eraseFromParent();
622           CfCount++;
623           break;
624         }
625         case AMDGPU::RETURN: {
626           BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END));
627           CfCount++;
628           MI->eraseFromParent();
629           if (CfCount % 2) {
630             BuildMI(MBB, I, MBB.findDebugLoc(MI), TII->get(AMDGPU::PAD));
631             CfCount++;
632           }
633           for (unsigned i = 0, e = FetchClauses.size(); i < e; i++)
634             EmitFetchClause(I, FetchClauses[i], CfCount);
635           for (unsigned i = 0, e = AluClauses.size(); i < e; i++)
636             EmitALUClause(I, AluClauses[i], CfCount);
637         }
638         default:
639           if (TII->isExport(MI->getOpcode())) {
640             DEBUG(dbgs() << CfCount << ":"; MI->dump(););
641             CfCount++;
642           }
643           break;
644         }
645       }
646       for (unsigned i = 0, e = ToPopAfter.size(); i < e; ++i) {
647         MachineInstr *Alu = ToPopAfter[i];
648         BuildMI(MBB, Alu, MBB.findDebugLoc((MachineBasicBlock::iterator)Alu),
649             TII->get(AMDGPU::CF_ALU_POP_AFTER))
650             .addImm(Alu->getOperand(0).getImm())
651             .addImm(Alu->getOperand(1).getImm())
652             .addImm(Alu->getOperand(2).getImm())
653             .addImm(Alu->getOperand(3).getImm())
654             .addImm(Alu->getOperand(4).getImm())
655             .addImm(Alu->getOperand(5).getImm())
656             .addImm(Alu->getOperand(6).getImm())
657             .addImm(Alu->getOperand(7).getImm())
658             .addImm(Alu->getOperand(8).getImm());
659         Alu->eraseFromParent();
660       }
661       MFI->StackSize = CFStack.MaxStackSize;
662     }
663
664     return false;
665   }
666
667   const char *getPassName() const {
668     return "R600 Control Flow Finalizer Pass";
669   }
670 };
671
672 char R600ControlFlowFinalizer::ID = 0;
673
674 } // end anonymous namespace
675
676
677 llvm::FunctionPass *llvm::createR600ControlFlowFinalizer(TargetMachine &TM) {
678   return new R600ControlFlowFinalizer(TM);
679 }