//===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This pass lowers the pseudo control flow instructions to real
/// machine instructions.
///
/// All control flow is handled using predicated instructions and
/// a predicate stack. Each Scalar ALU controls the operations of 64 Vector
/// ALUs. The Scalar ALU can update the predicate for any of the Vector ALUs
/// by writing to the 64-bit EXEC register (each bit corresponds to a
/// single vector ALU). Typically, for predicates, a vector ALU will write
/// to its bit of the VCC register (like EXEC, VCC is 64 bits, one per
/// Vector ALU) and then the Scalar ALU will AND the VCC register with
/// EXEC to update the predicates.
///
/// For example:
/// %VCC = V_CMP_GT_F32 %VGPR1, %VGPR2
/// %SGPR0 = SI_IF %VCC
///   %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0
/// %SGPR0 = SI_ELSE %SGPR0
///   %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0
/// SI_END_CF %SGPR0
///
/// becomes:
///
/// %SGPR0 = S_AND_SAVEEXEC_B64 %VCC  // Save and update the exec mask
/// %SGPR0 = S_XOR_B64 %SGPR0, %EXEC  // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label0            // This instruction is an optional
///                                   // optimization which allows us to
///                                   // branch if all the bits of
///                                   // EXEC are zero.
/// %VGPR0 = V_ADD_F32 %VGPR0, %VGPR0 // Do the IF block of the branch
///
/// label0:
/// %SGPR0 = S_OR_SAVEEXEC_B64 %SGPR0 // Restore the exec mask for the ELSE block
/// %EXEC = S_XOR_B64 %SGPR0, %EXEC   // Clear live bits from saved exec mask
/// S_CBRANCH_EXECZ label1            // Use our branch optimization
///                                   // instruction again.
/// %VGPR0 = V_SUB_F32 %VGPR0, %VGPR0 // Do the ELSE block
/// label1:
/// %EXEC = S_OR_B64 %EXEC, %SGPR0    // Re-enable saved exec mask bits
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

namespace {

class SILowerControlFlowPass : public MachineFunctionPass {
private:
  static const unsigned SkipThreshold = 12;

  static char ID;
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;

  bool shouldSkip(MachineBasicBlock *From, MachineBasicBlock *To);

  void Skip(MachineInstr &From, MachineOperand &To);
  void SkipIfDead(MachineInstr &MI);

  void If(MachineInstr &MI);
  void Else(MachineInstr &MI);
  void Break(MachineInstr &MI);
  void IfBreak(MachineInstr &MI);
  void ElseBreak(MachineInstr &MI);
  void Loop(MachineInstr &MI);
  void EndCf(MachineInstr &MI);

  void Kill(MachineInstr &MI);
  void Branch(MachineInstr &MI);

  void InitM0ForLDS(MachineBasicBlock::iterator MI);
  void LoadM0(MachineInstr &MI, MachineInstr *MovRel);
  void IndirectSrc(MachineInstr &MI);
  void IndirectDst(MachineInstr &MI);

public:
  SILowerControlFlowPass(TargetMachine &tm) :
    MachineFunctionPass(ID), TRI(nullptr), TII(nullptr) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Lower control flow instructions";
  }
};

} // End anonymous namespace

char SILowerControlFlowPass::ID = 0;

FunctionPass *llvm::createSILowerControlFlowPass(TargetMachine &tm) {
  return new SILowerControlFlowPass(tm);
}
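
/// Estimate the straight-line instruction count between \p From and \p To and
/// return true once it reaches SkipThreshold, i.e. when a conditional skip
/// branch over the region would pay for itself.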
bool SILowerControlFlowPass::shouldSkip(MachineBasicBlock *From,
                                        MachineBasicBlock *To) {
  unsigned NumInstr = 0;

  for (MachineBasicBlock *MBB = From; MBB != To && !MBB->succ_empty();
       MBB = *MBB->succ_begin()) {
    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         NumInstr < SkipThreshold && I != E; ++I) {
      if (I->isBundle() || !I->isBundled())
        if (++NumInstr >= SkipThreshold)
          return true;
    }
  }

  return false;
}
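
/// Insert a S_CBRANCH_EXECZ that skips ahead to \p To when no lanes are
/// active, provided the skipped region is long enough to be worth a branch.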
void SILowerControlFlowPass::Skip(MachineInstr &From, MachineOperand &To) {
  if (!shouldSkip(*From.getParent()->succ_begin(), To.getMBB()))
    return;

  DebugLoc DL = From.getDebugLoc();
  BuildMI(*From.getParent(), &From, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
    .addOperand(To)
    .addReg(AMDGPU::EXEC);
}
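
/// If the whole wavefront has been killed in a pixel shader, export a null
/// pixel and terminate the wavefront instead of running the remainder of the
/// shader.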
void SILowerControlFlowPass::SkipIfDead(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  if (MBB.getParent()->getInfo<SIMachineFunctionInfo>()->ShaderType !=
      ShaderType::PIXEL ||
      !shouldSkip(&MBB, &MBB.getParent()->back()))
    return;

  MachineBasicBlock::iterator Insert = &MI;
  ++Insert;

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addImm(3)
    .addReg(AMDGPU::EXEC);

  // Exec mask is zero: Export to NULL target...
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::EXP))
    .addImm(0)    // enable mask
    .addImm(0x09) // V_008DFC_SQ_EXP_NULL
    .addImm(0)    // compressed
    .addImm(1)    // done
    .addImm(1)    // valid mask
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0)
    .addReg(AMDGPU::VGPR0);

  // ... and terminate wavefront
  BuildMI(MBB, Insert, DL, TII->get(AMDGPU::S_ENDPGM));
}
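
/// Lower SI_IF: save the current exec mask, restrict execution to the lanes
/// where \p Vcc is set, and remember the lanes that still have to run the
/// ELSE block.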
void SILowerControlFlowPass::If(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), Reg)
    .addReg(Vcc);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), Reg)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}
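
/// Lower SI_ELSE: re-enable the lanes recorded by SI_IF and turn off the
/// lanes that already ran the IF block.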
void SILowerControlFlowPass::Else(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_SAVEEXEC_B64), Dst)
    .addReg(Src); // Saved EXEC

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Dst);

  Skip(MI, MI.getOperand(2));

  MI.eraseFromParent();
}
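
/// Lower SI_BREAK: merge the currently active lanes into the mask of lanes
/// that have left the loop.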
void SILowerControlFlowPass::Break(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Src = MI.getOperand(1).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  MI.eraseFromParent();
}
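
/// Lower SI_IF_BREAK: add the lanes where the break condition \p Vcc holds to
/// the accumulated break mask \p Src.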
void SILowerControlFlowPass::IfBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vcc = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Vcc)
    .addReg(Src);

  MI.eraseFromParent();
}
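
/// Lower SI_ELSE_BREAK: merge the break mask of the ELSE region into the
/// accumulated break mask.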
void SILowerControlFlowPass::ElseBreak(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Saved = MI.getOperand(1).getReg();
  unsigned Src = MI.getOperand(2).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_OR_B64), Dst)
    .addReg(Saved)
    .addReg(Src);

  MI.eraseFromParent();
}
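
/// Lower SI_LOOP: disable the lanes that have taken a break and branch back
/// to the loop header while any lane remains active.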
void SILowerControlFlowPass::Loop(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Src = MI.getOperand(0).getReg();

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_ANDN2_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Src);

  BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
    .addOperand(MI.getOperand(1))
    .addReg(AMDGPU::EXEC);

  MI.eraseFromParent();
}
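
/// Lower SI_END_CF: re-enable the lanes that were saved when the control flow
/// region was entered.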
void SILowerControlFlowPass::EndCf(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  unsigned Reg = MI.getOperand(0).getReg();

  BuildMI(MBB, MBB.getFirstNonPHI(), DL,
          TII->get(AMDGPU::S_OR_B64), AMDGPU::EXEC)
    .addReg(AMDGPU::EXEC)
    .addReg(Reg);

  MI.eraseFromParent();
}
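
/// Drop an unconditional S_BRANCH to the immediate fall-through block.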
void SILowerControlFlowPass::Branch(MachineInstr &MI) {
  if (MI.getOperand(0).getMBB() == MI.getParent()->getNextNode())
    MI.eraseFromParent();

  // If these aren't equal, this is probably an infinite loop.
}
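
/// Lower SI_KILL: deactivate every lane whose kill operand evaluates to a
/// negative value.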
void SILowerControlFlowPass::Kill(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  const MachineOperand &Op = MI.getOperand(0);

  // Kill is only allowed in pixel / geometry shaders
  assert(MBB.getParent()->getInfo<SIMachineFunctionInfo>()->ShaderType ==
         ShaderType::PIXEL ||
         MBB.getParent()->getInfo<SIMachineFunctionInfo>()->ShaderType ==
         ShaderType::GEOMETRY);

  // Clear this thread from the exec mask if the operand is negative
  if (Op.isImm() || Op.isFPImm()) {
    // Constant operand: Set exec mask to 0 or do nothing
    if (Op.isImm() ? (Op.getImm() & 0x80000000) :
        Op.getFPImm()->isNegative()) {
      BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
        .addImm(0);
    }
  } else {
    // Variable operand: compare against zero; V_CMPX also writes EXEC,
    // clearing the lanes where 0 <= Op does not hold.
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMPX_LE_F32_e32), AMDGPU::VCC)
      .addImm(0)
      .addOperand(Op);
  }

  MI.eraseFromParent();
}

/// The m0 register stores the maximum allowable address for LDS reads and
/// writes. Its value must be at least the size in bytes of LDS allocated by
/// the shader. For simplicity, we set it to the maximum possible value.
void SILowerControlFlowPass::InitM0ForLDS(MachineBasicBlock::iterator MI) {
  BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B32),
          AMDGPU::M0).addImm(0xffffffff);
}
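
/// Load an indirect-addressing index into m0 and insert \p MovRel at the
/// current point. A uniform (SGPR) index is moved directly; a divergent
/// (VGPR) index is handled by looping with V_READFIRSTLANE over every
/// distinct value present in the wavefront.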
void SILowerControlFlowPass::LoadM0(MachineInstr &MI, MachineInstr *MovRel) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  MachineBasicBlock::iterator I = MI;

  unsigned Save = MI.getOperand(1).getReg();
  unsigned Idx = MI.getOperand(3).getReg();

  if (AMDGPU::SReg_32RegClass.contains(Idx)) {
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addReg(Idx);
    MBB.insert(I, MovRel);
  } else {
    assert(AMDGPU::SReg_64RegClass.contains(Save));
    assert(AMDGPU::VReg_32RegClass.contains(Idx));

    // Save the EXEC mask
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), Save)
      .addReg(AMDGPU::EXEC);

    // Read the next variant into VCC (lower 32 bits) <- also loop target
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32),
            AMDGPU::VCC_LO)
      .addReg(Idx);

    // Move index from VCC into M0
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addReg(AMDGPU::VCC_LO);

    // Compare the just read M0 value to all possible Idx values
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CMP_EQ_U32_e32), AMDGPU::VCC)
      .addReg(AMDGPU::M0)
      .addReg(Idx);

    // Update EXEC, save the original EXEC value to VCC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_AND_SAVEEXEC_B64), AMDGPU::VCC)
      .addReg(AMDGPU::VCC);

    // Do the actual move
    MBB.insert(I, MovRel);

    // Update EXEC, switch all done bits to 0 and all todo bits to 1
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_XOR_B64), AMDGPU::EXEC)
      .addReg(AMDGPU::EXEC)
      .addReg(AMDGPU::VCC);

    // Loop back to V_READFIRSTLANE_B32 if there are still variants to cover
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
      .addImm(-7)
      .addReg(AMDGPU::EXEC);

    // Restore EXEC
    BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
      .addReg(Save);
  }

  // FIXME: Are there any values other than the LDS address clamp that need to
  // be stored in the m0 register and may be live for more than a few
  // instructions? If so, we should save the m0 register at the beginning
  // of this function and restore it here.
  // FIXME: Add support for LDS direct loads.
  InitM0ForLDS(&MI);

  MI.eraseFromParent();
}
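
/// Lower SI_INDIRECT_SRC: read from a dynamically indexed vector register
/// with V_MOVRELS.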
void SILowerControlFlowPass::IndirectSrc(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Vec = MI.getOperand(2).getReg();
  unsigned Off = MI.getOperand(4).getImm();
  unsigned SubReg = TRI->getSubReg(Vec, AMDGPU::sub0);
  if (!SubReg)
    SubReg = Vec;

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELS_B32_e32), Dst)
      .addReg(SubReg + Off)
      .addReg(AMDGPU::M0, RegState::Implicit)
      .addReg(Vec, RegState::Implicit);

  LoadM0(MI, MovRel);
}
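
/// Lower SI_INDIRECT_DST_*: write to a dynamically indexed vector register
/// with V_MOVRELD.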
void SILowerControlFlowPass::IndirectDst(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  unsigned Dst = MI.getOperand(0).getReg();
  unsigned Off = MI.getOperand(4).getImm();
  unsigned Val = MI.getOperand(5).getReg();
  unsigned SubReg = TRI->getSubReg(Dst, AMDGPU::sub0);
  if (!SubReg)
    SubReg = Dst;

  MachineInstr *MovRel =
    BuildMI(*MBB.getParent(), DL, TII->get(AMDGPU::V_MOVRELD_B32_e32))
      .addReg(SubReg + Off, RegState::Define)
      .addReg(Val)
      .addReg(AMDGPU::M0, RegState::Implicit)
      .addReg(Dst, RegState::Implicit);

  LoadM0(MI, MovRel);
}
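
/// Walk every instruction, lowering the SI_* control flow pseudos and the
/// indirect addressing pseudos, and record whether the function needs m0
/// initialized for LDS or EXEC put into whole-quad mode.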
bool SILowerControlFlowPass::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const SIInstrInfo*>(MF.getTarget().getInstrInfo());
  TRI = static_cast<const SIRegisterInfo*>(MF.getTarget().getRegisterInfo());
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();

  bool HaveKill = false;
  bool NeedM0 = false;
  bool NeedWQM = false;
  unsigned Depth = 0;

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {
    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);

      MachineInstr &MI = *I;
      if (TII->isDS(MI.getOpcode())) {
        NeedM0 = true;
        NeedWQM = true;
      }

      switch (MI.getOpcode()) {
      default: break;
      case AMDGPU::SI_IF:
        ++Depth;
        If(MI);
        break;

      case AMDGPU::SI_ELSE:
        Else(MI);
        break;

      case AMDGPU::SI_BREAK:
        Break(MI);
        break;

      case AMDGPU::SI_IF_BREAK:
        IfBreak(MI);
        break;

      case AMDGPU::SI_ELSE_BREAK:
        ElseBreak(MI);
        break;

      case AMDGPU::SI_LOOP:
        ++Depth;
        Loop(MI);
        break;

      case AMDGPU::SI_END_CF:
        if (--Depth == 0 && HaveKill) {
          SkipIfDead(MI);
          HaveKill = false;
        }
        EndCf(MI);
        break;

      case AMDGPU::SI_KILL:
        if (Depth == 0)
          SkipIfDead(MI);
        else
          HaveKill = true;
        Kill(MI);
        break;

      case AMDGPU::S_BRANCH:
        Branch(MI);
        break;

      case AMDGPU::SI_INDIRECT_SRC:
        IndirectSrc(MI);
        break;

      case AMDGPU::SI_INDIRECT_DST_V1:
      case AMDGPU::SI_INDIRECT_DST_V2:
      case AMDGPU::SI_INDIRECT_DST_V4:
      case AMDGPU::SI_INDIRECT_DST_V8:
      case AMDGPU::SI_INDIRECT_DST_V16:
        IndirectDst(MI);
        break;

      case AMDGPU::V_INTERP_P1_F32:
      case AMDGPU::V_INTERP_P2_F32:
      case AMDGPU::V_INTERP_MOV_F32:
        NeedWQM = true;
        break;
      }
    }
  }

  if (NeedM0) {
    MachineBasicBlock &MBB = MF.front();
    // Initialize M0 to a value that won't cause LDS access to be discarded
    // due to offset clamping
    InitM0ForLDS(MBB.getFirstNonPHI());
  }

  if (NeedWQM && MFI->ShaderType == ShaderType::PIXEL) {
    MachineBasicBlock &MBB = MF.front();
    BuildMI(MBB, MBB.getFirstNonPHI(), DebugLoc(), TII->get(AMDGPU::S_WQM_B64),
            AMDGPU::EXEC).addReg(AMDGPU::EXEC);
  }

  return true;
}