//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//
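
// A rough sketch of the per-cycle loop implemented by ListScheduleTopDown
// below (the names are members of SchedulePostRATDList, defined in this
// file):
//
//   while AvailableQueue or PendingQueue is non-empty:
//     move PendingQueue nodes whose depth <= CurCycle into AvailableQueue
//     pop AvailableQueue in priority order until a hazard-free node is
//     found; schedule it and release its successors
//     otherwise advance the cycle (a stall) or emit a noop, depending on
//     the kind of hazard reported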

#define DEBUG_TYPE "post-RA-sched"
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "RegisterClassInfo.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"

using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);
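
// Both flags are ordinary cl::opt options, so (for example) they can be
// passed to llc; cl::Hidden only keeps them out of -help:
//   llc -post-RA-scheduler -break-anti-dependencies=critical input.ll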

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
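
// Together these two flags support bisecting a scheduling problem: raise
// DebugDiv and sweep DebugMod so that only one MBB at a time is actually
// scheduled, narrowing a miscompile down to a single block.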

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    const TargetInstrInfo *TII;
    RegisterClassInfo RegClassInfo;

  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetPassConfig>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands becomes available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// LiveRegs - true if the register is live.
    BitVector LiveRegs;

  public:
    SchedulePostRATDList(
      MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
      AliasAnalysis *AA, const RegisterClassInfo&,
      TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
      SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs);

    ~SchedulePostRATDList();

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);

    void dumpSchedule() const;
  };
}

char &llvm::PostRASchedulerID = PostRAScheduler::ID;

INITIALIZE_PASS(PostRAScheduler, "post-RA-sched",
                "Post RA top-down list latency scheduler", false, false)

SchedulePostRATDList::SchedulePostRATDList(
  MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
  AliasAnalysis *AA, const RegisterClassInfo &RCI,
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
  SmallVectorImpl<const TargetRegisterClass*> &CriticalPathRCs)
  : ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRA=*/true), Topo(SUnits), AA(AA),
    LiveRegs(TRI->getNumRegs())
{
  const TargetMachine &TM = MF.getTarget();
  const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
  HazardRec =
    TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins, this);
  AntiDepBreak =
    ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : NULL));
}

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

/// dumpSchedule - dump the scheduled Sequence.
void SchedulePostRATDList::dumpSchedule() const {
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      SU->dump(this);
    else
      dbgs() << "**** NOOP ****\n";
  }
}
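
// Top-level driver. For each basic block: initialize liveness state
// (StartBlock), cut the block into scheduling regions at calls and other
// scheduling boundaries, list-schedule each region (Run/EmitSchedule) while
// Observe() accounts for the boundary instructions that are not scheduled,
// then tear down per-block state (FinishBlock) and repair kill flags that
// reordering may have invalidated (FixupKills).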
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  TII = Fn.getTarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();

  RegClassInfo.runOnMachineFunction(Fn);

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
    TargetSubtargetInfo::ANTIDEP_NONE;
  SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    // This may upgrade the AntiDepMode.
    const TargetSubtargetInfo &ST = Fn.getTarget().getSubtarget<TargetSubtargetInfo>();
    if (!ST.enablePostRAScheduler(PassConfig->getOptLevel(), AntiDepMode,
                                  CriticalPathRCs))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all")
      ? TargetSubtargetInfo::ANTIDEP_ALL
      : ((EnableAntiDepBreaking == "critical")
         ? TargetSubtargetInfo::ANTIDEP_CRITICAL
         : TargetSubtargetInfo::ANTIDEP_NONE);
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getName()
             << ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = llvm::prior(I);
      // Calls are not scheduling boundaries before register allocation, but
      // post-ra we don't gain anything by scheduling across calls since we
      // don't need to worry about register pressure.
      if (MI->isCall() || TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
      if (MI->isBundle())
        Count -= MI->getBundleSize();
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
                                          InsertPosIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      Sequence.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();

  DEBUG({
    dbgs() << "*** Final schedule ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, InsertPosIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// StartBlockForKills - Initialize register live-range state for updating kills
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Start with no live registers.
  LiveRegs.reset();

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
           E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      LiveRegs.set(Reg);
      // Repeat, for all subregs.
      for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        LiveRegs.set(*Subreg);
    }
  }
  else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        LiveRegs.set(Reg);
        // Repeat, for all subregs.
        for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg)
          LiveRegs.set(*Subreg);
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (LiveRegs.test(MO.getReg())) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const uint16_t *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (LiveRegs.test(*Subreg)) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg, true/*IsDef*/,
                         true/*IsImp*/, false/*IsKill*/, false/*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}
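
// Illustrative example (x86 register names, purely for exposition): if an
// instruction uses %EAX<kill> while the sub-register %AX is still live
// afterwards, the kill flag on %EAX is cleared and an implicit def of %AX
// is appended to the instruction, so %AX remains defined; the kill flag is
// kept only when every sub-register is dead.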

/// FixupKills - Fix the register kill flags, they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  BitVector killedRegs(TRI->getNumRegs());
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isRegMask())
        LiveRegs.clearBitsNotInMask(MO.getRegMask());
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      LiveRegs.reset(Reg);

      // Repeat for all subregs.
      for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        LiveRegs.reset(*Subreg);
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use. Don't set kill flags on undef operands.
    killedRegs.reset();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (!killedRegs.test(Reg)) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (LiveRegs.test(*Subreg)) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction
        if (kill)
          kill = !LiveRegs.test(Reg);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: ToggleKillFlag may invalidate MO.
        ToggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.set(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      LiveRegs.set(Reg);

      for (const uint16_t *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        LiveRegs.set(*Subreg);
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//
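
// The scheduler keeps two worklists: PendingQueue holds nodes whose
// predecessors have all been issued but whose results are still in flight
// (their latency has not yet elapsed), and AvailableQueue holds nodes that
// may issue in the current cycle, ordered by the LatencyPriorityQueue
// heuristic.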

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as such:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node which causes
  // all descendents to be marked dirty. Setting the successor depth explicitly
  // here would cause depth to be recomputed for all its ancestors. If the
  // successor is not yet ready (because of a transitively redundant edge) then
  // this causes depth computation to be quadratic in the size of the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    bool available = SUnits[i].Preds.empty();
    if (available) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
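
  // Each iteration of the loop below issues at most one instruction.
  // CurCycle advances only when the cycle reaches its issue limit or no
  // hazard-free node is available, in which case a stall is recorded or a
  // noop is emitted.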
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));

    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
  unsigned Noops = 0;
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(Sequence.size() - Noops == ScheduledNodes &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif // NDEBUG
}