//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//
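
// In outline, the top-down list-scheduling loop below behaves roughly like
// this sketch (illustrative pseudocode only, not the exact code in this
// file):
//
//   while (!Available.empty() || !Pending.empty()) {
//     move each Pending node whose depth <= CurCycle to Available;
//     if (Available.empty())
//       advance CurCycle;                       // a pipeline stall
//     else
//       pop the highest-priority node, emit it, release its successors;
//   }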

#define DEBUG_TYPE "post-RA-sched"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include <map>
using namespace llvm;

STATISTIC(NumStalls, "Number of pipeline stalls");

static cl::opt<bool>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies"),
                      cl::init(true), cl::Hidden);

namespace {
  class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(&ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

  public:
    SchedulePostRATDList(MachineBasicBlock *mbb, const TargetMachine &tm,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT)
      : ScheduleDAGInstrs(mbb, tm, MLI, MDT), Topo(SUnits) {}

    void Schedule();

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    bool BreakAntiDependencies();
  };
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  DOUT << "PostRAScheduler\n";

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {

    SchedulePostRATDList Scheduler(MBB, Fn.getTarget(), MLI, MDT);

    Scheduler.Run();

    Scheduler.EmitSchedule();
  }

  return true;
}

/// Schedule - Schedule the DAG using list scheduling.
void SchedulePostRATDList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  // Build the scheduling graph.
  BuildSchedGraph();

  if (EnableAntiDepBreaking) {
    if (BreakAntiDependencies()) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      BuildSchedGraph();
    }
  }

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}

/// getInstrOperandRegClass - Return register class of the operand of an
/// instruction of the specified TargetInstrDesc.
static const TargetRegisterClass*
getInstrOperandRegClass(const TargetRegisterInfo *TRI,
                        const TargetInstrInfo *TII, const TargetInstrDesc &II,
                        unsigned Op) {
  if (Op >= II.getNumOperands())
    return NULL;
  if (II.OpInfo[Op].isLookupPtrRegClass())
    return TII->getPointerRegClass();
  return TRI->getRegClass(II.OpInfo[Op].RegClass);
}

/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
  SDep *Next = 0;
  unsigned NextDepth = 0;
  // Find the predecessor edge with the greatest depth.
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    SUnit *PredSU = P->getSUnit();
    unsigned PredLatency = P->getLatency();
    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
    // In the case of a latency tie, prefer an anti-dependency edge over
    // other types of edges.
    if (NextDepth < PredTotalLatency ||
        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
      NextDepth = PredTotalLatency;
      Next = &*P;
    }
  }
  return Next;
}

/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
bool SchedulePostRATDList::BreakAntiDependencies() {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (BB->empty()) return false;

  // Find the node at the bottom of the critical path.
  SUnit *Max = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[i];
    if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
      Max = SU;
  }

  DOUT << "Critical path has total latency "
       << (Max ? Max->getDepth() + Max->Latency : 0) << "\n";

  // We'll be ignoring anti-dependencies on non-allocatable registers, because
  // they may not be safe to break.
  const BitVector AllocatableSet = TRI->getAllocatableSet(*MF);

  // Track progress along the critical path through the SUnit graph as we walk
  // the instructions.
  SUnit *CriticalPathSU = Max;
  MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();

  // For live regs that are only used in one register class in a live range,
  // the register class. If the register is not live, the corresponding value
  // is null. If the register is live but used in multiple register classes,
  // the corresponding value is -1 casted to a pointer.
  const TargetRegisterClass *
    Classes[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Map registers to all their references within a live range.
  std::multimap<unsigned, MachineOperand *> RegRefs;

  // The index of the most recent kill (proceeding bottom-up), or ~0u if
  // the register is not live.
  unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
  std::fill(KillIndices, array_endof(KillIndices), ~0u);
  // The index of the most recent complete def (proceeding bottom up), or ~0u
  // if the register is live.
  unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
  std::fill(DefIndices, array_endof(DefIndices), BB->size());
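
  // An illustrative example of this encoding (hypothetical indices, not from
  // any particular block): proceeding bottom-up, after visiting a use of reg
  // A at index 7, A is live: KillIndices[A] == 7 and DefIndices[A] == ~0u.
  // After later visiting A's def at index 3, A is dead above that point:
  // DefIndices[A] == 3 and KillIndices[A] == ~0u. Exactly one of the two is
  // ~0u for any register at any time; the asserts in the renaming loop below
  // depend on this invariant.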

  // Determine the live-out physregs for this block.
  if (BB->back().getDesc().isReturn())
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = ~0u;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = ~0u;
      }
    }
  else
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = ~0u;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = ~0u;
        }
      }

  // Consider callee-saved registers as live-out, since we're running after
  // prologue/epilogue insertion so there's no way to add additional
  // saved registers.
  //
  // TODO: If the callee saves and restores these, then we can potentially
  // use them between the save and the restore. To do that, we could scan
  // the exit blocks to see which of these registers are defined.
  // Alternatively, callee-saved registers that aren't saved and restored
  // could be marked live-in in every block.
  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
    unsigned Reg = *I;
    Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
    KillIndices[Reg] = BB->size();
    DefIndices[Reg] = ~0u;
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[AliasReg] = BB->size();
      DefIndices[AliasReg] = ~0u;
    }
  }

  // Consider this pattern:
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  // There are three anti-dependencies here, and without special care,
  // we'd break all of them using the same register:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  // because at each anti-dependence, B is the first register that
  // isn't A which is free. This re-introduces anti-dependencies
  // at all but one of the original anti-dependencies that we were
  // trying to break. To avoid this, keep track of the most recent
  // register that each register was replaced with, and avoid
  // using it to repair an anti-dependence on the same register.
  // This lets us produce this:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   C = ...
  //   ... = C
  //   B = ...
  //   ... = B
  // This still has an anti-dependence on B, but at least it isn't on the
  // original critical path.
  //
  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Attempt to break anti-dependence edges on the critical path. Walk the
  // instructions from the bottom up, tracking information about liveness
  // as we go to help determine which registers are available.
  bool Changed = false;
  unsigned Count = BB->size() - 1;
  for (MachineBasicBlock::reverse_iterator I = BB->rbegin(), E = BB->rend();
       I != E; ++I, --Count) {
    MachineInstr *MI = &*I;

    // After regalloc, IMPLICIT_DEF instructions aren't safe to treat as
    // dependence-breaking. In the case of an INSERT_SUBREG, the IMPLICIT_DEF
    // is left behind appearing to clobber the super-register, while the
    // subregister needs to remain live. So we just ignore them.
    if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
      continue;

    // Check if this instruction has a dependence on the critical path that
    // is an anti-dependence that we may be able to break. If it is, set
    // AntiDepReg to the non-zero register associated with the anti-dependence.
    //
    // We limit our attention to the critical path as a heuristic to avoid
    // breaking anti-dependence edges that aren't going to significantly
    // impact the overall schedule. There are a limited number of registers
    // and we want to save them for the important edges.
    //
    // TODO: Instructions with multiple defs could have multiple
    // anti-dependencies. The current code here only knows how to break one
    // edge per instruction. Note that we'd have to be able to break all of
    // the anti-dependencies in an instruction in order to be effective.
    unsigned AntiDepReg = 0;
    if (MI == CriticalPathMI) {
      if (SDep *Edge = CriticalPathStep(CriticalPathSU)) {
        SUnit *NextSU = Edge->getSUnit();

        // Only consider anti-dependence edges.
        if (Edge->getKind() == SDep::Anti) {
          AntiDepReg = Edge->getReg();
          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
          // Don't break anti-dependencies on non-allocatable registers.
          if (AllocatableSet.test(AntiDepReg)) {
            // If the SUnit has other dependencies on the SUnit that it
            // anti-depends on, don't bother breaking the anti-dependency
            // since those edges would prevent such units from being
            // scheduled past each other regardless.
            //
            // Also, if there are dependencies on other SUnits with the
            // same register as the anti-dependency, don't attempt to
            // break it.
            for (SUnit::pred_iterator P = CriticalPathSU->Preds.begin(),
                 PE = CriticalPathSU->Preds.end(); P != PE; ++P)
              if (P->getSUnit() == NextSU ?
                    (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
                    (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
                AntiDepReg = 0;
                break;
              }
          }
        }
        CriticalPathSU = NextSU;
        CriticalPathMI = CriticalPathSU->getInstr();
      } else {
        // We've reached the end of the critical path.
        CriticalPathSU = 0;
        CriticalPathMI = 0;
      }
    }

    // Scan the register operands for this instruction and update
    // Classes and RegRefs.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      const TargetRegisterClass *NewRC =
        getInstrOperandRegClass(TRI, TII, MI->getDesc(), i);

      // If this instruction has a use of AntiDepReg, breaking it
      // is invalid.
      if (MO.isUse() && AntiDepReg == Reg)
        AntiDepReg = 0;

      // For now, only allow the register to be changed if its register
      // class is consistent across all uses.
      if (!Classes[Reg] && NewRC)
        Classes[Reg] = NewRC;
      else if (!NewRC || Classes[Reg] != NewRC)
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

      // Now check for aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        // If an alias of the reg is used during the live range, give up.
        // Note that this allows us to skip checking if AntiDepReg
        // overlaps with any of the aliases, among other things.
        unsigned AliasReg = *Alias;
        if (Classes[AliasReg]) {
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        }
      }

      // If we're still willing to consider this register, note the reference.
      if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
        RegRefs.insert(std::make_pair(Reg, &MO));
    }

    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
    assert((AntiDepReg == 0 || RC != NULL) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      for (TargetRegisterClass::iterator R = RC->allocation_order_begin(*MF),
           RE = RC->allocation_order_end(*MF); R != RE; ++R) {
        unsigned NewReg = *R;
        // Don't replace a register with itself.
        if (NewReg == AntiDepReg) continue;
        // Don't replace a register with one that was recently used to repair
        // an anti-dependence with this AntiDepReg, because that would
        // re-introduce that anti-dependence.
        if (NewReg == LastNewReg[AntiDepReg]) continue;
        // If NewReg is dead and NewReg's most recent def is not before
        // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
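        //
        // A hypothetical example, with indices counted from the top of the
        // block (larger = later): suppose AntiDepReg's kill is at index 5
        // and NewReg's most recent def seen so far in this bottom-up walk
        // is at index 7. Then KillIndices[AntiDepReg] (5) <=
        // DefIndices[NewReg] (7) holds, and the renamed live range, which
        // ends at index 5, stays disjoint from NewReg's live range below.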
        assert(((KillIndices[AntiDepReg] == ~0u) != (DefIndices[AntiDepReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");
        assert(((KillIndices[NewReg] == ~0u) != (DefIndices[NewReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for NewReg!");
        if (KillIndices[NewReg] == ~0u &&
            Classes[NewReg] != reinterpret_cast<TargetRegisterClass *>(-1) &&
            KillIndices[AntiDepReg] <= DefIndices[NewReg]) {
          DOUT << "Breaking anti-dependence edge on "
               << TRI->getName(AntiDepReg)
               << " with " << RegRefs.count(AntiDepReg) << " references"
               << " using " << TRI->getName(NewReg) << "!\n";

          // Update the references to the old register to refer to the new
          // register.
          std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                    std::multimap<unsigned, MachineOperand *>::iterator>
             Range = RegRefs.equal_range(AntiDepReg);
          for (std::multimap<unsigned, MachineOperand *>::iterator
               Q = Range.first, QE = Range.second; Q != QE; ++Q)
            Q->second->setReg(NewReg);

          // We just went back in time and modified history; the
          // liveness information for the anti-dependence reg is now
          // inconsistent. Set the state as if it were dead.
          Classes[NewReg] = Classes[AntiDepReg];
          DefIndices[NewReg] = DefIndices[AntiDepReg];
          KillIndices[NewReg] = KillIndices[AntiDepReg];

          Classes[AntiDepReg] = 0;
          DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
          KillIndices[AntiDepReg] = ~0u;

          RegRefs.erase(AntiDepReg);

          LastNewReg[AntiDepReg] = NewReg;

          Changed = true;
          break;
        }
      }
    }

    // Proceeding upwards, registers that are def'd but not used in this
    // instruction are now dead.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegReDefinedByTwoAddr(i)) continue;

      DefIndices[Reg] = Count;
      KillIndices[Reg] = ~0u;
      Classes[Reg] = 0;
      RegRefs.erase(Reg);
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        unsigned SubregReg = *Subreg;
        DefIndices[SubregReg] = Count;
        KillIndices[SubregReg] = ~0u;
        Classes[SubregReg] = 0;
        RegRefs.erase(SubregReg);
      }
      // Conservatively mark super-registers as unusable.
      for (const unsigned *Super = TRI->getSuperRegisters(Reg);
           *Super; ++Super) {
        unsigned SuperReg = *Super;
        Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
      }
    }
    // Scan the uses of this instruction, updating liveness and RegRefs.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isUse()) continue;

      const TargetRegisterClass *NewRC =
        getInstrOperandRegClass(TRI, TII, MI->getDesc(), i);

      // For now, only allow the register to be changed if its register
      // class is consistent across all uses.
      if (!Classes[Reg] && NewRC)
        Classes[Reg] = NewRC;
      else if (!NewRC || Classes[Reg] != NewRC)
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

      RegRefs.insert(std::make_pair(Reg, &MO));

      // It wasn't previously live but now it is, this is a kill.
      if (KillIndices[Reg] == ~0u) {
        KillIndices[Reg] = Count;
        DefIndices[Reg] = ~0u;
      }
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        if (KillIndices[AliasReg] == ~0u) {
          KillIndices[AliasReg] = Count;
          DefIndices[AliasReg] = ~0u;
        }
      }
    }
  }
  assert(Count == ~0u && "Count mismatch!");

  return Changed;
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();
  --SuccSU->NumPredsLeft;

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft < 0) {
    cerr << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
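
  // For instance (hypothetical values): if SuccSU has two predecessors, one
  // scheduled at depth 2 with a 3-cycle edge latency and this one (SU) at
  // depth 4 with a 1-cycle edge, successive calls here raise SuccSU's depth
  // to max(2+3, 4+1) = 5, so SuccSU cannot issue before cycle 5.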
  if (SuccSU->NumPredsLeft == 0) {
    PendingQueue.push_back(SuccSU);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  // Top down: release successors.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);

  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // While the Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    // If there are no instructions available, don't try to issue anything.
    if (AvailableQueue.empty()) {
      // Jump ahead to the earliest cycle at which a pending instruction
      // becomes ready, if one is known.
      CurCycle = MinDepth != ~0u ? MinDepth : CurCycle + 1;
      continue;
    }

    SUnit *FoundSUnit = AvailableQueue.pop();

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);

      // If this is a pseudo-op node, we don't want to increment the current
      // cycle.
      if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
        ++CurCycle;
    } else {
      // Otherwise, we have a pipeline stall, but no other problem, just
      // advance the current cycle and try again.
      DOUT << "*** Advancing cycle, no work to do\n";
      ++NumStalls;
      ++CurCycle;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler() {
  return new PostRAScheduler();
}