//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
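//
// This pass also performs a limited form of anti-dependence breaking:
// anti-dependence (write-after-read) edges lying on the schedule's critical
// path are broken by renaming registers, when a suitable free register is
// available (see BreakAntiDependencies below).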
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <map>
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");

static cl::opt<bool>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies"),
                      cl::init(true), cl::Hidden);

static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                            cl::desc("Enable exact hazard avoidance"),
                            cl::init(false), cl::Hidden);

namespace {
  class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(&ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// AllocatableSet - The set of allocatable registers.
    /// We'll be ignoring anti-dependencies on non-allocatable registers,
    /// because they may not be safe to break.
    const BitVector AllocatableSet;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// Classes - For live regs that are only used in one register class in a
    /// live range, the register class. If the register is not live, the
    /// corresponding value is null. If the register is live but used in
    /// multiple register classes, the corresponding value is -1 casted to a
    /// pointer.
    const TargetRegisterClass *
      Classes[TargetRegisterInfo::FirstVirtualRegister];

    /// RegRefs - Map registers to all their references within a live range.
    std::multimap<unsigned, MachineOperand *> RegRefs;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

    /// DefIndices - The index of the most recent complete def (proceeding
    /// bottom-up), or ~0u if the register is live.
    unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
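
    // Note: KillIndices[Reg] and DefIndices[Reg] are kept mutually
    // exclusive: exactly one of them is ~0u at any time. A live register
    // has a valid kill index and DefIndices[Reg] == ~0u, while a dead
    // register has a valid def index and KillIndices[Reg] == ~0u. The
    // asserts throughout this file check that invariant.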

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        AllocatableSet(TRI->getAllocatableSet(MF)),
        HazardRec(HR) {}

    ~SchedulePostRATDList() {
      delete HazardRec;
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    void Schedule();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    void FixupKills(MachineBasicBlock *MBB);

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    void FinishBlock();

    /// GenerateLivenessForKills - If true then generate Def/Kill
    /// information for use in updating register kill flags. If false then
    /// generate Def/Kill information for anti-dependence breaking.
    bool GenerateLivenessForKills;

  private:
    void PrescanInstruction(MachineInstr *MI);
    void ScanInstruction(MachineInstr *MI, unsigned Count);
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    bool BreakAntiDependencies();
    unsigned findSuitableFreeRegister(unsigned AntiDepReg,
                                      unsigned LastNewReg,
                                      const TargetRegisterClass *);
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  DEBUG(errs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
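
  // Select the hazard recognizer: under -avoid-hazards, use the
  // itinerary-based ExactHazardRecognizer; otherwise use the simpler
  // SimpleHazardRecognizer.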
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR);

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
    // Initialize register live-range state for scheduling in this block.
    Scheduler.GenerateLivenessForKills = false;
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Initialize register live-range state again and update register kills.
    Scheduler.GenerateLivenessForKills = true;
    Scheduler.StartBlock(MBB);
    Scheduler.FixupKills(MBB);
    Scheduler.FinishBlock();
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer.
  HazardRec->Reset();

  // Clear out the register class data.
  std::fill(Classes, array_endof(Classes),
            static_cast<const TargetRegisterClass *>(0));

  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);
  std::fill(DefIndices, array_endof(DefIndices), BB->size());

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = ~0u;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = ~0u;
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = ~0u;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = ~0u;
        }
      }
  }

  if (!GenerateLivenessForKills) {
    // Consider callee-saved registers as live-out, since we're running after
    // prologue/epilogue insertion so there's no way to add additional
    // saved registers.
    //
    // TODO: there is a new method
    // MachineFrameInfo::getPristineRegs(MBB). It gives you a list of
    // CSRs that have not been saved when entering the MBB. The
    // remaining CSRs have been saved and can be treated like call
    // clobbered registers.
    for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
      unsigned Reg = *I;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = ~0u;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = ~0u;
      }
    }
  }
}

/// Schedule - Schedule the instruction range using list scheduling.
void SchedulePostRATDList::Schedule() {
  DEBUG(errs() << "********** List Scheduling **********\n");

  // Build the scheduling graph.
  BuildSchedGraph();

  if (EnableAntiDepBreaking) {
    if (BreakAntiDependencies()) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph();
    }
  }

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  assert(Count < InsertPosIndex && "Instruction index out of expected range!");

  // Any register which was defined within the previous scheduling region
  // may have been rescheduled and its lifetime may overlap with registers
  // in ways not reflected in our current liveness state. For each such
  // register, adjust the liveness state to be conservatively correct.
  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg)
    if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
      assert(KillIndices[Reg] == ~0u && "Clobbered register is live!");
      // Mark this register to be non-renamable.
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      // Move the def index to the end of the previous region, to reflect
      // that the def could theoretically have been scheduled at the end.
      DefIndices[Reg] = InsertPosIndex;
    }

  PrescanInstruction(MI);
  ScanInstruction(MI, Count);
}

/// FinishBlock - Clean up register live-range state.
void SchedulePostRATDList::FinishBlock() {
  RegRefs.clear();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
  SDep *Next = 0;
  unsigned NextDepth = 0;
  // Find the predecessor edge with the greatest depth.
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    SUnit *PredSU = P->getSUnit();
    unsigned PredLatency = P->getLatency();
    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
    // In the case of a latency tie, prefer an anti-dependency edge over
    // other types of edges.
    if (NextDepth < PredTotalLatency ||
        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
      NextDepth = PredTotalLatency;
      Next = &*P;
    }
  }
  return Next;
}
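
/// PrescanInstruction - Scan the register operands of MI and record, in
/// Classes and RegRefs, which registers are still candidates for
/// anti-dependence renaming at this point.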
void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI) {
  // Scan the register operands for this instruction and update
  // Classes and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    const TargetRegisterClass *NewRC = 0;

    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    // Now check for aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      // If an alias of the reg is used during the live range, give up.
      // Note that this allows us to skip checking if AntiDepReg
      // overlaps with any of the aliases, among other things.
      unsigned AliasReg = *Alias;
      if (Classes[AliasReg]) {
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      }
    }

    // If we're still willing to consider this register, note the reference.
    if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
      RegRefs.insert(std::make_pair(Reg, &MO));
  }
}
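
/// ScanInstruction - Update liveness information (KillIndices, DefIndices)
/// for MI's operands, proceeding bottom-up: defs end live ranges, and uses
/// that were not previously live begin them (i.e. they are kills).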
void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
                                           unsigned Count) {
  // Proceeding upwards, registers that are defed but not used in this
  // instruction are now dead.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isDef()) continue;
    // Ignore two-addr defs.
    if (MI->isRegTiedToUseOperand(i)) continue;

    DefIndices[Reg] = Count;
    KillIndices[Reg] = ~0u;
    assert(((KillIndices[Reg] == ~0u) !=
            (DefIndices[Reg] == ~0u)) &&
           "Kill and Def maps aren't consistent for Reg!");
    Classes[Reg] = 0;
    RegRefs.erase(Reg);
    // Repeat, for all subregs.
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      DefIndices[SubregReg] = Count;
      KillIndices[SubregReg] = ~0u;
      Classes[SubregReg] = 0;
      RegRefs.erase(SubregReg);
    }
    // Conservatively mark super-registers as unusable.
    for (const unsigned *Super = TRI->getSuperRegisters(Reg);
         *Super; ++Super) {
      unsigned SuperReg = *Super;
      Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
    }
  }
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isUse()) continue;

    const TargetRegisterClass *NewRC = 0;
    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    RegRefs.insert(std::make_pair(Reg, &MO));

    // It wasn't previously live but now it is; this is a kill.
    if (KillIndices[Reg] == ~0u) {
      KillIndices[Reg] = Count;
      DefIndices[Reg] = ~0u;
      assert(((KillIndices[Reg] == ~0u) !=
              (DefIndices[Reg] == ~0u)) &&
             "Kill and Def maps aren't consistent for Reg!");
    }
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      if (KillIndices[AliasReg] == ~0u) {
        KillIndices[AliasReg] = Count;
        DefIndices[AliasReg] = ~0u;
      }
    }
  }
}
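
/// findSuitableFreeRegister - Look for a free register in RC that can be
/// used to rename AntiDepReg, skipping LastNewReg so that we don't
/// immediately re-introduce the anti-dependence we just repaired with it.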
unsigned
SchedulePostRATDList::findSuitableFreeRegister(unsigned AntiDepReg,
                                               unsigned LastNewReg,
                                               const TargetRegisterClass *RC) {
  for (TargetRegisterClass::iterator R = RC->allocation_order_begin(MF),
       RE = RC->allocation_order_end(MF); R != RE; ++R) {
    unsigned NewReg = *R;
    // Don't replace a register with itself.
    if (NewReg == AntiDepReg) continue;
    // Don't replace a register with one that was recently used to repair
    // an anti-dependence with this AntiDepReg, because that would
    // re-introduce that anti-dependence.
    if (NewReg == LastNewReg) continue;
    // If NewReg is dead and NewReg's most recent def is not before
    // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
    assert(((KillIndices[AntiDepReg] == ~0u) !=
            (DefIndices[AntiDepReg] == ~0u)) &&
           "Kill and Def maps aren't consistent for AntiDepReg!");
    assert(((KillIndices[NewReg] == ~0u) !=
            (DefIndices[NewReg] == ~0u)) &&
           "Kill and Def maps aren't consistent for NewReg!");
    if (KillIndices[NewReg] != ~0u ||
        Classes[NewReg] == reinterpret_cast<TargetRegisterClass *>(-1) ||
        KillIndices[AntiDepReg] > DefIndices[NewReg])
      continue;
    return NewReg;
  }

  // No registers are free and available!
  return 0;
}

/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
bool SchedulePostRATDList::BreakAntiDependencies() {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (SUnits.empty()) return false;

  // Find the node at the bottom of the critical path.
  SUnit *Max = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[i];
    if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
      Max = SU;
  }

  DEBUG(errs() << "Critical path has total latency "
        << (Max->getDepth() + Max->Latency) << "\n");

  // Track progress along the critical path through the SUnit graph as we walk
  // the instructions.
  SUnit *CriticalPathSU = Max;
  MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();

  // Consider this pattern:
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  // There are three anti-dependencies here, and without special care,
  // we'd break all of them using the same register:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  // because at each anti-dependence, B is the first register that
  // isn't A which is free. This re-introduces anti-dependencies
  // at all but one of the original anti-dependencies that we were
  // trying to break. To avoid this, keep track of the most recent
  // register that each register was replaced with, and avoid
  // using it to repair an anti-dependence on the same register.
  // This lets us produce this:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   C = ...
  //   ... = C
  //   B = ...
  //   ... = B
  // This still has an anti-dependence on B, but at least it isn't on the
  // original critical path.
  //
  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Attempt to break anti-dependence edges on the critical path. Walk the
  // instructions from the bottom up, tracking information about liveness
  // as we go to help determine which registers are available.
  bool Changed = false;
  unsigned Count = InsertPosIndex - 1;
  for (MachineBasicBlock::iterator I = InsertPos, E = Begin;
       I != E; --Count) {
    MachineInstr *MI = --I;

    // After regalloc, IMPLICIT_DEF instructions aren't safe to treat as
    // dependence-breaking. In the case of an INSERT_SUBREG, the IMPLICIT_DEF
    // is left behind appearing to clobber the super-register, while the
    // subregister needs to remain live. So we just ignore them.
    if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
      continue;

    // Check if this instruction has a dependence on the critical path that
    // is an anti-dependence that we may be able to break. If it is, set
    // AntiDepReg to the non-zero register associated with the anti-dependence.
    //
    // We limit our attention to the critical path as a heuristic to avoid
    // breaking anti-dependence edges that aren't going to significantly
    // impact the overall schedule. There are a limited number of registers
    // and we want to save them for the important edges.
    //
    // TODO: Instructions with multiple defs could have multiple
    // anti-dependencies. The current code here only knows how to break one
    // edge per instruction. Note that we'd have to be able to break all of
    // the anti-dependencies in an instruction in order to be effective.
    unsigned AntiDepReg = 0;
    if (MI == CriticalPathMI) {
      if (SDep *Edge = CriticalPathStep(CriticalPathSU)) {
        SUnit *NextSU = Edge->getSUnit();

        // Only consider anti-dependence edges.
        if (Edge->getKind() == SDep::Anti) {
          AntiDepReg = Edge->getReg();
          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
          // Don't break anti-dependencies on non-allocatable registers.
          if (!AllocatableSet.test(AntiDepReg))
            AntiDepReg = 0;
          else {
            // If the SUnit has other dependencies on the SUnit that it
            // anti-depends on, don't bother breaking the anti-dependency
            // since those edges would prevent such units from being
            // scheduled past each other regardless.
            //
            // Also, if there are dependencies on other SUnits with the
            // same register as the anti-dependency, don't attempt to
            // break it.
            for (SUnit::pred_iterator P = CriticalPathSU->Preds.begin(),
                 PE = CriticalPathSU->Preds.end(); P != PE; ++P)
              if (P->getSUnit() == NextSU ?
                    (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
                    (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
                AntiDepReg = 0;
                break;
              }
          }
        }
        CriticalPathSU = NextSU;
        CriticalPathMI = CriticalPathSU->getInstr();
      } else {
        // We've reached the end of the critical path.
        CriticalPathSU = 0;
        CriticalPathMI = 0;
      }
    }

    PrescanInstruction(MI);

    // If this instruction has a use of AntiDepReg, breaking it
    // is invalid.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (MO.isUse() && AntiDepReg == Reg) {
        AntiDepReg = 0;
        break;
      }
    }

    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
    assert((AntiDepReg == 0 || RC != NULL) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      if (unsigned NewReg = findSuitableFreeRegister(AntiDepReg,
                                                     LastNewReg[AntiDepReg],
                                                     RC)) {
        DEBUG(errs() << "Breaking anti-dependence edge on "
              << TRI->getName(AntiDepReg)
              << " with " << RegRefs.count(AntiDepReg) << " references"
              << " using " << TRI->getName(NewReg) << "!\n");

        // Update the references to the old register to refer to the new
        // register.
        std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                  std::multimap<unsigned, MachineOperand *>::iterator>
           Range = RegRefs.equal_range(AntiDepReg);
        for (std::multimap<unsigned, MachineOperand *>::iterator
             Q = Range.first, QE = Range.second; Q != QE; ++Q)
          Q->second->setReg(NewReg);

        // We just went back in time and modified history; the
        // liveness information for the anti-dependence reg is now
        // inconsistent. Set the state as if it were dead.
        Classes[NewReg] = Classes[AntiDepReg];
        DefIndices[NewReg] = DefIndices[AntiDepReg];
        KillIndices[NewReg] = KillIndices[AntiDepReg];
        assert(((KillIndices[NewReg] == ~0u) !=
                (DefIndices[NewReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for NewReg!");

        Classes[AntiDepReg] = 0;
        DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
        KillIndices[AntiDepReg] = ~0u;
        assert(((KillIndices[AntiDepReg] == ~0u) !=
                (DefIndices[AntiDepReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");

        RegRefs.erase(AntiDepReg);

        LastNewReg[AntiDepReg] = NewReg;
        Changed = true;
      }
    }

    ScanInstruction(MI, Count);
  }

  return Changed;
}

/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(errs() << "Fixup kills for BB ID#" << MBB->getNumber() << '\n');
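
  // killedRegs tracks registers already examined as uses of the current
  // instruction, so that a kill flag is set on at most the first use of
  // each register.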
  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }

    // Examine all used registers and set kill flag. When a register
    // is used multiple times we only set the kill flag on the first
    // use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction.
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        MO.setIsKill(kill);
        DEBUG(errs() << "Fixed " << MO << " in ");
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();
  --SuccSU->NumPredsLeft;

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft < 0) {
    errs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    errs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  unsigned CycleInstCnt = 0;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(errs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            errs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

    SUnit *FoundSUnit = 0;

    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      ++CycleInstCnt;

      // If we are using the target-specific hazards, then don't
      // advance the cycle time just because we schedule a node. If
      // the target allows it we can schedule multiple nodes in the
      // same cycle.
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else {
      if (CycleInstCnt > 0) {
        DEBUG(errs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(errs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(errs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleInstCnt = 0;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler() {
  return new PostRAScheduler();
}