//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <climits>
using namespace llvm;

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       "Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);
static RegisterScheduler
  sourceListDAGScheduler("source",
                         "Similar to list-burr but schedules in source "
                         "order when possible",
                         createSourceListDAGScheduler);

static RegisterScheduler
  hybridListDAGScheduler("list-hybrid",
                         "Bottom-up register pressure aware list scheduling "
                         "which tries to balance latency and register pressure",
                         createHybridListDAGScheduler);

static RegisterScheduler
  ILPListDAGScheduler("list-ilp",
                      "Bottom-up register pressure aware list scheduling "
                      "which tries to balance ILP and register pressure",
                      createILPListDAGScheduler);
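
// Each RegisterScheduler entry above makes one variant of this scheduler
// selectable on the command line (e.g. -pre-RA-sched=list-ilp). All five
// share the ScheduleDAGRRList implementation below and differ only in
// scheduling direction and in the priority queue's comparison function.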

static cl::opt<bool> DisableSchedCycles(
  "disable-sched-cycles", cl::Hidden, cl::init(false),
  cl::desc("Disable cycle-level precision during preRA scheduling"));

// Temporary sched=list-ilp flags until the heuristics are robust.
static cl::opt<bool> DisableSchedRegPressure(
  "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
  cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
  "disable-sched-live-uses", cl::Hidden, cl::init(true),
  cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedVRegCycle(
  "disable-sched-vrcycle", cl::Hidden, cl::init(false),
  cl::desc("Disable virtual register cycle interference checks"));
static cl::opt<bool> DisableSchedStalls(
  "disable-sched-stalls", cl::Hidden, cl::init(true),
  cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
  "disable-sched-critical-path", cl::Hidden, cl::init(false),
  cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
  "disable-sched-height", cl::Hidden, cl::init(false),
  cl::desc("Disable scheduled-height priority in sched=list-ilp"));

static cl::opt<int> MaxReorderWindow(
  "max-sched-reorder", cl::Hidden, cl::init(6),
  cl::desc("Number of instructions to allow ahead of the critical path "
           "in sched=list-ilp"));

static cl::opt<unsigned> AvgIPC(
  "sched-avg-ipc", cl::Hidden, cl::init(1),
  cl::desc("Average inst/cycle when no target itinerary exists."));

// For sched=list-ilp, count the number of times each factor comes into play.
enum { FactPressureDiff, FactRegUses, FactStall, FactHeight, FactDepth,
       FactStatic, FactOther, NumFactors };

static const char *FactorName[NumFactors] =
  {"PressureDiff", "RegUses", "Stall", "Height", "Depth", "Static", "Other"};
static int FactorCount[NumFactors];
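
// FactorCount is reset at the start of Schedule() and dumped when scheduling
// completes, giving a per-block histogram of which heuristic decided each
// priority comparison in sched=list-ilp.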

//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
namespace {
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false
  /// if it is top-down.
  bool isBottomUp;

  /// NeedLatency - True if the scheduler will make use of latency information.
  bool NeedLatency;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// CurCycle - The current scheduler state corresponds to this cycle.
  unsigned CurCycle;

  /// MinAvailableCycle - Cycle of the soonest available instruction.
  unsigned MinAvailableCycle;

  /// IssueCount - Count of instructions issued in the current cycle.
  /// Currently valid only for bottom-up scheduling.
  unsigned IssueCount;

  /// LiveRegDefs - A set of physical registers and their definitions that are
  /// "live". These nodes must be scheduled before any other node that
  /// modifies the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<SUnit*> LiveRegGens;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

public:
  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
                    SchedulingPriorityQueue *availqueue,
                    CodeGenOpt::Level OptLevel)
    : ScheduleDAGSDNodes(mf), isBottomUp(availqueue->isBottomUp()),
      NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
      Topo(SUnits) {
    const TargetMachine &tm = mf.getTarget();
    if (DisableSchedCycles || !NeedLatency)
      HazardRec = new ScheduleHazardRecognizer();
    else
      HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
  }

  ~ScheduleDAGRRList() {
    delete HazardRec;
    delete AvailableQueue;
  }

  void Schedule();

  ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU
  /// will create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  bool isReady(SUnit *SU) {
    return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
      AvailableQueue->isReady(SU);
  }

  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU);
  void ReleaseSucc(SUnit *SU, const SDep *SuccEdge);
  void ReleaseSuccessors(SUnit *SU);
  void ReleasePending();
  void AdvanceToCycle(unsigned NextCycle);
  void AdvancePastStalls(SUnit *SU);
  void EmitNode(SUnit *SU);
  void ScheduleNodeBottomUp(SUnit*);
  void CapturePred(SDep *PredEdge);
  void UnscheduleNodeBottomUp(SUnit*);
  void RestoreHazardCheckerBottomUp();
  void BacktrackBottomUp(SUnit*, SUnit*);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);

  SUnit *PickNodeToScheduleBottomUp();
  void ListScheduleBottomUp();

  void ScheduleNodeTopDown(SUnit*);
  void ListScheduleTopDown();

  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// ForceUnitLatencies - Register-pressure-reducing scheduling doesn't
  /// need actual latency information but the hybrid scheduler does.
  bool ForceUnitLatencies() const {
    return !NeedLatency;
  }
};
} // end anonymous namespace

/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DEBUG(dbgs()
        << "********** List Scheduling BB#" << BB->getNumber()
        << " '" << BB->getName() << "' **********\n");

  for (int i = 0; i < NumFactors; ++i) {
    FactorCount[i] = 0;
  }

  CurCycle = 0;
  IssueCount = 0;
  MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
  NumLiveRegs = 0;
  LiveRegDefs.resize(TRI->getNumRegs(), NULL);
  LiveRegGens.resize(TRI->getNumRegs(), NULL);

  // Build the scheduling graph.
  BuildSchedGraph(NULL);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

  for (int i = 0; i < NumFactors; ++i) {
    DEBUG(dbgs() << FactorName[i] << "\t" << FactorCount[i] << "\n");
  }
  AvailableQueue->releaseState();
}

//===----------------------------------------------------------------------===//
//                    Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
  --PredSU->NumSuccsLeft;

  if (!ForceUnitLatencies()) {
    // Updating predecessor's height. This is now the cycle when the
    // predecessor can be scheduled without causing a pipeline stall.
    PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
  }

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;

    unsigned Height = PredSU->getHeight();
    if (Height < MinAvailableCycle)
      MinAvailableCycle = Height;

    if (isReady(PredSU)) {
      AvailableQueue->push(PredSU);
    }
    // CapturePred and others may have left the node in the pending queue;
    // avoid adding it twice.
    else if (!PredSU->isPending) {
      PredSU->isPending = true;
      PendingQueue.push_back(PredSU);
    }
  }
}

/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively creates one large live range
/// across a sequence of two-address nodes. This is important because the
/// entire chain must be scheduled together. Example:
///
/// flags = (3) add
/// flags = (2) addc flags
/// flags = (1) addc flags
///
/// results in
///
/// LiveRegDefs[flags] = 3
/// LiveRegGens[flags] = 1
///
/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
  // Bottom up: release predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // the use.
      SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
      assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
             "interference on register dependence");
      LiveRegDefs[I->getReg()] = I->getSUnit();
      if (!LiveRegGens[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegGens[I->getReg()] = SU;
      }
    }
  }
}

/// Check to see if any of the pending instructions are ready to issue. If
/// so, add them to the available queue.
void ScheduleDAGRRList::ReleasePending() {
  if (DisableSchedCycles) {
    assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
    return;
  }

  // If the available queue is empty, it is safe to reset MinAvailableCycle.
  if (AvailableQueue->empty())
    MinAvailableCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
    unsigned ReadyCycle =
      isBottomUp ? PendingQueue[i]->getHeight() : PendingQueue[i]->getDepth();
    if (ReadyCycle < MinAvailableCycle)
      MinAvailableCycle = ReadyCycle;

    if (PendingQueue[i]->isAvailable) {
      if (!isReady(PendingQueue[i]))
        continue;
      AvailableQueue->push(PendingQueue[i]);
    }
    PendingQueue[i]->isPending = false;
    PendingQueue[i] = PendingQueue.back();
    PendingQueue.pop_back();
    --i; --e;
  }
}

/// Move the scheduler state forward by the specified number of Cycles.
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
  if (NextCycle <= CurCycle)
    return;

  IssueCount = 0;
  AvailableQueue->setCurCycle(NextCycle);
  if (!HazardRec->isEnabled()) {
    // Bypass lots of virtual calls in case of long latency.
    CurCycle = NextCycle;
  }
  else {
    for (; CurCycle != NextCycle; ++CurCycle) {
      if (isBottomUp)
        HazardRec->RecedeCycle();
      else
        HazardRec->AdvanceCycle();
    }
  }
  // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
  // available Q to release pending nodes at least once before popping.
  ReleasePending();
}

/// Move the scheduler state forward until the specified node's dependents are
/// ready and can be scheduled with no resource conflicts.
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
  if (DisableSchedCycles)
    return;

  // FIXME: Nodes such as CopyFromReg probably should not advance the current
  // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
  // has predecessors the cycle will be advanced when they are scheduled.
  // But given the crude nature of modeling latency through such nodes, we
  // currently need to treat these nodes like real instructions.
  // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;

  unsigned ReadyCycle = isBottomUp ? SU->getHeight() : SU->getDepth();

  // Bump CurCycle to account for latency. We assume the latency of other
  // available instructions may be hidden by the stall (not a full pipe stall).
  // This updates the hazard recognizer's cycle before reserving resources for
  // this instruction.
  AdvanceToCycle(ReadyCycle);

  // Calls are scheduled in their preceding cycle, so don't conflict with
  // hazards from instructions after the call. EmitNode will reset the
  // scoreboard state before emitting the call.
  if (isBottomUp && SU->isCall)
    return;

  // FIXME: For resource conflicts in very long non-pipelined stages, we
  // should probably skip ahead here to avoid useless scoreboard checks.
  int Stalls = 0;
  while (true) {
    ScheduleHazardRecognizer::HazardType HT =
      HazardRec->getHazardType(SU, isBottomUp ? -Stalls : Stalls);

    if (HT == ScheduleHazardRecognizer::NoHazard)
      break;

    ++Stalls;
  }
  AdvanceToCycle(CurCycle + Stalls);
}
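
// Note on the stall loop above: bottom-up scheduling recedes through cycles
// rather than advancing, so the hazard recognizer is probed with a negative
// stall offset (-Stalls) when isBottomUp is true.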

/// Record this SUnit in the HazardRecognizer.
/// Does not update CurCycle.
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
  if (!HazardRec->isEnabled())
    return;

  // Check for phys reg copy.
  if (!SU->getNode())
    return;

  switch (SU->getNode()->getOpcode()) {
  default:
    assert(SU->getNode()->isMachineOpcode() &&
           "This target-independent node should not be scheduled.");
    break;
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor:
  case ISD::CopyToReg:
  case ISD::CopyFromReg:
  case ISD::EH_LABEL:
    // No-ops don't affect the scoreboard state. Copies are likely to be
    // removed.
    return;
  case ISD::INLINEASM:
    // For inline asm, clear the pipeline state.
    HazardRec->Reset();
    return;
  }
  if (isBottomUp && SU->isCall) {
    // Calls are scheduled with their preceding instructions. For bottom-up
    // scheduling, clear the pipeline state before emitting.
    HazardRec->Reset();
  }

  HazardRec->EmitInstruction(SU);

  if (!isBottomUp && SU->isCall) {
    HazardRec->Reset();
  }
}

static void resetVRegCycle(SUnit *SU);

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  if (CurCycle < SU->getHeight())
    DEBUG(dbgs() << "   Height [" << SU->getHeight()
          << "] pipeline stall!\n");

  // FIXME: Do not modify node height. It may interfere with
  // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
  // node, its ready cycle can aid heuristics, and after scheduling it can
  // indicate the scheduled cycle.
  SU->setHeightToAtLeast(CurCycle);

  // Reserve resources for the scheduled instruction.
  EmitNode(SU);

  Sequence.push_back(SU);

  AvailableQueue->ScheduledNode(SU);

  // If HazardRec is disabled, and each inst counts as one cycle, then
  // advance CurCycle before ReleasePredecessors to avoid useless pushes to
  // PendingQueue for schedulers that implement HasReadyFilter.
  if (!HazardRec->isEnabled() && AvgIPC < 2)
    AdvanceToCycle(CurCycle + 1);

  // Update liveness of predecessors before successors to avoid treating a
  // two-address node as a live range def.
  ReleasePredecessors(SU);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }

  resetVRegCycle(SU);

  SU->isScheduled = true;

  // Conditions under which the scheduler should eagerly advance the cycle:
  // (1) No available instructions
  // (2) All pipelines full, so available instructions must have hazards.
  //
  // If HazardRec is disabled, the cycle was pre-advanced before calling
  // ReleasePredecessors. In that case, IssueCount should remain 0.
  //
  // Check AvailableQueue after ReleasePredecessors in case of zero latency.
  if (HazardRec->isEnabled() || AvgIPC > 1) {
    if (SU->getNode() && SU->getNode()->isMachineOpcode())
      ++IssueCount;
    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
        || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
      AdvanceToCycle(CurCycle + 1);
  }
}

/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
  ++PredSU->NumSuccsLeft;
}

/// UnscheduleNodeBottomUp - Remove the node from the schedule. Update its
/// state and that of its predecessors to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
  DEBUG(SU->dump(this));

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]){
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      // This becomes the nearest def. Note that an earlier def may still be
      // pending if this is a two-address node.
      // Test for liveness before overwriting the entry; the old order
      // (assign, then test) could never count a newly live register.
      if (!LiveRegDefs[I->getReg()])
        ++NumLiveRegs;
      LiveRegDefs[I->getReg()] = SU;
      if (LiveRegGens[I->getReg()] == NULL ||
          I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
        LiveRegGens[I->getReg()] = I->getSUnit();
    }
  }
  if (SU->getHeight() < MinAvailableCycle)
    MinAvailableCycle = SU->getHeight();

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
    // Don't make available until backtracking is complete.
    SU->isPending = true;
    PendingQueue.push_back(SU);
  }
  else {
    AvailableQueue->push(SU);
  }
  AvailableQueue->UnscheduledNode(SU);
}

/// After backtracking, the hazard checker needs to be restored to a state
/// corresponding to the current cycle.
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
  HazardRec->Reset();

  unsigned LookAhead = std::min((unsigned)Sequence.size(),
                                HazardRec->getMaxLookAhead());
  if (LookAhead == 0)
    return;

  std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
  unsigned HazardCycle = (*I)->getHeight();
  for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
    SUnit *SU = *I;
    for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
      HazardRec->RecedeCycle();
    }
    EmitNode(SU);
  }
}

/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified by
/// BtSU's height in order to schedule a specific node.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
  SUnit *OldSU = Sequence.back();
  while (true) {
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    // FIXME: use ready cycle instead of height
    CurCycle = OldSU->getHeight();
    UnscheduleNodeBottomUp(OldSU);
    AvailableQueue->setCurCycle(CurCycle);
    if (OldSU == BtSU)
      break;
    OldSU = Sequence.back();
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  RestoreHazardCheckerBottomUp();

  ReleasePending();

  ++NumBacktracks;
}

static bool isOperandOf(const SUnit *SU, SDNode *N) {
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isOperandOf(N))
      return true;
  }
  return false;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  if (SU->getNode()->getGluedNode())
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location and producing the same type of value
    // but it has different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      InitNumRegDefsLeft(LoadSU);
      ComputeLatency(LoadSU);
    }

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (TID.isCommutable())
      NewSU->isCommutable = true;

    InitNumRegDefsLeft(NewSU);
    ComputeLatency(NewSU);

    // Record all the edges to and from the old SU, by category.
    SmallVector<SDep, 4> ChainPreds;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPreds.push_back(*I);
      else if (isOperandOf(I->getSUnit(), LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
      // Balance register pressure.
      if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
          && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
        --NewSU->NumRegDefsLeft;
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    // Add a data dependency to reflect that NewSU reads the value defined
    // by LoadSU.
    AddPred(NewSU, SDep(LoadSU, SDep::Data, LoadSU->Latency));

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "    Duplicating SU #" << SU->NodeNum << "\n");
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}
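
// In broad strokes, CopyAndMoveSuccessors tries two strategies: if the node
// folds a load (it produces an MVT::Other chain value), unfold it into a
// separate load plus operation; otherwise clone the node outright and move
// the already-scheduled successors onto the clone. A NULL return tells the
// caller (PickNodeToScheduleBottomUp) to fall back to cross-class copies.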

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                              const TargetRegisterClass *DestRC,
                                              const TargetRegisterClass *SrcRC,
                                              SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
    else {
      // Avoid scheduling the def-side copy before other successors. Otherwise
      // we could introduce another physreg interference on the copy and
      // continue inserting copies indefinitely.
      SDep D(CopyFromSU, SDep::Order, /*Latency=*/0,
             /*Reg=*/0, /*isNormalMemory=*/false,
             /*isMustAlias=*/false, /*isArtificial=*/true);
      AddPred(SuccSU, D);
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
  AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
  assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = TID.getNumDefs();
  for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}
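
// The index arithmetic above relies on machine SDNodes listing their explicit
// def values first, followed by one value per implicit def in TargetInstrDesc
// order, so NumRes ends up as the result number corresponding to Reg.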

/// CheckForLiveRegDef - Record in LRegs (and RegAdded) any "live" registers
/// that the specified register def of the specified SUnit would clobber.
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVector<unsigned, 4> &LRegs,
                               const TargetRegisterInfo *TRI) {
  // Walk Reg and all of its aliases; indexing with the Reg parameter here
  // would silently skip the alias check.
  for (const unsigned *AliasI = TRI->getOverlaps(Reg); *AliasI; ++AliasI) {
    // Check if Reg is live.
    if (!LiveRegDefs[*AliasI]) continue;

    // Allow multiple uses of the same def.
    if (LiveRegDefs[*AliasI] == SU) continue;

    // Add Reg to the set of interfering live regs.
    if (RegAdded.insert(*AliasI))
      LRegs.push_back(*AliasI);
  }
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  //
  // If SU is the currently live definition of the same register that it uses,
  // then we are free to schedule it.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;
    const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
    if (!TID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
  }

  return !LRegs.empty();
}
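
// When this returns true, LRegs names every interfering physical register;
// PickNodeToScheduleBottomUp stashes that set in LRegsMap and sets the node
// aside until the interference is resolved.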

/// Return a node that can be scheduled in this cycle. Requirements:
/// (1) Ready: latency has been satisfied
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
  SmallVector<SUnit*, 4> Interferences;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;

  SUnit *CurSU = AvailableQueue->pop();
  while (CurSU) {
    SmallVector<unsigned, 4> LRegs;
    if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
      break;
    LRegsMap.insert(std::make_pair(CurSU, LRegs));

    CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
    Interferences.push_back(CurSU);
    CurSU = AvailableQueue->pop();
  }
  if (CurSU) {
    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
      Interferences[i]->isPending = false;
      assert(Interferences[i]->isAvailable && "must still be available");
      AvailableQueue->push(Interferences[i]);
    }
    return CurSU;
  }

  // All candidates are delayed due to live physical reg dependencies.
  // Try backtracking, code duplication, or inserting cross class copies
  // to resolve it.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    SUnit *TrySU = Interferences[i];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

    // Try unscheduling up to the point where it's safe to schedule
    // this node.
    SUnit *BtSU = NULL;
    unsigned LiveCycle = UINT_MAX;
    for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
      unsigned Reg = LRegs[j];
      if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
        BtSU = LiveRegGens[Reg];
        LiveCycle = BtSU->getHeight();
      }
    }
    if (!WillCreateCycle(TrySU, BtSU)) {
      BacktrackBottomUp(TrySU, BtSU);

      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (BtSU->isAvailable) {
        BtSU->isAvailable = false;
        if (!BtSU->isPending)
          AvailableQueue->remove(BtSU);
      }
      AddPred(TrySU, SDep(BtSU, SDep::Order, /*Latency=*/1,
                          /*Reg=*/0, /*isNormalMemory=*/false,
                          /*isMustAlias=*/false, /*isArtificial=*/true));

      // If one or more successors has been unscheduled, then the current
      // node is no longer available. Schedule a successor that's now
      // available instead.
      if (!TrySU->isAvailable) {
        CurSU = AvailableQueue->pop();
      }
      else {
        CurSU = TrySU;
        TrySU->isPending = false;
        Interferences.erase(Interferences.begin()+i);
      }
      break;
    }
  }

  if (!CurSU) {
    // Can't backtrack. If it's too expensive to copy the value, then try
    // to duplicate the nodes that produce these "too expensive to copy"
    // values to break the dependency. In case even that doesn't work,
    // insert cross class copies.
    // If it's not too expensive, i.e. cost != -1, issue copies.
    SUnit *TrySU = Interferences[0];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
    assert(LRegs.size() == 1 && "Can't handle this yet!");
    unsigned Reg = LRegs[0];
    SUnit *LRDef = LiveRegDefs[Reg];
    EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
    const TargetRegisterClass *RC =
      TRI->getMinimalPhysRegClass(Reg, VT);
    const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

    // If the cross copy register class is the same as RC, then it must be
    // possible to copy the value directly. Do not try to duplicate the def.
    // If the cross copy register class is not the same as RC, then it's
    // possible to copy the value but it requires cross register class copies
    // and it is expensive.
    // If the cross copy register class is null, then it's not possible to
    // copy the value at all.
    SUnit *NewDef = 0;
    if (DestRC != RC) {
      NewDef = CopyAndMoveSuccessors(LRDef);
      if (!DestRC && !NewDef)
        report_fatal_error("Can't handle live physical register dependency!");
    }
    if (!NewDef) {
      // Issue copies, these can be expensive cross register class copies.
      SmallVector<SUnit*, 2> Copies;
      InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
      DEBUG(dbgs() << "    Adding an edge from SU #" << TrySU->NodeNum
            << " to SU #" << Copies.front()->NodeNum << "\n");
      AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
                          /*Reg=*/0, /*isNormalMemory=*/false,
                          /*isMustAlias=*/false,
                          /*isArtificial=*/true));
      NewDef = Copies.back();
    }

    DEBUG(dbgs() << "    Adding an edge from SU #" << NewDef->NodeNum
          << " to SU #" << TrySU->NodeNum << "\n");
    LiveRegDefs[Reg] = NewDef;
    AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
                         /*Reg=*/0, /*isNormalMemory=*/false,
                         /*isMustAlias=*/false,
                         /*isArtificial=*/true));
    TrySU->isAvailable = false;
    CurSU = NewDef;
  }

  assert(CurSU && "Unable to resolve live physical register dependencies!");

  // Add the nodes that aren't ready back onto the available list.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    Interferences[i]->isPending = false;
    // May no longer be available due to backtracking.
    if (Interferences[i]->isAvailable) {
      AvailableQueue->push(Interferences[i]);
    }
  }
  return CurSU;
}
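
// The resolution order above is deliberately cheapest-first: backtrack and
// reschedule if that cannot create a cycle, otherwise duplicate the def with
// CopyAndMoveSuccessors, and only as a last resort materialize cross register
// class copies with InsertCopiesAndMoveSuccs.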

/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    DEBUG(dbgs() << "\nExamining Available:\n";
          AvailableQueue->dump(this));

    // Pick the best node to schedule taking all constraints into
    // account.
    SUnit *SU = PickNodeToScheduleBottomUp();

    AdvancePastStalls(SU);

    ScheduleNodeBottomUp(SU);

    while (AvailableQueue->empty() && !PendingQueue.empty()) {
      // Advance the cycle to free resources. Skip ahead to the next ready SU.
      assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
      AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
    }
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifySchedule(isBottomUp);
#endif
}

//===----------------------------------------------------------------------===//
//                         Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleaseSucc(SUnit *SU, const SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
  --SuccSU->NumPredsLeft;

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) {
    SuccSU->isAvailable = true;
    AvailableQueue->push(SuccSU);
  }
}

void ScheduleDAGRRList::ReleaseSuccessors(SUnit *SU) {
  // Top down: release successors.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    assert(!I->isAssignedRegDep() &&
           "The list-tdrr scheduler doesn't yet support physreg dependencies!");

    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);
  Sequence.push_back(SU);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue->ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void ScheduleDAGRRList::ListScheduleTopDown() {
  AvailableQueue->setCurCycle(CurCycle);

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // All leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue->push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    SUnit *CurSU = AvailableQueue->pop();

    if (CurSU)
      ScheduleNodeTopDown(CurSU);
    ++CurCycle;
    AvailableQueue->setCurCycle(CurCycle);
  }

#ifndef NDEBUG
  VerifySchedule(isBottomUp);
#endif
}

//===----------------------------------------------------------------------===//
//                RegReductionPriorityQueue Definition
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
// to reduce register pressure.
//
namespace {
class RegReductionPQBase;

struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
  bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
};

// bu_ls_rr_sort - Priority function for bottom up register pressure
// reduction scheduler.
struct bu_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
  bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// td_ls_rr_sort - Priority function for top down register pressure reduction
// scheduler.
struct td_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = false,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  td_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
  td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

  bool operator()(const SUnit* left, const SUnit* right) const;
};

// src_ls_rr_sort - Priority function for source order scheduler.
struct src_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  src_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  src_ls_rr_sort(const src_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// hybrid_ls_rr_sort - Priority function for hybrid scheduler.
struct hybrid_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  hybrid_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};

// ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
// scheduler.
struct ilp_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  ilp_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};

class RegReductionPQBase : public SchedulingPriorityQueue {
protected:
  std::vector<SUnit*> Queue;
  unsigned CurQueueId;
  bool TracksRegPressure;

  // SUnits - The SUnits for the current graph.
  std::vector<SUnit> *SUnits;

  MachineFunction &MF;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const TargetLowering *TLI;
  ScheduleDAGRRList *scheduleDAG;

  // SethiUllmanNumbers - The SethiUllman number for each node.
  std::vector<unsigned> SethiUllmanNumbers;

  /// RegPressure - Tracking current reg pressure per register class.
  std::vector<unsigned> RegPressure;

  /// RegLimit - Tracking the number of allocatable registers per register
  /// class.
  std::vector<unsigned> RegLimit;

public:
  RegReductionPQBase(MachineFunction &mf,
                     bool hasReadyFilter,
                     bool tracksrp,
                     const TargetInstrInfo *tii,
                     const TargetRegisterInfo *tri,
                     const TargetLowering *tli)
    : SchedulingPriorityQueue(hasReadyFilter),
      CurQueueId(0), TracksRegPressure(tracksrp),
      MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
    if (TracksRegPressure) {
      unsigned NumRC = TRI->getNumRegClasses();
      RegLimit.resize(NumRC);
      RegPressure.resize(NumRC);
      std::fill(RegLimit.begin(), RegLimit.end(), 0);
      std::fill(RegPressure.begin(), RegPressure.end(), 0);
      for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
             E = TRI->regclass_end(); I != E; ++I)
        RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
    }
  }

  void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
    scheduleDAG = scheduleDag;
  }

  ScheduleHazardRecognizer* getHazardRec() {
    return scheduleDAG->getHazardRec();
  }

  void initNodes(std::vector<SUnit> &sunits);

  void addNode(const SUnit *SU);

  void updateNode(const SUnit *SU);

  void releaseState() {
    SUnits = 0;
    SethiUllmanNumbers.clear();
    std::fill(RegPressure.begin(), RegPressure.end(), 0);
  }

  unsigned getNodePriority(const SUnit *SU) const;

  unsigned getNodeOrdering(const SUnit *SU) const {
    if (!SU->getNode()) return 0;

    return scheduleDAG->DAG->GetOrdering(SU->getNode());
  }

  bool empty() const { return Queue.empty(); }

  void push(SUnit *U) {
    assert(!U->NodeQueueId && "Node in the queue already");
    U->NodeQueueId = ++CurQueueId;
    Queue.push_back(U);
  }

  void remove(SUnit *SU) {
    assert(!Queue.empty() && "Queue is empty!");
    assert(SU->NodeQueueId != 0 && "Not in queue!");
    std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
                                                 SU);
    if (I != prior(Queue.end()))
      std::swap(*I, Queue.back());
    Queue.pop_back();
    SU->NodeQueueId = 0;
  }

  bool tracksRegPressure() const { return TracksRegPressure; }

  void dumpRegPressure() const;

  bool HighRegPressure(const SUnit *SU) const;

  bool MayReduceRegPressure(SUnit *SU) const;

  int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;

  void ScheduledNode(SUnit *SU);

  void UnscheduledNode(SUnit *SU);

protected:
  bool canClobber(const SUnit *SU, const SUnit *Op);
  void AddPseudoTwoAddrDeps();
  void PrescheduleNodesWithMultipleUses();
  void CalculateSethiUllmanNumbers();
};

template<class SF>
class RegReductionPriorityQueue : public RegReductionPQBase {
  static SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker) {
    std::vector<SUnit *>::iterator Best = Q.begin();
    for (std::vector<SUnit *>::iterator I = llvm::next(Q.begin()),
           E = Q.end(); I != E; ++I)
      if (Picker(*Best, *I))
        Best = I;
    SUnit *V = *Best;
    if (Best != prior(Q.end()))
      std::swap(*Best, Q.back());
    Q.pop_back();
    return V;
  }

  SF Picker;

public:
  RegReductionPriorityQueue(MachineFunction &mf,
                            bool tracksrp,
                            const TargetInstrInfo *tii,
                            const TargetRegisterInfo *tri,
                            const TargetLowering *tli)
    : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, tii, tri, tli),
      Picker(this) {}

  bool isBottomUp() const { return SF::IsBottomUp; }

  bool isReady(SUnit *U) const {
    return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
  }

  SUnit *pop() {
    if (Queue.empty()) return NULL;

    SUnit *V = popFromQueue(Queue, Picker);
    V->NodeQueueId = 0;
    return V;
  }

  void dump(ScheduleDAG *DAG) const {
    // Emulate pop() without clobbering NodeQueueIds.
    std::vector<SUnit*> DumpQueue = Queue;
    SF DumpPicker = Picker;
    while (!DumpQueue.empty()) {
      SUnit *SU = popFromQueue(DumpQueue, DumpPicker);
      if (isBottomUp())
        dbgs() << "Height " << SU->getHeight() << ": ";
      else
        dbgs() << "Depth " << SU->getDepth() << ": ";
      SU->dump(DAG);
    }
  }
};

typedef RegReductionPriorityQueue<bu_ls_rr_sort>
BURegReductionPriorityQueue;

typedef RegReductionPriorityQueue<td_ls_rr_sort>
TDRegReductionPriorityQueue;

typedef RegReductionPriorityQueue<src_ls_rr_sort>
SrcRegReductionPriorityQueue;

typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
HybridBURRPriorityQueue;

typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
ILPBURRPriorityQueue;
} // end anonymous namespace

//===----------------------------------------------------------------------===//
//           Static Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//

/// CalcNodeSethiUllmanNumber - Compute Sethi Ullman number.
/// Smaller number is the higher priority.
static unsigned
CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    } else if (PredSethiUllman == SethiUllmanNumber)
      ++Extra;
  }

  SethiUllmanNumber += Extra;
  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}
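
// Worked example: nodes with no non-chain preds get number 1. For the tree
// (a+b)+(c+d), each inner add sees two tied predecessors of 1, so Extra
// becomes 1 and its number is 2; the outer add sees two tied 2s and gets 3.
// A straight-line chain of single-operand ops keeps its operand's number,
// matching the classic Sethi-Ullman count of registers needed to evaluate
// the subtree.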

/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
void RegReductionPQBase::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}

void RegReductionPQBase::addNode(const SUnit *SU) {
  unsigned SUSize = SethiUllmanNumbers.size();
  if (SUnits->size() > SUSize)
    SethiUllmanNumbers.resize(SUSize*2, 0);
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}

void RegReductionPQBase::updateNode(const SUnit *SU) {
  SethiUllmanNumbers[SU->NodeNum] = 0;
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}

// Lower priority means schedule further down. For bottom-up scheduling, lower
// priority SUs are scheduled before higher priority SUs.
unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
  assert(SU->NodeNum < SethiUllmanNumbers.size());
  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // CopyToReg should be close to its uses to facilitate coalescing and
    // avoid spilling.
    return 0;
  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG ||
      Opc == TargetOpcode::INSERT_SUBREG)
    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
    // close to their uses to facilitate coalescing.
    return 0;
  if (SU->NumSuccs == 0 && SU->NumPreds != 0)
    // If SU does not have a register use, i.e. it doesn't produce a value
    // that would be consumed (e.g. store), then it terminates a chain of
    // computation. Give it a large SethiUllman number so it will be
    // scheduled right before its predecessors and doesn't lengthen
    // their live ranges.
    return 0xffff;
  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
    // If SU does not have a register def, schedule it close to its uses
    // because it does not lengthen any live ranges.
    return 0;
  return SethiUllmanNumbers[SU->NodeNum];
}
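
// In this scheme a priority of 0 keeps a node next to its uses (copies and
// subreg operations, to help coalescing), 0xffff pushes chain-terminating
// nodes out ahead of their predecessors, and everything else competes on its
// Sethi-Ullman number.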

//===----------------------------------------------------------------------===//
//                     Register Pressure Tracking
//===----------------------------------------------------------------------===//

void RegReductionPQBase::dumpRegPressure() const {
  for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
         E = TRI->regclass_end(); I != E; ++I) {
    const TargetRegisterClass *RC = *I;
    unsigned Id = RC->getID();
    unsigned RP = RegPressure[Id];
    if (!RP) continue;
    DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
          << '\n');
  }
}

bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
  if (!TLI)
    return false;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      continue;
    }
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance()) {
      EVT VT = RegDefPos.GetValue();
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      unsigned Cost = TLI->getRepRegClassCostFor(VT);
      if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
        return true;
    }
  }
  return false;
}

bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
  const SDNode *N = SU->getNode();

  if (!N->isMachineOpcode() || !SU->NumSuccs)
    return false;

  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  for (unsigned i = 0; i != NumDefs; ++i) {
    EVT VT = N->getValueType(i);
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    if (RegPressure[RCId] >= RegLimit[RCId])
      return true;
  }
  return false;
}

// Compute the register pressure contribution by this instruction by counting
// up for uses that are not live and down for defs. Only count register classes
// that are already under high pressure. As a side effect, compute the number of
// uses of registers that are already live.
//
// FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
// so could probably be factored.
int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
  LiveUses = 0;
  int PDiff = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      if (PredSU->getNode()->isMachineOpcode())
        ++LiveUses;
      continue;
    }
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance()) {
      EVT VT = RegDefPos.GetValue();
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      if (RegPressure[RCId] >= RegLimit[RCId])
        ++PDiff;
    }
  }
  const SDNode *N = SU->getNode();

  if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
    return PDiff;

  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  for (unsigned i = 0; i != NumDefs; ++i) {
    EVT VT = N->getValueType(i);
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    if (RegPressure[RCId] >= RegLimit[RCId])
      --PDiff;
  }
  return PDiff;
}
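
// Sign convention: scheduling SU bottom-up makes its operands live and its
// own defs dead, so each not-yet-live input in an over-limit class counts +1
// and each used def in an over-limit class counts -1. A positive PDiff
// therefore flags a node that would worsen pressure where it already hurts.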
void RegReductionPQBase::ScheduledNode(SUnit *SU) {
  if (!TracksRegPressure)
    return;

  if (!SU->getNode())
    return;

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      continue;
    }
    // FIXME: The ScheduleDAG currently loses information about which of a
    // node's values is consumed by each dependence. Consequently, if the node
    // defines multiple register classes, we don't know which to pressurize
    // here. Instead the following loop consumes the register defs in an
    // arbitrary order. At least it handles the common case of clustered loads
    // to the same class. For precise liveness, each SDep needs to indicate the
    // result number. But that tightly couples the ScheduleDAG with the
    // SelectionDAG making updates tricky. A simpler hack would be to attach a
    // value type or register class to SDep.
    //
    // The most important aspect of register tracking is balancing the increase
    // here with the reduction further below. Note that this SU may use multiple
    // defs in PredSU. They can't be determined here, but we've already
    // compensated by reducing NumRegDefsLeft in PredSU during
    // ScheduleDAGSDNodes::AddSchedEdges.
    --PredSU->NumRegDefsLeft;
    unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
      if (SkipRegDefs)
        continue;
      EVT VT = RegDefPos.GetValue();
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      break;
    }
  }

  // We should have this assert, but there may be dead SDNodes that never
  // materialize as SUnits, so they don't appear to generate liveness.
  //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
  int SkipRegDefs = (int)SU->NumRegDefsLeft;
  for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
       RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
    if (SkipRegDefs > 0)
      continue;
    EVT VT = RegDefPos.GetValue();
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT)) {
      // Register pressure tracking is imprecise. This can happen. But we try
      // hard not to let it happen because it likely results in poor scheduling.
      DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") has too many regdefs\n");
      RegPressure[RCId] = 0;
    }
    else {
      RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
    }
  }
  dumpRegPressure();
}

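// UnscheduledNode below is the inverse bookkeeping operation. It runs when the
// scheduler backtracks over an already-scheduled node, so pressure added or
// removed here must be undone there to keep the counters consistent.
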
void RegReductionPQBase::UnscheduledNode(SUnit *SU) {
  if (!TracksRegPressure)
    return;

  const SDNode *N = SU->getNode();
  if (!N) return;

  if (!N->isMachineOpcode()) {
    if (N->getOpcode() != ISD::CopyToReg)
      return;
  } else {
    unsigned Opc = N->getMachineOpcode();
    if (Opc == TargetOpcode::EXTRACT_SUBREG ||
        Opc == TargetOpcode::INSERT_SUBREG ||
        Opc == TargetOpcode::SUBREG_TO_REG ||
        Opc == TargetOpcode::REG_SEQUENCE ||
        Opc == TargetOpcode::IMPLICIT_DEF)
      return;
  }

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
    // counts data deps.
    if (PredSU->NumSuccsLeft != PredSU->Succs.size())
      continue;
    const SDNode *PN = PredSU->getNode();
    if (!PN->isMachineOpcode()) {
      if (PN->getOpcode() == ISD::CopyFromReg) {
        EVT VT = PN->getValueType(0);
        unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
        RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      }
      continue;
    }
    unsigned POpc = PN->getMachineOpcode();
    if (POpc == TargetOpcode::IMPLICIT_DEF)
      continue;
    if (POpc == TargetOpcode::EXTRACT_SUBREG) {
      EVT VT = PN->getOperand(0).getValueType();
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      continue;
    } else if (POpc == TargetOpcode::INSERT_SUBREG ||
               POpc == TargetOpcode::SUBREG_TO_REG) {
      EVT VT = PN->getValueType(0);
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      continue;
    }
    unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
    for (unsigned i = 0; i != NumDefs; ++i) {
      EVT VT = PN->getValueType(i);
      if (!PN->hasAnyUseOfValue(i))
        continue;
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
        // Register pressure tracking is imprecise. This can happen.
        RegPressure[RCId] = 0;
      else
        RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
    }
  }

  // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
  // may transfer data dependencies to CopyToReg.
  if (SU->NumSuccs && N->isMachineOpcode()) {
    unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
      EVT VT = N->getValueType(i);
      if (VT == MVT::Glue || VT == MVT::Other)
        continue;
      if (!N->hasAnyUseOfValue(i))
        continue;
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
    }
  }

  dumpRegPressure();
}

//===----------------------------------------------------------------------===//
//           Dynamic Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//

/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
static unsigned closestSucc(const SUnit *SU) {
  unsigned MaxHeight = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain succs
    unsigned Height = I->getSUnit()->getHeight();
    // If there are a bunch of CopyToRegs stacked up, they should be considered
    // to be at the same position.
    if (I->getSUnit()->getNode() &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
      Height = closestSucc(I->getSUnit())+1;
    if (Height > MaxHeight)
      MaxHeight = Height;
  }
  return MaxHeight;
}

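// For example (hypothetical heights): if SU's only data successor is a
// CopyToReg at height 3 whose own closest successor is at height 5, the
// recursion reports 5 + 1 = 6 rather than 3, treating the stacked copies as
// a single position.
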
/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
/// for scratch registers, i.e. number of data dependencies.
static unsigned calcMaxScratches(const SUnit *SU) {
  unsigned Scratches = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    Scratches++;
  }
  return Scratches;
}

/// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
/// CopyFromReg from a virtual register.
static bool hasOnlyLiveInOpers(const SUnit *SU) {
  bool RetVal = false;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *PredSU = I->getSUnit();
    if (PredSU->getNode() &&
        PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
      unsigned Reg =
        cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        RetVal = true;
        continue;
      }
    }
    return false;
  }
  return RetVal;
}

/// hasOnlyLiveOutUses - Return true if SU has only value successors that are
/// CopyToReg to a virtual register. This SU def is probably a liveout and
/// it has no other use. It should be scheduled closer to the terminator.
static bool hasOnlyLiveOutUses(const SUnit *SU) {
  bool RetVal = false;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *SuccSU = I->getSUnit();
    if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
      unsigned Reg =
        cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        RetVal = true;
        continue;
      }
    }
    return false;
  }
  return RetVal;
}

// Set isVRegCycle for a node with only live in opers and live out uses. Also
// set isVRegCycle for its CopyFromReg operands.
//
// This is only relevant for single-block loops, in which case the VRegCycle
// node is likely an induction variable whose operand and target virtual
// registers should be coalesced (e.g. pre/post increment values). Setting the
// isVRegCycle flag helps the scheduler prioritize other uses of the same
// CopyFromReg so that this node becomes the virtual register "kill". This
// avoids interference between the values live in and out of the block and
// eliminates a copy inside the loop.
static void initVRegCycle(SUnit *SU) {
  if (DisableSchedVRegCycle)
    return;

  if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
    return;

  DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");

  SU->isVRegCycle = true;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    I->getSUnit()->isVRegCycle = true;
  }
}

// After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of
// CopyFromReg operands. We should no longer penalize other uses of this VReg.
static void resetVRegCycle(SUnit *SU) {
  if (!SU->isVRegCycle)
    return;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    if (PredSU->isVRegCycle) {
      assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
             "VRegCycle def must be CopyFromReg");
      I->getSUnit()->isVRegCycle = 0;
    }
  }
}

// Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This
// means a node that defines the VRegCycle has not been scheduled yet.
static bool hasVRegCycleUse(const SUnit *SU) {
  // If this SU also defines the VReg, don't hoist it as a "use".
  if (SU->isVRegCycle)
    return false;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    if (I->getSUnit()->isVRegCycle &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
      DEBUG(dbgs() << "  VReg cycle use: SU (" << SU->NodeNum << ")\n");
      return true;
    }
  }
  return false;
}

// Check for either a dependence (latency) or resource (hazard) stall.
//
// Note: The ScheduleHazardRecognizer interface requires a non-const SU.
static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
  if ((int)SPQ->getCurCycle() < Height) return true;
  if (SPQ->getHazardRec()->getHazardType(SU, 0)
      != ScheduleHazardRecognizer::NoHazard)
    return true;
  return false;
}

// Return -1 if left has higher priority, 1 if right has higher priority.
// Return 0 if latency-based priority is equivalent.
static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
                            RegReductionPQBase *SPQ) {
  // Scheduling an instruction that uses a VReg whose postincrement has not yet
  // been scheduled will induce a copy. Model this as an extra cycle of latency.
  int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
  int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
  int LHeight = (int)left->getHeight() + LPenalty;
  int RHeight = (int)right->getHeight() + RPenalty;

  bool LStall = (!checkPref || left->SchedulingPref == Sched::Latency) &&
    BUHasStall(left, LHeight, SPQ);
  bool RStall = (!checkPref || right->SchedulingPref == Sched::Latency) &&
    BUHasStall(right, RHeight, SPQ);

  // If scheduling one of the nodes will cause a pipeline stall, delay it.
  // If scheduling either one will cause a pipeline stall, sort them according
  // to their height.
  if (LStall) {
    if (!RStall) {
      DEBUG(++FactorCount[FactStall]);
      return 1;
    }
    if (LHeight != RHeight) {
      DEBUG(++FactorCount[FactStall]);
      return LHeight > RHeight ? 1 : -1;
    }
  } else if (RStall) {
    DEBUG(++FactorCount[FactStall]);
    return -1;
  }

  // If either node is scheduling for latency, sort them by height/depth
  // and latency.
  if (!checkPref || (left->SchedulingPref == Sched::Latency ||
                     right->SchedulingPref == Sched::Latency)) {
    if (DisableSchedCycles) {
      if (LHeight != RHeight) {
        DEBUG(++FactorCount[FactHeight]);
        return LHeight > RHeight ? 1 : -1;
      }
    }
    else {
      // If neither instruction stalls (!LStall && !RStall), the heights are
      // already covered, so only depth matters. We also reach this point if
      // both stall but have the same height.
      int LDepth = left->getDepth() - LPenalty;
      int RDepth = right->getDepth() - RPenalty;
      if (LDepth != RDepth) {
        DEBUG(++FactorCount[FactDepth]);
        DEBUG(dbgs() << "  Comparing latency of SU (" << left->NodeNum
              << ") depth " << LDepth << " vs SU (" << right->NodeNum
              << ") depth " << RDepth << "\n");
        return LDepth < RDepth ? 1 : -1;
      }
    }
    if (left->Latency != right->Latency) {
      DEBUG(++FactorCount[FactOther]);
      return left->Latency > right->Latency ? 1 : -1;
    }
  }
  return 0;
}

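// To summarize the cascade above: stalls dominate, then height (when cycle
// tracking is disabled) or depth, then raw instruction latency; a result of 0
// lets the caller fall through to its remaining heuristics.
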
static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  if (LPriority != RPriority) {
    DEBUG(++FactorCount[FactStatic]);
    return LPriority > RPriority;
  }
  else if (LPriority == 0) {
    // Schedule zero-latency TokenFactor below any other special
    // nodes. The alternative may be to avoid artificially boosting the
    // TokenFactor's height when it is scheduled, but we currently rely on an
    // instruction's final height to equal the cycle in which it is scheduled,
    // so heights are monotonically increasing.
    unsigned LOpc = left->getNode() ? left->getNode()->getOpcode() : 0;
    unsigned ROpc = right->getNode() ? right->getNode()->getOpcode() : 0;
    if (LOpc == ISD::TokenFactor)
      return false;
    if (ROpc == ISD::TokenFactor)
      return true;
  }

  // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
  // e.g.
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // and the following instructions are both ready.
  // t2 = op c3
  // t4 = op c4
  //
  // Then schedule t2 = op first.
  // i.e.
  // t4 = op c4
  // t2 = op c3
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // This creates more short live intervals.
  unsigned LDist = closestSucc(left);
  unsigned RDist = closestSucc(right);
  if (LDist != RDist) {
    DEBUG(++FactorCount[FactOther]);
    return LDist < RDist;
  }

  // How many registers become live when the node is scheduled.
  unsigned LScratch = calcMaxScratches(left);
  unsigned RScratch = calcMaxScratches(right);
  if (LScratch != RScratch) {
    DEBUG(++FactorCount[FactOther]);
    return LScratch > RScratch;
  }

  if (!DisableSchedCycles) {
    int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
    if (result != 0)
      return result > 0;
  }
  else {
    if (left->getHeight() != right->getHeight()) {
      DEBUG(++FactorCount[FactHeight]);
      return left->getHeight() > right->getHeight();
    }

    if (left->getDepth() != right->getDepth()) {
      DEBUG(++FactorCount[FactDepth]);
      return left->getDepth() < right->getDepth();
    }
  }

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  DEBUG(++FactorCount[FactOther]);
  return (left->NodeQueueId > right->NodeQueueId);
}

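// BURRSort's precedence, highest first: Sethi-Ullman priority, the
// TokenFactor special case, distance to the closest successor, the scratch
// register estimate, latency (unless -disable-sched-cycles), and finally
// NodeQueueId as a deterministic tie breaker.
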
bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  return BURRSort(left, right, SPQ);
}

// Source order, otherwise bottom up.
bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  unsigned LOrder = SPQ->getNodeOrdering(left);
  unsigned ROrder = SPQ->getNodeOrdering(right);

  // Prefer an ordering where the lower the non-zero order number, the higher
  // the priority.
  if ((LOrder || ROrder) && LOrder != ROrder)
    return LOrder != 0 && (LOrder < ROrder || ROrder == 0);

  return BURRSort(left, right, SPQ);
}

// If the time between now and when the instruction will be ready can cover
// the spill code, then avoid adding it to the ready queue. This gives long
// stalls highest priority and allows hoisting across calls. It should also
// speed up processing the available queue.
bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  static const unsigned ReadyDelay = 3;

  if (SPQ->MayReduceRegPressure(SU)) return true;

  if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
      != ScheduleHazardRecognizer::NoHazard)
    return false;

  return true;
}

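// For example (hypothetical numbers): with ReadyDelay = 3, an SU of height 10
// that cannot reduce register pressure stays off the available queue until
// CurCycle reaches 7 (at cycle 6, 10 > 6 + 3 still holds), so long stalls
// keep their priority and the available queue stays small.
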
// Return true if right should be scheduled with higher priority than left.
bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (left->isCall || right->isCall)
    // No way to compute latency of calls.
    return BURRSort(left, right, SPQ);

  bool LHigh = SPQ->HighRegPressure(left);
  bool RHigh = SPQ->HighRegPressure(right);
  // Avoid causing spills. If register pressure is high, schedule for
  // register pressure reduction.
  if (LHigh && !RHigh) {
    DEBUG(++FactorCount[FactPressureDiff]);
    DEBUG(dbgs() << "  pressure SU(" << left->NodeNum << ") > SU("
          << right->NodeNum << ")\n");
    return true;
  }
  else if (!LHigh && RHigh) {
    DEBUG(++FactorCount[FactPressureDiff]);
    DEBUG(dbgs() << "  pressure SU(" << right->NodeNum << ") > SU("
          << left->NodeNum << ")\n");
    return false;
  }
  if (!LHigh && !RHigh) {
    int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
    if (result != 0)
      return result > 0;
  }
  return BURRSort(left, right, SPQ);
}

// Schedule as many instructions in each cycle as possible. So don't make an
// instruction available unless it is ready in the current cycle.
bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  if (SU->getHeight() > CurCycle) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, 0)
      != ScheduleHazardRecognizer::NoHazard)
    return false;

  return true;
}

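// Unlike hybrid_ls_rr_sort::isReady above, there is no ReadyDelay slack here:
// list-ilp only exposes nodes whose height has been reached, packing each
// cycle as full as possible instead of holding work back to hide latency.
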
static bool canEnableCoalescing(SUnit *SU) {
  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // CopyToReg should be close to its uses to facilitate coalescing and
    // avoid spilling.
    return true;

  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG ||
      Opc == TargetOpcode::INSERT_SUBREG)
    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
    // close to their uses to facilitate coalescing.
    return true;

  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
    // If SU does not have a register def, schedule it close to its uses
    // because it does not lengthen any live ranges.
    return true;

  return false;
}

// list-ilp is currently an experimental scheduler that allows various
// heuristics to be enabled prior to the normal register reduction logic.
bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (left->isCall || right->isCall)
    // No way to compute latency of calls.
    return BURRSort(left, right, SPQ);

  unsigned LLiveUses = 0, RLiveUses = 0;
  int LPDiff = 0, RPDiff = 0;
  if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
    LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
    RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
  }
  if (!DisableSchedRegPressure && LPDiff != RPDiff) {
    DEBUG(++FactorCount[FactPressureDiff]);
    DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
          << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
    return LPDiff > RPDiff;
  }

  if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
    bool LReduce = canEnableCoalescing(left);
    bool RReduce = canEnableCoalescing(right);
    DEBUG(if (LReduce != RReduce) ++FactorCount[FactPressureDiff]);
    if (LReduce && !RReduce) return false;
    if (RReduce && !LReduce) return true;
  }

  if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
    DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
          << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
    DEBUG(++FactorCount[FactRegUses]);
    return LLiveUses < RLiveUses;
  }

  if (!DisableSchedStalls) {
    bool LStall = BUHasStall(left, left->getHeight(), SPQ);
    bool RStall = BUHasStall(right, right->getHeight(), SPQ);
    if (LStall != RStall) {
      DEBUG(++FactorCount[FactHeight]);
      return left->getHeight() > right->getHeight();
    }
  }

  if (!DisableSchedCriticalPath) {
    int spread = (int)left->getDepth() - (int)right->getDepth();
    if (std::abs(spread) > MaxReorderWindow) {
      DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
            << left->getDepth() << " != SU(" << right->NodeNum << "): "
            << right->getDepth() << "\n");
      DEBUG(++FactorCount[FactDepth]);
      return left->getDepth() < right->getDepth();
    }
  }

  if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
    int spread = (int)left->getHeight() - (int)right->getHeight();
    if (std::abs(spread) > MaxReorderWindow) {
      DEBUG(++FactorCount[FactHeight]);
      return left->getHeight() > right->getHeight();
    }
  }

  return BURRSort(left, right, SPQ);
}

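// The list-ilp cascade, each stage gated by its own -disable-sched-* flag:
// register pressure diff, coalescing candidates under pressure, live-use
// count, stall avoidance, then critical path (depth) and height, the last two
// consulted only when the spread exceeds MaxReorderWindow. Everything else
// falls through to BURRSort.
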
void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
  SUnits = &sunits;
  // Add pseudo dependency edges for two-address nodes.
  AddPseudoTwoAddrDeps();
  // Reroute edges to nodes with multiple uses.
  if (!TracksRegPressure)
    PrescheduleNodesWithMultipleUses();
  // Calculate node priorities.
  CalculateSethiUllmanNumbers();

  // For single block loops, mark nodes that look like canonical IV increments.
  if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
    for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
      initVRegCycle(&sunits[i]);
    }
  }
}

//===----------------------------------------------------------------------===//
//                    Preschedule for Register Pressure
//===----------------------------------------------------------------------===//

bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
  if (SU->isTwoAddress) {
    unsigned Opc = SU->getNode()->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
        SDNode *DU = SU->getNode()->getOperand(i).getNode();
        if (DU->getNodeId() != -1 &&
            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
          return true;
      }
    }
  }
  return false;
}

/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
                                  const TargetInstrInfo *TII,
                                  const TargetRegisterInfo *TRI) {
  SDNode *N = SuccSU->getNode();
  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
  assert(ImpDefs && "Caller should check hasPhysRegDefs");
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (!SUNode->isMachineOpcode())
      continue;
    const unsigned *SUImpDefs =
      TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
    if (!SUImpDefs)
      return false;
    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
      EVT VT = N->getValueType(i);
      if (VT == MVT::Glue || VT == MVT::Other)
        continue;
      if (!N->hasAnyUseOfValue(i))
        continue;
      unsigned Reg = ImpDefs[i - NumDefs];
      for (;*SUImpDefs; ++SUImpDefs) {
        unsigned SUReg = *SUImpDefs;
        if (TRI->regsOverlap(Reg, SUReg))
          return true;
      }
    }
  }
  return false;
}

/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
/// are not handled well by the general register pressure reduction
/// heuristics. When presented with code like this:
///
///      N
///    / |
///   /  |
///  U  store
///  |
/// ...
///
/// the heuristics tend to push the store up, but since the
/// operand of the store has another use (U), this would increase
/// the length of that other use (the U->N edge).
///
/// This function transforms code like the above to route U's
/// dependence through the store when possible, like this:
///
///      N
///      ||
///      ||
///     store
///       |
///       U
///       |
///      ...
///
/// This results in the store being scheduled immediately
/// after N, which shortens the U->N live range, reducing
/// register pressure.
///
void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
  // Visit all the nodes in topological order, working top-down.
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    // For now, only look at nodes with no data successors, such as stores.
    // These are especially important, due to the heuristics in
    // getNodePriority for nodes with no data successors.
    if (SU->NumSuccs != 0)
      continue;
    // For now, only look at nodes with exactly one data predecessor.
    if (SU->NumPreds != 1)
      continue;
    // Avoid prescheduling copies to virtual registers, which don't behave
    // like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyToReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Locate the single data predecessor.
    SUnit *PredSU = 0;
    for (SUnit::const_pred_iterator II = SU->Preds.begin(),
         EE = SU->Preds.end(); II != EE; ++II)
      if (!II->isCtrl()) {
        PredSU = II->getSUnit();
        break;
      }
    assert(PredSU);

    // Don't rewrite edges that carry physregs, because that requires additional
    // support infrastructure.
    if (PredSU->hasPhysRegDefs)
      continue;
    // Short-circuit the case where SU is PredSU's only data successor.
    if (PredSU->NumSuccs == 1)
      continue;
    // Avoid prescheduling to copies from virtual registers, which don't behave
    // like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyFromReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Perform checks on the successors of PredSU.
    for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
         EE = PredSU->Succs.end(); II != EE; ++II) {
      SUnit *PredSuccSU = II->getSUnit();
      if (PredSuccSU == SU) continue;
      // If PredSU has another successor with no data successors, for
      // now don't attempt to choose either over the other.
      if (PredSuccSU->NumSuccs == 0)
        goto outer_loop_continue;
      // Don't break physical register dependencies.
      if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
        if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
          goto outer_loop_continue;
      // Don't introduce graph cycles.
      if (scheduleDAG->IsReachable(SU, PredSuccSU))
        goto outer_loop_continue;
    }

    // Ok, the transformation is safe and the heuristics suggest it is
    // profitable. Update the graph.
    DEBUG(dbgs() << "    Prescheduling SU #" << SU->NodeNum
                 << " next to PredSU #" << PredSU->NodeNum
                 << " to guide scheduling in the presence of multiple uses\n");
    for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
      SDep Edge = PredSU->Succs[i];
      assert(!Edge.isAssignedRegDep());
      SUnit *SuccSU = Edge.getSUnit();
      if (SuccSU != SU) {
        Edge.setSUnit(PredSU);
        scheduleDAG->RemovePred(SuccSU, Edge);
        scheduleDAG->AddPred(SU, Edge);
        Edge.setSUnit(SU);
        scheduleDAG->AddPred(SuccSU, Edge);
        --i;
      }
    }
  outer_loop_continue:;
  }
}

/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule). If both nodes are two-address, favor the
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
void RegReductionPQBase::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->getNode();
    if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
      continue;

    bool isLiveOut = hasOnlyLiveOutUses(SU);
    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned j = 0; j != NumOps; ++j) {
      if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
        continue;
      SDNode *DU = SU->getNode()->getOperand(j).getNode();
      if (DU->getNodeId() == -1)
        continue;
      const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
      if (!DUSU) continue;
      for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
           E = DUSU->Succs.end(); I != E; ++I) {
        if (I->isCtrl()) continue;
        SUnit *SuccSU = I->getSUnit();
        if (SuccSU == SU)
          continue;
        // Be conservative. Ignore if nodes aren't at roughly the same
        // depth and height.
        if (SuccSU->getHeight() < SU->getHeight() &&
            (SU->getHeight() - SuccSU->getHeight()) > 1)
          continue;
        // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
        // constrains whatever is using the copy, instead of the copy
        // itself. In the case that the copy is coalesced, this
        // preserves the intent of the pseudo two-address heuristics.
        while (SuccSU->Succs.size() == 1 &&
               SuccSU->getNode()->isMachineOpcode() &&
               SuccSU->getNode()->getMachineOpcode() ==
                 TargetOpcode::COPY_TO_REGCLASS)
          SuccSU = SuccSU->Succs.front().getSUnit();
        // Don't constrain non-instruction nodes.
        if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
          continue;
        // Don't constrain nodes with physical register defs if the
        // predecessor can clobber them.
        if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
          if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
            continue;
        }
        // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
        // these may be coalesced away. We want them close to their uses.
        unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
        if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
            SuccOpc == TargetOpcode::INSERT_SUBREG ||
            SuccOpc == TargetOpcode::SUBREG_TO_REG)
          continue;
        if ((!canClobber(SuccSU, DUSU) ||
             (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
             (!SU->isCommutable && SuccSU->isCommutable)) &&
            !scheduleDAG->IsReachable(SuccSU, SU)) {
          DEBUG(dbgs() << "    Adding a pseudo-two-addr edge from SU #"
                << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
          scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
                                        /*Reg=*/0, /*isNormalMemory=*/false,
                                        /*isMustAlias=*/false,
                                        /*isArtificial=*/true));
        }
      }
    }
  }
}

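// Illustration (hypothetical virtual registers): given a two-address node
// A: "t2 = add t1, t3" with t2 tied to t1, and another node B that also reads
// t1, the loop above adds an artificial edge making B a predecessor of A. The
// bottom-up scheduler then picks A first, placing it after B in program
// order, so B reads t1 before the tied def clobbers it and no extra copy of
// t1 is needed.
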
/// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
/// predecessors of the successors of the SUnit SU. Stop when the provided
/// limit is exceeded.
static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
                                                    unsigned Limit) {
  unsigned Sum = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    const SUnit *SuccSU = I->getSUnit();
    for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
         EE = SuccSU->Preds.end(); II != EE; ++II) {
      SUnit *PredSU = II->getSUnit();
      if (!PredSU->isScheduled)
        if (++Sum > Limit)
          return Sum;
    }
  }
  return Sum;
}

bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  bool LIsTarget = left->getNode() && left->getNode()->isMachineOpcode();
  bool RIsTarget = right->getNode() && right->getNode()->isMachineOpcode();
  bool LIsFloater = LIsTarget && left->NumPreds == 0;
  bool RIsFloater = RIsTarget && right->NumPreds == 0;
  unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
  unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0;

  if (left->NumSuccs == 0 && right->NumSuccs != 0)
    return false;
  else if (left->NumSuccs != 0 && right->NumSuccs == 0)
    return true;

  if (LIsFloater)
    LBonus -= 2;
  if (RIsFloater)
    RBonus -= 2;
  if (left->NumSuccs == 1)
    LBonus += 2;
  if (right->NumSuccs == 1)
    RBonus += 2;

  if (LPriority+LBonus != RPriority+RBonus)
    return LPriority+LBonus < RPriority+RBonus;

  if (left->getDepth() != right->getDepth())
    return left->getDepth() < right->getDepth();

  if (left->NumSuccsLeft != right->NumSuccsLeft)
    return left->NumSuccsLeft > right->NumSuccsLeft;

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

llvm::ScheduleDAGSDNodes *
llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
                                 CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  BURegReductionPriorityQueue *PQ =
    new BURegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
                                 CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  TDRegReductionPriorityQueue *PQ =
    new TDRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  SrcRegReductionPriorityQueue *PQ =
    new SrcRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = &IS->getTargetLowering();

  HybridBURRPriorityQueue *PQ =
    new HybridBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);

  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
                                CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = &IS->getTargetLowering();

  ILPBURRPriorityQueue *PQ =
    new ILPBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

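// These factories are normally reached through the RegisterScheduler registry
// entries at the top of this file (e.g. "llc -pre-RA-sched=list-ilp"). Note
// that only the hybrid and ILP variants construct their priority queues with
// pressure tracking enabled and a TargetLowering instance; the BURR, TDRR,
// and source-order schedulers run without register pressure tracking.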