1 //===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements bottom-up and top-down register pressure reduction list
11 // schedulers, using standard algorithms. The basic approach uses a priority
12 // queue of available nodes to schedule. One at a time, nodes are taken from
13 // the priority queue (thus in priority order), checked for legality to
14 // schedule, and emitted if legal.
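//
// A minimal sketch of the core loop (illustrative only; the real loops below
// also handle hazards, pending instructions, and physical register
// interferences; isLegalToSchedule is a hypothetical placeholder, not a
// function in this file):
//
//   while (!AvailableQueue->empty()) {
//     SUnit *SU = AvailableQueue->pop();   // highest priority node first
//     if (!isLegalToSchedule(SU))
//       continue;                          // defer; try another node
//     Sequence.push_back(SU);              // emit the node
//     ReleasePredecessors(SU);             // may make more nodes available
//   }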
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "pre-RA-sched"
19 #include "ScheduleDAGSDNodes.h"
20 #include "llvm/InlineAsm.h"
21 #include "llvm/CodeGen/SchedulerRegistry.h"
22 #include "llvm/CodeGen/SelectionDAGISel.h"
23 #include "llvm/CodeGen/ScheduleHazardRecognizer.h"
24 #include "llvm/Target/TargetRegisterInfo.h"
25 #include "llvm/Target/TargetData.h"
26 #include "llvm/Target/TargetMachine.h"
27 #include "llvm/Target/TargetInstrInfo.h"
28 #include "llvm/Target/TargetLowering.h"
29 #include "llvm/ADT/SmallSet.h"
30 #include "llvm/ADT/Statistic.h"
31 #include "llvm/ADT/STLExtras.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/raw_ostream.h"
38 STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
39 STATISTIC(NumUnfolds, "Number of nodes unfolded");
40 STATISTIC(NumDups, "Number of duplicated nodes");
41 STATISTIC(NumPRCopies, "Number of physical register copies");
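// With -stats, these counters appear in the statistics report, e.g.
// (illustrative values):
//   12 pre-RA-sched - Number of times scheduler backtracked
//    3 pre-RA-sched - Number of nodes unfolded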
43 static RegisterScheduler
44 burrListDAGScheduler("list-burr",
45 "Bottom-up register reduction list scheduling",
46 createBURRListDAGScheduler);
47 static RegisterScheduler
48 tdrListrDAGScheduler("list-tdrr",
49 "Top-down register reduction list scheduling",
50 createTDRRListDAGScheduler);
51 static RegisterScheduler
52 sourceListDAGScheduler("source",
53 "Similar to list-burr but schedules in source "
54 "order when possible",
55 createSourceListDAGScheduler);
57 static RegisterScheduler
58 hybridListDAGScheduler("list-hybrid",
59 "Bottom-up register pressure aware list scheduling "
60 "which tries to balance latency and register pressure",
61 createHybridListDAGScheduler);
63 static RegisterScheduler
64 ILPListDAGScheduler("list-ilp",
65 "Bottom-up register pressure aware list scheduling "
66 "which tries to balance ILP and register pressure",
67 createILPListDAGScheduler);
69 static cl::opt<bool> DisableSchedCycles(
70 "disable-sched-cycles", cl::Hidden, cl::init(false),
71 cl::desc("Disable cycle-level precision during preRA scheduling"));
73 // Temporary sched=list-ilp flags until the heuristics are robust.
74 // Some options are also available under sched=list-hybrid.
75 static cl::opt<bool> DisableSchedRegPressure(
76 "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
77 cl::desc("Disable regpressure priority in sched=list-ilp"));
78 static cl::opt<bool> DisableSchedLiveUses(
79 "disable-sched-live-uses", cl::Hidden, cl::init(true),
80 cl::desc("Disable live use priority in sched=list-ilp"));
81 static cl::opt<bool> DisableSchedVRegCycle(
82 "disable-sched-vrcycle", cl::Hidden, cl::init(false),
83 cl::desc("Disable virtual register cycle interference checks"));
84 static cl::opt<bool> DisableSchedPhysRegJoin(
85 "disable-sched-physreg-join", cl::Hidden, cl::init(false),
86 cl::desc("Disable physreg def-use affinity"));
87 static cl::opt<bool> DisableSchedStalls(
88 "disable-sched-stalls", cl::Hidden, cl::init(true),
89 cl::desc("Disable no-stall priority in sched=list-ilp"));
90 static cl::opt<bool> DisableSchedCriticalPath(
91 "disable-sched-critical-path", cl::Hidden, cl::init(false),
92 cl::desc("Disable critical path priority in sched=list-ilp"));
93 static cl::opt<bool> DisableSchedHeight(
94 "disable-sched-height", cl::Hidden, cl::init(false),
95 cl::desc("Disable scheduled-height priority in sched=list-ilp"));
97 static cl::opt<int> MaxReorderWindow(
98 "max-sched-reorder", cl::Hidden, cl::init(6),
99 cl::desc("Number of instructions to allow ahead of the critical path "
100 "in sched=list-ilp"));
102 static cl::opt<unsigned> AvgIPC(
103 "sched-avg-ipc", cl::Hidden, cl::init(1),
104 cl::desc("Average inst/cycle when no target itinerary exists."));
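// For example, the ILP scheduler and its knobs can be selected from the llc
// command line (an illustrative invocation; foo.bc is a placeholder input):
//   llc -pre-RA-sched=list-ilp -max-sched-reorder=4 -sched-avg-ipc=2 foo.bc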
108 // For sched=list-ilp, count the number of times each factor comes into play.
109 enum { FactPressureDiff, FactRegUses, FactStall, FactHeight, FactDepth,
110 FactStatic, FactOther, NumFactors };
112 static const char *FactorName[NumFactors] =
113 {"PressureDiff", "RegUses", "Stall", "Height", "Depth","Static", "Other"};
114 static int FactorCount[NumFactors];
118 //===----------------------------------------------------------------------===//
119 /// ScheduleDAGRRList - The actual register reduction list scheduler
120 /// implementation. This supports both top-down and bottom-up scheduling.
122 class ScheduleDAGRRList : public ScheduleDAGSDNodes {
124 /// isBottomUp - This is true if the scheduling problem is bottom-up, false if it is top-down.
128 /// NeedLatency - True if the scheduler will make use of latency information.
132 /// AvailableQueue - The priority queue to use for the available SUnits.
133 SchedulingPriorityQueue *AvailableQueue;
135 /// PendingQueue - This contains all of the instructions whose operands have
136 /// been issued, but their results are not ready yet (due to the latency of
137 /// the operation). Once the operands become available, the instruction is
138 /// added to the AvailableQueue.
139 std::vector<SUnit*> PendingQueue;
141 /// HazardRec - The hazard recognizer to use.
142 ScheduleHazardRecognizer *HazardRec;
144 /// CurCycle - The current scheduler state corresponds to this cycle.
147 /// MinAvailableCycle - Cycle of the soonest available instruction.
148 unsigned MinAvailableCycle;
150 /// IssueCount - Count of instructions issued in this cycle.
151 /// Currently valid only for bottom-up scheduling.
154 /// LiveRegDefs - A set of physical registers and their definitions
155 /// that are "live". These nodes must be scheduled before any other nodes that
156 /// modify the registers can be scheduled.
157 unsigned NumLiveRegs;
158 std::vector<SUnit*> LiveRegDefs;
159 std::vector<SUnit*> LiveRegGens;
161 /// Topo - A topological ordering for SUnits which permits fast IsReachable
162 /// and similar queries.
163 ScheduleDAGTopologicalSort Topo;
166 ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
167 SchedulingPriorityQueue *availqueue,
168 CodeGenOpt::Level OptLevel)
169 : ScheduleDAGSDNodes(mf), isBottomUp(availqueue->isBottomUp()),
170 NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
173 const TargetMachine &tm = mf.getTarget();
174 if (DisableSchedCycles || !NeedLatency)
175 HazardRec = new ScheduleHazardRecognizer();
177 HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
180 ~ScheduleDAGRRList() {
182 delete AvailableQueue;
187 ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }
189 /// IsReachable - Checks if SU is reachable from TargetSU.
190 bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
191 return Topo.IsReachable(SU, TargetSU);
194 /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will create a cycle.
196 bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
197 return Topo.WillCreateCycle(SU, TargetSU);
200 /// AddPred - adds a predecessor edge to SUnit SU.
201 /// This returns true if this is a new predecessor.
202 /// Updates the topological ordering if required.
203 void AddPred(SUnit *SU, const SDep &D) {
204 Topo.AddPred(SU, D.getSUnit());
208 /// RemovePred - removes a predecessor edge from SUnit SU.
209 /// This returns true if an edge was removed.
210 /// Updates the topological ordering if required.
211 void RemovePred(SUnit *SU, const SDep &D) {
212 Topo.RemovePred(SU, D.getSUnit());
217 bool isReady(SUnit *SU) {
218 return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
219 AvailableQueue->isReady(SU);
222 void ReleasePred(SUnit *SU, const SDep *PredEdge);
223 void ReleasePredecessors(SUnit *SU);
224 void ReleaseSucc(SUnit *SU, const SDep *SuccEdge);
225 void ReleaseSuccessors(SUnit *SU);
226 void ReleasePending();
227 void AdvanceToCycle(unsigned NextCycle);
228 void AdvancePastStalls(SUnit *SU);
229 void EmitNode(SUnit *SU);
230 void ScheduleNodeBottomUp(SUnit*);
231 void CapturePred(SDep *PredEdge);
232 void UnscheduleNodeBottomUp(SUnit*);
233 void RestoreHazardCheckerBottomUp();
234 void BacktrackBottomUp(SUnit*, SUnit*);
235 SUnit *CopyAndMoveSuccessors(SUnit*);
236 void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
237 const TargetRegisterClass*,
238 const TargetRegisterClass*,
239 SmallVector<SUnit*, 2>&);
240 bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
242 SUnit *PickNodeToScheduleBottomUp();
243 void ListScheduleBottomUp();
245 void ScheduleNodeTopDown(SUnit*);
246 void ListScheduleTopDown();
249 /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
250 /// Updates the topological ordering if required.
251 SUnit *CreateNewSUnit(SDNode *N) {
252 unsigned NumSUnits = SUnits.size();
253 SUnit *NewNode = NewSUnit(N);
254 // Update the topological ordering.
255 if (NewNode->NodeNum >= NumSUnits)
256 Topo.InitDAGTopologicalSorting();
260 /// CreateClone - Creates a new SUnit from an existing one.
261 /// Updates the topological ordering if required.
262 SUnit *CreateClone(SUnit *N) {
263 unsigned NumSUnits = SUnits.size();
264 SUnit *NewNode = Clone(N);
265 // Update the topological ordering.
266 if (NewNode->NodeNum >= NumSUnits)
267 Topo.InitDAGTopologicalSorting();
271 /// ForceUnitLatencies - Register-pressure-reducing scheduling doesn't
272 /// need actual latency information but the hybrid scheduler does.
273 bool ForceUnitLatencies() const {
277 } // end anonymous namespace
280 /// Schedule - Schedule the DAG using list scheduling.
281 void ScheduleDAGRRList::Schedule() {
283 << "********** List Scheduling BB#" << BB->getNumber()
284 << " '" << BB->getName() << "' **********\n");
286 for (int i = 0; i < NumFactors; ++i) {
293 MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
295 LiveRegDefs.resize(TRI->getNumRegs(), NULL);
296 LiveRegGens.resize(TRI->getNumRegs(), NULL);
298 // Build the scheduling graph.
299 BuildSchedGraph(NULL);
301 DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
302 SUnits[su].dumpAll(this));
303 Topo.InitDAGTopologicalSorting();
305 AvailableQueue->initNodes(SUnits);
309 // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
311 ListScheduleBottomUp();
313 ListScheduleTopDown();
316 for (int i = 0; i < NumFactors; ++i) {
317 DEBUG(dbgs() << FactorName[i] << "\t" << FactorCount[i] << "\n");
320 AvailableQueue->releaseState();
323 //===----------------------------------------------------------------------===//
324 // Bottom-Up Scheduling
325 //===----------------------------------------------------------------------===//
327 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
328 /// the AvailableQueue if the count reaches zero. Also update its cycle bound.
329 void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
330 SUnit *PredSU = PredEdge->getSUnit();
333 if (PredSU->NumSuccsLeft == 0) {
334 dbgs() << "*** Scheduling failed! ***\n";
336 dbgs() << " has been released too many times!\n";
340 --PredSU->NumSuccsLeft;
342 if (!ForceUnitLatencies()) {
343 // Updating predecessor's height. This is now the cycle when the
344 // predecessor can be scheduled without causing a pipeline stall.
345 PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
348 // If all the node's successors are scheduled, this node is ready
349 // to be scheduled. Ignore the special EntrySU node.
350 if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
351 PredSU->isAvailable = true;
353 unsigned Height = PredSU->getHeight();
354 if (Height < MinAvailableCycle)
355 MinAvailableCycle = Height;
357 if (isReady(PredSU)) {
358 AvailableQueue->push(PredSU);
360 // CapturePred and others may have left the node in the pending queue; avoid adding it twice.
362 else if (!PredSU->isPending) {
363 PredSU->isPending = true;
364 PendingQueue.push_back(PredSU);
369 /// Call ReleasePred for each predecessor, then update register live def/gen.
370 /// Always update LiveRegDefs for a register dependence even if the current SU
371 /// also defines the register. This effectively creates one large live range
372 /// across a sequence of two-address nodes. This is important because the
373 /// entire chain must be scheduled together. Example:
375 /// flags = (3) add
376 /// flags = (2) addc flags
377 /// flags = (1) addc flags
381 /// LiveRegDefs[flags] = 3
382 /// LiveRegGens[flags] = 1
384 /// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
385 /// interference on flags.
386 void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
387 // Bottom up: release predecessors
388 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
390 ReleasePred(SU, &*I);
391 if (I->isAssignedRegDep()) {
392 // This is a physical register dependency and it's impossible or
393 // expensive to copy the register. Make sure nothing that can
394 // clobber the register is scheduled between the predecessor and the use.
396 SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
397 assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
398 "interference on register dependence");
399 LiveRegDefs[I->getReg()] = I->getSUnit();
400 if (!LiveRegGens[I->getReg()]) {
402 LiveRegGens[I->getReg()] = SU;
408 /// Check to see if any of the pending instructions are ready to issue. If
409 /// so, add them to the available queue.
410 void ScheduleDAGRRList::ReleasePending() {
411 if (DisableSchedCycles) {
412 assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
416 // If the available queue is empty, it is safe to reset MinAvailableCycle.
417 if (AvailableQueue->empty())
418 MinAvailableCycle = UINT_MAX;
420 // Check to see if any of the pending instructions are ready to issue. If
421 // so, add them to the available queue.
422 for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
423 unsigned ReadyCycle =
424 isBottomUp ? PendingQueue[i]->getHeight() : PendingQueue[i]->getDepth();
425 if (ReadyCycle < MinAvailableCycle)
426 MinAvailableCycle = ReadyCycle;
428 if (PendingQueue[i]->isAvailable) {
429 if (!isReady(PendingQueue[i]))
431 AvailableQueue->push(PendingQueue[i]);
433 PendingQueue[i]->isPending = false;
434 PendingQueue[i] = PendingQueue.back();
435 PendingQueue.pop_back();
440 /// Move the scheduler state forward by the specified number of Cycles.
441 void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
442 if (NextCycle <= CurCycle)
446 AvailableQueue->setCurCycle(NextCycle);
447 if (!HazardRec->isEnabled()) {
448 // Bypass lots of virtual calls in case of long latency.
449 CurCycle = NextCycle;
452 for (; CurCycle != NextCycle; ++CurCycle) {
454 HazardRec->RecedeCycle();
456 HazardRec->AdvanceCycle();
459 // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
460 // available Q to release pending nodes at least once before popping.
464 /// Move the scheduler state forward until the specified node's dependents are
465 /// ready and can be scheduled with no resource conflicts.
466 void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
467 if (DisableSchedCycles)
470 // FIXME: Nodes such as CopyFromReg probably should not advance the current
471 // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
472 // has predecessors the cycle will be advanced when they are scheduled.
473 // But given the crude nature of modeling latency though such nodes, we
474 // currently need to treat these nodes like real instructions.
475 // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;
477 unsigned ReadyCycle = isBottomUp ? SU->getHeight() : SU->getDepth();
479 // Bump CurCycle to account for latency. We assume the latency of other
480 // available instructions may be hidden by the stall (not a full pipe stall).
481 // This updates the hazard recognizer's cycle before reserving resources for this instruction.
483 AdvanceToCycle(ReadyCycle);
485 // Calls are scheduled in their preceding cycle, so don't conflict with
486 // hazards from instructions after the call. EmitNode will reset the
487 // scoreboard state before emitting the call.
488 if (isBottomUp && SU->isCall)
491 // FIXME: For resource conflicts in very long non-pipelined stages, we
492 // should probably skip ahead here to avoid useless scoreboard checks.
495 ScheduleHazardRecognizer::HazardType HT =
496 HazardRec->getHazardType(SU, isBottomUp ? -Stalls : Stalls);
498 if (HT == ScheduleHazardRecognizer::NoHazard)
503 AdvanceToCycle(CurCycle + Stalls);
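// For example (illustrative): bottom-up, with SU->getHeight() == 7 and
// CurCycle == 4, AdvanceToCycle(7) first covers the latency stall; if the
// scoreboard still reports a hazard two cycles out, Stalls reaches 2 and the
// final AdvanceToCycle(CurCycle + Stalls) lands on cycle 9.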
506 /// Record this SUnit in the HazardRecognizer.
507 /// Does not update CurCycle.
508 void ScheduleDAGRRList::EmitNode(SUnit *SU) {
509 if (!HazardRec->isEnabled())
512 // Check for phys reg copy.
516 switch (SU->getNode()->getOpcode()) {
518 assert(SU->getNode()->isMachineOpcode() &&
519 "This target-independent node should not be scheduled.");
521 case ISD::MERGE_VALUES:
522 case ISD::TokenFactor:
524 case ISD::CopyFromReg:
526 // Noops don't affect the scoreboard state. Copies are likely to be removed.
530 // For inline asm, clear the pipeline state.
534 if (isBottomUp && SU->isCall) {
535 // Calls are scheduled with their preceding instructions. For bottom-up
536 // scheduling, clear the pipeline state before emitting.
540 HazardRec->EmitInstruction(SU);
542 if (!isBottomUp && SU->isCall) {
547 static void resetVRegCycle(SUnit *SU);
549 /// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
550 /// count of its predecessors. If a predecessor pending count is zero, add it to
551 /// the Available queue.
552 void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
553 DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
554 DEBUG(SU->dump(this));
557 if (CurCycle < SU->getHeight())
558 DEBUG(dbgs() << " Height [" << SU->getHeight()
559 << "] pipeline stall!\n");
562 // FIXME: Do not modify node height. It may interfere with
563 // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
564 // node its ready cycle can aid heuristics, and after scheduling it can
565 // indicate the scheduled cycle.
566 SU->setHeightToAtLeast(CurCycle);
568 // Reserve resources for the scheduled instruction.
571 Sequence.push_back(SU);
573 AvailableQueue->ScheduledNode(SU);
575 // If HazardRec is disabled, and each inst counts as one cycle, then
576 // advance CurCycle before ReleasePredecessors to avoid useless pushes to
577 // PendingQueue for schedulers that implement HasReadyFilter.
578 if (!HazardRec->isEnabled() && AvgIPC < 2)
579 AdvanceToCycle(CurCycle + 1);
581 // Update liveness of predecessors before successors to avoid treating a
582 // two-address node as a live range def.
583 ReleasePredecessors(SU);
585 // Release all the implicit physical register defs that are live.
586 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
588 // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
589 if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
590 assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
592 LiveRegDefs[I->getReg()] = NULL;
593 LiveRegGens[I->getReg()] = NULL;
599 SU->isScheduled = true;
601 // Conditions under which the scheduler should eagerly advance the cycle:
602 // (1) No available instructions
603 // (2) All pipelines full, so available instructions must have hazards.
605 // If HazardRec is disabled, the cycle was pre-advanced before calling
606 // ReleasePredecessors. In that case, IssueCount should remain 0.
608 // Check AvailableQueue after ReleasePredecessors in case of zero latency.
609 if (HazardRec->isEnabled() || AvgIPC > 1) {
610 if (SU->getNode() && SU->getNode()->isMachineOpcode())
612 if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
613 || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
614 AdvanceToCycle(CurCycle + 1);
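// For example (illustrative): with the hazard recognizer disabled and
// -sched-avg-ipc=2, IssueCount reaches AvgIPC after every second machine
// instruction, at which point the cycle is advanced and the issue count
// starts over for the new cycle.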
618 /// CapturePred - This does the opposite of ReleasePred. Since SU is being
619 /// unscheduled, increase the succ left count of its predecessors. Remove
620 /// them from AvailableQueue if necessary.
621 void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
622 SUnit *PredSU = PredEdge->getSUnit();
623 if (PredSU->isAvailable) {
624 PredSU->isAvailable = false;
625 if (!PredSU->isPending)
626 AvailableQueue->remove(PredSU);
629 assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
630 ++PredSU->NumSuccsLeft;
633 /// UnscheduleNodeBottomUp - Remove the node from the schedule, and update
634 /// its state and its predecessors' states to reflect the change.
635 void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
636 DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
637 DEBUG(SU->dump(this));
639 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
642 if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]){
643 assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
644 assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
645 "Physical register dependency violated?");
647 LiveRegDefs[I->getReg()] = NULL;
648 LiveRegGens[I->getReg()] = NULL;
652 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
654 if (I->isAssignedRegDep()) {
655 // This becomes the nearest def. Note that an earlier def may still be
656 // pending if this is a two-address node.
657 if (!LiveRegDefs[I->getReg()])
658 ++NumLiveRegs;
659 LiveRegDefs[I->getReg()] = SU;
661 if (LiveRegGens[I->getReg()] == NULL ||
662 I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
663 LiveRegGens[I->getReg()] = I->getSUnit();
666 if (SU->getHeight() < MinAvailableCycle)
667 MinAvailableCycle = SU->getHeight();
669 SU->setHeightDirty();
670 SU->isScheduled = false;
671 SU->isAvailable = true;
672 if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
673 // Don't make available until backtracking is complete.
674 SU->isPending = true;
675 PendingQueue.push_back(SU);
678 AvailableQueue->push(SU);
680 AvailableQueue->UnscheduledNode(SU);
683 /// After backtracking, the hazard checker needs to be restored to a state
684 /// corresponding to the current cycle.
685 void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
688 unsigned LookAhead = std::min((unsigned)Sequence.size(),
689 HazardRec->getMaxLookAhead());
693 std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
694 unsigned HazardCycle = (*I)->getHeight();
695 for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
697 for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
698 HazardRec->RecedeCycle();
704 /// BacktrackBottomUp - Backtrack scheduling by unscheduling nodes up to and
705 /// including BtSU, so that SU can be scheduled.
706 void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
707 SUnit *OldSU = Sequence.back();
710 if (SU->isSucc(OldSU))
711 // Don't try to remove SU from AvailableQueue.
712 SU->isAvailable = false;
713 // FIXME: use ready cycle instead of height
714 CurCycle = OldSU->getHeight();
715 UnscheduleNodeBottomUp(OldSU);
716 AvailableQueue->setCurCycle(CurCycle);
719 OldSU = Sequence.back();
722 assert(!SU->isSucc(OldSU) && "Something is wrong!");
724 RestoreHazardCheckerBottomUp();
731 static bool isOperandOf(const SUnit *SU, SDNode *N) {
732 for (const SDNode *SUNode = SU->getNode(); SUNode;
733 SUNode = SUNode->getGluedNode()) {
734 if (SUNode->isOperandOf(N))
740 /// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
741 /// successors to the newly created node.
742 SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
743 SDNode *N = SU->getNode();
747 if (SU->getNode()->getGluedNode())
751 bool TryUnfold = false;
752 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
753 EVT VT = N->getValueType(i);
756 else if (VT == MVT::Other)
759 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
760 const SDValue &Op = N->getOperand(i);
761 EVT VT = Op.getNode()->getValueType(Op.getResNo());
767 SmallVector<SDNode*, 2> NewNodes;
768 if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
771 DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
772 assert(NewNodes.size() == 2 && "Expected a load folding node!");
775 SDNode *LoadNode = NewNodes[0];
776 unsigned NumVals = N->getNumValues();
777 unsigned OldNumVals = SU->getNode()->getNumValues();
778 for (unsigned i = 0; i != NumVals; ++i)
779 DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
780 DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
781 SDValue(LoadNode, 1));
783 // LoadNode may already exist. This can happen when there is another
784 // load from the same location that produces the same type of value
785 // but with different alignment or volatility.
786 bool isNewLoad = true;
788 if (LoadNode->getNodeId() != -1) {
789 LoadSU = &SUnits[LoadNode->getNodeId()];
792 LoadSU = CreateNewSUnit(LoadNode);
793 LoadNode->setNodeId(LoadSU->NodeNum);
795 InitNumRegDefsLeft(LoadSU);
796 ComputeLatency(LoadSU);
799 SUnit *NewSU = CreateNewSUnit(N);
800 assert(N->getNodeId() == -1 && "Node already inserted!");
801 N->setNodeId(NewSU->NodeNum);
803 const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
804 for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
805 if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
806 NewSU->isTwoAddress = true;
810 if (TID.isCommutable())
811 NewSU->isCommutable = true;
813 InitNumRegDefsLeft(NewSU);
814 ComputeLatency(NewSU);
816 // Record all the edges to and from the old SU, by category.
817 SmallVector<SDep, 4> ChainPreds;
818 SmallVector<SDep, 4> ChainSuccs;
819 SmallVector<SDep, 4> LoadPreds;
820 SmallVector<SDep, 4> NodePreds;
821 SmallVector<SDep, 4> NodeSuccs;
822 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
825 ChainPreds.push_back(*I);
826 else if (isOperandOf(I->getSUnit(), LoadNode))
827 LoadPreds.push_back(*I);
829 NodePreds.push_back(*I);
831 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
834 ChainSuccs.push_back(*I);
836 NodeSuccs.push_back(*I);
839 // Now assign edges to the newly-created nodes.
840 for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
841 const SDep &Pred = ChainPreds[i];
842 RemovePred(SU, Pred);
844 AddPred(LoadSU, Pred);
846 for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
847 const SDep &Pred = LoadPreds[i];
848 RemovePred(SU, Pred);
850 AddPred(LoadSU, Pred);
852 for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
853 const SDep &Pred = NodePreds[i];
854 RemovePred(SU, Pred);
855 AddPred(NewSU, Pred);
857 for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
858 SDep D = NodeSuccs[i];
859 SUnit *SuccDep = D.getSUnit();
861 RemovePred(SuccDep, D);
864 // Balance register pressure.
865 if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
866 && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
867 --NewSU->NumRegDefsLeft;
869 for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
870 SDep D = ChainSuccs[i];
871 SUnit *SuccDep = D.getSUnit();
873 RemovePred(SuccDep, D);
880 // Add a data dependency to reflect that NewSU reads the value defined by LoadSU.
882 AddPred(NewSU, SDep(LoadSU, SDep::Data, LoadSU->Latency));
885 AvailableQueue->addNode(LoadSU);
886 AvailableQueue->addNode(NewSU);
890 if (NewSU->NumSuccsLeft == 0) {
891 NewSU->isAvailable = true;
897 DEBUG(dbgs() << " Duplicating SU #" << SU->NodeNum << "\n");
898 NewSU = CreateClone(SU);
900 // New SUnit has the exact same predecessors.
901 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
903 if (!I->isArtificial())
906 // Only copy scheduled successors. Cut them from old node's successor
907 // list and move them over.
908 SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
909 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
911 if (I->isArtificial())
913 SUnit *SuccSU = I->getSUnit();
914 if (SuccSU->isScheduled) {
919 DelDeps.push_back(std::make_pair(SuccSU, D));
922 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
923 RemovePred(DelDeps[i].first, DelDeps[i].second);
925 AvailableQueue->updateNode(SU);
926 AvailableQueue->addNode(NewSU);
932 /// InsertCopiesAndMoveSuccs - Insert register copies and move all
933 /// scheduled successors of the given SUnit to the last copy.
934 void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
935 const TargetRegisterClass *DestRC,
936 const TargetRegisterClass *SrcRC,
937 SmallVector<SUnit*, 2> &Copies) {
938 SUnit *CopyFromSU = CreateNewSUnit(NULL);
939 CopyFromSU->CopySrcRC = SrcRC;
940 CopyFromSU->CopyDstRC = DestRC;
942 SUnit *CopyToSU = CreateNewSUnit(NULL);
943 CopyToSU->CopySrcRC = DestRC;
944 CopyToSU->CopyDstRC = SrcRC;
946 // Only copy scheduled successors. Cut them from old node's successor
947 // list and move them over.
948 SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
949 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
951 if (I->isArtificial())
953 SUnit *SuccSU = I->getSUnit();
954 if (SuccSU->isScheduled) {
956 D.setSUnit(CopyToSU);
958 DelDeps.push_back(std::make_pair(SuccSU, *I));
961 // Avoid scheduling the def-side copy before other successors. Otherwise
962 // we could introduce another physreg interference on the copy and
963 // continue inserting copies indefinitely.
964 SDep D(CopyFromSU, SDep::Order, /*Latency=*/0,
965 /*Reg=*/0, /*isNormalMemory=*/false,
966 /*isMustAlias=*/false, /*isArtificial=*/true);
970 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
971 RemovePred(DelDeps[i].first, DelDeps[i].second);
973 AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
974 AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));
976 AvailableQueue->updateNode(SU);
977 AvailableQueue->addNode(CopyFromSU);
978 AvailableQueue->addNode(CopyToSU);
979 Copies.push_back(CopyFromSU);
980 Copies.push_back(CopyToSU);
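// The resulting dependence structure is roughly (illustrative):
//   SU --data--> CopyFromSU --data--> CopyToSU --> (scheduled successors)
// while the artificial ordering edges added above keep the def-side
// CopyFromSU from being scheduled before the remaining successors.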
985 /// getPhysicalRegisterVT - Returns the ValueType of the physical register
986 /// definition of the specified node.
987 /// FIXME: Move to SelectionDAG?
988 static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
989 const TargetInstrInfo *TII) {
990 const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
991 assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
992 unsigned NumRes = TID.getNumDefs();
993 for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
998 return N->getValueType(NumRes);
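// For example (illustrative, using x86 register names): a node with 2
// explicit defs and implicit defs {EFLAGS, EAX}, queried for EAX, skips
// EFLAGS, so NumRes becomes 2 + 1 = 3 and the node's value type at result
// index 3 is returned.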
1001 /// CheckForLiveRegDef - Append to LRegs and update the live register sets if
1002 /// the specified register def of the specified SUnit clobbers any "live" registers.
1003 static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
1004 std::vector<SUnit*> &LiveRegDefs,
1005 SmallSet<unsigned, 4> &RegAdded,
1006 SmallVector<unsigned, 4> &LRegs,
1007 const TargetRegisterInfo *TRI) {
1008 for (const unsigned *AliasI = TRI->getOverlaps(Reg); *AliasI; ++AliasI) {
1010 // Check if Ref is live.
1011 if (!LiveRegDefs[*AliasI]) continue;
1013 // Allow multiple uses of the same def.
1014 if (LiveRegDefs[*AliasI] == SU) continue;
1016 // Add Reg to the set of interfering live regs.
1017 if (RegAdded.insert(*AliasI)) {
1018 LRegs.push_back(*AliasI);
1024 /// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
1025 /// scheduling of the given node to satisfy live physical register dependencies.
1026 /// If the specified node is the last one that's available to schedule, do
1027 /// whatever is necessary (i.e. backtracking or cloning) to make it possible.
1028 bool ScheduleDAGRRList::
1029 DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
1030 if (NumLiveRegs == 0)
1033 SmallSet<unsigned, 4> RegAdded;
1034 // If this node would clobber any "live" register, then it's not ready.
1036 // If SU is the currently live definition of the same register that it uses,
1037 // then we are free to schedule it.
1038 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1040 if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
1041 CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
1042 RegAdded, LRegs, TRI);
1045 for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
1046 if (Node->getOpcode() == ISD::INLINEASM) {
1047 // Inline asm can clobber physical defs.
1048 unsigned NumOps = Node->getNumOperands();
1049 if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
1050 --NumOps; // Ignore the glue operand.
1052 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
1054 cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
1055 unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
1057 ++i; // Skip the ID value.
1058 if (InlineAsm::isRegDefKind(Flags) ||
1059 InlineAsm::isRegDefEarlyClobberKind(Flags)) {
1060 // Check for def of register or earlyclobber register.
1061 for (; NumVals; --NumVals, ++i) {
1062 unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
1063 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1064 CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
1072 if (!Node->isMachineOpcode())
1074 const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
1075 if (!TID.ImplicitDefs)
1077 for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg)
1078 CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
1081 return !LRegs.empty();
1084 /// Return a node that can be scheduled in this cycle. Requirements:
1085 /// (1) Ready: latency has been satisfied
1086 /// (2) No Hazards: resources are available
1087 /// (3) No Interferences: may unschedule to break register interferences.
1088 SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
1089 SmallVector<SUnit*, 4> Interferences;
1090 DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
1092 SUnit *CurSU = AvailableQueue->pop();
1094 SmallVector<unsigned, 4> LRegs;
1095 if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
1097 LRegsMap.insert(std::make_pair(CurSU, LRegs));
1099 CurSU->isPending = true; // This SU is not in AvailableQueue right now.
1100 Interferences.push_back(CurSU);
1101 CurSU = AvailableQueue->pop();
1104 // Add the nodes that aren't ready back onto the available list.
1105 for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
1106 Interferences[i]->isPending = false;
1107 assert(Interferences[i]->isAvailable && "must still be available");
1108 AvailableQueue->push(Interferences[i]);
1113 // All candidates are delayed due to live physical reg dependencies.
1114 // Try backtracking, code duplication, or inserting cross class copies
1116 for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
1117 SUnit *TrySU = Interferences[i];
1118 SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
1120 // Try unscheduling up to the point where it's safe to schedule the current node.
1123 unsigned LiveCycle = UINT_MAX;
1124 for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
1125 unsigned Reg = LRegs[j];
1126 if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
1127 BtSU = LiveRegGens[Reg];
1128 LiveCycle = BtSU->getHeight();
1131 if (!WillCreateCycle(TrySU, BtSU)) {
1132 BacktrackBottomUp(TrySU, BtSU);
1134 // Force the current node to be scheduled before the node that
1135 // requires the physical reg dep.
1136 if (BtSU->isAvailable) {
1137 BtSU->isAvailable = false;
1138 if (!BtSU->isPending)
1139 AvailableQueue->remove(BtSU);
1141 AddPred(TrySU, SDep(BtSU, SDep::Order, /*Latency=*/1,
1142 /*Reg=*/0, /*isNormalMemory=*/false,
1143 /*isMustAlias=*/false, /*isArtificial=*/true));
1145 // If one or more successors have been unscheduled, then the current
1146 // node is no longer available. Schedule a successor that's now
1147 // available instead.
1148 if (!TrySU->isAvailable) {
1149 CurSU = AvailableQueue->pop();
1153 TrySU->isPending = false;
1154 Interferences.erase(Interferences.begin()+i);
1161 // Can't backtrack. If it's too expensive to copy the value, then try to
1162 // duplicate the nodes that produce these "too expensive to copy"
1163 // values to break the dependency. In case even that doesn't work,
1164 // insert cross class copies.
1165 // If it's not too expensive, i.e. cost != -1, issue copies.
1166 SUnit *TrySU = Interferences[0];
1167 SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
1168 assert(LRegs.size() == 1 && "Can't handle this yet!");
1169 unsigned Reg = LRegs[0];
1170 SUnit *LRDef = LiveRegDefs[Reg];
1171 EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
1172 const TargetRegisterClass *RC =
1173 TRI->getMinimalPhysRegClass(Reg, VT);
1174 const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
1176 // If the cross copy register class is the same as RC, then it must be
1177 // possible to copy the value directly. Do not try to duplicate the def.
1178 // If the cross copy register class is not the same as RC, then it's
1179 // possible to copy the value, but it requires cross register class copies
1180 // and is expensive.
1181 // If cross copy register class is null, then it's not possible to copy
1182 // the value at all.
1185 NewDef = CopyAndMoveSuccessors(LRDef);
1186 if (!DestRC && !NewDef)
1187 report_fatal_error("Can't handle live physical register dependency!");
1190 // Issue copies, these can be expensive cross register class copies.
1191 SmallVector<SUnit*, 2> Copies;
1192 InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
1193 DEBUG(dbgs() << " Adding an edge from SU #" << TrySU->NodeNum
1194 << " to SU #" << Copies.front()->NodeNum << "\n");
1195 AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
1196 /*Reg=*/0, /*isNormalMemory=*/false,
1197 /*isMustAlias=*/false,
1198 /*isArtificial=*/true));
1199 NewDef = Copies.back();
1202 DEBUG(dbgs() << " Adding an edge from SU #" << NewDef->NodeNum
1203 << " to SU #" << TrySU->NodeNum << "\n");
1204 LiveRegDefs[Reg] = NewDef;
1205 AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
1206 /*Reg=*/0, /*isNormalMemory=*/false,
1207 /*isMustAlias=*/false,
1208 /*isArtificial=*/true));
1209 TrySU->isAvailable = false;
1213 assert(CurSU && "Unable to resolve live physical register dependencies!");
1215 // Add the nodes that aren't ready back onto the available list.
1216 for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
1217 Interferences[i]->isPending = false;
1218 // May no longer be available due to backtracking.
1219 if (Interferences[i]->isAvailable) {
1220 AvailableQueue->push(Interferences[i]);
1226 /// ListScheduleBottomUp - The main loop of list scheduling for bottom-up schedulers.
1228 void ScheduleDAGRRList::ListScheduleBottomUp() {
1229 // Release any predecessors of the special Exit node.
1230 ReleasePredecessors(&ExitSU);
1232 // Add root to Available queue.
1233 if (!SUnits.empty()) {
1234 SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
1235 assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
1236 RootSU->isAvailable = true;
1237 AvailableQueue->push(RootSU);
1240 // While Available queue is not empty, grab the node with the highest
1241 // priority. If it is not ready put it back. Schedule the node.
1242 Sequence.reserve(SUnits.size());
1243 while (!AvailableQueue->empty()) {
1244 DEBUG(dbgs() << "\nExamining Available:\n";
1245 AvailableQueue->dump(this));
1247 // Pick the best node to schedule taking all constraints into account.
1249 SUnit *SU = PickNodeToScheduleBottomUp();
1251 AdvancePastStalls(SU);
1253 ScheduleNodeBottomUp(SU);
1255 while (AvailableQueue->empty() && !PendingQueue.empty()) {
1256 // Advance the cycle to free resources. Skip ahead to the next ready SU.
1257 assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
1258 AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
1262 // Reverse the order if it is bottom up.
1263 std::reverse(Sequence.begin(), Sequence.end());
1266 VerifySchedule(isBottomUp);
1270 //===----------------------------------------------------------------------===//
1271 // Top-Down Scheduling
1272 //===----------------------------------------------------------------------===//
1274 /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
1275 /// the AvailableQueue if the count reaches zero. Also update its cycle bound.
1276 void ScheduleDAGRRList::ReleaseSucc(SUnit *SU, const SDep *SuccEdge) {
1277 SUnit *SuccSU = SuccEdge->getSUnit();
1280 if (SuccSU->NumPredsLeft == 0) {
1281 dbgs() << "*** Scheduling failed! ***\n";
1283 dbgs() << " has been released too many times!\n";
1284 llvm_unreachable(0);
1287 --SuccSU->NumPredsLeft;
1289 // If all the node's predecessors are scheduled, this node is ready
1290 // to be scheduled. Ignore the special ExitSU node.
1291 if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) {
1292 SuccSU->isAvailable = true;
1293 AvailableQueue->push(SuccSU);
1297 void ScheduleDAGRRList::ReleaseSuccessors(SUnit *SU) {
1298 // Top down: release successors
1299 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1301 assert(!I->isAssignedRegDep() &&
1302 "The list-tdrr scheduler doesn't yet support physreg dependencies!");
1304 ReleaseSucc(SU, &*I);
1308 /// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
1309 /// count of its successors. If a successor pending count is zero, add it to
1310 /// the Available queue.
1311 void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU) {
1312 DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
1313 DEBUG(SU->dump(this));
1315 assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
1316 SU->setDepthToAtLeast(CurCycle);
1317 Sequence.push_back(SU);
1319 ReleaseSuccessors(SU);
1320 SU->isScheduled = true;
1321 AvailableQueue->ScheduledNode(SU);
1324 /// ListScheduleTopDown - The main loop of list scheduling for top-down schedulers.
1326 void ScheduleDAGRRList::ListScheduleTopDown() {
1327 AvailableQueue->setCurCycle(CurCycle);
1329 // Release any successors of the special Entry node.
1330 ReleaseSuccessors(&EntrySU);
1332 // Add all leaves to the Available queue.
1333 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1334 // It is available if it has no predecessors.
1335 if (SUnits[i].Preds.empty()) {
1336 AvailableQueue->push(&SUnits[i]);
1337 SUnits[i].isAvailable = true;
1341 // While Available queue is not empty, grab the node with the highest
1342 // priority. If it is not ready put it back. Schedule the node.
1343 Sequence.reserve(SUnits.size());
1344 while (!AvailableQueue->empty()) {
1345 SUnit *CurSU = AvailableQueue->pop();
1348 ScheduleNodeTopDown(CurSU);
1350 AvailableQueue->setCurCycle(CurCycle);
1354 VerifySchedule(isBottomUp);
1359 //===----------------------------------------------------------------------===//
1360 // RegReductionPriorityQueue Definition
1361 //===----------------------------------------------------------------------===//
1363 // This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
1364 // to reduce register pressure.
1367 class RegReductionPQBase;
1369 struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1370 bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
1373 /// bu_ls_rr_sort - Priority function for bottom up register pressure
1374 /// reduction scheduler.
1375 struct bu_ls_rr_sort : public queue_sort {
1378 HasReadyFilter = false
1381 RegReductionPQBase *SPQ;
1382 bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
1383 bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
1385 bool operator()(SUnit* left, SUnit* right) const;
1388 // td_ls_rr_sort - Priority function for top down register pressure reduction scheduler.
1390 struct td_ls_rr_sort : public queue_sort {
1393 HasReadyFilter = false
1396 RegReductionPQBase *SPQ;
1397 td_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
1398 td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
1400 bool operator()(const SUnit* left, const SUnit* right) const;
1403 // src_ls_rr_sort - Priority function for source order scheduler.
1404 struct src_ls_rr_sort : public queue_sort {
1407 HasReadyFilter = false
1410 RegReductionPQBase *SPQ;
1411 src_ls_rr_sort(RegReductionPQBase *spq)
1413 src_ls_rr_sort(const src_ls_rr_sort &RHS)
1416 bool operator()(SUnit* left, SUnit* right) const;
1419 // hybrid_ls_rr_sort - Priority function for hybrid scheduler.
1420 struct hybrid_ls_rr_sort : public queue_sort {
1423 HasReadyFilter = false
1426 RegReductionPQBase *SPQ;
1427 hybrid_ls_rr_sort(RegReductionPQBase *spq)
1429 hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
1432 bool isReady(SUnit *SU, unsigned CurCycle) const;
1434 bool operator()(SUnit* left, SUnit* right) const;
1437 // ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism) scheduler.
1439 struct ilp_ls_rr_sort : public queue_sort {
1442 HasReadyFilter = false
1445 RegReductionPQBase *SPQ;
1446 ilp_ls_rr_sort(RegReductionPQBase *spq)
1448 ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
1451 bool isReady(SUnit *SU, unsigned CurCycle) const;
1453 bool operator()(SUnit* left, SUnit* right) const;
1456 class RegReductionPQBase : public SchedulingPriorityQueue {
1458 std::vector<SUnit*> Queue;
1459 unsigned CurQueueId;
1460 bool TracksRegPressure;
1462 // SUnits - The SUnits for the current graph.
1463 std::vector<SUnit> *SUnits;
1465 MachineFunction &MF;
1466 const TargetInstrInfo *TII;
1467 const TargetRegisterInfo *TRI;
1468 const TargetLowering *TLI;
1469 ScheduleDAGRRList *scheduleDAG;
1471 // SethiUllmanNumbers - The SethiUllman number for each node.
1472 std::vector<unsigned> SethiUllmanNumbers;
1474 /// RegPressure - Tracking current reg pressure per register class.
1476 std::vector<unsigned> RegPressure;
1478 /// RegLimit - Tracking the number of allocatable registers per register
1480 std::vector<unsigned> RegLimit;
1483 RegReductionPQBase(MachineFunction &mf,
1484 bool hasReadyFilter,
1486 const TargetInstrInfo *tii,
1487 const TargetRegisterInfo *tri,
1488 const TargetLowering *tli)
1489 : SchedulingPriorityQueue(hasReadyFilter),
1490 CurQueueId(0), TracksRegPressure(tracksrp),
1491 MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
1492 if (TracksRegPressure) {
1493 unsigned NumRC = TRI->getNumRegClasses();
1494 RegLimit.resize(NumRC);
1495 RegPressure.resize(NumRC);
1496 std::fill(RegLimit.begin(), RegLimit.end(), 0);
1497 std::fill(RegPressure.begin(), RegPressure.end(), 0);
1498 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
1499 E = TRI->regclass_end(); I != E; ++I)
1500 RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
1504 void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
1505 scheduleDAG = scheduleDag;
1508 ScheduleHazardRecognizer* getHazardRec() {
1509 return scheduleDAG->getHazardRec();
1512 void initNodes(std::vector<SUnit> &sunits);
1514 void addNode(const SUnit *SU);
1516 void updateNode(const SUnit *SU);
1518 void releaseState() {
1520 SethiUllmanNumbers.clear();
1521 std::fill(RegPressure.begin(), RegPressure.end(), 0);
1524 unsigned getNodePriority(const SUnit *SU) const;
1526 unsigned getNodeOrdering(const SUnit *SU) const {
1527 if (!SU->getNode()) return 0;
1529 return scheduleDAG->DAG->GetOrdering(SU->getNode());
1532 bool empty() const { return Queue.empty(); }
1534 void push(SUnit *U) {
1535 assert(!U->NodeQueueId && "Node in the queue already");
1536 U->NodeQueueId = ++CurQueueId;
1540 void remove(SUnit *SU) {
1541 assert(!Queue.empty() && "Queue is empty!");
1542 assert(SU->NodeQueueId != 0 && "Not in queue!");
1543 std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
1545 if (I != prior(Queue.end()))
1546 std::swap(*I, Queue.back());
1548 SU->NodeQueueId = 0;
1551 bool tracksRegPressure() const { return TracksRegPressure; }
1553 void dumpRegPressure() const;
1555 bool HighRegPressure(const SUnit *SU) const;
1557 bool MayReduceRegPressure(SUnit *SU) const;
1559 int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;
1561 void ScheduledNode(SUnit *SU);
1563 void UnscheduledNode(SUnit *SU);
1566 bool canClobber(const SUnit *SU, const SUnit *Op);
1567 void AddPseudoTwoAddrDeps();
1568 void PrescheduleNodesWithMultipleUses();
1569 void CalculateSethiUllmanNumbers();
1573 class RegReductionPriorityQueue : public RegReductionPQBase {
1574 static SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker) {
1575 std::vector<SUnit *>::iterator Best = Q.begin();
1576 for (std::vector<SUnit *>::iterator I = llvm::next(Q.begin()),
1577 E = Q.end(); I != E; ++I)
1578 if (Picker(*Best, *I))
1581 if (Best != prior(Q.end()))
1582 std::swap(*Best, Q.back());
1590 RegReductionPriorityQueue(MachineFunction &mf,
1592 const TargetInstrInfo *tii,
1593 const TargetRegisterInfo *tri,
1594 const TargetLowering *tli)
1595 : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, tii, tri, tli),
1598 bool isBottomUp() const { return SF::IsBottomUp; }
1600 bool isReady(SUnit *U) const {
1601 return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
1605 if (Queue.empty()) return NULL;
1607 SUnit *V = popFromQueue(Queue, Picker);
1612 void dump(ScheduleDAG *DAG) const {
1613 // Emulate pop() without clobbering NodeQueueIds.
1614 std::vector<SUnit*> DumpQueue = Queue;
1615 SF DumpPicker = Picker;
1616 while (!DumpQueue.empty()) {
1617 SUnit *SU = popFromQueue(DumpQueue, DumpPicker);
1619 dbgs() << "Height " << SU->getHeight() << ": ";
1621 dbgs() << "Depth " << SU->getDepth() << ": ";
1627 typedef RegReductionPriorityQueue<bu_ls_rr_sort>
1628 BURegReductionPriorityQueue;
1630 typedef RegReductionPriorityQueue<td_ls_rr_sort>
1631 TDRegReductionPriorityQueue;
1633 typedef RegReductionPriorityQueue<src_ls_rr_sort>
1634 SrcRegReductionPriorityQueue;
1636 typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
1637 HybridBURRPriorityQueue;
1639 typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
1640 ILPBURRPriorityQueue;
1641 } // end anonymous namespace
1643 //===----------------------------------------------------------------------===//
1644 // Static Node Priority for Register Pressure Reduction
1645 //===----------------------------------------------------------------------===//
1647 // Check for special nodes that bypass scheduling heuristics.
1648 // Currently this pushes TokenFactor nodes down, but may be used for other
1649 // pseudo-ops as well.
1651 // Return -1 to schedule right above left, 1 for left above right.
1652 // Return 0 if no bias exists.
1653 static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
1654 bool LSchedLow = left->isScheduleLow;
1655 bool RSchedLow = right->isScheduleLow;
1656 if (LSchedLow != RSchedLow)
1657 return LSchedLow < RSchedLow ? 1 : -1;
1661 /// CalcNodeSethiUllmanNumber - Compute Sethi Ullman number.
1662 /// Smaller number is the higher priority.
1664 CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
1665 unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
1666 if (SethiUllmanNumber != 0)
1667 return SethiUllmanNumber;
1670 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1672 if (I->isCtrl()) continue; // ignore chain preds
1673 SUnit *PredSU = I->getSUnit();
1674 unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
1675 if (PredSethiUllman > SethiUllmanNumber) {
1676 SethiUllmanNumber = PredSethiUllman;
1678 } else if (PredSethiUllman == SethiUllmanNumber)
1682 SethiUllmanNumber += Extra;
1684 if (SethiUllmanNumber == 0)
1685 SethiUllmanNumber = 1;
1687 return SethiUllmanNumber;
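// For example (illustrative): a node whose two non-chain predecessors both
// have Sethi-Ullman number 2 gets 2 + 1 = 3 (the tie costs an extra
// register), while predecessors numbered 3 and 1 yield just 3.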
1690 /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1691 /// scheduling units.
1692 void RegReductionPQBase::CalculateSethiUllmanNumbers() {
1693 SethiUllmanNumbers.assign(SUnits->size(), 0);
1695 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1696 CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
1699 void RegReductionPQBase::addNode(const SUnit *SU) {
1700 unsigned SUSize = SethiUllmanNumbers.size();
1701 if (SUnits->size() > SUSize)
1702 SethiUllmanNumbers.resize(SUSize*2, 0);
1703 CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1706 void RegReductionPQBase::updateNode(const SUnit *SU) {
1707 SethiUllmanNumbers[SU->NodeNum] = 0;
1708 CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1711 // Lower priority means schedule further down. For bottom-up scheduling, lower
1712 // priority SUs are scheduled before higher priority SUs.
1713 unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
1714 assert(SU->NodeNum < SethiUllmanNumbers.size());
1715 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
1716 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1717 // CopyToReg should be close to its uses to facilitate coalescing and
1720 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
1721 Opc == TargetOpcode::SUBREG_TO_REG ||
1722 Opc == TargetOpcode::INSERT_SUBREG)
1723 // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
1724 // close to their uses to facilitate coalescing.
1726 if (SU->NumSuccs == 0 && SU->NumPreds != 0)
1727 // If SU does not have a register use, i.e. it doesn't produce a value
1728 // that would be consumed (e.g. store), then it terminates a chain of
1729 // computation. Give it a large SethiUllman number so it will be
1730 // scheduled right before its predecessors and doesn't lengthen
1731 // their live ranges.
1733 if (SU->NumPreds == 0 && SU->NumSuccs != 0)
1734 // If SU does not have a register def, schedule it close to its uses
1735 // because it does not lengthen any live ranges.
1738 return SethiUllmanNumbers[SU->NodeNum];
1740 unsigned Priority = SethiUllmanNumbers[SU->NodeNum];
1742 // FIXME: This assumes all of the defs are used as call operands.
1743 int NP = (int)Priority - SU->getNode()->getNumValues();
1744 return (NP > 0) ? NP : 0;
1750 //===----------------------------------------------------------------------===//
1751 // Register Pressure Tracking
1752 //===----------------------------------------------------------------------===//
1754 void RegReductionPQBase::dumpRegPressure() const {
1755 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
1756 E = TRI->regclass_end(); I != E; ++I) {
1757 const TargetRegisterClass *RC = *I;
1758 unsigned Id = RC->getID();
1759 unsigned RP = RegPressure[Id];
1761 DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
1766 bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
1770 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1774 SUnit *PredSU = I->getSUnit();
1775 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1776 // to cover the number of registers defined (they are all live).
1777 if (PredSU->NumRegDefsLeft == 0) {
1780 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1781 RegDefPos.IsValid(); RegDefPos.Advance()) {
1782 EVT VT = RegDefPos.GetValue();
1783 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1784 unsigned Cost = TLI->getRepRegClassCostFor(VT);
1785 if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
1792 bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
1793 const SDNode *N = SU->getNode();
1795 if (!N->isMachineOpcode() || !SU->NumSuccs)
1798 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1799 for (unsigned i = 0; i != NumDefs; ++i) {
1800 EVT VT = N->getValueType(i);
1801 if (!N->hasAnyUseOfValue(i))
1803 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1804 if (RegPressure[RCId] >= RegLimit[RCId])
1810 // Compute the register pressure contribution of this instruction by counting
1811 // up for uses that are not live and down for defs. Only count register classes
1812 // that are already under high pressure. As a side effect, compute the number of
1813 // uses of registers that are already live.
1815 // FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
1816 // so it could probably be factored.
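// For example (illustrative): if scheduling SU would make two predecessor
// defs live in classes already at their pressure limit (+2) while one of
// SU's own defs in such a class dies (-1), the result is +1, and LiveUses
// counts the operands whose defs are already live.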
1817 int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
1820 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1824 SUnit *PredSU = I->getSUnit();
1825 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1826 // to cover the number of registers defined (they are all live).
1827 if (PredSU->NumRegDefsLeft == 0) {
1828 if (PredSU->getNode()->isMachineOpcode())
1832 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1833 RegDefPos.IsValid(); RegDefPos.Advance()) {
1834 EVT VT = RegDefPos.GetValue();
1835 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1836 if (RegPressure[RCId] >= RegLimit[RCId])
1840 const SDNode *N = SU->getNode();
1842 if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
1845 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1846 for (unsigned i = 0; i != NumDefs; ++i) {
1847 EVT VT = N->getValueType(i);
1848 if (!N->hasAnyUseOfValue(i))
1850 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1851 if (RegPressure[RCId] >= RegLimit[RCId])
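
// Example (hypothetical operands): a node reading two not-yet-live values in
// an over-limit class while defining one value in that class gets
// PDiff = +2 - 1 = +1; LiveUses counts only operands whose defining nodes
// already have NumRegDefsLeft == 0, i.e. values that are already live.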
void RegReductionPQBase::ScheduledNode(SUnit *SU) {
  if (!TracksRegPressure)
    return;

  if (!SU->getNode())
    return;

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      continue;
    }
    // FIXME: The ScheduleDAG currently loses information about which of a
    // node's values is consumed by each dependence. Consequently, if the node
    // defines multiple register classes, we don't know which to pressurize
    // here. Instead the following loop consumes the register defs in an
    // arbitrary order. At least it handles the common case of clustered loads
    // to the same class. For precise liveness, each SDep needs to indicate the
    // result number. But that tightly couples the ScheduleDAG with the
    // SelectionDAG making updates tricky. A simpler hack would be to attach a
    // value type or register class to SDep.
    //
    // The most important aspect of register tracking is balancing the increase
    // here with the reduction further below. Note that this SU may use multiple
    // defs in PredSU. They can't be determined here, but we've already
    // compensated by reducing NumRegDefsLeft in PredSU during
    // ScheduleDAGSDNodes::AddSchedEdges.
    --PredSU->NumRegDefsLeft;
    unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
      if (SkipRegDefs)
        continue;
      EVT VT = RegDefPos.GetValue();
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      break;
    }
  }

  // We should have this assert, but there may be dead SDNodes that never
  // materialize as SUnits, so they don't appear to generate liveness.
  //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
  int SkipRegDefs = (int)SU->NumRegDefsLeft;
  for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
       RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
    if (SkipRegDefs > 0)
      continue;
    EVT VT = RegDefPos.GetValue();
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT)) {
      // Register pressure tracking is imprecise. This can happen. But we try
      // hard not to let it happen because it likely results in poor scheduling.
      DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") has too many regdefs\n");
      RegPressure[RCId] = 0;
    }
    else {
      RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
    }
  }
  dumpRegPressure();
}
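
// Bookkeeping sketch: bottom-up, scheduling a use makes the consumed value
// live, so one pending def of the PredSU is charged to its class above;
// scheduling the defining node later ends that live range and credits the
// class back. The clamp to zero absorbs imprecision instead of underflowing.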
void RegReductionPQBase::UnscheduledNode(SUnit *SU) {
  if (!TracksRegPressure)
    return;

  const SDNode *N = SU->getNode();
  if (!N) return;

  if (!N->isMachineOpcode()) {
    if (N->getOpcode() != ISD::CopyToReg)
      return;
  }
  else {
    unsigned Opc = N->getMachineOpcode();
    if (Opc == TargetOpcode::EXTRACT_SUBREG ||
        Opc == TargetOpcode::INSERT_SUBREG ||
        Opc == TargetOpcode::SUBREG_TO_REG ||
        Opc == TargetOpcode::REG_SEQUENCE ||
        Opc == TargetOpcode::IMPLICIT_DEF)
      return;
  }

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
    // counts data deps.
    if (PredSU->NumSuccsLeft != PredSU->Succs.size())
      continue;
    const SDNode *PN = PredSU->getNode();
    if (!PN->isMachineOpcode()) {
      if (PN->getOpcode() == ISD::CopyFromReg) {
        EVT VT = PN->getValueType(0);
        unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
        RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      }
      continue;
    }
    unsigned POpc = PN->getMachineOpcode();
    if (POpc == TargetOpcode::IMPLICIT_DEF)
      continue;
    if (POpc == TargetOpcode::EXTRACT_SUBREG) {
      EVT VT = PN->getOperand(0).getValueType();
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      continue;
    } else if (POpc == TargetOpcode::INSERT_SUBREG ||
               POpc == TargetOpcode::SUBREG_TO_REG) {
      EVT VT = PN->getValueType(0);
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      continue;
    }
    unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
    for (unsigned i = 0; i != NumDefs; ++i) {
      EVT VT = PN->getValueType(i);
      if (!PN->hasAnyUseOfValue(i))
        continue;
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
        // Register pressure tracking is imprecise. This can happen.
        RegPressure[RCId] = 0;
      else
        RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
    }
  }

  // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
  // may transfer data dependencies to CopyToReg.
  if (SU->NumSuccs && N->isMachineOpcode()) {
    unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
      EVT VT = N->getValueType(i);
      if (VT == MVT::Glue || VT == MVT::Other)
        continue;
      if (!N->hasAnyUseOfValue(i))
        continue;
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
    }
  }

  dumpRegPressure();
}
//===----------------------------------------------------------------------===//
//           Dynamic Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//
/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
static unsigned closestSucc(const SUnit *SU) {
  unsigned MaxHeight = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain succs
    unsigned Height = I->getSUnit()->getHeight();
    // If there are a bunch of CopyToRegs stacked up, they should be considered
    // to be at the same position.
    if (I->getSUnit()->getNode() &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
      Height = closestSucc(I->getSUnit())+1;
    if (Height > MaxHeight)
      MaxHeight = Height;
  }
  return MaxHeight;
}
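
// Sketch: if SU feeds CopyToReg -> CopyToReg -> consumer at height H, the
// recursion reports H+2, treating the copy stack as a single position just
// above the real consumer rather than using the copies' own heights.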
/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
/// for scratch registers, i.e. number of data dependencies.
static unsigned calcMaxScratches(const SUnit *SU) {
  unsigned Scratches = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    Scratches++;
  }
  return Scratches;
}
/// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
/// CopyFromReg from a virtual register.
static bool hasOnlyLiveInOpers(const SUnit *SU) {
  bool RetVal = false;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *PredSU = I->getSUnit();
    if (PredSU->getNode() &&
        PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
      unsigned Reg =
        cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        RetVal = true;
        continue;
      }
    }
    return false;
  }
  return RetVal;
}
/// hasOnlyLiveOutUses - Return true if SU has only value successors that are
/// CopyToReg to a virtual register. This SU def is probably a liveout and
/// it has no other use. It should be scheduled closer to the terminator.
static bool hasOnlyLiveOutUses(const SUnit *SU) {
  bool RetVal = false;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *SuccSU = I->getSUnit();
    if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
      unsigned Reg =
        cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        RetVal = true;
        continue;
      }
    }
    return false;
  }
  return RetVal;
}
// Set isVRegCycle for a node with only live in opers and live out uses. Also
// set isVRegCycle for its CopyFromReg operands.
//
// This is only relevant for single-block loops, in which case the VRegCycle
// node is likely an induction variable in which the operand and target virtual
// registers should be coalesced (e.g. pre/post increment values). Setting the
// isVRegCycle flag helps the scheduler prioritize other uses of the same
// CopyFromReg so that this node becomes the virtual register "kill". This
// avoids interference between the values live in and out of the block and
// eliminates a copy inside the loop.
static void initVRegCycle(SUnit *SU) {
  if (DisableSchedVRegCycle)
    return;

  if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
    return;

  DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");

  SU->isVRegCycle = true;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    I->getSUnit()->isVRegCycle = true;
  }
}
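
// Typical match (sketch): in a single-block loop body,
//   t = CopyFromReg(vreg);  t2 = add t, 1;  CopyToReg(vreg, t2)
// the add has only live-in operands and live-out uses, so it and its
// CopyFromReg operand get isVRegCycle; other readers of t are then preferred
// first, making the add the final use so the in and out values can coalesce.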
// After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of
// CopyFromReg operands. We should no longer penalize other uses of this VReg.
static void resetVRegCycle(SUnit *SU) {
  if (!SU->isVRegCycle)
    return;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    if (PredSU->isVRegCycle) {
      assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
             "VRegCycle def must be CopyFromReg");
      I->getSUnit()->isVRegCycle = 0;
    }
  }
}
// Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This
// means a node that defines the VRegCycle has not been scheduled yet.
static bool hasVRegCycleUse(const SUnit *SU) {
  // If this SU also defines the VReg, don't hoist it as a "use".
  if (SU->isVRegCycle)
    return false;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    if (I->getSUnit()->isVRegCycle &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
      DEBUG(dbgs() << "  VReg cycle use: SU (" << SU->NodeNum << ")\n");
      return true;
    }
  }
  return false;
}
// Check for either a dependence (latency) or resource (hazard) stall.
//
// Note: The ScheduleHazardRecognizer interface requires a non-const SU.
static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
  if ((int)SPQ->getCurCycle() < Height) return true;
  if (SPQ->getHazardRec()->getHazardType(SU, 0)
      != ScheduleHazardRecognizer::NoHazard)
    return true;
  return false;
}
// Return -1 if left has higher priority, 1 if right has higher priority.
// Return 0 if latency-based priority is equivalent.
static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
                            RegReductionPQBase *SPQ) {
  // Scheduling an instruction that uses a VReg whose postincrement has not yet
  // been scheduled will induce a copy. Model this as an extra cycle of latency.
  int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
  int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
  int LHeight = (int)left->getHeight() + LPenalty;
  int RHeight = (int)right->getHeight() + RPenalty;

  bool LStall = (!checkPref || left->SchedulingPref == Sched::Latency) &&
    BUHasStall(left, LHeight, SPQ);
  bool RStall = (!checkPref || right->SchedulingPref == Sched::Latency) &&
    BUHasStall(right, RHeight, SPQ);

  // If scheduling one of the nodes will cause a pipeline stall, delay it.
  // If scheduling both of the nodes will cause a stall, sort them according
  // to their height.
  if (LStall) {
    if (!RStall) {
      DEBUG(++FactorCount[FactStall]);
      return 1;
    }
    if (LHeight != RHeight) {
      DEBUG(++FactorCount[FactStall]);
      return LHeight > RHeight ? 1 : -1;
    }
  } else if (RStall) {
    DEBUG(++FactorCount[FactStall]);
    return -1;
  }

  // If either node is scheduling for latency, sort them by height/depth
  // and latency.
  if (!checkPref || (left->SchedulingPref == Sched::Latency ||
                     right->SchedulingPref == Sched::Latency)) {
    if (DisableSchedCycles) {
      if (LHeight != RHeight) {
        DEBUG(++FactorCount[FactHeight]);
        return LHeight > RHeight ? 1 : -1;
      }
    }
    else {
      // If neither instruction stalls (!LStall && !RStall) then
      // its height is already covered so only its depth matters. We also reach
      // this if both stall but have the same height.
      int LDepth = left->getDepth() - LPenalty;
      int RDepth = right->getDepth() - RPenalty;
      if (LDepth != RDepth) {
        DEBUG(++FactorCount[FactDepth]);
        DEBUG(dbgs() << "  Comparing latency of SU (" << left->NodeNum
              << ") depth " << LDepth << " vs SU (" << right->NodeNum
              << ") depth " << RDepth << "\n");
        return LDepth < RDepth ? 1 : -1;
      }
    }
    if (left->Latency != right->Latency) {
      DEBUG(++FactorCount[FactOther]);
      return left->Latency > right->Latency ? 1 : -1;
    }
  }
  return 0;
}
static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
  // Schedule physical register definitions close to their use. This is
  // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
  // long as shortening physreg live ranges is generally good, we can defer
  // creating a subtarget hook.
  if (!DisableSchedPhysRegJoin) {
    bool LHasPhysReg = left->hasPhysRegDefs;
    bool RHasPhysReg = right->hasPhysRegDefs;
    if (LHasPhysReg != RHasPhysReg) {
      DEBUG(++FactorCount[FactRegUses]);
      #ifndef NDEBUG
      const char *PhysRegMsg[] = {" has no physreg", " defines a physreg"};
      #endif
      DEBUG(dbgs() << "  SU (" << left->NodeNum << ") "
            << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
            << PhysRegMsg[RHasPhysReg] << "\n");
      return LHasPhysReg < RHasPhysReg;
    }
  }

  // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);

  // Be really careful about hoisting call operands above previous calls.
  // Only allow it if it would reduce register pressure.
  if (left->isCall && right->isCallOp) {
    unsigned RNumVals = right->getNode()->getNumValues();
    RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
  }
  if (right->isCall && left->isCallOp) {
    unsigned LNumVals = left->getNode()->getNumValues();
    LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
  }

  if (LPriority != RPriority) {
    DEBUG(++FactorCount[FactStatic]);
    return LPriority > RPriority;
  }

  // If one or both of the nodes are calls and their Sethi-Ullman numbers are
  // the same, then keep source order.
  if (left->isCall || right->isCall) {
    unsigned LOrder = SPQ->getNodeOrdering(left);
    unsigned ROrder = SPQ->getNodeOrdering(right);

    // Prefer an ordering where the lower the non-zero order number, the higher
    // it is in the list.
    if ((LOrder || ROrder) && LOrder != ROrder)
      return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
  }

  // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
  // e.g.
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // and the following instructions are both ready.
  // t2 = op c3
  // t4 = op c4
  //
  // Then schedule t2 = op first.
  // i.e.
  // t4 = op c4
  // t2 = op c3
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // This creates more short live intervals.
  unsigned LDist = closestSucc(left);
  unsigned RDist = closestSucc(right);
  if (LDist != RDist) {
    DEBUG(++FactorCount[FactOther]);
    return LDist < RDist;
  }

  // How many registers become live when the node is scheduled.
  unsigned LScratch = calcMaxScratches(left);
  unsigned RScratch = calcMaxScratches(right);
  if (LScratch != RScratch) {
    DEBUG(++FactorCount[FactOther]);
    return LScratch > RScratch;
  }

  // Comparing latency against a call makes little sense unless the node
  // is register pressure-neutral.
  if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
    return (left->NodeQueueId > right->NodeQueueId);

  // Do not compare latencies when one or both of the nodes are calls.
  if (!DisableSchedCycles &&
      !(left->isCall || right->isCall)) {
    int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
    if (result != 0)
      return result > 0;
  }
  else {
    if (left->getHeight() != right->getHeight()) {
      DEBUG(++FactorCount[FactHeight]);
      return left->getHeight() > right->getHeight();
    }
    if (left->getDepth() != right->getDepth()) {
      DEBUG(++FactorCount[FactDepth]);
      return left->getDepth() < right->getDepth();
    }
  }

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  DEBUG(++FactorCount[FactOther]);
  return (left->NodeQueueId > right->NodeQueueId);
}
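
// Taken together, BURRSort's tie-breakers fire in this order: physreg def
// affinity, Sethi-Ullman priority (with the call-operand adjustment), source
// order for calls, closest-successor distance, scratch register count,
// latency (or raw height/depth when cycle-level precision is disabled), and
// finally NodeQueueId as a deterministic last resort.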
// Bottom up
bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;
  return BURRSort(left, right, SPQ);
}
// Source order, otherwise bottom up.
bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  unsigned LOrder = SPQ->getNodeOrdering(left);
  unsigned ROrder = SPQ->getNodeOrdering(right);

  // Prefer an ordering where the lower the non-zero order number, the higher
  // it is in the list.
  if ((LOrder || ROrder) && LOrder != ROrder)
    return LOrder != 0 && (LOrder < ROrder || ROrder == 0);

  return BURRSort(left, right, SPQ);
}
// If the time between now and when the instruction will be ready can cover
// the spill code, then avoid adding it to the ready queue. This gives long
// stalls highest priority and allows hoisting across calls. It should also
// speed up processing the available queue.
bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  static const unsigned ReadyDelay = 3;

  if (SPQ->MayReduceRegPressure(SU)) return true;

  if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
      != ScheduleHazardRecognizer::NoHazard)
    return false;

  return true;
}
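
// Worked example (hypothetical cycles): with ReadyDelay = 3 at CurCycle = 10,
// an SU of height 14 stays out of the queue (14 > 10 + 3), since a stall that
// long can cover spill code anyway; an SU of height 12 becomes available as
// long as the hazard recognizer is clear when probed 3 cycles early.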
// Return true if right should be scheduled with higher priority than left.
bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  if (left->isCall || right->isCall)
    // No way to compute latency of calls.
    return BURRSort(left, right, SPQ);

  bool LHigh = SPQ->HighRegPressure(left);
  bool RHigh = SPQ->HighRegPressure(right);
  // Avoid causing spills. If register pressure is high, schedule for
  // register pressure reduction.
  if (LHigh && !RHigh) {
    DEBUG(++FactorCount[FactPressureDiff]);
    DEBUG(dbgs() << "  pressure SU(" << left->NodeNum << ") > SU("
          << right->NodeNum << ")\n");
    return true;
  }
  else if (!LHigh && RHigh) {
    DEBUG(++FactorCount[FactPressureDiff]);
    DEBUG(dbgs() << "  pressure SU(" << right->NodeNum << ") > SU("
          << left->NodeNum << ")\n");
    return false;
  }
  if (!LHigh && !RHigh) {
    int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
    if (result != 0)
      return result > 0;
  }
  return BURRSort(left, right, SPQ);
}
// Schedule as many instructions in each cycle as possible. So don't make an
// instruction available unless it is ready in the current cycle.
bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  if (SU->getHeight() > CurCycle) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, 0)
      != ScheduleHazardRecognizer::NoHazard)
    return false;

  return true;
}
static bool canEnableCoalescing(SUnit *SU) {
  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // CopyToReg should be close to its uses to facilitate coalescing and
    // avoid spilling.
    return true;

  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG ||
      Opc == TargetOpcode::INSERT_SUBREG)
    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
    // close to their uses to facilitate coalescing.
    return true;

  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
    // If SU does not have a register def, schedule it close to its uses
    // because it does not lengthen any live ranges.
    return true;

  return false;
}
// list-ilp is currently an experimental scheduler that allows various
// heuristics to be enabled prior to the normal register reduction logic.
bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  if (left->isCall || right->isCall)
    // No way to compute latency of calls.
    return BURRSort(left, right, SPQ);

  unsigned LLiveUses = 0, RLiveUses = 0;
  int LPDiff = 0, RPDiff = 0;
  if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
    LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
    RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
  }
  if (!DisableSchedRegPressure && LPDiff != RPDiff) {
    DEBUG(++FactorCount[FactPressureDiff]);
    DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
          << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
    return LPDiff > RPDiff;
  }

  if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
    bool LReduce = canEnableCoalescing(left);
    bool RReduce = canEnableCoalescing(right);
    DEBUG(if (LReduce != RReduce) ++FactorCount[FactPressureDiff]);
    if (LReduce && !RReduce) return false;
    if (RReduce && !LReduce) return true;
  }

  if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
    DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
          << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
    DEBUG(++FactorCount[FactRegUses]);
    return LLiveUses < RLiveUses;
  }

  if (!DisableSchedStalls) {
    bool LStall = BUHasStall(left, left->getHeight(), SPQ);
    bool RStall = BUHasStall(right, right->getHeight(), SPQ);
    if (LStall != RStall) {
      DEBUG(++FactorCount[FactHeight]);
      return left->getHeight() > right->getHeight();
    }
  }

  if (!DisableSchedCriticalPath) {
    int spread = (int)left->getDepth() - (int)right->getDepth();
    if (std::abs(spread) > MaxReorderWindow) {
      DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
            << left->getDepth() << " != SU(" << right->NodeNum << "): "
            << right->getDepth() << "\n");
      DEBUG(++FactorCount[FactDepth]);
      return left->getDepth() < right->getDepth();
    }
  }

  if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
    int spread = (int)left->getHeight() - (int)right->getHeight();
    if (std::abs(spread) > MaxReorderWindow) {
      DEBUG(++FactorCount[FactHeight]);
      return left->getHeight() > right->getHeight();
    }
  }

  return BURRSort(left, right, SPQ);
}
void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
  SUnits = &sunits;
  // Add pseudo dependency edges for two-address nodes.
  AddPseudoTwoAddrDeps();
  // Reroute edges to nodes with multiple uses.
  if (!TracksRegPressure)
    PrescheduleNodesWithMultipleUses();
  // Calculate node priorities.
  CalculateSethiUllmanNumbers();

  // For single block loops, mark nodes that look like canonical IV increments.
  if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
    for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
      initVRegCycle(&sunits[i]);
    }
  }
}
//===----------------------------------------------------------------------===//
//                    Preschedule for Register Pressure
//===----------------------------------------------------------------------===//
bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
  if (SU->isTwoAddress) {
    unsigned Opc = SU->getNode()->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
        SDNode *DU = SU->getNode()->getOperand(i).getNode();
        if (DU->getNodeId() != -1 &&
            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
          return true;
      }
    }
  }
  return false;
}
/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
                                  const TargetInstrInfo *TII,
                                  const TargetRegisterInfo *TRI) {
  SDNode *N = SuccSU->getNode();
  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
  assert(ImpDefs && "Caller should check hasPhysRegDefs");
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (!SUNode->isMachineOpcode())
      continue;
    const unsigned *SUImpDefs =
      TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
    if (!SUImpDefs)
      return false;
    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
      EVT VT = N->getValueType(i);
      if (VT == MVT::Glue || VT == MVT::Other)
        continue;
      if (!N->hasAnyUseOfValue(i))
        continue;
      unsigned Reg = ImpDefs[i - NumDefs];
      for (;*SUImpDefs; ++SUImpDefs) {
        unsigned SUReg = *SUImpDefs;
        if (TRI->regsOverlap(Reg, SUReg))
          return true;
      }
    }
  }
  return false;
}
/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
/// are not handled well by the general register pressure reduction
/// heuristics. When presented with code like this:
///
///      N
///    /   \
///   U   store
///
/// the heuristics tend to push the store up, but since the
/// operand of the store has another use (U), this would increase
/// the length of that other use (the U->N edge).
///
/// This function transforms code like the above to route U's
/// dependence through the store when possible, like this:
///
///      N
///      |
///    store
///      |
///      U
///
/// This results in the store being scheduled immediately after N,
/// which shortens the U->N live range, reducing register pressure.
void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
  // Visit all the nodes in topological order, working top-down.
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    // For now, only look at nodes with no data successors, such as stores.
    // These are especially important, due to the heuristics in
    // getNodePriority for nodes with no data successors.
    if (SU->NumSuccs != 0)
      continue;
    // For now, only look at nodes with exactly one data predecessor.
    if (SU->NumPreds != 1)
      continue;
    // Avoid prescheduling copies to virtual registers, which don't behave
    // like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyToReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Locate the single data predecessor.
    SUnit *PredSU = 0;
    for (SUnit::const_pred_iterator II = SU->Preds.begin(),
         EE = SU->Preds.end(); II != EE; ++II)
      if (!II->isCtrl()) {
        PredSU = II->getSUnit();
        break;
      }
    assert(PredSU);

    // Don't rewrite edges that carry physregs, because that requires additional
    // support infrastructure.
    if (PredSU->hasPhysRegDefs)
      continue;
    // Short-circuit the case where SU is PredSU's only data successor.
    if (PredSU->NumSuccs == 1)
      continue;
    // Avoid prescheduling to copies from virtual registers, which don't behave
    // like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyFromReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Perform checks on the successors of PredSU.
    for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
         EE = PredSU->Succs.end(); II != EE; ++II) {
      SUnit *PredSuccSU = II->getSUnit();
      if (PredSuccSU == SU) continue;
      // If PredSU has another successor with no data successors, for
      // now don't attempt to choose either over the other.
      if (PredSuccSU->NumSuccs == 0)
        goto outer_loop_continue;
      // Don't break physical register dependencies.
      if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
        if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
          goto outer_loop_continue;
      // Don't introduce graph cycles.
      if (scheduleDAG->IsReachable(SU, PredSuccSU))
        goto outer_loop_continue;
    }

    // Ok, the transformation is safe and the heuristics suggest it is
    // profitable. Update the graph.
    DEBUG(dbgs() << "    Prescheduling SU #" << SU->NodeNum
                 << " next to PredSU #" << PredSU->NodeNum
                 << " to guide scheduling in the presence of multiple uses\n");
    for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
      SDep Edge = PredSU->Succs[i];
      assert(!Edge.isAssignedRegDep());
      SUnit *SuccSU = Edge.getSUnit();
      if (SuccSU != SU) {
        Edge.setSUnit(PredSU);
        scheduleDAG->RemovePred(SuccSU, Edge);
        scheduleDAG->AddPred(SU, Edge);
        Edge.setSUnit(SU);
        scheduleDAG->AddPred(SuccSU, Edge);
        --i;
      }
    }
  outer_loop_continue:;
  }
}
/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule). If both nodes are two-address, favor the
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
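///
/// Illustrative case: on X86, ADD32rr is two-address, so in t2 = ADD32rr t1, x
/// the result is tied to t1's register. If another node also reads t1, the
/// artificial edge added below schedules that reader above the ADD, letting
/// the ADD be t1's last use so no extra copy of t1 is required.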
void RegReductionPQBase::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->getNode();
    if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
      continue;

    bool isLiveOut = hasOnlyLiveOutUses(SU);
    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned j = 0; j != NumOps; ++j) {
      if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
        continue;
      SDNode *DU = SU->getNode()->getOperand(j).getNode();
      if (DU->getNodeId() == -1)
        continue;
      const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
      if (!DUSU) continue;
      for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
           E = DUSU->Succs.end(); I != E; ++I) {
        if (I->isCtrl()) continue;
        SUnit *SuccSU = I->getSUnit();
        if (SuccSU == SU)
          continue;
        // Be conservative. Ignore if nodes aren't at roughly the same
        // depth and height.
        if (SuccSU->getHeight() < SU->getHeight() &&
            (SU->getHeight() - SuccSU->getHeight()) > 1)
          continue;
        // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
        // constrains whatever is using the copy, instead of the copy
        // itself. In the case that the copy is coalesced, this
        // preserves the intent of the pseudo two-address heuristics.
        while (SuccSU->Succs.size() == 1 &&
               SuccSU->getNode()->isMachineOpcode() &&
               SuccSU->getNode()->getMachineOpcode() ==
                 TargetOpcode::COPY_TO_REGCLASS)
          SuccSU = SuccSU->Succs.front().getSUnit();
        // Don't constrain non-instruction nodes.
        if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
          continue;
        // Don't constrain nodes with physical register defs if the
        // predecessor can clobber them.
        if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
          if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
            continue;
        }
        // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
        // these may be coalesced away. We want them close to their uses.
        unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
        if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
            SuccOpc == TargetOpcode::INSERT_SUBREG ||
            SuccOpc == TargetOpcode::SUBREG_TO_REG)
          continue;
        if ((!canClobber(SuccSU, DUSU) ||
             (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
             (!SU->isCommutable && SuccSU->isCommutable)) &&
            !scheduleDAG->IsReachable(SuccSU, SU)) {
          DEBUG(dbgs() << "    Adding a pseudo-two-addr edge from SU #"
                << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
          scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
                                        /*Reg=*/0, /*isNormalMemory=*/false,
                                        /*isMustAlias=*/false,
                                        /*isArtificial=*/true));
        }
      }
    }
  }
}
/// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
/// predecessors of the successors of the SUnit SU. Stop when the provided
/// limit is exceeded.
static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
                                                    unsigned Limit) {
  unsigned Sum = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    const SUnit *SuccSU = I->getSUnit();
    for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
         EE = SuccSU->Preds.end(); II != EE; ++II) {
      SUnit *PredSU = II->getSUnit();
      if (!PredSU->isScheduled)
        if (++Sum > Limit)
          return Sum;
    }
  }
  return Sum;
}
// Top down
bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res < 0;

  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  bool LIsTarget = left->getNode() && left->getNode()->isMachineOpcode();
  bool RIsTarget = right->getNode() && right->getNode()->isMachineOpcode();
  bool LIsFloater = LIsTarget && left->NumPreds == 0;
  bool RIsFloater = RIsTarget && right->NumPreds == 0;
  unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
  unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0;

  if (left->NumSuccs == 0 && right->NumSuccs != 0)
    return false;
  else if (left->NumSuccs != 0 && right->NumSuccs == 0)
    return true;

  if (LIsFloater)
    LBonus -= 2;
  if (RIsFloater)
    RBonus -= 2;
  if (left->NumSuccs == 1)
    LBonus += 2;
  if (right->NumSuccs == 1)
    RBonus += 2;

  if (LPriority+LBonus != RPriority+RBonus)
    return LPriority+LBonus < RPriority+RBonus;

  if (left->getDepth() != right->getDepth())
    return left->getDepth() < right->getDepth();

  if (left->NumSuccsLeft != right->NumSuccsLeft)
    return left->NumSuccsLeft > right->NumSuccsLeft;

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}
//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//
llvm::ScheduleDAGSDNodes *
llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
                                 CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  BURegReductionPriorityQueue *PQ =
    new BURegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}
llvm::ScheduleDAGSDNodes *
llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
                                 CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  TDRegReductionPriorityQueue *PQ =
    new TDRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}
llvm::ScheduleDAGSDNodes *
llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  SrcRegReductionPriorityQueue *PQ =
    new SrcRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}
llvm::ScheduleDAGSDNodes *
llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = &IS->getTargetLowering();

  HybridBURRPriorityQueue *PQ =
    new HybridBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);

  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}
llvm::ScheduleDAGSDNodes *
llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
                                CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = &IS->getTargetLowering();

  ILPBURRPriorityQueue *PQ =
    new ILPBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}
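
// Usage note: these factories are not normally called directly; the
// RegisterScheduler entries expose them through the scheduler registry,
// selectable from the command line, e.g.:
//   llc -pre-RA-sched=list-burr foo.ll
//   llc -pre-RA-sched=list-ilp foo.ll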