//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
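//
// In outline, the bottom-up variant's main loop looks like this (a simplified
// sketch of ListScheduleBottomUp below; the real loop also tracks pending
// nodes, pipeline stalls, and physical register interferences):
//
//   while (!AvailableQueue->empty()) {
//     SUnit *SU = PickNodeToScheduleBottomUp(); // highest priority, legal
//     AdvancePastStalls(SU);                    // honor latency and hazards
//     ScheduleNodeBottomUp(SU);                 // emit; release predecessors
//   }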
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <climits>
using namespace llvm;

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       "Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);
static RegisterScheduler
  sourceListDAGScheduler("source",
                         "Similar to list-burr but schedules in source "
                         "order when possible",
                         createSourceListDAGScheduler);

static RegisterScheduler
  hybridListDAGScheduler("list-hybrid",
                         "Bottom-up register pressure aware list scheduling "
                         "which tries to balance latency and register pressure",
                         createHybridListDAGScheduler);

static RegisterScheduler
  ILPListDAGScheduler("list-ilp",
                      "Bottom-up register pressure aware list scheduling "
                      "which tries to balance ILP and register pressure",
                      createILPListDAGScheduler);
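
// Schedulers registered this way are selectable on the llc command line,
// e.g. "llc -pre-RA-sched=list-burr"; the create*ListDAGScheduler entry
// points can also be invoked directly through the SchedulerRegistry.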

static cl::opt<bool> DisableSchedCycles(
  "disable-sched-cycles", cl::Hidden, cl::init(false),
  cl::desc("Disable cycle-level precision during preRA scheduling"));

// Temporary sched=list-ilp flags until the heuristics are robust.
// Some options are also available under sched=list-hybrid.
static cl::opt<bool> DisableSchedRegPressure(
  "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
  cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
  "disable-sched-live-uses", cl::Hidden, cl::init(true),
  cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedVRegCycle(
  "disable-sched-vrcycle", cl::Hidden, cl::init(false),
  cl::desc("Disable virtual register cycle interference checks"));
static cl::opt<bool> DisableSchedPhysRegJoin(
  "disable-sched-physreg-join", cl::Hidden, cl::init(false),
  cl::desc("Disable physreg def-use affinity"));
static cl::opt<bool> DisableSchedStalls(
  "disable-sched-stalls", cl::Hidden, cl::init(true),
  cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
  "disable-sched-critical-path", cl::Hidden, cl::init(false),
  cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
  "disable-sched-height", cl::Hidden, cl::init(false),
  cl::desc("Disable scheduled-height priority in sched=list-ilp"));

static cl::opt<int> MaxReorderWindow(
  "max-sched-reorder", cl::Hidden, cl::init(6),
  cl::desc("Number of instructions to allow ahead of the critical path "
           "in sched=list-ilp"));

static cl::opt<unsigned> AvgIPC(
  "sched-avg-ipc", cl::Hidden, cl::init(1),
  cl::desc("Average inst/cycle when no target itinerary exists."));

#ifndef NDEBUG
namespace {
  // For sched=list-ilp, count the number of times each factor comes into play.
  enum { FactPressureDiff, FactRegUses, FactStall, FactHeight, FactDepth,
         FactStatic, FactOther, NumFactors };
}
static const char *FactorName[NumFactors] =
{"PressureDiff", "RegUses", "Stall", "Height", "Depth", "Static", "Other"};
static int FactorCount[NumFactors];
#endif // !NDEBUG

namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
///
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false
  /// if it is top-down.
  bool isBottomUp;

  /// NeedLatency - True if the scheduler will make use of latency information.
  bool NeedLatency;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// CurCycle - The current scheduler state corresponds to this cycle.
  unsigned CurCycle;

  /// MinAvailableCycle - Cycle of the soonest available instruction.
  unsigned MinAvailableCycle;

  /// IssueCount - Count of instructions issued in this cycle.
  /// Currently valid only for bottom-up scheduling.
  unsigned IssueCount;

  /// LiveRegDefs - A set of physical registers and their definitions
  /// that are "live". These nodes must be scheduled before any other nodes
  /// that modify the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<SUnit*> LiveRegGens;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

public:
  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
                    SchedulingPriorityQueue *availqueue,
                    CodeGenOpt::Level OptLevel)
    : ScheduleDAGSDNodes(mf), isBottomUp(availqueue->isBottomUp()),
      NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
      Topo(SUnits) {
    const TargetMachine &tm = mf.getTarget();
    if (DisableSchedCycles || !NeedLatency)
      HazardRec = new ScheduleHazardRecognizer();
    else
      HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
  }

  ~ScheduleDAGRRList() {
    delete HazardRec;
    delete AvailableQueue;
  }

  void Schedule();

  ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  bool isReady(SUnit *SU) {
    return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
      AvailableQueue->isReady(SU);
  }

  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU);
  void ReleaseSucc(SUnit *SU, const SDep *SuccEdge);
  void ReleaseSuccessors(SUnit *SU);
  void ReleasePending();
  void AdvanceToCycle(unsigned NextCycle);
  void AdvancePastStalls(SUnit *SU);
  void EmitNode(SUnit *SU);
  void ScheduleNodeBottomUp(SUnit*);
  void CapturePred(SDep *PredEdge);
  void UnscheduleNodeBottomUp(SUnit*);
  void RestoreHazardCheckerBottomUp();
  void BacktrackBottomUp(SUnit*, SUnit*);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);

  SUnit *PickNodeToScheduleBottomUp();
  void ListScheduleBottomUp();

  void ScheduleNodeTopDown(SUnit*);
  void ListScheduleTopDown();

public:
  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// ForceUnitLatencies - Register-pressure-reducing scheduling doesn't
  /// need actual latency information but the hybrid scheduler does.
  bool ForceUnitLatencies() const {
    return !NeedLatency;
  }
};
} // end anonymous namespace

/// GetCostForDef - Looks up the register class and cost for a given definition.
/// Typically this just means looking up the representative register class,
/// but for untyped values (MVT::untyped) it means inspecting the node's
/// opcode to determine what register class is being generated.
static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
                          const TargetLowering *TLI,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          unsigned &RegClass, unsigned &Cost) {
  EVT VT = RegDefPos.GetValue();

  // Special handling for untyped values. These values can only come from
  // the expansion of custom DAG-to-DAG patterns.
  if (VT == MVT::untyped) {
    unsigned Opcode = RegDefPos.GetNode()->getMachineOpcode();
    unsigned Idx = RegDefPos.GetIdx();
    const TargetInstrDesc Desc = TII->get(Opcode);
    const TargetRegisterClass *RC = Desc.getRegClass(Idx, TRI);
    RegClass = RC->getID();
    // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
    // better way to determine it.
    Cost = 1;
  } else {
    RegClass = TLI->getRepRegClassFor(VT)->getID();
    Cost = TLI->getRepRegClassCostFor(VT);
  }
}

/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DEBUG(dbgs()
        << "********** List Scheduling BB#" << BB->getNumber()
        << " '" << BB->getName() << "' **********\n");
#ifndef NDEBUG
  for (int i = 0; i < NumFactors; ++i) {
    FactorCount[i] = 0;
  }
#endif // !NDEBUG

  CurCycle = 0;
  IssueCount = 0;
  MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
  NumLiveRegs = 0;
  LiveRegDefs.resize(TRI->getNumRegs(), NULL);
  LiveRegGens.resize(TRI->getNumRegs(), NULL);

  // Build the scheduling graph.
  BuildSchedGraph(NULL);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  HazardRec->Reset();

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

#ifndef NDEBUG
  for (int i = 0; i < NumFactors; ++i) {
    DEBUG(dbgs() << FactorName[i] << "\t" << FactorCount[i] << "\n");
  }
#endif // !NDEBUG
  AvailableQueue->releaseState();
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;

  if (!ForceUnitLatencies()) {
    // Update the predecessor's height. This is now the cycle when the
    // predecessor can be scheduled without causing a pipeline stall.
    PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
  }

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;

    unsigned Height = PredSU->getHeight();
    if (Height < MinAvailableCycle)
      MinAvailableCycle = Height;

    if (isReady(PredSU)) {
      AvailableQueue->push(PredSU);
    }
    // CapturePred and others may have left the node in the pending queue;
    // avoid adding it twice.
    else if (!PredSU->isPending) {
      PredSU->isPending = true;
      PendingQueue.push_back(PredSU);
    }
  }
}

/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively creates one large live range
/// across a sequence of two-address nodes. This is important because the
/// entire chain must be scheduled together. Example:
///
/// flags = (3) add
/// flags = (2) addc flags
/// flags = (1) addc flags
///
/// results in
///
/// LiveRegDefs[flags] = 3
/// LiveRegGens[flags] = 1
///
/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
  // Bottom up: release predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // the use.
      SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
      assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
             "interference on register dependence");
      LiveRegDefs[I->getReg()] = I->getSUnit();
      if (!LiveRegGens[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegGens[I->getReg()] = SU;
      }
    }
  }
}

/// Check to see if any of the pending instructions are ready to issue. If
/// so, add them to the available queue.
void ScheduleDAGRRList::ReleasePending() {
  if (DisableSchedCycles) {
    assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
    return;
  }

  // If the available queue is empty, it is safe to reset MinAvailableCycle.
  if (AvailableQueue->empty())
    MinAvailableCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
    unsigned ReadyCycle =
      isBottomUp ? PendingQueue[i]->getHeight() : PendingQueue[i]->getDepth();
    if (ReadyCycle < MinAvailableCycle)
      MinAvailableCycle = ReadyCycle;

    if (PendingQueue[i]->isAvailable) {
      if (!isReady(PendingQueue[i]))
        continue;
      AvailableQueue->push(PendingQueue[i]);
    }
    PendingQueue[i]->isPending = false;
    PendingQueue[i] = PendingQueue.back();
    PendingQueue.pop_back();
    --i; --e;
  }
}

/// Move the scheduler state forward by the specified number of Cycles.
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
  if (NextCycle <= CurCycle)
    return;

  IssueCount = 0;
  AvailableQueue->setCurCycle(NextCycle);
  if (!HazardRec->isEnabled()) {
    // Bypass lots of virtual calls in case of long latency.
    CurCycle = NextCycle;
  }
  else {
    for (; CurCycle != NextCycle; ++CurCycle) {
      if (isBottomUp)
        HazardRec->RecedeCycle();
      else
        HazardRec->AdvanceCycle();
    }
  }
  // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
  // available Q to release pending nodes at least once before popping.
  ReleasePending();
}

/// Move the scheduler state forward until the specified node's dependents are
/// ready and can be scheduled with no resource conflicts.
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
  if (DisableSchedCycles)
    return;

  // FIXME: Nodes such as CopyFromReg probably should not advance the current
  // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
  // has predecessors the cycle will be advanced when they are scheduled.
  // But given the crude nature of modeling latency through such nodes, we
  // currently need to treat these nodes like real instructions.
  // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;

  unsigned ReadyCycle = isBottomUp ? SU->getHeight() : SU->getDepth();

  // Bump CurCycle to account for latency. We assume the latency of other
  // available instructions may be hidden by the stall (not a full pipe stall).
  // This updates the hazard recognizer's cycle before reserving resources for
  // this node.
  AdvanceToCycle(ReadyCycle);

  // Calls are scheduled in their preceding cycle, so don't conflict with
  // hazards from instructions after the call. EmitNode will reset the
  // scoreboard state before emitting the call.
  if (isBottomUp && SU->isCall)
    return;

  // FIXME: For resource conflicts in very long non-pipelined stages, we
  // should probably skip ahead here to avoid useless scoreboard checks.
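  // Note that for bottom-up scheduling the recognizer is queried with a
  // negative offset (-Stalls) below: the sequence is being built in reverse,
  // so the scoreboard recedes rather than advances as nodes are emitted.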
  int Stalls = 0;
  while (true) {
    ScheduleHazardRecognizer::HazardType HT =
      HazardRec->getHazardType(SU, isBottomUp ? -Stalls : Stalls);

    if (HT == ScheduleHazardRecognizer::NoHazard)
      break;

    ++Stalls;
  }
  AdvanceToCycle(CurCycle + Stalls);
}

/// Record this SUnit in the HazardRecognizer.
/// Does not update CurCycle.
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
  if (!HazardRec->isEnabled())
    return;

  // Check for phys reg copy.
  if (!SU->getNode())
    return;

  switch (SU->getNode()->getOpcode()) {
  default:
    assert(SU->getNode()->isMachineOpcode() &&
           "This target-independent node should not be scheduled.");
    break;
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor:
  case ISD::CopyToReg:
  case ISD::CopyFromReg:
    // Noops don't affect the scoreboard state. Copies are likely to be
    // removed.
    return;
  case ISD::INLINEASM:
    // For inline asm, clear the pipeline state.
    HazardRec->Reset();
    return;
  }
  if (isBottomUp && SU->isCall) {
    // Calls are scheduled with their preceding instructions. For bottom-up
    // scheduling, clear the pipeline state before emitting.
    HazardRec->Reset();
  }

  HazardRec->EmitInstruction(SU);

  if (!isBottomUp && SU->isCall) {
    // Calls are scheduled with their preceding instructions. For top-down
    // scheduling, emit the node first, then clear the pipeline state.
    HazardRec->Reset();
  }
}

static void resetVRegCycle(SUnit *SU);

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

#ifndef NDEBUG
  if (CurCycle < SU->getHeight())
    DEBUG(dbgs() << "   Height [" << SU->getHeight()
          << "] pipeline stall!\n");
#endif

  // FIXME: Do not modify node height. It may interfere with
  // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
  // node its ready cycle can aid heuristics, and after scheduling it can
  // indicate the scheduled cycle.
  SU->setHeightToAtLeast(CurCycle);

  // Reserve resources for the scheduled instruction.
  EmitNode(SU);

  Sequence.push_back(SU);

  AvailableQueue->ScheduledNode(SU);

  // If HazardRec is disabled, and each inst counts as one cycle, then
  // advance CurCycle before ReleasePredecessors to avoid useless pushes to
  // PendingQueue for schedulers that implement HasReadyFilter.
  if (!HazardRec->isEnabled() && AvgIPC < 2)
    AdvanceToCycle(CurCycle + 1);

  // Update liveness of predecessors before successors to avoid treating a
  // two-address node as a live range def.
  ReleasePredecessors(SU);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }

  resetVRegCycle(SU);

  SU->isScheduled = true;

  // Conditions under which the scheduler should eagerly advance the cycle:
  // (1) No available instructions
  // (2) All pipelines full, so available instructions must have hazards.
  //
  // If HazardRec is disabled, the cycle was pre-advanced before calling
  // ReleasePredecessors. In that case, IssueCount should remain 0.
  //
  // Check AvailableQueue after ReleasePredecessors in case of zero latency.
  if (HazardRec->isEnabled() || AvgIPC > 1) {
    if (SU->getNode() && SU->getNode()->isMachineOpcode())
      ++IssueCount;
    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
        || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
      AdvanceToCycle(CurCycle + 1);
  }
}

/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
  ++PredSU->NumSuccsLeft;
}

/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update
/// its state and its predecessors' states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
  DEBUG(SU->dump(this));

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      // This becomes the nearest def. Note that an earlier def may still be
      // pending if this is a two-address node.
      LiveRegDefs[I->getReg()] = SU;
      if (!LiveRegDefs[I->getReg()]) {
        ++NumLiveRegs;
      }
      if (LiveRegGens[I->getReg()] == NULL ||
          I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
        LiveRegGens[I->getReg()] = I->getSUnit();
    }
  }
  if (SU->getHeight() < MinAvailableCycle)
    MinAvailableCycle = SU->getHeight();

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
    // Don't make available until backtracking is complete.
    SU->isPending = true;
    PendingQueue.push_back(SU);
  }
  else
    AvailableQueue->push(SU);

  AvailableQueue->UnscheduledNode(SU);
}

/// After backtracking, the hazard checker needs to be restored to a state
/// corresponding to the current cycle.
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
  HazardRec->Reset();

  unsigned LookAhead = std::min((unsigned)Sequence.size(),
                                HazardRec->getMaxLookAhead());
  if (LookAhead == 0)
    return;
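  // Only the scoreboard's lookahead window can still influence future hazard
  // checks, so it suffices to replay the last LookAhead scheduled nodes,
  // receding one cycle at a time as node heights increase.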
  std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
  unsigned HazardCycle = (*I)->getHeight();
  for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
    SUnit *SU = *I;
    for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
      HazardRec->RecedeCycle();
    }
    EmitNode(SU);
  }
}

/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified by
/// BtSU in order to schedule a specific node.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
  SUnit *OldSU = Sequence.back();
  while (true) {
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    // FIXME: use ready cycle instead of height
    CurCycle = OldSU->getHeight();
    UnscheduleNodeBottomUp(OldSU);
    AvailableQueue->setCurCycle(CurCycle);
    if (OldSU == BtSU)
      break;
    OldSU = Sequence.back();
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  RestoreHazardCheckerBottomUp();

  ReleasePending();

  ++NumBacktracks;
}

static bool isOperandOf(const SUnit *SU, SDNode *N) {
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isOperandOf(N))
      return true;
  }
  return false;
}
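
// CopyAndMoveSuccessors (below) tries two strategies to break a physical
// register dependence: if the node folded a load, unfold it into a separate
// load plus operation so that only the cheap part must be duplicated;
// otherwise clone the node itself and move the already-scheduled successors
// over to the clone.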

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  if (SU->getNode()->getGluedNode())
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location and producing the same type of value
    // but it has different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      InitNumRegDefsLeft(LoadSU);
      ComputeLatency(LoadSU);
    }

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (TID.isCommutable())
      NewSU->isCommutable = true;

    InitNumRegDefsLeft(NewSU);
    ComputeLatency(NewSU);

    // Record all the edges to and from the old SU, by category.
    SmallVector<SDep, 4> ChainPreds;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPreds.push_back(*I);
      else if (isOperandOf(I->getSUnit(), LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
      // Balance register pressure.
      if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
          && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
        --NewSU->NumRegDefsLeft;
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    // Add a data dependency to reflect that NewSU reads the value defined
    // by LoadSU.
    AddPred(NewSU, SDep(LoadSU, SDep::Data, LoadSU->Latency));

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "    Duplicating SU #" << SU->NodeNum << "\n");
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                             const TargetRegisterClass *DestRC,
                                             const TargetRegisterClass *SrcRC,
                                             SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
    else {
      // Avoid scheduling the def-side copy before other successors. Otherwise
      // we could introduce another physreg interference on the copy and
      // continue inserting copies indefinitely.
      SDep D(CopyFromSU, SDep::Order, /*Latency=*/0,
             /*Reg=*/0, /*isNormalMemory=*/false,
             /*isMustAlias=*/false, /*isArtificial=*/true);
      AddPred(SuccSU, D);
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
  AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}
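
// Note: the CopyFromSU/CopyToSU pair created above round-trips the value
// through DestRC, so the already-scheduled successors now read the copy's
// result instead of keeping the original physical register live across the
// interfering region.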

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
  assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = TID.getNumDefs();
  for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}

/// CheckForLiveRegDef - If the specified register def of the specified SUnit
/// clobbers any "live" registers, add them to the LRegs interference vector.
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVector<unsigned, 4> &LRegs,
                               const TargetRegisterInfo *TRI) {
  for (const unsigned *AliasI = TRI->getOverlaps(Reg); *AliasI; ++AliasI) {

    // Check if Ref is live.
    if (!LiveRegDefs[*AliasI]) continue;

    // Allow multiple uses of the same def.
    if (LiveRegDefs[*AliasI] == SU) continue;

    // Add Reg to the set of interfering live regs.
    if (RegAdded.insert(*AliasI)) {
      LRegs.push_back(*AliasI);
    }
  }
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  //
  // If SU is the currently live definition of the same register that it uses,
  // then we are free to schedule it.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;
    const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
    if (!TID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
  }

  return !LRegs.empty();
}

/// Return a node that can be scheduled in this cycle. Requirements:
/// (1) Ready: latency has been satisfied
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
  SmallVector<SUnit*, 4> Interferences;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;

  SUnit *CurSU = AvailableQueue->pop();
  while (CurSU) {
    SmallVector<unsigned, 4> LRegs;
    if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
      break;
    LRegsMap.insert(std::make_pair(CurSU, LRegs));

    CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
    Interferences.push_back(CurSU);
    CurSU = AvailableQueue->pop();
  }
  if (CurSU) {
    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
      Interferences[i]->isPending = false;
      assert(Interferences[i]->isAvailable && "must still be available");
      AvailableQueue->push(Interferences[i]);
    }
    return CurSU;
  }

  // All candidates are delayed due to live physical reg dependencies.
  // Try backtracking, code duplication, or inserting cross class copies
  // to resolve it.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    SUnit *TrySU = Interferences[i];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

    // Try unscheduling up to the point where it's safe to schedule
    // this node.
    SUnit *BtSU = NULL;
    unsigned LiveCycle = UINT_MAX;
    for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
      unsigned Reg = LRegs[j];
      if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
        BtSU = LiveRegGens[Reg];
        LiveCycle = BtSU->getHeight();
      }
    }
    if (!WillCreateCycle(TrySU, BtSU)) {
      BacktrackBottomUp(TrySU, BtSU);

      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (BtSU->isAvailable) {
        BtSU->isAvailable = false;
        if (!BtSU->isPending)
          AvailableQueue->remove(BtSU);
      }
      AddPred(TrySU, SDep(BtSU, SDep::Order, /*Latency=*/1,
                          /*Reg=*/0, /*isNormalMemory=*/false,
                          /*isMustAlias=*/false, /*isArtificial=*/true));

      // If one or more successors has been unscheduled, then the current
      // node is no longer available. Schedule a successor that's now
      // available instead.
      if (!TrySU->isAvailable) {
        CurSU = AvailableQueue->pop();
      }
      else {
        CurSU = TrySU;
        TrySU->isPending = false;
        Interferences.erase(Interferences.begin()+i);
      }
      break;
    }
  }

  if (!CurSU) {
    // Can't backtrack. If it's too expensive to copy the value, then try
    // to duplicate the nodes that produce these "too expensive to copy"
    // values to break the dependency. In case even that doesn't work,
    // insert cross class copies.
    // If it's not too expensive, i.e. cost != -1, issue copies.
    SUnit *TrySU = Interferences[0];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
    assert(LRegs.size() == 1 && "Can't handle this yet!");
    unsigned Reg = LRegs[0];
    SUnit *LRDef = LiveRegDefs[Reg];
    EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
    const TargetRegisterClass *RC =
      TRI->getMinimalPhysRegClass(Reg, VT);
    const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

    // If cross copy register class is the same as RC, then it must be possible
    // to copy the value directly. Do not try to duplicate the def.
    // If cross copy register class is not the same as RC, then it's possible
    // to copy the value but it requires cross register class copies and it is
    // expensive.
    // If cross copy register class is null, then it's not possible to copy
    // the value at all.
    SUnit *NewDef = 0;
    if (DestRC != RC) {
      NewDef = CopyAndMoveSuccessors(LRDef);
      if (!DestRC && !NewDef)
        report_fatal_error("Can't handle live physical register dependency!");
    }
    if (!NewDef) {
      // Issue copies, these can be expensive cross register class copies.
      SmallVector<SUnit*, 2> Copies;
      InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
      DEBUG(dbgs() << "    Adding an edge from SU #" << TrySU->NodeNum
            << " to SU #" << Copies.front()->NodeNum << "\n");
      AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
                          /*Reg=*/0, /*isNormalMemory=*/false,
                          /*isMustAlias=*/false,
                          /*isArtificial=*/true));
      NewDef = Copies.back();
    }

    DEBUG(dbgs() << "    Adding an edge from SU #" << NewDef->NodeNum
          << " to SU #" << TrySU->NodeNum << "\n");
    LiveRegDefs[Reg] = NewDef;
    AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
                         /*Reg=*/0, /*isNormalMemory=*/false,
                         /*isMustAlias=*/false,
                         /*isArtificial=*/true));
    TrySU->isAvailable = false;
    CurSU = NewDef;
  }

  assert(CurSU && "Unable to resolve live physical register dependencies!");

  // Add the nodes that aren't ready back onto the available list.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    Interferences[i]->isPending = false;
    // May no longer be available due to backtracking.
    if (Interferences[i]->isAvailable) {
      AvailableQueue->push(Interferences[i]);
    }
  }
  return CurSU;
}

/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    DEBUG(dbgs() << "\nExamining Available:\n";
          AvailableQueue->dump(this));

    // Pick the best node to schedule taking all constraints into
    // account.
    SUnit *SU = PickNodeToScheduleBottomUp();

    AdvancePastStalls(SU);

    ScheduleNodeBottomUp(SU);

    while (AvailableQueue->empty() && !PendingQueue.empty()) {
      // Advance the cycle to free resources. Skip ahead to the next ready SU.
      assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
      AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
    }
  }

  // Reverse the order since it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifySchedule(isBottomUp);
#endif
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleaseSucc(SUnit *SU, const SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) {
    SuccSU->isAvailable = true;
    AvailableQueue->push(SuccSU);
  }
}

void ScheduleDAGRRList::ReleaseSuccessors(SUnit *SU) {
  // Top down: release successors.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    assert(!I->isAssignedRegDep() &&
           "The list-tdrr scheduler doesn't yet support physreg dependencies!");

    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);
  Sequence.push_back(SU);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue->ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void ScheduleDAGRRList::ListScheduleTopDown() {
  AvailableQueue->setCurCycle(CurCycle);

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // All leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue->push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    SUnit *CurSU = AvailableQueue->pop();

    if (CurSU)
      ScheduleNodeTopDown(CurSU);
    ++CurCycle;
    AvailableQueue->setCurCycle(CurCycle);
  }

#ifndef NDEBUG
  VerifySchedule(isBottomUp);
#endif
}

//===----------------------------------------------------------------------===//
//                RegReductionPriorityQueue Definition
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi-Ullman numbers
// to reduce register pressure.
//
namespace {
class RegReductionPQBase;

struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
  bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
};

#ifndef NDEBUG
template<class SF>
struct reverse_sort : public queue_sort {
  SF &SortFunc;
  reverse_sort(SF &sf) : SortFunc(sf) {}
  reverse_sort(const reverse_sort &RHS) : SortFunc(RHS.SortFunc) {}

  bool operator()(SUnit* left, SUnit* right) const {
    // reverse left/right rather than simply !SortFunc(left, right)
    // to expose different paths in the comparison logic.
    return SortFunc(right, left);
  }
};
#endif // NDEBUG

/// bu_ls_rr_sort - Priority function for bottom up register pressure
/// reduction scheduler.
struct bu_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
  bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// td_ls_rr_sort - Priority function for top down register pressure reduction
// scheduler.
struct td_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = false,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  td_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
  td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

  bool operator()(const SUnit* left, const SUnit* right) const;
};

// src_ls_rr_sort - Priority function for source order scheduler.
struct src_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  src_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  src_ls_rr_sort(const src_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// hybrid_ls_rr_sort - Priority function for hybrid scheduler.
struct hybrid_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  hybrid_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};

// ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
// scheduler.
struct ilp_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  ilp_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};
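
// A note on the convention shared by these comparison functors: operator()
// returns true when "left" should be scheduled after "right". The queue pop
// therefore selects the node that no other candidate beats in the comparison
// (see popFromQueueImpl below).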

class RegReductionPQBase : public SchedulingPriorityQueue {
protected:
  std::vector<SUnit*> Queue;
  unsigned CurQueueId;
  bool TracksRegPressure;

  // SUnits - The SUnits for the current graph.
  std::vector<SUnit> *SUnits;

  MachineFunction &MF;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const TargetLowering *TLI;
  ScheduleDAGRRList *scheduleDAG;

  // SethiUllmanNumbers - The SethiUllman number for each node.
  std::vector<unsigned> SethiUllmanNumbers;

  /// RegPressure - Tracking current reg pressure per register class.
  std::vector<unsigned> RegPressure;

  /// RegLimit - Tracking the number of allocatable registers per register
  /// class.
  std::vector<unsigned> RegLimit;

public:
  RegReductionPQBase(MachineFunction &mf,
                     bool hasReadyFilter,
                     bool tracksrp,
                     const TargetInstrInfo *tii,
                     const TargetRegisterInfo *tri,
                     const TargetLowering *tli)
    : SchedulingPriorityQueue(hasReadyFilter),
      CurQueueId(0), TracksRegPressure(tracksrp),
      MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
    if (TracksRegPressure) {
      unsigned NumRC = TRI->getNumRegClasses();
      RegLimit.resize(NumRC);
      RegPressure.resize(NumRC);
      std::fill(RegLimit.begin(), RegLimit.end(), 0);
      std::fill(RegPressure.begin(), RegPressure.end(), 0);
      for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
             E = TRI->regclass_end(); I != E; ++I)
        RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
    }
  }

  void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
    scheduleDAG = scheduleDag;
  }

  ScheduleHazardRecognizer* getHazardRec() {
    return scheduleDAG->getHazardRec();
  }

  void initNodes(std::vector<SUnit> &sunits);

  void addNode(const SUnit *SU);

  void updateNode(const SUnit *SU);

  void releaseState() {
    SUnits = 0;
    SethiUllmanNumbers.clear();
    std::fill(RegPressure.begin(), RegPressure.end(), 0);
  }

  unsigned getNodePriority(const SUnit *SU) const;

  unsigned getNodeOrdering(const SUnit *SU) const {
    if (!SU->getNode()) return 0;

    return scheduleDAG->DAG->GetOrdering(SU->getNode());
  }

  bool empty() const { return Queue.empty(); }

  void push(SUnit *U) {
    assert(!U->NodeQueueId && "Node in the queue already");
    U->NodeQueueId = ++CurQueueId;
    Queue.push_back(U);
  }

  void remove(SUnit *SU) {
    assert(!Queue.empty() && "Queue is empty!");
    assert(SU->NodeQueueId != 0 && "Not in queue!");
    std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
                                                 SU);
    if (I != prior(Queue.end()))
      std::swap(*I, Queue.back());
    Queue.pop_back();
    SU->NodeQueueId = 0;
  }

  bool tracksRegPressure() const { return TracksRegPressure; }

  void dumpRegPressure() const;

  bool HighRegPressure(const SUnit *SU) const;

  bool MayReduceRegPressure(SUnit *SU) const;

  int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;

  void ScheduledNode(SUnit *SU);

  void UnscheduledNode(SUnit *SU);

protected:
  bool canClobber(const SUnit *SU, const SUnit *Op);
  void AddPseudoTwoAddrDeps();
  void PrescheduleNodesWithMultipleUses();
  void CalculateSethiUllmanNumbers();
};

template<class SF>
static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
  std::vector<SUnit *>::iterator Best = Q.begin();
  for (std::vector<SUnit *>::iterator I = llvm::next(Q.begin()),
         E = Q.end(); I != E; ++I)
    if (Picker(*Best, *I))
      Best = I;
  SUnit *V = *Best;
  if (Best != prior(Q.end()))
    std::swap(*Best, Q.back());
  Q.pop_back();
  return V;
}

template<class SF>
SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
#ifndef NDEBUG
  if (DAG->StressSched) {
    reverse_sort<SF> RPicker(Picker);
    return popFromQueueImpl(Q, RPicker);
  }
#endif
  return popFromQueueImpl(Q, Picker);
}

template<class SF>
class RegReductionPriorityQueue : public RegReductionPQBase {
  SF Picker;

public:
  RegReductionPriorityQueue(MachineFunction &mf,
                            bool tracksrp,
                            const TargetInstrInfo *tii,
                            const TargetRegisterInfo *tri,
                            const TargetLowering *tli)
    : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, tii, tri, tli),
      Picker(this) {}

  bool isBottomUp() const { return SF::IsBottomUp; }

  bool isReady(SUnit *U) const {
    return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
  }

  SUnit *pop() {
    if (Queue.empty()) return NULL;

    SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
    V->NodeQueueId = 0;
    return V;
  }

  void dump(ScheduleDAG *DAG) const {
    // Emulate pop() without clobbering NodeQueueIds.
    std::vector<SUnit*> DumpQueue = Queue;
    SF DumpPicker = Picker;
    while (!DumpQueue.empty()) {
      SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
      if (isBottomUp())
        dbgs() << "Height " << SU->getHeight() << ": ";
      else
        dbgs() << "Depth " << SU->getDepth() << ": ";
      SU->dump(DAG);
    }
  }
};

typedef RegReductionPriorityQueue<bu_ls_rr_sort>
BURegReductionPriorityQueue;

typedef RegReductionPriorityQueue<td_ls_rr_sort>
TDRegReductionPriorityQueue;

typedef RegReductionPriorityQueue<src_ls_rr_sort>
SrcRegReductionPriorityQueue;

typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
HybridBURRPriorityQueue;

typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
ILPBURRPriorityQueue;
} // end anonymous namespace

//===----------------------------------------------------------------------===//
//           Static Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//

// Check for special nodes that bypass scheduling heuristics.
// Currently this pushes TokenFactor nodes down, but may be used for other
// pseudo-ops as well.
//
// Return -1 to schedule right above left, 1 for left above right.
// Return 0 if no bias exists.
static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
  bool LSchedLow = left->isScheduleLow;
  bool RSchedLow = right->isScheduleLow;
  if (LSchedLow != RSchedLow)
    return LSchedLow < RSchedLow ? 1 : -1;
  return 0;
}

/// CalcNodeSethiUllmanNumber - Compute Sethi-Ullman number.
/// Smaller number is the higher priority.
static unsigned
CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    } else if (PredSethiUllman == SethiUllmanNumber)
      ++Extra;
  }

  SethiUllmanNumber += Extra;

  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}
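
// For example, a leaf (no non-chain preds) gets the number 1. A node whose
// two operand subtrees each require 2 registers gets 2 + 1 = 3: one operand's
// result must stay live while the other subtree is evaluated, which is the
// classic Sethi-Ullman argument. Each additional predecessor tying the
// current maximum bumps the number via Extra.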

/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
void RegReductionPQBase::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}

void RegReductionPQBase::addNode(const SUnit *SU) {
  unsigned SUSize = SethiUllmanNumbers.size();
  if (SUnits->size() > SUSize)
    SethiUllmanNumbers.resize(SUSize*2, 0);
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}

void RegReductionPQBase::updateNode(const SUnit *SU) {
  SethiUllmanNumbers[SU->NodeNum] = 0;
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}

// Lower priority means schedule further down. For bottom-up scheduling, lower
// priority SUs are scheduled before higher priority SUs.
unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
  assert(SU->NodeNum < SethiUllmanNumbers.size());
  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // CopyToReg should be close to its uses to facilitate coalescing and
    // avoid spilling.
    return 0;
  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG ||
      Opc == TargetOpcode::INSERT_SUBREG)
    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
    // close to their uses to facilitate coalescing.
    return 0;
  if (SU->NumSuccs == 0 && SU->NumPreds != 0)
    // If SU does not have a register use, i.e. it doesn't produce a value
    // that would be consumed (e.g. store), then it terminates a chain of
    // computation. Give it a large SethiUllman number so it will be
    // scheduled right before its predecessors, so that it doesn't lengthen
    // their live ranges.
    return 0xffff;
  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
    // If SU does not have a register def, schedule it close to its uses
    // because it does not lengthen any live ranges.
    return 0;
#if 1
  return SethiUllmanNumbers[SU->NodeNum];
#else
  unsigned Priority = SethiUllmanNumbers[SU->NodeNum];
  if (SU->isCallOp) {
    // FIXME: This assumes all of the defs are used as call operands.
    int NP = (int)Priority - SU->getNode()->getNumValues();
    return (NP > 0) ? NP : 0;
  }
  return Priority;
#endif
}

//===----------------------------------------------------------------------===//
//                     Register Pressure Tracking
//===----------------------------------------------------------------------===//

void RegReductionPQBase::dumpRegPressure() const {
  for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
         E = TRI->regclass_end(); I != E; ++I) {
    const TargetRegisterClass *RC = *I;
    unsigned Id = RC->getID();
    unsigned RP = RegPressure[Id];
    if (!RP) continue;
    DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
          << '\n');
  }
}

bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
  if (!TLI)
    return false;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      continue;
    }
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance()) {
      unsigned RCId, Cost;
      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);

      if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
        return true;
    }
  }
  return false;
}
1848 bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
1849 const SDNode *N = SU->getNode();
1851 if (!N->isMachineOpcode() || !SU->NumSuccs)
1852 return false;
1854 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1855 for (unsigned i = 0; i != NumDefs; ++i) {
1856 EVT VT = N->getValueType(i);
1857 if (!N->hasAnyUseOfValue(i))
1858 continue;
1859 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1860 if (RegPressure[RCId] >= RegLimit[RCId])
1861 return true;
1862 }
1864 return false;
1865 }
1866 // Compute the register pressure contribution by this instruction by counting up
1867 // for uses that are not live and down for defs. Only count register classes
1868 // that are already under high pressure. As a side effect, compute the number of
1869 // uses of registers that are already live.
1871 // FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
1872 // so could probably be factored.
1873 int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
1874 LiveUses = 0;
1875 int PDiff = 0;
1876 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1877 I != E; ++I) {
1878 if (I->isCtrl())
1879 continue;
1880 SUnit *PredSU = I->getSUnit();
1881 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1882 // to cover the number of registers defined (they are all live).
1883 if (PredSU->NumRegDefsLeft == 0) {
1884 if (PredSU->getNode()->isMachineOpcode())
1885 ++LiveUses;
1886 continue;
1887 }
1888 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1889 RegDefPos.IsValid(); RegDefPos.Advance()) {
1890 EVT VT = RegDefPos.GetValue();
1891 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1892 if (RegPressure[RCId] >= RegLimit[RCId])
1893 ++PDiff;
1894 }
1895 }
1896 const SDNode *N = SU->getNode();
1898 if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
1899 return PDiff;
1901 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1902 for (unsigned i = 0; i != NumDefs; ++i) {
1903 EVT VT = N->getValueType(i);
1904 if (!N->hasAnyUseOfValue(i))
1905 continue;
1906 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1907 if (RegPressure[RCId] >= RegLimit[RCId])
1908 --PDiff;
1909 }
1911 return PDiff;
1912 }
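// One way to read the diff (an interpretation, not from the original text):
// PDiff rises for each still-live predecessor def in a class already at its
// limit and falls for each value SU itself defines in such a class. Under
// bottom-up scheduling a node's own defs die when the node is scheduled, so
// a smaller diff marks a node whose scheduling relieves pressure, and the
// list-ilp comparator pops that node first.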
1913 void RegReductionPQBase::ScheduledNode(SUnit *SU) {
1914 if (!TracksRegPressure)
1915 return;
1920 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1921 I != E; ++I) {
1922 if (I->isCtrl())
1923 continue;
1924 SUnit *PredSU = I->getSUnit();
1925 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1926 // to cover the number of registers defined (they are all live).
1927 if (PredSU->NumRegDefsLeft == 0) {
1928 continue;
1929 }
1930 // FIXME: The ScheduleDAG currently loses information about which of a
1931 // node's values is consumed by each dependence. Consequently, if the node
1932 // defines multiple register classes, we don't know which to pressurize
1933 // here. Instead the following loop consumes the register defs in an
1934 // arbitrary order. At least it handles the common case of clustered loads
1935 // to the same class. For precise liveness, each SDep needs to indicate the
1936 // result number. But that tightly couples the ScheduleDAG with the
1937 // SelectionDAG making updates tricky. A simpler hack would be to attach a
1938 // value type or register class to SDep.
1940 // The most important aspect of register tracking is balancing the increase
1941 // here with the reduction further below. Note that this SU may use multiple
1942 // defs in PredSU. They can't be determined here, but we've already
1943 // compensated by reducing NumRegDefsLeft in PredSU during
1944 // ScheduleDAGSDNodes::AddSchedEdges.
1945 --PredSU->NumRegDefsLeft;
1946 unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
1947 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1948 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
1949 if (SkipRegDefs)
1950 continue;
1952 unsigned RCId, Cost;
1953 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
1954 RegPressure[RCId] += Cost;
1955 break;
1956 }
1957 }
1959 // We should have this assert, but there may be dead SDNodes that never
1960 // materialize as SUnits, so they don't appear to generate liveness.
1961 //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
1962 int SkipRegDefs = (int)SU->NumRegDefsLeft;
1963 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
1964 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
1965 if (SkipRegDefs > 0)
1966 continue;
1967 unsigned RCId, Cost;
1968 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
1969 if (RegPressure[RCId] < Cost) {
1970 // Register pressure tracking is imprecise. This can happen. But we try
1971 // hard not to let it happen because it likely results in poor scheduling.
1972 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") has too many regdefs\n");
1973 RegPressure[RCId] = 0;
1974 }
1975 else {
1976 RegPressure[RCId] -= Cost;
1977 }
1978 }
1980 dumpRegPressure();
1981 }
1982 void RegReductionPQBase::UnscheduledNode(SUnit *SU) {
1983 if (!TracksRegPressure)
1984 return;
1986 const SDNode *N = SU->getNode();
1989 if (!N->isMachineOpcode()) {
1990 if (N->getOpcode() != ISD::CopyToReg)
1991 return;
1992 } else {
1993 unsigned Opc = N->getMachineOpcode();
1994 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
1995 Opc == TargetOpcode::INSERT_SUBREG ||
1996 Opc == TargetOpcode::SUBREG_TO_REG ||
1997 Opc == TargetOpcode::REG_SEQUENCE ||
1998 Opc == TargetOpcode::IMPLICIT_DEF)
1999 return;
2000 }
2002 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2003 I != E; ++I) {
2004 if (I->isCtrl())
2005 continue;
2006 SUnit *PredSU = I->getSUnit();
2007 // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
2008 // counts data deps.
2009 if (PredSU->NumSuccsLeft != PredSU->Succs.size())
2010 continue;
2011 const SDNode *PN = PredSU->getNode();
2012 if (!PN->isMachineOpcode()) {
2013 if (PN->getOpcode() == ISD::CopyFromReg) {
2014 EVT VT = PN->getValueType(0);
2015 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2016 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2017 }
2018 continue;
2019 }
2020 unsigned POpc = PN->getMachineOpcode();
2021 if (POpc == TargetOpcode::IMPLICIT_DEF)
2022 continue;
2023 if (POpc == TargetOpcode::EXTRACT_SUBREG) {
2024 EVT VT = PN->getOperand(0).getValueType();
2025 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2026 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2027 continue;
2028 } else if (POpc == TargetOpcode::INSERT_SUBREG ||
2029 POpc == TargetOpcode::SUBREG_TO_REG) {
2030 EVT VT = PN->getValueType(0);
2031 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2032 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2033 continue;
2034 }
2035 unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
2036 for (unsigned i = 0; i != NumDefs; ++i) {
2037 EVT VT = PN->getValueType(i);
2038 if (!PN->hasAnyUseOfValue(i))
2039 continue;
2040 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2041 if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
2042 // Register pressure tracking is imprecise. This can happen.
2043 RegPressure[RCId] = 0;
2044 else
2045 RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
2046 }
2047 }
2049 // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
2050 // may transfer data dependencies to CopyToReg.
2051 if (SU->NumSuccs && N->isMachineOpcode()) {
2052 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2053 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2054 EVT VT = N->getValueType(i);
2055 if (VT == MVT::Glue || VT == MVT::Other)
2056 continue;
2057 if (!N->hasAnyUseOfValue(i))
2058 continue;
2059 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2060 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2061 }
2062 }
2064 dumpRegPressure();
2065 }
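// Note: UnscheduledNode reverses ScheduledNode's bookkeeping and is only
// invoked when the scheduler backtracks (see the NumBacktracks statistic).
// Its subreg and IMPLICIT_DEF early-outs mirror getNodePriority's assumption
// that such nodes are coalesced away and contribute no real pressure.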
2067 //===----------------------------------------------------------------------===//
2068 // Dynamic Node Priority for Register Pressure Reduction
2069 //===----------------------------------------------------------------------===//
2071 /// closestSucc - Returns the scheduled cycle of the successor which is
2072 /// closest to the current cycle.
2073 static unsigned closestSucc(const SUnit *SU) {
2074 unsigned MaxHeight = 0;
2075 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2076 I != E; ++I) {
2077 if (I->isCtrl()) continue; // ignore chain succs
2078 unsigned Height = I->getSUnit()->getHeight();
2079 // If there are a bunch of CopyToRegs stacked up, they should be considered
2080 // to be at the same position.
2081 if (I->getSUnit()->getNode() &&
2082 I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
2083 Height = closestSucc(I->getSUnit())+1;
2084 if (Height > MaxHeight)
2085 MaxHeight = Height;
2086 }
2088 return MaxHeight;
2089 }
2090 /// calcMaxScratches - Returns a cost estimate of the worst-case requirement
2091 /// for scratch registers, i.e. number of data dependencies.
2092 static unsigned calcMaxScratches(const SUnit *SU) {
2093 unsigned Scratches = 0;
2094 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2095 I != E; ++I) {
2096 if (I->isCtrl()) continue; // ignore chain preds
2097 Scratches++;
2098 }
2100 return Scratches;
2101 }
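// For example, a node with three data predecessors reports 3: in the worst
// case all three operand values are live into it at once.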
2102 /// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
2103 /// CopyFromReg from a virtual register.
2104 static bool hasOnlyLiveInOpers(const SUnit *SU) {
2105 bool RetVal = false;
2106 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2107 I != E; ++I) {
2108 if (I->isCtrl()) continue;
2109 const SUnit *PredSU = I->getSUnit();
2110 if (PredSU->getNode() &&
2111 PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
2112 unsigned Reg =
2113 cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
2114 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2115 RetVal = true;
2116 continue;
2117 }
2118 }
2119 return false;
2120 }
2122 return RetVal;
2123 }
2124 /// hasOnlyLiveOutUses - Return true if SU has only value successors that are
2125 /// CopyToReg to a virtual register. This SU def is probably a liveout and
2126 /// it has no other use. It should be scheduled closer to the terminator.
2127 static bool hasOnlyLiveOutUses(const SUnit *SU) {
2128 bool RetVal = false;
2129 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2130 I != E; ++I) {
2131 if (I->isCtrl()) continue;
2132 const SUnit *SuccSU = I->getSUnit();
2133 if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
2134 unsigned Reg =
2135 cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
2136 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2137 RetVal = true;
2138 continue;
2139 }
2140 }
2141 return false;
2142 }
2144 return RetVal;
2145 }
2146 // Set isVRegCycle for a node with only live in opers and live out uses. Also
2147 // set isVRegCycle for its CopyFromReg operands.
2149 // This is only relevant for single-block loops, in which case the VRegCycle
2150 // node is likely an induction variable in which the operand and target virtual
2151 // registers should be coalesced (e.g. pre/post increment values). Setting the
2152 // isVRegCycle flag helps the scheduler prioritize other uses of the same
2153 // CopyFromReg so that this node becomes the virtual register "kill". This
2154 // avoids interference between the values live in and out of the block and
2155 // eliminates a copy inside the loop.
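// Hypothetical shape of the pattern (virtual register names invented):
//   %v      = CopyFromReg %vreg7
//   %v.next = add %v, 4          ; only live-in operands, only live-out uses
//   CopyToReg %vreg7, %v.next
// The add is the VRegCycle node; prioritizing the other in-loop uses of %v
// lets the add become %v's kill, so the live-in and live-out values of
// %vreg7 can be coalesced and the copy inside the loop disappears.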
2156 static void initVRegCycle(SUnit *SU) {
2157 if (DisableSchedVRegCycle)
2158 return;
2160 if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
2161 return;
2163 DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");
2165 SU->isVRegCycle = true;
2167 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2168 I != E; ++I) {
2169 if (I->isCtrl()) continue;
2170 I->getSUnit()->isVRegCycle = true;
2171 }
2172 }
2174 // After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of
2175 // CopyFromReg operands. We should no longer penalize other uses of this VReg.
2176 static void resetVRegCycle(SUnit *SU) {
2177 if (!SU->isVRegCycle)
2178 return;
2180 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
2181 I != E; ++I) {
2182 if (I->isCtrl()) continue; // ignore chain preds
2183 SUnit *PredSU = I->getSUnit();
2184 if (PredSU->isVRegCycle) {
2185 assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
2186 "VRegCycle def must be CopyFromReg");
2187 PredSU->isVRegCycle = false;
2188 }
2189 }
2190 }
2192 // Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This
2193 // means a node that defines the VRegCycle has not been scheduled yet.
2194 static bool hasVRegCycleUse(const SUnit *SU) {
2195 // If this SU also defines the VReg, don't hoist it as a "use".
2196 if (SU->isVRegCycle)
2197 return false;
2199 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
2200 I != E; ++I) {
2201 if (I->isCtrl()) continue; // ignore chain preds
2202 if (I->getSUnit()->isVRegCycle &&
2203 I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
2204 DEBUG(dbgs() << " VReg cycle use: SU (" << SU->NodeNum << ")\n");
2205 return true;
2206 }
2207 }
2208 return false;
2209 }
2211 // Check for either a dependence (latency) or resource (hazard) stall.
2213 // Note: The ScheduleHazardRecognizer interface requires a non-const SU.
2214 static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
2215 if ((int)SPQ->getCurCycle() < Height) return true;
2216 if (SPQ->getHazardRec()->getHazardType(SU, 0)
2217 != ScheduleHazardRecognizer::NoHazard)
2218 return true;
2219 return false;
2220 }
2222 // Return -1 if left has higher priority, 1 if right has higher priority.
2223 // Return 0 if latency-based priority is equivalent.
2224 static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
2225 RegReductionPQBase *SPQ) {
2226 // Scheduling an instruction that uses a VReg whose postincrement has not yet
2227 // been scheduled will induce a copy. Model this as an extra cycle of latency.
2228 int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
2229 int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
2230 int LHeight = (int)left->getHeight() + LPenalty;
2231 int RHeight = (int)right->getHeight() + RPenalty;
2233 bool LStall = (!checkPref || left->SchedulingPref == Sched::Latency) &&
2234 BUHasStall(left, LHeight, SPQ);
2235 bool RStall = (!checkPref || right->SchedulingPref == Sched::Latency) &&
2236 BUHasStall(right, RHeight, SPQ);
2238 // If scheduling one of the nodes will cause a pipeline stall, delay it.
2239 // If scheduling both of them would stall, sort them according to their
2240 // height.
2241 if (LStall) {
2242 if (!RStall) {
2243 DEBUG(++FactorCount[FactStall]);
2244 return 1;
2245 }
2246 if (LHeight != RHeight) {
2247 DEBUG(++FactorCount[FactStall]);
2248 return LHeight > RHeight ? 1 : -1;
2249 }
2250 } else if (RStall) {
2251 DEBUG(++FactorCount[FactStall]);
2252 return -1;
2253 }
2255 // If either node is scheduling for latency, sort them by height/depth
2257 if (!checkPref || (left->SchedulingPref == Sched::Latency ||
2258 right->SchedulingPref == Sched::Latency)) {
2259 if (DisableSchedCycles) {
2260 if (LHeight != RHeight) {
2261 DEBUG(++FactorCount[FactHeight]);
2262 return LHeight > RHeight ? 1 : -1;
2263 }
2264 }
2265 else {
2266 // If neither instruction stalls (!LStall && !RStall) then their heights
2267 // are already covered, so only depth matters. We also reach this point
2268 // if both stall but have the same height.
2269 int LDepth = left->getDepth() - LPenalty;
2270 int RDepth = right->getDepth() - RPenalty;
2271 if (LDepth != RDepth) {
2272 DEBUG(++FactorCount[FactDepth]);
2273 DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
2274 << ") depth " << LDepth << " vs SU (" << right->NodeNum
2275 << ") depth " << RDepth << "\n");
2276 return LDepth < RDepth ? 1 : -1;
2277 }
2278 }
2279 if (left->Latency != right->Latency) {
2280 DEBUG(++FactorCount[FactOther]);
2281 return left->Latency > right->Latency ? 1 : -1;
2282 }
2283 }
2284 return 0;
2285 }
2287 static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
2288 // Schedule physical register definitions close to their use. This is
2289 // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
2290 // long as shortening physreg live ranges is generally good, we can defer
2291 // creating a subtarget hook.
2292 if (!DisableSchedPhysRegJoin) {
2293 bool LHasPhysReg = left->hasPhysRegDefs;
2294 bool RHasPhysReg = right->hasPhysRegDefs;
2295 if (LHasPhysReg != RHasPhysReg) {
2296 DEBUG(++FactorCount[FactRegUses]);
2297 #ifndef NDEBUG
2298 const char *PhysRegMsg[] = {" has no physreg", " defines a physreg"};
2299 #endif
2300 DEBUG(dbgs() << " SU (" << left->NodeNum << ") "
2301 << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
2302 << PhysRegMsg[RHasPhysReg] << "\n");
2303 return LHasPhysReg < RHasPhysReg;
2304 }
2305 }
2307 // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
2308 unsigned LPriority = SPQ->getNodePriority(left);
2309 unsigned RPriority = SPQ->getNodePriority(right);
2311 // Be really careful about hoisting call operands above previous calls.
2312 // Only allow it if it would reduce register pressure.
2313 if (left->isCall && right->isCallOp) {
2314 unsigned RNumVals = right->getNode()->getNumValues();
2315 RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
2316 }
2317 if (right->isCall && left->isCallOp) {
2318 unsigned LNumVals = left->getNode()->getNumValues();
2319 LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
2320 }
2322 if (LPriority != RPriority) {
2323 DEBUG(++FactorCount[FactStatic]);
2324 return LPriority > RPriority;
2325 }
2327 // If one or both of the nodes are calls and their Sethi-Ullman numbers
2328 // are the same, then keep the source order.
2329 if (left->isCall || right->isCall) {
2330 unsigned LOrder = SPQ->getNodeOrdering(left);
2331 unsigned ROrder = SPQ->getNodeOrdering(right);
2333 // Prefer an ordering where the lower the non-zero order number, the
2334 // higher the priority.
2335 if ((LOrder || ROrder) && LOrder != ROrder)
2336 return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2337 }
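// Example (illustrative): with LOrder == 3 and ROrder == 7 the comparison
// returns true, so the left node is popped later and, because the block is
// built bottom-up, is placed earlier in the final order, matching the
// source order.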
2339 // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
2340 // E.g.
2341 // t1 = op t2, c1
2342 // t3 = op t4, c2
2343 //
2344 // and the following instructions are both ready.
2345 // t2 = op c3
2346 // t4 = op c4
2347 //
2348 // Then schedule t2 = op first.
2349 // i.e.
2350 // t4 = op c4
2351 // t2 = op c3
2352 // t1 = op t2, c1
2353 // t3 = op t4, c2
2354 //
2355 // This creates more short live intervals.
2356 unsigned LDist = closestSucc(left);
2357 unsigned RDist = closestSucc(right);
2358 if (LDist != RDist) {
2359 DEBUG(++FactorCount[FactOther]);
2360 return LDist < RDist;
2361 }
2363 // How many registers become live when the node is scheduled.
2364 unsigned LScratch = calcMaxScratches(left);
2365 unsigned RScratch = calcMaxScratches(right);
2366 if (LScratch != RScratch) {
2367 DEBUG(++FactorCount[FactOther]);
2368 return LScratch > RScratch;
2369 }
2371 // Comparing latency against a call makes little sense unless the node
2372 // is register pressure-neutral.
2373 if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
2374 return (left->NodeQueueId > right->NodeQueueId);
2376 // Do not compare latencies when one or both of the nodes are calls.
2377 if (!DisableSchedCycles &&
2378 !(left->isCall || right->isCall)) {
2379 int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
2380 if (result != 0)
2381 return result > 0;
2382 }
2384 if (left->getHeight() != right->getHeight()) {
2385 DEBUG(++FactorCount[FactHeight]);
2386 return left->getHeight() > right->getHeight();
2387 }
2389 if (left->getDepth() != right->getDepth()) {
2390 DEBUG(++FactorCount[FactDepth]);
2391 return left->getDepth() < right->getDepth();
2392 }
2395 assert(left->NodeQueueId && right->NodeQueueId &&
2396 "NodeQueueId cannot be zero");
2397 DEBUG(++FactorCount[FactOther]);
2398 return (left->NodeQueueId > right->NodeQueueId);
2399 }
2402 bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2403 if (int res = checkSpecialNodes(left, right))
2404 return res > 0;
2406 return BURRSort(left, right, SPQ);
2407 }
2409 // Source order, otherwise bottom up.
2410 bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2411 if (int res = checkSpecialNodes(left, right))
2412 return res > 0;
2414 unsigned LOrder = SPQ->getNodeOrdering(left);
2415 unsigned ROrder = SPQ->getNodeOrdering(right);
2417 // Prefer an ordering where the lower the non-zero order number, the
2418 // higher the priority.
2419 if ((LOrder || ROrder) && LOrder != ROrder)
2420 return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2422 return BURRSort(left, right, SPQ);
2423 }
2425 // If the time between now and when the instruction will be ready can cover
2426 // the spill code, then avoid adding it to the ready queue. This gives long
2427 // stalls highest priority and allows hoisting across calls. It should also
2428 // speed up processing the available queue.
2429 bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2430 static const unsigned ReadyDelay = 3;
2432 if (SPQ->MayReduceRegPressure(SU)) return true;
2434 if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;
2436 if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
2437 != ScheduleHazardRecognizer::NoHazard)
2438 return false;
2440 return true;
2441 }
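// For instance, with ReadyDelay == 3 and CurCycle == 10, a node of height 14
// stays out of the available queue: a stall that long is assumed able to
// cover whatever spill code the delay might cause, per the comment above.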
2443 // Return true if right should be scheduled with higher priority than left.
2444 bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2445 if (int res = checkSpecialNodes(left, right))
2446 return res > 0;
2448 if (left->isCall || right->isCall)
2449 // No way to compute latency of calls.
2450 return BURRSort(left, right, SPQ);
2452 bool LHigh = SPQ->HighRegPressure(left);
2453 bool RHigh = SPQ->HighRegPressure(right);
2454 // Avoid causing spills. If register pressure is high, schedule for
2455 // register pressure reduction.
2456 if (LHigh && !RHigh) {
2457 DEBUG(++FactorCount[FactPressureDiff]);
2458 DEBUG(dbgs() << " pressure SU(" << left->NodeNum << ") > SU("
2459 << right->NodeNum << ")\n");
2460 return true;
2461 }
2462 else if (!LHigh && RHigh) {
2463 DEBUG(++FactorCount[FactPressureDiff]);
2464 DEBUG(dbgs() << " pressure SU(" << right->NodeNum << ") > SU("
2465 << left->NodeNum << ")\n");
2466 return false;
2467 }
2468 if (!LHigh && !RHigh) {
2469 int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
2470 if (result != 0)
2471 return result > 0;
2472 }
2473 return BURRSort(left, right, SPQ);
2474 }
2476 // Schedule as many instructions in each cycle as possible. So don't make an
2477 // instruction available unless it is ready in the current cycle.
2478 bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2479 if (SU->getHeight() > CurCycle) return false;
2481 if (SPQ->getHazardRec()->getHazardType(SU, 0)
2482 != ScheduleHazardRecognizer::NoHazard)
2483 return false;
2485 return true;
2486 }
2488 static bool canEnableCoalescing(SUnit *SU) {
2489 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
2490 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
2491 // CopyToReg should be close to its uses to facilitate coalescing and
2492 // avoid spilling.
2493 return true;
2495 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
2496 Opc == TargetOpcode::SUBREG_TO_REG ||
2497 Opc == TargetOpcode::INSERT_SUBREG)
2498 // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
2499 // close to their uses to facilitate coalescing.
2500 return true;
2502 if (SU->NumPreds == 0 && SU->NumSuccs != 0)
2503 // If SU does not have a register def, schedule it close to its uses
2504 // because it does not lengthen any live ranges.
2505 return true;
2507 return false;
2508 }
2510 // list-ilp is currently an experimental scheduler that allows various
2511 // heuristics to be enabled prior to the normal register reduction logic.
2512 bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2513 if (int res = checkSpecialNodes(left, right))
2514 return res > 0;
2516 if (left->isCall || right->isCall)
2517 // No way to compute latency of calls.
2518 return BURRSort(left, right, SPQ);
2520 unsigned LLiveUses = 0, RLiveUses = 0;
2521 int LPDiff = 0, RPDiff = 0;
2522 if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
2523 LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
2524 RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
2525 }
2526 if (!DisableSchedRegPressure && LPDiff != RPDiff) {
2527 DEBUG(++FactorCount[FactPressureDiff]);
2528 DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
2529 << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
2530 return LPDiff > RPDiff;
2531 }
2533 if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
2534 bool LReduce = canEnableCoalescing(left);
2535 bool RReduce = canEnableCoalescing(right);
2536 DEBUG(if (LReduce != RReduce) ++FactorCount[FactPressureDiff]);
2537 if (LReduce && !RReduce) return false;
2538 if (RReduce && !LReduce) return true;
2539 }
2541 if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
2542 DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
2543 << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
2544 DEBUG(++FactorCount[FactRegUses]);
2545 return LLiveUses < RLiveUses;
2546 }
2548 if (!DisableSchedStalls) {
2549 bool LStall = BUHasStall(left, left->getHeight(), SPQ);
2550 bool RStall = BUHasStall(right, right->getHeight(), SPQ);
2551 if (LStall != RStall) {
2552 DEBUG(++FactorCount[FactHeight]);
2553 return left->getHeight() > right->getHeight();
2554 }
2555 }
2557 if (!DisableSchedCriticalPath) {
2558 int spread = (int)left->getDepth() - (int)right->getDepth();
2559 if (std::abs(spread) > MaxReorderWindow) {
2560 DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
2561 << left->getDepth() << " != SU(" << right->NodeNum << "): "
2562 << right->getDepth() << "\n");
2563 DEBUG(++FactorCount[FactDepth]);
2564 return left->getDepth() < right->getDepth();
2565 }
2566 }
2568 if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
2569 int spread = (int)left->getHeight() - (int)right->getHeight();
2570 if (std::abs(spread) > MaxReorderWindow) {
2571 DEBUG(++FactorCount[FactHeight]);
2572 return left->getHeight() > right->getHeight();
2573 }
2574 }
2576 return BURRSort(left, right, SPQ);
2577 }
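// To summarize the cascade above: list-ilp tries, in order and each behind
// its own flag, register-pressure diff, coalescing opportunity, live-use
// count, stalls, critical path (depth), and height, before falling back to
// the standard BURRSort tie-breakers.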
2579 void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
2580 SUnits = &sunits;
2581 // Add pseudo dependency edges for two-address nodes.
2582 AddPseudoTwoAddrDeps();
2583 // Reroute edges to nodes with multiple uses.
2584 if (!TracksRegPressure)
2585 PrescheduleNodesWithMultipleUses();
2586 // Calculate node priorities.
2587 CalculateSethiUllmanNumbers();
2589 // For single block loops, mark nodes that look like canonical IV increments.
2590 if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
2591 for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
2592 initVRegCycle(&sunits[i]);
2593 }
2594 }
2595 }
2597 //===----------------------------------------------------------------------===//
2598 // Preschedule for Register Pressure
2599 //===----------------------------------------------------------------------===//
2601 bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
2602 if (SU->isTwoAddress) {
2603 unsigned Opc = SU->getNode()->getMachineOpcode();
2604 const TargetInstrDesc &TID = TII->get(Opc);
2605 unsigned NumRes = TID.getNumDefs();
2606 unsigned NumOps = TID.getNumOperands() - NumRes;
2607 for (unsigned i = 0; i != NumOps; ++i) {
2608 if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
2609 SDNode *DU = SU->getNode()->getOperand(i).getNode();
2610 if (DU->getNodeId() != -1 &&
2611 Op->OrigNode == &(*SUnits)[DU->getNodeId()])
2612 return true;
2613 }
2614 }
2615 }
2616 return false;
2617 }
2619 /// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
2620 /// physical register defs.
2621 static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
2622 const TargetInstrInfo *TII,
2623 const TargetRegisterInfo *TRI) {
2624 SDNode *N = SuccSU->getNode();
2625 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2626 const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
2627 assert(ImpDefs && "Caller should check hasPhysRegDefs");
2628 for (const SDNode *SUNode = SU->getNode(); SUNode;
2629 SUNode = SUNode->getGluedNode()) {
2630 if (!SUNode->isMachineOpcode())
2631 continue;
2632 const unsigned *SUImpDefs =
2633 TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
2634 if (!SUImpDefs)
2635 return false;
2636 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2637 EVT VT = N->getValueType(i);
2638 if (VT == MVT::Glue || VT == MVT::Other)
2639 continue;
2640 if (!N->hasAnyUseOfValue(i))
2641 continue;
2642 unsigned Reg = ImpDefs[i - NumDefs];
2643 for (;*SUImpDefs; ++SUImpDefs) {
2644 unsigned SUReg = *SUImpDefs;
2645 if (TRI->regsOverlap(Reg, SUReg))
2646 return true;
2647 }
2648 }
2649 }
2650 return false;
2651 }
2653 /// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
2654 /// are not handled well by the general register pressure reduction
2655 /// heuristics. When presented with code like this:
2656 ///
2657 ///      N
2658 ///    / |
2659 ///   /  |
2660 ///  U  store
2661 ///  |
2662 /// ...
2663 ///
2664 /// the heuristics tend to push the store up, but since the
2665 /// operand of the store has another use (U), this would increase
2666 /// the length of that other use (the U->N edge).
2668 /// This function transforms code like the above to route U's
2669 /// dependence through the store when possible, like this:
2670 ///
2671 ///      N
2672 ///      ||
2673 ///      ||
2674 ///     store
2675 ///       |
2676 ///       U
2677 ///       |
2678 ///      ...
2679 ///
2680 /// This results in the store being scheduled immediately
2681 /// after N, which shortens the U->N live range, reducing
2682 /// register pressure.
2684 void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
2685 // Visit all the nodes in topological order, working top-down.
2686 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
2687 SUnit *SU = &(*SUnits)[i];
2688 // For now, only look at nodes with no data successors, such as stores.
2689 // These are especially important, due to the heuristics in
2690 // getNodePriority for nodes with no data successors.
2691 if (SU->NumSuccs != 0)
2692 continue;
2693 // For now, only look at nodes with exactly one data predecessor.
2694 if (SU->NumPreds != 1)
2695 continue;
2696 // Avoid prescheduling copies to virtual registers, which don't behave
2697 // like other nodes from the perspective of scheduling heuristics.
2698 if (SDNode *N = SU->getNode())
2699 if (N->getOpcode() == ISD::CopyToReg &&
2700 TargetRegisterInfo::isVirtualRegister
2701 (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2702 continue;
2704 // Locate the single data predecessor.
2705 SUnit *PredSU = 0;
2706 for (SUnit::const_pred_iterator II = SU->Preds.begin(),
2707 EE = SU->Preds.end(); II != EE; ++II)
2708 if (!II->isCtrl()) {
2709 PredSU = II->getSUnit();
2710 break;
2711 }
2712 assert(PredSU);
2714 // Don't rewrite edges that carry physregs, because that requires additional
2715 // support infrastructure.
2716 if (PredSU->hasPhysRegDefs)
2717 continue;
2718 // Short-circuit the case where SU is PredSU's only data successor.
2719 if (PredSU->NumSuccs == 1)
2720 continue;
2721 // Avoid prescheduling to copies from virtual registers, which don't behave
2722 // like other nodes from the perspective of scheduling heuristics.
2723 if (SDNode *N = SU->getNode())
2724 if (N->getOpcode() == ISD::CopyFromReg &&
2725 TargetRegisterInfo::isVirtualRegister
2726 (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2727 continue;
2729 // Perform checks on the successors of PredSU.
2730 for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
2731 EE = PredSU->Succs.end(); II != EE; ++II) {
2732 SUnit *PredSuccSU = II->getSUnit();
2733 if (PredSuccSU == SU) continue;
2734 // If PredSU has another successor with no data successors, for
2735 // now don't attempt to choose either over the other.
2736 if (PredSuccSU->NumSuccs == 0)
2737 goto outer_loop_continue;
2738 // Don't break physical register dependencies.
2739 if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
2740 if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
2741 goto outer_loop_continue;
2742 // Don't introduce graph cycles.
2743 if (scheduleDAG->IsReachable(SU, PredSuccSU))
2744 goto outer_loop_continue;
2745 }
2747 // Ok, the transformation is safe and the heuristics suggest it is
2748 // profitable. Update the graph.
2749 DEBUG(dbgs() << " Prescheduling SU #" << SU->NodeNum
2750 << " next to PredSU #" << PredSU->NodeNum
2751 << " to guide scheduling in the presence of multiple uses\n");
2752 for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
2753 SDep Edge = PredSU->Succs[i];
2754 assert(!Edge.isAssignedRegDep());
2755 SUnit *SuccSU = Edge.getSUnit();
2756 if (SuccSU != SU) {
2757 Edge.setSUnit(PredSU);
2758 scheduleDAG->RemovePred(SuccSU, Edge);
2759 scheduleDAG->AddPred(SU, Edge);
2760 Edge.setSUnit(SU);
2761 scheduleDAG->AddPred(SuccSU, Edge);
2762 --i;
2763 }
2764 }
2765 outer_loop_continue:;
2766 }
2767 }
2769 /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
2770 /// it as a def&use operand, add a pseudo control edge from it to the other
2771 /// node (if it won't create a cycle) so the two-address one will be scheduled
2772 /// first (lower in the schedule). If both nodes are two-address, favor the
2773 /// one that has a CopyToReg use (more likely to be a loop induction update).
2774 /// If both are two-address, but one is commutable while the other is not
2775 /// commutable, favor the one that's not commutable.
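/// A hypothetical instance (operand names invented): suppose B is read both
/// by "A = add B, C", where the target ties A to B so the add destroys B,
/// and by "D = sub B, E". If the sub ended up below the add, B would have to
/// be copied before being clobbered; the artificial edge instead forces the
/// add below the sub, so B's last use is the instruction that kills it and
/// no copy is needed.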
2776 void RegReductionPQBase::AddPseudoTwoAddrDeps() {
2777 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
2778 SUnit *SU = &(*SUnits)[i];
2779 if (!SU->isTwoAddress)
2780 continue;
2782 SDNode *Node = SU->getNode();
2783 if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
2784 continue;
2786 bool isLiveOut = hasOnlyLiveOutUses(SU);
2787 unsigned Opc = Node->getMachineOpcode();
2788 const TargetInstrDesc &TID = TII->get(Opc);
2789 unsigned NumRes = TID.getNumDefs();
2790 unsigned NumOps = TID.getNumOperands() - NumRes;
2791 for (unsigned j = 0; j != NumOps; ++j) {
2792 if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
2793 continue;
2794 SDNode *DU = SU->getNode()->getOperand(j).getNode();
2795 if (DU->getNodeId() == -1)
2796 continue;
2797 const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
2798 if (!DUSU) continue;
2799 for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
2800 E = DUSU->Succs.end(); I != E; ++I) {
2801 if (I->isCtrl()) continue;
2802 SUnit *SuccSU = I->getSUnit();
2803 if (SuccSU == SU)
2804 continue;
2805 // Be conservative. Ignore if nodes aren't at roughly the same
2806 // depth and height.
2807 if (SuccSU->getHeight() < SU->getHeight() &&
2808 (SU->getHeight() - SuccSU->getHeight()) > 1)
2809 continue;
2810 // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
2811 // constrains whatever is using the copy, instead of the copy
2812 // itself. In the case that the copy is coalesced, this
2813 // preserves the intent of the pseudo two-address heuristics.
2814 while (SuccSU->Succs.size() == 1 &&
2815 SuccSU->getNode()->isMachineOpcode() &&
2816 SuccSU->getNode()->getMachineOpcode() ==
2817 TargetOpcode::COPY_TO_REGCLASS)
2818 SuccSU = SuccSU->Succs.front().getSUnit();
2819 // Don't constrain non-instruction nodes.
2820 if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
2821 continue;
2822 // Don't constrain nodes with physical register defs if the
2823 // predecessor can clobber them.
2824 if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
2825 if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
2826 continue;
2827 }
2828 // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
2829 // these may be coalesced away. We want them close to their uses.
2830 unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
2831 if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
2832 SuccOpc == TargetOpcode::INSERT_SUBREG ||
2833 SuccOpc == TargetOpcode::SUBREG_TO_REG)
2834 continue;
2835 if ((!canClobber(SuccSU, DUSU) ||
2836 (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
2837 (!SU->isCommutable && SuccSU->isCommutable)) &&
2838 !scheduleDAG->IsReachable(SuccSU, SU)) {
2839 DEBUG(dbgs() << " Adding a pseudo-two-addr edge from SU #"
2840 << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
2841 scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
2842 /*Reg=*/0, /*isNormalMemory=*/false,
2843 /*isMustAlias=*/false,
2844 /*isArtificial=*/true));
2845 }
2846 }
2847 }
2848 }
2849 }
2851 /// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
2852 /// predecessors of the successors of the SUnit SU. Stop when the provided
2853 /// limit is exceeded.
2854 static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
2855 unsigned Limit) {
2856 unsigned Sum = 0;
2857 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2858 I != E; ++I) {
2859 const SUnit *SuccSU = I->getSUnit();
2860 for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
2861 EE = SuccSU->Preds.end(); II != EE; ++II) {
2862 SUnit *PredSU = II->getSUnit();
2863 if (!PredSU->isScheduled)
2864 if (++Sum > Limit)
2865 return Sum;
2866 }
2867 }
2868 return Sum;
2869 }
2873 bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
2874 if (int res = checkSpecialNodes(left, right))
2875 return res > 0;
2877 unsigned LPriority = SPQ->getNodePriority(left);
2878 unsigned RPriority = SPQ->getNodePriority(right);
2879 bool LIsTarget = left->getNode() && left->getNode()->isMachineOpcode();
2880 bool RIsTarget = right->getNode() && right->getNode()->isMachineOpcode();
2881 bool LIsFloater = LIsTarget && left->NumPreds == 0;
2882 bool RIsFloater = RIsTarget && right->NumPreds == 0;
2883 unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
2884 unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0;
2886 if (left->NumSuccs == 0 && right->NumSuccs != 0)
2887 return false;
2888 else if (left->NumSuccs != 0 && right->NumSuccs == 0)
2889 return true;
2891 if (LIsFloater)
2892 LBonus -= 2;
2893 if (RIsFloater)
2894 RBonus -= 2;
2895 if (left->NumSuccs == 1)
2896 LBonus += 2;
2897 if (right->NumSuccs == 1)
2898 RBonus += 2;
2900 if (LPriority+LBonus != RPriority+RBonus)
2901 return LPriority+LBonus < RPriority+RBonus;
2903 if (left->getDepth() != right->getDepth())
2904 return left->getDepth() < right->getDepth();
2906 if (left->NumSuccsLeft != right->NumSuccsLeft)
2907 return left->NumSuccsLeft > right->NumSuccsLeft;
2909 assert(left->NodeQueueId && right->NodeQueueId &&
2910 "NodeQueueId cannot be zero");
2911 return (left->NodeQueueId > right->NodeQueueId);
2912 }
2914 //===----------------------------------------------------------------------===//
2915 // Public Constructor Functions
2916 //===----------------------------------------------------------------------===//
2918 llvm::ScheduleDAGSDNodes *
2919 llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
2920 CodeGenOpt::Level OptLevel) {
2921 const TargetMachine &TM = IS->TM;
2922 const TargetInstrInfo *TII = TM.getInstrInfo();
2923 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2925 BURegReductionPriorityQueue *PQ =
2926 new BURegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
2927 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
2928 PQ->setScheduleDAG(SD);
2929 return SD;
2930 }
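// This factory and the four that follow share one shape: build the priority
// queue, build the list scheduler around it, then connect the two so the
// queue can query the DAG. They are reached through the RegisterScheduler
// entries at the top of this file, e.g. "llc -pre-RA-sched=list-burr".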
2932 llvm::ScheduleDAGSDNodes *
2933 llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
2934 CodeGenOpt::Level OptLevel) {
2935 const TargetMachine &TM = IS->TM;
2936 const TargetInstrInfo *TII = TM.getInstrInfo();
2937 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2939 TDRegReductionPriorityQueue *PQ =
2940 new TDRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
2941 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
2942 PQ->setScheduleDAG(SD);
2943 return SD;
2944 }
2946 llvm::ScheduleDAGSDNodes *
2947 llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
2948 CodeGenOpt::Level OptLevel) {
2949 const TargetMachine &TM = IS->TM;
2950 const TargetInstrInfo *TII = TM.getInstrInfo();
2951 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2953 SrcRegReductionPriorityQueue *PQ =
2954 new SrcRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
2955 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
2956 PQ->setScheduleDAG(SD);
2957 return SD;
2958 }
2960 llvm::ScheduleDAGSDNodes *
2961 llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
2962 CodeGenOpt::Level OptLevel) {
2963 const TargetMachine &TM = IS->TM;
2964 const TargetInstrInfo *TII = TM.getInstrInfo();
2965 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2966 const TargetLowering *TLI = &IS->getTargetLowering();
2968 HybridBURRPriorityQueue *PQ =
2969 new HybridBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
2971 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
2972 PQ->setScheduleDAG(SD);
2973 return SD;
2974 }
2976 llvm::ScheduleDAGSDNodes *
2977 llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
2978 CodeGenOpt::Level OptLevel) {
2979 const TargetMachine &TM = IS->TM;
2980 const TargetInstrInfo *TII = TM.getInstrInfo();
2981 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2982 const TargetLowering *TLI = &IS->getTargetLowering();
2984 ILPBURRPriorityQueue *PQ =
2985 new ILPBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
2986 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
2987 PQ->setScheduleDAG(SD);
2988 return SD;
2989 }