1 //===----- ScheduleDAGList.cpp - Reg pressure reduction list scheduler ----===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements bottom-up and top-down register pressure reduction list
11 // schedulers, using standard algorithms. The basic approach uses a priority
12 // queue of available nodes to schedule. One at a time, nodes are taken from
13 // the priority queue (thus in priority order), checked for legality to
14 // schedule, and emitted if legal.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "pre-RA-sched"
19 #include "llvm/CodeGen/ScheduleDAG.h"
20 #include "llvm/CodeGen/SchedulerRegistry.h"
21 #include "llvm/Target/TargetRegisterInfo.h"
22 #include "llvm/Target/TargetData.h"
23 #include "llvm/Target/TargetMachine.h"
24 #include "llvm/Target/TargetInstrInfo.h"
25 #include "llvm/Support/Debug.h"
26 #include "llvm/Support/Compiler.h"
27 #include "llvm/ADT/BitVector.h"
28 #include "llvm/ADT/PriorityQueue.h"
29 #include "llvm/ADT/SmallPtrSet.h"
30 #include "llvm/ADT/SmallSet.h"
31 #include "llvm/ADT/Statistic.h"
32 #include "llvm/ADT/STLExtras.h"
34 #include "llvm/Support/CommandLine.h"
37 STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
38 STATISTIC(NumUnfolds, "Number of nodes unfolded");
39 STATISTIC(NumDups, "Number of duplicated nodes");
40 STATISTIC(NumCCCopies, "Number of cross class copies");
42 static RegisterScheduler
43 burrListDAGScheduler("list-burr",
44 " Bottom-up register reduction list scheduling",
45 createBURRListDAGScheduler);
46 static RegisterScheduler
47 tdrListrDAGScheduler("list-tdrr",
48 " Top-down register reduction list scheduling",
49 createTDRRListDAGScheduler);
52 //===----------------------------------------------------------------------===//
53 /// ScheduleDAGRRList - The actual register reduction list scheduler
54 /// implementation. This supports both top-down and bottom-up scheduling.
56 class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAG {
/// isBottomUp - This is true if the scheduling problem is bottom-up, false if
/// it is top-down.
62 /// Fast - True if we are performing fast scheduling.
66 /// AvailableQueue - The priority queue to use for the available SUnits.
67 SchedulingPriorityQueue *AvailableQueue;
/// LiveRegs / LiveRegDefs - A set of physical registers and their definitions
/// that are "live". These nodes must be scheduled before any other node that
/// modifies the registers can be scheduled.
72 SmallSet<unsigned, 4> LiveRegs;
73 std::vector<SUnit*> LiveRegDefs;
74 std::vector<unsigned> LiveRegCycles;
77 ScheduleDAGRRList(SelectionDAG &dag, MachineBasicBlock *bb,
78 const TargetMachine &tm, bool isbottomup, bool f,
79 SchedulingPriorityQueue *availqueue)
80 : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup), Fast(f),
81 AvailableQueue(availqueue) {
84 ~ScheduleDAGRRList() {
85 delete AvailableQueue;
90 /// IsReachable - Checks if SU is reachable from TargetSU.
91 bool IsReachable(SUnit *SU, SUnit *TargetSU);
/// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
/// create a cycle.
95 bool WillCreateCycle(SUnit *SU, SUnit *TargetSU);
97 /// AddPred - This adds the specified node X as a predecessor of
/// the current node Y if it is not already one.
99 /// This returns true if this is a new predecessor.
100 /// Updates the topological ordering if required.
101 bool AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
102 unsigned PhyReg = 0, int Cost = 1);
104 /// RemovePred - This removes the specified node N from the predecessors of
105 /// the current node M. Updates the topological ordering if required.
106 bool RemovePred(SUnit *M, SUnit *N, bool isCtrl, bool isSpecial);
109 void ReleasePred(SUnit*, bool, unsigned);
110 void ReleaseSucc(SUnit*, bool isChain, unsigned);
111 void CapturePred(SUnit*, SUnit*, bool);
112 void ScheduleNodeBottomUp(SUnit*, unsigned);
113 void ScheduleNodeTopDown(SUnit*, unsigned);
114 void UnscheduleNodeBottomUp(SUnit*);
115 void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
116 SUnit *CopyAndMoveSuccessors(SUnit*);
117 void InsertCCCopiesAndMoveSuccs(SUnit*, unsigned,
118 const TargetRegisterClass*,
119 const TargetRegisterClass*,
120 SmallVector<SUnit*, 2>&);
121 bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
122 void ListScheduleTopDown();
123 void ListScheduleBottomUp();
124 void CommuteNodesToReducePressure();
127 /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
128 /// Updates the topological ordering if required.
129 SUnit *CreateNewSUnit(SDNode *N) {
130 SUnit *NewNode = NewSUnit(N);
131 // Update the topological ordering.
132 if (NewNode->NodeNum >= Node2Index.size())
133 InitDAGTopologicalSorting();
137 /// CreateClone - Creates a new SUnit from an existing one.
138 /// Updates the topological ordering if required.
139 SUnit *CreateClone(SUnit *N) {
140 SUnit *NewNode = Clone(N);
141 // Update the topological ordering.
142 if (NewNode->NodeNum >= Node2Index.size())
143 InitDAGTopologicalSorting();
147 /// Functions for preserving the topological ordering
148 /// even after dynamic insertions of new edges.
149 /// This allows a very fast implementation of IsReachable.
151 /// InitDAGTopologicalSorting - create the initial topological
152 /// ordering from the DAG to be scheduled.
153 void InitDAGTopologicalSorting();
155 /// DFS - make a DFS traversal and mark all nodes affected by the
156 /// edge insertion. These nodes will later get new topological indexes
157 /// by means of the Shift method.
158 void DFS(SUnit *SU, int UpperBound, bool& HasLoop);
160 /// Shift - reassign topological indexes for the nodes in the DAG
161 /// to preserve the topological ordering.
162 void Shift(BitVector& Visited, int LowerBound, int UpperBound);
164 /// Allocate - assign the topological index to the node n.
165 void Allocate(int n, int index);
167 /// Index2Node - Maps topological index to the node number.
168 std::vector<int> Index2Node;
169 /// Node2Index - Maps the node number to its topological index.
170 std::vector<int> Node2Index;
171 /// Visited - a set of nodes visited during a DFS traversal.
174 } // end anonymous namespace
177 /// Schedule - Schedule the DAG using list scheduling.
178 void ScheduleDAGRRList::Schedule() {
179 DOUT << "********** List Scheduling **********\n";
181 LiveRegDefs.resize(TRI->getNumRegs(), NULL);
182 LiveRegCycles.resize(TRI->getNumRegs(), 0);
184 // Build scheduling units.
187 DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
188 SUnits[su].dumpAll(&DAG));
193 InitDAGTopologicalSorting();
195 AvailableQueue->initNodes(SUnits);
197 // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
199 ListScheduleBottomUp();
201 ListScheduleTopDown();
203 AvailableQueue->releaseState();
206 CommuteNodesToReducePressure();
208 DOUT << "*** Final schedule ***\n";
209 DEBUG(dumpSchedule());
213 /// CommuteNodesToReducePressure - If a node is two-address and commutable, and
214 /// it is not the last use of its first operand, add it to the CommuteSet if
/// possible. It will be commuted when it is translated to an MI.
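///
/// For example (illustrative): for A = add B, C with A tied to B, if B is
/// still used below A but C is not, commuting to A = add C, B lets A clobber
/// C's register instead of forcing a copy of B.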
216 void ScheduleDAGRRList::CommuteNodesToReducePressure() {
217 SmallPtrSet<SUnit*, 4> OperandSeen;
218 for (unsigned i = Sequence.size(); i != 0; ) {
220 SUnit *SU = Sequence[i];
221 if (!SU || !SU->Node) continue;
222 if (SU->isCommutable) {
223 unsigned Opc = SU->Node->getTargetOpcode();
224 const TargetInstrDesc &TID = TII->get(Opc);
225 unsigned NumRes = TID.getNumDefs();
226 unsigned NumOps = TID.getNumOperands() - NumRes;
227 for (unsigned j = 0; j != NumOps; ++j) {
228 if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
231 SDNode *OpN = SU->Node->getOperand(j).Val;
232 SUnit *OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
233 if (OpSU && OperandSeen.count(OpSU) == 1) {
234 // Ok, so SU is not the last use of OpSU, but SU is two-address so
// it will clobber OpSU. Try to commute SU if no other source operands
// are live below it.
237 bool DoCommute = true;
238 for (unsigned k = 0; k < NumOps; ++k) {
240 OpN = SU->Node->getOperand(k).Val;
241 OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
242 if (OpSU && OperandSeen.count(OpSU) == 1) {
249 CommuteSet.insert(SU->Node);
252 // Only look at the first use&def node for now.
257 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
260 OperandSeen.insert(I->Dep->OrigNode);
265 //===----------------------------------------------------------------------===//
266 // Bottom-Up Scheduling
267 //===----------------------------------------------------------------------===//
269 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
270 /// the AvailableQueue if the count reaches zero. Also update its cycle bound.
271 void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain,
273 // FIXME: the distance between two nodes is not always == the predecessor's
274 // latency. For example, the reader can very well read the register written
275 // by the predecessor later than the issue cycle. It also depends on the
276 // interrupt model (drain vs. freeze).
277 PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);
279 --PredSU->NumSuccsLeft;
282 if (PredSU->NumSuccsLeft < 0) {
283 cerr << "*** List scheduling failed! ***\n";
285 cerr << " has been released too many times!\n";
290 if (PredSU->NumSuccsLeft == 0) {
291 PredSU->isAvailable = true;
292 AvailableQueue->push(PredSU);
296 /// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
297 /// count of its predecessors. If a predecessor pending count is zero, add it to
298 /// the Available queue.
299 void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
300 DOUT << "*** Scheduling [" << CurCycle << "]: ";
301 DEBUG(SU->dump(&DAG));
302 SU->Cycle = CurCycle;
304 AvailableQueue->ScheduledNode(SU);
306 // Bottom up: release predecessors
307 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
309 ReleasePred(I->Dep, I->isCtrl, CurCycle);
311 // This is a physical register dependency and it's impossible or
312 // expensive to copy the register. Make sure nothing that can
// clobber the register is scheduled between the predecessor and
// this node.
315 if (LiveRegs.insert(I->Reg)) {
316 LiveRegDefs[I->Reg] = I->Dep;
317 LiveRegCycles[I->Reg] = CurCycle;
322 // Release all the implicit physical register defs that are live.
323 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
326 if (LiveRegCycles[I->Reg] == I->Dep->Cycle) {
327 LiveRegs.erase(I->Reg);
328 assert(LiveRegDefs[I->Reg] == SU &&
329 "Physical register dependency violated?");
330 LiveRegDefs[I->Reg] = NULL;
331 LiveRegCycles[I->Reg] = 0;
336 SU->isScheduled = true;
339 /// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the NumSuccsLeft count of its predecessors. Remove
341 /// them from AvailableQueue if necessary.
342 void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {
343 unsigned CycleBound = 0;
344 for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end();
348 CycleBound = std::max(CycleBound,
349 I->Dep->Cycle + PredSU->Latency);
352 if (PredSU->isAvailable) {
353 PredSU->isAvailable = false;
354 if (!PredSU->isPending)
355 AvailableQueue->remove(PredSU);
358 PredSU->CycleBound = CycleBound;
359 ++PredSU->NumSuccsLeft;
/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update it
/// and its predecessors' states to reflect the change.
364 void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
365 DOUT << "*** Unscheduling [" << SU->Cycle << "]: ";
366 DEBUG(SU->dump(&DAG));
368 AvailableQueue->UnscheduledNode(SU);
370 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
372 CapturePred(I->Dep, SU, I->isCtrl);
373 if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg]) {
374 LiveRegs.erase(I->Reg);
375 assert(LiveRegDefs[I->Reg] == I->Dep &&
376 "Physical register dependency violated?");
377 LiveRegDefs[I->Reg] = NULL;
378 LiveRegCycles[I->Reg] = 0;
382 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
385 if (LiveRegs.insert(I->Reg)) {
386 assert(!LiveRegDefs[I->Reg] &&
387 "Physical register dependency violated?");
388 LiveRegDefs[I->Reg] = SU;
390 if (I->Dep->Cycle < LiveRegCycles[I->Reg])
391 LiveRegCycles[I->Reg] = I->Dep->Cycle;
396 SU->isScheduled = false;
397 SU->isAvailable = true;
398 AvailableQueue->push(SU);
401 /// IsReachable - Checks if SU is reachable from TargetSU.
402 bool ScheduleDAGRRList::IsReachable(SUnit *SU, SUnit *TargetSU) {
403 // If insertion of the edge SU->TargetSU would create a cycle
404 // then there is a path from TargetSU to SU.
405 int UpperBound, LowerBound;
406 LowerBound = Node2Index[TargetSU->NodeNum];
407 UpperBound = Node2Index[SU->NodeNum];
408 bool HasLoop = false;
409 // Is Ord(TargetSU) < Ord(SU) ?
410 if (LowerBound < UpperBound) {
412 // There may be a path from TargetSU to SU. Check for it.
413 DFS(TargetSU, UpperBound, HasLoop);
418 /// Allocate - assign the topological index to the node n.
419 inline void ScheduleDAGRRList::Allocate(int n, int index) {
420 Node2Index[n] = index;
421 Index2Node[index] = n;
424 /// InitDAGTopologicalSorting - create the initial topological
425 /// ordering from the DAG to be scheduled.
427 /// The idea of the algorithm is taken from
428 /// "Online algorithms for managing the topological order of
429 /// a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly
430 /// This is the MNR algorithm, which was first introduced by
431 /// A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
432 /// "Maintaining a topological order under edge insertions".
434 /// Short description of the algorithm:
436 /// Topological ordering, ord, of a DAG maps each node to a topological
437 /// index so that for all edges X->Y it is the case that ord(X) < ord(Y).
439 /// This means that if there is a path from the node X to the node Z,
440 /// then ord(X) < ord(Z).
442 /// This property can be used to check for reachability of nodes:
/// if Z is reachable from X, then an insertion of the edge Z->X would
/// create a cycle.
446 /// The algorithm first computes a topological ordering for the DAG by
447 /// initializing the Index2Node and Node2Index arrays and then tries to keep
448 /// the ordering up-to-date after edge insertions by reordering the DAG.
450 /// On insertion of the edge X->Y, the algorithm first marks by calling DFS
451 /// the nodes reachable from Y, and then shifts them using Shift to lie
452 /// immediately after X in Index2Node.
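///
/// For illustration (hypothetical nodes): take A->B->C with ord(A)=0,
/// ord(B)=1 and ord(C)=2. Inserting the edge A->C needs no work, since
/// ord(A) < ord(C) already holds. Inserting C->A starts a DFS from A bounded
/// by ord(C); it marks A and B and runs into C itself, i.e. the new edge
/// would close a cycle. Had there been no path from A to C, Shift would have
/// renumbered the marked nodes to lie immediately after C.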
453 void ScheduleDAGRRList::InitDAGTopologicalSorting() {
454 unsigned DAGSize = SUnits.size();
455 std::vector<unsigned> InDegree(DAGSize);
456 std::vector<SUnit*> WorkList;
457 WorkList.reserve(DAGSize);
458 std::vector<SUnit*> TopOrder;
459 TopOrder.reserve(DAGSize);
461 // Initialize the data structures.
462 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
463 SUnit *SU = &SUnits[i];
464 int NodeNum = SU->NodeNum;
465 unsigned Degree = SU->Succs.size();
466 InDegree[NodeNum] = Degree;
468 // Is it a node without dependencies?
470 assert(SU->Succs.empty() && "SUnit should have no successors");
471 // Collect leaf nodes.
472 WorkList.push_back(SU);
476 while (!WorkList.empty()) {
477 SUnit *SU = WorkList.back();
479 TopOrder.push_back(SU);
480 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
483 if (!--InDegree[SU->NodeNum])
484 // If all dependencies of the node are processed already,
485 // then the node can be computed now.
486 WorkList.push_back(SU);
490 // Second pass, assign the actual topological order as node ids.
495 Index2Node.resize(DAGSize);
496 Node2Index.resize(DAGSize);
497 Visited.resize(DAGSize);
499 for (std::vector<SUnit*>::reverse_iterator TI = TopOrder.rbegin(),
500 TE = TopOrder.rend();TI != TE; ++TI) {
501 Allocate((*TI)->NodeNum, Id);
506 // Check correctness of the ordering
507 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
508 SUnit *SU = &SUnits[i];
509 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
511 assert(Node2Index[SU->NodeNum] > Node2Index[I->Dep->NodeNum] &&
512 "Wrong topological sorting");
518 /// AddPred - adds an edge from SUnit X to SUnit Y.
519 /// Updates the topological ordering if required.
520 bool ScheduleDAGRRList::AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
521 unsigned PhyReg, int Cost) {
522 int UpperBound, LowerBound;
523 LowerBound = Node2Index[Y->NodeNum];
524 UpperBound = Node2Index[X->NodeNum];
525 bool HasLoop = false;
526 // Is Ord(X) < Ord(Y) ?
527 if (LowerBound < UpperBound) {
528 // Update the topological order.
530 DFS(Y, UpperBound, HasLoop);
531 assert(!HasLoop && "Inserted edge creates a loop!");
532 // Recompute topological indexes.
533 Shift(Visited, LowerBound, UpperBound);
535 // Now really insert the edge.
536 return Y->addPred(X, isCtrl, isSpecial, PhyReg, Cost);
539 /// RemovePred - This removes the specified node N from the predecessors of
540 /// the current node M. Updates the topological ordering if required.
541 bool ScheduleDAGRRList::RemovePred(SUnit *M, SUnit *N,
542 bool isCtrl, bool isSpecial) {
543 // InitDAGTopologicalSorting();
544 return M->removePred(N, isCtrl, isSpecial);
547 /// DFS - Make a DFS traversal to mark all nodes reachable from SU and mark
548 /// all nodes affected by the edge insertion. These nodes will later get new
549 /// topological indexes by means of the Shift method.
550 void ScheduleDAGRRList::DFS(SUnit *SU, int UpperBound, bool& HasLoop) {
551 std::vector<SUnit*> WorkList;
552 WorkList.reserve(SUnits.size());
554 WorkList.push_back(SU);
555 while (!WorkList.empty()) {
556 SU = WorkList.back();
558 Visited.set(SU->NodeNum);
559 for (int I = SU->Succs.size()-1; I >= 0; --I) {
560 int s = SU->Succs[I].Dep->NodeNum;
561 if (Node2Index[s] == UpperBound) {
// Visit successors if not already visited and in affected region.
566 if (!Visited.test(s) && Node2Index[s] < UpperBound) {
567 WorkList.push_back(SU->Succs[I].Dep);
/// Shift - Renumber the nodes so that the topological ordering is
/// preserved.
575 void ScheduleDAGRRList::Shift(BitVector& Visited, int LowerBound,
581 for (i = LowerBound; i <= UpperBound; ++i) {
582 // w is node at topological index i.
583 int w = Index2Node[i];
584 if (Visited.test(w)) {
590 Allocate(w, i - shift);
594 for (unsigned j = 0; j < L.size(); ++j) {
595 Allocate(L[j], i - shift);
601 /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
603 bool ScheduleDAGRRList::WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
604 if (IsReachable(TargetSU, SU))
606 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
608 if (I->Cost < 0 && IsReachable(TargetSU, I->Dep))
613 /// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
/// BtCycle in order to schedule a specific node. Successors of SU may be
/// unscheduled in the process.
616 void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
617 unsigned &CurCycle) {
619 while (CurCycle > BtCycle) {
620 OldSU = Sequence.back();
622 if (SU->isSucc(OldSU))
623 // Don't try to remove SU from AvailableQueue.
624 SU->isAvailable = false;
625 UnscheduleNodeBottomUp(OldSU);
630 if (SU->isSucc(OldSU)) {
631 assert(false && "Something is wrong!");
638 /// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
639 /// successors to the newly created node.
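///
/// This is used below when a live physical register def has to be made
/// available again: the defining node is either unfolded (splitting a folded
/// load back out) or cloned, and its already-scheduled successors are rewired
/// to the new node.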
640 SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
641 if (SU->FlaggedNodes.size())
644 SDNode *N = SU->Node;
649 bool TryUnfold = false;
650 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
651 MVT VT = N->getValueType(i);
654 else if (VT == MVT::Other)
657 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
658 const SDOperand &Op = N->getOperand(i);
659 MVT VT = Op.Val->getValueType(Op.ResNo);
665 SmallVector<SDNode*, 2> NewNodes;
666 if (!TII->unfoldMemoryOperand(DAG, N, NewNodes))
669 DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
670 assert(NewNodes.size() == 2 && "Expected a load folding node!");
673 SDNode *LoadNode = NewNodes[0];
674 unsigned NumVals = N->getNumValues();
675 unsigned OldNumVals = SU->Node->getNumValues();
676 for (unsigned i = 0; i != NumVals; ++i)
677 DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, i), SDOperand(N, i));
678 DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, OldNumVals-1),
679 SDOperand(LoadNode, 1));
681 SUnit *NewSU = CreateNewSUnit(N);
682 assert(N->getNodeId() == -1 && "Node already inserted!");
683 N->setNodeId(NewSU->NodeNum);
685 const TargetInstrDesc &TID = TII->get(N->getTargetOpcode());
686 for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
687 if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
688 NewSU->isTwoAddress = true;
692 if (TID.isCommutable())
693 NewSU->isCommutable = true;
694 // FIXME: Calculate height / depth and propagate the changes?
695 NewSU->Depth = SU->Depth;
696 NewSU->Height = SU->Height;
697 ComputeLatency(NewSU);
699 // LoadNode may already exist. This can happen when there is another
// load from the same location that produces the same type of value but
// has different alignment or volatility.
702 bool isNewLoad = true;
704 if (LoadNode->getNodeId() != -1) {
705 LoadSU = &SUnits[LoadNode->getNodeId()];
708 LoadSU = CreateNewSUnit(LoadNode);
709 LoadNode->setNodeId(LoadSU->NodeNum);
711 LoadSU->Depth = SU->Depth;
712 LoadSU->Height = SU->Height;
713 ComputeLatency(LoadSU);
716 SUnit *ChainPred = NULL;
717 SmallVector<SDep, 4> ChainSuccs;
718 SmallVector<SDep, 4> LoadPreds;
719 SmallVector<SDep, 4> NodePreds;
720 SmallVector<SDep, 4> NodeSuccs;
721 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
725 else if (I->Dep->Node && I->Dep->Node->isOperandOf(LoadNode))
726 LoadPreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
728 NodePreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
730 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
733 ChainSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
734 I->isCtrl, I->isSpecial));
736 NodeSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
737 I->isCtrl, I->isSpecial));
741 RemovePred(SU, ChainPred, true, false);
743 AddPred(LoadSU, ChainPred, true, false);
745 for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
746 SDep *Pred = &LoadPreds[i];
747 RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
749 AddPred(LoadSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
750 Pred->Reg, Pred->Cost);
753 for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
754 SDep *Pred = &NodePreds[i];
755 RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
756 AddPred(NewSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
757 Pred->Reg, Pred->Cost);
759 for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
760 SDep *Succ = &NodeSuccs[i];
761 RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
762 AddPred(Succ->Dep, NewSU, Succ->isCtrl, Succ->isSpecial,
763 Succ->Reg, Succ->Cost);
765 for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
766 SDep *Succ = &ChainSuccs[i];
767 RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
769 AddPred(Succ->Dep, LoadSU, Succ->isCtrl, Succ->isSpecial,
770 Succ->Reg, Succ->Cost);
774 AddPred(NewSU, LoadSU, false, false);
778 AvailableQueue->addNode(LoadSU);
779 AvailableQueue->addNode(NewSU);
783 if (NewSU->NumSuccsLeft == 0) {
784 NewSU->isAvailable = true;
790 DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
791 NewSU = CreateClone(SU);
793 // New SUnit has the exact same predecessors.
794 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
797 AddPred(NewSU, I->Dep, I->isCtrl, false, I->Reg, I->Cost);
798 NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1);
801 // Only copy scheduled successors. Cut them from old node's successor
802 // list and move them over.
803 SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
804 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
808 if (I->Dep->isScheduled) {
809 NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1);
810 AddPred(I->Dep, NewSU, I->isCtrl, false, I->Reg, I->Cost);
811 DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
814 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
815 SUnit *Succ = DelDeps[i].first;
816 bool isCtrl = DelDeps[i].second;
817 RemovePred(Succ, SU, isCtrl, false);
820 AvailableQueue->updateNode(SU);
821 AvailableQueue->addNode(NewSU);
827 /// InsertCCCopiesAndMoveSuccs - Insert expensive cross register class copies
828 /// and move all scheduled successors of the given SUnit to the last copy.
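///
/// The result is a chain SU -> CopyFromSU -> CopyToSU carrying the value in
/// Reg: CopyFromSU copies the physical register into DestRC, CopyToSU copies
/// it back into SrcRC, and SU's already-scheduled successors are rewired to
/// depend on CopyToSU instead.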
829 void ScheduleDAGRRList::InsertCCCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
830 const TargetRegisterClass *DestRC,
831 const TargetRegisterClass *SrcRC,
832 SmallVector<SUnit*, 2> &Copies) {
833 SUnit *CopyFromSU = CreateNewSUnit(NULL);
834 CopyFromSU->CopySrcRC = SrcRC;
835 CopyFromSU->CopyDstRC = DestRC;
836 CopyFromSU->Depth = SU->Depth;
837 CopyFromSU->Height = SU->Height;
839 SUnit *CopyToSU = CreateNewSUnit(NULL);
840 CopyToSU->CopySrcRC = DestRC;
841 CopyToSU->CopyDstRC = SrcRC;
843 // Only copy scheduled successors. Cut them from old node's successor
844 // list and move them over.
845 SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
846 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
850 if (I->Dep->isScheduled) {
851 CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1);
852 AddPred(I->Dep, CopyToSU, I->isCtrl, false, I->Reg, I->Cost);
853 DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
856 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
857 SUnit *Succ = DelDeps[i].first;
858 bool isCtrl = DelDeps[i].second;
859 RemovePred(Succ, SU, isCtrl, false);
862 AddPred(CopyFromSU, SU, false, false, Reg, -1);
863 AddPred(CopyToSU, CopyFromSU, false, false, Reg, 1);
865 AvailableQueue->updateNode(SU);
866 AvailableQueue->addNode(CopyFromSU);
867 AvailableQueue->addNode(CopyToSU);
868 Copies.push_back(CopyFromSU);
869 Copies.push_back(CopyToSU);
874 /// getPhysicalRegisterVT - Returns the ValueType of the physical register
875 /// definition of the specified node.
876 /// FIXME: Move to SelectionDAG?
877 static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
878 const TargetInstrInfo *TII) {
879 const TargetInstrDesc &TID = TII->get(N->getTargetOpcode());
880 assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
881 unsigned NumRes = TID.getNumDefs();
882 for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
887 return N->getValueType(NumRes);
890 /// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
891 /// scheduling of the given node to satisfy live physical register dependencies.
892 /// If the specific node is the last one that's available to schedule, do
893 /// whatever is necessary (i.e. backtracking or cloning) to make it possible.
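///
/// For example (illustrative): if EFLAGS is recorded as live because an
/// already-scheduled node still needs the value of an as-yet-unscheduled
/// compare, then a node that implicitly defines EFLAGS cannot be scheduled
/// in between; EFLAGS is reported in LRegs and the node is delayed.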
894 bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
895 SmallVector<unsigned, 4> &LRegs){
896 if (LiveRegs.empty())
899 SmallSet<unsigned, 4> RegAdded;
900 // If this node would clobber any "live" register, then it's not ready.
901 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
904 unsigned Reg = I->Reg;
905 if (LiveRegs.count(Reg) && LiveRegDefs[Reg] != I->Dep) {
906 if (RegAdded.insert(Reg))
907 LRegs.push_back(Reg);
909 for (const unsigned *Alias = TRI->getAliasSet(Reg);
911 if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != I->Dep) {
912 if (RegAdded.insert(*Alias))
913 LRegs.push_back(*Alias);
918 for (unsigned i = 0, e = SU->FlaggedNodes.size()+1; i != e; ++i) {
919 SDNode *Node = (i == 0) ? SU->Node : SU->FlaggedNodes[i-1];
920 if (!Node || !Node->isTargetOpcode())
922 const TargetInstrDesc &TID = TII->get(Node->getTargetOpcode());
923 if (!TID.ImplicitDefs)
925 for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
926 if (LiveRegs.count(*Reg) && LiveRegDefs[*Reg] != SU) {
927 if (RegAdded.insert(*Reg))
928 LRegs.push_back(*Reg);
930 for (const unsigned *Alias = TRI->getAliasSet(*Reg);
932 if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != SU) {
933 if (RegAdded.insert(*Alias))
934 LRegs.push_back(*Alias);
938 return !LRegs.empty();
/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
944 void ScheduleDAGRRList::ListScheduleBottomUp() {
945 unsigned CurCycle = 0;
946 // Add root to Available queue.
947 if (!SUnits.empty()) {
948 SUnit *RootSU = &SUnits[DAG.getRoot().Val->getNodeId()];
949 assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
950 RootSU->isAvailable = true;
951 AvailableQueue->push(RootSU);
954 // While Available queue is not empty, grab the node with the highest
955 // priority. If it is not ready put it back. Schedule the node.
956 SmallVector<SUnit*, 4> NotReady;
957 DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
958 Sequence.reserve(SUnits.size());
959 while (!AvailableQueue->empty()) {
960 bool Delayed = false;
962 SUnit *CurSU = AvailableQueue->pop();
964 if (CurSU->CycleBound <= CurCycle) {
965 SmallVector<unsigned, 4> LRegs;
966 if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
969 LRegsMap.insert(std::make_pair(CurSU, LRegs));
972 CurSU->isPending = true; // This SU is not in AvailableQueue right now.
973 NotReady.push_back(CurSU);
974 CurSU = AvailableQueue->pop();
977 // All candidates are delayed due to live physical reg dependencies.
// Try backtracking, code duplication, or inserting cross class copies
// to resolve it.
980 if (Delayed && !CurSU) {
981 for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
982 SUnit *TrySU = NotReady[i];
983 SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
// Try unscheduling up to the point where it's safe to schedule
// this node.
987 unsigned LiveCycle = CurCycle;
988 for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
989 unsigned Reg = LRegs[j];
990 unsigned LCycle = LiveRegCycles[Reg];
991 LiveCycle = std::min(LiveCycle, LCycle);
993 SUnit *OldSU = Sequence[LiveCycle];
994 if (!WillCreateCycle(TrySU, OldSU)) {
995 BacktrackBottomUp(TrySU, LiveCycle, CurCycle);
996 // Force the current node to be scheduled before the node that
997 // requires the physical reg dep.
998 if (OldSU->isAvailable) {
999 OldSU->isAvailable = false;
1000 AvailableQueue->remove(OldSU);
1002 AddPred(TrySU, OldSU, true, true);
// If one or more successors have been unscheduled, then the current
// node is no longer available. Schedule a successor that's now
// available instead.
1006 if (!TrySU->isAvailable)
1007 CurSU = AvailableQueue->pop();
1010 TrySU->isPending = false;
1011 NotReady.erase(NotReady.begin()+i);
// Can't backtrack. Try duplicating the nodes that produce these
// "expensive to copy" values to break the dependency. In case even
// that doesn't work, insert cross class copies.
1021 SUnit *TrySU = NotReady[0];
1022 SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
1023 assert(LRegs.size() == 1 && "Can't handle this yet!");
1024 unsigned Reg = LRegs[0];
1025 SUnit *LRDef = LiveRegDefs[Reg];
1026 SUnit *NewDef = CopyAndMoveSuccessors(LRDef);
1028 // Issue expensive cross register class copies.
1029 MVT VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII);
1030 const TargetRegisterClass *RC =
1031 TRI->getPhysicalRegisterRegClass(Reg, VT);
1032 const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
1034 assert(false && "Don't know how to copy this physical register!");
1037 SmallVector<SUnit*, 2> Copies;
1038 InsertCCCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
1039 DOUT << "Adding an edge from SU # " << TrySU->NodeNum
1040 << " to SU #" << Copies.front()->NodeNum << "\n";
1041 AddPred(TrySU, Copies.front(), true, true);
1042 NewDef = Copies.back();
1045 DOUT << "Adding an edge from SU # " << NewDef->NodeNum
1046 << " to SU #" << TrySU->NodeNum << "\n";
1047 LiveRegDefs[Reg] = NewDef;
1048 AddPred(NewDef, TrySU, true, true);
1049 TrySU->isAvailable = false;
1054 assert(false && "Unable to resolve live physical register dependencies!");
1059 // Add the nodes that aren't ready back onto the available list.
1060 for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
1061 NotReady[i]->isPending = false;
1062 // May no longer be available due to backtracking.
1063 if (NotReady[i]->isAvailable)
1064 AvailableQueue->push(NotReady[i]);
1069 Sequence.push_back(0);
1071 ScheduleNodeBottomUp(CurSU, CurCycle);
1072 Sequence.push_back(CurSU);
1077 // Reverse the order if it is bottom up.
1078 std::reverse(Sequence.begin(), Sequence.end());
1082 // Verify that all SUnits were scheduled.
1083 bool AnyNotSched = false;
1084 unsigned DeadNodes = 0;
1086 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1087 if (!SUnits[i].isScheduled) {
1088 if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
1093 cerr << "*** List scheduling failed! ***\n";
1094 SUnits[i].dump(&DAG);
1095 cerr << "has not been scheduled!\n";
1098 if (SUnits[i].NumSuccsLeft != 0) {
1100 cerr << "*** List scheduling failed! ***\n";
1101 SUnits[i].dump(&DAG);
1102 cerr << "has successors left!\n";
1106 for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
1109 assert(!AnyNotSched);
1110 assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
1111 "The number of nodes scheduled doesn't match the expected number!");
1115 //===----------------------------------------------------------------------===//
1116 // Top-Down Scheduling
1117 //===----------------------------------------------------------------------===//
1119 /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
1120 /// the AvailableQueue if the count reaches zero. Also update its cycle bound.
1121 void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain,
1122 unsigned CurCycle) {
1123 // FIXME: the distance between two nodes is not always == the predecessor's
1124 // latency. For example, the reader can very well read the register written
1125 // by the predecessor later than the issue cycle. It also depends on the
1126 // interrupt model (drain vs. freeze).
1127 SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency);
1129 --SuccSU->NumPredsLeft;
1132 if (SuccSU->NumPredsLeft < 0) {
1133 cerr << "*** List scheduling failed! ***\n";
1135 cerr << " has been released too many times!\n";
1140 if (SuccSU->NumPredsLeft == 0) {
1141 SuccSU->isAvailable = true;
1142 AvailableQueue->push(SuccSU);
1147 /// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
1148 /// count of its successors. If a successor pending count is zero, add it to
1149 /// the Available queue.
1150 void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
1151 DOUT << "*** Scheduling [" << CurCycle << "]: ";
1152 DEBUG(SU->dump(&DAG));
1153 SU->Cycle = CurCycle;
1155 AvailableQueue->ScheduledNode(SU);
1157 // Top down: release successors
1158 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1160 ReleaseSucc(I->Dep, I->isCtrl, CurCycle);
1161 SU->isScheduled = true;
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
1166 void ScheduleDAGRRList::ListScheduleTopDown() {
1167 unsigned CurCycle = 0;
1169 // All leaves to Available queue.
1170 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1171 // It is available if it has no predecessors.
1172 if (SUnits[i].Preds.empty()) {
1173 AvailableQueue->push(&SUnits[i]);
1174 SUnits[i].isAvailable = true;
1178 // While Available queue is not empty, grab the node with the highest
1179 // priority. If it is not ready put it back. Schedule the node.
1180 std::vector<SUnit*> NotReady;
1181 Sequence.reserve(SUnits.size());
1182 while (!AvailableQueue->empty()) {
1183 SUnit *CurSU = AvailableQueue->pop();
1184 while (CurSU && CurSU->CycleBound > CurCycle) {
1185 NotReady.push_back(CurSU);
1186 CurSU = AvailableQueue->pop();
1189 // Add the nodes that aren't ready back onto the available list.
1190 AvailableQueue->push_all(NotReady);
1194 Sequence.push_back(0);
1196 ScheduleNodeTopDown(CurSU, CurCycle);
1197 Sequence.push_back(CurSU);
1204 // Verify that all SUnits were scheduled.
1205 bool AnyNotSched = false;
1206 unsigned DeadNodes = 0;
1208 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1209 if (!SUnits[i].isScheduled) {
1210 if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
1215 cerr << "*** List scheduling failed! ***\n";
1216 SUnits[i].dump(&DAG);
1217 cerr << "has not been scheduled!\n";
1220 if (SUnits[i].NumPredsLeft != 0) {
1222 cerr << "*** List scheduling failed! ***\n";
1223 SUnits[i].dump(&DAG);
1224 cerr << "has predecessors left!\n";
1228 for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
1231 assert(!AnyNotSched);
1232 assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
1233 "The number of nodes scheduled doesn't match the expected number!");
1239 //===----------------------------------------------------------------------===//
1240 // RegReductionPriorityQueue Implementation
1241 //===----------------------------------------------------------------------===//
1243 // This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
1244 // to reduce register pressure.
1248 class RegReductionPriorityQueue;
1250 /// Sorting functions for the Available queue.
1251 struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1252 RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
1253 bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
1254 bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
1256 bool operator()(const SUnit* left, const SUnit* right) const;
1259 struct bu_ls_rr_fast_sort : public std::binary_function<SUnit*, SUnit*, bool>{
1260 RegReductionPriorityQueue<bu_ls_rr_fast_sort> *SPQ;
1261 bu_ls_rr_fast_sort(RegReductionPriorityQueue<bu_ls_rr_fast_sort> *spq)
1263 bu_ls_rr_fast_sort(const bu_ls_rr_fast_sort &RHS) : SPQ(RHS.SPQ) {}
1265 bool operator()(const SUnit* left, const SUnit* right) const;
1268 struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1269 RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
1270 td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
1271 td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
1273 bool operator()(const SUnit* left, const SUnit* right) const;
1275 } // end anonymous namespace
1277 static inline bool isCopyFromLiveIn(const SUnit *SU) {
1278 SDNode *N = SU->Node;
1279 return N && N->getOpcode() == ISD::CopyFromReg &&
1280 N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
1283 /// CalcNodeBUSethiUllmanNumber - Compute Sethi Ullman number for bottom up
1284 /// scheduling. Smaller number is the higher priority.
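///
/// For example (illustrative): a leaf gets 1; a node whose two non-chain
/// operands both have number 1 gets 1 + 1 = 2; a node whose operands have
/// numbers 2 and 1 keeps 2.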
1286 CalcNodeBUSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
1287 unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
1288 if (SethiUllmanNumber != 0)
1289 return SethiUllmanNumber;
1292 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1294 if (I->isCtrl) continue; // ignore chain preds
1295 SUnit *PredSU = I->Dep;
1296 unsigned PredSethiUllman = CalcNodeBUSethiUllmanNumber(PredSU, SUNumbers);
1297 if (PredSethiUllman > SethiUllmanNumber) {
1298 SethiUllmanNumber = PredSethiUllman;
1300 } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
1304 SethiUllmanNumber += Extra;
1306 if (SethiUllmanNumber == 0)
1307 SethiUllmanNumber = 1;
1309 return SethiUllmanNumber;
1312 /// CalcNodeTDSethiUllmanNumber - Compute Sethi Ullman number for top down
1313 /// scheduling. Smaller number is the higher priority.
1315 CalcNodeTDSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
1316 unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
1317 if (SethiUllmanNumber != 0)
1318 return SethiUllmanNumber;
1320 unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
1321 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1322 SethiUllmanNumber = 0xffff;
1323 else if (SU->NumSuccsLeft == 0)
1324 // If SU does not have a use, i.e. it doesn't produce a value that would
1325 // be consumed (e.g. store), then it terminates a chain of computation.
1326 // Give it a small SethiUllman number so it will be scheduled right before
// its predecessors, so that it doesn't lengthen their live ranges.
1328 SethiUllmanNumber = 0;
1329 else if (SU->NumPredsLeft == 0 &&
1330 (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
1331 SethiUllmanNumber = 0xffff;
1334 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1336 if (I->isCtrl) continue; // ignore chain preds
1337 SUnit *PredSU = I->Dep;
1338 unsigned PredSethiUllman = CalcNodeTDSethiUllmanNumber(PredSU, SUNumbers);
1339 if (PredSethiUllman > SethiUllmanNumber) {
1340 SethiUllmanNumber = PredSethiUllman;
1342 } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
1346 SethiUllmanNumber += Extra;
1349 return SethiUllmanNumber;
1355 class VISIBILITY_HIDDEN RegReductionPriorityQueue
1356 : public SchedulingPriorityQueue {
1357 PriorityQueue<SUnit*, std::vector<SUnit*>, SF> Queue;
1358 unsigned currentQueueId;
1361 RegReductionPriorityQueue() :
1362 Queue(SF(this)), currentQueueId(0) {}
1364 virtual void initNodes(std::vector<SUnit> &sunits) {}
1366 virtual void addNode(const SUnit *SU) {}
1368 virtual void updateNode(const SUnit *SU) {}
1370 virtual void releaseState() {}
1372 virtual unsigned getNodePriority(const SUnit *SU) const {
1376 unsigned size() const { return Queue.size(); }
1378 bool empty() const { return Queue.empty(); }
1380 void push(SUnit *U) {
1381 assert(!U->NodeQueueId && "Node in the queue already");
1382 U->NodeQueueId = ++currentQueueId;
1386 void push_all(const std::vector<SUnit *> &Nodes) {
1387 for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
1392 if (empty()) return NULL;
1393 SUnit *V = Queue.top();
1399 void remove(SUnit *SU) {
1400 assert(!Queue.empty() && "Queue is empty!");
1401 assert(SU->NodeQueueId != 0 && "Not in queue!");
1402 Queue.erase_one(SU);
1403 SU->NodeQueueId = 0;
1407 class VISIBILITY_HIDDEN BURegReductionPriorityQueue
1408 : public RegReductionPriorityQueue<bu_ls_rr_sort> {
1409 // SUnits - The SUnits for the current graph.
1410 const std::vector<SUnit> *SUnits;
1412 // SethiUllmanNumbers - The SethiUllman number for each node.
1413 std::vector<unsigned> SethiUllmanNumbers;
1415 const TargetInstrInfo *TII;
1416 const TargetRegisterInfo *TRI;
1417 ScheduleDAGRRList *scheduleDAG;
1420 explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii,
1421 const TargetRegisterInfo *tri)
1422 : TII(tii), TRI(tri), scheduleDAG(NULL) {}
1424 void initNodes(std::vector<SUnit> &sunits) {
1426 // Add pseudo dependency edges for two-address nodes.
1427 AddPseudoTwoAddrDeps();
1428 // Calculate node priorities.
1429 CalculateSethiUllmanNumbers();
1432 void addNode(const SUnit *SU) {
1433 unsigned SUSize = SethiUllmanNumbers.size();
1434 if (SUnits->size() > SUSize)
1435 SethiUllmanNumbers.resize(SUSize*2, 0);
1436 CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
1439 void updateNode(const SUnit *SU) {
1440 SethiUllmanNumbers[SU->NodeNum] = 0;
1441 CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
1444 void releaseState() {
1446 SethiUllmanNumbers.clear();
1449 unsigned getNodePriority(const SUnit *SU) const {
1450 assert(SU->NodeNum < SethiUllmanNumbers.size());
1451 unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
1452 if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU))
1453 // CopyFromReg should be close to its def because it restricts
1454 // allocation choices. But if it is a livein then perhaps we want it
1455 // closer to its uses so it can be coalesced.
1457 else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
// CopyToReg should be close to its uses to facilitate coalescing and
// avoid spilling.
1461 else if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
1462 Opc == TargetInstrInfo::INSERT_SUBREG)
1463 // EXTRACT_SUBREG / INSERT_SUBREG should be close to its use to
1464 // facilitate coalescing.
1466 else if (SU->NumSuccs == 0)
1467 // If SU does not have a use, i.e. it doesn't produce a value that would
1468 // be consumed (e.g. store), then it terminates a chain of computation.
1469 // Give it a large SethiUllman number so it will be scheduled right
// before its predecessors, so that it doesn't lengthen their live ranges.
1472 else if (SU->NumPreds == 0)
1473 // If SU does not have a def, schedule it close to its uses because it
1474 // does not lengthen any live ranges.
1477 return SethiUllmanNumbers[SU->NodeNum];
1480 void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
1481 scheduleDAG = scheduleDag;
1485 bool canClobber(const SUnit *SU, const SUnit *Op);
1486 void AddPseudoTwoAddrDeps();
1487 void CalculateSethiUllmanNumbers();
1491 class VISIBILITY_HIDDEN BURegReductionFastPriorityQueue
1492 : public RegReductionPriorityQueue<bu_ls_rr_fast_sort> {
1493 // SUnits - The SUnits for the current graph.
1494 const std::vector<SUnit> *SUnits;
1496 // SethiUllmanNumbers - The SethiUllman number for each node.
1497 std::vector<unsigned> SethiUllmanNumbers;
1499 explicit BURegReductionFastPriorityQueue() {}
1501 void initNodes(std::vector<SUnit> &sunits) {
1503 // Calculate node priorities.
1504 CalculateSethiUllmanNumbers();
1507 void addNode(const SUnit *SU) {
1508 unsigned SUSize = SethiUllmanNumbers.size();
1509 if (SUnits->size() > SUSize)
1510 SethiUllmanNumbers.resize(SUSize*2, 0);
1511 CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
1514 void updateNode(const SUnit *SU) {
1515 SethiUllmanNumbers[SU->NodeNum] = 0;
1516 CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
1519 void releaseState() {
1521 SethiUllmanNumbers.clear();
1524 unsigned getNodePriority(const SUnit *SU) const {
1525 return SethiUllmanNumbers[SU->NodeNum];
1529 void CalculateSethiUllmanNumbers();
1533 class VISIBILITY_HIDDEN TDRegReductionPriorityQueue
1534 : public RegReductionPriorityQueue<td_ls_rr_sort> {
1535 // SUnits - The SUnits for the current graph.
1536 const std::vector<SUnit> *SUnits;
1538 // SethiUllmanNumbers - The SethiUllman number for each node.
1539 std::vector<unsigned> SethiUllmanNumbers;
1542 TDRegReductionPriorityQueue() {}
1544 void initNodes(std::vector<SUnit> &sunits) {
1546 // Calculate node priorities.
1547 CalculateSethiUllmanNumbers();
1550 void addNode(const SUnit *SU) {
1551 unsigned SUSize = SethiUllmanNumbers.size();
1552 if (SUnits->size() > SUSize)
1553 SethiUllmanNumbers.resize(SUSize*2, 0);
1554 CalcNodeTDSethiUllmanNumber(SU, SethiUllmanNumbers);
1557 void updateNode(const SUnit *SU) {
1558 SethiUllmanNumbers[SU->NodeNum] = 0;
1559 CalcNodeTDSethiUllmanNumber(SU, SethiUllmanNumbers);
1562 void releaseState() {
1564 SethiUllmanNumbers.clear();
1567 unsigned getNodePriority(const SUnit *SU) const {
1568 assert(SU->NodeNum < SethiUllmanNumbers.size());
1569 return SethiUllmanNumbers[SU->NodeNum];
1573 void CalculateSethiUllmanNumbers();
1577 /// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
1579 static unsigned closestSucc(const SUnit *SU) {
1580 unsigned MaxCycle = 0;
1581 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1583 unsigned Cycle = I->Dep->Cycle;
// If there are a bunch of CopyToRegs stacked up, they should be considered
1585 // to be at the same position.
1586 if (I->Dep->Node && I->Dep->Node->getOpcode() == ISD::CopyToReg)
1587 Cycle = closestSucc(I->Dep)+1;
1588 if (Cycle > MaxCycle)
/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
1595 /// for scratch registers. Live-in operands and live-out results don't count
1596 /// since they are "fixed".
1597 static unsigned calcMaxScratches(const SUnit *SU) {
1598 unsigned Scratches = 0;
1599 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1601 if (I->isCtrl) continue; // ignore chain preds
1602 if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyFromReg)
1605 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1607 if (I->isCtrl) continue; // ignore chain succs
1608 if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyToReg)
1615 bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
1616 unsigned LPriority = SPQ->getNodePriority(left);
1617 unsigned RPriority = SPQ->getNodePriority(right);
1618 if (LPriority != RPriority)
1619 return LPriority > RPriority;
// Try to schedule the def and its use closer together when the Sethi-Ullman
// numbers are the same.  E.g., if
//   t1 = op t2, c1
//   t3 = op t4, c2
// have been scheduled and both t2 = op c3 and t4 = op c4 are now ready, then
// schedule t2 = op c3 first: its use (t1) was scheduled more recently, so
// placing t2 next to it creates more short live intervals.
1638 unsigned LDist = closestSucc(left);
1639 unsigned RDist = closestSucc(right);
1641 return LDist < RDist;
1643 // Intuitively, it's good to push down instructions whose results are
1644 // liveout so their long live ranges won't conflict with other values
1645 // which are needed inside the BB. Further prioritize liveout instructions
1646 // by the number of operands which are calculated within the BB.
1647 unsigned LScratch = calcMaxScratches(left);
1648 unsigned RScratch = calcMaxScratches(right);
1649 if (LScratch != RScratch)
1650 return LScratch > RScratch;
1652 if (left->Height != right->Height)
1653 return left->Height > right->Height;
1655 if (left->Depth != right->Depth)
1656 return left->Depth < right->Depth;
1658 if (left->CycleBound != right->CycleBound)
1659 return left->CycleBound > right->CycleBound;
1661 assert(left->NodeQueueId && right->NodeQueueId &&
1662 "NodeQueueId cannot be zero");
1663 return (left->NodeQueueId > right->NodeQueueId);
1667 bu_ls_rr_fast_sort::operator()(const SUnit *left, const SUnit *right) const {
1668 unsigned LPriority = SPQ->getNodePriority(left);
1669 unsigned RPriority = SPQ->getNodePriority(right);
1670 if (LPriority != RPriority)
1671 return LPriority > RPriority;
1672 assert(left->NodeQueueId && right->NodeQueueId &&
1673 "NodeQueueId cannot be zero");
1674 return (left->NodeQueueId > right->NodeQueueId);
1678 BURegReductionPriorityQueue::canClobber(const SUnit *SU, const SUnit *Op) {
1679 if (SU->isTwoAddress) {
1680 unsigned Opc = SU->Node->getTargetOpcode();
1681 const TargetInstrDesc &TID = TII->get(Opc);
1682 unsigned NumRes = TID.getNumDefs();
1683 unsigned NumOps = TID.getNumOperands() - NumRes;
1684 for (unsigned i = 0; i != NumOps; ++i) {
1685 if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
1686 SDNode *DU = SU->Node->getOperand(i).Val;
1687 if (DU->getNodeId() != -1 &&
1688 Op->OrigNode == &(*SUnits)[DU->getNodeId()])
/// hasCopyToRegUse - Return true if SU has a value successor that is a
/// CopyToReg node.
1699 static bool hasCopyToRegUse(SUnit *SU) {
1700 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1702 if (I->isCtrl) continue;
1703 SUnit *SuccSU = I->Dep;
1704 if (SuccSU->Node && SuccSU->Node->getOpcode() == ISD::CopyToReg)
1710 /// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
1711 /// physical register defs.
1712 static bool canClobberPhysRegDefs(SUnit *SuccSU, SUnit *SU,
1713 const TargetInstrInfo *TII,
1714 const TargetRegisterInfo *TRI) {
1715 SDNode *N = SuccSU->Node;
1716 unsigned NumDefs = TII->get(N->getTargetOpcode()).getNumDefs();
1717 const unsigned *ImpDefs = TII->get(N->getTargetOpcode()).getImplicitDefs();
1718 assert(ImpDefs && "Caller should check hasPhysRegDefs");
1719 const unsigned *SUImpDefs =
1720 TII->get(SU->Node->getTargetOpcode()).getImplicitDefs();
1723 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
1724 MVT VT = N->getValueType(i);
1725 if (VT == MVT::Flag || VT == MVT::Other)
1727 unsigned Reg = ImpDefs[i - NumDefs];
1728 for (;*SUImpDefs; ++SUImpDefs) {
1729 unsigned SUReg = *SUImpDefs;
1730 if (TRI->regsOverlap(Reg, SUReg))
1737 /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
1739 /// node (if it won't create a cycle) so the two-address one will be scheduled
1740 /// first (lower in the schedule). If both nodes are two-address, favor the
1741 /// one that has a CopyToReg use (more likely to be a loop induction update).
1742 /// If both are two-address, but one is commutable while the other is not
1743 /// commutable, favor the one that's not commutable.
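///
/// For example (illustrative): given A = op B, C where A is tied to B, and a
/// second node D that also reads B, placing A above D in the final order
/// would clobber B while D still needs it and force the allocator to insert
/// a copy; the pseudo edge keeps A below D instead.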
1744 void BURegReductionPriorityQueue::AddPseudoTwoAddrDeps() {
1745 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
1746 SUnit *SU = (SUnit *)&((*SUnits)[i]);
1747 if (!SU->isTwoAddress)
1750 SDNode *Node = SU->Node;
1751 if (!Node || !Node->isTargetOpcode() || SU->FlaggedNodes.size() > 0)
1754 unsigned Opc = Node->getTargetOpcode();
1755 const TargetInstrDesc &TID = TII->get(Opc);
1756 unsigned NumRes = TID.getNumDefs();
1757 unsigned NumOps = TID.getNumOperands() - NumRes;
1758 for (unsigned j = 0; j != NumOps; ++j) {
1759 if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) != -1) {
1760 SDNode *DU = SU->Node->getOperand(j).Val;
1761 if (DU->getNodeId() == -1)
1763 const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
1764 if (!DUSU) continue;
1765 for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
1766 E = DUSU->Succs.end(); I != E; ++I) {
1767 if (I->isCtrl) continue;
1768 SUnit *SuccSU = I->Dep;
1771 // Be conservative. Ignore if nodes aren't at roughly the same
1772 // depth and height.
1773 if (SuccSU->Height < SU->Height && (SU->Height - SuccSU->Height) > 1)
1775 if (!SuccSU->Node || !SuccSU->Node->isTargetOpcode())
1777 // Don't constrain nodes with physical register defs if the
1778 // predecessor can clobber them.
1779 if (SuccSU->hasPhysRegDefs) {
1780 if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
// Don't constrain extract_subreg / insert_subreg; these may be
// coalesced away. We want them close to their uses.
1785 unsigned SuccOpc = SuccSU->Node->getTargetOpcode();
1786 if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG ||
1787 SuccOpc == TargetInstrInfo::INSERT_SUBREG)
1789 if ((!canClobber(SuccSU, DUSU) ||
1790 (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
1791 (!SU->isCommutable && SuccSU->isCommutable)) &&
1792 !scheduleDAG->IsReachable(SuccSU, SU)) {
1793 DOUT << "Adding an edge from SU # " << SU->NodeNum
1794 << " to SU #" << SuccSU->NodeNum << "\n";
1795 scheduleDAG->AddPred(SU, SuccSU, true, true);
1803 /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1804 /// scheduling units.
1805 void BURegReductionPriorityQueue::CalculateSethiUllmanNumbers() {
1806 SethiUllmanNumbers.assign(SUnits->size(), 0);
1808 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1809 CalcNodeBUSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
1811 void BURegReductionFastPriorityQueue::CalculateSethiUllmanNumbers() {
1812 SethiUllmanNumbers.assign(SUnits->size(), 0);
1814 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1815 CalcNodeBUSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
1818 /// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
1819 /// predecessors of the successors of the SUnit SU. Stop when the provided
1820 /// limit is exceeded.
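///
/// It is used by the top-down priority function below: a node that is the
/// only remaining unscheduled predecessor of its successors (checked with a
/// limit of 1) gets a small bonus, since scheduling it makes those
/// successors ready.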
1821 static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
1824 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1826 SUnit *SuccSU = I->Dep;
1827 for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
1828 EE = SuccSU->Preds.end(); II != EE; ++II) {
1829 SUnit *PredSU = II->Dep;
1830 if (!PredSU->isScheduled)
1840 bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
1841 unsigned LPriority = SPQ->getNodePriority(left);
1842 unsigned RPriority = SPQ->getNodePriority(right);
1843 bool LIsTarget = left->Node && left->Node->isTargetOpcode();
1844 bool RIsTarget = right->Node && right->Node->isTargetOpcode();
1845 bool LIsFloater = LIsTarget && left->NumPreds == 0;
1846 bool RIsFloater = RIsTarget && right->NumPreds == 0;
1847 unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
1848 unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0;
1850 if (left->NumSuccs == 0 && right->NumSuccs != 0)
1852 else if (left->NumSuccs != 0 && right->NumSuccs == 0)
1859 if (left->NumSuccs == 1)
1861 if (right->NumSuccs == 1)
1864 if (LPriority+LBonus != RPriority+RBonus)
1865 return LPriority+LBonus < RPriority+RBonus;
1867 if (left->Depth != right->Depth)
1868 return left->Depth < right->Depth;
1870 if (left->NumSuccsLeft != right->NumSuccsLeft)
1871 return left->NumSuccsLeft > right->NumSuccsLeft;
1873 if (left->CycleBound != right->CycleBound)
1874 return left->CycleBound > right->CycleBound;
1876 assert(left->NodeQueueId && right->NodeQueueId &&
1877 "NodeQueueId cannot be zero");
1878 return (left->NodeQueueId > right->NodeQueueId);
1881 /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1882 /// scheduling units.
1883 void TDRegReductionPriorityQueue::CalculateSethiUllmanNumbers() {
1884 SethiUllmanNumbers.assign(SUnits->size(), 0);
1886 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1887 CalcNodeTDSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
1890 //===----------------------------------------------------------------------===//
1891 // Public Constructor Functions
1892 //===----------------------------------------------------------------------===//
1894 llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
1896 MachineBasicBlock *BB,
1899 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true, true,
1900 new BURegReductionFastPriorityQueue());
1902 const TargetInstrInfo *TII = DAG->getTarget().getInstrInfo();
1903 const TargetRegisterInfo *TRI = DAG->getTarget().getRegisterInfo();
1905 BURegReductionPriorityQueue *PQ = new BURegReductionPriorityQueue(TII, TRI);
1907 ScheduleDAGRRList *SD =
1908 new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(),true,false, PQ);
1909 PQ->setScheduleDAG(SD);
1913 llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
1915 MachineBasicBlock *BB,
1917 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), false, Fast,
1918 new TDRegReductionPriorityQueue());