1 //===----- ScheduleDAGList.cpp - Reg pressure reduction list scheduler ----===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Evan Cheng and is distributed under the
6 // University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements bottom-up and top-down register pressure reduction list
11 // schedulers, using standard algorithms. The basic approach uses a priority
12 // queue of available nodes to schedule. One at a time, nodes are taken from
13 // the priority queue (thus in priority order), checked for legality to
14 // schedule, and emitted if legal.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "sched"
19 #include "llvm/CodeGen/ScheduleDAG.h"
20 #include "llvm/CodeGen/SchedulerRegistry.h"
21 #include "llvm/CodeGen/SSARegMap.h"
22 #include "llvm/Target/MRegisterInfo.h"
23 #include "llvm/Target/TargetData.h"
24 #include "llvm/Target/TargetMachine.h"
25 #include "llvm/Target/TargetInstrInfo.h"
26 #include "llvm/Support/Debug.h"
27 #include "llvm/Support/Compiler.h"
28 #include "llvm/ADT/Statistic.h"
32 #include "llvm/Support/CommandLine.h"
// Command-line registration of the two register-reduction list schedulers:
// "list-burr" selects bottom-up scheduling, "list-tdrr" selects top-down.
// The factory functions are defined at the bottom of this file.
35 static RegisterScheduler
36 burrListDAGScheduler("list-burr",
37 " Bottom-up register reduction list scheduling",
38 createBURRListDAGScheduler);
39 static RegisterScheduler
40 tdrListrDAGScheduler("list-tdrr",
41 " Top-down register reduction list scheduling",
42 createTDRRListDAGScheduler);
45 //===----------------------------------------------------------------------===//
46 /// ScheduleDAGRRList - The actual register reduction list scheduler
47 /// implementation. This supports both top-down and bottom-up scheduling.
// NOTE(review): the embedded original line numbers skip here (47 -> 50,
// 52 -> 56, 65 -> 68, ...), so lines such as the anonymous-namespace opener,
// the isBottomUp member declaration, access specifiers, the Schedule()
// declaration and closing braces are missing from this extract — confirm
// against the upstream file before editing.
50 class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAG {
52 /// isBottomUp - This is true if the scheduling problem is bottom-up, false if
56 /// AvailableQueue - The priority queue to use for the available SUnits.
58 SchedulingPriorityQueue *AvailableQueue;
// Constructor takes ownership of `availqueue`; it is deleted in ~ScheduleDAGRRList.
61 ScheduleDAGRRList(SelectionDAG &dag, MachineBasicBlock *bb,
62 const TargetMachine &tm, bool isbottomup,
63 SchedulingPriorityQueue *availqueue)
64 : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup),
65 AvailableQueue(availqueue) {
68 ~ScheduleDAGRRList() {
69 delete AvailableQueue;
// Scheduling helpers: Release* decrement dependence counts and push newly
// ready units onto AvailableQueue; ScheduleNode* commit a unit to the
// schedule; ListSchedule* drive the two main loops.
75 void ReleasePred(SUnit *PredSU, bool isChain, unsigned CurCycle);
76 void ReleaseSucc(SUnit *SuccSU, bool isChain, unsigned CurCycle);
77 void ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle);
78 void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
79 void ListScheduleTopDown();
80 void ListScheduleBottomUp();
81 void CommuteNodesToReducePressure();
83 } // end anonymous namespace
86 /// Schedule - Schedule the DAG using list scheduling.
87 void ScheduleDAGRRList::Schedule() {
88 DEBUG(std::cerr << "********** List Scheduling **********\n");
90 // Build scheduling units.
// (The call that actually builds SUnits is on a line missing from this
// extract — original lines 91-92 — verify against upstream.)
93 DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
94 SUnits[su].dumpAll(&DAG));
98 AvailableQueue->initNodes(SUnits);
100 // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
// Presumably selected by `isBottomUp` on the missing line 101 — confirm.
102 ListScheduleBottomUp();
104 ListScheduleTopDown();
106 AvailableQueue->releaseState();
// After the schedule is fixed, commutable two-address nodes may be flagged
// for commuting to reduce register pressure.
108 CommuteNodesToReducePressure();
110 DEBUG(std::cerr << "*** Final schedule ***\n");
111 DEBUG(dumpSchedule());
112 DEBUG(std::cerr << "\n");
114 // Emit in scheduled order
118 /// CommuteNodesToReducePressure - Is a node is two-address and commutable, and
119 /// it is not the last use of its first operand, add it to the CommuteSet if
120 /// possible. It will be commuted when it is translated to a MI.
121 void ScheduleDAGRRList::CommuteNodesToReducePressure() {
// Walk the final schedule backwards, tracking which operand SUnits have
// already been seen (i.e. have a use later in the schedule than the node
// currently being examined).
122 std::set<SUnit *> OperandSeen;
123 for (unsigned i = Sequence.size()-1; i != 0; --i) { // Ignore first node.
124 SUnit *SU = Sequence[i];
126 if (SU->isTwoAddress && SU->isCommutable) {
127 SDNode *OpN = SU->Node->getOperand(0).Val;
128 SUnit *OpSU = SUnitMap[OpN];
129 if (OpSU && OperandSeen.count(OpSU) == 1) {
130 // Ok, so SU is not the last use of OpSU, but SU is two-address so
131 // it will clobber OpSU. Try to commute it if possible.
132 bool DoCommute = true;
133 for (unsigned j = 1, e = SU->Node->getNumOperands(); j != e; ++j) {
134 OpN = SU->Node->getOperand(j).Val;
135 OpSU = SUnitMap[OpN];
// If another operand also has a later use, commuting would just move the
// problem; the lines clearing DoCommute / breaking out (137-141) are
// missing from this extract.
136 if (OpSU && OperandSeen.count(OpSU) == 1) {
142 CommuteSet.insert(SU->Node);
// Record this node's predecessors as "seen" for nodes earlier in the
// schedule (loop bounds line 147-148 missing from this extract).
146 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
149 OperandSeen.insert(I->first);
154 //===----------------------------------------------------------------------===//
155 // Bottom-Up Scheduling
156 //===----------------------------------------------------------------------===//
158 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
159 /// the Available queue is the count reaches zero. Also update its cycle bound.
160 void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain,
162 // FIXME: the distance between two nodes is not always == the predecessor's
163 // latency. For example, the reader can very well read the register written
164 // by the predecessor later than the issue cycle. It also depends on the
165 // interrupt model (drain vs. freeze).
166 PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);
// Chain (non-data) edges decrement NumChainSuccsLeft instead; the guarding
// `if (!isChain)` / `else` lines (167-170) are missing from this extract.
169 PredSU->NumSuccsLeft--;
171 PredSU->NumChainSuccsLeft--;
// Sanity check: a count going negative means the same edge was released
// twice. The dump/exit lines following this diagnostic are missing here.
174 if (PredSU->NumSuccsLeft < 0 || PredSU->NumChainSuccsLeft < 0) {
175 std::cerr << "*** List scheduling failed! ***\n";
177 std::cerr << " has been released too many times!\n";
// Once all (data + chain) successors are scheduled the predecessor is ready.
182 if ((PredSU->NumSuccsLeft + PredSU->NumChainSuccsLeft) == 0) {
183 // EntryToken has to go last! Special case it here.
184 if (PredSU->Node->getOpcode() != ISD::EntryToken) {
185 PredSU->isAvailable = true;
186 AvailableQueue->push(PredSU);
191 /// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
192 /// count of its predecessors. If a predecessor pending count is zero, add it to
193 /// the Available queue.
194 void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
195 DEBUG(std::cerr << "*** Scheduling [" << CurCycle << "]: ");
196 DEBUG(SU->dump(&DAG));
197 SU->Cycle = CurCycle;
// Let the priority queue update its state for the just-scheduled node.
199 AvailableQueue->ScheduledNode(SU);
200 Sequence.push_back(SU);
202 // Bottom up: release predecessors
// (Loop-header continuation / increment lines 204 are missing from extract.)
203 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
205 ReleasePred(I->first, I->second, CurCycle);
206 SU->isScheduled = true;
209 /// isReady - True if node's lower cycle bound is less or equal to the current
210 /// scheduling cycle. Always true if all nodes have uniform latency 1.
211 static inline bool isReady(SUnit *SU, unsigned CurCycle) {
212 return SU->CycleBound <= CurCycle;
215 /// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
217 void ScheduleDAGRRList::ListScheduleBottomUp() {
218 unsigned CurCycle = 0;
219 // Add root to Available queue.
220 AvailableQueue->push(SUnitMap[DAG.getRoot().Val]);
222 // While Available queue is not empty, grab the node with the highest
223 // priority. If it is not ready put it back. Schedule the node.
224 std::vector<SUnit*> NotReady;
225 while (!AvailableQueue->empty()) {
// Pop until we find a unit whose CycleBound permits scheduling this cycle,
// stashing not-yet-ready units in NotReady.
226 SUnit *CurNode = AvailableQueue->pop();
227 while (CurNode && !isReady(CurNode, CurCycle)) {
228 NotReady.push_back(CurNode);
229 CurNode = AvailableQueue->pop();
232 // Add the nodes that aren't ready back onto the available list.
233 AvailableQueue->push_all(NotReady);
// (NotReady.clear() and the null-check around the call below — original
// lines 234-236 — are missing from this extract.)
237 ScheduleNodeBottomUp(CurNode, CurCycle);
241 // Add entry node last
242 if (DAG.getEntryNode().Val != DAG.getRoot().Val) {
243 SUnit *Entry = SUnitMap[DAG.getEntryNode().Val];
244 Sequence.push_back(Entry);
247 // Reverse the order if it is bottom up.
248 std::reverse(Sequence.begin(), Sequence.end());
252 // Verify that all SUnits were scheduled.
// Debug-build check (presumably under #ifndef NDEBUG — guard lines missing):
// every unit must have released all its successors.
253 bool AnyNotSched = false;
254 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
255 if (SUnits[i].NumSuccsLeft != 0 || SUnits[i].NumChainSuccsLeft != 0) {
257 std::cerr << "*** List scheduling failed! ***\n";
258 SUnits[i].dump(&DAG);
259 std::cerr << "has not been scheduled!\n";
263 assert(!AnyNotSched);
267 //===----------------------------------------------------------------------===//
268 // Top-Down Scheduling
269 //===----------------------------------------------------------------------===//
271 /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
272 /// the PendingQueue if the count reaches zero.
273 void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain,
275 // FIXME: the distance between two nodes is not always == the predecessor's
276 // latency. For example, the reader can very well read the register written
277 // by the predecessor later than the issue cycle. It also depends on the
278 // interrupt model (drain vs. freeze).
279 SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency);
// Chain edges decrement NumChainPredsLeft instead; the `if (!isChain)` /
// `else` guard lines (280-283) are missing from this extract.
282 SuccSU->NumPredsLeft--;
284 SuccSU->NumChainPredsLeft--;
// Sanity check mirroring ReleasePred: negative counts mean double release.
287 if (SuccSU->NumPredsLeft < 0 || SuccSU->NumChainPredsLeft < 0) {
288 std::cerr << "*** List scheduling failed! ***\n";
290 std::cerr << " has been released too many times!\n";
// All predecessors scheduled -> successor becomes available.
295 if ((SuccSU->NumPredsLeft + SuccSU->NumChainPredsLeft) == 0) {
296 SuccSU->isAvailable = true;
297 AvailableQueue->push(SuccSU);
302 /// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
303 /// count of its successors. If a successor pending count is zero, add it to
304 /// the Available queue.
305 void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
306 DEBUG(std::cerr << "*** Scheduling [" << CurCycle << "]: ");
307 DEBUG(SU->dump(&DAG));
308 SU->Cycle = CurCycle;
// Let the priority queue update its state for the just-scheduled node.
310 AvailableQueue->ScheduledNode(SU);
311 Sequence.push_back(SU);
313 // Top down: release successors
// (Loop-header continuation line 315 is missing from this extract.)
314 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
316 ReleaseSucc(I->first, I->second, CurCycle);
317 SU->isScheduled = true;
// ListScheduleTopDown - Main loop of top-down list scheduling: seed the queue
// with predecessor-free units, emit the entry node first, then repeatedly
// schedule the highest-priority ready unit.
320 void ScheduleDAGRRList::ListScheduleTopDown() {
321 unsigned CurCycle = 0;
322 SUnit *Entry = SUnitMap[DAG.getEntryNode().Val];
324 // All leaves to Available queue.
325 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
326 // It is available if it has no predecessors.
327 if (SUnits[i].Preds.size() == 0 && &SUnits[i] != Entry) {
328 AvailableQueue->push(&SUnits[i]);
329 SUnits[i].isAvailable = true;
333 // Emit the entry node first.
334 ScheduleNodeTopDown(Entry, CurCycle);
337 // While Available queue is not empty, grab the node with the highest
338 // priority. If it is not ready put it back. Schedule the node.
339 std::vector<SUnit*> NotReady;
340 while (!AvailableQueue->empty()) {
// Pop until a unit is ready this cycle, stashing the rest in NotReady.
341 SUnit *CurNode = AvailableQueue->pop();
342 while (CurNode && !isReady(CurNode, CurCycle)) {
343 NotReady.push_back(CurNode);
344 CurNode = AvailableQueue->pop();
347 // Add the nodes that aren't ready back onto the available list.
348 AvailableQueue->push_all(NotReady);
// (NotReady.clear() and the null-check — original lines 349-351 — are
// missing from this extract.)
352 ScheduleNodeTopDown(CurNode, CurCycle);
358 // Verify that all SUnits were scheduled.
// Debug-build check (guard lines missing from extract).
359 bool AnyNotSched = false;
360 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
361 if (!SUnits[i].isScheduled) {
363 std::cerr << "*** List scheduling failed! ***\n";
364 SUnits[i].dump(&DAG);
365 std::cerr << "has not been scheduled!\n";
369 assert(!AnyNotSched);
375 //===----------------------------------------------------------------------===//
376 // RegReductionPriorityQueue Implementation
377 //===----------------------------------------------------------------------===//
379 // This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
380 // to reduce register pressure.
// Forward declaration of the priority queue so the comparison functors below
// can hold a back-pointer to it. NOTE(review): the `template<class SF>`
// header (original line 383) is missing from this extract.
384 class RegReductionPriorityQueue;
386 /// Sorting functions for the Available queue.
// Each functor keeps a pointer to its owning queue (SPQ) so operator() can
// query per-node Sethi-Ullman numbers during comparison.
387 struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
388 RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
389 bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
390 bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
392 bool operator()(const SUnit* left, const SUnit* right) const;
// Top-down counterpart of the functor above.
395 struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
396 RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
397 td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
398 td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
400 bool operator()(const SUnit* left, const SUnit* right) const;
402 } // end anonymous namespace
// Base priority queue shared by the BU and TD variants; SF is the comparison
// functor type. NOTE(review): the `template<class SF>` header, access
// specifiers, and pop()'s Queue.pop()/return lines are missing from this
// extract (original numbering skips 408 -> 411, 433 -> 440).
406 class VISIBILITY_HIDDEN RegReductionPriorityQueue
407 : public SchedulingPriorityQueue {
408 std::priority_queue<SUnit*, std::vector<SUnit*>, SF> Queue;
411 RegReductionPriorityQueue() :
// Default hooks are no-ops; subclasses override to compute priorities.
414 virtual void initNodes(std::vector<SUnit> &sunits) {}
415 virtual void releaseState() {}
417 virtual int getSethiUllmanNumber(unsigned NodeNum) const {
421 bool empty() const { return Queue.empty(); }
423 void push(SUnit *U) {
426 void push_all(const std::vector<SUnit *> &Nodes) {
427 for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
428 Queue.push(Nodes[i]);
// pop() returns the highest-priority unit, or NULL when empty.
432 if (empty()) return NULL;
433 SUnit *V = Queue.top();
// Bottom-up register-reduction queue: adds pseudo two-address dependencies
// and computes Sethi-Ullman numbers per node. NOTE(review): template header,
// access specifiers, and the `SUnits = &sunits;` assignment in initNodes are
// on lines missing from this extract.
440 class VISIBILITY_HIDDEN BURegReductionPriorityQueue
441 : public RegReductionPriorityQueue<SF> {
442 // SUnits - The SUnits for the current graph.
443 const std::vector<SUnit> *SUnits;
445 // SethiUllmanNumbers - The SethiUllman number for each node.
446 std::vector<int> SethiUllmanNumbers;
449 BURegReductionPriorityQueue() {}
451 void initNodes(std::vector<SUnit> &sunits) {
453 // Add pseudo dependency edges for two-address nodes.
454 AddPseudoTwoAddrDeps();
455 // Calculate node priorities.
456 CalculatePriorities();
// Drop per-graph state between scheduling regions.
459 void releaseState() {
461 SethiUllmanNumbers.clear();
464 int getSethiUllmanNumber(unsigned NodeNum) const {
465 assert(NodeNum < SethiUllmanNumbers.size());
466 return SethiUllmanNumbers[NodeNum];
// Private helpers (implemented out-of-line below).
470 void AddPseudoTwoAddrDeps();
471 void CalculatePriorities();
472 int CalcNodePriority(const SUnit *SU);
// Top-down register-reduction queue; like the BU variant but without the
// pseudo two-address dependency pass. NOTE(review): template header, access
// specifiers, and the `SUnits = &sunits;` assignment are on lines missing
// from this extract.
477 class TDRegReductionPriorityQueue : public RegReductionPriorityQueue<SF> {
478 // SUnits - The SUnits for the current graph.
479 const std::vector<SUnit> *SUnits;
481 // SethiUllmanNumbers - The SethiUllman number for each node.
482 std::vector<int> SethiUllmanNumbers;
485 TDRegReductionPriorityQueue() {}
487 void initNodes(std::vector<SUnit> &sunits) {
489 // Calculate node priorities.
490 CalculatePriorities();
// Drop per-graph state between scheduling regions.
493 void releaseState() {
495 SethiUllmanNumbers.clear();
498 int getSethiUllmanNumber(unsigned NodeNum) const {
499 assert(NodeNum < SethiUllmanNumbers.size());
500 return SethiUllmanNumbers[NodeNum];
// Private helpers (implemented out-of-line below).
504 void CalculatePriorities();
505 int CalcNodePriority(const SUnit *SU);
// isFloater - Heuristic: a target node with no predecessors (e.g. a load from
// a constant address), or exactly one non-trivial predecessor, "floats" and
// can be scheduled early without increasing register pressure.
// NOTE(review): the `return true/false` lines (512, 522-528) are missing from
// this extract — confirm the exact return structure against upstream.
509 static bool isFloater(const SUnit *SU) {
510 if (SU->Node->isTargetOpcode()) {
511 if (SU->NumPreds == 0)
513 if (SU->NumPreds == 1) {
514 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
// I->second marks a chain edge; chain preds are ignored here.
516 if (I->second) continue;
518 SUnit *PredSU = I->first;
519 unsigned Opc = PredSU->Node->getOpcode();
520 if (Opc != ISD::EntryToken && Opc != ISD::TokenFactor &&
521 Opc != ISD::CopyToReg)
// isSimpleFloaterUse - True (presumably) when every non-chain predecessor of
// SU is itself a floater; return lines are missing from this extract.
530 static bool isSimpleFloaterUse(const SUnit *SU) {
532 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
// Skip chain predecessors (I->second marks a chain edge).
534 if (I->second) continue;
537 if (!isFloater(I->first))
// Bottom-up priority comparison. Returns true when `left` should come AFTER
// `right` in the priority queue (std::priority_queue convention).
// NOTE(review): the LBonus/RBonus declarations, the return statements inside
// each branch, and the final return are on lines missing from this extract.
544 bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
545 unsigned LeftNum = left->NodeNum;
546 unsigned RightNum = right->NodeNum;
547 bool LIsTarget = left->Node->isTargetOpcode();
548 bool RIsTarget = right->Node->isTargetOpcode();
549 int LPriority = SPQ->getSethiUllmanNumber(LeftNum);
550 int RPriority = SPQ->getSethiUllmanNumber(RightNum);
554 // Schedule floaters (e.g. load from some constant address) and those nodes
555 // with a single predecessor each first. They maintain / reduce register
557 if (isFloater(left) || isSimpleFloaterUse(left))
559 if (isFloater(right) || isSimpleFloaterUse(right))
562 // Special tie breaker: if two nodes share a operand, the one that use it
563 // as a def&use operand is preferred.
564 if (LIsTarget && RIsTarget) {
565 if (left->isTwoAddress && !right->isTwoAddress) {
566 SDNode *DUNode = left->Node->getOperand(0).Val;
567 if (DUNode->isOperand(right->Node))
570 if (!left->isTwoAddress && right->isTwoAddress) {
571 SDNode *DUNode = right->Node->getOperand(0).Val;
572 if (DUNode->isOperand(left->Node))
// Final ordering: adjusted Sethi-Ullman priority, then Height, then Depth,
// then CycleBound as successive tie-breakers.
577 if (LPriority+LBonus < RPriority+RBonus)
579 else if (LPriority+LBonus == RPriority+RBonus)
580 if (left->Height > right->Height)
582 else if (left->Height == right->Height)
583 if (left->Depth < right->Depth)
585 else if (left->Depth == right->Depth)
586 if (left->CycleBound > right->CycleBound)
// isCopyFromLiveIn - A CopyFromReg whose last operand is not a Flag reads a
// physical live-in register (not a value flagged from a preceding node).
591 static inline bool isCopyFromLiveIn(const SUnit *SU) {
592 SDNode *N = SU->Node;
593 return N->getOpcode() == ISD::CopyFromReg &&
594 N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
597 // FIXME: This is probably too slow!
// Recursive DFS over predecessor edges; sets Reached=true if TargetSU is
// reachable from SU. Visited prevents exponential re-exploration.
598 static void isReachable(SUnit *SU, SUnit *TargetSU,
599 std::set<SUnit *> &Visited, bool &Reached) {
601 if (SU == TargetSU) {
// insert().second is false when SU was already visited — prune.
605 if (!Visited.insert(SU).second) return;
607 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); I != E;
609 isReachable(I->first, TargetSU, Visited, Reached);
// Convenience wrapper around the recursive DFS above.
// NOTE(review): the `return Reached;` line (616) is missing from this extract.
612 static bool isReachable(SUnit *SU, SUnit *TargetSU) {
613 std::set<SUnit *> Visited;
614 bool Reached = false;
615 isReachable(SU, TargetSU, Visited, Reached);
// getDefUsePredecessor - Find the (non-chain) predecessor SUnit that produces
// SU's operand 0, i.e. the value a two-address node will clobber.
// NOTE(review): return lines (626-630) are missing from this extract.
619 static SUnit *getDefUsePredecessor(SUnit *SU) {
620 SDNode *DU = SU->Node->getOperand(0).Val;
621 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
623 if (I->second) continue; // ignore chain preds
624 SUnit *PredSU = I->first;
625 if (PredSU->Node == DU)
// canClobber - True when SU is two-address and Op is its def&use predecessor,
// i.e. scheduling SU will overwrite Op's value. (The non-two-address return
// line is missing from this extract.)
633 static bool canClobber(SUnit *SU, SUnit *Op) {
634 if (SU->isTwoAddress)
635 return Op == getDefUsePredecessor(SU);
639 /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
640 /// it as a def&use operand. Add a pseudo control edge from it to the other
641 /// node (if it won't create a cycle) so the two-address one will be scheduled
642 /// first (lower in the schedule).
// NOTE(review): the `template<class SF>` header (643) and several condition/
// brace lines are missing from this extract.
644 void BURegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
645 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
// const_cast-by-C-cast: SUnits is stored const but edges must be mutated.
646 SUnit *SU = (SUnit *)&((*SUnits)[i]);
647 SDNode *Node = SU->Node;
648 if (!Node->isTargetOpcode())
651 if (SU->isTwoAddress) {
652 SUnit *DUSU = getDefUsePredecessor(SU);
// Examine every other (non-chain) user of the def&use value.
655 for (SUnit::succ_iterator I = DUSU->Succs.begin(), E = DUSU->Succs.end();
657 if (I->second) continue;
658 SUnit *SuccSU = I->first;
// Part of this condition (original line 659, presumably SuccSU != SU) is
// missing from this extract.
660 (!canClobber(SuccSU, DUSU) ||
661 (!SU->isCommutable && SuccSU->isCommutable))){
662 if (SuccSU->Depth == SU->Depth && !isReachable(SuccSU, SU)) {
663 DEBUG(std::cerr << "Adding an edge from SU # " << SU->NodeNum
664 << " to SU #" << SuccSU->NodeNum << "\n");
// addPred/addSucc return true when the edge is new; keep the chain
// counts in sync with the added pseudo edge.
665 if (SU->addPred(SuccSU, true))
666 SU->NumChainPredsLeft++;
667 if (SuccSU->addSucc(SU, true))
668 SuccSU->NumChainSuccsLeft++;
676 /// CalcNodePriority - Priority is the Sethi Ullman number.
677 /// Smaller number is the higher priority.
// NOTE(review): the `template<class SF>` header (678), the final `else`
// branch opener, and the `Extra` declaration/increment lines are missing
// from this extract.
679 int BURegReductionPriorityQueue<SF>::CalcNodePriority(const SUnit *SU) {
// Memoized: 0 means "not yet computed".
680 int &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
681 if (SethiUllmanNumber != 0)
682 return SethiUllmanNumber;
684 unsigned Opc = SU->Node->getOpcode();
685 if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU))
686 // CopyFromReg should be close to its def because it restricts allocation
687 // choices. But if it is a livein then perhaps we want it closer to the
688 // uses so it can be coalesced.
689 SethiUllmanNumber = INT_MIN + 10;
690 else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
691 // CopyToReg should be close to its uses to facilitate coalescing and avoid
693 SethiUllmanNumber = INT_MAX - 10;
694 else if (SU->NumSuccsLeft == 0)
695 // If SU does not have a use, i.e. it doesn't produce a value that would
696 // be consumed (e.g. store), then it terminates a chain of computation.
697 // Give it a small SethiUllman number so it will be scheduled right before its
698 // predecessors that it doesn't lengthen their live ranges.
699 SethiUllmanNumber = INT_MIN + 10;
700 else if (SU->NumPredsLeft == 0)
701 // If SU does not have a def, schedule it close to its uses because it does
702 // not lengthen any live ranges.
703 SethiUllmanNumber = INT_MAX - 10;
// General case (the `else {` opener is missing here): recurse over non-chain
// predecessors taking the max, bumping Extra on ties per Sethi-Ullman.
706 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
708 if (I->second) continue; // ignore chain preds
709 SUnit *PredSU = I->first;
710 int PredSethiUllman = CalcNodePriority(PredSU);
711 if (PredSethiUllman > SethiUllmanNumber) {
712 SethiUllmanNumber = PredSethiUllman;
714 } else if (PredSethiUllman == SethiUllmanNumber && !I->second)
718 SethiUllmanNumber += Extra;
721 return SethiUllmanNumber;
724 /// CalculatePriorities - Calculate priorities of all scheduling units.
// Resets the memo table to 0 ("unknown"), then computes each unit's number;
// CalcNodePriority recurses, so later calls mostly hit the memo.
// NOTE(review): the `template<class SF>` header (725) is missing here.
726 void BURegReductionPriorityQueue<SF>::CalculatePriorities() {
727 SethiUllmanNumbers.assign(SUnits->size(), 0);
729 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
730 CalcNodePriority(&(*SUnits)[i]);
// SumOfUnscheduledPredsOfSuccs - Count, over all of SU's successors, their
// predecessors that are not yet scheduled. NOTE(review): the accumulator
// declaration, increment and return lines (734, 742-748) are missing from
// this extract.
733 static unsigned SumOfUnscheduledPredsOfSuccs(const SUnit *SU) {
735 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
737 SUnit *SuccSU = I->first;
738 for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
739 EE = SuccSU->Preds.end(); II != EE; ++II) {
740 SUnit *PredSU = II->first;
741 if (!PredSU->isScheduled)
// Top-down priority comparison. Returns true when `left` should come AFTER
// `right` in the priority queue (std::priority_queue convention).
// NOTE(review): the return statements inside each branch and the floater
// handling around lines 780-790 are on lines missing from this extract.
751 bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
752 unsigned LeftNum = left->NodeNum;
753 unsigned RightNum = right->NodeNum;
754 int LPriority = SPQ->getSethiUllmanNumber(LeftNum);
755 int RPriority = SPQ->getSethiUllmanNumber(RightNum);
756 bool LIsTarget = left->Node->isTargetOpcode();
757 bool RIsTarget = right->Node->isTargetOpcode();
758 bool LIsFloater = LIsTarget && left->NumPreds == 0;
759 bool RIsFloater = RIsTarget && right->NumPreds == 0;
// Bonus favors nodes whose successors are one unscheduled pred away from
// becoming ready.
760 unsigned LBonus = (SumOfUnscheduledPredsOfSuccs(left) == 1) ? 2 : 0;
761 unsigned RBonus = (SumOfUnscheduledPredsOfSuccs(right) == 1) ? 2 : 0;
763 if (left->NumSuccs == 0 && right->NumSuccs != 0)
765 else if (left->NumSuccs != 0 && right->NumSuccs == 0)
768 // Special tie breaker: if two nodes share a operand, the one that use it
769 // as a def&use operand is preferred.
770 if (LIsTarget && RIsTarget) {
771 if (left->isTwoAddress && !right->isTwoAddress) {
772 SDNode *DUNode = left->Node->getOperand(0).Val;
773 if (DUNode->isOperand(right->Node))
776 if (!left->isTwoAddress && right->isTwoAddress) {
777 SDNode *DUNode = right->Node->getOperand(0).Val;
778 if (DUNode->isOperand(left->Node))
786 if (left->NumSuccs == 1)
788 if (right->NumSuccs == 1)
// Final ordering: adjusted priority, then Depth, NumSuccsLeft, CycleBound
// as successive tie-breakers.
791 if (LPriority+LBonus < RPriority+RBonus)
793 else if (LPriority == RPriority)
794 if (left->Depth < right->Depth)
796 else if (left->Depth == right->Depth)
797 if (left->NumSuccsLeft > right->NumSuccsLeft)
799 else if (left->NumSuccsLeft == right->NumSuccsLeft)
800 if (left->CycleBound > right->CycleBound)
805 /// CalcNodePriority - Priority is the Sethi Ullman number.
806 /// Smaller number is the higher priority.
// NOTE(review): the `template<class SF>` header (807), the final `else`
// opener and the `Extra` declaration/increment lines are missing from this
// extract.
808 int TDRegReductionPriorityQueue<SF>::CalcNodePriority(const SUnit *SU) {
// Memoized: 0 means "not yet computed".
809 int &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
810 if (SethiUllmanNumber != 0)
811 return SethiUllmanNumber;
813 unsigned Opc = SU->Node->getOpcode();
814 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
815 SethiUllmanNumber = INT_MAX - 10;
816 else if (SU->NumSuccsLeft == 0)
817 // If SU does not have a use, i.e. it doesn't produce a value that would
818 // be consumed (e.g. store), then it terminates a chain of computation.
819 // Give it a small SethiUllman number so it will be scheduled right before its
820 // predecessors that it doesn't lengthen their live ranges.
821 SethiUllmanNumber = INT_MIN + 10;
822 else if (SU->NumPredsLeft == 0 &&
823 (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
824 SethiUllmanNumber = 1;
// General case (the `else {` opener is missing here): recurse over non-chain
// predecessors taking the max, bumping Extra on ties per Sethi-Ullman.
827 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
829 if (I->second) continue; // ignore chain preds
830 SUnit *PredSU = I->first;
831 int PredSethiUllman = CalcNodePriority(PredSU);
832 if (PredSethiUllman > SethiUllmanNumber) {
833 SethiUllmanNumber = PredSethiUllman;
835 } else if (PredSethiUllman == SethiUllmanNumber && !I->second)
839 SethiUllmanNumber += Extra;
842 return SethiUllmanNumber;
845 /// CalculatePriorities - Calculate priorities of all scheduling units.
// Resets the memo table to 0 ("unknown"), then computes each unit's number;
// CalcNodePriority recurses, so later calls mostly hit the memo.
// NOTE(review): the `template<class SF>` header (846) is missing here.
847 void TDRegReductionPriorityQueue<SF>::CalculatePriorities() {
848 SethiUllmanNumbers.assign(SUnits->size(), 0);
850 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
851 CalcNodePriority(&(*SUnits)[i]);
854 //===----------------------------------------------------------------------===//
855 // Public Constructor Functions
856 //===----------------------------------------------------------------------===//
// Factory for the bottom-up register-reduction scheduler ("list-burr").
// Caller owns the returned scheduler; the scheduler owns the queue.
// NOTE(review): a parameter line (859, presumably SelectionDAG *DAG) and the
// closing brace are missing from this extract.
858 llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
860 MachineBasicBlock *BB) {
861 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true,
862 new BURegReductionPriorityQueue<bu_ls_rr_sort>());
865 llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
867 MachineBasicBlock *BB) {
868 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), false,
869 new TDRegReductionPriorityQueue<td_ls_rr_sort>());