//===----- ScheduleDAGFast.cpp - Fast poor list scheduler -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a fast scheduler.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical copies");

static RegisterScheduler
  fastDAGScheduler("fast", "Fast suboptimal list scheduling",
                   createFastDAGScheduler);
static RegisterScheduler
  linearizeDAGScheduler("linearize", "Linearize DAG, no scheduling",
                        createDAGLinearizer);

namespace {
  /// FastPriorityQueue - A degenerate priority queue that considers
  /// all nodes to have the same priority.
  ///
  struct FastPriorityQueue {
    SmallVector<SUnit *, 16> Queue;

    bool empty() const { return Queue.empty(); }

    void push(SUnit *U) {
      Queue.push_back(U);
    }

    SUnit *pop() {
      if (empty()) return NULL;
      SUnit *V = Queue.back();
      Queue.pop_back();
      return V;
    }
  };
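
  // Note: pop() simply returns the most recently pushed SUnit, so this
  // "queue" behaves as a LIFO stack; the fast scheduler deliberately spends
  // no time choosing among ready nodes.
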
//===----------------------------------------------------------------------===//
/// ScheduleDAGFast - The actual "fast" list scheduler implementation.
///
class ScheduleDAGFast : public ScheduleDAGSDNodes {
private:
  /// AvailableQueue - The priority queue to use for the available SUnits.
  FastPriorityQueue AvailableQueue;

  /// LiveRegDefs - A set of physical registers and their definition
  /// that are "live". These nodes must be scheduled before any other nodes
  /// that modify the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

public:
  ScheduleDAGFast(MachineFunction &mf)
    : ScheduleDAGSDNodes(mf) {}

  void Schedule();

  /// AddPred - Adds a predecessor edge to SUnit SU.
  void AddPred(SUnit *SU, const SDep &D) {
    SU->addPred(D);
  }

  /// RemovePred - Removes a predecessor edge from SUnit SU.
  void RemovePred(SUnit *SU, const SDep &D) {
    SU->removePred(D);
  }

private:
  void ReleasePred(SUnit *SU, SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU, unsigned CurCycle);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
  void ListScheduleBottomUp();

  /// forceUnitLatencies - The fast scheduler doesn't care about real latencies.
  bool forceUnitLatencies() const { return true; }
};
}  // end anonymous namespace

/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGFast::Schedule() {
  DEBUG(dbgs() << "********** List Scheduling **********\n");

  NumLiveRegs = 0;
  LiveRegDefs.resize(TRI->getNumRegs(), NULL);
  LiveRegCycles.resize(TRI->getNumRegs(), 0);

  // Build the scheduling graph.
  BuildSchedGraph(NULL);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGFast::ReleasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;
    AvailableQueue.push(PredSU);
  }
}

void ScheduleDAGFast::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      if (!LiveRegDefs[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegDefs[I->getReg()] = I->getSUnit();
        LiveRegCycles[I->getReg()] = CurCycle;
      }
    }
  }
}

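// Note on live-register bookkeeping: when a use of a physical register def
// is scheduled (bottom-up), LiveRegDefs pins the defining SUnit and
// LiveRegCycles records the cycle of that use. DelayForLiveRegsBottomUp
// consults this state to keep any other clobber of the register from being
// scheduled inside the def-use window; ScheduleNodeBottomUp clears it once
// the def itself is scheduled.
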
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
  SU->setHeightToAtLeast(CurCycle);
  Sequence.push_back(SU);

  ReleasePredecessors(SU, CurCycle);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      // Clear the live-range entry only when this successor is the use that
      // originally made the register live (its scheduled cycle, i.e. its
      // height, matches the cycle recorded in LiveRegCycles).
      if (LiveRegCycles[I->getReg()] == I->getSUnit()->getHeight()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        assert(LiveRegDefs[I->getReg()] == SU &&
               "Physical register dependency violated?");
        --NumLiveRegs;
        LiveRegDefs[I->getReg()] = NULL;
        LiveRegCycles[I->getReg()] = 0;
      }
    }
  }

  SU->isScheduled = true;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
  if (SU->getNode()->getGluedNode())
    return NULL;

  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    DEBUG(dbgs() << "Unfolding SU # " << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    NewSU = newSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (MCID.isCommutable())
      NewSU->isCommutable = true;

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value,
    // but with different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = newSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);
    }

    SDep ChainPred;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPred = *I;
      else if (I->getSUnit()->getNode() &&
               I->getSUnit()->getNode()->isOperandOf(LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    if (ChainPred.getSUnit()) {
      RemovePred(SU, ChainPred);
      if (isNewLoad)
        AddPred(LoadSU, ChainPred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }
    if (isNewLoad) {
      AddPred(NewSU, SDep(LoadSU, SDep::Order, LoadSU->Latency));
    }

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "Duplicating SU # " << SU->NodeNum << "\n");
  NewSU = Clone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from the old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  ++NumDups;
  return NewSU;
}

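// CopyAndMoveSuccessors thus has two strategies: a node that folds a memory
// operand is first unfolded into a separate load plus operation; failing
// that, the node is cloned and the already-scheduled successors are moved to
// the clone, so the clone can stand in as the physical register def for
// those uses and break the live-range conflict.
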
/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                               const TargetRegisterClass *DestRC,
                                               const TargetRegisterClass *SrcRC,
                                               SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(NULL));
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = newSUnit(static_cast<SDNode *>(NULL));
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from the old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    RemovePred(DelDeps[i].first, DelDeps[i].second);
  }

  AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
  AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));

  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}

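// The two copies move the value out of the contested physical register
// (CopyFromSU: SrcRC -> DestRC) and back for the already-scheduled
// successors (CopyToSU: DestRC -> SrcRC), freeing the physical register in
// between.
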
/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = MCID.getNumDefs();
  for (const uint16_t *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}

/// CheckForLiveRegDef - Return true and update live register vector if the
/// specified register def of the specified SUnit clobbers any "live" registers.
static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVector<unsigned, 4> &LRegs,
                               const TargetRegisterInfo *TRI) {
  bool Added = false;
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
    if (LiveRegDefs[*AI] && LiveRegDefs[*AI] != SU) {
      if (RegAdded.insert(*AI)) {
        LRegs.push_back(*AI);
        Added = true;
      }
    }
  }
  return Added;
}

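// Note that the loop visits every alias of Reg (including Reg itself), so a
// def also interferes with live sub- and super-registers; RegAdded keeps
// each interfering register from being reported more than once.
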
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
                                               SmallVector<unsigned, 4> &LRegs){
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
    }
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }
    if (!Node->isMachineOpcode())
      continue;
    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    for (const uint16_t *Reg = MCID.getImplicitDefs(); *Reg; ++Reg) {
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
    }
  }
  return !LRegs.empty();
}

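// A non-empty LRegs means SU cannot be scheduled this cycle: it (or a node
// glued to it) would clobber a physical register whose value is still needed
// by an already-scheduled node, i.e. one that executes later in program
// order.
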
/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGFast::ListScheduleBottomUp() {
  unsigned CurCycle = 0;

  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU, CurCycle);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue.push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  SmallVector<SUnit*, 4> NotReady;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty()) {
    bool Delayed = false;
    LRegsMap.clear();
    SUnit *CurSU = AvailableQueue.pop();
    while (CurSU) {
      SmallVector<unsigned, 4> LRegs;
      if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
        break;
      Delayed = true;
      LRegsMap.insert(std::make_pair(CurSU, LRegs));

      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue.pop();
    }

    // All candidates are delayed due to live physical reg dependencies.
    // Try code duplication or inserting cross class copies
    // to resolve it.
    if (Delayed && !CurSU) {
      // Try duplicating the nodes that produce these
      // "expensive to copy" values to break the dependency. In case even
      // that doesn't work, insert cross class copies.
      SUnit *TrySU = NotReady[0];
      SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
      assert(LRegs.size() == 1 && "Can't handle this yet!");
      unsigned Reg = LRegs[0];
      SUnit *LRDef = LiveRegDefs[Reg];
      EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
      const TargetRegisterClass *RC =
        TRI->getMinimalPhysRegClass(Reg, VT);
      const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

      // If the cross copy register class is the same as RC, then it must be
      // possible to copy the value directly. Do not try to duplicate the def.
      // If the cross copy register class is not the same as RC, then it's
      // possible to copy the value but it requires cross register class
      // copies and it is expensive.
      // If the cross copy register class is null, then it's not possible to
      // copy the value at all.
      SUnit *NewDef = 0;
      if (DestRC != RC) {
        NewDef = CopyAndMoveSuccessors(LRDef);
        if (!DestRC && !NewDef)
          report_fatal_error("Can't handle live physical "
                             "register dependency!");
      }
      if (!NewDef) {
        // Issue copies, these can be expensive cross register class copies.
        SmallVector<SUnit*, 2> Copies;
        InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
        DEBUG(dbgs() << "Adding an edge from SU # " << TrySU->NodeNum
                     << " to SU #" << Copies.front()->NodeNum << "\n");
        AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
                            /*Reg=*/0, /*isNormalMemory=*/false,
                            /*isMustAlias=*/false, /*isArtificial=*/true));
        NewDef = Copies.back();
      }

      DEBUG(dbgs() << "Adding an edge from SU # " << NewDef->NodeNum
                   << " to SU #" << TrySU->NodeNum << "\n");
      LiveRegDefs[Reg] = NewDef;
      AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
                           /*Reg=*/0, /*isNormalMemory=*/false,
                           /*isMustAlias=*/false, /*isArtificial=*/true));
      TrySU->isAvailable = false;
      CurSU = NewDef;

      if (!CurSU)
        llvm_unreachable("Unable to resolve live physical register dependencies!");
    }

    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
      NotReady[i]->isPending = false;
      // May no longer be available due to backtracking.
      if (NotReady[i]->isAvailable)
        AvailableQueue.push(NotReady[i]);
    }
    NotReady.clear();

    if (CurSU)
      ScheduleNodeBottomUp(CurSU, CurCycle);
    ++CurCycle;
  }

  // Reverse the order since it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifyScheduledSequence(/*isBottomUp=*/true);
#endif
}

//===----------------------------------------------------------------------===//
// ScheduleDAGLinearize - No scheduling scheduler; it simply linearizes the
// DAG in topological order.
// IMPORTANT: this may not work for targets with physreg dependencies.
//===----------------------------------------------------------------------===//
namespace {
class ScheduleDAGLinearize : public ScheduleDAGSDNodes {
public:
  ScheduleDAGLinearize(MachineFunction &mf) : ScheduleDAGSDNodes(mf) {}

  void Schedule();

  MachineBasicBlock *EmitSchedule(MachineBasicBlock::iterator &InsertPos);

private:
  std::vector<SDNode*> Sequence;
  DenseMap<SDNode*, SDNode*> GluedMap;  // Cache glue to its user.

  void ScheduleNode(SDNode *N);
};
} // end anonymous namespace

void ScheduleDAGLinearize::ScheduleNode(SDNode *N) {
  if (N->getNodeId() != 0)
    llvm_unreachable(0);

  if (!N->isMachineOpcode() &&
      (N->getOpcode() == ISD::EntryToken || isPassiveNode(N)))
    // These nodes do not need to be translated into MIs.
    return;

  DEBUG(dbgs() << "\n*** Scheduling: ");
  DEBUG(N->dump(DAG));
  Sequence.push_back(N);

  unsigned NumOps = N->getNumOperands();
  if (unsigned NumLeft = NumOps) {
    SDNode *GluedOpN = 0;
    do {
      const SDValue &Op = N->getOperand(NumLeft-1);
      SDNode *OpN = Op.getNode();

      if (NumLeft == NumOps && Op.getValueType() == MVT::Glue) {
        // Schedule glue operand right above N.
        GluedOpN = OpN;
        assert(OpN->getNodeId() != 0 && "Glue operand not ready?");
        OpN->setNodeId(0);
        ScheduleNode(OpN);
        continue;
      }

      if (OpN == GluedOpN)
        // Glue operand is already scheduled.
        continue;

      DenseMap<SDNode*, SDNode*>::iterator DI = GluedMap.find(OpN);
      if (DI != GluedMap.end() && DI->second != N)
        // Users of glues are counted against the glued users.
        OpN = DI->second;

      unsigned Degree = OpN->getNodeId();
      assert(Degree > 0 && "Predecessor over-released!");
      OpN->setNodeId(--Degree);
      if (Degree == 0)
        ScheduleNode(OpN);
    } while (--NumLeft);
  }
}

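// The linearizer reuses each SDNode's NodeId field as a countdown of
// unscheduled uses: Schedule() seeds it with the node's use count, each
// scheduled user decrements it, and a node is emitted (recursively, above)
// exactly when the count reaches zero.
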
/// findGluedUser - Find the representative use of a glue value by walking
/// the use chain.
static SDNode *findGluedUser(SDNode *N) {
  while (SDNode *Glued = N->getGluedUser())
    N = Glued;
  return N;
}

void ScheduleDAGLinearize::Schedule() {
  DEBUG(dbgs() << "********** DAG Linearization **********\n");

  SmallVector<SDNode*, 8> Glues;
  unsigned DAGSize = 0;
  for (SelectionDAG::allnodes_iterator I = DAG->allnodes_begin(),
         E = DAG->allnodes_end(); I != E; ++I) {
    SDNode *N = I;

    // Use node id to record degree.
    unsigned Degree = N->use_size();
    N->setNodeId(Degree);
    unsigned NumVals = N->getNumValues();
    if (NumVals && N->getValueType(NumVals-1) == MVT::Glue &&
        N->hasAnyUseOfValue(NumVals-1)) {
      SDNode *User = findGluedUser(N);
      if (User) {
        Glues.push_back(N);
        GluedMap.insert(std::make_pair(N, User));
      }
    }

    if (N->isMachineOpcode() ||
        (N->getOpcode() != ISD::EntryToken && !isPassiveNode(N)))
      ++DAGSize;
  }

  for (unsigned i = 0, e = Glues.size(); i != e; ++i) {
    SDNode *Glue = Glues[i];
    SDNode *GUser = GluedMap[Glue];
    unsigned Degree = Glue->getNodeId();
    unsigned UDegree = GUser->getNodeId();

    // Glue user must be scheduled together with the glue operand. So other
    // users of the glue operand must be treated as its users.
    SDNode *ImmGUser = Glue->getGluedUser();
    for (SDNode::use_iterator ui = Glue->use_begin(), ue = Glue->use_end();
         ui != ue; ++ui)
      if (*ui == ImmGUser)
        --Degree;
    GUser->setNodeId(UDegree + Degree);
    Glue->setNodeId(1);
  }

  Sequence.reserve(DAGSize);
  ScheduleNode(DAG->getRoot().getNode());
}

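// The glue adjustment above exists because a glue operand is always emitted
// immediately above its glued user, so the pair must be scheduled as a unit:
// the glue node's remaining use count is folded into the user's, and the
// glue node's own count is pinned to 1 so that only its glued user releases
// it.
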
MachineBasicBlock*
ScheduleDAGLinearize::EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;

  DEBUG({
      dbgs() << "\n*** Final schedule ***\n";
    });

  // FIXME: Handle dbg_values.
  unsigned NumNodes = Sequence.size();
  for (unsigned i = 0; i != NumNodes; ++i) {
    SDNode *N = Sequence[NumNodes-i-1];
    DEBUG(N->dump(DAG));
    Emitter.EmitNode(N, false, false, VRBaseMap);
  }

  DEBUG(dbgs() << '\n');

  InsertPos = Emitter.getInsertPos();
  return Emitter.getBlock();
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

llvm::ScheduleDAGSDNodes *
llvm::createFastDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
  return new ScheduleDAGFast(*IS->MF);
}

llvm::ScheduleDAGSDNodes *
llvm::createDAGLinearizer(SelectionDAGISel *IS, CodeGenOpt::Level) {
  return new ScheduleDAGLinearize(*IS->MF);
}