//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//
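// A rough sketch of the core algorithm (illustrative only; the loops below
// additionally handle hazards, pending instructions, and live register
// interference):
//
//   AvailableQueue->push(RootSU);                // seed with the DAG root
//   while (!AvailableQueue->empty()) {
//     SUnit *SU = PickNodeToScheduleBottomUp();  // highest-priority legal node
//     AdvancePastStalls(SU);                     // satisfy latency/resources
//     ScheduleNodeBottomUp(SU);                  // emit node, release preds
//   }
//   std::reverse(Sequence.begin(), Sequence.end());
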
#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <climits>
using namespace llvm;
STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  sourceListDAGScheduler("source",
                         "Similar to list-burr but schedules in source "
                         "order when possible",
                         createSourceListDAGScheduler);

static RegisterScheduler
  hybridListDAGScheduler("list-hybrid",
                         "Bottom-up register pressure aware list scheduling "
                         "which tries to balance latency and register pressure",
                         createHybridListDAGScheduler);

static RegisterScheduler
  ILPListDAGScheduler("list-ilp",
                      "Bottom-up register pressure aware list scheduling "
                      "which tries to balance ILP and register pressure",
                      createILPListDAGScheduler);

static cl::opt<bool> DisableSchedCycles(
  "disable-sched-cycles", cl::Hidden, cl::init(false),
  cl::desc("Disable cycle-level precision during preRA scheduling"));
// Temporary sched=list-ilp flags until the heuristics are robust.
// Some options are also available under sched=list-hybrid.
static cl::opt<bool> DisableSchedRegPressure(
  "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
  cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
  "disable-sched-live-uses", cl::Hidden, cl::init(true),
  cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedVRegCycle(
  "disable-sched-vrcycle", cl::Hidden, cl::init(false),
  cl::desc("Disable virtual register cycle interference checks"));
static cl::opt<bool> DisableSchedPhysRegJoin(
  "disable-sched-physreg-join", cl::Hidden, cl::init(false),
  cl::desc("Disable physreg def-use affinity"));
static cl::opt<bool> DisableSchedStalls(
  "disable-sched-stalls", cl::Hidden, cl::init(true),
  cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
  "disable-sched-critical-path", cl::Hidden, cl::init(false),
  cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
  "disable-sched-height", cl::Hidden, cl::init(false),
  cl::desc("Disable scheduled-height priority in sched=list-ilp"));

static cl::opt<int> MaxReorderWindow(
  "max-sched-reorder", cl::Hidden, cl::init(6),
  cl::desc("Number of instructions to allow ahead of the critical path "
           "in sched=list-ilp"));

static cl::opt<unsigned> AvgIPC(
  "sched-avg-ipc", cl::Hidden, cl::init(1),
  cl::desc("Average inst/cycle when no target itinerary exists."));
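
// For example, assuming the standard llc driver (an illustrative invocation
// combining the scheduler names registered above with the hidden flags here):
//   llc -pre-RA-sched=list-ilp -max-sched-reorder=4 -sched-avg-ipc=2 foo.ll
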
#ifndef NDEBUG
namespace {
// For sched=list-ilp, count the number of times each factor comes into play.
enum { FactPressureDiff, FactRegUses, FactStall, FactHeight, FactDepth,
       FactStatic, FactOther, NumFactors };
}
static const char *FactorName[NumFactors] =
  {"PressureDiff", "RegUses", "Stall", "Height", "Depth", "Static", "Other"};
static int FactorCount[NumFactors];
#endif //!NDEBUG
namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
///
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// NeedLatency - True if the scheduler will make use of latency information.
  bool NeedLatency;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// CurCycle - The current scheduler state corresponds to this cycle.
  unsigned CurCycle;

  /// MinAvailableCycle - Cycle of the soonest available instruction.
  unsigned MinAvailableCycle;

  /// IssueCount - Count instructions issued in this cycle.
  /// Currently valid only for bottom-up scheduling.
  unsigned IssueCount;

  /// LiveRegDefs - A set of physical registers and their definitions that are
  /// "live". These nodes must be scheduled before any other nodes that
  /// modify the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<SUnit*> LiveRegGens;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;
public:
  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
                    SchedulingPriorityQueue *availqueue,
                    CodeGenOpt::Level OptLevel)
    : ScheduleDAGSDNodes(mf),
      NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
      Topo(SUnits) {

    const TargetMachine &tm = mf.getTarget();
    if (DisableSchedCycles || !NeedLatency)
      HazardRec = new ScheduleHazardRecognizer();
    else
      HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
  }

  ~ScheduleDAGRRList() {
    delete HazardRec;
    delete AvailableQueue;
  }

  void Schedule();
  ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - Adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - Removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  bool isReady(SUnit *SU) {
    return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
      AvailableQueue->isReady(SU);
  }
  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU);
  void ReleasePending();
  void AdvanceToCycle(unsigned NextCycle);
  void AdvancePastStalls(SUnit *SU);
  void EmitNode(SUnit *SU);
  void ScheduleNodeBottomUp(SUnit*);
  void CapturePred(SDep *PredEdge);
  void UnscheduleNodeBottomUp(SUnit*);
  void RestoreHazardCheckerBottomUp();
  void BacktrackBottomUp(SUnit*, SUnit*);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);

  SUnit *PickNodeToScheduleBottomUp();
  void ListScheduleBottomUp();
  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// ForceUnitLatencies - Register-pressure-reducing scheduling doesn't
  /// need actual latency information but the hybrid scheduler does.
  bool ForceUnitLatencies() const {
    return !NeedLatency;
  }
};
} // end anonymous namespace
/// GetCostForDef - Looks up the register class and cost for a given definition.
/// Typically this just means looking up the representative register class,
/// but for untyped values (MVT::untyped) it means inspecting the node's
/// opcode to determine what register class is being generated.
static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
                          const TargetLowering *TLI,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          unsigned &RegClass, unsigned &Cost) {
  EVT VT = RegDefPos.GetValue();

  // Special handling for untyped values. These values can only come from
  // the expansion of custom DAG-to-DAG patterns.
  if (VT == MVT::untyped) {
    const SDNode *Node = RegDefPos.GetNode();
    unsigned Opcode = Node->getMachineOpcode();

    if (Opcode == TargetOpcode::REG_SEQUENCE) {
      unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
      const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Idx = RegDefPos.GetIdx();
    const MCInstrDesc &Desc = TII->get(Opcode);
    const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI);
    RegClass = RC->getID();
    // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
    // better way to determine it.
    Cost = 1;
  } else {
    RegClass = TLI->getRepRegClassFor(VT)->getID();
    Cost = TLI->getRepRegClassCostFor(VT);
  }
}
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DEBUG(dbgs()
        << "********** List Scheduling BB#" << BB->getNumber()
        << " '" << BB->getName() << "' **********\n");
#ifndef NDEBUG
  for (int i = 0; i < NumFactors; ++i) {
    FactorCount[i] = 0;
  }
#endif //!NDEBUG

  CurCycle = 0;
  IssueCount = 0;
  MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;

  // Allocate slots for each physical register, plus one for a special register
  // to track the virtual resource of a calling sequence.
  LiveRegDefs.resize(TRI->getNumRegs() + 1, NULL);
  LiveRegGens.resize(TRI->getNumRegs() + 1, NULL);

  // Build the scheduling graph.
  BuildSchedGraph(NULL);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  HazardRec->Reset();

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();

#ifndef NDEBUG
  for (int i = 0; i < NumFactors; ++i) {
    DEBUG(dbgs() << FactorName[i] << "\t" << FactorCount[i] << "\n");
  }
#endif // !NDEBUG
  AvailableQueue->releaseState();
}
//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;

  if (!ForceUnitLatencies()) {
    // Update the predecessor's height. This is now the cycle when the
    // predecessor can be scheduled without causing a pipeline stall.
    PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
  }

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;

    unsigned Height = PredSU->getHeight();
    if (Height < MinAvailableCycle)
      MinAvailableCycle = Height;

    if (isReady(PredSU)) {
      AvailableQueue->push(PredSU);
    }
    // CapturePred and others may have left the node in the pending queue, avoid
    // adding it twice.
    else if (!PredSU->isPending) {
      PredSU->isPending = true;
      PendingQueue.push_back(PredSU);
    }
  }
}
/// IsChainDependent - Test if Outer is reachable from Inner through
/// chain dependencies.
static bool IsChainDependent(SDNode *Outer, SDNode *Inner,
                             unsigned NestLevel,
                             const TargetInstrInfo *TII) {
  SDNode *N = Outer;
  for (;;) {
    if (N == Inner)
      return true;
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        if (IsChainDependent(N->getOperand(i).getNode(), Inner, NestLevel, TII))
          return true;
      return false;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        if (NestLevel == 0)
          return false;
        --NestLevel;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return false;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return false;
  }
}
/// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
/// the corresponding (lowered) CALLSEQ_BEGIN node.
///
/// NestLevel and MaxNest are used in recursion to indicate the current level
/// of nesting of CALLSEQ_BEGIN and CALLSEQ_END pairs, as well as the maximum
/// level seen so far.
///
/// TODO: It would be better to give CALLSEQ_END an explicit operand to point
/// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it.
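///
/// Illustrative example: for the nested lowered sequence
///   CALLSEQ_BEGIN (a) ... CALLSEQ_BEGIN (b) ... CALLSEQ_END (b) ... CALLSEQ_END (a)
/// the walk starts at CALLSEQ_END (a) and climbs the chain backwards;
/// NestLevel is incremented at each CALLSEQ_END and decremented at each
/// CALLSEQ_BEGIN, so the search stops at the CALLSEQ_BEGIN that returns
/// NestLevel to zero, namely (a).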
static SDNode *
FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
                 const TargetInstrInfo *TII) {
  for (;;) {
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      SDNode *Best = 0;
      unsigned BestMaxNest = MaxNest;
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        unsigned MyNestLevel = NestLevel;
        unsigned MyMaxNest = MaxNest;
        if (SDNode *New = FindCallSeqStart(N->getOperand(i).getNode(),
                                           MyNestLevel, MyMaxNest, TII))
          if (!Best || (MyMaxNest > BestMaxNest)) {
            Best = New;
            BestMaxNest = MyMaxNest;
          }
      }
      assert(Best);
      MaxNest = BestMaxNest;
      return Best;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
        MaxNest = std::max(MaxNest, NestLevel);
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NestLevel != 0);
        --NestLevel;
        if (NestLevel == 0)
          return N;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return 0;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return 0;
  }
}
/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively creates one large live range
/// across a sequence of two-address nodes. This is important because the
/// entire chain must be scheduled together. Example:
///
/// flags = (3) add
/// flags = (2) addc flags
/// flags = (1) addc flags
///
/// results in
///
/// LiveRegDefs[flags] = 3
/// LiveRegGens[flags] = 1
///
/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
      assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
             "interference on register dependence");
      LiveRegDefs[I->getReg()] = I->getSUnit();
      if (!LiveRegGens[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegGens[I->getReg()] = SU;
      }
    }
  }

  // If we're scheduling a lowered CALLSEQ_END, find the corresponding
  // CALLSEQ_BEGIN. Inject an artificial physical register dependence between
  // these nodes, to prevent other calls from being interscheduled with them.
  unsigned CallResource = TRI->getNumRegs();
  if (!LiveRegDefs[CallResource])
    for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode())
      if (Node->isMachineOpcode() &&
          Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        unsigned NestLevel = 0;
        unsigned MaxNest = 0;
        SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);

        SUnit *Def = &SUnits[N->getNodeId()];
        ++NumLiveRegs;
        LiveRegDefs[CallResource] = Def;
        LiveRegGens[CallResource] = SU;
        break;
      }
}
/// Check to see if any of the pending instructions are ready to issue. If
/// so, add them to the available queue.
void ScheduleDAGRRList::ReleasePending() {
  if (DisableSchedCycles) {
    assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
    return;
  }

  // If the available queue is empty, it is safe to reset MinAvailableCycle.
  if (AvailableQueue->empty())
    MinAvailableCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
    unsigned ReadyCycle = PendingQueue[i]->getHeight();
    if (ReadyCycle < MinAvailableCycle)
      MinAvailableCycle = ReadyCycle;

    if (PendingQueue[i]->isAvailable) {
      if (!isReady(PendingQueue[i]))
        continue;
      AvailableQueue->push(PendingQueue[i]);
    }
    PendingQueue[i]->isPending = false;
    PendingQueue[i] = PendingQueue.back();
    PendingQueue.pop_back();
    --i; --e;
  }
}
/// Move the scheduler state forward by the specified number of Cycles.
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
  if (NextCycle <= CurCycle)
    return;

  IssueCount = 0;
  AvailableQueue->setCurCycle(NextCycle);
  if (!HazardRec->isEnabled()) {
    // Bypass lots of virtual calls in case of long latency.
    CurCycle = NextCycle;
  }
  else {
    for (; CurCycle != NextCycle; ++CurCycle) {
      HazardRec->RecedeCycle();
    }
  }
  // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
  // available Q to release pending nodes at least once before popping.
  ReleasePending();
}
/// Move the scheduler state forward until the specified node's dependents are
/// ready and can be scheduled with no resource conflicts.
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
  if (DisableSchedCycles)
    return;

  // FIXME: Nodes such as CopyFromReg probably should not advance the current
  // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
  // has predecessors the cycle will be advanced when they are scheduled.
  // But given the crude nature of modeling latency through such nodes, we
  // currently need to treat these nodes like real instructions.
  // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;

  unsigned ReadyCycle = SU->getHeight();

  // Bump CurCycle to account for latency. We assume the latency of other
  // available instructions may be hidden by the stall (not a full pipe stall).
  // This updates the hazard recognizer's cycle before reserving resources for
  // this instruction.
  AdvanceToCycle(ReadyCycle);

  // Calls are scheduled in their preceding cycle, so don't conflict with
  // hazards from instructions after the call. EmitNode will reset the
  // scoreboard state before emitting the call.
  if (SU->isCall)
    return;

  // FIXME: For resource conflicts in very long non-pipelined stages, we
  // should probably skip ahead here to avoid useless scoreboard checks.
  int Stalls = 0;
  while (true) {
    ScheduleHazardRecognizer::HazardType HT =
      HazardRec->getHazardType(SU, -Stalls);

    if (HT == ScheduleHazardRecognizer::NoHazard)
      break;

    ++Stalls;
  }
  AdvanceToCycle(CurCycle + Stalls);
}
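
// An illustrative trace (hazard behavior is target-specific): with CurCycle
// at 3 and SU->getHeight() at 5, AdvanceToCycle(5) first recedes the hazard
// recognizer two cycles; if a structural hazard is then still reported at
// offset 0 but clears at offset -1, Stalls ends up as 1 and the scheduler
// advances one extra cycle before emitting SU.
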
/// Record this SUnit in the HazardRecognizer.
/// Does not update CurCycle.
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
  if (!HazardRec->isEnabled())
    return;

  // Check for phys reg copy.
  if (!SU->getNode())
    return;

  switch (SU->getNode()->getOpcode()) {
  default:
    assert(SU->getNode()->isMachineOpcode() &&
           "This target-independent node should not be scheduled.");
    break;
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor:
  case ISD::CopyToReg:
  case ISD::CopyFromReg:
  case ISD::EH_LABEL:
    // Noops don't affect the scoreboard state. Copies are likely to be
    // removed.
    return;
  case ISD::INLINEASM:
    // For inline asm, clear the pipeline state.
    HazardRec->Reset();
    return;
  }
  if (SU->isCall) {
    // Calls are scheduled with their preceding instructions. For bottom-up
    // scheduling, clear the pipeline state before emitting.
    HazardRec->Reset();
  }

  HazardRec->EmitInstruction(SU);
}

static void resetVRegCycle(SUnit *SU);
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

#ifndef NDEBUG
  if (CurCycle < SU->getHeight())
    DEBUG(dbgs() << "   Height [" << SU->getHeight()
          << "] pipeline stall!\n");
#endif

  // FIXME: Do not modify node height. It may interfere with
  // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
  // node, its ready cycle can aid heuristics, and after scheduling it can
  // indicate the scheduled cycle.
  SU->setHeightToAtLeast(CurCycle);

  // Reserve resources for the scheduled instruction.
  EmitNode(SU);

  Sequence.push_back(SU);

  AvailableQueue->ScheduledNode(SU);

  // If HazardRec is disabled, and each inst counts as one cycle, then
  // advance CurCycle before ReleasePredecessors to avoid useless pushes to
  // PendingQueue for schedulers that implement HasReadyFilter.
  if (!HazardRec->isEnabled() && AvgIPC < 2)
    AdvanceToCycle(CurCycle + 1);

  // Update liveness of predecessors before successors to avoid treating a
  // two-address node as a live range def.
  ReleasePredecessors(SU);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }
  // Release the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  if (LiveRegDefs[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
      }
    }

  resetVRegCycle(SU);

  SU->isScheduled = true;

  // Conditions under which the scheduler should eagerly advance the cycle:
  // (1) No available instructions
  // (2) All pipelines full, so available instructions must have hazards.
  //
  // If HazardRec is disabled, the cycle was pre-advanced before calling
  // ReleasePredecessors. In that case, IssueCount should remain 0.
  //
  // Check AvailableQueue after ReleasePredecessors in case of zero latency.
  if (HazardRec->isEnabled() || AvgIPC > 1) {
    if (SU->getNode() && SU->getNode()->isMachineOpcode())
      ++IssueCount;
    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
        || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
      AdvanceToCycle(CurCycle + 1);
  }
}
/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
  ++PredSU->NumSuccsLeft;
}
/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update its
/// state and its predecessors' states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
  DEBUG(SU->dump(this));

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }

  // Reclaim the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isMachineOpcode() &&
        SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
      ++NumLiveRegs;
      LiveRegDefs[CallResource] = SU;
      LiveRegGens[CallResource] = NULL;
    }
  }

  // Release the special call resource dependence, if this is the end
  // of a call.
  if (LiveRegGens[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
      }
    }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (!LiveRegDefs[I->getReg()])
        ++NumLiveRegs;
      // This becomes the nearest def. Note that an earlier def may still be
      // pending if this is a two-address node.
      LiveRegDefs[I->getReg()] = SU;
      if (LiveRegGens[I->getReg()] == NULL ||
          I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
        LiveRegGens[I->getReg()] = I->getSUnit();
    }
  }
  if (SU->getHeight() < MinAvailableCycle)
    MinAvailableCycle = SU->getHeight();

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
    // Don't make available until backtracking is complete.
    SU->isPending = true;
    PendingQueue.push_back(SU);
  }
  else {
    AvailableQueue->push(SU);
  }
  AvailableQueue->UnscheduledNode(SU);
}
/// After backtracking, the hazard checker needs to be restored to a state
/// corresponding to the current cycle.
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
  HazardRec->Reset();

  unsigned LookAhead = std::min((unsigned)Sequence.size(),
                                HazardRec->getMaxLookAhead());
  if (LookAhead == 0)
    return;

  std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
  unsigned HazardCycle = (*I)->getHeight();
  for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
    SUnit *SU = *I;
    for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
      HazardRec->RecedeCycle();
    }
    EmitNode(SU);
  }
}
/// BacktrackBottomUp - Backtrack scheduling to the cycle specified by BtSU's
/// height in order to schedule a specific node.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
  SUnit *OldSU = Sequence.back();
  while (true) {
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    // FIXME: use ready cycle instead of height
    CurCycle = OldSU->getHeight();
    UnscheduleNodeBottomUp(OldSU);
    AvailableQueue->setCurCycle(CurCycle);
    if (OldSU == BtSU)
      break;
    OldSU = Sequence.back();
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  RestoreHazardCheckerBottomUp();

  ReleasePending();

  ++NumBacktracks;
}
static bool isOperandOf(const SUnit *SU, SDNode *N) {
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isOperandOf(N))
      return true;
  }
  return false;
}
/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  if (SU->getNode()->getGluedNode())
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value,
    // but with different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      InitNumRegDefsLeft(LoadSU);
      ComputeLatency(LoadSU);
    }

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (MCID.isCommutable())
      NewSU->isCommutable = true;

    InitNumRegDefsLeft(NewSU);
    ComputeLatency(NewSU);
    // Record all the edges to and from the old SU, by category.
    SmallVector<SDep, 4> ChainPreds;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPreds.push_back(*I);
      else if (isOperandOf(I->getSUnit(), LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
      // Balance register pressure.
      if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
          && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
        --NewSU->NumRegDefsLeft;
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    // Add a data dependency to reflect that NewSU reads the value defined
    // by LoadSU.
    AddPred(NewSU, SDep(LoadSU, SDep::Data, LoadSU->Latency));

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "    Duplicating SU #" << SU->NodeNum << "\n");
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}
/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                              const TargetRegisterClass *DestRC,
                                              const TargetRegisterClass *SrcRC,
                                              SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
    else {
      // Avoid scheduling the def-side copy before other successors. Otherwise
      // we could introduce another physreg interference on the copy and
      // continue inserting copies indefinitely.
      SDep D(CopyFromSU, SDep::Order, /*Latency=*/0,
             /*Reg=*/0, /*isNormalMemory=*/false,
             /*isMustAlias=*/false, /*isArtificial=*/true);
      AddPred(SuccSU, D);
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
  AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}
/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = MCID.getNumDefs();
  for (const unsigned *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}
/// CheckForLiveRegDef - Update the live register vector (LRegs) if the
/// specified register def of the specified SUnit clobbers any "live" registers.
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVector<unsigned, 4> &LRegs,
                               const TargetRegisterInfo *TRI) {
  for (const unsigned *AliasI = TRI->getOverlaps(Reg); *AliasI; ++AliasI) {

    // Check if Reg is live.
    if (!LiveRegDefs[*AliasI]) continue;

    // Allow multiple uses of the same def.
    if (LiveRegDefs[*AliasI] == SU) continue;

    // Add Reg to the set of interfering live regs.
    if (RegAdded.insert(*AliasI)) {
      LRegs.push_back(*AliasI);
    }
  }
}
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  //
  // If SU is the currently live definition of the same register that it uses,
  // then we are free to schedule it.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;
    // If we're in the middle of scheduling a call, don't begin scheduling
    // another call. Also, don't allow any physical registers to be live across
    // the call.
    if (Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
      // Check the special calling-sequence resource.
      unsigned CallResource = TRI->getNumRegs();
      if (LiveRegDefs[CallResource]) {
        SDNode *Gen = LiveRegGens[CallResource]->getNode();
        while (SDNode *Glued = Gen->getGluedNode())
          Gen = Glued;
        if (!IsChainDependent(Gen, Node, 0, TII) && RegAdded.insert(CallResource))
          LRegs.push_back(CallResource);
      }
    }
    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = MCID.ImplicitDefs; *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
  }

  return !LRegs.empty();
}
/// Return a node that can be scheduled in this cycle. Requirements:
/// (1) Ready: latency has been satisfied
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
  SmallVector<SUnit*, 4> Interferences;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;

  SUnit *CurSU = AvailableQueue->pop();
  while (CurSU) {
    SmallVector<unsigned, 4> LRegs;
    if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
      break;
    LRegsMap.insert(std::make_pair(CurSU, LRegs));

    CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
    Interferences.push_back(CurSU);
    CurSU = AvailableQueue->pop();
  }
  if (CurSU) {
    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
      Interferences[i]->isPending = false;
      assert(Interferences[i]->isAvailable && "must still be available");
      AvailableQueue->push(Interferences[i]);
    }
    return CurSU;
  }

  // All candidates are delayed due to live physical reg dependencies.
  // Try backtracking, code duplication, or inserting cross class copies
  // to resolve it.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    SUnit *TrySU = Interferences[i];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

    // Try unscheduling up to the point where it's safe to schedule
    // this node.
    SUnit *BtSU = NULL;
    unsigned LiveCycle = UINT_MAX;
    for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
      unsigned Reg = LRegs[j];
      if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
        BtSU = LiveRegGens[Reg];
        LiveCycle = BtSU->getHeight();
      }
    }
    if (!WillCreateCycle(TrySU, BtSU)) {
      BacktrackBottomUp(TrySU, BtSU);

      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (BtSU->isAvailable) {
        BtSU->isAvailable = false;
        if (!BtSU->isPending)
          AvailableQueue->remove(BtSU);
      }
      AddPred(TrySU, SDep(BtSU, SDep::Order, /*Latency=*/1,
                          /*Reg=*/0, /*isNormalMemory=*/false,
                          /*isMustAlias=*/false, /*isArtificial=*/true));

      // If one or more successors have been unscheduled, then the current
      // node is no longer available. Schedule a successor that's now
      // available instead.
      if (!TrySU->isAvailable) {
        CurSU = AvailableQueue->pop();
      }
      else {
        CurSU = TrySU;
        TrySU->isPending = false;
        Interferences.erase(Interferences.begin()+i);
      }
      break;
    }
  }
  if (!CurSU) {
    // Can't backtrack. If it's too expensive to copy the value, then try to
    // duplicate the nodes that produce these "too expensive to copy"
    // values to break the dependency. In case even that doesn't work,
    // insert cross class copies.
    // If it's not too expensive, i.e. cost != -1, issue copies.
    SUnit *TrySU = Interferences[0];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
    assert(LRegs.size() == 1 && "Can't handle this yet!");
    unsigned Reg = LRegs[0];
    SUnit *LRDef = LiveRegDefs[Reg];
    EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
    const TargetRegisterClass *RC =
      TRI->getMinimalPhysRegClass(Reg, VT);
    const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

    // If cross copy register class is the same as RC, then it must be possible
    // to copy the value directly. Do not try to duplicate the def.
    // If cross copy register class is not the same as RC, then it's possible to
    // copy the value, but it requires cross register class copies and it is
    // expensive.
    // If cross copy register class is null, then it's not possible to copy
    // the value at all.
    SUnit *NewDef = 0;
    if (DestRC != RC) {
      NewDef = CopyAndMoveSuccessors(LRDef);
      if (!DestRC && !NewDef)
        report_fatal_error("Can't handle live physical register dependency!");
    }
    if (!NewDef) {
      // Issue copies, these can be expensive cross register class copies.
      SmallVector<SUnit*, 2> Copies;
      InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
      DEBUG(dbgs() << "    Adding an edge from SU #" << TrySU->NodeNum
            << " to SU #" << Copies.front()->NodeNum << "\n");
      AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
                          /*Reg=*/0, /*isNormalMemory=*/false,
                          /*isMustAlias=*/false,
                          /*isArtificial=*/true));
      NewDef = Copies.back();
    }

    DEBUG(dbgs() << "    Adding an edge from SU #" << NewDef->NodeNum
          << " to SU #" << TrySU->NodeNum << "\n");
    LiveRegDefs[Reg] = NewDef;
    AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
                         /*Reg=*/0, /*isNormalMemory=*/false,
                         /*isMustAlias=*/false,
                         /*isArtificial=*/true));
    TrySU->isAvailable = false;
    CurSU = NewDef;
  }

  assert(CurSU && "Unable to resolve live physical register dependencies!");

  // Add the nodes that aren't ready back onto the available list.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    Interferences[i]->isPending = false;
    // May no longer be available due to backtracking.
    if (Interferences[i]->isAvailable) {
      AvailableQueue->push(Interferences[i]);
    }
  }
  return CurSU;
}
/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    DEBUG(dbgs() << "\nExamining Available:\n";
          AvailableQueue->dump(this));

    // Pick the best node to schedule taking all constraints into
    // account.
    SUnit *SU = PickNodeToScheduleBottomUp();

    AdvancePastStalls(SU);

    ScheduleNodeBottomUp(SU);

    while (AvailableQueue->empty() && !PendingQueue.empty()) {
      // Advance the cycle to free resources. Skip ahead to the next ready SU.
      assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
      AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
    }
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/true);
#endif
}
//===----------------------------------------------------------------------===//
//                RegReductionPriorityQueue Definition
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
// to reduce register pressure.
//
namespace {
class RegReductionPQBase;
struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
  bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
};

#ifndef NDEBUG
template<class SF>
struct reverse_sort : public queue_sort {
  SF &SortFunc;
  reverse_sort(SF &sf) : SortFunc(sf) {}
  reverse_sort(const reverse_sort &RHS) : SortFunc(RHS.SortFunc) {}

  bool operator()(SUnit* left, SUnit* right) const {
    // reverse left/right rather than simply !SortFunc(left, right)
    // to expose different paths in the comparison logic.
    return SortFunc(right, left);
  }
};
#endif // NDEBUG
/// bu_ls_rr_sort - Priority function for bottom up register pressure
/// reduction scheduler.
struct bu_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
  bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// src_ls_rr_sort - Priority function for source order scheduler.
struct src_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  src_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  src_ls_rr_sort(const src_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// hybrid_ls_rr_sort - Priority function for hybrid scheduler.
struct hybrid_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  hybrid_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};

// ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
// scheduler.
struct ilp_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  ilp_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};
class RegReductionPQBase : public SchedulingPriorityQueue {
protected:
  std::vector<SUnit*> Queue;
  unsigned CurQueueId;
  bool TracksRegPressure;

  // SUnits - The SUnits for the current graph.
  std::vector<SUnit> *SUnits;

  MachineFunction &MF;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const TargetLowering *TLI;
  ScheduleDAGRRList *scheduleDAG;

  // SethiUllmanNumbers - The SethiUllman number for each node.
  std::vector<unsigned> SethiUllmanNumbers;

  /// RegPressure - Tracking current reg pressure per register class.
  std::vector<unsigned> RegPressure;

  /// RegLimit - Tracking the number of allocatable registers per register
  /// class.
  std::vector<unsigned> RegLimit;

public:
  RegReductionPQBase(MachineFunction &mf,
                     bool hasReadyFilter,
                     bool tracksrp,
                     const TargetInstrInfo *tii,
                     const TargetRegisterInfo *tri,
                     const TargetLowering *tli)
    : SchedulingPriorityQueue(hasReadyFilter),
      CurQueueId(0), TracksRegPressure(tracksrp),
      MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
    if (TracksRegPressure) {
      unsigned NumRC = TRI->getNumRegClasses();
      RegLimit.resize(NumRC);
      RegPressure.resize(NumRC);
      std::fill(RegLimit.begin(), RegLimit.end(), 0);
      std::fill(RegPressure.begin(), RegPressure.end(), 0);
      for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
             E = TRI->regclass_end(); I != E; ++I)
        RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
    }
  }
  void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
    scheduleDAG = scheduleDag;
  }

  ScheduleHazardRecognizer* getHazardRec() {
    return scheduleDAG->getHazardRec();
  }

  void initNodes(std::vector<SUnit> &sunits);

  void addNode(const SUnit *SU);

  void updateNode(const SUnit *SU);

  void releaseState() {
    SUnits = 0;
    SethiUllmanNumbers.clear();
    std::fill(RegPressure.begin(), RegPressure.end(), 0);
  }

  unsigned getNodePriority(const SUnit *SU) const;

  unsigned getNodeOrdering(const SUnit *SU) const {
    if (!SU->getNode()) return 0;

    return scheduleDAG->DAG->GetOrdering(SU->getNode());
  }

  bool empty() const { return Queue.empty(); }

  void push(SUnit *U) {
    assert(!U->NodeQueueId && "Node in the queue already");
    U->NodeQueueId = ++CurQueueId;
    Queue.push_back(U);
  }

  void remove(SUnit *SU) {
    assert(!Queue.empty() && "Queue is empty!");
    assert(SU->NodeQueueId != 0 && "Not in queue!");
    std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
                                                 SU);
    if (I != prior(Queue.end()))
      std::swap(*I, Queue.back());
    Queue.pop_back();
    SU->NodeQueueId = 0;
  }

  bool tracksRegPressure() const { return TracksRegPressure; }

  void dumpRegPressure() const;

  bool HighRegPressure(const SUnit *SU) const;

  bool MayReduceRegPressure(SUnit *SU) const;

  int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;

  void ScheduledNode(SUnit *SU);

  void UnscheduledNode(SUnit *SU);

protected:
  bool canClobber(const SUnit *SU, const SUnit *Op);
  void AddPseudoTwoAddrDeps();
  void PrescheduleNodesWithMultipleUses();
  void CalculateSethiUllmanNumbers();
};
template<class SF>
static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
  std::vector<SUnit *>::iterator Best = Q.begin();
  for (std::vector<SUnit *>::iterator I = llvm::next(Q.begin()),
         E = Q.end(); I != E; ++I)
    if (Picker(*Best, *I))
      Best = I;
  SUnit *V = *Best;
  if (Best != prior(Q.end()))
    std::swap(*Best, Q.back());
  Q.pop_back();
  return V;
}

template<class SF>
SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
#ifndef NDEBUG
  if (DAG->StressSched) {
    reverse_sort<SF> RPicker(Picker);
    return popFromQueueImpl(Q, RPicker);
  }
#endif
  (void)DAG;
  return popFromQueueImpl(Q, Picker);
}
template<class SF>
class RegReductionPriorityQueue : public RegReductionPQBase {
  SF Picker;

public:
  RegReductionPriorityQueue(MachineFunction &mf,
                            bool tracksrp,
                            const TargetInstrInfo *tii,
                            const TargetRegisterInfo *tri,
                            const TargetLowering *tli)
    : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, tii, tri, tli),
      Picker(this) {}

  bool isBottomUp() const { return SF::IsBottomUp; }

  bool isReady(SUnit *U) const {
    return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
  }

  SUnit *pop() {
    if (Queue.empty()) return NULL;

    SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
    V->NodeQueueId = 0;
    return V;
  }

  void dump(ScheduleDAG *DAG) const {
    // Emulate pop() without clobbering NodeQueueIds.
    std::vector<SUnit*> DumpQueue = Queue;
    SF DumpPicker = Picker;
    while (!DumpQueue.empty()) {
      SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
      dbgs() << "Height " << SU->getHeight() << ": ";
      SU->dump(DAG);
    }
  }
};

typedef RegReductionPriorityQueue<bu_ls_rr_sort>
BURegReductionPriorityQueue;

typedef RegReductionPriorityQueue<src_ls_rr_sort>
SrcRegReductionPriorityQueue;

typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
HybridBURRPriorityQueue;

typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
ILPBURRPriorityQueue;
} // end anonymous namespace
//===----------------------------------------------------------------------===//
//           Static Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//

// Check for special nodes that bypass scheduling heuristics.
// Currently this pushes TokenFactor nodes down, but may be used for other
// pseudo-ops as well.
//
// Return -1 to schedule right above left, 1 for left above right.
// Return 0 if no bias exists.
static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
  bool LSchedLow = left->isScheduleLow;
  bool RSchedLow = right->isScheduleLow;
  if (LSchedLow != RSchedLow)
    return LSchedLow < RSchedLow ? 1 : -1;
  return 0;
}
/// CalcNodeSethiUllmanNumber - Compute Sethi Ullman number.
/// Smaller number is the higher priority.
static unsigned
CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    } else if (PredSethiUllman == SethiUllmanNumber)
      ++Extra;
  }

  SethiUllmanNumber += Extra;

  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}
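
// Worked example (illustrative): a leaf gets number 1 (0 is bumped to 1).
// A node with a single data predecessor inherits the predecessor's number
// with Extra == 0. A node whose two data predecessors both computed number 1
// sees the second tie bump Extra to 1 and ends up with 1 + 1 == 2, modeling
// the extra value that must stay live while the second subtree is evaluated.
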
/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
void RegReductionPQBase::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}

void RegReductionPQBase::addNode(const SUnit *SU) {
  unsigned SUSize = SethiUllmanNumbers.size();
  if (SUnits->size() > SUSize)
    SethiUllmanNumbers.resize(SUSize*2, 0);
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}

void RegReductionPQBase::updateNode(const SUnit *SU) {
  SethiUllmanNumbers[SU->NodeNum] = 0;
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}
// Lower priority means schedule further down. For bottom-up scheduling, lower
// priority SUs are scheduled before higher priority SUs.
unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
  assert(SU->NodeNum < SethiUllmanNumbers.size());
  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // CopyToReg should be close to its uses to facilitate coalescing and
    // avoid spilling.
    return 0;
  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG ||
      Opc == TargetOpcode::INSERT_SUBREG)
    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
    // close to their uses to facilitate coalescing.
    return 0;
  if (SU->NumSuccs == 0 && SU->NumPreds != 0)
    // If SU does not have a register use, i.e. it doesn't produce a value
    // that would be consumed (e.g. store), then it terminates a chain of
    // computation. Give it a large SethiUllman number so it will be
    // scheduled right before its predecessors and does not lengthen
    // their live ranges.
    return 0xffff;
  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
    // If SU does not have a register def, schedule it close to its uses
    // because it does not lengthen any live ranges.
    return 0;
#if 1
  return SethiUllmanNumbers[SU->NodeNum];
#else
  unsigned Priority = SethiUllmanNumbers[SU->NodeNum];
  if (SU->isCallOp) {
    // FIXME: This assumes all of the defs are used as call operands.
    int NP = (int)Priority - SU->getNode()->getNumValues();
    return (NP > 0) ? NP : 0;
  }
  return Priority;
#endif
}
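// For example (illustrative): a store (NumSuccs == 0, NumPreds != 0) gets the
// sentinel priority 0xffff, so the bottom-up scheduler pops it before the
// nodes feeding it and never stretches their live ranges. The disabled
// call-operand variant above would instead discount a call operand's priority
// by the call's value count, e.g. Priority 5 with 2 values gives NP = 3.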
//===----------------------------------------------------------------------===//
//                     Register Pressure Tracking
//===----------------------------------------------------------------------===//
void RegReductionPQBase::dumpRegPressure() const {
  for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
         E = TRI->regclass_end(); I != E; ++I) {
    const TargetRegisterClass *RC = *I;
    unsigned Id = RC->getID();
    unsigned RP = RegPressure[Id];
    if (!RP) continue;
    DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
          << '\n');
  }
}
bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
  if (!TLI)
    return false;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      continue;
    }
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance()) {
      unsigned RCId, Cost;
      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);

      if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
        return true;
    }
  }
  return false;
}
bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
  const SDNode *N = SU->getNode();

  if (!N->isMachineOpcode() || !SU->NumSuccs)
    return false;

  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  for (unsigned i = 0; i != NumDefs; ++i) {
    EVT VT = N->getValueType(i);
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    if (RegPressure[RCId] >= RegLimit[RCId])
      return true;
  }
  return false;
}
// Compute the register pressure contribution by this instruction by counting
// up for uses that are not live and down for defs. Only count register classes
// that are already under high pressure. As a side effect, compute the number of
// uses of registers that are already live.
//
// FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
// so could probably be factored.
int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
  LiveUses = 0;
  int PDiff = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      if (PredSU->getNode()->isMachineOpcode())
        ++LiveUses;
      continue;
    }
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance()) {
      EVT VT = RegDefPos.GetValue();
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      if (RegPressure[RCId] >= RegLimit[RCId])
        ++PDiff;
    }
  }
  const SDNode *N = SU->getNode();

  if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
    return PDiff;

  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  for (unsigned i = 0; i != NumDefs; ++i) {
    EVT VT = N->getValueType(i);
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    if (RegPressure[RCId] >= RegLimit[RCId])
      --PDiff;
  }
  return PDiff;
}
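// Worked example (illustrative): if SU reads two values whose defining nodes
// are already fully live (NumRegDefsLeft == 0) and defines one register in a
// class at its limit, the two live reads set LiveUses = 2 without touching
// PDiff, and the pressured def gives PDiff = -1: scheduling SU is a net
// pressure reduction for that class.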
void RegReductionPQBase::ScheduledNode(SUnit *SU) {
  if (!TracksRegPressure)
    return;

  if (!SU->getNode())
    return;

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      continue;
    }
    // FIXME: The ScheduleDAG currently loses information about which of a
    // node's values is consumed by each dependence. Consequently, if the node
    // defines multiple register classes, we don't know which to pressurize
    // here. Instead the following loop consumes the register defs in an
    // arbitrary order. At least it handles the common case of clustered loads
    // to the same class. For precise liveness, each SDep needs to indicate the
    // result number. But that tightly couples the ScheduleDAG with the
    // SelectionDAG making updates tricky. A simpler hack would be to attach a
    // value type or register class to SDep.
    //
    // The most important aspect of register tracking is balancing the increase
    // here with the reduction further below. Note that this SU may use multiple
    // defs in PredSU. They can't be determined here, but we've already
    // compensated by reducing NumRegDefsLeft in PredSU during
    // ScheduleDAGSDNodes::AddSchedEdges.
    --PredSU->NumRegDefsLeft;
    unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
      if (SkipRegDefs)
        continue;
      unsigned RCId, Cost;
      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
      RegPressure[RCId] += Cost;
      break;
    }
  }

  // We should have this assert, but there may be dead SDNodes that never
  // materialize as SUnits, so they don't appear to generate liveness.
  //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
  int SkipRegDefs = (int)SU->NumRegDefsLeft;
  for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
       RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
    if (SkipRegDefs > 0)
      continue;
    unsigned RCId, Cost;
    GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
    if (RegPressure[RCId] < Cost) {
      // Register pressure tracking is imprecise. This can happen. But we try
      // hard not to let it happen because it likely results in poor scheduling.
      DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") has too many regdefs\n");
      RegPressure[RCId] = 0;
    }
    else {
      RegPressure[RCId] -= Cost;
    }
  }
  dumpRegPressure();
}
void RegReductionPQBase::UnscheduledNode(SUnit *SU) {
  if (!TracksRegPressure)
    return;

  const SDNode *N = SU->getNode();
  if (!N) return;

  if (!N->isMachineOpcode()) {
    if (N->getOpcode() != ISD::CopyToReg)
      return;
  } else {
    unsigned Opc = N->getMachineOpcode();
    if (Opc == TargetOpcode::EXTRACT_SUBREG ||
        Opc == TargetOpcode::INSERT_SUBREG ||
        Opc == TargetOpcode::SUBREG_TO_REG ||
        Opc == TargetOpcode::REG_SEQUENCE ||
        Opc == TargetOpcode::IMPLICIT_DEF)
      return;
  }

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
    // counts data deps.
    if (PredSU->NumSuccsLeft != PredSU->Succs.size())
      continue;
    const SDNode *PN = PredSU->getNode();
    if (!PN->isMachineOpcode()) {
      if (PN->getOpcode() == ISD::CopyFromReg) {
        EVT VT = PN->getValueType(0);
        unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
        RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      }
      continue;
    }
    unsigned POpc = PN->getMachineOpcode();
    if (POpc == TargetOpcode::IMPLICIT_DEF)
      continue;
    if (POpc == TargetOpcode::EXTRACT_SUBREG ||
        POpc == TargetOpcode::INSERT_SUBREG ||
        POpc == TargetOpcode::SUBREG_TO_REG) {
      EVT VT = PN->getValueType(0);
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      continue;
    }
    unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
    for (unsigned i = 0; i != NumDefs; ++i) {
      EVT VT = PN->getValueType(i);
      if (!PN->hasAnyUseOfValue(i))
        continue;
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
        // Register pressure tracking is imprecise. This can happen.
        RegPressure[RCId] = 0;
      else
        RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
    }
  }

  // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
  // may transfer data dependencies to CopyToReg.
  if (SU->NumSuccs && N->isMachineOpcode()) {
    unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
      EVT VT = N->getValueType(i);
      if (VT == MVT::Glue || VT == MVT::Other)
        continue;
      if (!N->hasAnyUseOfValue(i))
        continue;
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
    }
  }

  dumpRegPressure();
}
//===----------------------------------------------------------------------===//
//           Dynamic Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//
/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
static unsigned closestSucc(const SUnit *SU) {
  unsigned MaxHeight = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain succs
    unsigned Height = I->getSUnit()->getHeight();
    // If there are a bunch of CopyToRegs stacked up, they should be considered
    // to be at the same position.
    if (I->getSUnit()->getNode() &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
      Height = closestSucc(I->getSUnit())+1;
    if (Height > MaxHeight)
      MaxHeight = Height;
  }
  return MaxHeight;
}
/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
/// for scratch registers, i.e. number of data dependencies.
static unsigned calcMaxScratches(const SUnit *SU) {
  unsigned Scratches = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    Scratches++;
  }
  return Scratches;
}
/// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
/// CopyFromReg from a virtual register.
static bool hasOnlyLiveInOpers(const SUnit *SU) {
  bool RetVal = false;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *PredSU = I->getSUnit();
    if (PredSU->getNode() &&
        PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
      unsigned Reg =
        cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        RetVal = true;
        continue;
      }
    }
    return false;
  }
  return RetVal;
}
/// hasOnlyLiveOutUses - Return true if SU has only value successors that are
/// CopyToReg to a virtual register. This SU def is probably a liveout and
/// it has no other use. It should be scheduled closer to the terminator.
static bool hasOnlyLiveOutUses(const SUnit *SU) {
  bool RetVal = false;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *SuccSU = I->getSUnit();
    if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
      unsigned Reg =
        cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        RetVal = true;
        continue;
      }
    }
    return false;
  }
  return RetVal;
}
// Set isVRegCycle for a node with only live-in operands and live-out uses.
// Also set isVRegCycle for its CopyFromReg operands.
//
// This is only relevant for single-block loops, in which case the VRegCycle
// node is likely an induction variable in which the operand and target virtual
// registers should be coalesced (e.g. pre/post increment values). Setting the
// isVRegCycle flag helps the scheduler prioritize other uses of the same
// CopyFromReg so that this node becomes the virtual register "kill". This
// avoids interference between the values live in and out of the block and
// eliminates a copy inside the loop.
static void initVRegCycle(SUnit *SU) {
  if (DisableSchedVRegCycle)
    return;

  if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
    return;

  DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");

  SU->isVRegCycle = true;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    I->getSUnit()->isVRegCycle = true;
  }
}
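// Illustrative single-block loop (hypothetical, simplified):
//   loop:
//     i1 = CopyFromReg(R)    ; value live into the block
//     ...other uses of i1...
//     i2 = add i1, 1         ; only live-in operands, only live-out uses
//     CopyToReg(R, i2)       ; value live out of the block
// The add and its CopyFromReg operand are both marked isVRegCycle, which
// helps make the increment the kill of R's incoming value, so the register
// allocator can coalesce R's incoming and outgoing values and the in-loop
// copy disappears.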
// After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of
// CopyFromReg operands. We should no longer penalize other uses of this VReg.
static void resetVRegCycle(SUnit *SU) {
  if (!SU->isVRegCycle)
    return;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    if (PredSU->isVRegCycle) {
      assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
             "VRegCycle def must be CopyFromReg");
      I->getSUnit()->isVRegCycle = 0;
    }
  }
}
// Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This
// means a node that defines the VRegCycle has not been scheduled yet.
static bool hasVRegCycleUse(const SUnit *SU) {
  // If this SU also defines the VReg, don't hoist it as a "use".
  if (SU->isVRegCycle)
    return false;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    if (I->getSUnit()->isVRegCycle &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
      DEBUG(dbgs() << "  VReg cycle use: SU (" << SU->NodeNum << ")\n");
      return true;
    }
  }
  return false;
}
// Check for either a dependence (latency) or resource (hazard) stall.
//
// Note: The ScheduleHazardRecognizer interface requires a non-const SU.
static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
  if ((int)SPQ->getCurCycle() < Height) return true;
  if (SPQ->getHazardRec()->getHazardType(SU, 0)
      != ScheduleHazardRecognizer::NoHazard)
    return true;
  return false;
}
// Return -1 if left has higher priority, 1 if right has higher priority.
// Return 0 if latency-based priority is equivalent.
static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
                            RegReductionPQBase *SPQ) {
  // Scheduling an instruction that uses a VReg whose postincrement has not yet
  // been scheduled will induce a copy. Model this as an extra cycle of latency.
  int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
  int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
  int LHeight = (int)left->getHeight() + LPenalty;
  int RHeight = (int)right->getHeight() + RPenalty;

  bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) &&
    BUHasStall(left, LHeight, SPQ);
  bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) &&
    BUHasStall(right, RHeight, SPQ);

  // If scheduling one of the nodes will cause a pipeline stall, delay it.
  // If scheduling both of the nodes will cause a pipeline stall, sort them
  // according to their height.
  if (LStall) {
    if (!RStall) {
      DEBUG(++FactorCount[FactStall]);
      return 1;
    }
    if (LHeight != RHeight) {
      DEBUG(++FactorCount[FactStall]);
      return LHeight > RHeight ? 1 : -1;
    }
  } else if (RStall) {
    DEBUG(++FactorCount[FactStall]);
    return -1;
  }

  // If either node is scheduling for latency, sort them by height/depth
  // and latency.
  if (!checkPref || (left->SchedulingPref == Sched::ILP ||
                     right->SchedulingPref == Sched::ILP)) {
    if (DisableSchedCycles) {
      if (LHeight != RHeight) {
        DEBUG(++FactorCount[FactHeight]);
        return LHeight > RHeight ? 1 : -1;
      }
    }
    else {
      // If neither instruction stalls (!LStall && !RStall) then
      // its height is already covered so only its depth matters. We also reach
      // this if both stall but have the same height.
      int LDepth = left->getDepth() - LPenalty;
      int RDepth = right->getDepth() - RPenalty;
      if (LDepth != RDepth) {
        DEBUG(++FactorCount[FactDepth]);
        DEBUG(dbgs() << "  Comparing latency of SU (" << left->NodeNum
              << ") depth " << LDepth << " vs SU (" << right->NodeNum
              << ") depth " << RDepth << "\n");
        return LDepth < RDepth ? 1 : -1;
      }
    }
    if (left->Latency != right->Latency) {
      DEBUG(++FactorCount[FactOther]);
      return left->Latency > right->Latency ? 1 : -1;
    }
  }
  return 0;
}
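// Arithmetic sketch (illustrative): a VRegCycle use on the left only gives
// LPenalty = 1, so left->getHeight() == 4 compares as LHeight == 5 against a
// right of height 5. The modeled copy cancels left's one-cycle advantage and
// the decision falls through to the depth and latency comparisons.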
static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
  // Schedule physical register definitions close to their use. This is
  // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
  // long as shortening physreg live ranges is generally good, we can defer
  // creating a subtarget hook.
  if (!DisableSchedPhysRegJoin) {
    bool LHasPhysReg = left->hasPhysRegDefs;
    bool RHasPhysReg = right->hasPhysRegDefs;
    if (LHasPhysReg != RHasPhysReg) {
      DEBUG(++FactorCount[FactRegUses]);
#ifndef NDEBUG
      const char *PhysRegMsg[] = {" has no physreg", " defines a physreg"};
#endif
      DEBUG(dbgs() << "  SU (" << left->NodeNum << ") "
            << PhysRegMsg[LHasPhysReg] << "  SU(" << right->NodeNum << ") "
            << PhysRegMsg[RHasPhysReg] << "\n");
      return LHasPhysReg < RHasPhysReg;
    }
  }

  // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);

  // Be really careful about hoisting call operands above previous calls.
  // Only allow it if it would reduce register pressure.
  if (left->isCall && right->isCallOp) {
    unsigned RNumVals = right->getNode()->getNumValues();
    RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
  }
  if (right->isCall && left->isCallOp) {
    unsigned LNumVals = left->getNode()->getNumValues();
    LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
  }

  if (LPriority != RPriority) {
    DEBUG(++FactorCount[FactStatic]);
    return LPriority > RPriority;
  }

  // If one or both of the nodes are calls and their Sethi-Ullman numbers are
  // the same, then keep source order.
  if (left->isCall || right->isCall) {
    unsigned LOrder = SPQ->getNodeOrdering(left);
    unsigned ROrder = SPQ->getNodeOrdering(right);

    // Prefer an ordering where the lower the non-zero order number, the higher
    // the priority.
    if ((LOrder || ROrder) && LOrder != ROrder)
      return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
  }

  // Try schedule def + use closer when Sethi-Ullman numbers are the same.
  // e.g.
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // and the following instructions are both ready.
  // t2 = op c3
  // t4 = op c4
  //
  // Then schedule t2 = op first.
  // i.e.
  // t4 = op c4
  // t2 = op c3
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // This creates more short live intervals.
  unsigned LDist = closestSucc(left);
  unsigned RDist = closestSucc(right);
  if (LDist != RDist) {
    DEBUG(++FactorCount[FactOther]);
    return LDist < RDist;
  }

  // How many registers become live when the node is scheduled.
  unsigned LScratch = calcMaxScratches(left);
  unsigned RScratch = calcMaxScratches(right);
  if (LScratch != RScratch) {
    DEBUG(++FactorCount[FactOther]);
    return LScratch > RScratch;
  }

  // Comparing latency against a call makes little sense unless the node
  // is register pressure-neutral.
  if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
    return (left->NodeQueueId > right->NodeQueueId);

  // Do not compare latencies when one or both of the nodes are calls.
  if (!DisableSchedCycles &&
      !(left->isCall || right->isCall)) {
    int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
    if (result != 0)
      return result > 0;
  }
  else {
    if (left->getHeight() != right->getHeight()) {
      DEBUG(++FactorCount[FactHeight]);
      return left->getHeight() > right->getHeight();
    }

    if (left->getDepth() != right->getDepth()) {
      DEBUG(++FactorCount[FactDepth]);
      return left->getDepth() < right->getDepth();
    }
  }

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  DEBUG(++FactorCount[FactOther]);
  return (left->NodeQueueId > right->NodeQueueId);
}
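// Usage sketch (assumed from the surrounding queue code, not a verbatim
// excerpt): BURRSort and the *_ls_rr_sort wrappers act as the strict weak
// ordering that popFromQueue() applies to the available vector, conceptually:
//   SUnit *Best = *std::max_element(Queue.begin(), Queue.end(), Picker);
// where Picker(left, right) == true means right is preferred; the final
// NodeQueueId test guarantees the order is total and deterministic.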
// Bottom up
bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  return BURRSort(left, right, SPQ);
}

// Source order, otherwise bottom up.
bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  unsigned LOrder = SPQ->getNodeOrdering(left);
  unsigned ROrder = SPQ->getNodeOrdering(right);

  // Prefer an ordering where the lower the non-zero order number, the higher
  // the priority.
  if ((LOrder || ROrder) && LOrder != ROrder)
    return LOrder != 0 && (LOrder < ROrder || ROrder == 0);

  return BURRSort(left, right, SPQ);
}
// If the time between now and when the instruction will be ready can cover
// the spill code, then avoid adding it to the ready queue. This gives long
// stalls highest priority and allows hoisting across calls. It should also
// speed up processing the available queue.
bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  static const unsigned ReadyDelay = 3;

  if (SPQ->MayReduceRegPressure(SU)) return true;

  if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
      != ScheduleHazardRecognizer::NoHazard)
    return false;

  return true;
}
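// Worked numbers (illustrative): with ReadyDelay == 3, an SUnit of height 9
// at CurCycle 5 is held back (9 > 5 + 3), while one of height 8 is admitted
// provided the hazard recognizer, probed three cycles early via
// getHazardType(SU, -ReadyDelay), reports NoHazard.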
// Return true if right should be scheduled with higher priority than left.
bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  if (left->isCall || right->isCall)
    // No way to compute latency of calls.
    return BURRSort(left, right, SPQ);

  bool LHigh = SPQ->HighRegPressure(left);
  bool RHigh = SPQ->HighRegPressure(right);
  // Avoid causing spills. If register pressure is high, schedule for
  // register pressure reduction.
  if (LHigh && !RHigh) {
    DEBUG(++FactorCount[FactPressureDiff]);
    DEBUG(dbgs() << "  pressure SU(" << left->NodeNum << ") > SU("
          << right->NodeNum << ")\n");
    return true;
  }
  else if (!LHigh && RHigh) {
    DEBUG(++FactorCount[FactPressureDiff]);
    DEBUG(dbgs() << "  pressure SU(" << right->NodeNum << ") > SU("
          << left->NodeNum << ")\n");
    return false;
  }
  if (!LHigh && !RHigh) {
    int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
    if (result != 0)
      return result > 0;
  }
  return BURRSort(left, right, SPQ);
}
// Schedule as many instructions in each cycle as possible. So don't make an
// instruction available unless it is ready in the current cycle.
bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  if (SU->getHeight() > CurCycle) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, 0)
      != ScheduleHazardRecognizer::NoHazard)
    return false;

  return true;
}
static bool canEnableCoalescing(SUnit *SU) {
  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // CopyToReg should be close to its uses to facilitate coalescing and
    // avoid spilling.
    return true;

  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG ||
      Opc == TargetOpcode::INSERT_SUBREG)
    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
    // close to their uses to facilitate coalescing.
    return true;

  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
    // If SU does not have a register def, schedule it close to its uses
    // because it does not lengthen any live ranges.
    return true;

  return false;
}
// list-ilp is currently an experimental scheduler that allows various
// heuristics to be enabled prior to the normal register reduction logic.
bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  if (left->isCall || right->isCall)
    // No way to compute latency of calls.
    return BURRSort(left, right, SPQ);

  unsigned LLiveUses = 0, RLiveUses = 0;
  int LPDiff = 0, RPDiff = 0;
  if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
    LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
    RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
  }
  if (!DisableSchedRegPressure && LPDiff != RPDiff) {
    DEBUG(++FactorCount[FactPressureDiff]);
    DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
          << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
    return LPDiff > RPDiff;
  }

  if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
    bool LReduce = canEnableCoalescing(left);
    bool RReduce = canEnableCoalescing(right);
    DEBUG(if (LReduce != RReduce) ++FactorCount[FactPressureDiff]);
    if (LReduce && !RReduce) return false;
    if (RReduce && !LReduce) return true;
  }

  if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
    DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
          << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
    DEBUG(++FactorCount[FactRegUses]);
    return LLiveUses < RLiveUses;
  }

  if (!DisableSchedStalls) {
    bool LStall = BUHasStall(left, left->getHeight(), SPQ);
    bool RStall = BUHasStall(right, right->getHeight(), SPQ);
    if (LStall != RStall) {
      DEBUG(++FactorCount[FactHeight]);
      return left->getHeight() > right->getHeight();
    }
  }

  if (!DisableSchedCriticalPath) {
    int spread = (int)left->getDepth() - (int)right->getDepth();
    if (std::abs(spread) > MaxReorderWindow) {
      DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
            << left->getDepth() << " != SU(" << right->NodeNum << "): "
            << right->getDepth() << "\n");
      DEBUG(++FactorCount[FactDepth]);
      return left->getDepth() < right->getDepth();
    }
  }

  if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
    int spread = (int)left->getHeight() - (int)right->getHeight();
    if (std::abs(spread) > MaxReorderWindow) {
      DEBUG(++FactorCount[FactHeight]);
      return left->getHeight() > right->getHeight();
    }
  }

  return BURRSort(left, right, SPQ);
}
void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
  SUnits = &sunits;
  // Add pseudo dependency edges for two-address nodes.
  AddPseudoTwoAddrDeps();
  // Reroute edges to nodes with multiple uses.
  if (!TracksRegPressure)
    PrescheduleNodesWithMultipleUses();
  // Calculate node priorities.
  CalculateSethiUllmanNumbers();

  // For single block loops, mark nodes that look like canonical IV increments.
  if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
    for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
      initVRegCycle(&sunits[i]);
    }
  }
}
//===----------------------------------------------------------------------===//
//                    Preschedule for Register Pressure
//===----------------------------------------------------------------------===//
bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
  if (SU->isTwoAddress) {
    unsigned Opc = SU->getNode()->getMachineOpcode();
    const MCInstrDesc &MCID = TII->get(Opc);
    unsigned NumRes = MCID.getNumDefs();
    unsigned NumOps = MCID.getNumOperands() - NumRes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) {
        SDNode *DU = SU->getNode()->getOperand(i).getNode();
        if (DU->getNodeId() != -1 &&
            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
          return true;
      }
    }
  }
  return false;
}
/// canClobberReachingPhysRegUse - True if SU would clobber one of its
/// successor's explicit physregs whose definition can reach DepSU.
/// i.e. DepSU should not be scheduled above SU.
static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU,
                                         ScheduleDAGRRList *scheduleDAG,
                                         const TargetInstrInfo *TII,
                                         const TargetRegisterInfo *TRI) {
  const unsigned *ImpDefs
    = TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
  if (!ImpDefs)
    return false;

  for (SUnit::const_succ_iterator SI = SU->Succs.begin(), SE = SU->Succs.end();
       SI != SE; ++SI) {
    SUnit *SuccSU = SI->getSUnit();
    for (SUnit::const_pred_iterator PI = SuccSU->Preds.begin(),
           PE = SuccSU->Preds.end(); PI != PE; ++PI) {
      if (!PI->isAssignedRegDep())
        continue;

      for (const unsigned *ImpDef = ImpDefs; *ImpDef; ++ImpDef) {
        // Return true if SU clobbers this physical register use and the
        // definition of the register reaches from DepSU. IsReachable queries a
        // topological forward sort of the DAG (following the successors).
        if (TRI->regsOverlap(*ImpDef, PI->getReg()) &&
            scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
          return true;
      }
    }
  }
  return false;
}
/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
                                  const TargetInstrInfo *TII,
                                  const TargetRegisterInfo *TRI) {
  SDNode *N = SuccSU->getNode();
  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
  assert(ImpDefs && "Caller should check hasPhysRegDefs");
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (!SUNode->isMachineOpcode())
      continue;
    const unsigned *SUImpDefs =
      TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
    if (!SUImpDefs)
      return false;
    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
      EVT VT = N->getValueType(i);
      if (VT == MVT::Glue || VT == MVT::Other)
        continue;
      if (!N->hasAnyUseOfValue(i))
        continue;
      unsigned Reg = ImpDefs[i - NumDefs];
      for (;*SUImpDefs; ++SUImpDefs) {
        unsigned SUReg = *SUImpDefs;
        if (TRI->regsOverlap(Reg, SUReg))
          return true;
      }
    }
  }
  return false;
}
/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
/// are not handled well by the general register pressure reduction
/// heuristics. When presented with code like this:
///
///      N
///    / |
///   /  |
///  U  store
///
/// the heuristics tend to push the store up, but since the
/// operand of the store has another use (U), this would increase
/// the length of that other use (the U->N edge).
///
/// This function transforms code like the above to route U's
/// dependence through the store when possible, like this:
///
///      N
///      ||
///      ||
///     store
///       |
///       U
///
/// This results in the store being scheduled immediately
/// after N, which shortens the U->N live range, reducing
/// register pressure.
///
void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
  // Visit all the nodes in topological order, working top-down.
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    // For now, only look at nodes with no data successors, such as stores.
    // These are especially important, due to the heuristics in
    // getNodePriority for nodes with no data successors.
    if (SU->NumSuccs != 0)
      continue;
    // For now, only look at nodes with exactly one data predecessor.
    if (SU->NumPreds != 1)
      continue;
    // Avoid prescheduling copies to virtual registers, which don't behave
    // like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyToReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Locate the single data predecessor.
    SUnit *PredSU = 0;
    for (SUnit::const_pred_iterator II = SU->Preds.begin(),
         EE = SU->Preds.end(); II != EE; ++II)
      if (!II->isCtrl()) {
        PredSU = II->getSUnit();
        break;
      }
    assert(PredSU);

    // Don't rewrite edges that carry physregs, because that requires additional
    // support infrastructure.
    if (PredSU->hasPhysRegDefs)
      continue;
    // Short-circuit the case where SU is PredSU's only data successor.
    if (PredSU->NumSuccs == 1)
      continue;
    // Avoid prescheduling to copies from virtual registers, which don't behave
    // like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyFromReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Perform checks on the successors of PredSU.
    for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
         EE = PredSU->Succs.end(); II != EE; ++II) {
      SUnit *PredSuccSU = II->getSUnit();
      if (PredSuccSU == SU) continue;
      // If PredSU has another successor with no data successors, for
      // now don't attempt to choose either over the other.
      if (PredSuccSU->NumSuccs == 0)
        goto outer_loop_continue;
      // Don't break physical register dependencies.
      if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
        if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
          goto outer_loop_continue;
      // Don't introduce graph cycles.
      if (scheduleDAG->IsReachable(SU, PredSuccSU))
        goto outer_loop_continue;
    }

    // Ok, the transformation is safe and the heuristics suggest it is
    // profitable. Update the graph.
    DEBUG(dbgs() << "    Prescheduling SU #" << SU->NodeNum
                 << " next to PredSU #" << PredSU->NodeNum
                 << " to guide scheduling in the presence of multiple uses\n");
    for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
      SDep Edge = PredSU->Succs[i];
      assert(!Edge.isAssignedRegDep());
      SUnit *SuccSU = Edge.getSUnit();
      if (SuccSU != SU) {
        Edge.setSUnit(PredSU);
        scheduleDAG->RemovePred(SuccSU, Edge);
        scheduleDAG->AddPred(SU, Edge);
        Edge.setSUnit(SU);
        scheduleDAG->AddPred(SuccSU, Edge);
        --i;
      }
    }
  outer_loop_continue:;
  }
}
/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule). If both nodes are two-address, favor the
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
void RegReductionPQBase::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->getNode();
    if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
      continue;

    bool isLiveOut = hasOnlyLiveOutUses(SU);
    unsigned Opc = Node->getMachineOpcode();
    const MCInstrDesc &MCID = TII->get(Opc);
    unsigned NumRes = MCID.getNumDefs();
    unsigned NumOps = MCID.getNumOperands() - NumRes;
    for (unsigned j = 0; j != NumOps; ++j) {
      if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1)
        continue;
      SDNode *DU = SU->getNode()->getOperand(j).getNode();
      if (DU->getNodeId() == -1)
        continue;
      const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
      if (!DUSU) continue;
      for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
           E = DUSU->Succs.end(); I != E; ++I) {
        if (I->isCtrl()) continue;
        SUnit *SuccSU = I->getSUnit();
        if (SuccSU == SU)
          continue;
        // Be conservative. Ignore if nodes aren't at roughly the same
        // depth and height.
        if (SuccSU->getHeight() < SU->getHeight() &&
            (SU->getHeight() - SuccSU->getHeight()) > 1)
          continue;
        // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
        // constrains whatever is using the copy, instead of the copy
        // itself. In the case that the copy is coalesced, this
        // preserves the intent of the pseudo two-address heuristics.
        while (SuccSU->Succs.size() == 1 &&
               SuccSU->getNode()->isMachineOpcode() &&
               SuccSU->getNode()->getMachineOpcode() ==
                 TargetOpcode::COPY_TO_REGCLASS)
          SuccSU = SuccSU->Succs.front().getSUnit();
        // Don't constrain non-instruction nodes.
        if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
          continue;
        // Don't constrain nodes with physical register defs if the
        // predecessor can clobber them.
        if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
          if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
            continue;
        }
        // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
        // these may be coalesced away. We want them close to their uses.
        unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
        if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
            SuccOpc == TargetOpcode::INSERT_SUBREG ||
            SuccOpc == TargetOpcode::SUBREG_TO_REG)
          continue;
        if (!canClobberReachingPhysRegUse(SuccSU, SU, scheduleDAG, TII, TRI) &&
            (!canClobber(SuccSU, DUSU) ||
             (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
             (!SU->isCommutable && SuccSU->isCommutable)) &&
            !scheduleDAG->IsReachable(SuccSU, SU)) {
          DEBUG(dbgs() << "    Adding a pseudo-two-addr edge from SU #"
                << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
          scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
                                        /*Reg=*/0, /*isNormalMemory=*/false,
                                        /*isMustAlias=*/false,
                                        /*isArtificial=*/true));
        }
      }
    }
  }
}
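// Illustrative two-address scenario (hypothetical pseudo-instructions):
//   a1 = ADDri a0, 1    ; dst tied to src: the def clobbers a0
//   b  = SUBrr a0, c    ; another reader of a0
// The artificial edge makes the SUB a predecessor of the ADD, so the
// bottom-up scheduler picks the ADD first and places it below the SUB. The
// SUB then reads a0 before it is clobbered, and two-address lowering needs
// no extra copy.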
//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//
llvm::ScheduleDAGSDNodes *
llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
                                 CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  BURegReductionPriorityQueue *PQ =
    new BURegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  SrcRegReductionPriorityQueue *PQ =
    new SrcRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = &IS->getTargetLowering();

  HybridBURRPriorityQueue *PQ =
    new HybridBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);

  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
                                CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = &IS->getTargetLowering();

  ILPBURRPriorityQueue *PQ =
    new ILPBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}
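// Usage note: these factories are not called directly; they are reached
// through the scheduler registry, e.g. (assuming a stock llc build):
//   llc -pre-RA-sched=list-burr ...
//   llc -pre-RA-sched=source ...
//   llc -pre-RA-sched=list-hybrid ...
//   llc -pre-RA-sched=list-ilp ...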