//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//
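
// In outline, the bottom-up loop implemented by ListScheduleBottomUp() below
// is:
//
//   while (!AvailableQueue->empty() || !Interferences.empty()) {
//     SUnit *SU = PickNodeToScheduleBottomUp(); // may backtrack or clone
//     AdvancePastStalls(SU);                    // honor latency and hazards
//     ScheduleNodeBottomUp(SU);                 // emit; release predecessors
//   }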

#define DEBUG_TYPE "pre-RA-sched"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <climits>
using namespace llvm;

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  sourceListDAGScheduler("source",
                         "Similar to list-burr but schedules in source "
                         "order when possible",
                         createSourceListDAGScheduler);

static RegisterScheduler
  hybridListDAGScheduler("list-hybrid",
                         "Bottom-up register pressure aware list scheduling "
                         "which tries to balance latency and register pressure",
                         createHybridListDAGScheduler);

static RegisterScheduler
  ILPListDAGScheduler("list-ilp",
                      "Bottom-up register pressure aware list scheduling "
                      "which tries to balance ILP and register pressure",
                      createILPListDAGScheduler);
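
// Each registration above adds a factory to the scheduler registry; a variant
// is selected at compile time (e.g. -pre-RA-sched=list-burr) or by the
// target's default scheduling preference.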

static cl::opt<bool> DisableSchedCycles(
  "disable-sched-cycles", cl::Hidden, cl::init(false),
  cl::desc("Disable cycle-level precision during preRA scheduling"));

// Temporary sched=list-ilp flags until the heuristics are robust.
// Some options are also available under sched=list-hybrid.
static cl::opt<bool> DisableSchedRegPressure(
  "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
  cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
  "disable-sched-live-uses", cl::Hidden, cl::init(true),
  cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedVRegCycle(
  "disable-sched-vrcycle", cl::Hidden, cl::init(false),
  cl::desc("Disable virtual register cycle interference checks"));
static cl::opt<bool> DisableSchedPhysRegJoin(
  "disable-sched-physreg-join", cl::Hidden, cl::init(false),
  cl::desc("Disable physreg def-use affinity"));
static cl::opt<bool> DisableSchedStalls(
  "disable-sched-stalls", cl::Hidden, cl::init(true),
  cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
  "disable-sched-critical-path", cl::Hidden, cl::init(false),
  cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
  "disable-sched-height", cl::Hidden, cl::init(false),
  cl::desc("Disable scheduled-height priority in sched=list-ilp"));
static cl::opt<bool> Disable2AddrHack(
  "disable-2addr-hack", cl::Hidden, cl::init(true),
  cl::desc("Disable scheduler's two-address hack"));

static cl::opt<int> MaxReorderWindow(
  "max-sched-reorder", cl::Hidden, cl::init(6),
  cl::desc("Number of instructions to allow ahead of the critical path "
           "in sched=list-ilp"));

static cl::opt<unsigned> AvgIPC(
  "sched-avg-ipc", cl::Hidden, cl::init(1),
  cl::desc("Average inst/cycle when no target itinerary exists."));
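
// Note: an AvgIPC greater than 1 activates the IssueCount-based cycle
// advancement in ScheduleNodeBottomUp() when no hazard recognizer is enabled.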

//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
///
namespace {
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// NeedLatency - True if the scheduler will make use of latency information.
  bool NeedLatency;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// CurCycle - The current scheduler state corresponds to this cycle.
  unsigned CurCycle;

  /// MinAvailableCycle - Cycle of the soonest available instruction.
  unsigned MinAvailableCycle;

  /// IssueCount - Count instructions issued in this cycle.
  /// Currently valid only for bottom-up scheduling.
  unsigned IssueCount;

  /// LiveRegDefs - A set of physical registers and their definitions
  /// that are "live". These nodes must be scheduled before any other nodes
  /// that modify the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<SUnit*> LiveRegGens;

  // Collect interferences between physical register use/defs.
  // Each interference is an SUnit and set of physical registers.
  SmallVector<SUnit*, 4> Interferences;
  typedef DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMapT;
  LRegsMapT LRegsMap;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

  // Hack to keep track of the inverse of FindCallSeqStart without more crazy
  // DAG crawling.
  DenseMap<SUnit*, SUnit*> CallSeqEndForStart;
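  // (Maps each CALLSEQ_BEGIN SUnit to the SUnit of its matching CALLSEQ_END;
  // filled in by ReleasePredecessors() and consulted in
  // UnscheduleNodeBottomUp() so the call resource can be restored without
  // walking the DAG again.)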

public:
  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
                    SchedulingPriorityQueue *availqueue,
                    CodeGenOpt::Level OptLevel)
    : ScheduleDAGSDNodes(mf),
      NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
      Topo(SUnits) {

    const TargetMachine &tm = mf.getTarget();
    if (DisableSchedCycles || !NeedLatency)
      HazardRec = new ScheduleHazardRecognizer();
    else
      HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
  }

  ~ScheduleDAGRRList() {
    delete HazardRec;
    delete AvailableQueue;
  }

  void Schedule();

  ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - adds a predecessor edge to SUnit SU.
  /// This returns true if this is a new predecessor.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - removes a predecessor edge from SUnit SU.
  /// This returns true if an edge was removed.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  bool isReady(SUnit *SU) {
    return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
           AvailableQueue->isReady(SU);
  }

  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU);
  void ReleasePending();
  void AdvanceToCycle(unsigned NextCycle);
  void AdvancePastStalls(SUnit *SU);
  void EmitNode(SUnit *SU);
  void ScheduleNodeBottomUp(SUnit*);
  void CapturePred(SDep *PredEdge);
  void UnscheduleNodeBottomUp(SUnit*);
  void RestoreHazardCheckerBottomUp();
  void BacktrackBottomUp(SUnit*, SUnit*);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);

  void releaseInterferences(unsigned Reg = 0);

  SUnit *PickNodeToScheduleBottomUp();
  void ListScheduleBottomUp();

  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = newSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// forceUnitLatencies - Register-pressure-reducing scheduling doesn't
  /// need actual latency information but the hybrid scheduler does.
  bool forceUnitLatencies() const {
    return !NeedLatency;
  }
};
}  // end anonymous namespace

/// GetCostForDef - Looks up the register class and cost for a given definition.
/// Typically this just means looking up the representative register class,
/// but for untyped values (MVT::Untyped) it means inspecting the node's
/// opcode to determine what register class is being generated.
static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
                          const TargetLowering *TLI,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          unsigned &RegClass, unsigned &Cost,
                          const MachineFunction &MF) {
  MVT VT = RegDefPos.GetValue();

  // Special handling for untyped values. These values can only come from
  // the expansion of custom DAG-to-DAG patterns.
  if (VT == MVT::Untyped) {
    const SDNode *Node = RegDefPos.GetNode();

    // Special handling for CopyFromReg of untyped values.
    if (!Node->isMachineOpcode() && Node->getOpcode() == ISD::CopyFromReg) {
      unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
      const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(Reg);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Opcode = Node->getMachineOpcode();
    if (Opcode == TargetOpcode::REG_SEQUENCE) {
      unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
      const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Idx = RegDefPos.GetIdx();
    const MCInstrDesc Desc = TII->get(Opcode);
    const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI, MF);
    RegClass = RC->getID();
    // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
    // better way to determine it.
    Cost = 1;
  } else {
    RegClass = TLI->getRepRegClassFor(VT)->getID();
    Cost = TLI->getRepRegClassCostFor(VT);
  }
}

/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DEBUG(dbgs()
        << "********** List Scheduling BB#" << BB->getNumber()
        << " '" << BB->getName() << "' **********\n");

  CurCycle = 0;
  IssueCount = 0;
  MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
  NumLiveRegs = 0;
  // Allocate slots for each physical register, plus one for a special register
  // to track the virtual resource of a calling sequence.
  LiveRegDefs.resize(TRI->getNumRegs() + 1, NULL);
  LiveRegGens.resize(TRI->getNumRegs() + 1, NULL);
  CallSeqEndForStart.clear();
  assert(Interferences.empty() && LRegsMap.empty() && "stale Interferences");

  // Build the scheduling graph.
  BuildSchedGraph(NULL);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  HazardRec->Reset();

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();

  AvailableQueue->releaseState();

  DEBUG({
      dbgs() << "*** Final schedule ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;

  if (!forceUnitLatencies()) {
    // Updating predecessor's height. This is now the cycle when the
    // predecessor can be scheduled without causing a pipeline stall.
    PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
  }

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;

    unsigned Height = PredSU->getHeight();
    if (Height < MinAvailableCycle)
      MinAvailableCycle = Height;

    if (isReady(PredSU)) {
      AvailableQueue->push(PredSU);
    }
    // CapturePred and others may have left the node in the pending queue,
    // avoid adding it twice.
    else if (!PredSU->isPending) {
      PredSU->isPending = true;
      PendingQueue.push_back(PredSU);
    }
  }
}

/// IsChainDependent - Test if Outer is reachable from Inner through
/// chain dependencies.
static bool IsChainDependent(SDNode *Outer, SDNode *Inner,
                             unsigned NestLevel,
                             const TargetInstrInfo *TII) {
  SDNode *N = Outer;
  for (;;) {
    if (N == Inner)
      return true;
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        if (IsChainDependent(N->getOperand(i).getNode(), Inner, NestLevel, TII))
          return true;
      return false;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        if (NestLevel == 0)
          return false;
        --NestLevel;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return false;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return false;
  }
}

/// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
/// the corresponding (lowered) CALLSEQ_BEGIN node.
///
/// NestLevel and MaxNest are used in recursion to indicate the current level
/// of nesting of CALLSEQ_BEGIN and CALLSEQ_END pairs, as well as the maximum
/// level seen so far.
///
/// TODO: It would be better to give CALLSEQ_END an explicit operand to point
/// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it.
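///
/// For example, with sequence BEGIN2/END2 nested inside BEGIN1/END1, walking
/// up the chain from END1 raises the nest level to 1 at END1 and 2 at END2,
/// lowers it back to 1 at BEGIN2, and reaches 0 at BEGIN1, which is then
/// returned as the match.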
static SDNode *
FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
                 const TargetInstrInfo *TII) {
  for (;;) {
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      SDNode *Best = 0;
      unsigned BestMaxNest = MaxNest;
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        unsigned MyNestLevel = NestLevel;
        unsigned MyMaxNest = MaxNest;
        if (SDNode *New = FindCallSeqStart(N->getOperand(i).getNode(),
                                           MyNestLevel, MyMaxNest, TII))
          if (!Best || (MyMaxNest > BestMaxNest)) {
            Best = New;
            BestMaxNest = MyMaxNest;
          }
      }
      assert(Best);
      MaxNest = BestMaxNest;
      return Best;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
        MaxNest = std::max(MaxNest, NestLevel);
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NestLevel != 0);
        --NestLevel;
        if (NestLevel == 0)
          return N;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return 0;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return 0;
  }
}

/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively creates one large live range
/// across a sequence of two-address nodes. This is important because the
/// entire chain must be scheduled together. Example:
///
/// flags = (3) add
/// flags = (2) addc flags
/// flags = (1) addc flags
///
/// results in
///
/// LiveRegDefs[flags] = 3
/// LiveRegGens[flags] = 1
///
/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
      assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
             "interference on register dependence");
      LiveRegDefs[I->getReg()] = I->getSUnit();
      if (!LiveRegGens[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegGens[I->getReg()] = SU;
      }
    }
  }

  // If we're scheduling a lowered CALLSEQ_END, find the corresponding
  // CALLSEQ_BEGIN. Inject an artificial physical register dependence between
  // these nodes, to prevent other calls from being interscheduled with them.
  unsigned CallResource = TRI->getNumRegs();
  if (!LiveRegDefs[CallResource])
    for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode())
      if (Node->isMachineOpcode() &&
          Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        unsigned NestLevel = 0;
        unsigned MaxNest = 0;
        SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);

        SUnit *Def = &SUnits[N->getNodeId()];
        CallSeqEndForStart[Def] = SU;

        ++NumLiveRegs;
        LiveRegDefs[CallResource] = Def;
        LiveRegGens[CallResource] = SU;
        break;
      }
}

/// Check to see if any of the pending instructions are ready to issue. If
/// so, add them to the available queue.
void ScheduleDAGRRList::ReleasePending() {
  if (DisableSchedCycles) {
    assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
    return;
  }

  // If the available queue is empty, it is safe to reset MinAvailableCycle.
  if (AvailableQueue->empty())
    MinAvailableCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
    unsigned ReadyCycle = PendingQueue[i]->getHeight();
    if (ReadyCycle < MinAvailableCycle)
      MinAvailableCycle = ReadyCycle;

    if (PendingQueue[i]->isAvailable) {
      if (!isReady(PendingQueue[i]))
        continue;
      AvailableQueue->push(PendingQueue[i]);
    }
    PendingQueue[i]->isPending = false;
    PendingQueue[i] = PendingQueue.back();
    PendingQueue.pop_back();
    --i; --e;
  }
}

/// Move the scheduler state forward by the specified number of Cycles.
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
  if (NextCycle <= CurCycle)
    return;

  IssueCount = 0;
  AvailableQueue->setCurCycle(NextCycle);
  if (!HazardRec->isEnabled()) {
    // Bypass lots of virtual calls in case of long latency.
    CurCycle = NextCycle;
  }
  else {
    for (; CurCycle != NextCycle; ++CurCycle) {
      HazardRec->RecedeCycle();
    }
  }
  // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
  // available Q to release pending nodes at least once before popping.
  ReleasePending();
}

/// Move the scheduler state forward until the specified node's dependents are
/// ready and can be scheduled with no resource conflicts.
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
  if (DisableSchedCycles)
    return;

  // FIXME: Nodes such as CopyFromReg probably should not advance the current
  // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
  // has predecessors the cycle will be advanced when they are scheduled.
  // But given the crude nature of modeling latency through such nodes, we
  // currently need to treat these nodes like real instructions.
  // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;

  unsigned ReadyCycle = SU->getHeight();

  // Bump CurCycle to account for latency. We assume the latency of other
  // available instructions may be hidden by the stall (not a full pipe stall).
  // This updates the hazard recognizer's cycle before reserving resources for
  // this instruction.
  AdvanceToCycle(ReadyCycle);

  // Calls are scheduled in their preceding cycle, so don't conflict with
  // hazards from instructions after the call. EmitNode will reset the
  // scoreboard state before emitting the call.
  if (SU->isCall)
    return;

  // FIXME: For resource conflicts in very long non-pipelined stages, we
  // should probably skip ahead here to avoid useless scoreboard checks.
  int Stalls = 0;
  while (true) {
    ScheduleHazardRecognizer::HazardType HT =
      HazardRec->getHazardType(SU, -Stalls);

    if (HT == ScheduleHazardRecognizer::NoHazard)
      break;

    ++Stalls;
  }
  AdvanceToCycle(CurCycle + Stalls);
}
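
// (The stall count is passed to getHazardType() negated; hazard recognizers
// expect negative stall counts when scheduling bottom-up.)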

/// Record this SUnit in the HazardRecognizer.
/// Does not update CurCycle.
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
  if (!HazardRec->isEnabled())
    return;

  // Check for phys reg copy.
  if (!SU->getNode())
    return;

  switch (SU->getNode()->getOpcode()) {
  default:
    assert(SU->getNode()->isMachineOpcode() &&
           "This target-independent node should not be scheduled.");
    break;
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor:
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
  case ISD::CopyToReg:
  case ISD::CopyFromReg:
  case ISD::EH_LABEL:
    // Noops don't affect the scoreboard state. Copies are likely to be
    // removed.
    return;
  case ISD::INLINEASM:
    // For inline asm, clear the pipeline state.
    HazardRec->Reset();
    return;
  }
  if (SU->isCall) {
    // Calls are scheduled with their preceding instructions. For bottom-up
    // scheduling, clear the pipeline state before emitting.
    HazardRec->Reset();
  }

  HazardRec->EmitInstruction(SU);
}

static void resetVRegCycle(SUnit *SU);

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

#ifndef NDEBUG
  if (CurCycle < SU->getHeight())
    DEBUG(dbgs() << "   Height [" << SU->getHeight()
          << "] pipeline stall!\n");
#endif

  // FIXME: Do not modify node height. It may interfere with
  // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
  // node, its ready cycle can aid heuristics, and after scheduling it can
  // indicate the scheduled cycle.
  SU->setHeightToAtLeast(CurCycle);

  // Reserve resources for the scheduled instruction.
  EmitNode(SU);

  Sequence.push_back(SU);

  AvailableQueue->scheduledNode(SU);

  // If HazardRec is disabled, and each inst counts as one cycle, then
  // advance CurCycle before ReleasePredecessors to avoid useless pushes to
  // PendingQueue for schedulers that implement HasReadyFilter.
  if (!HazardRec->isEnabled() && AvgIPC < 2)
    AdvanceToCycle(CurCycle + 1);

  // Update liveness of predecessors before successors to avoid treating a
  // two-address node as a live range def.
  ReleasePredecessors(SU);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
      releaseInterferences(I->getReg());
    }
  }
  // Release the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  if (LiveRegDefs[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
        releaseInterferences(CallResource);
      }
    }

  resetVRegCycle(SU);

  SU->isScheduled = true;

  // Conditions under which the scheduler should eagerly advance the cycle:
  // (1) No available instructions
  // (2) All pipelines full, so available instructions must have hazards.
  //
  // If HazardRec is disabled, the cycle was pre-advanced before calling
  // ReleasePredecessors. In that case, IssueCount should remain 0.
  //
  // Check AvailableQueue after ReleasePredecessors in case of zero latency.
  if (HazardRec->isEnabled() || AvgIPC > 1) {
    if (SU->getNode() && SU->getNode()->isMachineOpcode())
      ++IssueCount;
    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
        || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
      AdvanceToCycle(CurCycle + 1);
  }
}

/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
  ++PredSU->NumSuccsLeft;
}

/// UnscheduleNodeBottomUp - Remove the node from the schedule, update its
/// state and its predecessors' states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
  DEBUG(SU->dump(this));

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
      releaseInterferences(I->getReg());
    }
  }

  // Reclaim the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isMachineOpcode() &&
        SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
      ++NumLiveRegs;
      LiveRegDefs[CallResource] = SU;
      LiveRegGens[CallResource] = CallSeqEndForStart[SU];
    }
  }

  // Release the special call resource dependence, if this is the end
  // of a call.
  if (LiveRegGens[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
        releaseInterferences(CallResource);
      }
    }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (!LiveRegDefs[I->getReg()])
        ++NumLiveRegs;
      // This becomes the nearest def. Note that an earlier def may still be
      // pending if this is a two-address node.
      LiveRegDefs[I->getReg()] = SU;
      if (LiveRegGens[I->getReg()] == NULL ||
          I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
        LiveRegGens[I->getReg()] = I->getSUnit();
    }
  }
  if (SU->getHeight() < MinAvailableCycle)
    MinAvailableCycle = SU->getHeight();

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
    // Don't make available until backtracking is complete.
    SU->isPending = true;
    PendingQueue.push_back(SU);
  }
  else {
    AvailableQueue->push(SU);
  }
  AvailableQueue->unscheduledNode(SU);
}

/// After backtracking, the hazard checker needs to be restored to a state
/// corresponding to the current cycle.
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
  HazardRec->Reset();

  unsigned LookAhead = std::min((unsigned)Sequence.size(),
                                HazardRec->getMaxLookAhead());
  if (LookAhead == 0)
    return;

  std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
  unsigned HazardCycle = (*I)->getHeight();
  for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
    SUnit *SU = *I;
    for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
      HazardRec->RecedeCycle();
    }
    EmitNode(SU);
  }
}

/// BacktrackBottomUp - Backtrack scheduling to an earlier cycle by
/// unscheduling nodes from the tail of the sequence up to and including BtSU,
/// in order to make it possible to schedule SU.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
  SUnit *OldSU = Sequence.back();
  while (true) {
    Sequence.pop_back();
    // FIXME: use ready cycle instead of height
    CurCycle = OldSU->getHeight();
    UnscheduleNodeBottomUp(OldSU);
    AvailableQueue->setCurCycle(CurCycle);
    if (OldSU == BtSU)
      break;
    OldSU = Sequence.back();
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  RestoreHazardCheckerBottomUp();

  ReleasePending();

  ++NumBacktracks;
}
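
// Backtracking is expensive: every node from the tail of the sequence back to
// BtSU is unscheduled and must be re-picked, which is why callers first check
// WillCreateCycle() and why NumBacktracks is tracked as a statistic.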

static bool isOperandOf(const SUnit *SU, SDNode *N) {
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isOperandOf(N))
      return true;
  }
  return false;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  if (SU->getNode()->getGluedNode())
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    // unfolding an x86 DEC64m operation results in store, dec, load which
    // can't be handled here so quit
    if (NewNodes.size() == 3)
      return NULL;

    DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value
    // but with different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      InitNumRegDefsLeft(LoadSU);
      computeLatency(LoadSU);
    }

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (MCID.isCommutable())
      NewSU->isCommutable = true;

    InitNumRegDefsLeft(NewSU);
    computeLatency(NewSU);

    // Record all the edges to and from the old SU, by category.
    SmallVector<SDep, 4> ChainPreds;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPreds.push_back(*I);
      else if (isOperandOf(I->getSUnit(), LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
      // Balance register pressure.
      if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
          && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
        --NewSU->NumRegDefsLeft;
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    // Add a data dependency to reflect that NewSU reads the value defined
    // by LoadSU.
    SDep D(LoadSU, SDep::Data, 0);
    D.setLatency(LoadSU->Latency);
    AddPred(NewSU, D);

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "    Duplicating SU #" << SU->NodeNum << "\n");
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                              const TargetRegisterClass *DestRC,
                                              const TargetRegisterClass *SrcRC,
                                              SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
    else {
      // Avoid scheduling the def-side copy before other successors. Otherwise
      // we could introduce another physreg interference on the copy and
      // continue inserting copies indefinitely.
      AddPred(SuccSU, SDep(CopyFromSU, SDep::Artificial));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  SDep FromDep(SU, SDep::Data, Reg);
  FromDep.setLatency(SU->Latency);
  AddPred(CopyFromSU, FromDep);
  SDep ToDep(CopyFromSU, SDep::Data, 0);
  ToDep.setLatency(CopyFromSU->Latency);
  AddPred(CopyToSU, ToDep);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = MCID.getNumDefs();
  for (const uint16_t *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}
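
// (A machine node's value list holds its explicit defs first, followed by its
// implicit defs in MCInstrDesc order, so counting Reg's position among the
// implicit defs, offset by getNumDefs(), selects the right value type.)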

/// CheckForLiveRegDef - If the specified register def of the specified SUnit
/// clobbers any "live" registers, add the interfering registers to LRegs.
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVector<unsigned, 4> &LRegs,
                               const TargetRegisterInfo *TRI) {
  for (MCRegAliasIterator AliasI(Reg, TRI, true); AliasI.isValid(); ++AliasI) {

    // Check if Ref is live.
    if (!LiveRegDefs[*AliasI]) continue;

    // Allow multiple uses of the same def.
    if (LiveRegDefs[*AliasI] == SU) continue;

    // Add Reg to the set of interfering live regs.
    if (RegAdded.insert(*AliasI)) {
      LRegs.push_back(*AliasI);
    }
  }
}

/// CheckForLiveRegDefMasked - Check for any live physregs that are clobbered
/// by RegMask, and add them to LRegs.
static void CheckForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask,
                                     std::vector<SUnit*> &LiveRegDefs,
                                     SmallSet<unsigned, 4> &RegAdded,
                                     SmallVector<unsigned, 4> &LRegs) {
  // Look at all live registers. Skip Reg0 and the special CallResource.
  for (unsigned i = 1, e = LiveRegDefs.size()-1; i != e; ++i) {
    if (!LiveRegDefs[i]) continue;
    if (LiveRegDefs[i] == SU) continue;
    if (!MachineOperand::clobbersPhysReg(RegMask, i)) continue;
    if (RegAdded.insert(i))
      LRegs.push_back(i);
  }
}

/// getNodeRegMask - Returns the register mask attached to an SDNode, if any.
static const uint32_t *getNodeRegMask(const SDNode *N) {
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    if (const RegisterMaskSDNode *Op =
          dyn_cast<RegisterMaskSDNode>(N->getOperand(i).getNode()))
      return Op->getRegMask();
  return NULL;
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  //
  // If SU is the currently live definition of the same register that it uses,
  // then we are free to schedule it.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;
    // If we're in the middle of scheduling a call, don't begin scheduling
    // another call. Also, don't allow any physical registers to be live across
    // the call.
    if (Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
      // Check the special calling-sequence resource.
      unsigned CallResource = TRI->getNumRegs();
      if (LiveRegDefs[CallResource]) {
        SDNode *Gen = LiveRegGens[CallResource]->getNode();
        while (SDNode *Glued = Gen->getGluedNode())
          Gen = Glued;
        if (!IsChainDependent(Gen, Node, 0, TII) && RegAdded.insert(CallResource))
          LRegs.push_back(CallResource);
      }
    }
    if (const uint32_t *RegMask = getNodeRegMask(Node))
      CheckForLiveRegDefMasked(SU, RegMask, LiveRegDefs, RegAdded, LRegs);

    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    for (const uint16_t *Reg = MCID.getImplicitDefs(); *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
  }

  return !LRegs.empty();
}

void ScheduleDAGRRList::releaseInterferences(unsigned Reg) {
  // Add the nodes that aren't ready back onto the available list.
  for (unsigned i = Interferences.size(); i > 0; --i) {
    SUnit *SU = Interferences[i-1];
    LRegsMapT::iterator LRegsPos = LRegsMap.find(SU);
    if (Reg) {
      SmallVector<unsigned, 4> &LRegs = LRegsPos->second;
      if (std::find(LRegs.begin(), LRegs.end(), Reg) == LRegs.end())
        continue;
    }
    SU->isPending = false;
    // The interfering node may no longer be available due to backtracking.
    // Furthermore, it may have been made available again, in which case it is
    // now already in the AvailableQueue.
    if (SU->isAvailable && !SU->NodeQueueId) {
      DEBUG(dbgs() << "    Repushing SU #" << SU->NodeNum << '\n');
      AvailableQueue->push(SU);
    }
    if (i < Interferences.size())
      Interferences[i-1] = Interferences.back();
    Interferences.pop_back();
    LRegsMap.erase(LRegsPos);
  }
}

/// Return a node that can be scheduled in this cycle. Requirements:
/// (1) Ready: latency has been satisfied
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
  SUnit *CurSU = AvailableQueue->empty() ? 0 : AvailableQueue->pop();
  while (CurSU) {
    SmallVector<unsigned, 4> LRegs;
    if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
      break;
    DEBUG(dbgs() << "    Interfering reg " << TRI->getName(LRegs[0])
          << " SU #" << CurSU->NodeNum << '\n');
    std::pair<LRegsMapT::iterator, bool> LRegsPair =
      LRegsMap.insert(std::make_pair(CurSU, LRegs));
    if (LRegsPair.second) {
      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      Interferences.push_back(CurSU);
    }
    else {
      assert(CurSU->isPending && "Interferences are pending");
      // Update the interference with current live regs.
      LRegsPair.first->second = LRegs;
    }
    CurSU = AvailableQueue->pop();
  }
  if (CurSU)
    return CurSU;

  // All candidates are delayed due to live physical reg dependencies.
  // Try backtracking, code duplication, or inserting cross class copies
  // to break it.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    SUnit *TrySU = Interferences[i];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

    // Try unscheduling up to the point where it's safe to schedule
    // this node.
    SUnit *BtSU = NULL;
    unsigned LiveCycle = UINT_MAX;
    for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
      unsigned Reg = LRegs[j];
      if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
        BtSU = LiveRegGens[Reg];
        LiveCycle = BtSU->getHeight();
      }
    }
    if (!WillCreateCycle(TrySU, BtSU)) {
      // BacktrackBottomUp mutates Interferences!
      BacktrackBottomUp(TrySU, BtSU);

      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (BtSU->isAvailable) {
        BtSU->isAvailable = false;
        if (!BtSU->isPending)
          AvailableQueue->remove(BtSU);
      }
      DEBUG(dbgs() << "ARTIFICIAL edge from SU(" << BtSU->NodeNum << ") to SU("
            << TrySU->NodeNum << ")\n");
      AddPred(TrySU, SDep(BtSU, SDep::Artificial));

      // If one or more successors has been unscheduled, then the current
      // node is no longer available.
      if (!TrySU->isAvailable)
        CurSU = AvailableQueue->pop();
      else {
        AvailableQueue->remove(TrySU);
        CurSU = TrySU;
      }
      // Interferences has been mutated. We must break.
      break;
    }
  }

  if (!CurSU) {
    // Can't backtrack. If it's too expensive to copy the value, then try
    // duplicating the nodes that produce these "too expensive to copy"
    // values to break the dependency. In case even that doesn't work,
    // insert cross class copies.
    // If it's not too expensive, i.e. cost != -1, issue copies.
    SUnit *TrySU = Interferences[0];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
    assert(LRegs.size() == 1 && "Can't handle this yet!");
    unsigned Reg = LRegs[0];
    SUnit *LRDef = LiveRegDefs[Reg];
    EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
    const TargetRegisterClass *RC =
      TRI->getMinimalPhysRegClass(Reg, VT);
    const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

    // If cross copy register class is the same as RC, then it must be possible
    // to copy the value directly. Do not try duplicating the def.
    // If cross copy register class is not the same as RC, then it's possible to
    // copy the value but it requires cross register class copies and it is
    // expensive.
    // If cross copy register class is null, then it's not possible to copy
    // the value at all.
    SUnit *NewDef = 0;
    if (DestRC != RC) {
      NewDef = CopyAndMoveSuccessors(LRDef);
      if (!DestRC && !NewDef)
        report_fatal_error("Can't handle live physical register dependency!");
    }
    if (!NewDef) {
      // Issue copies, these can be expensive cross register class copies.
      SmallVector<SUnit*, 2> Copies;
      InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
      DEBUG(dbgs() << "    Adding an edge from SU #" << TrySU->NodeNum
            << " to SU #" << Copies.front()->NodeNum << "\n");
      AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
      NewDef = Copies.back();
    }

    DEBUG(dbgs() << "    Adding an edge from SU #" << NewDef->NodeNum
          << " to SU #" << TrySU->NodeNum << "\n");
    LiveRegDefs[Reg] = NewDef;
    AddPred(NewDef, SDep(TrySU, SDep::Artificial));
    TrySU->isAvailable = false;
    CurSU = NewDef;
  }
  assert(CurSU && "Unable to resolve live physical register dependencies!");
  return CurSU;
}

/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty() || !Interferences.empty()) {
    DEBUG(dbgs() << "\nExamining Available:\n";
          AvailableQueue->dump(this));

    // Pick the best node to schedule taking all constraints into
    // consideration.
    SUnit *SU = PickNodeToScheduleBottomUp();

    AdvancePastStalls(SU);

    ScheduleNodeBottomUp(SU);

    while (AvailableQueue->empty() && !PendingQueue.empty()) {
      // Advance the cycle to free resources. Skip ahead to the next ready SU.
      assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
      AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
    }
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifyScheduledSequence(/*isBottomUp=*/true);
#endif
}

//===----------------------------------------------------------------------===//
//                RegReductionPriorityQueue Definition
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
// to reduce register pressure.
//
namespace {
class RegReductionPQBase;

struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
  bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
};

#ifndef NDEBUG
template<class SF>
struct reverse_sort : public queue_sort {
  SF &SortFunc;
  reverse_sort(SF &sf) : SortFunc(sf) {}
  reverse_sort(const reverse_sort &RHS) : SortFunc(RHS.SortFunc) {}

  bool operator()(SUnit* left, SUnit* right) const {
    // reverse left/right rather than simply !SortFunc(left, right)
    // to expose different paths in the comparison logic.
    return SortFunc(right, left);
  }
};
#endif // NDEBUG

/// bu_ls_rr_sort - Priority function for bottom up register pressure
/// reduction scheduler.
struct bu_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
  bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// src_ls_rr_sort - Priority function for source order scheduler.
struct src_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  src_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  src_ls_rr_sort(const src_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// hybrid_ls_rr_sort - Priority function for hybrid scheduler.
struct hybrid_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  hybrid_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};

// ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
// scheduler.
struct ilp_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  ilp_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};

class RegReductionPQBase : public SchedulingPriorityQueue {
protected:
  std::vector<SUnit*> Queue;
  unsigned CurQueueId;
  bool TracksRegPressure;
  bool SrcOrder;

  // SUnits - The SUnits for the current graph.
  std::vector<SUnit> *SUnits;

  MachineFunction &MF;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const TargetLowering *TLI;
  ScheduleDAGRRList *scheduleDAG;

  // SethiUllmanNumbers - The SethiUllman number for each node.
  std::vector<unsigned> SethiUllmanNumbers;

  /// RegPressure - Tracking current reg pressure per register class.
  ///
  std::vector<unsigned> RegPressure;

  /// RegLimit - Tracking the number of allocatable registers per register
  /// class.
  std::vector<unsigned> RegLimit;

public:
  RegReductionPQBase(MachineFunction &mf,
                     bool hasReadyFilter,
                     bool tracksrp,
                     bool srcorder,
                     const TargetInstrInfo *tii,
                     const TargetRegisterInfo *tri,
                     const TargetLowering *tli)
    : SchedulingPriorityQueue(hasReadyFilter),
      CurQueueId(0), TracksRegPressure(tracksrp), SrcOrder(srcorder),
      MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
    if (TracksRegPressure) {
      unsigned NumRC = TRI->getNumRegClasses();
      RegLimit.resize(NumRC);
      RegPressure.resize(NumRC);
      std::fill(RegLimit.begin(), RegLimit.end(), 0);
      std::fill(RegPressure.begin(), RegPressure.end(), 0);
      for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
             E = TRI->regclass_end(); I != E; ++I)
        RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
    }
  }

  void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
    scheduleDAG = scheduleDag;
  }

  ScheduleHazardRecognizer* getHazardRec() {
    return scheduleDAG->getHazardRec();
  }

  void initNodes(std::vector<SUnit> &sunits);

  void addNode(const SUnit *SU);

  void updateNode(const SUnit *SU);

  void releaseState() {
    SUnits = 0;
    SethiUllmanNumbers.clear();
    std::fill(RegPressure.begin(), RegPressure.end(), 0);
  }

  unsigned getNodePriority(const SUnit *SU) const;

  unsigned getNodeOrdering(const SUnit *SU) const {
    if (!SU->getNode()) return 0;

    return scheduleDAG->DAG->GetOrdering(SU->getNode());
  }

  bool empty() const { return Queue.empty(); }

  void push(SUnit *U) {
    assert(!U->NodeQueueId && "Node in the queue already");
    U->NodeQueueId = ++CurQueueId;
    Queue.push_back(U);
  }

  void remove(SUnit *SU) {
    assert(!Queue.empty() && "Queue is empty!");
    assert(SU->NodeQueueId != 0 && "Not in queue!");
    std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
                                                 SU);
    if (I != prior(Queue.end()))
      std::swap(*I, Queue.back());
    Queue.pop_back();
    SU->NodeQueueId = 0;
  }

  bool tracksRegPressure() const { return TracksRegPressure; }

  void dumpRegPressure() const;

  bool HighRegPressure(const SUnit *SU) const;

  bool MayReduceRegPressure(SUnit *SU) const;

  int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;

  void scheduledNode(SUnit *SU);

  void unscheduledNode(SUnit *SU);

protected:
  bool canClobber(const SUnit *SU, const SUnit *Op);
  void AddPseudoTwoAddrDeps();
  void PrescheduleNodesWithMultipleUses();
  void CalculateSethiUllmanNumbers();
};

template<class SF>
static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
  std::vector<SUnit *>::iterator Best = Q.begin();
  for (std::vector<SUnit *>::iterator I = llvm::next(Q.begin()),
         E = Q.end(); I != E; ++I)
    if (Picker(*Best, *I))
      Best = I;
  SUnit *V = *Best;
  if (Best != prior(Q.end()))
    std::swap(*Best, Q.back());
  Q.pop_back();
  return V;
}

template<class SF>
SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
#ifndef NDEBUG
  if (DAG->StressSched) {
    reverse_sort<SF> RPicker(Picker);
    return popFromQueueImpl(Q, RPicker);
  }
#endif
  (void)DAG;
  return popFromQueueImpl(Q, Picker);
}

template<class SF>
class RegReductionPriorityQueue : public RegReductionPQBase {
  SF Picker;

public:
  RegReductionPriorityQueue(MachineFunction &mf,
                            bool tracksrp,
                            bool srcorder,
                            const TargetInstrInfo *tii,
                            const TargetRegisterInfo *tri,
                            const TargetLowering *tli)
    : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, srcorder,
                         tii, tri, tli),
      Picker(this) {}

  bool isBottomUp() const { return SF::IsBottomUp; }

  bool isReady(SUnit *U) const {
    return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
  }

  SUnit *pop() {
    if (Queue.empty()) return NULL;

    SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
    V->NodeQueueId = 0;
    return V;
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump(ScheduleDAG *DAG) const {
    // Emulate pop() without clobbering NodeQueueIds.
    std::vector<SUnit*> DumpQueue = Queue;
    SF DumpPicker = Picker;
    while (!DumpQueue.empty()) {
      SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
      dbgs() << "Height " << SU->getHeight() << ": ";
      SU->dump(DAG);
    }
  }
#endif
};

typedef RegReductionPriorityQueue<bu_ls_rr_sort>
  BURegReductionPriorityQueue;

typedef RegReductionPriorityQueue<src_ls_rr_sort>
  SrcRegReductionPriorityQueue;

typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
  HybridBURRPriorityQueue;

typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
  ILPBURRPriorityQueue;
}  // end anonymous namespace
//===----------------------------------------------------------------------===//
//           Static Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//
// Check for special nodes that bypass scheduling heuristics.
// Currently this pushes TokenFactor nodes down, but may be used for other
// pseudo-ops as well.
//
// Return -1 to schedule right above left, 1 for left above right.
// Return 0 if no bias exists.
static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
  bool LSchedLow = left->isScheduleLow;
  bool RSchedLow = right->isScheduleLow;
  if (LSchedLow != RSchedLow)
    return LSchedLow < RSchedLow ? 1 : -1;
  return 0;
}
/// CalcNodeSethiUllmanNumber - Compute the Sethi-Ullman number.
/// Smaller number is the higher priority.
static unsigned
CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    } else if (PredSethiUllman == SethiUllmanNumber)
      ++Extra;
  }

  SethiUllmanNumber += Extra;

  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}
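// Worked example (illustrative only): for
//   t1 = load ; t2 = load ; t3 = add t1, t2
// the loads have no data preds, so each receives the minimum number 1. For
// t3, the max over its preds is 1 and a second pred ties that max, so Extra
// becomes 1 and t3 receives 1 + 1 = 2 -- the classic Sethi-Ullman result
// that two equally demanding subtrees need one extra register to hold the
// first subtree's result while the second is evaluated.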
/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
void RegReductionPQBase::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}

void RegReductionPQBase::addNode(const SUnit *SU) {
  unsigned SUSize = SethiUllmanNumbers.size();
  if (SUnits->size() > SUSize)
    SethiUllmanNumbers.resize(SUSize*2, 0);
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}

void RegReductionPQBase::updateNode(const SUnit *SU) {
  SethiUllmanNumbers[SU->NodeNum] = 0;
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}
// Lower priority means schedule further down. For bottom-up scheduling, lower
// priority SUs are scheduled before higher priority SUs.
unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
  assert(SU->NodeNum < SethiUllmanNumbers.size());
  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // CopyToReg should be close to its uses to facilitate coalescing and
    // avoid spilling.
    return 0;
  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG ||
      Opc == TargetOpcode::INSERT_SUBREG)
    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
    // close to their uses to facilitate coalescing.
    return 0;
  if (SU->NumSuccs == 0 && SU->NumPreds != 0)
    // If SU does not have a register use, i.e. it doesn't produce a value
    // that would be consumed (e.g. store), then it terminates a chain of
    // computation. Give it a large SethiUllman number so it will be
    // scheduled right before its predecessors, so that it doesn't lengthen
    // their live ranges.
    return 0xffff;
  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
    // If SU does not have a register def, schedule it close to its uses
    // because it does not lengthen any live ranges.
    return 0;
#if 1
  return SethiUllmanNumbers[SU->NodeNum];
#else
  unsigned Priority = SethiUllmanNumbers[SU->NodeNum];
  if (SU->isCallOp) {
    // FIXME: This assumes all of the defs are used as call operands.
    int NP = (int)Priority - SU->getNode()->getNumValues();
    return (NP > 0) ? NP : 0;
  }
  return Priority;
#endif
}
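// Summarizing the special cases above: CopyToReg, TokenFactor, and the
// subregister pseudo-ops return 0 so they stay adjacent to their uses and
// can coalesce away, while chain-terminating nodes such as stores return
// 0xffff so they stay next to the computation whose live ranges they end.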
//===----------------------------------------------------------------------===//
//                     Register Pressure Tracking
//===----------------------------------------------------------------------===//
void RegReductionPQBase::dumpRegPressure() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
         E = TRI->regclass_end(); I != E; ++I) {
    const TargetRegisterClass *RC = *I;
    unsigned Id = RC->getID();
    unsigned RP = RegPressure[Id];
    if (!RP) continue;
    DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
          << '\n');
  }
#endif
}
bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
  if (!TLI)
    return false;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) continue;
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance()) {
      unsigned RCId, Cost;
      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
      if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
        return true;
    }
  }
  return false;
}
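// Illustrative numbers (the register class and limit are hypothetical): with
// RegLimit[GR32] = 6 and RegPressure[GR32] = 5, a candidate whose unscheduled
// operand would add one GR32 def of cost 1 satisfies 5 + 1 >= 6, so
// HighRegPressure returns true and the hybrid sort below switches from
// latency to pressure reduction.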
bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
  const SDNode *N = SU->getNode();

  if (!N->isMachineOpcode() || !SU->NumSuccs)
    return false;

  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  for (unsigned i = 0; i != NumDefs; ++i) {
    MVT VT = N->getSimpleValueType(i);
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    if (RegPressure[RCId] >= RegLimit[RCId])
      return true;
  }
  return false;
}
// Compute the register pressure contribution of this instruction by counting
// up for uses that are not live and down for defs. Only count register classes
// that are already under high pressure. As a side effect, compute the number
// of uses of registers that are already live.
//
// FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
// so could probably be factored.
int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
  LiveUses = 0;
  int PDiff = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      if (PredSU->getNode()->isMachineOpcode())
        ++LiveUses;
      continue;
    }
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance()) {
      MVT VT = RegDefPos.GetValue();
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      if (RegPressure[RCId] >= RegLimit[RCId])
        ++PDiff;
    }
  }
  const SDNode *N = SU->getNode();

  if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
    return PDiff;

  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  for (unsigned i = 0; i != NumDefs; ++i) {
    MVT VT = N->getSimpleValueType(i);
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    if (RegPressure[RCId] >= RegLimit[RCId])
      --PDiff;
  }
  return PDiff;
}
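// Illustrative arithmetic: a node whose two operands are still-unscheduled
// defs in an over-limit class, and which itself defines one value in that
// class, gets PDiff = +2 - 1 = +1: scheduling it bottom-up would begin two
// live ranges while ending one. ilp_ls_rr_sort below prefers the candidate
// with the smaller PDiff.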
void RegReductionPQBase::scheduledNode(SUnit *SU) {
  if (!TracksRegPressure)
    return;

  if (!SU->getNode())
    return;

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0)
      continue;

    // FIXME: The ScheduleDAG currently loses information about which of a
    // node's values is consumed by each dependence. Consequently, if the node
    // defines multiple register classes, we don't know which to pressurize
    // here. Instead the following loop consumes the register defs in an
    // arbitrary order. At least it handles the common case of clustered loads
    // to the same class. For precise liveness, each SDep needs to indicate the
    // result number. But that tightly couples the ScheduleDAG with the
    // SelectionDAG, making updates tricky. A simpler hack would be to attach a
    // value type or register class to SDep.
    //
    // The most important aspect of register tracking is balancing the increase
    // here with the reduction further below. Note that this SU may use multiple
    // defs in PredSU. That can't be determined here, but we've already
    // compensated by reducing NumRegDefsLeft in PredSU during
    // ScheduleDAGSDNodes::AddSchedEdges.
    --PredSU->NumRegDefsLeft;
    unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
      if (SkipRegDefs)
        continue;
      unsigned RCId, Cost;
      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
      RegPressure[RCId] += Cost;
      break;
    }
  }

  // We should have this assert, but there may be dead SDNodes that never
  // materialize as SUnits, so they don't appear to generate liveness.
  //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
  int SkipRegDefs = (int)SU->NumRegDefsLeft;
  for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
       RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
    if (SkipRegDefs > 0)
      continue;
    unsigned RCId, Cost;
    GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
    if (RegPressure[RCId] < Cost) {
      // Register pressure tracking is imprecise. This can happen. But we try
      // hard not to let it happen because it likely results in poor scheduling.
      DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") has too many regdefs\n");
      RegPressure[RCId] = 0;
    }
    else {
      RegPressure[RCId] -= Cost;
    }
  }
  dumpRegPressure();
}
void RegReductionPQBase::unscheduledNode(SUnit *SU) {
  if (!TracksRegPressure)
    return;

  const SDNode *N = SU->getNode();
  if (!N) return;

  if (!N->isMachineOpcode()) {
    if (N->getOpcode() != ISD::CopyToReg)
      return;
  } else {
    unsigned Opc = N->getMachineOpcode();
    if (Opc == TargetOpcode::EXTRACT_SUBREG ||
        Opc == TargetOpcode::INSERT_SUBREG ||
        Opc == TargetOpcode::SUBREG_TO_REG ||
        Opc == TargetOpcode::REG_SEQUENCE ||
        Opc == TargetOpcode::IMPLICIT_DEF)
      return;
  }

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    SUnit *PredSU = I->getSUnit();
    // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
    // counts data deps.
    if (PredSU->NumSuccsLeft != PredSU->Succs.size())
      continue;
    const SDNode *PN = PredSU->getNode();
    if (!PN->isMachineOpcode()) {
      if (PN->getOpcode() == ISD::CopyFromReg) {
        MVT VT = PN->getSimpleValueType(0);
        unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
        RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      }
      continue;
    }
    unsigned POpc = PN->getMachineOpcode();
    if (POpc == TargetOpcode::IMPLICIT_DEF)
      continue;
    if (POpc == TargetOpcode::EXTRACT_SUBREG ||
        POpc == TargetOpcode::INSERT_SUBREG ||
        POpc == TargetOpcode::SUBREG_TO_REG) {
      MVT VT = PN->getSimpleValueType(0);
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      continue;
    }
    unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
    for (unsigned i = 0; i != NumDefs; ++i) {
      MVT VT = PN->getSimpleValueType(i);
      if (!PN->hasAnyUseOfValue(i))
        continue;
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
        // Register pressure tracking is imprecise. This can happen.
        RegPressure[RCId] = 0;
      else
        RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
    }
  }

  // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
  // may transfer data dependencies to CopyToReg.
  if (SU->NumSuccs && N->isMachineOpcode()) {
    unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
      MVT VT = N->getSimpleValueType(i);
      if (VT == MVT::Glue || VT == MVT::Other)
        continue;
      if (!N->hasAnyUseOfValue(i))
        continue;
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
    }
  }

  dumpRegPressure();
}
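// Note: unscheduledNode is the inverse bookkeeping of scheduledNode; it is
// invoked when the list scheduler backtracks and re-queues an already
// scheduled node, so the pressure counters must be restored symmetrically.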
//===----------------------------------------------------------------------===//
//          Dynamic Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//
/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
static unsigned closestSucc(const SUnit *SU) {
  unsigned MaxHeight = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain succs
    unsigned Height = I->getSUnit()->getHeight();
    // If there are a bunch of CopyToRegs stacked up, they should be considered
    // to be at the same position.
    if (I->getSUnit()->getNode() &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
      Height = closestSucc(I->getSUnit())+1;
    if (Height > MaxHeight)
      MaxHeight = Height;
  }
  return MaxHeight;
}

/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
/// for scratch registers, i.e. the number of data dependencies.
static unsigned calcMaxScratches(const SUnit *SU) {
  unsigned Scratches = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    Scratches++;
  }
  return Scratches;
}
/// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
/// CopyFromReg from a virtual register.
static bool hasOnlyLiveInOpers(const SUnit *SU) {
  bool RetVal = false;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *PredSU = I->getSUnit();
    if (PredSU->getNode() &&
        PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
      unsigned Reg =
        cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        RetVal = true;
        continue;
      }
    }
    return false;
  }
  return RetVal;
}

/// hasOnlyLiveOutUses - Return true if SU has only value successors that are
/// CopyToReg to a virtual register. This SU def is probably a liveout and
/// it has no other use. It should be scheduled closer to the terminator.
static bool hasOnlyLiveOutUses(const SUnit *SU) {
  bool RetVal = false;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *SuccSU = I->getSUnit();
    if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
      unsigned Reg =
        cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        RetVal = true;
        continue;
      }
    }
    return false;
  }
  return RetVal;
}
// Set isVRegCycle for a node with only live in opers and live out uses. Also
// set isVRegCycle for its CopyFromReg operands.
//
// This is only relevant for single-block loops, in which case the VRegCycle
// node is likely an induction variable in which the operand and target virtual
// registers should be coalesced (e.g. pre/post increment values). Setting the
// isVRegCycle flag helps the scheduler prioritize other uses of the same
// CopyFromReg so that this node becomes the virtual register "kill". This
// avoids interference between the values live in and out of the block and
// eliminates a copy inside the loop.
static void initVRegCycle(SUnit *SU) {
  if (DisableSchedVRegCycle)
    return;

  if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
    return;

  DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");

  SU->isVRegCycle = true;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    I->getSUnit()->isVRegCycle = true;
  }
}

// After scheduling the definition of a VRegCycle, clear the isVRegCycle flag
// of CopyFromReg operands. We should no longer penalize other uses of this
// VReg.
static void resetVRegCycle(SUnit *SU) {
  if (!SU->isVRegCycle)
    return;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    if (PredSU->isVRegCycle) {
      assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
             "VRegCycle def must be CopyFromReg");
      I->getSUnit()->isVRegCycle = 0;
    }
  }
}

// Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle.
// This means a node that defines the VRegCycle has not been scheduled yet.
static bool hasVRegCycleUse(const SUnit *SU) {
  // If this SU also defines the VReg, don't hoist it as a "use".
  if (SU->isVRegCycle)
    return false;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    if (I->getSUnit()->isVRegCycle &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
      DEBUG(dbgs() << "  VReg cycle use: SU (" << SU->NodeNum << ")\n");
      return true;
    }
  }
  return false;
}
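// Illustrative pattern (the IR-ish syntax is hypothetical): in a single-block
// loop,
//   %next = add %iv, 1   ; %iv arrives via CopyFromReg, %next leaves via
//                        ; CopyToReg back into the same loop-carried vreg
// the add has only live-in operands and live-out uses, so it is flagged
// isVRegCycle; other in-loop uses of %iv are then preferred first, letting
// the add become the kill of %iv so the copy can coalesce away.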
// Check for either a dependence (latency) or resource (hazard) stall.
//
// Note: The ScheduleHazardRecognizer interface requires a non-const SU.
static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
  if ((int)SPQ->getCurCycle() < Height) return true;
  if (SPQ->getHazardRec()->getHazardType(SU, 0)
      != ScheduleHazardRecognizer::NoHazard)
    return true;
  return false;
}

// Return -1 if left has higher priority, 1 if right has higher priority.
// Return 0 if latency-based priority is equivalent.
static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
                            RegReductionPQBase *SPQ) {
  // Scheduling an instruction that uses a VReg whose postincrement has not yet
  // been scheduled will induce a copy. Model this as an extra cycle of latency.
  int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
  int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
  int LHeight = (int)left->getHeight() + LPenalty;
  int RHeight = (int)right->getHeight() + RPenalty;

  bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) &&
    BUHasStall(left, LHeight, SPQ);
  bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) &&
    BUHasStall(right, RHeight, SPQ);

  // If scheduling one of the nodes will cause a pipeline stall, delay it.
  // If scheduling both of them will cause stalls, sort them according to
  // their height.
  if (LStall) {
    if (!RStall)
      return 1;
    if (LHeight != RHeight)
      return LHeight > RHeight ? 1 : -1;
  } else if (RStall)
    return -1;

  // If either node is scheduling for latency, sort them by height/depth
  // and latency.
  if (!checkPref || (left->SchedulingPref == Sched::ILP ||
                     right->SchedulingPref == Sched::ILP)) {
    // If neither instruction stalls (!LStall && !RStall) and the
    // HazardRecognizer is enabled, grouping instructions by cycle, then its
    // height is already covered so only its depth matters. We also reach this
    // point if both stall but have the same height.
    if (!SPQ->getHazardRec()->isEnabled()) {
      if (LHeight != RHeight)
        return LHeight > RHeight ? 1 : -1;
    }
    int LDepth = left->getDepth() - LPenalty;
    int RDepth = right->getDepth() - RPenalty;
    if (LDepth != RDepth) {
      DEBUG(dbgs() << "  Comparing latency of SU (" << left->NodeNum
            << ") depth " << LDepth << " vs SU (" << right->NodeNum
            << ") depth " << RDepth << "\n");
      return LDepth < RDepth ? 1 : -1;
    }
    if (left->Latency != right->Latency)
      return left->Latency > right->Latency ? 1 : -1;
  }
  return 0;
}
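// Illustrative: with CurCycle = 3, a candidate of height 5 cannot issue
// without stalling (BUHasStall returns true), so a stall-free rival is
// preferred. When height, depth, and latency all tie, the comparison
// returns 0 and callers fall back to BURRSort's pressure heuristics.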
static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
  // Schedule physical register definitions close to their use. This is
  // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
  // long as shortening physreg live ranges is generally good, we can defer
  // creating a subtarget hook.
  if (!DisableSchedPhysRegJoin) {
    bool LHasPhysReg = left->hasPhysRegDefs;
    bool RHasPhysReg = right->hasPhysRegDefs;
    if (LHasPhysReg != RHasPhysReg) {
      #ifndef NDEBUG
      const char *const PhysRegMsg[] = {" has no physreg"," defines a physreg"};
      #endif
      DEBUG(dbgs() << "  SU (" << left->NodeNum << ") "
            << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
            << PhysRegMsg[RHasPhysReg] << "\n");
      return LHasPhysReg < RHasPhysReg;
    }
  }

  // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);

  // Be really careful about hoisting call operands above previous calls.
  // Only allow it if it would reduce register pressure.
  if (left->isCall && right->isCallOp) {
    unsigned RNumVals = right->getNode()->getNumValues();
    RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
  }
  if (right->isCall && left->isCallOp) {
    unsigned LNumVals = left->getNode()->getNumValues();
    LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
  }

  if (LPriority != RPriority)
    return LPriority > RPriority;

  // If one or both of the nodes are calls and their Sethi-Ullman numbers are
  // the same, then keep source order.
  if (left->isCall || right->isCall) {
    unsigned LOrder = SPQ->getNodeOrdering(left);
    unsigned ROrder = SPQ->getNodeOrdering(right);

    // Prefer an ordering where the lower the non-zero order number, the higher
    // the priority.
    if ((LOrder || ROrder) && LOrder != ROrder)
      return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
  }

  // Try schedule def + use closer when Sethi-Ullman numbers are the same.
  // e.g.
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // and the following instructions are both ready.
  // t2 = op c3
  // t4 = op c4
  //
  // Then schedule t2 = op first.
  // i.e.
  // t4 = op c4
  // t2 = op c3
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // This creates more short live intervals.
  unsigned LDist = closestSucc(left);
  unsigned RDist = closestSucc(right);
  if (LDist != RDist)
    return LDist < RDist;

  // How many registers become live when the node is scheduled.
  unsigned LScratch = calcMaxScratches(left);
  unsigned RScratch = calcMaxScratches(right);
  if (LScratch != RScratch)
    return LScratch > RScratch;

  // Comparing latency against a call makes little sense unless the node
  // is register pressure-neutral.
  if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
    return (left->NodeQueueId > right->NodeQueueId);

  // Do not compare latencies when one or both of the nodes are calls.
  if (!DisableSchedCycles &&
      !(left->isCall || right->isCall)) {
    int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
    if (result != 0)
      return result > 0;
  }
  else {
    if (left->getHeight() != right->getHeight())
      return left->getHeight() > right->getHeight();

    if (left->getDepth() != right->getDepth())
      return left->getDepth() < right->getDepth();
  }

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}
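// The cascade above, in order: physreg defs join their uses, Sethi-Ullman
// priority, source order for calls, distance to the closest successor,
// scratch-register count, an optional latency comparison, and finally the
// queue id as a deterministic tie breaker.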
// Bottom up
bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  return BURRSort(left, right, SPQ);
}

// Source order, otherwise bottom up.
bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  unsigned LOrder = SPQ->getNodeOrdering(left);
  unsigned ROrder = SPQ->getNodeOrdering(right);

  // Prefer an ordering where the lower the non-zero order number, the higher
  // the priority.
  if ((LOrder || ROrder) && LOrder != ROrder)
    return LOrder != 0 && (LOrder < ROrder || ROrder == 0);

  return BURRSort(left, right, SPQ);
}
// If the time between now and when the instruction will be ready can cover
// the spill code, then avoid adding it to the ready queue. This gives long
// stalls highest priority and allows hoisting across calls. It should also
// speed up processing the available queue.
bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  static const unsigned ReadyDelay = 3;

  if (SPQ->MayReduceRegPressure(SU)) return true;

  if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
      != ScheduleHazardRecognizer::NoHazard)
    return false;

  return true;
}

// Return true if right should be scheduled with higher priority than left.
bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  if (left->isCall || right->isCall)
    // No way to compute latency of calls.
    return BURRSort(left, right, SPQ);

  bool LHigh = SPQ->HighRegPressure(left);
  bool RHigh = SPQ->HighRegPressure(right);
  // Avoid causing spills. If register pressure is high, schedule for
  // register pressure reduction.
  if (LHigh && !RHigh) {
    DEBUG(dbgs() << "  pressure SU(" << left->NodeNum << ") > SU("
          << right->NodeNum << ")\n");
    return true;
  }
  else if (!LHigh && RHigh) {
    DEBUG(dbgs() << "  pressure SU(" << right->NodeNum << ") > SU("
          << left->NodeNum << ")\n");
    return false;
  }
  if (!LHigh && !RHigh) {
    int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
    if (result != 0)
      return result > 0;
  }
  return BURRSort(left, right, SPQ);
}
// Schedule as many instructions in each cycle as possible. So don't make an
// instruction available unless it is ready in the current cycle.
bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  if (SU->getHeight() > CurCycle) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, 0)
      != ScheduleHazardRecognizer::NoHazard)
    return false;

  return true;
}

static bool canEnableCoalescing(SUnit *SU) {
  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // CopyToReg should be close to its uses to facilitate coalescing and
    // avoid spilling.
    return true;

  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG ||
      Opc == TargetOpcode::INSERT_SUBREG)
    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
    // close to their uses to facilitate coalescing.
    return true;

  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
    // If SU does not have a register def, schedule it close to its uses
    // because it does not lengthen any live ranges.
    return true;

  return false;
}
// list-ilp is currently an experimental scheduler that allows various
// heuristics to be enabled prior to the normal register reduction logic.
bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  if (left->isCall || right->isCall)
    // No way to compute latency of calls.
    return BURRSort(left, right, SPQ);

  unsigned LLiveUses = 0, RLiveUses = 0;
  int LPDiff = 0, RPDiff = 0;
  if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
    LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
    RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
  }
  if (!DisableSchedRegPressure && LPDiff != RPDiff) {
    DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
          << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
    return LPDiff > RPDiff;
  }

  if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
    bool LReduce = canEnableCoalescing(left);
    bool RReduce = canEnableCoalescing(right);
    if (LReduce && !RReduce) return false;
    if (RReduce && !LReduce) return true;
  }

  if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
    DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
          << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
    return LLiveUses < RLiveUses;
  }

  if (!DisableSchedStalls) {
    bool LStall = BUHasStall(left, left->getHeight(), SPQ);
    bool RStall = BUHasStall(right, right->getHeight(), SPQ);
    if (LStall != RStall)
      return left->getHeight() > right->getHeight();
  }

  if (!DisableSchedCriticalPath) {
    int spread = (int)left->getDepth() - (int)right->getDepth();
    if (std::abs(spread) > MaxReorderWindow) {
      DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
            << left->getDepth() << " != SU(" << right->NodeNum << "): "
            << right->getDepth() << "\n");
      return left->getDepth() < right->getDepth();
    }
  }

  if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
    int spread = (int)left->getHeight() - (int)right->getHeight();
    if (std::abs(spread) > MaxReorderWindow)
      return left->getHeight() > right->getHeight();
  }

  return BURRSort(left, right, SPQ);
}
void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
  SUnits = &sunits;
  // Add pseudo dependency edges for two-address nodes.
  if (!Disable2AddrHack)
    AddPseudoTwoAddrDeps();
  // Reroute edges to nodes with multiple uses.
  if (!TracksRegPressure && !SrcOrder)
    PrescheduleNodesWithMultipleUses();
  // Calculate node priorities.
  CalculateSethiUllmanNumbers();

  // For single block loops, mark nodes that look like canonical IV increments.
  if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
    for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
      initVRegCycle(&sunits[i]);
    }
  }
}
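// Note on ordering (an inference from the code above, not a documented
// contract): both rewrites mutate the graph's edge set, so they run before
// the Sethi-Ullman numbers are computed from the final edges; computing
// priorities first would leave them stale.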
//===----------------------------------------------------------------------===//
//                    Preschedule for Register Pressure
//===----------------------------------------------------------------------===//
bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
  if (SU->isTwoAddress) {
    unsigned Opc = SU->getNode()->getMachineOpcode();
    const MCInstrDesc &MCID = TII->get(Opc);
    unsigned NumRes = MCID.getNumDefs();
    unsigned NumOps = MCID.getNumOperands() - NumRes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) {
        SDNode *DU = SU->getNode()->getOperand(i).getNode();
        if (DU->getNodeId() != -1 &&
            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
          return true;
      }
    }
  }
  return false;
}
/// canClobberReachingPhysRegUse - True if SU would clobber one of its
/// successors' explicit physregs whose definition can reach DepSU.
/// i.e. DepSU should not be scheduled above SU.
static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU,
                                         ScheduleDAGRRList *scheduleDAG,
                                         const TargetInstrInfo *TII,
                                         const TargetRegisterInfo *TRI) {
  const uint16_t *ImpDefs
    = TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
  const uint32_t *RegMask = getNodeRegMask(SU->getNode());
  if (!ImpDefs && !RegMask)
    return false;

  for (SUnit::const_succ_iterator SI = SU->Succs.begin(), SE = SU->Succs.end();
       SI != SE; ++SI) {
    SUnit *SuccSU = SI->getSUnit();
    for (SUnit::const_pred_iterator PI = SuccSU->Preds.begin(),
           PE = SuccSU->Preds.end(); PI != PE; ++PI) {
      if (!PI->isAssignedRegDep())
        continue;

      if (RegMask && MachineOperand::clobbersPhysReg(RegMask, PI->getReg()) &&
          scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
        return true;

      if (ImpDefs)
        for (const uint16_t *ImpDef = ImpDefs; *ImpDef; ++ImpDef)
          // Return true if SU clobbers this physical register use and the
          // definition of the register is reachable from DepSU. IsReachable
          // queries a topological forward sort of the DAG (following the
          // successors).
          if (TRI->regsOverlap(*ImpDef, PI->getReg()) &&
              scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
            return true;
    }
  }
  return false;
}
/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
                                  const TargetInstrInfo *TII,
                                  const TargetRegisterInfo *TRI) {
  SDNode *N = SuccSU->getNode();
  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  const uint16_t *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
  assert(ImpDefs && "Caller should check hasPhysRegDefs");
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (!SUNode->isMachineOpcode())
      continue;
    const uint16_t *SUImpDefs =
      TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
    const uint32_t *SURegMask = getNodeRegMask(SUNode);
    if (!SUImpDefs && !SURegMask)
      continue;
    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
      EVT VT = N->getValueType(i);
      if (VT == MVT::Glue || VT == MVT::Other)
        continue;
      if (!N->hasAnyUseOfValue(i))
        continue;
      unsigned Reg = ImpDefs[i - NumDefs];
      if (SURegMask && MachineOperand::clobbersPhysReg(SURegMask, Reg))
        return true;
      if (!SUImpDefs)
        continue;
      for (;*SUImpDefs; ++SUImpDefs) {
        unsigned SUReg = *SUImpDefs;
        if (TRI->regsOverlap(Reg, SUReg))
          return true;
      }
    }
  }
  return false;
}
/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
/// are not handled well by the general register pressure reduction
/// heuristics. When presented with code like this:
///
///      N
///    / |
///   /  |
///  U  store
///  |
/// ...
///
/// the heuristics tend to push the store up, but since the
/// operand of the store has another use (U), this would increase
/// the length of that other use (the U->N edge).
///
/// This function transforms code like the above to route U's
/// dependence through the store when possible, like this:
///
///      N
///      ||
///      ||
///     store
///       |
///       U
///       |
///      ...
///
/// This results in the store being scheduled immediately
/// after N, which shortens the U->N live range, reducing
/// register pressure.
///
void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
  // Visit all the nodes in topological order, working top-down.
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    // For now, only look at nodes with no data successors, such as stores.
    // These are especially important, due to the heuristics in
    // getNodePriority for nodes with no data successors.
    if (SU->NumSuccs != 0)
      continue;
    // For now, only look at nodes with exactly one data predecessor.
    if (SU->NumPreds != 1)
      continue;
    // Avoid prescheduling copies to virtual registers, which don't behave
    // like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyToReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Locate the single data predecessor.
    SUnit *PredSU = 0;
    for (SUnit::const_pred_iterator II = SU->Preds.begin(),
         EE = SU->Preds.end(); II != EE; ++II)
      if (!II->isCtrl()) {
        PredSU = II->getSUnit();
        break;
      }
    assert(PredSU);

    // Don't rewrite edges that carry physregs, because that requires
    // additional support infrastructure.
    if (PredSU->hasPhysRegDefs)
      continue;
    // Short-circuit the case where SU is PredSU's only data successor.
    if (PredSU->NumSuccs == 1)
      continue;
    // Avoid prescheduling to copies from virtual registers, which don't behave
    // like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyFromReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Perform checks on the successors of PredSU.
    for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
         EE = PredSU->Succs.end(); II != EE; ++II) {
      SUnit *PredSuccSU = II->getSUnit();
      if (PredSuccSU == SU) continue;
      // If PredSU has another successor with no data successors, for
      // now don't attempt to choose either over the other.
      if (PredSuccSU->NumSuccs == 0)
        goto outer_loop_continue;
      // Don't break physical register dependencies.
      if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
        if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
          goto outer_loop_continue;
      // Don't introduce graph cycles.
      if (scheduleDAG->IsReachable(SU, PredSuccSU))
        goto outer_loop_continue;
    }

    // Ok, the transformation is safe and the heuristics suggest it is
    // profitable. Update the graph.
    DEBUG(dbgs() << "    Prescheduling SU #" << SU->NodeNum
                 << " next to PredSU #" << PredSU->NodeNum
                 << " to guide scheduling in the presence of multiple uses\n");
    for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
      SDep Edge = PredSU->Succs[i];
      assert(!Edge.isAssignedRegDep());
      SUnit *SuccSU = Edge.getSUnit();
      if (SuccSU != SU) {
        Edge.setSUnit(PredSU);
        scheduleDAG->RemovePred(SuccSU, Edge);
        scheduleDAG->AddPred(SU, Edge);
        Edge.setSUnit(SU);
        scheduleDAG->AddPred(SuccSU, Edge);
        --i;
      }
    }
  outer_loop_continue:;
  }
}
/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule). If both nodes are two-address, favor the
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
void RegReductionPQBase::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->getNode();
    if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
      continue;

    bool isLiveOut = hasOnlyLiveOutUses(SU);
    unsigned Opc = Node->getMachineOpcode();
    const MCInstrDesc &MCID = TII->get(Opc);
    unsigned NumRes = MCID.getNumDefs();
    unsigned NumOps = MCID.getNumOperands() - NumRes;
    for (unsigned j = 0; j != NumOps; ++j) {
      if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1)
        continue;
      SDNode *DU = SU->getNode()->getOperand(j).getNode();
      if (DU->getNodeId() == -1)
        continue;
      const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
      if (!DUSU) continue;
      for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
           E = DUSU->Succs.end(); I != E; ++I) {
        if (I->isCtrl()) continue;
        SUnit *SuccSU = I->getSUnit();
        if (SuccSU == SU)
          continue;
        // Be conservative. Ignore if nodes aren't at roughly the same
        // depth and height.
        if (SuccSU->getHeight() < SU->getHeight() &&
            (SU->getHeight() - SuccSU->getHeight()) > 1)
          continue;
        // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
        // constrains whatever is using the copy, instead of the copy
        // itself. In the case that the copy is coalesced, this
        // preserves the intent of the pseudo two-address heuristics.
        while (SuccSU->Succs.size() == 1 &&
               SuccSU->getNode()->isMachineOpcode() &&
               SuccSU->getNode()->getMachineOpcode() ==
                 TargetOpcode::COPY_TO_REGCLASS)
          SuccSU = SuccSU->Succs.front().getSUnit();
        // Don't constrain non-instruction nodes.
        if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
          continue;
        // Don't constrain nodes with physical register defs if the
        // predecessor can clobber them.
        if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
          if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
            continue;
        }
        // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
        // these may be coalesced away. We want them close to their uses.
        unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
        if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
            SuccOpc == TargetOpcode::INSERT_SUBREG ||
            SuccOpc == TargetOpcode::SUBREG_TO_REG)
          continue;
        if (!canClobberReachingPhysRegUse(SuccSU, SU, scheduleDAG, TII, TRI) &&
            (!canClobber(SuccSU, DUSU) ||
             (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
             (!SU->isCommutable && SuccSU->isCommutable)) &&
            !scheduleDAG->IsReachable(SuccSU, SU)) {
          DEBUG(dbgs() << "    Adding a pseudo-two-addr edge from SU #"
                << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
          scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Artificial));
        }
      }
    }
  }
}
//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//
llvm::ScheduleDAGSDNodes *
llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
                                 CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  BURegReductionPriorityQueue *PQ =
    new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  SrcRegReductionPriorityQueue *PQ =
    new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = &IS->getTargetLowering();

  HybridBURRPriorityQueue *PQ =
    new HybridBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);

  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
                                CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = &IS->getTargetLowering();

  ILPBURRPriorityQueue *PQ =
    new ILPBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}
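// Usage sketch (assumes the standard llc driver): the factories above are
// normally reached through the scheduler registry, e.g.
//
//   llc -pre-RA-sched=list-burr foo.ll
//   llc -pre-RA-sched=list-ilp foo.ll
//
// or directly, given a SelectionDAGISel *IS:
//
//   ScheduleDAGSDNodes *SD =
//     createBURRListDAGScheduler(IS, CodeGenOpt::Default);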