//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "pre-RA-sched"
-#include "llvm/CodeGen/ScheduleDAGSDNodes.h"
+#include "ScheduleDAGSDNodes.h"
+#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/PriorityQueue.h"
-#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
#include <climits>
-#include "llvm/Support/CommandLine.h"
using namespace llvm;
STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds, "Number of nodes unfolded");
STATISTIC(NumDups, "Number of duplicated nodes");
-STATISTIC(NumCCCopies, "Number of cross class copies");
+STATISTIC(NumPRCopies, "Number of physical register copies");
static RegisterScheduler
burrListDAGScheduler("list-burr",
tdrListrDAGScheduler("list-tdrr",
"Top-down register reduction list scheduling",
createTDRRListDAGScheduler);
+static RegisterScheduler
+ sourceListDAGScheduler("source",
+ "Similar to list-burr but schedules in source "
+ "order when possible",
+ createSourceListDAGScheduler);
+
+static RegisterScheduler
+ hybridListDAGScheduler("list-hybrid",
+ "Bottom-up register pressure aware list scheduling "
+ "which tries to balance latency and register pressure",
+ createHybridListDAGScheduler);
+
+static RegisterScheduler
+ ILPListDAGScheduler("list-ilp",
+ "Bottom-up register pressure aware list scheduling "
+ "which tries to balance ILP and register pressure",
+ createILPListDAGScheduler);
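+
For reference, each RegisterScheduler entry above feeds SelectionDAGISel's scheduler registry, so the new heuristics become selectable from the command line; assuming a stock llc build, something like

    llc -pre-RA-sched=source input.ll -o input.s

(the file names are illustrative) selects the new source-order variant.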
namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
///
-class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAGSDNodes {
+class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
/// isBottomUp - This is true if the scheduling problem is bottom-up, false if
/// it is top-down.
bool isBottomUp;
- /// Fast - True if we are performing fast scheduling.
+ /// NeedLatency - True if the scheduler will make use of latency information.
///
- bool Fast;
-
+ bool NeedLatency;
+
/// AvailableQueue - The priority queue to use for the available SUnits.
SchedulingPriorityQueue *AvailableQueue;
std::vector<SUnit*> LiveRegDefs;
std::vector<unsigned> LiveRegCycles;
+ /// Topo - A topological ordering for SUnits which permits fast IsReachable
+ /// and similar queries.
+ ScheduleDAGTopologicalSort Topo;
+
public:
- ScheduleDAGRRList(SelectionDAG *dag, MachineBasicBlock *bb,
- const TargetMachine &tm, bool isbottomup, bool f,
+ ScheduleDAGRRList(MachineFunction &mf,
+ bool isbottomup, bool needlatency,
SchedulingPriorityQueue *availqueue)
- : ScheduleDAGSDNodes(dag, bb, tm), isBottomUp(isbottomup), Fast(f),
- AvailableQueue(availqueue) {
+ : ScheduleDAGSDNodes(mf), isBottomUp(isbottomup), NeedLatency(needlatency),
+ AvailableQueue(availqueue), Topo(SUnits) {
}
~ScheduleDAGRRList() {
void Schedule();
/// IsReachable - Checks if SU is reachable from TargetSU.
- bool IsReachable(const SUnit *SU, const SUnit *TargetSU);
+ bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
+ return Topo.IsReachable(SU, TargetSU);
+ }
- /// willCreateCycle - Returns true if adding an edge from SU to TargetSU will
+ /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
/// create a cycle.
- bool WillCreateCycle(SUnit *SU, SUnit *TargetSU);
+ bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
+ return Topo.WillCreateCycle(SU, TargetSU);
+ }
- /// AddPred - This adds the specified node X as a predecessor of
- /// the current node Y if not already.
+ /// AddPred - adds a predecessor edge to SUnit SU.
- /// This returns true if this is a new predecessor.
/// Updates the topological ordering if required.
- bool AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
- unsigned PhyReg = 0, int Cost = 1);
+ void AddPred(SUnit *SU, const SDep &D) {
+ Topo.AddPred(SU, D.getSUnit());
+ SU->addPred(D);
+ }
- /// RemovePred - This removes the specified node N from the predecessors of
- /// the current node M. Updates the topological ordering if required.
- bool RemovePred(SUnit *M, SUnit *N, bool isCtrl, bool isSpecial);
+ /// RemovePred - removes a predecessor edge from SUnit SU.
+ /// Updates the topological ordering if required.
+ void RemovePred(SUnit *SU, const SDep &D) {
+ Topo.RemovePred(SU, D.getSUnit());
+ SU->removePred(D);
+ }
private:
- void ReleasePred(SUnit *SU, SUnit *PredSU, bool isChain);
- void ReleaseSucc(SUnit *SU, SUnit *SuccSU, bool isChain);
- void CapturePred(SUnit*, SUnit*, bool);
+ void ReleasePred(SUnit *SU, const SDep *PredEdge);
+ void ReleasePredecessors(SUnit *SU, unsigned CurCycle);
+ void ReleaseSucc(SUnit *SU, const SDep *SuccEdge);
+ void ReleaseSuccessors(SUnit *SU);
+ void CapturePred(SDep *PredEdge);
void ScheduleNodeBottomUp(SUnit*, unsigned);
void ScheduleNodeTopDown(SUnit*, unsigned);
void UnscheduleNodeBottomUp(SUnit*);
void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
SUnit *CopyAndMoveSuccessors(SUnit*);
- void InsertCCCopiesAndMoveSuccs(SUnit*, unsigned,
- const TargetRegisterClass*,
- const TargetRegisterClass*,
- SmallVector<SUnit*, 2>&);
+ void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
+ const TargetRegisterClass*,
+ const TargetRegisterClass*,
+ SmallVector<SUnit*, 2>&);
bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
void ListScheduleTopDown();
void ListScheduleBottomUp();
- void CommuteNodesToReducePressure();
/// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
/// Updates the topological ordering if required.
SUnit *CreateNewSUnit(SDNode *N) {
+ unsigned NumSUnits = SUnits.size();
SUnit *NewNode = NewSUnit(N);
// Update the topological ordering.
- if (NewNode->NodeNum >= Node2Index.size())
- InitDAGTopologicalSorting();
+ if (NewNode->NodeNum >= NumSUnits)
+ Topo.InitDAGTopologicalSorting();
return NewNode;
}
/// CreateClone - Creates a new SUnit from an existing one.
/// Updates the topological ordering if required.
SUnit *CreateClone(SUnit *N) {
+ unsigned NumSUnits = SUnits.size();
SUnit *NewNode = Clone(N);
// Update the topological ordering.
- if (NewNode->NodeNum >= Node2Index.size())
- InitDAGTopologicalSorting();
+ if (NewNode->NodeNum >= NumSUnits)
+ Topo.InitDAGTopologicalSorting();
return NewNode;
}
- /// Functions for preserving the topological ordering
- /// even after dynamic insertions of new edges.
- /// This allows a very fast implementation of IsReachable.
-
- /// InitDAGTopologicalSorting - create the initial topological
- /// ordering from the DAG to be scheduled.
- void InitDAGTopologicalSorting();
-
- /// DFS - make a DFS traversal and mark all nodes affected by the
- /// edge insertion. These nodes will later get new topological indexes
- /// by means of the Shift method.
- void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);
-
- /// Shift - reassign topological indexes for the nodes in the DAG
- /// to preserve the topological ordering.
- void Shift(BitVector& Visited, int LowerBound, int UpperBound);
-
- /// Allocate - assign the topological index to the node n.
- void Allocate(int n, int index);
-
- /// Index2Node - Maps topological index to the node number.
- std::vector<int> Index2Node;
- /// Node2Index - Maps the node number to its topological index.
- std::vector<int> Node2Index;
- /// Visited - a set of nodes visited during a DFS traversal.
- BitVector Visited;
+ /// ForceUnitLatencies - Register-pressure-reducing scheduling doesn't
+ /// need actual latency information but the hybrid scheduler does.
+ bool ForceUnitLatencies() const {
+ return !NeedLatency;
+ }
};
} // end anonymous namespace
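A minimal sketch of how the Topo-backed wrappers above fit together when a caller wants an artificial ordering edge (Scheduler, PredSU, and SuccSU are assumed names; the SDep arguments mirror ones used later in this patch):

    // Refuse the edge if the maintained topological order says it would
    // close a cycle; otherwise AddPred updates both the order and the
    // SUnit's predecessor list in one step.
    if (!Scheduler->WillCreateCycle(PredSU, SuccSU))
      Scheduler->AddPred(SuccSU, SDep(PredSU, SDep::Order, /*Latency=*/1,
                                      /*Reg=*/0, /*isNormalMemory=*/false,
                                      /*isMustAlias=*/false,
                                      /*isArtificial=*/true));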
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
- DOUT << "********** List Scheduling **********\n";
+ DEBUG(dbgs()
+ << "********** List Scheduling BB#" << BB->getNumber()
+ << " **********\n");
NumLiveRegs = 0;
LiveRegDefs.resize(TRI->getNumRegs(), NULL);
LiveRegCycles.resize(TRI->getNumRegs(), 0);
- // Build scheduling units.
- BuildSchedUnits();
+ // Build the scheduling graph.
+ BuildSchedGraph(NULL);
DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
SUnits[su].dumpAll(this));
- if (!Fast) {
- CalculateDepths();
- CalculateHeights();
- }
- InitDAGTopologicalSorting();
+ Topo.InitDAGTopologicalSorting();
AvailableQueue->initNodes(SUnits);
ListScheduleTopDown();
AvailableQueue->releaseState();
-
- if (!Fast)
- CommuteNodesToReducePressure();
-}
-
-/// CommuteNodesToReducePressure - If a node is two-address and commutable, and
-/// it is not the last use of its first operand, add it to the CommuteSet if
-/// possible. It will be commuted when it is translated to a MI.
-void ScheduleDAGRRList::CommuteNodesToReducePressure() {
- SmallPtrSet<SUnit*, 4> OperandSeen;
- for (unsigned i = Sequence.size(); i != 0; ) {
- --i;
- SUnit *SU = Sequence[i];
- if (!SU || !SU->getNode()) continue;
- if (SU->isCommutable) {
- unsigned Opc = SU->getNode()->getMachineOpcode();
- const TargetInstrDesc &TID = TII->get(Opc);
- unsigned NumRes = TID.getNumDefs();
- unsigned NumOps = TID.getNumOperands() - NumRes;
- for (unsigned j = 0; j != NumOps; ++j) {
- if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
- continue;
-
- SDNode *OpN = SU->getNode()->getOperand(j).getNode();
- SUnit *OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
- if (OpSU && OperandSeen.count(OpSU) == 1) {
- // Ok, so SU is not the last use of OpSU, but SU is two-address so
- // it will clobber OpSU. Try to commute SU if no other source operands
- // are live below.
- bool DoCommute = true;
- for (unsigned k = 0; k < NumOps; ++k) {
- if (k != j) {
- OpN = SU->getNode()->getOperand(k).getNode();
- OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
- if (OpSU && OperandSeen.count(OpSU) == 1) {
- DoCommute = false;
- break;
- }
- }
- }
- if (DoCommute)
- CommuteSet.insert(SU->getNode());
- }
-
- // Only look at the first use&def node for now.
- break;
- }
- }
-
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- if (!I->isCtrl)
- OperandSeen.insert(I->Dep->OrigNode);
- }
- }
}
//===----------------------------------------------------------------------===//
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
-void ScheduleDAGRRList::ReleasePred(SUnit *SU, SUnit *PredSU, bool isChain) {
- --PredSU->NumSuccsLeft;
-
+void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
+ SUnit *PredSU = PredEdge->getSUnit();
+
#ifndef NDEBUG
- if (PredSU->NumSuccsLeft < 0) {
- cerr << "*** Scheduling failed! ***\n";
+ if (PredSU->NumSuccsLeft == 0) {
+ dbgs() << "*** Scheduling failed! ***\n";
PredSU->dump(this);
- cerr << " has been released too many times!\n";
- assert(0);
+ dbgs() << " has been released too many times!\n";
+ llvm_unreachable(0);
}
#endif
-
- // Compute how many cycles it will be before this actually becomes
- // available. This is the max of the start time of all predecessors plus
- // their latencies.
- // If this is a token edge, we don't need to wait for the latency of the
- // preceeding instruction (e.g. a long-latency load) unless there is also
- // some other data dependence.
- unsigned PredDoneCycle = SU->Cycle;
- if (!isChain)
- PredDoneCycle += PredSU->Latency;
- else if (SU->Latency)
- PredDoneCycle += 1;
- PredSU->CycleBound = std::max(PredSU->CycleBound, PredDoneCycle);
+ --PredSU->NumSuccsLeft;
- if (PredSU->NumSuccsLeft == 0) {
+ if (!ForceUnitLatencies()) {
+ // Updating predecessor's height. This is now the cycle when the
+ // predecessor can be scheduled without causing a pipeline stall.
+ PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
+ }
+
+ // If all the node's successors are scheduled, this node is ready
+ // to be scheduled. Ignore the special EntrySU node.
+ if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
PredSU->isAvailable = true;
AvailableQueue->push(PredSU);
}
}
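To make the height update concrete (illustrative numbers only): if SU has just been scheduled at height 4 and the released edge carries latency 2, then

    PredSU->setHeightToAtLeast(4 + 2);  // PredSU's height is now at least 6

so picking PredSU before cycle 6 would trip the pipeline-stall debug check in ScheduleNodeBottomUp below.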
-/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
-/// count of its predecessors. If a predecessor pending count is zero, add it to
-/// the Available queue.
-void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
- DOUT << "*** Scheduling [" << CurCycle << "]: ";
- DEBUG(SU->dump(this));
-
- SU->Cycle = CurCycle;
- Sequence.push_back(SU);
-
+void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
// Bottom up: release predecessors
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
- ReleasePred(SU, I->Dep, I->isCtrl);
- if (I->Cost < 0) {
+ ReleasePred(SU, &*I);
+ if (I->isAssignedRegDep()) {
// This is a physical register dependency and it's impossible or
// expensive to copy the register. Make sure nothing that can
// clobber the register is scheduled between the predecessor and
// this node.
- if (!LiveRegDefs[I->Reg]) {
+ if (!LiveRegDefs[I->getReg()]) {
++NumLiveRegs;
- LiveRegDefs[I->Reg] = I->Dep;
- LiveRegCycles[I->Reg] = CurCycle;
+ LiveRegDefs[I->getReg()] = I->getSUnit();
+ LiveRegCycles[I->getReg()] = CurCycle;
}
}
}
+}
+
+/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
+/// count of its predecessors. If a predecessor pending count is zero, add it to
+/// the Available queue.
+void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
+ DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
+ DEBUG(SU->dump(this));
+
+#ifndef NDEBUG
+ if (CurCycle < SU->getHeight())
+ DEBUG(dbgs() << " Height [" << SU->getHeight() << "] pipeline stall!\n");
+#endif
+
+ // FIXME: Handle noop hazard.
+ SU->setHeightToAtLeast(CurCycle);
+ Sequence.push_back(SU);
+
+ AvailableQueue->ScheduledNode(SU);
+
+ ReleasePredecessors(SU, CurCycle);
// Release all the implicit physical register defs that are live.
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
- if (I->Cost < 0) {
- if (LiveRegCycles[I->Reg] == I->Dep->Cycle) {
+ if (I->isAssignedRegDep()) {
+ if (LiveRegCycles[I->getReg()] == I->getSUnit()->getHeight()) {
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
- assert(LiveRegDefs[I->Reg] == SU &&
+ assert(LiveRegDefs[I->getReg()] == SU &&
"Physical register dependency violated?");
--NumLiveRegs;
- LiveRegDefs[I->Reg] = NULL;
- LiveRegCycles[I->Reg] = 0;
+ LiveRegDefs[I->getReg()] = NULL;
+ LiveRegCycles[I->getReg()] = 0;
}
}
}
SU->isScheduled = true;
- AvailableQueue->ScheduledNode(SU);
}
/// CapturePred - This does the opposite of ReleasePred. Since SU is being
-/// unscheduled, incrcease the succ left count of its predecessors. Remove
+/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
-void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {
- unsigned CycleBound = 0;
- for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end();
- I != E; ++I) {
- if (I->Dep == SU)
- continue;
- CycleBound = std::max(CycleBound,
- I->Dep->Cycle + PredSU->Latency);
- }
-
+void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
+ SUnit *PredSU = PredEdge->getSUnit();
if (PredSU->isAvailable) {
PredSU->isAvailable = false;
if (!PredSU->isPending)
AvailableQueue->remove(PredSU);
}
- PredSU->CycleBound = CycleBound;
+ assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
++PredSU->NumSuccsLeft;
}
-/// UnscheduleNodeBottomUp - Remove the node from the schedule, update its and
-/// its predecessor states to reflect the change.
+/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update its
+/// and its predecessors' states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
- DOUT << "*** Unscheduling [" << SU->Cycle << "]: ";
+ DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
DEBUG(SU->dump(this));
- AvailableQueue->UnscheduledNode(SU);
-
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
- CapturePred(I->Dep, SU, I->isCtrl);
- if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg]) {
+ CapturePred(&*I);
+ if (I->isAssignedRegDep() && SU->getHeight() == LiveRegCycles[I->getReg()]){
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
- assert(LiveRegDefs[I->Reg] == I->Dep &&
+ assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
"Physical register dependency violated?");
--NumLiveRegs;
- LiveRegDefs[I->Reg] = NULL;
- LiveRegCycles[I->Reg] = 0;
+ LiveRegDefs[I->getReg()] = NULL;
+ LiveRegCycles[I->getReg()] = 0;
}
}
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
- if (I->Cost < 0) {
- if (!LiveRegDefs[I->Reg]) {
- LiveRegDefs[I->Reg] = SU;
+ if (I->isAssignedRegDep()) {
+ if (!LiveRegDefs[I->getReg()]) {
+ LiveRegDefs[I->getReg()] = SU;
++NumLiveRegs;
}
- if (I->Dep->Cycle < LiveRegCycles[I->Reg])
- LiveRegCycles[I->Reg] = I->Dep->Cycle;
+ if (I->getSUnit()->getHeight() < LiveRegCycles[I->getReg()])
+ LiveRegCycles[I->getReg()] = I->getSUnit()->getHeight();
}
}
- SU->Cycle = 0;
+ SU->setHeightDirty();
SU->isScheduled = false;
SU->isAvailable = true;
AvailableQueue->push(SU);
-}
-
-/// IsReachable - Checks if SU is reachable from TargetSU.
-bool ScheduleDAGRRList::IsReachable(const SUnit *SU, const SUnit *TargetSU) {
- // If insertion of the edge SU->TargetSU would create a cycle
- // then there is a path from TargetSU to SU.
- int UpperBound, LowerBound;
- LowerBound = Node2Index[TargetSU->NodeNum];
- UpperBound = Node2Index[SU->NodeNum];
- bool HasLoop = false;
- // Is Ord(TargetSU) < Ord(SU) ?
- if (LowerBound < UpperBound) {
- Visited.reset();
- // There may be a path from TargetSU to SU. Check for it.
- DFS(TargetSU, UpperBound, HasLoop);
- }
- return HasLoop;
-}
-
-/// Allocate - assign the topological index to the node n.
-inline void ScheduleDAGRRList::Allocate(int n, int index) {
- Node2Index[n] = index;
- Index2Node[index] = n;
-}
-
-/// InitDAGTopologicalSorting - create the initial topological
-/// ordering from the DAG to be scheduled.
-
-/// The idea of the algorithm is taken from
-/// "Online algorithms for managing the topological order of
-/// a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly
-/// This is the MNR algorithm, which was first introduced by
-/// A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
-/// "Maintaining a topological order under edge insertions".
-///
-/// Short description of the algorithm:
-///
-/// Topological ordering, ord, of a DAG maps each node to a topological
-/// index so that for all edges X->Y it is the case that ord(X) < ord(Y).
-///
-/// This means that if there is a path from the node X to the node Z,
-/// then ord(X) < ord(Z).
-///
-/// This property can be used to check for reachability of nodes:
-/// if Z is reachable from X, then an insertion of the edge Z->X would
-/// create a cycle.
-///
-/// The algorithm first computes a topological ordering for the DAG by
-/// initializing the Index2Node and Node2Index arrays and then tries to keep
-/// the ordering up-to-date after edge insertions by reordering the DAG.
-///
-/// On insertion of the edge X->Y, the algorithm first marks by calling DFS
-/// the nodes reachable from Y, and then shifts them using Shift to lie
-/// immediately after X in Index2Node.
-void ScheduleDAGRRList::InitDAGTopologicalSorting() {
- unsigned DAGSize = SUnits.size();
- std::vector<SUnit*> WorkList;
- WorkList.reserve(DAGSize);
-
- Index2Node.resize(DAGSize);
- Node2Index.resize(DAGSize);
-
- // Initialize the data structures.
- for (unsigned i = 0, e = DAGSize; i != e; ++i) {
- SUnit *SU = &SUnits[i];
- int NodeNum = SU->NodeNum;
- unsigned Degree = SU->Succs.size();
- // Temporarily use the Node2Index array as scratch space for degree counts.
- Node2Index[NodeNum] = Degree;
-
- // Is it a node without dependencies?
- if (Degree == 0) {
- assert(SU->Succs.empty() && "SUnit should have no successors");
- // Collect leaf nodes.
- WorkList.push_back(SU);
- }
- }
-
- int Id = DAGSize;
- while (!WorkList.empty()) {
- SUnit *SU = WorkList.back();
- WorkList.pop_back();
- Allocate(SU->NodeNum, --Id);
- for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- SUnit *SU = I->Dep;
- if (!--Node2Index[SU->NodeNum])
- // If all dependencies of the node are processed already,
- // then the node can be computed now.
- WorkList.push_back(SU);
- }
- }
-
- Visited.resize(DAGSize);
-
-#ifndef NDEBUG
- // Check correctness of the ordering
- for (unsigned i = 0, e = DAGSize; i != e; ++i) {
- SUnit *SU = &SUnits[i];
- for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- assert(Node2Index[SU->NodeNum] > Node2Index[I->Dep->NodeNum] &&
- "Wrong topological sorting");
- }
- }
-#endif
-}
-
-/// AddPred - adds an edge from SUnit X to SUnit Y.
-/// Updates the topological ordering if required.
-bool ScheduleDAGRRList::AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
- unsigned PhyReg, int Cost) {
- int UpperBound, LowerBound;
- LowerBound = Node2Index[Y->NodeNum];
- UpperBound = Node2Index[X->NodeNum];
- bool HasLoop = false;
- // Is Ord(X) < Ord(Y) ?
- if (LowerBound < UpperBound) {
- // Update the topological order.
- Visited.reset();
- DFS(Y, UpperBound, HasLoop);
- assert(!HasLoop && "Inserted edge creates a loop!");
- // Recompute topological indexes.
- Shift(Visited, LowerBound, UpperBound);
- }
- // Now really insert the edge.
- return Y->addPred(X, isCtrl, isSpecial, PhyReg, Cost);
-}
-
-/// RemovePred - This removes the specified node N from the predecessors of
-/// the current node M. Updates the topological ordering if required.
-bool ScheduleDAGRRList::RemovePred(SUnit *M, SUnit *N,
- bool isCtrl, bool isSpecial) {
- // InitDAGTopologicalSorting();
- return M->removePred(N, isCtrl, isSpecial);
-}
-
-/// DFS - Make a DFS traversal to mark all nodes reachable from SU and mark
-/// all nodes affected by the edge insertion. These nodes will later get new
-/// topological indexes by means of the Shift method.
-void ScheduleDAGRRList::DFS(const SUnit *SU, int UpperBound, bool& HasLoop) {
- std::vector<const SUnit*> WorkList;
- WorkList.reserve(SUnits.size());
-
- WorkList.push_back(SU);
- while (!WorkList.empty()) {
- SU = WorkList.back();
- WorkList.pop_back();
- Visited.set(SU->NodeNum);
- for (int I = SU->Succs.size()-1; I >= 0; --I) {
- int s = SU->Succs[I].Dep->NodeNum;
- if (Node2Index[s] == UpperBound) {
- HasLoop = true;
- return;
- }
- // Visit successors if not already and in affected region.
- if (!Visited.test(s) && Node2Index[s] < UpperBound) {
- WorkList.push_back(SU->Succs[I].Dep);
- }
- }
- }
-}
-
-/// Shift - Renumber the nodes so that the topological ordering is
-/// preserved.
-void ScheduleDAGRRList::Shift(BitVector& Visited, int LowerBound,
- int UpperBound) {
- std::vector<int> L;
- int shift = 0;
- int i;
-
- for (i = LowerBound; i <= UpperBound; ++i) {
- // w is node at topological index i.
- int w = Index2Node[i];
- if (Visited.test(w)) {
- // Unmark.
- Visited.reset(w);
- L.push_back(w);
- shift = shift + 1;
- } else {
- Allocate(w, i - shift);
- }
- }
-
- for (unsigned j = 0; j < L.size(); ++j) {
- Allocate(L[j], i - shift);
- i = i + 1;
- }
-}
-
-
-/// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
-/// create a cycle.
-bool ScheduleDAGRRList::WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
- if (IsReachable(TargetSU, SU))
- return true;
- for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I)
- if (I->Cost < 0 && IsReachable(TargetSU, I->Dep))
- return true;
- return false;
+ AvailableQueue->UnscheduledNode(SU);
}
/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
-/// BTCycle in order to schedule a specific node. Returns the last unscheduled
-/// SUnit. Also returns if a successor is unscheduled in the process.
+/// BTCycle in order to schedule a specific node.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
unsigned &CurCycle) {
SUnit *OldSU = NULL;
SU->isAvailable = false;
UnscheduleNodeBottomUp(OldSU);
--CurCycle;
+ AvailableQueue->setCurCycle(CurCycle);
}
-
- if (SU->isSucc(OldSU)) {
- assert(false && "Something is wrong!");
- abort();
- }
+ assert(!SU->isSucc(OldSU) && "Something is wrong!");
++NumBacktracks;
}
+static bool isOperandOf(const SUnit *SU, SDNode *N) {
+ for (const SDNode *SUNode = SU->getNode(); SUNode;
+ SUNode = SUNode->getFlaggedNode()) {
+ if (SUNode->isOperandOf(N))
+ return true;
+ }
+ return false;
+}
+
/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
SUnit *NewSU;
bool TryUnfold = false;
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
- MVT VT = N->getValueType(i);
+ EVT VT = N->getValueType(i);
if (VT == MVT::Flag)
return NULL;
else if (VT == MVT::Other)
}
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
const SDValue &Op = N->getOperand(i);
- MVT VT = Op.getNode()->getValueType(Op.getResNo());
+ EVT VT = Op.getNode()->getValueType(Op.getResNo());
if (VT == MVT::Flag)
return NULL;
}
if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
return NULL;
- DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
+ DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
assert(NewNodes.size() == 2 && "Expected a load folding node!");
N = NewNodes[1];
} else {
LoadSU = CreateNewSUnit(LoadNode);
LoadNode->setNodeId(LoadSU->NodeNum);
-
- LoadSU->Depth = SU->Depth;
- LoadSU->Height = SU->Height;
ComputeLatency(LoadSU);
}
}
if (TID.isCommutable())
NewSU->isCommutable = true;
- // FIXME: Calculate height / depth and propagate the changes?
- NewSU->Depth = SU->Depth;
- NewSU->Height = SU->Height;
ComputeLatency(NewSU);
- SUnit *ChainPred = NULL;
+ // Record all the edges to and from the old SU, by category.
+ SmallVector<SDep, 4> ChainPreds;
SmallVector<SDep, 4> ChainSuccs;
SmallVector<SDep, 4> LoadPreds;
SmallVector<SDep, 4> NodePreds;
SmallVector<SDep, 4> NodeSuccs;
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
- if (I->isCtrl)
- ChainPred = I->Dep;
- else if (I->Dep->getNode() && I->Dep->getNode()->isOperandOf(LoadNode))
- LoadPreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
+ if (I->isCtrl())
+ ChainPreds.push_back(*I);
+ else if (isOperandOf(I->getSUnit(), LoadNode))
+ LoadPreds.push_back(*I);
else
- NodePreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
+ NodePreds.push_back(*I);
}
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
- if (I->isCtrl)
- ChainSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
- I->isCtrl, I->isSpecial));
+ if (I->isCtrl())
+ ChainSuccs.push_back(*I);
else
- NodeSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
- I->isCtrl, I->isSpecial));
+ NodeSuccs.push_back(*I);
}
- if (ChainPred) {
- RemovePred(SU, ChainPred, true, false);
+ // Now assign edges to the newly-created nodes.
+ for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
+ const SDep &Pred = ChainPreds[i];
+ RemovePred(SU, Pred);
if (isNewLoad)
- AddPred(LoadSU, ChainPred, true, false);
+ AddPred(LoadSU, Pred);
}
for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
- SDep *Pred = &LoadPreds[i];
- RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
- if (isNewLoad) {
- AddPred(LoadSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
- Pred->Reg, Pred->Cost);
- }
+ const SDep &Pred = LoadPreds[i];
+ RemovePred(SU, Pred);
+ if (isNewLoad)
+ AddPred(LoadSU, Pred);
}
for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
- SDep *Pred = &NodePreds[i];
- RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
- AddPred(NewSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
- Pred->Reg, Pred->Cost);
+ const SDep &Pred = NodePreds[i];
+ RemovePred(SU, Pred);
+ AddPred(NewSU, Pred);
}
for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
- SDep *Succ = &NodeSuccs[i];
- RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
- AddPred(Succ->Dep, NewSU, Succ->isCtrl, Succ->isSpecial,
- Succ->Reg, Succ->Cost);
+ SDep D = NodeSuccs[i];
+ SUnit *SuccDep = D.getSUnit();
+ D.setSUnit(SU);
+ RemovePred(SuccDep, D);
+ D.setSUnit(NewSU);
+ AddPred(SuccDep, D);
}
for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
- SDep *Succ = &ChainSuccs[i];
- RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
+ SDep D = ChainSuccs[i];
+ SUnit *SuccDep = D.getSUnit();
+ D.setSUnit(SU);
+ RemovePred(SuccDep, D);
if (isNewLoad) {
- AddPred(Succ->Dep, LoadSU, Succ->isCtrl, Succ->isSpecial,
- Succ->Reg, Succ->Cost);
+ D.setSUnit(LoadSU);
+ AddPred(SuccDep, D);
}
}
- if (isNewLoad) {
- AddPred(NewSU, LoadSU, false, false);
- }
+
+ // Add a data dependency to reflect that NewSU reads the value defined
+ // by LoadSU.
+ AddPred(NewSU, SDep(LoadSU, SDep::Data, LoadSU->Latency));
if (isNewLoad)
AvailableQueue->addNode(LoadSU);
SU = NewSU;
}
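As a concrete shape for the unfolding path above (illustrative only; the real opcodes come from the target's unfolding tables):

    //   addl (%rdi), %eax   -->   movl (%rdi), %tmp
    //                             addl %tmp, %eax
    //
    // LoadSU stands for the new load, NewSU for the arithmetic op, and the
    // SDep::Data edge added above models NewSU reading the loaded value.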
- DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
+ DEBUG(dbgs() << " Duplicating SU #" << SU->NodeNum << "\n");
NewSU = CreateClone(SU);
// New SUnit has the exact same predecessors.
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I)
- if (!I->isSpecial) {
- AddPred(NewSU, I->Dep, I->isCtrl, false, I->Reg, I->Cost);
- NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1);
- }
+ if (!I->isArtificial())
+ AddPred(NewSU, *I);
// Only copy scheduled successors. Cut them from old node's successor
// list and move them over.
- SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
+ SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
- if (I->isSpecial)
+ if (I->isArtificial())
continue;
- if (I->Dep->isScheduled) {
- NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1);
- AddPred(I->Dep, NewSU, I->isCtrl, false, I->Reg, I->Cost);
- DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
+ SUnit *SuccSU = I->getSUnit();
+ if (SuccSU->isScheduled) {
+ SDep D = *I;
+ D.setSUnit(NewSU);
+ AddPred(SuccSU, D);
+ D.setSUnit(SU);
+ DelDeps.push_back(std::make_pair(SuccSU, D));
}
}
- for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
- SUnit *Succ = DelDeps[i].first;
- bool isCtrl = DelDeps[i].second;
- RemovePred(Succ, SU, isCtrl, false);
- }
+ for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
+ RemovePred(DelDeps[i].first, DelDeps[i].second);
AvailableQueue->updateNode(SU);
AvailableQueue->addNode(NewSU);
return NewSU;
}
-/// InsertCCCopiesAndMoveSuccs - Insert expensive cross register class copies
-/// and move all scheduled successors of the given SUnit to the last copy.
-void ScheduleDAGRRList::InsertCCCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
- const TargetRegisterClass *DestRC,
- const TargetRegisterClass *SrcRC,
+/// InsertCopiesAndMoveSuccs - Insert register copies and move all
+/// scheduled successors of the given SUnit to the last copy.
+void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
+ const TargetRegisterClass *DestRC,
+ const TargetRegisterClass *SrcRC,
SmallVector<SUnit*, 2> &Copies) {
SUnit *CopyFromSU = CreateNewSUnit(NULL);
CopyFromSU->CopySrcRC = SrcRC;
CopyFromSU->CopyDstRC = DestRC;
- CopyFromSU->Depth = SU->Depth;
- CopyFromSU->Height = SU->Height;
SUnit *CopyToSU = CreateNewSUnit(NULL);
CopyToSU->CopySrcRC = DestRC;
// Only copy scheduled successors. Cut them from old node's successor
// list and move them over.
- SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
+ SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
- if (I->isSpecial)
+ if (I->isArtificial())
continue;
- if (I->Dep->isScheduled) {
- CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1);
- AddPred(I->Dep, CopyToSU, I->isCtrl, false, I->Reg, I->Cost);
- DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
+ SUnit *SuccSU = I->getSUnit();
+ if (SuccSU->isScheduled) {
+ SDep D = *I;
+ D.setSUnit(CopyToSU);
+ AddPred(SuccSU, D);
+ DelDeps.push_back(std::make_pair(SuccSU, *I));
}
}
- for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
- SUnit *Succ = DelDeps[i].first;
- bool isCtrl = DelDeps[i].second;
- RemovePred(Succ, SU, isCtrl, false);
- }
+ for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
+ RemovePred(DelDeps[i].first, DelDeps[i].second);
- AddPred(CopyFromSU, SU, false, false, Reg, -1);
- AddPred(CopyToSU, CopyFromSU, false, false, Reg, 1);
+ AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
+ AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));
AvailableQueue->updateNode(SU);
AvailableQueue->addNode(CopyFromSU);
Copies.push_back(CopyFromSU);
Copies.push_back(CopyToSU);
- ++NumCCCopies;
+ ++NumPRCopies;
}
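Schematically, the rewiring above leaves (SU's already-scheduled uses having been cut over to the last copy):

    //   SU --data(Reg)--> CopyFromSU --data--> CopyToSU --> moved successors
    //
    // The first copy moves the value into the cross-copy register class and
    // the second moves it back, which is why the pair counts toward the
    // NumPRCopies statistic.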
/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
-static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
+static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
const TargetInstrInfo *TII) {
const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
return N->getValueType(NumRes);
}
+/// CheckForLiveRegDef - Return true and update live register vector if the
+/// specified register def of the specified SUnit clobbers any "live" registers.
+static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
+ std::vector<SUnit*> &LiveRegDefs,
+ SmallSet<unsigned, 4> &RegAdded,
+ SmallVector<unsigned, 4> &LRegs,
+ const TargetRegisterInfo *TRI) {
+ bool Added = false;
+ if (LiveRegDefs[Reg] && LiveRegDefs[Reg] != SU) {
+ if (RegAdded.insert(Reg)) {
+ LRegs.push_back(Reg);
+ Added = true;
+ }
+ }
+ for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias)
+ if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != SU) {
+ if (RegAdded.insert(*Alias)) {
+ LRegs.push_back(*Alias);
+ Added = true;
+ }
+ }
+ return Added;
+}
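As an illustrative, X86-flavored case of the aliasing logic above: suppose LiveRegDefs[EAX] records a def belonging to some other SUnit and the node being tested defs AX. LiveRegDefs[AX] itself may be empty, but EAX sits in AX's alias set, so the alias loop pushes EAX into LRegs; RegAdded ensures each register is reported only once even when several defs overlap it.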
+
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
// If this node would clobber any "live" register, then it's not ready.
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
- if (I->Cost < 0) {
- unsigned Reg = I->Reg;
- if (LiveRegDefs[Reg] && LiveRegDefs[Reg] != I->Dep) {
- if (RegAdded.insert(Reg))
- LRegs.push_back(Reg);
- }
- for (const unsigned *Alias = TRI->getAliasSet(Reg);
- *Alias; ++Alias)
- if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != I->Dep) {
- if (RegAdded.insert(*Alias))
- LRegs.push_back(*Alias);
- }
- }
+ if (I->isAssignedRegDep())
+ CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
+ RegAdded, LRegs, TRI);
}
for (SDNode *Node = SU->getNode(); Node; Node = Node->getFlaggedNode()) {
+ if (Node->getOpcode() == ISD::INLINEASM) {
+ // Inline asm can clobber physical defs.
+ unsigned NumOps = Node->getNumOperands();
+ if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
+ --NumOps; // Ignore the flag operand.
+
+ for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
+ unsigned Flags =
+ cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
+ unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
+
+ ++i; // Skip the ID value.
+ if (InlineAsm::isRegDefKind(Flags) ||
+ InlineAsm::isRegDefEarlyClobberKind(Flags)) {
+ // Check for def of register or earlyclobber register.
+ for (; NumVals; --NumVals, ++i) {
+ unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(Reg))
+ CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
+ }
+ } else
+ i += NumVals;
+ }
+ continue;
+ }
+
if (!Node->isMachineOpcode())
continue;
const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
if (!TID.ImplicitDefs)
continue;
- for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
- if (LiveRegDefs[*Reg] && LiveRegDefs[*Reg] != SU) {
- if (RegAdded.insert(*Reg))
- LRegs.push_back(*Reg);
- }
- for (const unsigned *Alias = TRI->getAliasSet(*Reg);
- *Alias; ++Alias)
- if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != SU) {
- if (RegAdded.insert(*Alias))
- LRegs.push_back(*Alias);
- }
- }
+ for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg)
+ CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
}
return !LRegs.empty();
}
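The INLINEASM case above walks the asm node's operand groups. Schematically, the layout it relies on is:

    // op0 .. op(Op_FirstOperand-1): chain, the asm string, and other fixed
    // operands; then repeating groups of
    //   [flags word][NumVals register/value operands]
    // InlineAsm::getNumOperandRegisters(Flags) recovers NumVals, and the
    // isRegDefKind / isRegDefEarlyClobberKind predicates select the groups
    // whose physical-register defs can clobber a live register.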
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
unsigned CurCycle = 0;
+
+ // Release any predecessors of the special Exit node.
+ ReleasePredecessors(&ExitSU, CurCycle);
+
// Add root to Available queue.
if (!SUnits.empty()) {
SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
LRegsMap.clear();
SUnit *CurSU = AvailableQueue->pop();
while (CurSU) {
- if (CurSU->CycleBound <= CurCycle) {
- SmallVector<unsigned, 4> LRegs;
- if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
- break;
- Delayed = true;
- LRegsMap.insert(std::make_pair(CurSU, LRegs));
- }
+ SmallVector<unsigned, 4> LRegs;
+ if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
+ break;
+ Delayed = true;
+ LRegsMap.insert(std::make_pair(CurSU, LRegs));
CurSU->isPending = true; // This SU is not in AvailableQueue right now.
NotReady.push_back(CurSU);
OldSU->isAvailable = false;
AvailableQueue->remove(OldSU);
}
- AddPred(TrySU, OldSU, true, true);
+ AddPred(TrySU, SDep(OldSU, SDep::Order, /*Latency=*/1,
+ /*Reg=*/0, /*isNormalMemory=*/false,
+ /*isMustAlias=*/false, /*isArtificial=*/true));
- // If one or more successors has been unscheduled, then the current
- // node is no longer avaialable. Schedule a successor that's now
- // available instead.
+ // If one or more successors have been unscheduled, then the current
+ // node is no longer available. Schedule a successor that's now
+ // available instead.
}
if (!CurSU) {
- // Can't backtrack. Try duplicating the nodes that produces these
- // "expensive to copy" values to break the dependency. In case even
- // that doesn't work, insert cross class copies.
+ // Can't backtrack. If it's too expensive to copy the value, then try
+ // duplicating the nodes that produce these "too expensive to copy"
+ // values to break the dependency. If even that doesn't work, insert
+ // cross class copies.
+ // If it's not too expensive, i.e. cost != -1, issue copies.
SUnit *TrySU = NotReady[0];
SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
assert(LRegs.size() == 1 && "Can't handle this yet!");
unsigned Reg = LRegs[0];
SUnit *LRDef = LiveRegDefs[Reg];
- SUnit *NewDef = CopyAndMoveSuccessors(LRDef);
+ EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
+ const TargetRegisterClass *RC =
+ TRI->getMinimalPhysRegClass(Reg, VT);
+ const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
+
+ // If the cross copy register class is null, then it must be possible to
+ // copy the value directly. Do not try duplicating the def.
+ SUnit *NewDef = 0;
+ if (DestRC)
+ NewDef = CopyAndMoveSuccessors(LRDef);
+ else
+ DestRC = RC;
if (!NewDef) {
- // Issue expensive cross register class copies.
- MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
- const TargetRegisterClass *RC =
- TRI->getPhysicalRegisterRegClass(Reg, VT);
- const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
- if (!DestRC) {
- assert(false && "Don't know how to copy this physical register!");
- abort();
- }
+ // Issue copies; these can be expensive cross register class copies.
SmallVector<SUnit*, 2> Copies;
- InsertCCCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
- DOUT << "Adding an edge from SU # " << TrySU->NodeNum
- << " to SU #" << Copies.front()->NodeNum << "\n";
- AddPred(TrySU, Copies.front(), true, true);
+ InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
+ DEBUG(dbgs() << " Adding an edge from SU #" << TrySU->NodeNum
+ << " to SU #" << Copies.front()->NodeNum << "\n");
+ AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
+ /*Reg=*/0, /*isNormalMemory=*/false,
+ /*isMustAlias=*/false,
+ /*isArtificial=*/true));
NewDef = Copies.back();
}
- DOUT << "Adding an edge from SU # " << NewDef->NodeNum
- << " to SU #" << TrySU->NodeNum << "\n";
+ DEBUG(dbgs() << " Adding an edge from SU #" << NewDef->NodeNum
+ << " to SU #" << TrySU->NodeNum << "\n");
LiveRegDefs[Reg] = NewDef;
- AddPred(NewDef, TrySU, true, true);
+ AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
+ /*Reg=*/0, /*isNormalMemory=*/false,
+ /*isMustAlias=*/false,
+ /*isArtificial=*/true));
TrySU->isAvailable = false;
CurSU = NewDef;
}
- if (!CurSU) {
- assert(false && "Unable to resolve live physical register dependencies!");
- abort();
- }
+ assert(CurSU && "Unable to resolve live physical register dependencies!");
}
// Add the nodes that aren't ready back onto the available list.
}
NotReady.clear();
- if (!CurSU)
- Sequence.push_back(0);
- else
+ if (CurSU)
ScheduleNodeBottomUp(CurSU, CurCycle);
++CurCycle;
+ AvailableQueue->setCurCycle(CurCycle);
}
// Reverse the order if it is bottom up.
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
-void ScheduleDAGRRList::ReleaseSucc(SUnit *SU, SUnit *SuccSU, bool isChain) {
- --SuccSU->NumPredsLeft;
-
+void ScheduleDAGRRList::ReleaseSucc(SUnit *SU, const SDep *SuccEdge) {
+ SUnit *SuccSU = SuccEdge->getSUnit();
+
#ifndef NDEBUG
- if (SuccSU->NumPredsLeft < 0) {
- cerr << "*** Scheduling failed! ***\n";
+ if (SuccSU->NumPredsLeft == 0) {
+ dbgs() << "*** Scheduling failed! ***\n";
SuccSU->dump(this);
- cerr << " has been released too many times!\n";
- assert(0);
+ dbgs() << " has been released too many times!\n";
+ llvm_unreachable(0);
}
#endif
-
- // Compute how many cycles it will be before this actually becomes
- // available. This is the max of the start time of all predecessors plus
- // their latencies.
- // If this is a token edge, we don't need to wait for the latency of the
- // preceeding instruction (e.g. a long-latency load) unless there is also
- // some other data dependence.
- unsigned PredDoneCycle = SU->Cycle;
- if (!isChain)
- PredDoneCycle += SU->Latency;
- else if (SU->Latency)
- PredDoneCycle += 1;
- SuccSU->CycleBound = std::max(SuccSU->CycleBound, PredDoneCycle);
+ --SuccSU->NumPredsLeft;
- if (SuccSU->NumPredsLeft == 0) {
+ // If all the node's predecessors are scheduled, this node is ready
+ // to be scheduled. Ignore the special ExitSU node.
+ if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) {
SuccSU->isAvailable = true;
AvailableQueue->push(SuccSU);
}
}
+void ScheduleDAGRRList::ReleaseSuccessors(SUnit *SU) {
+ // Top down: release successors
+ for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I) {
+ assert(!I->isAssignedRegDep() &&
+ "The list-tdrr scheduler doesn't yet support physreg dependencies!");
+
+ ReleaseSucc(SU, &*I);
+ }
+}
/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
- DOUT << "*** Scheduling [" << CurCycle << "]: ";
+ DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
- SU->Cycle = CurCycle;
+ assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
+ SU->setDepthToAtLeast(CurCycle);
Sequence.push_back(SU);
- // Top down: release successors
- for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I)
- ReleaseSucc(SU, I->Dep, I->isCtrl);
-
+ ReleaseSuccessors(SU);
SU->isScheduled = true;
AvailableQueue->ScheduledNode(SU);
}
/// schedulers.
void ScheduleDAGRRList::ListScheduleTopDown() {
unsigned CurCycle = 0;
+ AvailableQueue->setCurCycle(CurCycle);
+
+ // Release any successors of the special Entry node.
+ ReleaseSuccessors(&EntrySU);
// All leaves to Available queue.
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
// While Available queue is not empty, grab the node with the highest
// priority. If it is not ready put it back. Schedule the node.
- std::vector<SUnit*> NotReady;
Sequence.reserve(SUnits.size());
while (!AvailableQueue->empty()) {
SUnit *CurSU = AvailableQueue->pop();
- while (CurSU && CurSU->CycleBound > CurCycle) {
- NotReady.push_back(CurSU);
- CurSU = AvailableQueue->pop();
- }
- // Add the nodes that aren't ready back onto the available list.
- AvailableQueue->push_all(NotReady);
- NotReady.clear();
-
- if (!CurSU)
- Sequence.push_back(0);
- else
+ if (CurSU)
ScheduleNodeTopDown(CurSU, CurCycle);
++CurCycle;
+ AvailableQueue->setCurCycle(CurCycle);
}
#ifndef NDEBUG
template<class SF>
class RegReductionPriorityQueue;
- /// Sorting functions for the Available queue.
+ // bu_ls_rr_sort - Priority function for bottom up register pressure
+ // reduction scheduler.
struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
bool operator()(const SUnit* left, const SUnit* right) const;
};
- struct bu_ls_rr_fast_sort : public std::binary_function<SUnit*, SUnit*, bool>{
- RegReductionPriorityQueue<bu_ls_rr_fast_sort> *SPQ;
- bu_ls_rr_fast_sort(RegReductionPriorityQueue<bu_ls_rr_fast_sort> *spq)
- : SPQ(spq) {}
- bu_ls_rr_fast_sort(const bu_ls_rr_fast_sort &RHS) : SPQ(RHS.SPQ) {}
-
- bool operator()(const SUnit* left, const SUnit* right) const;
- };
-
+ // td_ls_rr_sort - Priority function for top down register pressure reduction
+ // scheduler.
struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
bool operator()(const SUnit* left, const SUnit* right) const;
};
-} // end anonymous namespace
-static inline bool isCopyFromLiveIn(const SUnit *SU) {
- SDNode *N = SU->getNode();
- return N && N->getOpcode() == ISD::CopyFromReg &&
- N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
-}
+ // src_ls_rr_sort - Priority function for source order scheduler.
+ struct src_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
+ RegReductionPriorityQueue<src_ls_rr_sort> *SPQ;
+ src_ls_rr_sort(RegReductionPriorityQueue<src_ls_rr_sort> *spq)
+ : SPQ(spq) {}
+ src_ls_rr_sort(const src_ls_rr_sort &RHS)
+ : SPQ(RHS.SPQ) {}
+
+ bool operator()(const SUnit* left, const SUnit* right) const;
+ };
+
+ // hybrid_ls_rr_sort - Priority function for hybrid scheduler.
+ struct hybrid_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
+ RegReductionPriorityQueue<hybrid_ls_rr_sort> *SPQ;
+ hybrid_ls_rr_sort(RegReductionPriorityQueue<hybrid_ls_rr_sort> *spq)
+ : SPQ(spq) {}
+ hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
+ : SPQ(RHS.SPQ) {}
-/// CalcNodeBUSethiUllmanNumber - Compute Sethi Ullman number for bottom up
-/// scheduling. Smaller number is the higher priority.
+ bool operator()(const SUnit* left, const SUnit* right) const;
+ };
+
+ // ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
+ // scheduler.
+ struct ilp_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
+ RegReductionPriorityQueue<ilp_ls_rr_sort> *SPQ;
+ ilp_ls_rr_sort(RegReductionPriorityQueue<ilp_ls_rr_sort> *spq)
+ : SPQ(spq) {}
+ ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
+ : SPQ(RHS.SPQ) {}
+
+ bool operator()(const SUnit* left, const SUnit* right) const;
+ };
+} // end anonymous namespace
+
+/// CalcNodeSethiUllmanNumber - Compute Sethi Ullman number.
+/// Smaller number is the higher priority.
static unsigned
-CalcNodeBUSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
+CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
if (SethiUllmanNumber != 0)
return SethiUllmanNumber;
unsigned Extra = 0;
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
- if (I->isCtrl) continue; // ignore chain preds
- SUnit *PredSU = I->Dep;
- unsigned PredSethiUllman = CalcNodeBUSethiUllmanNumber(PredSU, SUNumbers);
+ if (I->isCtrl()) continue; // ignore chain preds
+ SUnit *PredSU = I->getSUnit();
+ unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
if (PredSethiUllman > SethiUllmanNumber) {
SethiUllmanNumber = PredSethiUllman;
Extra = 0;
- } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
+ } else if (PredSethiUllman == SethiUllmanNumber)
++Extra;
}
return SethiUllmanNumber;
}
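A quick worked example of the numbering (assuming the conventional base case, elided above, in which an ordinary leaf gets 1): a node whose two register operands both evaluate to 2 is assigned 2 + 1 = 3, since whichever subtree is computed second must run while the first result is still live; a node with operand numbers 2 and 4 is assigned just 4, because computing the larger subtree first leaves a register to spare. Per the doc comment, the smaller number wins the higher priority.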
-/// CalcNodeTDSethiUllmanNumber - Compute Sethi Ullman number for top down
-/// scheduling. Smaller number is the higher priority.
-static unsigned
-CalcNodeTDSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
- unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
- if (SethiUllmanNumber != 0)
- return SethiUllmanNumber;
-
- unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
- if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
- SethiUllmanNumber = 0xffff;
- else if (SU->NumSuccsLeft == 0)
- // If SU does not have a use, i.e. it doesn't produce a value that would
- // be consumed (e.g. store), then it terminates a chain of computation.
- // Give it a small SethiUllman number so it will be scheduled right before
- // its predecessors that it doesn't lengthen their live ranges.
- SethiUllmanNumber = 0;
- else if (SU->NumPredsLeft == 0 &&
- (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
- SethiUllmanNumber = 0xffff;
- else {
- int Extra = 0;
- for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
- I != E; ++I) {
- if (I->isCtrl) continue; // ignore chain preds
- SUnit *PredSU = I->Dep;
- unsigned PredSethiUllman = CalcNodeTDSethiUllmanNumber(PredSU, SUNumbers);
- if (PredSethiUllman > SethiUllmanNumber) {
- SethiUllmanNumber = PredSethiUllman;
- Extra = 0;
- } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
- ++Extra;
- }
-
- SethiUllmanNumber += Extra;
- }
-
- return SethiUllmanNumber;
-}
-
-
namespace {
template<class SF>
- class VISIBILITY_HIDDEN RegReductionPriorityQueue
- : public SchedulingPriorityQueue {
- PriorityQueue<SUnit*, std::vector<SUnit*>, SF> Queue;
- unsigned currentQueueId;
-
- public:
- RegReductionPriorityQueue() :
- Queue(SF(this)), currentQueueId(0) {}
-
- virtual void initNodes(std::vector<SUnit> &sunits) = 0;
-
- virtual void addNode(const SUnit *SU) = 0;
-
- virtual void updateNode(const SUnit *SU) = 0;
-
- virtual void releaseState() = 0;
-
- virtual unsigned getNodePriority(const SUnit *SU) const = 0;
-
- unsigned size() const { return Queue.size(); }
-
- bool empty() const { return Queue.empty(); }
-
- void push(SUnit *U) {
- assert(!U->NodeQueueId && "Node in the queue already");
- U->NodeQueueId = ++currentQueueId;
- Queue.push(U);
- }
-
- void push_all(const std::vector<SUnit *> &Nodes) {
- for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
- push(Nodes[i]);
- }
-
- SUnit *pop() {
- if (empty()) return NULL;
- SUnit *V = Queue.top();
- Queue.pop();
- V->NodeQueueId = 0;
- return V;
- }
-
- void remove(SUnit *SU) {
- assert(!Queue.empty() && "Queue is empty!");
- assert(SU->NodeQueueId != 0 && "Not in queue!");
- Queue.erase_one(SU);
- SU->NodeQueueId = 0;
- }
- };
+ class RegReductionPriorityQueue : public SchedulingPriorityQueue {
+ std::vector<SUnit*> Queue;
+ SF Picker;
+ unsigned CurQueueId;
+ bool TracksRegPressure;
- class VISIBILITY_HIDDEN BURegReductionPriorityQueue
- : public RegReductionPriorityQueue<bu_ls_rr_sort> {
+ protected:
// SUnits - The SUnits for the current graph.
std::vector<SUnit> *SUnits;
-
- // SethiUllmanNumbers - The SethiUllman number for each node.
- std::vector<unsigned> SethiUllmanNumbers;
+ MachineFunction &MF;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
+ const TargetLowering *TLI;
ScheduleDAGRRList *scheduleDAG;
- public:
- explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii,
- const TargetRegisterInfo *tri)
- : TII(tii), TRI(tri), scheduleDAG(NULL) {}
+ // SethiUllmanNumbers - The SethiUllman number for each node.
+ std::vector<unsigned> SethiUllmanNumbers;
+ /// RegPressure - Tracking current reg pressure per register class.
+ ///
+ std::vector<unsigned> RegPressure;
+
+ /// RegLimit - Tracking the number of allocatable registers per register
+ /// class.
+ std::vector<unsigned> RegLimit;
+
+ public:
+ RegReductionPriorityQueue(MachineFunction &mf,
+ bool tracksrp,
+ const TargetInstrInfo *tii,
+ const TargetRegisterInfo *tri,
+ const TargetLowering *tli)
+ : Picker(this), CurQueueId(0), TracksRegPressure(tracksrp),
+ MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
+ if (TracksRegPressure) {
+ unsigned NumRC = TRI->getNumRegClasses();
+ RegLimit.resize(NumRC);
+ RegPressure.resize(NumRC);
+ std::fill(RegLimit.begin(), RegLimit.end(), 0);
+ std::fill(RegPressure.begin(), RegPressure.end(), 0);
+ for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
+ E = TRI->regclass_end(); I != E; ++I)
+ RegLimit[(*I)->getID()] = tli->getRegPressureLimit(*I, MF);
+ }
+ }
+
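With TracksRegPressure set, the constructor above leaves RegLimit holding one entry per register class ID: whatever TLI->getRegPressureLimit reports as that class's allocatable budget for this function. RegPressure starts at zero and is meant to rise and fall as scheduling proceeds (in code past this excerpt); HighRegPressure below flags any class whose running total would meet or exceed its limit.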
void initNodes(std::vector<SUnit> &sunits) {
SUnits = &sunits;
// Add pseudo dependency edges for two-address nodes.
AddPseudoTwoAddrDeps();
+ // Reroute edges to nodes with multiple uses.
+ PrescheduleNodesWithMultipleUses();
// Calculate node priorities.
CalculateSethiUllmanNumbers();
}
unsigned SUSize = SethiUllmanNumbers.size();
if (SUnits->size() > SUSize)
SethiUllmanNumbers.resize(SUSize*2, 0);
- CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
+ CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}
void updateNode(const SUnit *SU) {
SethiUllmanNumbers[SU->NodeNum] = 0;
- CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
+ CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}
void releaseState() {
SUnits = 0;
SethiUllmanNumbers.clear();
+ std::fill(RegPressure.begin(), RegPressure.end(), 0);
}
unsigned getNodePriority(const SUnit *SU) const {
assert(SU->NodeNum < SethiUllmanNumbers.size());
unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
- if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU))
- // CopyFromReg should be close to its def because it restricts
- // allocation choices. But if it is a livein then perhaps we want it
- // closer to its uses so it can be coalesced.
- return 0xffff;
- else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
+ if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
// CopyToReg should be close to its uses to facilitate coalescing and
// avoid spilling.
return 0;
- else if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
- Opc == TargetInstrInfo::INSERT_SUBREG)
- // EXTRACT_SUBREG / INSERT_SUBREG should be close to its use to
- // facilitate coalescing.
+ if (Opc == TargetOpcode::EXTRACT_SUBREG ||
+ Opc == TargetOpcode::SUBREG_TO_REG ||
+ Opc == TargetOpcode::INSERT_SUBREG)
+ // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
+ // close to their uses to facilitate coalescing.
return 0;
- else if (SU->NumSuccs == 0)
- // If SU does not have a use, i.e. it doesn't produce a value that would
- // be consumed (e.g. store), then it terminates a chain of computation.
- // Give it a large SethiUllman number so it will be scheduled right
- // before its predecessors that it doesn't lengthen their live ranges.
+ if (SU->NumSuccs == 0 && SU->NumPreds != 0)
+ // If SU does not have a register use, i.e. it doesn't produce a value
+ // that would be consumed (e.g. store), then it terminates a chain of
+ // computation. Give it a large SethiUllman number so it will be
+ // scheduled right before its predecessors so that it doesn't lengthen
+ // their live ranges.
return 0xffff;
- else if (SU->NumPreds == 0)
- // If SU does not have a def, schedule it close to its uses because it
- // does not lengthen any live ranges.
+ if (SU->NumPreds == 0 && SU->NumSuccs != 0)
+ // If SU does not have a register def, schedule it close to its uses
+ // because it does not lengthen any live ranges.
return 0;
- else
- return SethiUllmanNumbers[SU->NodeNum];
+ return SethiUllmanNumbers[SU->NodeNum];
}
- void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
- scheduleDAG = scheduleDag;
+ unsigned getNodeOrdering(const SUnit *SU) const {
+ return scheduleDAG->DAG->GetOrdering(SU->getNode());
}
- private:
- bool canClobber(const SUnit *SU, const SUnit *Op);
- void AddPseudoTwoAddrDeps();
- void CalculateSethiUllmanNumbers();
- };
-
-
- class VISIBILITY_HIDDEN BURegReductionFastPriorityQueue
- : public RegReductionPriorityQueue<bu_ls_rr_fast_sort> {
- // SUnits - The SUnits for the current graph.
- const std::vector<SUnit> *SUnits;
+ bool empty() const { return Queue.empty(); }
- // SethiUllmanNumbers - The SethiUllman number for each node.
- std::vector<unsigned> SethiUllmanNumbers;
- public:
- explicit BURegReductionFastPriorityQueue() {}
-
- void initNodes(std::vector<SUnit> &sunits) {
- SUnits = &sunits;
- // Calculate node priorities.
- CalculateSethiUllmanNumbers();
+ void push(SUnit *U) {
+ assert(!U->NodeQueueId && "Node in the queue already");
+ U->NodeQueueId = ++CurQueueId;
+ Queue.push_back(U);
}
- void addNode(const SUnit *SU) {
- unsigned SUSize = SethiUllmanNumbers.size();
- if (SUnits->size() > SUSize)
- SethiUllmanNumbers.resize(SUSize*2, 0);
- CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
+ SUnit *pop() {
+ if (empty()) return NULL;
+ std::vector<SUnit *>::iterator Best = Queue.begin();
+ for (std::vector<SUnit *>::iterator I = llvm::next(Queue.begin()),
+ E = Queue.end(); I != E; ++I)
+ if (Picker(*Best, *I))
+ Best = I;
+ SUnit *V = *Best;
+ if (Best != prior(Queue.end()))
+ std::swap(*Best, Queue.back());
+ Queue.pop_back();
+ V->NodeQueueId = 0;
+ return V;
}
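+    // Note that pop() above selects by a linear scan: Picker(A, B) returning
+    // true means B is preferred. The winner is removed in O(1) by swapping
+    // it with the last element; scanning (rather than keeping a heap) allows
+    // the comparison to depend on mutable state such as register pressure.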
- void updateNode(const SUnit *SU) {
- SethiUllmanNumbers[SU->NodeNum] = 0;
- CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
+ void remove(SUnit *SU) {
+ assert(!Queue.empty() && "Queue is empty!");
+ assert(SU->NodeQueueId != 0 && "Not in queue!");
+ std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
+ SU);
+ if (I != prior(Queue.end()))
+ std::swap(*I, Queue.back());
+ Queue.pop_back();
+ SU->NodeQueueId = 0;
}
- void releaseState() {
- SUnits = 0;
- SethiUllmanNumbers.clear();
- }
+ bool HighRegPressure(const SUnit *SU) const {
+ if (!TLI)
+ return false;
- unsigned getNodePriority(const SUnit *SU) const {
- return SethiUllmanNumbers[SU->NodeNum];
+      for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ if (I->isCtrl())
+ continue;
+ SUnit *PredSU = I->getSUnit();
+ const SDNode *PN = PredSU->getNode();
+ if (!PN->isMachineOpcode()) {
+ if (PN->getOpcode() == ISD::CopyFromReg) {
+ EVT VT = PN->getValueType(0);
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ unsigned Cost = TLI->getRepRegClassCostFor(VT);
+ if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
+ return true;
+ }
+ continue;
+ }
+ unsigned POpc = PN->getMachineOpcode();
+ if (POpc == TargetOpcode::IMPLICIT_DEF)
+ continue;
+ if (POpc == TargetOpcode::EXTRACT_SUBREG) {
+ EVT VT = PN->getOperand(0).getValueType();
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ unsigned Cost = TLI->getRepRegClassCostFor(VT);
+ // Check if this increases register pressure of the specific register
+ // class to the point where it would cause spills.
+ if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
+ return true;
+ continue;
+ } else if (POpc == TargetOpcode::INSERT_SUBREG ||
+ POpc == TargetOpcode::SUBREG_TO_REG) {
+ EVT VT = PN->getValueType(0);
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ unsigned Cost = TLI->getRepRegClassCostFor(VT);
+ // Check if this increases register pressure of the specific register
+ // class to the point where it would cause spills.
+ if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
+ return true;
+ continue;
+ }
+ unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
+ for (unsigned i = 0; i != NumDefs; ++i) {
+ EVT VT = PN->getValueType(i);
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ if (RegPressure[RCId] >= RegLimit[RCId])
+ return true; // Reg pressure already high.
+ unsigned Cost = TLI->getRepRegClassCostFor(VT);
+ if (!PN->hasAnyUseOfValue(i))
+ continue;
+ // Check if this increases register pressure of the specific register
+ // class to the point where it would cause spills.
+ if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
+ return true;
+ }
+ }
+
+ return false;
}
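+    // An illustrative reading of HighRegPressure with hypothetical numbers:
+    // if the class for an i32 value has RegLimit 6 and RegPressure is
+    // currently 5, one more def of cost 1 gives 5 + 1 >= 6, so scheduling
+    // this node is deemed to cause high register pressure.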
- private:
- void CalculateSethiUllmanNumbers();
- };
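+    // Bottom-up register pressure bookkeeping: scheduling SU, a use, makes a
+    // predecessor's value live when no other use of it has been scheduled
+    // yet, so that value's register class cost is added; SU's own defs have
+    // had all of their uses scheduled below already, so their costs are
+    // released at the end.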
+ void ScheduledNode(SUnit *SU) {
+ if (!TracksRegPressure)
+ return;
+ const SDNode *N = SU->getNode();
+ if (!N->isMachineOpcode()) {
+ if (N->getOpcode() != ISD::CopyToReg)
+ return;
+ } else {
+ unsigned Opc = N->getMachineOpcode();
+ if (Opc == TargetOpcode::EXTRACT_SUBREG ||
+ Opc == TargetOpcode::INSERT_SUBREG ||
+ Opc == TargetOpcode::SUBREG_TO_REG ||
+ Opc == TargetOpcode::REG_SEQUENCE ||
+ Opc == TargetOpcode::IMPLICIT_DEF)
+ return;
+ }
- class VISIBILITY_HIDDEN TDRegReductionPriorityQueue
- : public RegReductionPriorityQueue<td_ls_rr_sort> {
- // SUnits - The SUnits for the current graph.
- const std::vector<SUnit> *SUnits;
-
- // SethiUllmanNumbers - The SethiUllman number for each node.
- std::vector<unsigned> SethiUllmanNumbers;
+ for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ if (I->isCtrl())
+ continue;
+ SUnit *PredSU = I->getSUnit();
+ if (PredSU->NumSuccsLeft != PredSU->NumSuccs)
+ continue;
+ const SDNode *PN = PredSU->getNode();
+ if (!PN->isMachineOpcode()) {
+ if (PN->getOpcode() == ISD::CopyFromReg) {
+ EVT VT = PN->getValueType(0);
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ }
+ continue;
+ }
+ unsigned POpc = PN->getMachineOpcode();
+ if (POpc == TargetOpcode::IMPLICIT_DEF)
+ continue;
+ if (POpc == TargetOpcode::EXTRACT_SUBREG) {
+ EVT VT = PN->getOperand(0).getValueType();
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ continue;
+ } else if (POpc == TargetOpcode::INSERT_SUBREG ||
+ POpc == TargetOpcode::SUBREG_TO_REG) {
+ EVT VT = PN->getValueType(0);
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ continue;
+ }
+ unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
+ for (unsigned i = 0; i != NumDefs; ++i) {
+ EVT VT = PN->getValueType(i);
+ if (!PN->hasAnyUseOfValue(i))
+ continue;
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ }
+ }
- public:
- TDRegReductionPriorityQueue() {}
+ // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
+ // may transfer data dependencies to CopyToReg.
+ if (SU->NumSuccs && N->isMachineOpcode()) {
+ unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
+ for (unsigned i = 0; i != NumDefs; ++i) {
+ EVT VT = N->getValueType(i);
+ if (!N->hasAnyUseOfValue(i))
+ continue;
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
+ // Register pressure tracking is imprecise. This can happen.
+ RegPressure[RCId] = 0;
+ else
+ RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
+ }
+ }
- void initNodes(std::vector<SUnit> &sunits) {
- SUnits = &sunits;
- // Calculate node priorities.
- CalculateSethiUllmanNumbers();
+ dumpRegPressure();
}
- void addNode(const SUnit *SU) {
- unsigned SUSize = SethiUllmanNumbers.size();
- if (SUnits->size() > SUSize)
- SethiUllmanNumbers.resize(SUSize*2, 0);
- CalcNodeTDSethiUllmanNumber(SU, SethiUllmanNumbers);
- }
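+    // Called when the scheduler backtracks and removes SU from the schedule;
+    // performs the corresponding register pressure bookkeeping.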
+ void UnscheduledNode(SUnit *SU) {
+ if (!TracksRegPressure)
+ return;
- void updateNode(const SUnit *SU) {
- SethiUllmanNumbers[SU->NodeNum] = 0;
- CalcNodeTDSethiUllmanNumber(SU, SethiUllmanNumbers);
+ const SDNode *N = SU->getNode();
+ if (!N->isMachineOpcode()) {
+ if (N->getOpcode() != ISD::CopyToReg)
+ return;
+ } else {
+ unsigned Opc = N->getMachineOpcode();
+ if (Opc == TargetOpcode::EXTRACT_SUBREG ||
+ Opc == TargetOpcode::INSERT_SUBREG ||
+ Opc == TargetOpcode::SUBREG_TO_REG ||
+ Opc == TargetOpcode::REG_SEQUENCE ||
+ Opc == TargetOpcode::IMPLICIT_DEF)
+ return;
+ }
+
+ for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ if (I->isCtrl())
+ continue;
+ SUnit *PredSU = I->getSUnit();
+ if (PredSU->NumSuccsLeft != PredSU->NumSuccs)
+ continue;
+ const SDNode *PN = PredSU->getNode();
+ if (!PN->isMachineOpcode()) {
+ if (PN->getOpcode() == ISD::CopyFromReg) {
+ EVT VT = PN->getValueType(0);
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ }
+ continue;
+ }
+ unsigned POpc = PN->getMachineOpcode();
+ if (POpc == TargetOpcode::IMPLICIT_DEF)
+ continue;
+ if (POpc == TargetOpcode::EXTRACT_SUBREG) {
+ EVT VT = PN->getOperand(0).getValueType();
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ continue;
+ } else if (POpc == TargetOpcode::INSERT_SUBREG ||
+ POpc == TargetOpcode::SUBREG_TO_REG) {
+ EVT VT = PN->getValueType(0);
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ continue;
+ }
+ unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
+ for (unsigned i = 0; i != NumDefs; ++i) {
+ EVT VT = PN->getValueType(i);
+ if (!PN->hasAnyUseOfValue(i))
+ continue;
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
+ // Register pressure tracking is imprecise. This can happen.
+ RegPressure[RCId] = 0;
+ else
+ RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
+ }
+ }
+
+ // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
+ // may transfer data dependencies to CopyToReg.
+ if (SU->NumSuccs && N->isMachineOpcode()) {
+ unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
+ for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
+ EVT VT = N->getValueType(i);
+ if (VT == MVT::Flag || VT == MVT::Other)
+ continue;
+ if (!N->hasAnyUseOfValue(i))
+ continue;
+ unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+ RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+ }
+ }
+
+ dumpRegPressure();
}
- void releaseState() {
- SUnits = 0;
- SethiUllmanNumbers.clear();
+ void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
+ scheduleDAG = scheduleDag;
}
- unsigned getNodePriority(const SUnit *SU) const {
- assert(SU->NodeNum < SethiUllmanNumbers.size());
- return SethiUllmanNumbers[SU->NodeNum];
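+    // Debug helper: prints the tracked pressure for each register class with
+    // live values as "name: used / limit", e.g. "GR32: 5 / 6" (illustrative
+    // class name and numbers).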
+ void dumpRegPressure() const {
+ for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
+ E = TRI->regclass_end(); I != E; ++I) {
+ const TargetRegisterClass *RC = *I;
+ unsigned Id = RC->getID();
+ unsigned RP = RegPressure[Id];
+ if (!RP) continue;
+ DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
+ << '\n');
+ }
}
- private:
+ protected:
+ bool canClobber(const SUnit *SU, const SUnit *Op);
+ void AddPseudoTwoAddrDeps();
+ void PrescheduleNodesWithMultipleUses();
void CalculateSethiUllmanNumbers();
};
+
+ typedef RegReductionPriorityQueue<bu_ls_rr_sort>
+ BURegReductionPriorityQueue;
+
+ typedef RegReductionPriorityQueue<td_ls_rr_sort>
+ TDRegReductionPriorityQueue;
+
+ typedef RegReductionPriorityQueue<src_ls_rr_sort>
+ SrcRegReductionPriorityQueue;
+
+ typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
+ HybridBURRPriorityQueue;
+
+ typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
+ ILPBURRPriorityQueue;
}
/// closestSucc - Returns the scheduled cycle of the successor which is
-/// closet to the current cycle.
+/// closest to the current cycle.
static unsigned closestSucc(const SUnit *SU) {
- unsigned MaxCycle = 0;
+ unsigned MaxHeight = 0;
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
- unsigned Cycle = I->Dep->Cycle;
+ if (I->isCtrl()) continue; // ignore chain succs
+ unsigned Height = I->getSUnit()->getHeight();
    // If there are a bunch of CopyToRegs stacked up, they should be considered
// to be at the same position.
- if (I->Dep->getNode() && I->Dep->getNode()->getOpcode() == ISD::CopyToReg)
- Cycle = closestSucc(I->Dep)+1;
- if (Cycle > MaxCycle)
- MaxCycle = Cycle;
+ if (I->getSUnit()->getNode() &&
+ I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
+ Height = closestSucc(I->getSUnit())+1;
+ if (Height > MaxHeight)
+ MaxHeight = Height;
}
- return MaxCycle;
+ return MaxHeight;
}
-/// calcMaxScratches - Returns an cost estimate of the worse case requirement
-/// for scratch registers. Live-in operands and live-out results don't count
-/// since they are "fixed".
+/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
+/// for scratch registers, i.e. the number of data dependencies.
static unsigned calcMaxScratches(const SUnit *SU) {
unsigned Scratches = 0;
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
- if (I->isCtrl) continue; // ignore chain preds
- if (!I->Dep->getNode() || I->Dep->getNode()->getOpcode() != ISD::CopyFromReg)
- Scratches++;
- }
- for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I) {
- if (I->isCtrl) continue; // ignore chain succs
- if (!I->Dep->getNode() || I->Dep->getNode()->getOpcode() != ISD::CopyToReg)
- Scratches += 10;
+ if (I->isCtrl()) continue; // ignore chain preds
+ Scratches++;
}
return Scratches;
}
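+// For example, a node with two register operands and one chain operand has
+// two non-control predecessor edges, so calcMaxScratches returns 2.
+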
-// Bottom up
-bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
+template <typename RRSort>
+static bool BURRSort(const SUnit *left, const SUnit *right,
+ const RegReductionPriorityQueue<RRSort> *SPQ) {
unsigned LPriority = SPQ->getNodePriority(left);
unsigned RPriority = SPQ->getNodePriority(right);
if (LPriority != RPriority)
if (LDist != RDist)
return LDist < RDist;
- // Intuitively, it's good to push down instructions whose results are
- // liveout so their long live ranges won't conflict with other values
- // which are needed inside the BB. Further prioritize liveout instructions
- // by the number of operands which are calculated within the BB.
+  // How many registers become live when the node is scheduled.
unsigned LScratch = calcMaxScratches(left);
unsigned RScratch = calcMaxScratches(right);
if (LScratch != RScratch)
return LScratch > RScratch;
- if (left->Height != right->Height)
- return left->Height > right->Height;
+ if (left->getHeight() != right->getHeight())
+ return left->getHeight() > right->getHeight();
- if (left->Depth != right->Depth)
- return left->Depth < right->Depth;
-
- if (left->CycleBound != right->CycleBound)
- return left->CycleBound > right->CycleBound;
+ if (left->getDepth() != right->getDepth())
+ return left->getDepth() < right->getDepth();
assert(left->NodeQueueId && right->NodeQueueId &&
"NodeQueueId cannot be zero");
return (left->NodeQueueId > right->NodeQueueId);
}
-bool
-bu_ls_rr_fast_sort::operator()(const SUnit *left, const SUnit *right) const {
- unsigned LPriority = SPQ->getNodePriority(left);
- unsigned RPriority = SPQ->getNodePriority(right);
- if (LPriority != RPriority)
- return LPriority > RPriority;
- assert(left->NodeQueueId && right->NodeQueueId &&
- "NodeQueueId cannot be zero");
- return (left->NodeQueueId > right->NodeQueueId);
+// Bottom up
+bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
+ return BURRSort(left, right, SPQ);
+}
+
+// Source order, otherwise bottom up.
+bool src_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
+ unsigned LOrder = SPQ->getNodeOrdering(left);
+ unsigned ROrder = SPQ->getNodeOrdering(right);
+
+  // Prefer the node with the lower non-zero order number; an order number of
+  // zero means no source ordering was recorded.
+ if ((LOrder || ROrder) && LOrder != ROrder)
+ return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
+
+ return BURRSort(left, right, SPQ);
}
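+// A sketch of the predicate above with hypothetical order numbers: if
+// LOrder == 3 and ROrder == 7, it returns true, so the right node is popped
+// first and, because this queue drives a bottom-up scheduler, is placed
+// later in the final instruction order; the node that is earlier in source
+// order (LOrder == 3) therefore ends up earlier in the output.
+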
+bool hybrid_ls_rr_sort::operator()(const SUnit *left,
+                                   const SUnit *right) const {
+ bool LHigh = SPQ->HighRegPressure(left);
+ bool RHigh = SPQ->HighRegPressure(right);
+ // Avoid causing spills. If register pressure is high, schedule for
+ // register pressure reduction.
+ if (LHigh && !RHigh)
+ return true;
+ else if (!LHigh && RHigh)
+ return false;
+ else if (!LHigh && !RHigh) {
+ // Low register pressure situation, schedule for latency if possible.
+ bool LStall = left->SchedulingPref == Sched::Latency &&
+ SPQ->getCurCycle() < left->getHeight();
+ bool RStall = right->SchedulingPref == Sched::Latency &&
+ SPQ->getCurCycle() < right->getHeight();
+    // If scheduling one of the nodes would cause a pipeline stall, delay it.
+    // If scheduling both of them would cause stalls, sort them by height.
+    // If neither would cause a pipeline stall, try to reduce register
+    // pressure.
+ if (LStall) {
+ if (!RStall)
+ return true;
+ if (left->getHeight() != right->getHeight())
+ return left->getHeight() > right->getHeight();
+ } else if (RStall)
+ return false;
+
+    // If either node prefers latency scheduling, sort them by height first,
+    // then by latency.
+ if (left->SchedulingPref == Sched::Latency ||
+ right->SchedulingPref == Sched::Latency) {
+ if (left->getHeight() != right->getHeight())
+ return left->getHeight() > right->getHeight();
+ if (left->Latency != right->Latency)
+ return left->Latency > right->Latency;
+ }
+ }
+
+ return BURRSort(left, right, SPQ);
+}
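+// A sketch of the stall test above with hypothetical numbers: at
+// getCurCycle() == 3, a latency-preferring node of height 5 counts as
+// stalled (3 < 5) and is delayed in favor of a node that can issue without
+// stalling.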
+
+bool ilp_ls_rr_sort::operator()(const SUnit *left,
+ const SUnit *right) const {
+ bool LHigh = SPQ->HighRegPressure(left);
+ bool RHigh = SPQ->HighRegPressure(right);
+ // Avoid causing spills. If register pressure is high, schedule for
+ // register pressure reduction.
+ if (LHigh && !RHigh)
+ return true;
+ else if (!LHigh && RHigh)
+ return false;
+ else if (!LHigh && !RHigh) {
+ // Low register pressure situation, schedule to maximize instruction level
+ // parallelism.
+ if (left->NumPreds > right->NumPreds)
+ return false;
+ else if (left->NumPreds < right->NumPreds)
+      return true;
+ }
+
+ return BURRSort(left, right, SPQ);
+}
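+// Mechanically, with hypothetical counts left->NumPreds == 4 and
+// right->NumPreds == 2, the predicate returns false, keeping the left node
+// as the preferred pop candidate, per the ILP comment above.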
+
+template<class SF>
bool
-BURegReductionPriorityQueue::canClobber(const SUnit *SU, const SUnit *Op) {
+RegReductionPriorityQueue<SF>::canClobber(const SUnit *SU, const SUnit *Op) {
if (SU->isTwoAddress) {
unsigned Opc = SU->getNode()->getMachineOpcode();
const TargetInstrDesc &TID = TII->get(Opc);
return false;
}
-
/// hasCopyToRegUse - Return true if SU has a value successor that is a
/// CopyToReg node.
static bool hasCopyToRegUse(const SUnit *SU) {
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
- if (I->isCtrl) continue;
- const SUnit *SuccSU = I->Dep;
+ if (I->isCtrl()) continue;
+ const SUnit *SuccSU = I->getSUnit();
if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg)
return true;
}
unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
assert(ImpDefs && "Caller should check hasPhysRegDefs");
- const unsigned *SUImpDefs =
- TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
- if (!SUImpDefs)
- return false;
- for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
- MVT VT = N->getValueType(i);
- if (VT == MVT::Flag || VT == MVT::Other)
- continue;
- if (!N->hasAnyUseOfValue(i))
+ for (const SDNode *SUNode = SU->getNode(); SUNode;
+ SUNode = SUNode->getFlaggedNode()) {
+ if (!SUNode->isMachineOpcode())
continue;
- unsigned Reg = ImpDefs[i - NumDefs];
- for (;*SUImpDefs; ++SUImpDefs) {
- unsigned SUReg = *SUImpDefs;
- if (TRI->regsOverlap(Reg, SUReg))
- return true;
+ const unsigned *SUImpDefs =
+ TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
+ if (!SUImpDefs)
+ return false;
+ for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
+ EVT VT = N->getValueType(i);
+ if (VT == MVT::Flag || VT == MVT::Other)
+ continue;
+ if (!N->hasAnyUseOfValue(i))
+ continue;
+ unsigned Reg = ImpDefs[i - NumDefs];
+ for (;*SUImpDefs; ++SUImpDefs) {
+ unsigned SUReg = *SUImpDefs;
+ if (TRI->regsOverlap(Reg, SUReg))
+ return true;
+ }
}
}
return false;
}
+/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
+/// are not handled well by the general register pressure reduction
+/// heuristics. When presented with code like this:
+///
+/// N
+/// / |
+/// / |
+/// U store
+/// |
+/// ...
+///
+/// the heuristics tend to push the store up, but since the
+/// operand of the store has another use (U), this would increase
+/// the length of that other use (the U->N edge).
+///
+/// This function transforms code like the above to route U's
+/// dependence through the store when possible, like this:
+///
+/// N
+/// ||
+/// ||
+/// store
+/// |
+/// U
+/// |
+/// ...
+///
+/// This results in the store being scheduled immediately
+/// after N, which shortens the U->N live range, reducing
+/// register pressure.
+///
+template<class SF>
+void RegReductionPriorityQueue<SF>::PrescheduleNodesWithMultipleUses() {
+ // Visit all the nodes in topological order, working top-down.
+ for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
+ SUnit *SU = &(*SUnits)[i];
+ // For now, only look at nodes with no data successors, such as stores.
+ // These are especially important, due to the heuristics in
+ // getNodePriority for nodes with no data successors.
+ if (SU->NumSuccs != 0)
+ continue;
+ // For now, only look at nodes with exactly one data predecessor.
+ if (SU->NumPreds != 1)
+ continue;
+ // Avoid prescheduling copies to virtual registers, which don't behave
+ // like other nodes from the perspective of scheduling heuristics.
+ if (SDNode *N = SU->getNode())
+ if (N->getOpcode() == ISD::CopyToReg &&
+ TargetRegisterInfo::isVirtualRegister
+ (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
+ continue;
+
+ // Locate the single data predecessor.
+ SUnit *PredSU = 0;
+ for (SUnit::const_pred_iterator II = SU->Preds.begin(),
+ EE = SU->Preds.end(); II != EE; ++II)
+ if (!II->isCtrl()) {
+ PredSU = II->getSUnit();
+ break;
+ }
+ assert(PredSU);
+
+ // Don't rewrite edges that carry physregs, because that requires additional
+ // support infrastructure.
+ if (PredSU->hasPhysRegDefs)
+ continue;
+ // Short-circuit the case where SU is PredSU's only data successor.
+ if (PredSU->NumSuccs == 1)
+ continue;
+ // Avoid prescheduling to copies from virtual registers, which don't behave
+    // like other nodes from the perspective of scheduling heuristics.
+ if (SDNode *N = SU->getNode())
+ if (N->getOpcode() == ISD::CopyFromReg &&
+ TargetRegisterInfo::isVirtualRegister
+ (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
+ continue;
+
+ // Perform checks on the successors of PredSU.
+ for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
+ EE = PredSU->Succs.end(); II != EE; ++II) {
+ SUnit *PredSuccSU = II->getSUnit();
+ if (PredSuccSU == SU) continue;
+ // If PredSU has another successor with no data successors, for
+ // now don't attempt to choose either over the other.
+ if (PredSuccSU->NumSuccs == 0)
+ goto outer_loop_continue;
+ // Don't break physical register dependencies.
+ if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
+ if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
+ goto outer_loop_continue;
+ // Don't introduce graph cycles.
+ if (scheduleDAG->IsReachable(SU, PredSuccSU))
+ goto outer_loop_continue;
+ }
+
+ // Ok, the transformation is safe and the heuristics suggest it is
+ // profitable. Update the graph.
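+    // The loop below reroutes each of PredSU's other successor edges so the
+    // dependence is carried through SU instead; RemovePred edits
+    // PredSU->Succs in place, which is why the index is adjusted manually.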
+ DEBUG(dbgs() << " Prescheduling SU #" << SU->NodeNum
+ << " next to PredSU #" << PredSU->NodeNum
+ << " to guide scheduling in the presence of multiple uses\n");
+ for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
+ SDep Edge = PredSU->Succs[i];
+ assert(!Edge.isAssignedRegDep());
+ SUnit *SuccSU = Edge.getSUnit();
+ if (SuccSU != SU) {
+ Edge.setSUnit(PredSU);
+ scheduleDAG->RemovePred(SuccSU, Edge);
+ scheduleDAG->AddPred(SU, Edge);
+ Edge.setSUnit(SU);
+ scheduleDAG->AddPred(SuccSU, Edge);
+ --i;
+ }
+ }
+ outer_loop_continue:;
+ }
+}
+
/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
-void BURegReductionPriorityQueue::AddPseudoTwoAddrDeps() {
+template<class SF>
+void RegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
SUnit *SU = &(*SUnits)[i];
if (!SU->isTwoAddress)
if (!DUSU) continue;
for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
E = DUSU->Succs.end(); I != E; ++I) {
- if (I->isCtrl) continue;
- SUnit *SuccSU = I->Dep;
+ if (I->isCtrl()) continue;
+ SUnit *SuccSU = I->getSUnit();
if (SuccSU == SU)
continue;
// Be conservative. Ignore if nodes aren't at roughly the same
// depth and height.
- if (SuccSU->Height < SU->Height && (SU->Height - SuccSU->Height) > 1)
+ if (SuccSU->getHeight() < SU->getHeight() &&
+ (SU->getHeight() - SuccSU->getHeight()) > 1)
continue;
+ // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
+ // constrains whatever is using the copy, instead of the copy
+ // itself. In the case that the copy is coalesced, this
+      // preserves the intent of the pseudo two-address heuristics.
+ while (SuccSU->Succs.size() == 1 &&
+ SuccSU->getNode()->isMachineOpcode() &&
+ SuccSU->getNode()->getMachineOpcode() ==
+ TargetOpcode::COPY_TO_REGCLASS)
+ SuccSU = SuccSU->Succs.front().getSUnit();
+ // Don't constrain non-instruction nodes.
if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
continue;
// Don't constrain nodes with physical register defs if the
// predecessor can clobber them.
- if (SuccSU->hasPhysRegDefs) {
+ if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
continue;
}
- // Don't constraint extract_subreg / insert_subreg these may be
- // coalesced away. We don't them close to their uses.
+ // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
+ // these may be coalesced away. We want them close to their uses.
unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
- if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG ||
- SuccOpc == TargetInstrInfo::INSERT_SUBREG)
+ if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
+ SuccOpc == TargetOpcode::INSERT_SUBREG ||
+ SuccOpc == TargetOpcode::SUBREG_TO_REG)
continue;
if ((!canClobber(SuccSU, DUSU) ||
(hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
(!SU->isCommutable && SuccSU->isCommutable)) &&
!scheduleDAG->IsReachable(SuccSU, SU)) {
- DOUT << "Adding an edge from SU # " << SU->NodeNum
- << " to SU #" << SuccSU->NodeNum << "\n";
- scheduleDAG->AddPred(SU, SuccSU, true, true);
+ DEBUG(dbgs() << " Adding a pseudo-two-addr edge from SU #"
+ << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
+ scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
+ /*Reg=*/0, /*isNormalMemory=*/false,
+ /*isMustAlias=*/false,
+ /*isArtificial=*/true));
}
}
}
/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
-void BURegReductionPriorityQueue::CalculateSethiUllmanNumbers() {
- SethiUllmanNumbers.assign(SUnits->size(), 0);
-
- for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
- CalcNodeBUSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
-}
-void BURegReductionFastPriorityQueue::CalculateSethiUllmanNumbers() {
+template<class SF>
+void RegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
SethiUllmanNumbers.assign(SUnits->size(), 0);
for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
- CalcNodeBUSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
+ CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}
/// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
unsigned Sum = 0;
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
- const SUnit *SuccSU = I->Dep;
+ const SUnit *SuccSU = I->getSUnit();
for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
EE = SuccSU->Preds.end(); II != EE; ++II) {
- SUnit *PredSU = II->Dep;
+ SUnit *PredSU = II->getSUnit();
if (!PredSU->isScheduled)
if (++Sum > Limit)
return Sum;
if (LPriority+LBonus != RPriority+RBonus)
return LPriority+LBonus < RPriority+RBonus;
- if (left->Depth != right->Depth)
- return left->Depth < right->Depth;
+ if (left->getDepth() != right->getDepth())
+ return left->getDepth() < right->getDepth();
if (left->NumSuccsLeft != right->NumSuccsLeft)
return left->NumSuccsLeft > right->NumSuccsLeft;
- if (left->CycleBound != right->CycleBound)
- return left->CycleBound > right->CycleBound;
-
assert(left->NodeQueueId && right->NodeQueueId &&
"NodeQueueId cannot be zero");
return (left->NodeQueueId > right->NodeQueueId);
}
-/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
-/// scheduling units.
-void TDRegReductionPriorityQueue::CalculateSethiUllmanNumbers() {
- SethiUllmanNumbers.assign(SUnits->size(), 0);
-
- for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
- CalcNodeTDSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
-}
-
//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//
-llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
- SelectionDAG *DAG,
- const TargetMachine *TM,
- MachineBasicBlock *BB,
- bool Fast) {
- if (Fast)
- return new ScheduleDAGRRList(DAG, BB, *TM, true, true,
- new BURegReductionFastPriorityQueue());
-
- const TargetInstrInfo *TII = TM->getInstrInfo();
- const TargetRegisterInfo *TRI = TM->getRegisterInfo();
+llvm::ScheduleDAGSDNodes *
+llvm::createBURRListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
+ const TargetMachine &TM = IS->TM;
+ const TargetInstrInfo *TII = TM.getInstrInfo();
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+
+ BURegReductionPriorityQueue *PQ =
+ new BURegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
+ ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, false, PQ);
+ PQ->setScheduleDAG(SD);
+ return SD;
+}
+
+llvm::ScheduleDAGSDNodes *
+llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
+ const TargetMachine &TM = IS->TM;
+ const TargetInstrInfo *TII = TM.getInstrInfo();
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
- BURegReductionPriorityQueue *PQ = new BURegReductionPriorityQueue(TII, TRI);
+ TDRegReductionPriorityQueue *PQ =
+ new TDRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
+ ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, false, PQ);
+ PQ->setScheduleDAG(SD);
+ return SD;
+}
- ScheduleDAGRRList *SD =
- new ScheduleDAGRRList(DAG, BB, *TM, true, false, PQ);
+llvm::ScheduleDAGSDNodes *
+llvm::createSourceListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
+ const TargetMachine &TM = IS->TM;
+ const TargetInstrInfo *TII = TM.getInstrInfo();
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+
+ SrcRegReductionPriorityQueue *PQ =
+ new SrcRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
+ ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, false, PQ);
PQ->setScheduleDAG(SD);
return SD;
}
-llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
- SelectionDAG *DAG,
- const TargetMachine *TM,
- MachineBasicBlock *BB,
- bool Fast) {
- return new ScheduleDAGRRList(DAG, BB, *TM, false, Fast,
- new TDRegReductionPriorityQueue());
+llvm::ScheduleDAGSDNodes *
+llvm::createHybridListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
+ const TargetMachine &TM = IS->TM;
+ const TargetInstrInfo *TII = TM.getInstrInfo();
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ const TargetLowering *TLI = &IS->getTargetLowering();
+
+ HybridBURRPriorityQueue *PQ =
+ new HybridBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
+ ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, true, PQ);
+ PQ->setScheduleDAG(SD);
+ return SD;
+}
+
+llvm::ScheduleDAGSDNodes *
+llvm::createILPListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
+ const TargetMachine &TM = IS->TM;
+ const TargetInstrInfo *TII = TM.getInstrInfo();
+ const TargetRegisterInfo *TRI = TM.getRegisterInfo();
+ const TargetLowering *TLI = &IS->getTargetLowering();
+
+ ILPBURRPriorityQueue *PQ =
+ new ILPBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
+ ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, true, PQ);
+ PQ->setScheduleDAG(SD);
+ return SD;
}