//
//===----------------------------------------------------------------------===//
-#define DEBUG_TYPE "post-RA-sched"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "AggressiveAntiDepBreaker.h"
+#include "AntiDepBreaker.h"
+#include "CriticalAntiDepBreaker.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
-#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/SmallVector.h"
-#include <map>
-#include <climits>
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
+#define DEBUG_TYPE "post-RA-sched"
+
+STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
+STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");
+// Post-RA scheduling is enabled with
+// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
+// override the target's decision.
static cl::opt<bool>
+EnablePostRAScheduler("post-RA-scheduler",
+ cl::desc("Enable scheduling after register allocation"),
+ cl::init(false), cl::Hidden);
+static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
- cl::desc("Break scheduling anti-dependencies"),
- cl::init(false));
+ cl::desc("Break post-RA scheduling anti-dependencies: "
+ "\"critical\", \"all\", or \"none\""),
+ cl::init("none"), cl::Hidden);
+
+// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
+static cl::opt<int>
+DebugDiv("postra-sched-debugdiv",
+ cl::desc("Debug control MBBs that are scheduled"),
+ cl::init(0), cl::Hidden);
+static cl::opt<int>
+DebugMod("postra-sched-debugmod",
+ cl::desc("Debug control MBBs that are scheduled"),
+ cl::init(0), cl::Hidden);
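+// For example, -postra-sched-debugdiv=8 -postra-sched-debugmod=3 restricts
+// scheduling (in builds with assertions enabled) to one block out of every
+// eight visited.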
+
+AntiDepBreaker::~AntiDepBreaker() { }
namespace {
- class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
+ class PostRAScheduler : public MachineFunctionPass {
+ const TargetInstrInfo *TII;
+ RegisterClassInfo RegClassInfo;
+
public:
static char ID;
- PostRAScheduler() : MachineFunctionPass(&ID) {}
+ PostRAScheduler() : MachineFunctionPass(ID) {}
- void getAnalysisUsage(AnalysisUsage &AU) const {
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ AU.addRequired<AAResultsWrapperPass>();
+ AU.addRequired<TargetPassConfig>();
AU.addRequired<MachineDominatorTree>();
AU.addPreserved<MachineDominatorTree>();
AU.addRequired<MachineLoopInfo>();
MachineFunctionPass::getAnalysisUsage(AU);
}
- const char *getPassName() const {
- return "Post RA top-down list latency scheduler";
- }
-    bool runOnMachineFunction(MachineFunction &Fn);
+    bool runOnMachineFunction(MachineFunction &Fn) override;
+ bool enablePostRAScheduler(
+ const TargetSubtargetInfo &ST, CodeGenOpt::Level OptLevel,
+ TargetSubtargetInfo::AntiDepBreakMode &Mode,
+ TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const;
};
char PostRAScheduler::ID = 0;
- class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
+ class SchedulePostRATDList : public ScheduleDAGInstrs {
/// AvailableQueue - The priority queue to use for the available SUnits.
///
LatencyPriorityQueue AvailableQueue;
-
+
/// PendingQueue - This contains all of the instructions whose operands have
/// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
/// added to the AvailableQueue.
std::vector<SUnit*> PendingQueue;
- /// Topo - A topological ordering for SUnits.
- ScheduleDAGTopologicalSort Topo;
+ /// HazardRec - The hazard recognizer to use.
+ ScheduleHazardRecognizer *HazardRec;
+
+ /// AntiDepBreak - Anti-dependence breaking object, or NULL if none
+ AntiDepBreaker *AntiDepBreak;
+
+ /// AA - AliasAnalysis for making memory reference queries.
+ AliasAnalysis *AA;
+
+ /// The schedule. Null SUnit*'s represent noop instructions.
+ std::vector<SUnit*> Sequence;
+
+ /// The index in BB of RegionEnd.
+ ///
+ /// This is the instruction number from the top of the current block, not
+ /// the SlotIndex. It is only used by the AntiDepBreaker.
+ unsigned EndIndex;
public:
- SchedulePostRATDList(MachineBasicBlock *mbb, const TargetMachine &tm,
- const MachineLoopInfo &MLI,
- const MachineDominatorTree &MDT)
- : ScheduleDAGInstrs(mbb, tm, MLI, MDT), Topo(SUnits) {}
+ SchedulePostRATDList(
+ MachineFunction &MF, MachineLoopInfo &MLI, AliasAnalysis *AA,
+ const RegisterClassInfo &,
+ TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
+ SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs);
- void Schedule();
+ ~SchedulePostRATDList() override;
+
+ /// startBlock - Initialize register live-range state for scheduling in
+ /// this block.
+ ///
+ void startBlock(MachineBasicBlock *BB) override;
+
+ // Set the index of RegionEnd within the current BB.
+ void setEndIndex(unsigned EndIdx) { EndIndex = EndIdx; }
+
+ /// Initialize the scheduler state for the next scheduling region.
+ void enterRegion(MachineBasicBlock *bb,
+ MachineBasicBlock::iterator begin,
+ MachineBasicBlock::iterator end,
+ unsigned regioninstrs) override;
+
+ /// Notify that the scheduler has finished scheduling the current region.
+ void exitRegion() override;
+
+ /// Schedule - Schedule the instruction range using list scheduling.
+ ///
+ void schedule() override;
+
+ void EmitSchedule();
+
+ /// Observe - Update liveness information to account for the current
+ /// instruction, which will not be scheduled.
+ ///
+ void Observe(MachineInstr *MI, unsigned Count);
+
+ /// finishBlock - Clean up register live-range state.
+ ///
+ void finishBlock() override;
private:
void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
+ void ReleaseSuccessors(SUnit *SU);
void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
void ListScheduleTopDown();
- bool BreakAntiDependencies();
+
+ void dumpSchedule() const;
+ void emitNoop(unsigned CurCycle);
};
}
+char &llvm::PostRASchedulerID = PostRAScheduler::ID;
+
+INITIALIZE_PASS(PostRAScheduler, "post-RA-sched",
+ "Post RA top-down list latency scheduler", false, false)
+
+SchedulePostRATDList::SchedulePostRATDList(
+ MachineFunction &MF, MachineLoopInfo &MLI, AliasAnalysis *AA,
+ const RegisterClassInfo &RCI,
+ TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
+ SmallVectorImpl<const TargetRegisterClass *> &CriticalPathRCs)
+ : ScheduleDAGInstrs(MF, &MLI), AA(AA), EndIndex(0) {
+
+ const InstrItineraryData *InstrItins =
+ MF.getSubtarget().getInstrItineraryData();
+ HazardRec =
+ MF.getSubtarget().getInstrInfo()->CreateTargetPostRAHazardRecognizer(
+ InstrItins, this);
+
+ assert((AntiDepMode == TargetSubtargetInfo::ANTIDEP_NONE ||
+ MRI.tracksLiveness()) &&
+ "Live-ins must be accurate for anti-dependency breaking");
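+  // Pick the requested anti-dependency breaker: the aggressive breaker for
+  // ANTIDEP_ALL, the critical-path-only breaker for ANTIDEP_CRITICAL, and
+  // none otherwise.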
+ AntiDepBreak =
+ ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
+ (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
+ ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
+ (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : nullptr));
+}
+
+SchedulePostRATDList::~SchedulePostRATDList() {
+ delete HazardRec;
+ delete AntiDepBreak;
+}
+
+/// Initialize state associated with the next scheduling region.
+void SchedulePostRATDList::enterRegion(MachineBasicBlock *bb,
+ MachineBasicBlock::iterator begin,
+ MachineBasicBlock::iterator end,
+ unsigned regioninstrs) {
+ ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);
+ Sequence.clear();
+}
+
+/// Print the schedule before exiting the region.
+void SchedulePostRATDList::exitRegion() {
+ DEBUG({
+ dbgs() << "*** Final schedule ***\n";
+ dumpSchedule();
+ dbgs() << '\n';
+ });
+ ScheduleDAGInstrs::exitRegion();
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+/// dumpSchedule - dump the scheduled Sequence.
+void SchedulePostRATDList::dumpSchedule() const {
+ for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
+ if (SUnit *SU = Sequence[i])
+ SU->dump(this);
+ else
+ dbgs() << "**** NOOP ****\n";
+ }
+}
+#endif
+
+bool PostRAScheduler::enablePostRAScheduler(
+ const TargetSubtargetInfo &ST,
+ CodeGenOpt::Level OptLevel,
+ TargetSubtargetInfo::AntiDepBreakMode &Mode,
+ TargetSubtargetInfo::RegClassVector &CriticalPathRCs) const {
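+  // Ask the subtarget for its preferred anti-dependency breaking mode and
+  // critical-path register classes, then report whether it wants post-RA
+  // scheduling at this optimization level.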
+ Mode = ST.getAntiDepBreakMode();
+ ST.getCriticalPathRCs(CriticalPathRCs);
+ return ST.enablePostRAScheduler() &&
+ OptLevel >= ST.getOptLevelToEnablePostRAScheduler();
+}
+
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
- DOUT << "PostRAScheduler\n";
+ if (skipOptnoneFunction(*Fn.getFunction()))
+ return false;
+
+ TII = Fn.getSubtarget().getInstrInfo();
+ MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
+ AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
+ TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();
+
+ RegClassInfo.runOnMachineFunction(Fn);
+
+ // Check for explicit enable/disable of post-ra scheduling.
+ TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
+ TargetSubtargetInfo::ANTIDEP_NONE;
+ SmallVector<const TargetRegisterClass*, 4> CriticalPathRCs;
+ if (EnablePostRAScheduler.getPosition() > 0) {
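+    // The option appeared explicitly on the command line (getPosition() is
+    // nonzero only in that case), so honor it rather than asking the target.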
+ if (!EnablePostRAScheduler)
+ return false;
+ } else {
+ // Check that post-RA scheduling is enabled for this target.
+ // This may upgrade the AntiDepMode.
+ if (!enablePostRAScheduler(Fn.getSubtarget(), PassConfig->getOptLevel(),
+ AntiDepMode, CriticalPathRCs))
+ return false;
+ }
- const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
- const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
+ // Check for antidep breaking override...
+ if (EnableAntiDepBreaking.getPosition() > 0) {
+ AntiDepMode = (EnableAntiDepBreaking == "all")
+ ? TargetSubtargetInfo::ANTIDEP_ALL
+ : ((EnableAntiDepBreaking == "critical")
+ ? TargetSubtargetInfo::ANTIDEP_CRITICAL
+ : TargetSubtargetInfo::ANTIDEP_NONE);
+ }
- // Loop over all of the basic blocks
- for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
- MBB != MBBe; ++MBB) {
+ DEBUG(dbgs() << "PostRAScheduler\n");
- SchedulePostRATDList Scheduler(MBB, Fn.getTarget(), MLI, MDT);
+ SchedulePostRATDList Scheduler(Fn, MLI, AA, RegClassInfo, AntiDepMode,
+ CriticalPathRCs);
- Scheduler.Run();
+ // Loop over all of the basic blocks
+ for (auto &MBB : Fn) {
+#ifndef NDEBUG
+ // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
+ if (DebugDiv > 0) {
+ static int bbcnt = 0;
+ if (bbcnt++ % DebugDiv != DebugMod)
+ continue;
+ dbgs() << "*** DEBUG scheduling " << Fn.getName()
+ << ":BB#" << MBB.getNumber() << " ***\n";
+ }
+#endif
+ // Initialize register live-range state for scheduling in this block.
+ Scheduler.startBlock(&MBB);
+
+ // Schedule each sequence of instructions not interrupted by a label
+ // or anything else that effectively needs to shut down scheduling.
+ MachineBasicBlock::iterator Current = MBB.end();
+ unsigned Count = MBB.size(), CurrentCount = Count;
+ for (MachineBasicBlock::iterator I = Current; I != MBB.begin();) {
+ MachineInstr *MI = std::prev(I);
+ --Count;
+ // Calls are not scheduling boundaries before register allocation, but
+ // post-ra we don't gain anything by scheduling across calls since we
+ // don't need to worry about register pressure.
+ if (MI->isCall() || TII->isSchedulingBoundary(MI, &MBB, Fn)) {
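+        // Schedule and emit the instructions between this boundary and the
+        // end of the previous region; the boundary instruction itself is
+        // only observed, never reordered.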
+ Scheduler.enterRegion(&MBB, I, Current, CurrentCount - Count);
+ Scheduler.setEndIndex(CurrentCount);
+ Scheduler.schedule();
+ Scheduler.exitRegion();
+ Scheduler.EmitSchedule();
+ Current = MI;
+ CurrentCount = Count;
+ Scheduler.Observe(MI, CurrentCount);
+ }
+ I = MI;
+ if (MI->isBundle())
+ Count -= MI->getBundleSize();
+ }
+ assert(Count == 0 && "Instruction count mismatch!");
+ assert((MBB.begin() == Current || CurrentCount != 0) &&
+ "Instruction count mismatch!");
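+    // Whatever remains above the topmost boundary (or the whole block if no
+    // boundary was found) forms the final region.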
+ Scheduler.enterRegion(&MBB, MBB.begin(), Current, CurrentCount);
+ Scheduler.setEndIndex(CurrentCount);
+ Scheduler.schedule();
+ Scheduler.exitRegion();
Scheduler.EmitSchedule();
+
+ // Clean up register live-range state.
+ Scheduler.finishBlock();
+
+ // Update register kills
+ Scheduler.fixupKills(&MBB);
}
return true;
}
-
-/// Schedule - Schedule the DAG using list scheduling.
-void SchedulePostRATDList::Schedule() {
- DOUT << "********** List Scheduling **********\n";
-
- // Build scheduling units.
- BuildSchedUnits();
-
- if (EnableAntiDepBreaking) {
- if (BreakAntiDependencies()) {
+
+/// startBlock - Initialize register live-range state for scheduling in
+/// this block.
+///
+void SchedulePostRATDList::startBlock(MachineBasicBlock *BB) {
+ // Call the superclass.
+ ScheduleDAGInstrs::startBlock(BB);
+
+ // Reset the hazard recognizer and anti-dep breaker.
+ HazardRec->Reset();
+ if (AntiDepBreak)
+ AntiDepBreak->StartBlock(BB);
+}
+
+/// Schedule - Schedule the instruction range using list scheduling.
+///
+void SchedulePostRATDList::schedule() {
+ // Build the scheduling graph.
+ buildSchedGraph(AA);
+
+ if (AntiDepBreak) {
+ unsigned Broken =
+ AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,
+ EndIndex, DbgValues);
+
+ if (Broken != 0) {
// We made changes. Update the dependency graph.
// Theoretically we could update the graph in place:
// When a live range is changed to use a different register, remove
// the def's anti-dependence *and* output-dependence edges due to
// that register, and add new anti-dependence and output-dependence
// edges based on the next live range of the register.
- SUnits.clear();
- BuildSchedUnits();
+ ScheduleDAG::clearDAG();
+ buildSchedGraph(AA);
+
+ NumFixedAnti += Broken;
}
}
- AvailableQueue.initNodes(SUnits);
+ DEBUG(dbgs() << "********** List Scheduling **********\n");
+ DEBUG(
+ for (const SUnit &SU : SUnits) {
+ SU.dumpAll(this);
+ dbgs() << '\n';
+ }
+ );
+ AvailableQueue.initNodes(SUnits);
ListScheduleTopDown();
-
AvailableQueue.releaseState();
}
-/// getInstrOperandRegClass - Return register class of the operand of an
-/// instruction of the specified TargetInstrDesc.
-static const TargetRegisterClass*
-getInstrOperandRegClass(const TargetRegisterInfo *TRI,
- const TargetInstrInfo *TII, const TargetInstrDesc &II,
- unsigned Op) {
- if (Op >= II.getNumOperands())
- return NULL;
- if (II.OpInfo[Op].isLookupPtrRegClass())
- return TII->getPointerRegClass();
- return TRI->getRegClass(II.OpInfo[Op].RegClass);
-}
-
-/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
-/// critical path.
-static SDep *CriticalPathStep(SUnit *SU) {
- SDep *Next = 0;
- unsigned NextDepth = 0;
- // Find the predecessor edge with the greatest depth.
- for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
- P != PE; ++P) {
- SUnit *PredSU = P->getSUnit();
- unsigned PredLatency = P->getLatency();
- unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
- // In the case of a latency tie, prefer an anti-dependency edge over
- // other types of edges.
- if (NextDepth < PredTotalLatency ||
- (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
- NextDepth = PredTotalLatency;
- Next = &*P;
- }
- }
- return Next;
+/// Observe - Update liveness information to account for the current
+/// instruction, which will not be scheduled.
+///
+void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
+ if (AntiDepBreak)
+ AntiDepBreak->Observe(MI, Count, EndIndex);
}
-/// BreakAntiDependencies - Identifiy anti-dependencies along the critical path
-/// of the ScheduleDAG and break them by renaming registers.
+/// finishBlock - Clean up register live-range state.
///
-bool SchedulePostRATDList::BreakAntiDependencies() {
- // The code below assumes that there is at least one instruction,
- // so just duck out immediately if the block is empty.
- if (BB->empty()) return false;
-
- // Find the node at the bottom of the critical path.
- SUnit *Max = 0;
- for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
- SUnit *SU = &SUnits[i];
- if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
- Max = SU;
- }
-
- DOUT << "Critical path has total latency "
- << (Max ? Max->getDepth() + Max->Latency : 0) << "\n";
+void SchedulePostRATDList::finishBlock() {
+ if (AntiDepBreak)
+ AntiDepBreak->FinishBlock();
- // Walk the critical path from the bottom up. Collect all anti-dependence
- // edges on the critical path. Skip anti-dependencies between SUnits that
- // are connected with other edges, since such units won't be able to be
- // scheduled past each other anyway.
- //
- // The heuristic is that edges on the critical path are more important to
- // break than other edges. And since there are a limited number of
- // registers, we don't want to waste them breaking edges that aren't
- // important.
- //
- // TODO: Instructions with multiple defs could have multiple
- // anti-dependencies. The current code here only knows how to break one
- // edge per instruction. Note that we'd have to be able to break all of
- // the anti-dependencies in an instruction in order to be effective.
- BitVector AllocatableSet = TRI->getAllocatableSet(*MF);
- DenseMap<MachineInstr *, unsigned> CriticalAntiDeps;
- SUnit *SU = Max;
- for (SDep *Edge = CriticalPathStep(SU); Edge;
- Edge = CriticalPathStep(SU = Edge->getSUnit())) {
- SUnit *NextSU = Edge->getSUnit();
- // Only consider anti-dependence edges.
- if (Edge->getKind() != SDep::Anti)
- continue;
- unsigned AntiDepReg = Edge->getReg();
- assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
- // Don't break anti-dependencies on non-allocatable registers.
- if (!AllocatableSet.test(AntiDepReg))
- continue;
- // If the SUnit has other dependencies on the SUnit that it
- // anti-depends on, don't bother breaking the anti-dependency.
- // Also, if there are dependencies on other SUnits with the
- // same register as the anti-dependency, don't attempt to
- // break it.
- for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
- P != PE; ++P)
- if (P->getSUnit() == NextSU ?
- (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
- (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
- AntiDepReg = 0;
- break;
- }
- if (AntiDepReg != 0)
- CriticalAntiDeps[SU->getInstr()] = AntiDepReg;
- }
-
- // For live regs that are only used in one register class in a live range,
- // the register class. If the register is not live, the corresponding value
- // is null. If the register is live but used in multiple register classes,
- // the corresponding value is -1 casted to a pointer.
- const TargetRegisterClass *
- Classes[TargetRegisterInfo::FirstVirtualRegister] = {};
-
- // Map registers to all their references within a live range.
- std::multimap<unsigned, MachineOperand *> RegRefs;
-
- // The index of the most recent kill (proceding bottom-up), or -1 if
- // the register is not live.
- unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
- std::fill(KillIndices, array_endof(KillIndices), -1);
- // The index of the most recent def (proceding bottom up), or -1 if
- // the register is live.
- unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
- std::fill(DefIndices, array_endof(DefIndices), BB->size());
-
- // Determine the live-out physregs for this block.
- if (!BB->empty() && BB->back().getDesc().isReturn())
- // In a return block, examine the function live-out regs.
- for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
- E = MRI.liveout_end(); I != E; ++I) {
- unsigned Reg = *I;
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[Reg] = BB->size();
- DefIndices[Reg] = -1;
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[AliasReg] = BB->size();
- DefIndices[AliasReg] = -1;
- }
- }
- else
- // In a non-return block, examine the live-in regs of all successors.
- for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
- SE = BB->succ_end(); SI != SE; ++SI)
- for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
- E = (*SI)->livein_end(); I != E; ++I) {
- unsigned Reg = *I;
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[Reg] = BB->size();
- DefIndices[Reg] = -1;
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[AliasReg] = BB->size();
- DefIndices[AliasReg] = -1;
- }
- }
-
- // Consider callee-saved registers as live-out, since we're running after
- // prologue/epilogue insertion so there's no way to add additional
- // saved registers.
- //
- // TODO: If the callee saves and restores these, then we can potentially
- // use them between the save and the restore. To do that, we could scan
- // the exit blocks to see which of these registers are defined.
- // Alternatively, calle-saved registers that aren't saved and restored
- // could be marked live-in in every block.
- for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
- unsigned Reg = *I;
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[Reg] = BB->size();
- DefIndices[Reg] = -1;
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[AliasReg] = BB->size();
- DefIndices[AliasReg] = -1;
- }
- }
-
- // Consider this pattern:
- // A = ...
- // ... = A
- // A = ...
- // ... = A
- // A = ...
- // ... = A
- // A = ...
- // ... = A
- // There are three anti-dependencies here, and without special care,
- // we'd break all of them using the same register:
- // A = ...
- // ... = A
- // B = ...
- // ... = B
- // B = ...
- // ... = B
- // B = ...
- // ... = B
- // because at each anti-dependence, B is the first register that
- // isn't A which is free. This re-introduces anti-dependencies
- // at all but one of the original anti-dependencies that we were
- // trying to break. To avoid this, keep track of the most recent
- // register that each register was replaced with, avoid avoid
- // using it to repair an anti-dependence on the same register.
- // This lets us produce this:
- // A = ...
- // ... = A
- // B = ...
- // ... = B
- // C = ...
- // ... = C
- // B = ...
- // ... = B
- // This still has an anti-dependence on B, but at least it isn't on the
- // original critical path.
- //
- // TODO: If we tracked more than one register here, we could potentially
- // fix that remaining critical edge too. This is a little more involved,
- // because unlike the most recent register, less recent registers should
- // still be considered, though only if no other registers are available.
- unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};
-
- // Attempt to break anti-dependence edges on the critical path. Walk the
- // instructions from the bottom up, tracking information about liveness
- // as we go to help determine which registers are available.
- bool Changed = false;
- unsigned Count = BB->size() - 1;
- for (MachineBasicBlock::reverse_iterator I = BB->rbegin(), E = BB->rend();
- I != E; ++I, --Count) {
- MachineInstr *MI = &*I;
-
- // After regalloc, IMPLICIT_DEF instructions aren't safe to treat as
- // dependence-breaking. In the case of an INSERT_SUBREG, the IMPLICIT_DEF
- // is left behind appearing to clobber the super-register, while the
- // subregister needs to remain live. So we just ignore them.
- if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
- continue;
-
- // Check if this instruction has an anti-dependence that we're
- // interested in.
- DenseMap<MachineInstr *, unsigned>::iterator C = CriticalAntiDeps.find(MI);
- unsigned AntiDepReg = C != CriticalAntiDeps.end() ?
- C->second : 0;
-
- // Scan the register operands for this instruction and update
- // Classes and RegRefs.
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg()) continue;
- unsigned Reg = MO.getReg();
- if (Reg == 0) continue;
- const TargetRegisterClass *NewRC =
- getInstrOperandRegClass(TRI, TII, MI->getDesc(), i);
-
- // If this instruction has a use of AntiDepReg, breaking it
- // is invalid.
- if (MO.isUse() && AntiDepReg == Reg)
- AntiDepReg = 0;
-
- // For now, only allow the register to be changed if its register
- // class is consistent across all uses.
- if (!Classes[Reg] && NewRC)
- Classes[Reg] = NewRC;
- else if (!NewRC || Classes[Reg] != NewRC)
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
-
- // Now check for aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- // If an alias of the reg is used during the live range, give up.
- // Note that this allows us to skip checking if AntiDepReg
- // overlaps with any of the aliases, among other things.
- unsigned AliasReg = *Alias;
- if (Classes[AliasReg]) {
- Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
- }
- }
-
- // If we're still willing to consider this register, note the reference.
- if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
- RegRefs.insert(std::make_pair(Reg, &MO));
- }
-
- // Determine AntiDepReg's register class, if it is live and is
- // consistently used within a single class.
- const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
- assert((AntiDepReg == 0 || RC != NULL) &&
- "Register should be live if it's causing an anti-dependence!");
- if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
- AntiDepReg = 0;
-
- // Look for a suitable register to use to break the anti-depenence.
- //
- // TODO: Instead of picking the first free register, consider which might
- // be the best.
- if (AntiDepReg != 0) {
- for (TargetRegisterClass::iterator R = RC->allocation_order_begin(*MF),
- RE = RC->allocation_order_end(*MF); R != RE; ++R) {
- unsigned NewReg = *R;
- // Don't replace a register with itself.
- if (NewReg == AntiDepReg) continue;
- // Don't replace a register with one that was recently used to repair
- // an anti-dependence with this AntiDepReg, because that would
- // re-introduce that anti-dependence.
- if (NewReg == LastNewReg[AntiDepReg]) continue;
- // If NewReg is dead and NewReg's most recent def is not before
- // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
- assert(((KillIndices[AntiDepReg] == -1u) != (DefIndices[AntiDepReg] == -1u)) &&
- "Kill and Def maps aren't consistent for AntiDepReg!");
- assert(((KillIndices[NewReg] == -1u) != (DefIndices[NewReg] == -1u)) &&
- "Kill and Def maps aren't consistent for NewReg!");
- if (KillIndices[NewReg] == -1u &&
- Classes[NewReg] != reinterpret_cast<TargetRegisterClass *>(-1) &&
- KillIndices[AntiDepReg] <= DefIndices[NewReg]) {
- DOUT << "Breaking anti-dependence edge on "
- << TRI->getName(AntiDepReg)
- << " with " << RegRefs.count(AntiDepReg) << " references"
- << " using " << TRI->getName(NewReg) << "!\n";
-
- // Update the references to the old register to refer to the new
- // register.
- std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
- std::multimap<unsigned, MachineOperand *>::iterator>
- Range = RegRefs.equal_range(AntiDepReg);
- for (std::multimap<unsigned, MachineOperand *>::iterator
- Q = Range.first, QE = Range.second; Q != QE; ++Q)
- Q->second->setReg(NewReg);
-
- // We just went back in time and modified history; the
- // liveness information for the anti-depenence reg is now
- // inconsistent. Set the state as if it were dead.
- Classes[NewReg] = Classes[AntiDepReg];
- DefIndices[NewReg] = DefIndices[AntiDepReg];
- KillIndices[NewReg] = KillIndices[AntiDepReg];
-
- Classes[AntiDepReg] = 0;
- DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
- KillIndices[AntiDepReg] = -1;
-
- RegRefs.erase(AntiDepReg);
- Changed = true;
- LastNewReg[AntiDepReg] = NewReg;
- break;
- }
- }
- }
-
- // Update liveness.
- // Proceding upwards, registers that are defed but not used in this
- // instruction are now dead.
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg()) continue;
- unsigned Reg = MO.getReg();
- if (Reg == 0) continue;
- if (!MO.isDef()) continue;
- // Ignore two-addr defs.
- if (MI->isRegReDefinedByTwoAddr(i)) continue;
-
- DefIndices[Reg] = Count;
- KillIndices[Reg] = -1;
- Classes[Reg] = 0;
- RegRefs.erase(Reg);
- // Repeat, for all subregs.
- for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
- *Subreg; ++Subreg) {
- unsigned SubregReg = *Subreg;
- DefIndices[SubregReg] = Count;
- KillIndices[SubregReg] = -1;
- Classes[SubregReg] = 0;
- RegRefs.erase(SubregReg);
- }
- for (const unsigned *Super = TRI->getSuperRegisters(Reg);
- *Super; ++Super) {
- unsigned SuperReg = *Super;
- Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- }
- }
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg()) continue;
- unsigned Reg = MO.getReg();
- if (Reg == 0) continue;
- if (!MO.isUse()) continue;
-
- const TargetRegisterClass *NewRC =
- getInstrOperandRegClass(TRI, TII, MI->getDesc(), i);
-
- // For now, only allow the register to be changed if its register
- // class is consistent across all uses.
- if (!Classes[Reg] && NewRC)
- Classes[Reg] = NewRC;
- else if (!NewRC || Classes[Reg] != NewRC)
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
-
- RegRefs.insert(std::make_pair(Reg, &MO));
-
- // It wasn't previously live but now it is, this is a kill.
- if (KillIndices[Reg] == -1u) {
- KillIndices[Reg] = Count;
- DefIndices[Reg] = -1u;
- }
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- if (KillIndices[AliasReg] == -1u) {
- KillIndices[AliasReg] = Count;
- DefIndices[AliasReg] = -1u;
- }
- }
- }
- }
- assert(Count == -1u && "Count mismatch!");
-
- return Changed;
+ // Call the superclass.
+ ScheduleDAGInstrs::finishBlock();
}
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
-/// the PendingQueue if the count reaches zero. Also update its cycle bound.
+/// the PendingQueue if the count reaches zero.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
SUnit *SuccSU = SuccEdge->getSUnit();
- --SuccSU->NumPredsLeft;
-
+
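+  // Weak edges are advisory rather than correctness constraints; just note
+  // that one has been satisfied and leave NumPredsLeft untouched.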
+ if (SuccEdge->isWeak()) {
+ --SuccSU->WeakPredsLeft;
+ return;
+ }
#ifndef NDEBUG
- if (SuccSU->NumPredsLeft < 0) {
- cerr << "*** Scheduling failed! ***\n";
+ if (SuccSU->NumPredsLeft == 0) {
+ dbgs() << "*** Scheduling failed! ***\n";
SuccSU->dump(this);
- cerr << " has been released too many times!\n";
- assert(0);
+ dbgs() << " has been released too many times!\n";
+ llvm_unreachable(nullptr);
}
#endif
-
- // Compute how many cycles it will be before this actually becomes
- // available. This is the max of the start time of all predecessors plus
- // their latencies.
- SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
-
- if (SuccSU->NumPredsLeft == 0) {
+ --SuccSU->NumPredsLeft;
+
+ // Standard scheduler algorithms will recompute the depth of the successor
+ // here as such:
+ // SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
+ //
+ // However, we lazily compute node depth instead. Note that
+ // ScheduleNodeTopDown has already updated the depth of this node which causes
+  // all descendants to be marked dirty. Setting the successor depth explicitly
+ // here would cause depth to be recomputed for all its ancestors. If the
+ // successor is not yet ready (because of a transitively redundant edge) then
+ // this causes depth computation to be quadratic in the size of the DAG.
+
+ // If all the node's predecessors are scheduled, this node is ready
+ // to be scheduled. Ignore the special ExitSU node.
+ if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
PendingQueue.push_back(SuccSU);
+}
+
+/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
+void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
+ for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I) {
+ ReleaseSucc(SU, &*I);
}
}
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
- DOUT << "*** Scheduling [" << CurCycle << "]: ";
+ DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
-
+
Sequence.push_back(SU);
- assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
+ assert(CurCycle >= SU->getDepth() &&
+ "Node scheduled above its depth!");
SU->setDepthToAtLeast(CurCycle);
- // Top down: release successors.
- for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I)
- ReleaseSucc(SU, &*I);
-
+ ReleaseSuccessors(SU);
SU->isScheduled = true;
- AvailableQueue.ScheduledNode(SU);
+ AvailableQueue.scheduledNode(SU);
+}
+
+/// emitNoop - Add a noop to the current instruction sequence.
+void SchedulePostRATDList::emitNoop(unsigned CurCycle) {
+ DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
+ HazardRec->EmitNoop();
+ Sequence.push_back(nullptr); // NULL here means noop
+ ++NumNoops;
}
/// ListScheduleTopDown - The main loop of list scheduling for top-down
void SchedulePostRATDList::ListScheduleTopDown() {
unsigned CurCycle = 0;
- // All leaves to Available queue.
+ // We're scheduling top-down but we're visiting the regions in
+ // bottom-up order, so we don't know the hazards at the start of a
+ // region. So assume no hazards (this should usually be ok as most
+ // blocks are a single region).
+ HazardRec->Reset();
+
+ // Release any successors of the special Entry node.
+ ReleaseSuccessors(&EntrySU);
+
+ // Add all leaves to Available queue.
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
// It is available if it has no predecessors.
- if (SUnits[i].Preds.empty()) {
+ if (!SUnits[i].NumPredsLeft && !SUnits[i].isAvailable) {
AvailableQueue.push(&SUnits[i]);
SUnits[i].isAvailable = true;
}
}
-
+
+ // In any cycle where we can't schedule any instructions, we must
+ // stall or emit a noop, depending on the target.
+ bool CycleHasInsts = false;
+
// While Available queue is not empty, grab the node with the highest
// priority. If it is not ready put it back. Schedule the node.
+ std::vector<SUnit*> NotReady;
Sequence.reserve(SUnits.size());
while (!AvailableQueue.empty() || !PendingQueue.empty()) {
// Check to see if any of the pending instructions are ready to issue. If
} else if (PendingQueue[i]->getDepth() < MinDepth)
MinDepth = PendingQueue[i]->getDepth();
}
-
- // If there are no instructions available, don't try to issue anything.
- if (AvailableQueue.empty()) {
- CurCycle = MinDepth != ~0u ? MinDepth : CurCycle + 1;
- continue;
+
+ DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));
+
+ SUnit *FoundSUnit = nullptr, *NotPreferredSUnit = nullptr;
+ bool HasNoopHazards = false;
+ while (!AvailableQueue.empty()) {
+ SUnit *CurSUnit = AvailableQueue.pop();
+
+ ScheduleHazardRecognizer::HazardType HT =
+ HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
+ if (HT == ScheduleHazardRecognizer::NoHazard) {
+ if (HazardRec->ShouldPreferAnother(CurSUnit)) {
+ if (!NotPreferredSUnit) {
+ // If this is the first non-preferred node for this cycle, then
+ // record it and continue searching for a preferred node. If this
+ // is not the first non-preferred node, then treat it as though
+ // there had been a hazard.
+ NotPreferredSUnit = CurSUnit;
+ continue;
+ }
+ } else {
+ FoundSUnit = CurSUnit;
+ break;
+ }
+ }
+
+ // Remember if this is a noop hazard.
+ HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;
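+      // HasNoopHazards is consulted after this loop: if nothing can issue
+      // and a noop hazard was seen, a plain stall is not enough and an
+      // explicit noop must be emitted instead.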
+
+ NotReady.push_back(CurSUnit);
+ }
+
+ // If we have a non-preferred node, push it back onto the available list.
+ // If we did not find a preferred node, then schedule this first
+ // non-preferred node.
+ if (NotPreferredSUnit) {
+ if (!FoundSUnit) {
+ DEBUG(dbgs() << "*** Will schedule a non-preferred instruction...\n");
+ FoundSUnit = NotPreferredSUnit;
+ } else {
+ AvailableQueue.push(NotPreferredSUnit);
+ }
+
+ NotPreferredSUnit = nullptr;
+ }
+
+ // Add the nodes that aren't ready back onto the available list.
+ if (!NotReady.empty()) {
+ AvailableQueue.push_all(NotReady);
+ NotReady.clear();
}
- SUnit *FoundSUnit = AvailableQueue.pop();
-
- // If we found a node to schedule, do it now.
+ // If we found a node to schedule...
if (FoundSUnit) {
- ScheduleNodeTopDown(FoundSUnit, CurCycle);
+ // If we need to emit noops prior to this instruction, then do so.
+ unsigned NumPreNoops = HazardRec->PreEmitNoops(FoundSUnit);
+ for (unsigned i = 0; i != NumPreNoops; ++i)
+ emitNoop(CurCycle);
- // If this is a pseudo-op node, we don't want to increment the current
- // cycle.
- if (FoundSUnit->Latency) // Don't increment CurCycle for pseudo-ops!
- ++CurCycle;
+ // ... schedule the node...
+ ScheduleNodeTopDown(FoundSUnit, CurCycle);
+ HazardRec->EmitInstruction(FoundSUnit);
+ CycleHasInsts = true;
+ if (HazardRec->atIssueLimit()) {
+ DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
+ HazardRec->AdvanceCycle();
+ ++CurCycle;
+ CycleHasInsts = false;
+ }
} else {
- // Otherwise, we have a pipeline stall, but no other problem, just advance
- // the current cycle and try again.
- DOUT << "*** Advancing cycle, no work to do\n";
- ++NumStalls;
+ if (CycleHasInsts) {
+ DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
+ HazardRec->AdvanceCycle();
+ } else if (!HasNoopHazards) {
+ // Otherwise, we have a pipeline stall, but no other problem,
+ // just advance the current cycle and try again.
+ DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
+ HazardRec->AdvanceCycle();
+ ++NumStalls;
+ } else {
+ // Otherwise, we have no instructions to issue and we have instructions
+ // that will fault if we don't do this right. This is the case for
+ // processors without pipeline interlocks and other cases.
+ emitNoop(CurCycle);
+ }
+
++CurCycle;
+ CycleHasInsts = false;
}
}
#ifndef NDEBUG
- VerifySchedule(/*isBottomUp=*/false);
-#endif
+ unsigned ScheduledNodes = VerifyScheduledDAG(/*isBottomUp=*/false);
+ unsigned Noops = 0;
+ for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
+ if (!Sequence[i])
+ ++Noops;
+ assert(Sequence.size() - Noops == ScheduledNodes &&
+ "The number of nodes scheduled doesn't match the expected number!");
+#endif // NDEBUG
}
-//===----------------------------------------------------------------------===//
-// Public Constructor Functions
-//===----------------------------------------------------------------------===//
+// EmitSchedule - Emit the machine code in scheduled order.
+void SchedulePostRATDList::EmitSchedule() {
+ RegionBegin = RegionEnd;
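+  // Scheduled instructions are spliced back immediately before RegionEnd;
+  // RegionBegin is reset here and recomputed below once the first
+  // instruction has been placed.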
+
+  // If the first instruction was a DBG_VALUE, put it back.
+ if (FirstDbgValue)
+ BB->splice(RegionEnd, BB, FirstDbgValue);
+
+ // Then re-insert them according to the given schedule.
+ for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
+ if (SUnit *SU = Sequence[i])
+ BB->splice(RegionEnd, BB, SU->getInstr());
+ else
+ // Null SUnit* is a noop.
+ TII->insertNoop(*BB, RegionEnd);
+
+ // Update the Begin iterator, as the first instruction in the block
+ // may have been scheduled later.
+ if (i == 0)
+ RegionBegin = std::prev(RegionEnd);
+ }
-FunctionPass *llvm::createPostRAScheduler() {
- return new PostRAScheduler();
+ // Reinsert any remaining debug_values.
+ for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
+ DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
+ std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
+ MachineInstr *DbgValue = P.first;
+ MachineBasicBlock::iterator OrigPrivMI = P.second;
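+    // Re-insert the DBG_VALUE immediately after the instruction it
+    // originally followed.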
+ BB->splice(++OrigPrivMI, BB, DbgValue);
+ }
+ DbgValues.clear();
+ FirstDbgValue = nullptr;
}