//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "post-RA-sched"
+#include "AntiDepBreaker.h"
+#include "AggressiveAntiDepBreaker.h"
+#include "CriticalAntiDepBreaker.h"
+#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
-#include "llvm/Support/Compiler.h"
+#include "llvm/Target/TargetSubtarget.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/DenseSet.h"
-#include <map>
-#include <climits>
+#include <set>
using namespace llvm;
+STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
+STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");
+// Post-RA scheduling is enabled with
+// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
+// override the target.
static cl::opt<bool>
+EnablePostRAScheduler("post-RA-scheduler",
+ cl::desc("Enable scheduling after register allocation"),
+ cl::init(false), cl::Hidden);
+static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
- cl::desc("Break scheduling anti-dependencies"),
- cl::init(false));
+ cl::desc("Break post-RA scheduling anti-dependencies: "
+ "\"critical\", \"all\", or \"none\""),
+ cl::init("none"), cl::Hidden);
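+// For example, with the llc driver one might force post-RA scheduling with
+// critical-path anti-dependence breaking via:
+//   llc -post-RA-scheduler -break-anti-dependencies=critical foo.bc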
+
+// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
+static cl::opt<int>
+DebugDiv("postra-sched-debugdiv",
+         cl::desc("Debug control for which MBBs are scheduled"),
+ cl::init(0), cl::Hidden);
+static cl::opt<int>
+DebugMod("postra-sched-debugmod",
+         cl::desc("Debug control for which MBBs are scheduled"),
+ cl::init(0), cl::Hidden);
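+// Note: these two options are only consulted in asserts builds; the check is
+// guarded by #ifndef NDEBUG in runOnMachineFunction below.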
+
+AntiDepBreaker::~AntiDepBreaker() { }
namespace {
- class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
+ class PostRAScheduler : public MachineFunctionPass {
+ AliasAnalysis *AA;
+ const TargetInstrInfo *TII;
+ CodeGenOpt::Level OptLevel;
+
public:
static char ID;
- PostRAScheduler() : MachineFunctionPass(&ID) {}
+ PostRAScheduler(CodeGenOpt::Level ol) :
+ MachineFunctionPass(&ID), OptLevel(ol) {}
+
+ void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesCFG();
+ AU.addRequired<AliasAnalysis>();
+ AU.addRequired<MachineDominatorTree>();
+ AU.addPreserved<MachineDominatorTree>();
+ AU.addRequired<MachineLoopInfo>();
+ AU.addPreserved<MachineLoopInfo>();
+ MachineFunctionPass::getAnalysisUsage(AU);
+ }
const char *getPassName() const {
return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
char PostRAScheduler::ID = 0;
- class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
+ class SchedulePostRATDList : public ScheduleDAGInstrs {
/// AvailableQueue - The priority queue to use for the available SUnits.
///
LatencyPriorityQueue AvailableQueue;
-
+
/// PendingQueue - This contains all of the instructions whose operands have
/// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

/// Topo - A topological ordering for SUnits.
ScheduleDAGTopologicalSort Topo;
+ /// HazardRec - The hazard recognizer to use.
+ ScheduleHazardRecognizer *HazardRec;
+
+ /// AntiDepBreak - Anti-dependence breaking object, or NULL if none
+ AntiDepBreaker *AntiDepBreak;
+
+ /// AA - AliasAnalysis for making memory reference queries.
+ AliasAnalysis *AA;
+
+    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
+ /// or ~0u if the register is not live.
+ unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
+
public:
- SchedulePostRATDList(MachineBasicBlock *mbb, const TargetMachine &tm)
- : ScheduleDAGInstrs(mbb, tm), Topo(SUnits) {}
+ SchedulePostRATDList(MachineFunction &MF,
+ const MachineLoopInfo &MLI,
+ const MachineDominatorTree &MDT,
+ ScheduleHazardRecognizer *HR,
+ AntiDepBreaker *ADB,
+ AliasAnalysis *aa)
+ : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
+ HazardRec(HR), AntiDepBreak(ADB), AA(aa) {}
+
+ ~SchedulePostRATDList() {
+ }
+ /// StartBlock - Initialize register live-range state for scheduling in
+ /// this block.
+ ///
+ void StartBlock(MachineBasicBlock *BB);
+
+ /// Schedule - Schedule the instruction range using list scheduling.
+ ///
void Schedule();
+ /// Observe - Update liveness information to account for the current
+ /// instruction, which will not be scheduled.
+ ///
+ void Observe(MachineInstr *MI, unsigned Count);
+
+ /// FinishBlock - Clean up register live-range state.
+ ///
+ void FinishBlock();
+
+ /// FixupKills - Fix register kill flags that have been made
+ /// invalid due to scheduling
+ ///
+ void FixupKills(MachineBasicBlock *MBB);
+
private:
- void ReleaseSucc(SUnit *SU, SUnit *SuccSU, bool isChain);
+ void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
+ void ReleaseSuccessors(SUnit *SU);
void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
void ListScheduleTopDown();
- bool BreakAntiDependencies();
+ void StartBlockForKills(MachineBasicBlock *BB);
+
+ // ToggleKillFlag - Toggle a register operand kill flag. Other
+ // adjustments may be made to the instruction if necessary. Return
+ // true if the operand has been deleted, false if not.
+ bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
};
}
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
- DOUT << "PostRAScheduler\n";
+ AA = &getAnalysis<AliasAnalysis>();
+ TII = Fn.getTarget().getInstrInfo();
+
+ // Check for explicit enable/disable of post-ra scheduling.
+ TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
+ SmallVector<TargetRegisterClass*, 4> CriticalPathRCs;
+ if (EnablePostRAScheduler.getPosition() > 0) {
+ if (!EnablePostRAScheduler)
+ return false;
+ } else {
+ // Check that post-RA scheduling is enabled for this target.
+ const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
+ if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode, CriticalPathRCs))
+ return false;
+ }
+
+ // Check for antidep breaking override...
+ if (EnableAntiDepBreaking.getPosition() > 0) {
+ AntiDepMode = (EnableAntiDepBreaking == "all") ?
+ TargetSubtarget::ANTIDEP_ALL :
+ (EnableAntiDepBreaking == "critical")
+ ? TargetSubtarget::ANTIDEP_CRITICAL : TargetSubtarget::ANTIDEP_NONE;
+ }
+
+ DEBUG(dbgs() << "PostRAScheduler\n");
+
+ const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
+ const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
+ const TargetMachine &TM = Fn.getTarget();
+ const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
+ ScheduleHazardRecognizer *HR =
+ TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins);
+ AntiDepBreaker *ADB =
+ ((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
+ (AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn, CriticalPathRCs) :
+ ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
+ (AntiDepBreaker *)new CriticalAntiDepBreaker(Fn) : NULL));
+
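+  // A single scheduler object is reused for every block and region in the
+  // function; the hazard recognizer and anti-dep breaker it references are
+  // owned here and freed once all blocks have been processed.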
+ SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, ADB, AA);
// Loop over all of the basic blocks
for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
MBB != MBBe; ++MBB) {
+#ifndef NDEBUG
+ // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
+ if (DebugDiv > 0) {
+ static int bbcnt = 0;
+ if (bbcnt++ % DebugDiv != DebugMod)
+ continue;
+ dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
+ ":BB#" << MBB->getNumber() << " ***\n";
+ }
+#endif
- SchedulePostRATDList Scheduler(MBB, Fn.getTarget());
+ // Initialize register live-range state for scheduling in this block.
+ Scheduler.StartBlock(MBB);
+
+ // Schedule each sequence of instructions not interrupted by a label
+ // or anything else that effectively needs to shut down scheduling.
+ MachineBasicBlock::iterator Current = MBB->end();
+ unsigned Count = MBB->size(), CurrentCount = Count;
+ for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
+ MachineInstr *MI = llvm::prior(I);
+ if (TII->isSchedulingBoundary(MI, MBB, Fn)) {
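+        // Schedule the region below the boundary now.  The boundary
+        // instruction itself is excluded from both regions; it is only
+        // Observed so the anti-dep breaker's liveness stays accurate.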
+ Scheduler.Run(MBB, I, Current, CurrentCount);
+ Scheduler.EmitSchedule();
+ Current = MI;
+ CurrentCount = Count - 1;
+ Scheduler.Observe(MI, CurrentCount);
+ }
+ I = MI;
+ --Count;
+ }
+ assert(Count == 0 && "Instruction count mismatch!");
+ assert((MBB->begin() == Current || CurrentCount != 0) &&
+ "Instruction count mismatch!");
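+    // Schedule the topmost region, from the start of the block up to the
+    // last boundary seen (or the whole block if there was none).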
+ Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
+ Scheduler.EmitSchedule();
- Scheduler.Run();
+ // Clean up register live-range state.
+ Scheduler.FinishBlock();
- Scheduler.EmitSchedule();
+ // Update register kills
+ Scheduler.FixupKills(MBB);
}
+ delete HR;
+ delete ADB;
+
return true;
}
-
-/// Schedule - Schedule the DAG using list scheduling.
+
+/// StartBlock - Initialize register live-range state for scheduling in
+/// this block.
+///
+void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
+ // Call the superclass.
+ ScheduleDAGInstrs::StartBlock(BB);
+
+ // Reset the hazard recognizer and anti-dep breaker.
+ HazardRec->Reset();
+ if (AntiDepBreak != NULL)
+ AntiDepBreak->StartBlock(BB);
+}
+
+/// Schedule - Schedule the instruction range using list scheduling.
+///
void SchedulePostRATDList::Schedule() {
- DOUT << "********** List Scheduling **********\n";
-
- // Build scheduling units.
- BuildSchedUnits();
+ // Build the scheduling graph.
+ BuildSchedGraph(AA);
- if (EnableAntiDepBreaking) {
- if (BreakAntiDependencies()) {
+ if (AntiDepBreak != NULL) {
+ unsigned Broken =
+ AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
+ InsertPosIndex);
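+    // Begin, InsertPos and InsertPosIndex describe the current scheduling
+    // region (members inherited from ScheduleDAGInstrs).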
+
+ if (Broken != 0) {
// We made changes. Update the dependency graph.
// Theoretically we could update the graph in place:
// When a live range is changed to use a different register, remove
// that register, and add new anti-dependence and output-dependence
// edges based on the next live range of the register.
SUnits.clear();
- BuildSchedUnits();
+ Sequence.clear();
+ EntrySU = SUnit();
+ ExitSU = SUnit();
+ BuildSchedGraph(AA);
+
+ NumFixedAnti += Broken;
}
}
- AvailableQueue.initNodes(SUnits);
+ DEBUG(dbgs() << "********** List Scheduling **********\n");
+ DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
+ SUnits[su].dumpAll(this));
+ AvailableQueue.initNodes(SUnits);
ListScheduleTopDown();
-
AvailableQueue.releaseState();
}
-/// getInstrOperandRegClass - Return register class of the operand of an
-/// instruction of the specified TargetInstrDesc.
-static const TargetRegisterClass*
-getInstrOperandRegClass(const TargetRegisterInfo *TRI,
- const TargetInstrInfo *TII, const TargetInstrDesc &II,
- unsigned Op) {
- if (Op >= II.getNumOperands())
- return NULL;
- if (II.OpInfo[Op].isLookupPtrRegClass())
- return TII->getPointerRegClass();
- return TRI->getRegClass(II.OpInfo[Op].RegClass);
+/// Observe - Update liveness information to account for the current
+/// instruction, which will not be scheduled.
+///
+void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
+ if (AntiDepBreak != NULL)
+ AntiDepBreak->Observe(MI, Count, InsertPosIndex);
}
-/// BreakAntiDependencies - Identifiy anti-dependencies along the critical path
-/// of the ScheduleDAG and break them by renaming registers.
+/// FinishBlock - Clean up register live-range state.
///
-bool SchedulePostRATDList::BreakAntiDependencies() {
- // The code below assumes that there is at least one instruction,
- // so just duck out immediately if the block is empty.
- if (BB->empty()) return false;
-
- Topo.InitDAGTopologicalSorting();
-
- // Compute a critical path for the DAG.
- SUnit *Max = 0;
- std::vector<SDep *> CriticalPath(SUnits.size());
- for (ScheduleDAGTopologicalSort::const_iterator I = Topo.begin(),
- E = Topo.end(); I != E; ++I) {
- SUnit *SU = &SUnits[*I];
- for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
- P != PE; ++P) {
- SUnit *PredSU = P->Dep;
- // This assumes that there's no delay for reusing registers.
- unsigned PredLatency = (P->isCtrl && P->Reg != 0) ? 1 : PredSU->Latency;
- unsigned PredTotalLatency = PredSU->CycleBound + PredLatency;
- if (SU->CycleBound < PredTotalLatency ||
- (SU->CycleBound == PredTotalLatency && !P->isAntiDep)) {
- SU->CycleBound = PredTotalLatency;
- CriticalPath[*I] = &*P;
- }
- }
- // Keep track of the node at the end of the critical path.
- if (!Max || SU->CycleBound + SU->Latency > Max->CycleBound + Max->Latency)
- Max = SU;
- }
+void SchedulePostRATDList::FinishBlock() {
+ if (AntiDepBreak != NULL)
+ AntiDepBreak->FinishBlock();
- DOUT << "Critical path has total latency "
- << (Max ? Max->CycleBound + Max->Latency : 0) << "\n";
-
- // Walk the critical path from the bottom up. Collect all anti-dependence
- // edges on the critical path. Skip anti-dependencies between SUnits that
- // are connected with other edges, since such units won't be able to be
- // scheduled past each other anyway.
- //
- // The heuristic is that edges on the critical path are more important to
- // break than other edges. And since there are a limited number of
- // registers, we don't want to waste them breaking edges that aren't
- // important.
- //
- // TODO: Instructions with multiple defs could have multiple
- // anti-dependencies. The current code here only knows how to break one
- // edge per instruction. Note that we'd have to be able to break all of
- // the anti-dependencies in an instruction in order to be effective.
- BitVector AllocatableSet = TRI->getAllocatableSet(*MF);
- DenseMap<MachineInstr *, unsigned> CriticalAntiDeps;
- for (SUnit *SU = Max; CriticalPath[SU->NodeNum];
- SU = CriticalPath[SU->NodeNum]->Dep) {
- SDep *Edge = CriticalPath[SU->NodeNum];
- SUnit *NextSU = Edge->Dep;
- unsigned AntiDepReg = Edge->Reg;
- // Only consider anti-dependence edges.
- if (!Edge->isAntiDep)
- continue;
- assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
- // Don't break anti-dependencies on non-allocatable registers.
- if (!AllocatableSet.test(AntiDepReg))
- continue;
- // If the SUnit has other dependencies on the SUnit that it
- // anti-depends on, don't bother breaking the anti-dependency.
- // Also, if there are dependencies on other SUnits with the
- // same register as the anti-dependency, don't attempt to
- // break it.
- for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
- P != PE; ++P)
- if (P->Dep == NextSU ?
- (!P->isAntiDep || P->Reg != AntiDepReg) :
- (!P->isCtrl && !P->isAntiDep && P->Reg == AntiDepReg)) {
- AntiDepReg = 0;
- break;
- }
- if (AntiDepReg != 0)
- CriticalAntiDeps[SU->getInstr()] = AntiDepReg;
- }
+ // Call the superclass.
+ ScheduleDAGInstrs::FinishBlock();
+}
- // For live regs that are only used in one register class in a live range,
- // the register class. If the register is not live or is referenced in
- // multiple register classes, the corresponding value is null. If the
- // register is used in multiple register classes, the corresponding value
- // is -1 casted to a pointer.
- const TargetRegisterClass *
- Classes[TargetRegisterInfo::FirstVirtualRegister] = {};
-
- // Map registers to all their references within a live range.
- std::multimap<unsigned, MachineOperand *> RegRefs;
-
- // The index of the most recent kill (proceding bottom-up), or -1 if
- // the register is not live.
- unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
- std::fill(KillIndices, array_endof(KillIndices), -1);
- // The index of the most recent def (proceding bottom up), or -1 if
- // the register is live.
- unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
- std::fill(DefIndices, array_endof(DefIndices), BB->size());
+/// StartBlockForKills - Initialize register live-range state for updating kills
+///
+void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
+ // Initialize the indices to indicate that no registers are live.
+ for (unsigned i = 0; i < TRI->getNumRegs(); ++i)
+ KillIndices[i] = ~0u;
// Determine the live-out physregs for this block.
- if (!BB->empty() && BB->back().getDesc().isReturn())
+ if (!BB->empty() && BB->back().getDesc().isReturn()) {
// In a return block, examine the function live-out regs.
for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
- E = MRI.liveout_end(); I != E; ++I) {
+ E = MRI.liveout_end(); I != E; ++I) {
unsigned Reg = *I;
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
KillIndices[Reg] = BB->size();
- DefIndices[Reg] = -1;
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[AliasReg] = BB->size();
- DefIndices[AliasReg] = -1;
+ // Repeat, for all subregs.
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg) {
+ KillIndices[*Subreg] = BB->size();
}
}
- else
+ }
+ else {
// In a non-return block, examine the live-in regs of all successors.
for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
- SE = BB->succ_end(); SI != SE; ++SI)
+ SE = BB->succ_end(); SI != SE; ++SI) {
for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
- E = (*SI)->livein_end(); I != E; ++I) {
+ E = (*SI)->livein_end(); I != E; ++I) {
unsigned Reg = *I;
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
KillIndices[Reg] = BB->size();
- DefIndices[Reg] = -1;
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[AliasReg] = BB->size();
- DefIndices[AliasReg] = -1;
+ // Repeat, for all subregs.
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg) {
+ KillIndices[*Subreg] = BB->size();
}
}
+ }
+ }
+}
+
+bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
+ MachineOperand &MO) {
+ // Setting kill flag...
+ if (!MO.isKill()) {
+ MO.setIsKill(true);
+ return false;
+ }
- // Consider callee-saved registers as live-out, since we're running after
- // prologue/epilogue insertion so there's no way to add additional
- // saved registers.
- //
- // TODO: If the callee saves and restores these, then we can potentially
- // use them between the save and the restore. To do that, we could scan
- // the exit blocks to see which of these registers are defined.
- // Alternatively, calle-saved registers that aren't saved and restored
- // could be marked live-in in every block.
- for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
- unsigned Reg = *I;
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[Reg] = BB->size();
- DefIndices[Reg] = -1;
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[AliasReg] = BB->size();
- DefIndices[AliasReg] = -1;
+ // If MO itself is live, clear the kill flag...
+ if (KillIndices[MO.getReg()] != ~0u) {
+ MO.setIsKill(false);
+ return false;
+ }
+
+ // If any subreg of MO is live, then create an imp-def for that
+ // subreg and keep MO marked as killed.
+ MO.setIsKill(false);
+ bool AllDead = true;
+ const unsigned SuperReg = MO.getReg();
+ for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
+ *Subreg; ++Subreg) {
+ if (KillIndices[*Subreg] != ~0u) {
+ MI->addOperand(MachineOperand::CreateReg(*Subreg,
+ true /*IsDef*/,
+ true /*IsImp*/,
+ false /*IsKill*/,
+ false /*IsDead*/));
+ AllDead = false;
}
}
- // Consider this pattern:
- // A = ...
- // ... = A
- // A = ...
- // ... = A
- // A = ...
- // ... = A
- // A = ...
- // ... = A
- // There are three anti-dependencies here, and without special care,
- // we'd break all of them using the same register:
- // A = ...
- // ... = A
- // B = ...
- // ... = B
- // B = ...
- // ... = B
- // B = ...
- // ... = B
- // because at each anti-dependence, B is the first register that
- // isn't A which is free. This re-introduces anti-dependencies
- // at all but one of the original anti-dependencies that we were
- // trying to break. To avoid this, keep track of the most recent
- // register that each register was replaced with, avoid avoid
- // using it to repair an anti-dependence on the same register.
- // This lets us produce this:
- // A = ...
- // ... = A
- // B = ...
- // ... = B
- // C = ...
- // ... = C
- // B = ...
- // ... = B
- // This still has an anti-dependence on B, but at least it isn't on the
- // original critical path.
- //
- // TODO: If we tracked more than one register here, we could potentially
- // fix that remaining critical edge too. This is a little more involved,
- // because unlike the most recent register, less recent registers should
- // still be considered, though only if no other registers are available.
- unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};
-
- // A registers defined and not used in an instruction. This is used for
- // liveness tracking and is declared outside the loop only to avoid
- // having it be re-allocated on each iteration.
- DenseSet<unsigned> Defs;
-
- // Attempt to break anti-dependence edges on the critical path. Walk the
- // instructions from the bottom up, tracking information about liveness
- // as we go to help determine which registers are available.
- bool Changed = false;
- unsigned Count = BB->size() - 1;
- for (MachineBasicBlock::reverse_iterator I = BB->rbegin(), E = BB->rend();
- I != E; ++I, --Count) {
- MachineInstr *MI = &*I;
-
- // Check if this instruction has an anti-dependence that we're
- // interested in.
- DenseMap<MachineInstr *, unsigned>::iterator C = CriticalAntiDeps.find(MI);
- unsigned AntiDepReg = C != CriticalAntiDeps.end() ?
- C->second : 0;
-
- // Scan the register operands for this instruction and update
- // Classes and RegRefs.
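+  // If no subreg was live, the whole register really is killed here, so
+  // restore the kill flag on the super-register.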
+  if (AllDead)
+ MO.setIsKill(true);
+ return false;
+}
+
+/// FixupKills - Fix the register kill flags that may have been made
+/// incorrect by instruction reordering.
+///
+void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
+ DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');
+
+ std::set<unsigned> killedRegs;
+ BitVector ReservedRegs = TRI->getReservedRegs(MF);
+
+ StartBlockForKills(MBB);
+
+ // Examine block from end to start...
+ unsigned Count = MBB->size();
+ for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
+ I != E; --Count) {
+ MachineInstr *MI = --I;
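+    // DBG_VALUE instructions do not affect register liveness, so skip them
+    // when recomputing kill flags.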
+ if (MI->isDebugValue())
+ continue;
+
+    // Update liveness.  Registers that are defined but not used in this
+    // instruction are now dead.  Mark the register and all of its subregs as
+    // dead, since they are completely redefined here.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
- const TargetRegisterClass *NewRC =
- getInstrOperandRegClass(TRI, TII, MI->getDesc(), i);
-
- // If this instruction has a use of AntiDepReg, breaking it
- // is invalid.
- if (MO.isUse() && AntiDepReg == Reg)
- AntiDepReg = 0;
-
- // For now, only allow the register to be changed if its register
- // class is consistent across all uses.
- if (!Classes[Reg] && NewRC)
- Classes[Reg] = NewRC;
- else if (!NewRC || Classes[Reg] != NewRC)
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
-
- // Now check for aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- // If an alias of the reg is used during the live range, give up.
- // Note that this allows us to skip checking if AntiDepReg
- // overlaps with any of the aliases, among other things.
- unsigned AliasReg = *Alias;
- if (Classes[AliasReg]) {
- Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
- }
- }
+ if (!MO.isDef()) continue;
+ // Ignore two-addr defs.
+ if (MI->isRegTiedToUseOperand(i)) continue;
- // If we're still willing to consider this register, note the reference.
- if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
- RegRefs.insert(std::make_pair(Reg, &MO));
- }
+ KillIndices[Reg] = ~0u;
- // Determine AntiDepReg's register class, if it is live and is
- // consistently used within a single class.
- const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
- assert((AntiDepReg == 0 || RC != NULL) &&
- "Register should be live if it's causing an anti-dependence!");
- if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
- AntiDepReg = 0;
-
- // Look for a suitable register to use to break the anti-depenence.
- //
- // TODO: Instead of picking the first free register, consider which might
- // be the best.
- if (AntiDepReg != 0) {
- for (TargetRegisterClass::iterator R = RC->allocation_order_begin(*MF),
- RE = RC->allocation_order_end(*MF); R != RE; ++R) {
- unsigned NewReg = *R;
- // Don't replace a register with itself.
- if (NewReg == AntiDepReg) continue;
- // Don't replace a register with one that was recently used to repair
- // an anti-dependence with this AntiDepReg, because that would
- // re-introduce that anti-dependence.
- if (NewReg == LastNewReg[AntiDepReg]) continue;
- // If NewReg is dead and NewReg's most recent def is not before
- // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
- assert(((KillIndices[AntiDepReg] == -1u) != (DefIndices[AntiDepReg] == -1u)) &&
- "Kill and Def maps aren't consistent for AntiDepReg!");
- assert(((KillIndices[NewReg] == -1u) != (DefIndices[NewReg] == -1u)) &&
- "Kill and Def maps aren't consistent for NewReg!");
- if (KillIndices[NewReg] == -1u &&
- KillIndices[AntiDepReg] <= DefIndices[NewReg]) {
- DOUT << "Breaking anti-dependence edge on reg " << AntiDepReg
- << " with reg " << NewReg << "!\n";
-
- // Update the references to the old register to refer to the new
- // register.
- std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
- std::multimap<unsigned, MachineOperand *>::iterator>
- Range = RegRefs.equal_range(AntiDepReg);
- for (std::multimap<unsigned, MachineOperand *>::iterator
- Q = Range.first, QE = Range.second; Q != QE; ++Q)
- Q->second->setReg(NewReg);
-
- // We just went back in time and modified history; the
- // liveness information for the anti-depenence reg is now
- // inconsistent. Set the state as if it were dead.
- Classes[NewReg] = Classes[AntiDepReg];
- DefIndices[NewReg] = DefIndices[AntiDepReg];
- KillIndices[NewReg] = KillIndices[AntiDepReg];
-
- Classes[AntiDepReg] = 0;
- DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
- KillIndices[AntiDepReg] = -1;
-
- RegRefs.erase(AntiDepReg);
- Changed = true;
- LastNewReg[AntiDepReg] = NewReg;
- break;
- }
+ // Repeat for all subregs.
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg) {
+ KillIndices[*Subreg] = ~0u;
}
}
- // Update liveness.
- Defs.clear();
+ // Examine all used registers and set/clear kill flag. When a
+ // register is used multiple times we only set the kill flag on
+ // the first use.
+ killedRegs.clear();
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg()) continue;
+ if (!MO.isReg() || !MO.isUse()) continue;
unsigned Reg = MO.getReg();
- if (Reg == 0) continue;
- if (MO.isDef())
- Defs.insert(Reg);
- else {
- // Treat a use in the same instruction as a def as an extension of
- // a live range.
- Defs.erase(Reg);
- // It wasn't previously live but now it is, this is a kill.
- if (KillIndices[Reg] == -1u) {
- KillIndices[Reg] = Count;
- DefIndices[Reg] = -1u;
- }
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- Defs.erase(AliasReg);
- if (KillIndices[AliasReg] == -1u) {
- KillIndices[AliasReg] = Count;
- DefIndices[AliasReg] = -1u;
+ if ((Reg == 0) || ReservedRegs.test(Reg)) continue;
+
+ bool kill = false;
+ if (killedRegs.find(Reg) == killedRegs.end()) {
+ kill = true;
+ // A register is not killed if any subregs are live...
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg) {
+ if (KillIndices[*Subreg] != ~0u) {
+ kill = false;
+ break;
}
}
+
+        // If no subreg is live, then the register is killed if it becomes
+        // live in this instruction.
+ if (kill)
+ kill = (KillIndices[Reg] == ~0u);
}
+
+ if (MO.isKill() != kill) {
+ DEBUG(dbgs() << "Fixing " << MO << " in ");
+ // Warning: ToggleKillFlag may invalidate MO.
+ ToggleKillFlag(MI, MO);
+ DEBUG(MI->dump());
+ }
+
+ killedRegs.insert(Reg);
}
- // Proceding upwards, registers that are defed but not used in this
- // instruction are now dead.
- for (DenseSet<unsigned>::iterator D = Defs.begin(), DE = Defs.end();
- D != DE; ++D) {
- unsigned Reg = *D;
- DefIndices[Reg] = Count;
- KillIndices[Reg] = -1;
- Classes[Reg] = 0;
- RegRefs.erase(Reg);
- // Repeat, for all subregs.
+
+    // Mark any used register (that is not marked undef) and its subregs as
+    // now live...
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
+ unsigned Reg = MO.getReg();
+ if ((Reg == 0) || ReservedRegs.test(Reg)) continue;
+
+ KillIndices[Reg] = Count;
+
for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
*Subreg; ++Subreg) {
- unsigned SubregReg = *Subreg;
- DefIndices[SubregReg] = Count;
- KillIndices[SubregReg] = -1;
- Classes[SubregReg] = 0;
- RegRefs.erase(SubregReg);
+ KillIndices[*Subreg] = Count;
}
}
}
- assert(Count == -1u && "Count mismatch!");
-
- return Changed;
}
//===----------------------------------------------------------------------===//
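//  Top-Down Scheduling
//===----------------------------------------------------------------------===//
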
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
-void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SUnit *SuccSU, bool isChain) {
- --SuccSU->NumPredsLeft;
-
+void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
+ SUnit *SuccSU = SuccEdge->getSUnit();
+
#ifndef NDEBUG
- if (SuccSU->NumPredsLeft < 0) {
- cerr << "*** Scheduling failed! ***\n";
+ if (SuccSU->NumPredsLeft == 0) {
+ dbgs() << "*** Scheduling failed! ***\n";
SuccSU->dump(this);
- cerr << " has been released too many times!\n";
- assert(0);
+ dbgs() << " has been released too many times!\n";
+ llvm_unreachable(0);
}
#endif
-
+ --SuccSU->NumPredsLeft;
+
// Compute how many cycles it will be before this actually becomes
// available. This is the max of the start time of all predecessors plus
// their latencies.
- // If this is a token edge, we don't need to wait for the latency of the
- // preceeding instruction (e.g. a long-latency load) unless there is also
- // some other data dependence.
- unsigned PredDoneCycle = SU->Cycle;
- if (!isChain)
- PredDoneCycle += SU->Latency;
- else if (SU->Latency)
- PredDoneCycle += 1;
- SuccSU->CycleBound = std::max(SuccSU->CycleBound, PredDoneCycle);
-
- if (SuccSU->NumPredsLeft == 0) {
+ SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
+
+ // If all the node's predecessors are scheduled, this node is ready
+ // to be scheduled. Ignore the special ExitSU node.
+ if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
PendingQueue.push_back(SuccSU);
+}
+
+/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
+void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
+ for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I) {
+ ReleaseSucc(SU, &*I);
}
}
/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
- DOUT << "*** Scheduling [" << CurCycle << "]: ";
+ DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
DEBUG(SU->dump(this));
-
- Sequence.push_back(SU);
- SU->Cycle = CurCycle;
- // Top down: release successors.
- for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I)
- ReleaseSucc(SU, I->Dep, I->isCtrl);
+ Sequence.push_back(SU);
+ assert(CurCycle >= SU->getDepth() &&
+ "Node scheduled above its depth!");
+ SU->setDepthToAtLeast(CurCycle);
+ ReleaseSuccessors(SU);
SU->isScheduled = true;
AvailableQueue.ScheduledNode(SU);
}
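
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.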
void SchedulePostRATDList::ListScheduleTopDown() {
unsigned CurCycle = 0;
- // All leaves to Available queue.
+ // We're scheduling top-down but we're visiting the regions in
+ // bottom-up order, so we don't know the hazards at the start of a
+ // region. So assume no hazards (this should usually be ok as most
+ // blocks are a single region).
+ HazardRec->Reset();
+
+ // Release any successors of the special Entry node.
+ ReleaseSuccessors(&EntrySU);
+
+ // Add all leaves to Available queue.
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
// It is available if it has no predecessors.
- if (SUnits[i].Preds.empty()) {
+ bool available = SUnits[i].Preds.empty();
+ if (available) {
AvailableQueue.push(&SUnits[i]);
SUnits[i].isAvailable = true;
}
}
-
+
+ // In any cycle where we can't schedule any instructions, we must
+ // stall or emit a noop, depending on the target.
+ bool CycleHasInsts = false;
+
// While Available queue is not empty, grab the node with the highest
// priority. If it is not ready, put it back. Schedule the node.
+ std::vector<SUnit*> NotReady;
Sequence.reserve(SUnits.size());
while (!AvailableQueue.empty() || !PendingQueue.empty()) {
// Check to see if any of the pending instructions are ready to issue. If
// so, add them to the available queue.
+ unsigned MinDepth = ~0u;
for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
- if (PendingQueue[i]->CycleBound == CurCycle) {
+ if (PendingQueue[i]->getDepth() <= CurCycle) {
AvailableQueue.push(PendingQueue[i]);
PendingQueue[i]->isAvailable = true;
PendingQueue[i] = PendingQueue.back();
PendingQueue.pop_back();
--i; --e;
- } else {
- assert(PendingQueue[i]->CycleBound > CurCycle && "Negative latency?");
+ } else if (PendingQueue[i]->getDepth() < MinDepth)
+ MinDepth = PendingQueue[i]->getDepth();
+ }
+
+ DEBUG(dbgs() << "\n*** Examining Available\n";
+ LatencyPriorityQueue q = AvailableQueue;
+ while (!q.empty()) {
+ SUnit *su = q.pop();
+ dbgs() << "Height " << su->getHeight() << ": ";
+ su->dump(this);
+ });
+
+ SUnit *FoundSUnit = 0;
+ bool HasNoopHazards = false;
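+    // Pop nodes in priority order until one can issue without a pipeline
+    // hazard; everything skipped is stashed in NotReady and pushed back onto
+    // the available queue afterwards.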
+ while (!AvailableQueue.empty()) {
+ SUnit *CurSUnit = AvailableQueue.pop();
+
+ ScheduleHazardRecognizer::HazardType HT =
+ HazardRec->getHazardType(CurSUnit);
+ if (HT == ScheduleHazardRecognizer::NoHazard) {
+ FoundSUnit = CurSUnit;
+ break;
}
+
+ // Remember if this is a noop hazard.
+ HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;
+
+ NotReady.push_back(CurSUnit);
}
-
- // If there are no instructions available, don't try to issue anything.
- if (AvailableQueue.empty()) {
- ++CurCycle;
- continue;
+
+ // Add the nodes that aren't ready back onto the available list.
+ if (!NotReady.empty()) {
+ AvailableQueue.push_all(NotReady);
+ NotReady.clear();
}
- SUnit *FoundSUnit = AvailableQueue.pop();
-
- // If we found a node to schedule, do it now.
+ // If we found a node to schedule...
if (FoundSUnit) {
+ // ... schedule the node...
ScheduleNodeTopDown(FoundSUnit, CurCycle);
-
- // If this is a pseudo-op node, we don't want to increment the current
- // cycle.
- if (FoundSUnit->Latency) // Don't increment CurCycle for pseudo-ops!
- ++CurCycle;
+ HazardRec->EmitInstruction(FoundSUnit);
+ CycleHasInsts = true;
} else {
- // Otherwise, we have a pipeline stall, but no other problem, just advance
- // the current cycle and try again.
- DOUT << "*** Advancing cycle, no work to do\n";
- ++NumStalls;
+ if (CycleHasInsts) {
+ DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
+ HazardRec->AdvanceCycle();
+ } else if (!HasNoopHazards) {
+ // Otherwise, we have a pipeline stall, but no other problem,
+ // just advance the current cycle and try again.
+ DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
+ HazardRec->AdvanceCycle();
+ ++NumStalls;
+ } else {
+ // Otherwise, we have no instructions to issue and we have instructions
+ // that will fault if we don't do this right. This is the case for
+ // processors without pipeline interlocks and other cases.
+ DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
+ HazardRec->EmitNoop();
+ Sequence.push_back(0); // NULL here means noop
+ ++NumNoops;
+ }
+
++CurCycle;
+ CycleHasInsts = false;
}
}
//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//
-FunctionPass *llvm::createPostRAScheduler() {
- return new PostRAScheduler();
+FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
+ return new PostRAScheduler(OptLevel);
}