#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include <map>
+#include <set>
using namespace llvm;
STATISTIC(NumNoops, "Number of noops inserted");
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
cl::desc("Enable exact hazard avoidance"),
- cl::init(false), cl::Hidden);
+ cl::init(true), cl::Hidden);
+
+// If DebugDiv > 0, then only schedule MBBs whose running count modulo
+// DebugDiv equals DebugMod.
+static cl::opt<int>
+DebugDiv("postra-sched-debugdiv",
+ cl::desc("Debug control MBBs that are scheduled"),
+ cl::init(0), cl::Hidden);
+static cl::opt<int>
+DebugMod("postra-sched-debugmod",
+ cl::desc("Debug control MBBs that are scheduled"),
+ cl::init(0), cl::Hidden);
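+
+// For example, passing -postra-sched-debugdiv=8 -postra-sched-debugmod=3
+// schedules only every eighth block encountered (running count == 3 mod 8),
+// which is useful for bisecting a miscompile down to a single MBB.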
namespace {
class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
/// RegRefs - Map registers to all their references within a live range.
std::multimap<unsigned, MachineOperand *> RegRefs;
- /// The index of the most recent kill (proceding bottom-up), or ~0u if
- /// the register is not live.
+ /// KillIndices - The index of the most recent kill (proceeding bottom-up),
+ /// or ~0u if the register is not live.
unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
- /// The index of the most recent complete def (proceding bottom up), or ~0u
- /// if the register is live.
+ /// DefIndices - The index of the most recent complete def (proceeding
+ /// bottom-up), or ~0u if the register is live.
unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
+ /// KeepRegs - A set of registers which are live and cannot be changed to
+ /// break anti-dependencies.
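+ /// Registers enter this set while scanning instructions whose source
+ /// operands have extra register-allocation requirements; the set is
+ /// cleared at the start of each block.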
+ SmallSet<unsigned, 4> KeepRegs;
+
public:
SchedulePostRATDList(MachineFunction &MF,
const MachineLoopInfo &MLI,
/// Schedule - Schedule the instruction range using list scheduling.
///
void Schedule();
+
+ /// FixupKills - Fix register kill flags that have been made
+ /// invalid due to scheduling
+ ///
+ void FixupKills(MachineBasicBlock *MBB);
/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
unsigned findSuitableFreeRegister(unsigned AntiDepReg,
unsigned LastNewReg,
const TargetRegisterClass *);
+ void StartBlockForKills(MachineBasicBlock *BB);
+
+ // ToggleKillFlag - Toggle a register operand kill flag. Other
+ // adjustments may be made to the instruction if necessary. Return
+ // true if the operand has been deleted, false if not.
+ bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
};
}
}
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
+ // Check that post-RA scheduling is enabled for this function
+ const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
+ if (!ST.enablePostRAScheduler())
+ return false;
+
DEBUG(errs() << "PostRAScheduler\n");
const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
// Loop over all of the basic blocks
for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
MBB != MBBe; ++MBB) {
+#ifndef NDEBUG
+ // If DebugDiv > 0, then only schedule MBBs whose running count modulo
+ // DebugDiv equals DebugMod.
+ if (DebugDiv > 0) {
+ static int bbcnt = 0;
+ if (bbcnt++ % DebugDiv != DebugMod)
+ continue;
+ errs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
+ ":MBB ID#" << MBB->getNumber() << " ***\n";
+ }
+#endif
+
// Initialize register live-range state for scheduling in this block.
Scheduler.StartBlock(MBB);
MachineInstr *MI = prior(I);
if (isSchedulingBoundary(MI, Fn)) {
Scheduler.Run(MBB, I, Current, CurrentCount);
- Scheduler.EmitSchedule();
+ Scheduler.EmitSchedule(0);
Current = MI;
CurrentCount = Count - 1;
Scheduler.Observe(MI, CurrentCount);
assert((MBB->begin() == Current || CurrentCount != 0) &&
"Instruction count mismatch!");
Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
- Scheduler.EmitSchedule();
+ Scheduler.EmitSchedule(0);
// Clean up register live-range state.
Scheduler.FinishBlock();
+
+ // Update register kills
+ Scheduler.FixupKills(MBB);
}
return true;
std::fill(KillIndices, array_endof(KillIndices), ~0u);
std::fill(DefIndices, array_endof(DefIndices), BB->size());
+ // Clear "do not change" set.
+ KeepRegs.clear();
+
// Determine the live-out physregs for this block.
- if (!BB->empty() && BB->back().getDesc().isReturn())
+ if (!BB->empty() && BB->back().getDesc().isReturn()) {
// In a return block, examine the function live-out regs.
for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
E = MRI.liveout_end(); I != E; ++I) {
DefIndices[AliasReg] = ~0u;
}
}
- else
+ } else {
// In a non-return block, examine the live-in regs of all successors.
for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
SE = BB->succ_end(); SI != SE; ++SI)
}
}
- // Consider callee-saved registers as live-out, since we're running after
- // prologue/epilogue insertion so there's no way to add additional
- // saved registers.
- //
- // TODO: If the callee saves and restores these, then we can potentially
- // use them between the save and the restore. To do that, we could scan
- // the exit blocks to see which of these registers are defined.
- // Alternatively, callee-saved registers that aren't saved and restored
- // could be marked live-in in every block.
- for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
- unsigned Reg = *I;
- Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[Reg] = BB->size();
- DefIndices[Reg] = ~0u;
- // Repeat, for all aliases.
- for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
- unsigned AliasReg = *Alias;
- Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
- KillIndices[AliasReg] = BB->size();
- DefIndices[AliasReg] = ~0u;
+ // Also mark as live-out any callee-saved registers that were not
+ // saved in the prolog.
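+ // (Such "pristine" registers still hold the caller's values throughout
+ // the function, so they must be treated as live for the entire block.)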
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+ BitVector Pristine = MFI->getPristineRegs(BB);
+ for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
+ unsigned Reg = *I;
+ if (!Pristine.test(Reg)) continue;
+ Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ KillIndices[Reg] = BB->size();
+ DefIndices[Reg] = ~0u;
+ // Repeat, for all aliases.
+ for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
+ unsigned AliasReg = *Alias;
+ Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
+ KillIndices[AliasReg] = BB->size();
+ DefIndices[AliasReg] = ~0u;
+ }
}
}
}
// If we're still willing to consider this register, note the reference.
if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
RegRefs.insert(std::make_pair(Reg, &MO));
+
+ // It's not safe to change register allocation for source operands of
+ // instructions that have special allocation requirements.
+ if (MO.isUse() && MI->getDesc().hasExtraSrcRegAllocReq()) {
+ if (KeepRegs.insert(Reg)) {
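+ // Keep the subregs as well: renaming a subreg would clobber part of
+ // the register this operand requires.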
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg)
+ KeepRegs.insert(*Subreg);
+ }
+ }
}
}
DefIndices[Reg] = Count;
KillIndices[Reg] = ~0u;
- assert(((KillIndices[Reg] == ~0u) !=
- (DefIndices[Reg] == ~0u)) &&
- "Kill and Def maps aren't consistent for Reg!");
+ assert(((KillIndices[Reg] == ~0u) !=
+ (DefIndices[Reg] == ~0u)) &&
+ "Kill and Def maps aren't consistent for Reg!");
+ KeepRegs.erase(Reg);
Classes[Reg] = 0;
RegRefs.erase(Reg);
// Repeat, for all subregs.
unsigned SubregReg = *Subreg;
DefIndices[SubregReg] = Count;
KillIndices[SubregReg] = ~0u;
+ KeepRegs.erase(SubregReg);
Classes[SubregReg] = 0;
RegRefs.erase(SubregReg);
}
I != E; --Count) {
MachineInstr *MI = --I;
- // After regalloc, IMPLICIT_DEF instructions aren't safe to treat as
- // dependence-breaking. In the case of an INSERT_SUBREG, the IMPLICIT_DEF
- // is left behind appearing to clobber the super-register, while the
- // subregister needs to remain live. So we just ignore them.
- if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
- continue;
-
// Check if this instruction has a dependence on the critical path that
// is an anti-dependence that we may be able to break. If it is, set
// AntiDepReg to the non-zero register associated with the anti-dependence.
if (Edge->getKind() == SDep::Anti) {
AntiDepReg = Edge->getReg();
assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
- // Don't break anti-dependencies on non-allocatable registers.
if (!AllocatableSet.test(AntiDepReg))
+ // Don't break anti-dependencies on non-allocatable registers.
+ AntiDepReg = 0;
+ else if (KeepRegs.count(AntiDepReg))
+ // Don't break anti-dependencies if a use down below requires
+ // this exact register.
AntiDepReg = 0;
else {
// If the SUnit has other dependencies on the SUnit that it
PrescanInstruction(MI);
- // If this instruction has a use of AntiDepReg, breaking it
- // is invalid.
- for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
- MachineOperand &MO = MI->getOperand(i);
- if (!MO.isReg()) continue;
- unsigned Reg = MO.getReg();
- if (Reg == 0) continue;
- if (MO.isUse() && AntiDepReg == Reg) {
- AntiDepReg = 0;
- break;
+ if (MI->getDesc().hasExtraDefRegAllocReq())
+ // If this instruction's defs have special allocation requirements,
+ // don't break this anti-dependency.
+ AntiDepReg = 0;
+ else if (AntiDepReg) {
+ // If this instruction has a use of AntiDepReg, breaking it
+ // is invalid.
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg == 0) continue;
+ if (MO.isUse() && AntiDepReg == Reg) {
+ AntiDepReg = 0;
+ break;
+ }
}
}
return Changed;
}
+/// StartBlockForKills - Initialize register live-range state for updating kills
+///
+void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
+ // Initialize the indices to indicate that no registers are live.
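+ // (Unlike StartBlock, only KillIndices is needed: kill-flag fixup tracks
+ // which registers are live, not where they were last defined.)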
+ std::fill(KillIndices, array_endof(KillIndices), ~0u);
+
+ // Determine the live-out physregs for this block.
+ if (!BB->empty() && BB->back().getDesc().isReturn()) {
+ // In a return block, examine the function live-out regs.
+ for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
+ E = MRI.liveout_end(); I != E; ++I) {
+ unsigned Reg = *I;
+ KillIndices[Reg] = BB->size();
+ // Repeat, for all subregs.
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg) {
+ KillIndices[*Subreg] = BB->size();
+ }
+ }
+ }
+ else {
+ // In a non-return block, examine the live-in regs of all successors.
+ for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
+ SE = BB->succ_end(); SI != SE; ++SI) {
+ for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
+ E = (*SI)->livein_end(); I != E; ++I) {
+ unsigned Reg = *I;
+ KillIndices[Reg] = BB->size();
+ // Repeat, for all subregs.
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg) {
+ KillIndices[*Subreg] = BB->size();
+ }
+ }
+ }
+ }
+}
+
+bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
+ MachineOperand &MO) {
+ // Setting kill flag...
+ if (!MO.isKill()) {
+ MO.setIsKill(true);
+ return false;
+ }
+
+ // If MO itself is live, clear the kill flag...
+ if (KillIndices[MO.getReg()] != ~0u) {
+ MO.setIsKill(false);
+ return false;
+ }
+
+ // If any subreg of MO is live, then create an imp-def for that
+ // subreg to keep it alive.
+ bool AllDead = true;
+ const unsigned SuperReg = MO.getReg();
+ for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
+ *Subreg; ++Subreg) {
+ if (KillIndices[*Subreg] != ~0u) {
+ MI->addOperand(MachineOperand::CreateReg(*Subreg,
+ true /*IsDef*/,
+ true /*IsImp*/,
+ false /*IsKill*/,
+ false /*IsDead*/));
+ AllDead = false;
+ }
+ }
+
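+ // If every subreg was dead, this use truly ends the live range and the
+ // kill flag stays set; otherwise the imp-defs added above keep the live
+ // subregs alive, so the kill flag is cleared.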
+ MO.setIsKill(AllDead);
+ return false;
+}
+
+/// FixupKills - Fix the register kill flags that may have been made
+/// incorrect by instruction reordering.
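+/// For example, if scheduling moves what was the last use of a register
+/// above another use, the kill flag must move to the new last use.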
+///
+void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
+ DEBUG(errs() << "Fixup kills for BB ID#" << MBB->getNumber() << '\n');
+
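+ // Walk the block bottom-up, maintaining KillIndices as a liveness set: a
+ // def ends a live range, a use gets the kill flag only when the register
+ // (and all of its subregs) is dead below it, and each use then makes its
+ // register live again.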
+ std::set<unsigned> killedRegs;
+ BitVector ReservedRegs = TRI->getReservedRegs(MF);
+
+ StartBlockForKills(MBB);
+
+ // Examine block from end to start...
+ unsigned Count = MBB->size();
+ for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
+ I != E; --Count) {
+ MachineInstr *MI = --I;
+
+ // Update liveness. Registers that are def'd but not used in this
+ // instruction are now dead. Mark the register and all of its subregs,
+ // since this def completely redefines them.
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg()) continue;
+ unsigned Reg = MO.getReg();
+ if (Reg == 0) continue;
+ if (!MO.isDef()) continue;
+ // Ignore two-addr defs; the tied use reads the register, so it stays
+ // live above this instruction.
+ if (MI->isRegTiedToUseOperand(i)) continue;
+
+ KillIndices[Reg] = ~0u;
+
+ // Repeat for all subregs.
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg) {
+ KillIndices[*Subreg] = ~0u;
+ }
+ }
+
+ // Examine all used registers and set/clear kill flag. When a
+ // register is used multiple times we only set the kill flag on
+ // the first use.
+ killedRegs.clear();
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isUse()) continue;
+ unsigned Reg = MO.getReg();
+ if ((Reg == 0) || ReservedRegs.test(Reg)) continue;
+
+ bool kill = false;
+ if (killedRegs.find(Reg) == killedRegs.end()) {
+ kill = true;
+ // A register is not killed if any subregs are live...
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg) {
+ if (KillIndices[*Subreg] != ~0u) {
+ kill = false;
+ break;
+ }
+ }
+
+ // If no subreg is live, then this use kills the register if the
+ // register is not already live below this point.
+ if (kill)
+ kill = (KillIndices[Reg] == ~0u);
+ }
+
+ if (MO.isKill() != kill) {
+ bool removed = ToggleKillFlag(MI, MO);
+ if (removed) {
+ DEBUG(errs() << "Fixed <removed> in ");
+ } else {
+ DEBUG(errs() << "Fixed " << MO << " in ");
+ }
+ DEBUG(MI->dump());
+ }
+
+ killedRegs.insert(Reg);
+ }
+
+ // Mark any used register (except undef uses) and its subregs as
+ // now live...
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
+ unsigned Reg = MO.getReg();
+ if ((Reg == 0) || ReservedRegs.test(Reg)) continue;
+
+ KillIndices[Reg] = Count;
+
+ for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
+ *Subreg; ++Subreg) {
+ KillIndices[*Subreg] = Count;
+ }
+ }
+ }
+}
+
//===----------------------------------------------------------------------===//
// Top-Down Scheduling
//===----------------------------------------------------------------------===//
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
SUnit *SuccSU = SuccEdge->getSUnit();
- --SuccSU->NumPredsLeft;
-
+
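+ // NumPredsLeft is unsigned, so check it for zero before decrementing
+ // instead of checking for a negative value afterward.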
#ifndef NDEBUG
- if (SuccSU->NumPredsLeft < 0) {
- cerr << "*** Scheduling failed! ***\n";
+ if (SuccSU->NumPredsLeft == 0) {
+ errs() << "*** Scheduling failed! ***\n";
SuccSU->dump(this);
- cerr << " has been released too many times!\n";
+ errs() << " has been released too many times!\n";
llvm_unreachable(0);
}
#endif
-
+ --SuccSU->NumPredsLeft;
+
// Compute how many cycles it will be before this actually becomes
// available. This is the max of the start time of all predecessors plus
// their latencies.
}
}
+ // In any cycle where we can't schedule any instructions, we must
+ // stall or emit a noop, depending on the target.
+ bool CycleHasInsts = false;
+
// While Available queue is not empty, grab the node with the highest
// priority. If it is not ready put it back. Schedule the node.
std::vector<SUnit*> NotReady;
if (FoundSUnit) {
ScheduleNodeTopDown(FoundSUnit, CurCycle);
HazardRec->EmitInstruction(FoundSUnit);
+ CycleHasInsts = true;
// If we are using the target-specific hazards, then don't
// advance the cycle time just because we schedule a node. If
if (FoundSUnit->Latency) // Don't increment CurCycle for pseudo-ops!
++CurCycle;
}
- } else if (!HasNoopHazards) {
- // Otherwise, we have a pipeline stall, but no other problem, just advance
- // the current cycle and try again.
- DEBUG(errs() << "*** Advancing cycle, no work to do\n");
- HazardRec->AdvanceCycle();
- ++NumStalls;
- ++CurCycle;
} else {
- // Otherwise, we have no instructions to issue and we have instructions
- // that will fault if we don't do this right. This is the case for
- // processors without pipeline interlocks and other cases.
- DEBUG(errs() << "*** Emitting noop\n");
- HazardRec->EmitNoop();
- Sequence.push_back(0); // NULL here means noop
- ++NumNoops;
+ if (CycleHasInsts) {
+ DEBUG(errs() << "*** Finished cycle " << CurCycle << '\n');
+ HazardRec->AdvanceCycle();
+ } else if (!HasNoopHazards) {
+ // Otherwise, we have a pipeline stall, but no other problem,
+ // just advance the current cycle and try again.
+ DEBUG(errs() << "*** Stall in cycle " << CurCycle << '\n');
+ HazardRec->AdvanceCycle();
+ ++NumStalls;
+ } else {
+ // Otherwise, we have no instructions to issue and we have instructions
+ // that will fault if we don't do this right. This is the case for
+ // processors without pipeline interlocks and other cases.
+ DEBUG(errs() << "*** Emitting noop in cycle " << CurCycle << '\n');
+ HazardRec->EmitNoop();
+ Sequence.push_back(0); // NULL here means noop
+ ++NumNoops;
+ }
+
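+ // No instruction could be issued on this pass through the loop, so the
+ // current cycle is over (finished, stalled, or padded with a noop above);
+ // advance to the next cycle and reset the per-cycle flag.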
++CurCycle;
+ CycleHasInsts = false;
}
}