diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp
index b02f3b6e1e8..ed066724374 100644
--- a/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -13,14 +13,15 @@
 //===----------------------------------------------------------------------===//
 
 #define DEBUG_TYPE "sched-instrs"
-#include "ScheduleDAGInstrs.h"
 #include "llvm/Operator.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 #include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/CodeGen/ScheduleDAGInstrs.h"
 #include "llvm/MC/MCInstrItineraries.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetInstrInfo.h"
@@ -33,25 +34,17 @@ using namespace llvm;
 
 ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                      const MachineLoopInfo &mli,
-                                     const MachineDominatorTree &mdt)
+                                     const MachineDominatorTree &mdt,
+                                     bool IsPostRAFlag,
+                                     LiveIntervals *lis)
   : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
-    InstrItins(mf.getTarget().getInstrItineraryData()),
-    Defs(TRI->getNumRegs()), Uses(TRI->getNumRegs()),
-    LoopRegs(MLI, MDT), FirstDbgValue(0) {
+    InstrItins(mf.getTarget().getInstrItineraryData()), LIS(lis),
+    IsPostRA(IsPostRAFlag), UnitLatencies(false), LoopRegs(MLI, MDT),
+    FirstDbgValue(0) {
+  assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals");
   DbgValues.clear();
-}
-
-/// Run - perform scheduling.
-///
-void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
-                            MachineBasicBlock::iterator begin,
-                            MachineBasicBlock::iterator end,
-                            unsigned endcount) {
-  BB = bb;
-  Begin = begin;
-  InsertPosIndex = endcount;
-
-  ScheduleDAG::Run(bb, end);
+  assert(!(IsPostRA && MRI.getNumVirtRegs()) &&
+         "Virtual registers must be removed prior to PostRA scheduling");
 }
 
 /// getUnderlyingObjectFromInt - This is the function that does the work of
@@ -133,19 +126,57 @@ static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
   return 0;
 }
 
-void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
+void ScheduleDAGInstrs::startBlock(MachineBasicBlock *BB) {
   LoopRegs.Deps.clear();
   if (MachineLoop *ML = MLI.getLoopFor(BB))
-    if (BB == ML->getLoopLatch()) {
-      MachineBasicBlock *Header = ML->getHeader();
-      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
-           E = Header->livein_end(); I != E; ++I)
-        LoopLiveInRegs.insert(*I);
+    if (BB == ML->getLoopLatch())
       LoopRegs.VisitLoop(ML);
-    }
 }
 
-/// AddSchedBarrierDeps - Add dependencies from instructions in the current
+void ScheduleDAGInstrs::finishBlock() {
+  // Nothing to do.
+}
+
+/// Initialize the map with the number of registers.
+void Reg2SUnitsMap::setRegLimit(unsigned Limit) {
+  PhysRegSet.setUniverse(Limit);
+  SUnits.resize(Limit);
+}
+
+/// Clear the map without deallocating storage.
+void Reg2SUnitsMap::clear() {
+  for (const_iterator I = reg_begin(), E = reg_end(); I != E; ++I) {
+    SUnits[*I].clear();
+  }
+  PhysRegSet.clear();
+}
+
+/// Initialize the DAG and common scheduler state for the current scheduling
+/// region. This does not actually create the DAG, only clears it. The
+/// scheduling driver may call BuildSchedGraph multiple times per scheduling
+/// region.
+void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb,
+                                    MachineBasicBlock::iterator begin,
+                                    MachineBasicBlock::iterator end,
+                                    unsigned endcount)
+{
+  BB = bb;
+  RegionBegin = begin;
+  RegionEnd = end;
+  EndIndex = endcount;
+
+  // Check to see if the scheduler cares about latencies.
+  UnitLatencies = forceUnitLatencies();
+
+  ScheduleDAG::clearDAG();
+}
+
+/// Close the current scheduling region. Don't clear any state in case the
+/// driver wants to refer to the previous scheduling region.
+void ScheduleDAGInstrs::exitRegion() {
+  // Nothing to do.
+}
+
+/// addSchedBarrierDeps - Add dependencies from instructions in the current
 /// list of instructions being scheduled to scheduling barrier by adding
 /// the exit SU to the register defs and use list. This is because we want to
 /// make sure instructions which define registers that are either used by
@@ -153,8 +184,8 @@ void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
 /// especially important when the definition latency of the return value(s)
 /// are too high to be hidden by the branch or when the liveout registers
 /// used by instructions in the fallthrough block.
-void ScheduleDAGInstrs::AddSchedBarrierDeps() {
-  MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
+void ScheduleDAGInstrs::addSchedBarrierDeps() {
+  MachineInstr *ExitMI = RegionEnd != BB->end() ? &*RegionEnd : 0;
   ExitSU.setInstr(ExitMI);
   bool AllDepKnown = ExitMI &&
     (ExitMI->isCall() || ExitMI->isBarrier());
@@ -167,8 +198,10 @@ void ScheduleDAGInstrs::AddSchedBarrierDeps() {
       unsigned Reg = MO.getReg();
       if (Reg == 0) continue;
 
-      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
-      Uses[Reg].push_back(&ExitSU);
+      if (TRI->isPhysicalRegister(Reg))
+        Uses[Reg].push_back(&ExitSU);
+      else
+        assert(!IsPostRA && "Virtual register encountered after regalloc.");
     }
   } else {
     // For others, e.g. fallthrough, conditional branch, assume the exit
@@ -185,11 +218,288 @@ void ScheduleDAGInstrs::AddSchedBarrierDeps() {
   }
 }
 
-void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
-  // We'll be allocating one SUnit for each instruction, plus one for
-  // the region exit node.
+/// MO is an operand of SU's instruction that defines a physical register. Add
+/// data dependencies from SU to any uses of the physical register.
+void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU,
+                                           const MachineOperand &MO) {
+  assert(MO.isDef() && "expect physreg def");
+
+  // Ask the target if address-backscheduling is desirable, and if so how much.
+  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
+  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
+  unsigned DataLatency = SU->Latency;
+
+  for (const uint16_t *Alias = TRI->getOverlaps(MO.getReg()); *Alias; ++Alias) {
+    if (!Uses.contains(*Alias))
+      continue;
+    std::vector<SUnit*> &UseList = Uses[*Alias];
+    for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
+      SUnit *UseSU = UseList[i];
+      if (UseSU == SU)
+        continue;
+      unsigned LDataLatency = DataLatency;
+      // Optionally add in a special extra latency for nodes that
+      // feed addresses.
+      // TODO: Perhaps we should get rid of
+      // SpecialAddressLatency and just move this into
+      // adjustSchedDependency for the targets that care about it.
+      if (SpecialAddressLatency != 0 && !UnitLatencies &&
+          UseSU != &ExitSU) {
+        MachineInstr *UseMI = UseSU->getInstr();
+        const MCInstrDesc &UseMCID = UseMI->getDesc();
+        int RegUseIndex = UseMI->findRegisterUseOperandIdx(*Alias);
+        assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
+        if (RegUseIndex >= 0 &&
+            (UseMI->mayLoad() || UseMI->mayStore()) &&
+            (unsigned)RegUseIndex < UseMCID.getNumOperands() &&
+            UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
+          LDataLatency += SpecialAddressLatency;
+      }
+      // Adjust the dependence latency using operand def/use
+      // information (if any), and then allow the target to
+      // perform its own adjustments.
+      const SDep& dep = SDep(SU, SDep::Data, LDataLatency, *Alias);
+      if (!UnitLatencies) {
+        computeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
+        ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
+      }
+      UseSU->addPred(dep);
+    }
+  }
+}
+
+/// addPhysRegDeps - Add register dependencies (data, anti, and output) from
+/// this SUnit to following instructions in the same scheduling region that
+/// depend on the physical register referenced at OperIdx.
+void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
+  const MachineInstr *MI = SU->getInstr();
+  const MachineOperand &MO = MI->getOperand(OperIdx);
+
+  // Optionally add output and anti dependencies. For anti
+  // dependencies we use a latency of 0 because for a multi-issue
+  // target we want to allow the defining instruction to issue
+  // in the same cycle as the using instruction.
+  // TODO: Using a latency of 1 here for output dependencies assumes
+  //       there's no cost for reusing registers.
+  SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
+  for (const uint16_t *Alias = TRI->getOverlaps(MO.getReg()); *Alias; ++Alias) {
+    if (!Defs.contains(*Alias))
+      continue;
+    std::vector<SUnit*> &DefList = Defs[*Alias];
+    for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
+      SUnit *DefSU = DefList[i];
+      if (DefSU == &ExitSU)
+        continue;
+      if (DefSU != SU &&
+          (Kind != SDep::Output || !MO.isDead() ||
+           !DefSU->getInstr()->registerDefIsDead(*Alias))) {
+        if (Kind == SDep::Anti)
+          DefSU->addPred(SDep(SU, Kind, 0, /*Reg=*/*Alias));
+        else {
+          unsigned AOLat = TII->getOutputLatency(InstrItins, MI, OperIdx,
+                                                 DefSU->getInstr());
+          DefSU->addPred(SDep(SU, Kind, AOLat, /*Reg=*/*Alias));
+        }
+      }
+    }
+  }
+
+  if (!MO.isDef()) {
+    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
+    // retrieve the existing SUnits list for this register's uses.
+    // Push this SUnit on the use list.
+    Uses[MO.getReg()].push_back(SU);
+  }
+  else {
+    addPhysRegDataDeps(SU, MO);
+
+    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
+    // retrieve the existing SUnits list for this register's defs.
+    std::vector<SUnit*> &DefList = Defs[MO.getReg()];
+
+    // If a def is going to wrap back around to the top of the loop,
+    // backschedule it.
+    if (!UnitLatencies && DefList.empty()) {
+      LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(MO.getReg());
+      if (I != LoopRegs.Deps.end()) {
+        const MachineOperand *UseMO = I->second.first;
+        unsigned Count = I->second.second;
+        const MachineInstr *UseMI = UseMO->getParent();
+        unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
+        const MCInstrDesc &UseMCID = UseMI->getDesc();
+        const TargetSubtargetInfo &ST =
+          TM.getSubtarget<TargetSubtargetInfo>();
+        unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
+        // TODO: If we knew the total depth of the region here, we could
+        // handle the case where the whole loop is inside the region but
+        // is large enough that the isScheduleHigh trick isn't needed.
+        if (UseMOIdx < UseMCID.getNumOperands()) {
+          // Currently, we only support scheduling regions consisting of
+          // single basic blocks. Check to see if the instruction is in
+          // the same region by checking to see if it has the same parent.
+          if (UseMI->getParent() != MI->getParent()) {
+            unsigned Latency = SU->Latency;
+            if (UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass())
+              Latency += SpecialAddressLatency;
+            // This is a wild guess as to the portion of the latency which
+            // will be overlapped by work done outside the current
+            // scheduling region.
+            Latency -= std::min(Latency, Count);
+            // Add the artificial edge.
+            ExitSU.addPred(SDep(SU, SDep::Order, Latency,
+                                /*Reg=*/0, /*isNormalMemory=*/false,
+                                /*isMustAlias=*/false,
+                                /*isArtificial=*/true));
+          } else if (SpecialAddressLatency > 0 &&
+                     UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
+            // The entire loop body is within the current scheduling region
+            // and the latency of this operation is assumed to be greater
+            // than the latency of the loop.
+            // TODO: Recursively mark data-edge predecessors as
+            //       isScheduleHigh too.
+            SU->isScheduleHigh = true;
+          }
+        }
+        LoopRegs.Deps.erase(I);
+      }
+    }
+
+    // clear this register's use list
+    if (Uses.contains(MO.getReg()))
+      Uses[MO.getReg()].clear();
+
+    if (!MO.isDead())
+      DefList.clear();
+
+    // Calls will not be reordered because of chain dependencies (see
+    // below). Since call operands are dead, calls may continue to be added
+    // to the DefList making dependence checking quadratic in the size of
+    // the block. Instead, we leave only one call at the back of the
+    // DefList.
+    if (SU->isCall) {
+      while (!DefList.empty() && DefList.back()->isCall)
+        DefList.pop_back();
+    }
+    // Defs are pushed in the order they are visited and never reordered.
+    DefList.push_back(SU);
+  }
+}
+
+/// addVRegDefDeps - Add register output and data dependencies from this SUnit
+/// to instructions that occur later in the same scheduling region if they read
+/// from or write to the virtual register defined at OperIdx.
+///
+/// TODO: Hoist loop induction variable increments. This has to be
+/// reevaluated. Generally, IV scheduling should be done before coalescing.
+void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
+  const MachineInstr *MI = SU->getInstr();
+  unsigned Reg = MI->getOperand(OperIdx).getReg();
+
+  // SSA defs do not have output/anti dependencies.
+  // The current operand is a def, so we have at least one.
+  if (llvm::next(MRI.def_begin(Reg)) == MRI.def_end())
+    return;
+
+  // Add output dependence to the next nearest def of this vreg.
+  //
+  // Unless this definition is dead, the output dependence should be
+  // transitively redundant with antidependencies from this definition's
+  // uses. We're conservative for now until we have a way to guarantee the uses
+  // are not eliminated sometime during scheduling. The output dependence edge
+  // is also useful if output latency exceeds def-use latency.
+  VReg2SUnitMap::iterator DefI = findVRegDef(Reg);
+  if (DefI == VRegDefs.end())
+    VRegDefs.insert(VReg2SUnit(Reg, SU));
+  else {
+    SUnit *DefSU = DefI->SU;
+    if (DefSU != SU && DefSU != &ExitSU) {
+      unsigned OutLatency = TII->getOutputLatency(InstrItins, MI, OperIdx,
+                                                  DefSU->getInstr());
+      DefSU->addPred(SDep(SU, SDep::Output, OutLatency, Reg));
+    }
+    DefI->SU = SU;
+  }
+}
+
+/// addVRegUseDeps - Add a register data dependency if the instruction that
+/// defines the virtual register used at OperIdx is mapped to an SUnit. Add a
+/// register antidependency from this SUnit to instructions that occur later in
+/// the same scheduling region if they write the virtual register.
+///
+/// TODO: Handle ExitSU "uses" properly.
+void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
+  MachineInstr *MI = SU->getInstr();
+  unsigned Reg = MI->getOperand(OperIdx).getReg();
+
+  // Lookup this operand's reaching definition.
+  assert(LIS && "vreg dependencies require LiveIntervals");
+  SlotIndex UseIdx = LIS->getInstructionIndex(MI).getRegSlot();
+  LiveInterval *LI = &LIS->getInterval(Reg);
+  VNInfo *VNI = LI->getVNInfoBefore(UseIdx);
+  // VNI will be valid because MachineOperand::readsReg() is checked by caller.
+  MachineInstr *Def = LIS->getInstructionFromIndex(VNI->def);
+  // Phis and other noninstructions (after coalescing) have a NULL Def.
+  if (Def) {
+    SUnit *DefSU = getSUnit(Def);
+    if (DefSU) {
+      // The reaching Def lives within this scheduling region.
+      // Create a data dependence.
+      //
+      // TODO: Handle "special" address latencies cleanly.
+      const SDep &dep = SDep(DefSU, SDep::Data, DefSU->Latency, Reg);
+      if (!UnitLatencies) {
+        // Adjust the dependence latency using operand def/use information, then
+        // allow the target to perform its own adjustments.
+        computeOperandLatency(DefSU, SU, const_cast<SDep &>(dep));
+        const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
+        ST.adjustSchedDependency(DefSU, SU, const_cast<SDep &>(dep));
+      }
+      SU->addPred(dep);
+    }
+  }
+
+  // Add antidependence to the following def of the vreg it uses.
+  VReg2SUnitMap::iterator DefI = findVRegDef(Reg);
+  if (DefI != VRegDefs.end() && DefI->SU != SU)
+    DefI->SU->addPred(SDep(SU, SDep::Anti, 0, Reg));
+}
+
+/// Create an SUnit for each real instruction, numbered in top-down topological
+/// order. The instruction order A < B implies that no edge exists from B to A.
+///
+/// Map each real instruction to its SUnit.
+///
+/// After initSUnits, the SUnits vector cannot be resized and the scheduler
+/// may hang onto SUnit pointers. We may relax this in the future by using SUnit
+/// IDs instead of pointers.
+void ScheduleDAGInstrs::initSUnits() {
+  // We'll be allocating one SUnit for each real instruction in the region,
+  // which is contained within a basic block.
   SUnits.reserve(BB->size());
 
+  for (MachineBasicBlock::iterator I = RegionBegin; I != RegionEnd; ++I) {
+    MachineInstr *MI = I;
+    if (MI->isDebugValue())
+      continue;
+
+    SUnit *SU = newSUnit(MI);
+    MISUnitMap[MI] = SU;
+
+    SU->isCall = MI->isCall();
+    SU->isCommutable = MI->isCommutable();
+
+    // Assign the Latency field of SU using target-provided information.
+    if (UnitLatencies)
+      SU->Latency = 1;
+    else
+      computeLatency(SU);
+  }
+}
+
+void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA) {
+  // Create an SUnit for each real instruction.
+  initSUnits();
+
   // We build scheduling units by walking a block's instruction list from bottom
   // to top.
 
@@ -203,29 +513,29 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
   std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
   std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;
 
-  // Check to see if the scheduler cares about latencies.
-  bool UnitLatencies = ForceUnitLatencies();
-
-  // Ask the target if address-backscheduling is desirable, and if so how much.
-  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
-  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
-
   // Remove any stale debug info; sometimes BuildSchedGraph is called again
   // without emitting the info from the previous call.
   DbgValues.clear();
   FirstDbgValue = NULL;
 
+  assert(Defs.empty() && Uses.empty() &&
+         "Only BuildGraph should update Defs/Uses");
+  Defs.setRegLimit(TRI->getNumRegs());
+  Uses.setRegLimit(TRI->getNumRegs());
+
+  assert(VRegDefs.empty() && "Only BuildSchedGraph may access VRegDefs");
+  // FIXME: Allow SparseSet to reserve space for the creation of virtual
+  // registers during scheduling. Don't artificially inflate the Universe
+  // because we want to assert that vregs are not created during DAG building.
+  VRegDefs.setUniverse(MRI.getNumVirtRegs());
+
   // Model data dependencies between instructions being scheduled and the
   // ExitSU.
-  AddSchedBarrierDeps();
-
-  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
-    assert(Defs[i].empty() && "Only BuildGraph should push/pop Defs");
-  }
+  addSchedBarrierDeps();
 
   // Walk the list of instructions, from bottom moving up.
   MachineInstr *PrevMI = NULL;
-  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
+  for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin;
       MII != MIE; --MII) {
     MachineInstr *MI = prior(MII);
     if (MI && PrevMI) {
@@ -240,16 +550,9 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
     assert(!MI->isTerminator() && !MI->isLabel() &&
            "Cannot schedule terminators or labels!");
 
-    // Create the SUnit for this MI.
-    SUnit *SU = NewSUnit(MI);
-    SU->isCall = MI->isCall();
-    SU->isCommutable = MI->isCommutable();
 
-    // Assign the Latency field of SU using target-provided information.
-    if (UnitLatencies)
-      SU->Latency = 1;
-    else
-      ComputeLatency(SU);
+    SUnit *SU = MISUnitMap[MI];
+    assert(SU && "No SUnit mapped to this MI");
 
     // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
@@ -258,152 +561,14 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
       unsigned Reg = MO.getReg();
       if (Reg == 0) continue;
 
-      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
-
-      // Optionally add output and anti dependencies. For anti
-      // dependencies we use a latency of 0 because for a multi-issue
-      // target we want to allow the defining instruction to issue
-      // in the same cycle as the using instruction.
-      // TODO: Using a latency of 1 here for output dependencies assumes
-      //       there's no cost for reusing registers.
-      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
-      for (const unsigned *Alias = TRI->getOverlaps(Reg); *Alias; ++Alias) {
-        std::vector<SUnit *> &DefList = Defs[*Alias];
-        for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
-          SUnit *DefSU = DefList[i];
-          if (DefSU == &ExitSU)
-            continue;
-          if (DefSU != SU &&
-              (Kind != SDep::Output || !MO.isDead() ||
-               !DefSU->getInstr()->registerDefIsDead(*Alias))) {
-            if (Kind == SDep::Anti)
-              DefSU->addPred(SDep(SU, Kind, 0, /*Reg=*/*Alias));
-            else {
-              unsigned AOLat = TII->getOutputLatency(InstrItins, MI, j,
-                                                     DefSU->getInstr());
-              DefSU->addPred(SDep(SU, Kind, AOLat, /*Reg=*/*Alias));
-            }
-          }
-        }
-      }
-
-      // Retrieve the UseList to add data dependencies and update uses.
-      std::vector<SUnit *> &UseList = Uses[Reg];
-      if (MO.isDef()) {
-        // Update DefList. Defs are pushed in the order they are visited and
-        // never reordered.
-        std::vector<SUnit *> &DefList = Defs[Reg];
-
-        // Add any data dependencies.
-        unsigned DataLatency = SU->Latency;
-        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
-          SUnit *UseSU = UseList[i];
-          if (UseSU == SU)
-            continue;
-          unsigned LDataLatency = DataLatency;
-          // Optionally add in a special extra latency for nodes that
-          // feed addresses.
-          // TODO: Do this for register aliases too.
-          // TODO: Perhaps we should get rid of
-          // SpecialAddressLatency and just move this into
-          // adjustSchedDependency for the targets that care about it.
-          if (SpecialAddressLatency != 0 && !UnitLatencies &&
-              UseSU != &ExitSU) {
-            MachineInstr *UseMI = UseSU->getInstr();
-            const MCInstrDesc &UseMCID = UseMI->getDesc();
-            int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
-            assert(RegUseIndex >= 0 && "UseMI doesn's use register!");
-            if (RegUseIndex >= 0 &&
-                (UseMI->mayLoad() || UseMI->mayStore()) &&
-                (unsigned)RegUseIndex < UseMCID.getNumOperands() &&
-                UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
-              LDataLatency += SpecialAddressLatency;
-          }
-          // Adjust the dependence latency using operand def/use
-          // information (if any), and then allow the target to
-          // perform its own adjustments.
-          const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
-          if (!UnitLatencies) {
-            ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
-            ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
-          }
-          UseSU->addPred(dep);
-        }
-        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
-          std::vector<SUnit *> &UseList = Uses[*Alias];
-          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
-            SUnit *UseSU = UseList[i];
-            if (UseSU == SU)
-              continue;
-            const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
-            if (!UnitLatencies) {
-              ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
-              ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
-            }
-            UseSU->addPred(dep);
-          }
-        }
-
-        // If a def is going to wrap back around to the top of the loop,
-        // backschedule it.
-        if (!UnitLatencies && DefList.empty()) {
-          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
-          if (I != LoopRegs.Deps.end()) {
-            const MachineOperand *UseMO = I->second.first;
-            unsigned Count = I->second.second;
-            const MachineInstr *UseMI = UseMO->getParent();
-            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
-            const MCInstrDesc &UseMCID = UseMI->getDesc();
-            // TODO: If we knew the total depth of the region here, we could
-            // handle the case where the whole loop is inside the region but
-            // is large enough that the isScheduleHigh trick isn't needed.
-            if (UseMOIdx < UseMCID.getNumOperands()) {
-              // Currently, we only support scheduling regions consisting of
-              // single basic blocks. Check to see if the instruction is in
-              // the same region by checking to see if it has the same parent.
-              if (UseMI->getParent() != MI->getParent()) {
-                unsigned Latency = SU->Latency;
-                if (UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass())
-                  Latency += SpecialAddressLatency;
-                // This is a wild guess as to the portion of the latency which
-                // will be overlapped by work done outside the current
-                // scheduling region.
-                Latency -= std::min(Latency, Count);
-                // Add the artificial edge.
-                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
-                                    /*Reg=*/0, /*isNormalMemory=*/false,
-                                    /*isMustAlias=*/false,
-                                    /*isArtificial=*/true));
-              } else if (SpecialAddressLatency > 0 &&
-                         UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
-                // The entire loop body is within the current scheduling region
-                // and the latency of this operation is assumed to be greater
-                // than the latency of the loop.
-                // TODO: Recursively mark data-edge predecessors as
-                //       isScheduleHigh too.
-                SU->isScheduleHigh = true;
-              }
-            }
-            LoopRegs.Deps.erase(I);
-          }
-        }
-
-        UseList.clear();
-        if (!MO.isDead())
-          DefList.clear();
-
-        // Calls will not be reordered because of chain dependencies (see
-        // below). Since call operands are dead, calls may continue to be added
-        // to the DefList making dependence checking quadratic in the size of
-        // the block. Instead, we leave only one call at the back of the
-        // DefList.
-        if (SU->isCall) {
-          while (!DefList.empty() && DefList.back()->isCall)
-            DefList.pop_back();
-        }
-        DefList.push_back(SU);
-      } else {
-        UseList.push_back(SU);
+      if (TRI->isPhysicalRegister(Reg))
+        addPhysRegDeps(SU, j);
+      else {
+        assert(!IsPostRA && "Virtual register encountered!");
+        if (MO.isDef())
+          addVRegDefDeps(SU, j);
+        else if (MO.readsReg()) // ignore undef operands
+          addVRegUseDeps(SU, j);
       }
     }
 
@@ -557,18 +722,14 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
   if (PrevMI)
     FirstDbgValue = PrevMI;
 
-  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
-    Defs[i].clear();
-    Uses[i].clear();
-  }
+  Defs.clear();
+  Uses.clear();
+  VRegDefs.clear();
   PendingLoads.clear();
+  MISUnitMap.clear();
 }
 
-void ScheduleDAGInstrs::FinishBlock() {
-  // Nothing to do.
-}
-
-void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
+void ScheduleDAGInstrs::computeLatency(SUnit *SU) {
   // Compute the latency for the node.
   if (!InstrItins || InstrItins->isEmpty()) {
     SU->Latency = 1;
@@ -582,7 +743,7 @@ void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
   }
 }
 
-void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
+void ScheduleDAGInstrs::computeOperandLatency(SUnit *Def, SUnit *Use,
                                               SDep& dep) const {
   if (!InstrItins || InstrItins->isEmpty())
     return;
@@ -607,7 +768,9 @@ void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
     // %Q1 = VMULv8i16 %Q1, %Q3, ...
     // What we want is to compute latency between def of %D6/%D7 and use of
     // %Q3 instead.
-    DefIdx = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
+    unsigned Op2 = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
+    if (DefMI->getOperand(Op2).isReg())
+      DefIdx = Op2;
   }
   MachineInstr *UseMI = Use->getInstr();
   // For all uses of the register, calculate the maxmimum latency
@@ -655,37 +818,8 @@ std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
   return oss.str();
 }
 
-// EmitSchedule - Emit the machine code in scheduled order.
-MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
-  Begin = InsertPos;
-
-  // If first instruction was a DBG_VALUE then put it back.
-  if (FirstDbgValue)
-    BB->splice(InsertPos, BB, FirstDbgValue);
-
-  // Then re-insert them according to the given schedule.
-  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
-    if (SUnit *SU = Sequence[i])
-      BB->splice(InsertPos, BB, SU->getInstr());
-    else
-      // Null SUnit* is a noop.
-      EmitNoop();
-
-    // Update the Begin iterator, as the first instruction in the block
-    // may have been scheduled later.
-    if (i == 0)
-      Begin = prior(InsertPos);
-  }
-
-  // Reinsert any remaining debug_values.
-  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
-         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
-    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
-    MachineInstr *DbgValue = P.first;
-    MachineBasicBlock::iterator OrigPrivMI = P.second;
-    BB->splice(++OrigPrivMI, BB, DbgValue);
-  }
-  DbgValues.clear();
-  FirstDbgValue = NULL;
-  return BB;
+/// Return the basic block label. It is not necessarily unique because a block
+/// contains multiple scheduling regions. But it is fine for visualization.
+std::string ScheduleDAGInstrs::getDAGName() const {
+  return "dag." + BB->getFullName();
 }
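
The hunks above replace the old Run()/EmitSchedule() entry points with an explicit region protocol: startBlock, enterRegion, buildSchedGraph, exitRegion, finishBlock. The sketch below shows one plausible way a driver pass could sequence those calls. It is illustrative only and not part of the patch: the function and variable names are hypothetical, each block is treated as a single region, and Sched stands in for a concrete ScheduleDAGInstrs subclass that supplies the actual scheduling algorithm.

// Hypothetical driver loop for the region API introduced in this patch.
// Real drivers may split a block into several regions at scheduling
// boundaries; here the span up to the first terminator is one region.
#include <iterator>
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
using namespace llvm;

static void scheduleAllBlocks(MachineFunction &MF, ScheduleDAGInstrs &Sched,
                              AliasAnalysis *AA) {
  for (MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
       MBBI != MBBE; ++MBBI) {
    MachineBasicBlock *MBB = &*MBBI;
    Sched.startBlock(MBB);

    // Terminators cannot be scheduled, so end the region before them.
    MachineBasicBlock::iterator RegionEnd = MBB->getFirstTerminator();
    unsigned Count = std::distance(MBB->begin(), RegionEnd);

    Sched.enterRegion(MBB, MBB->begin(), RegionEnd, Count);
    Sched.buildSchedGraph(AA); // clears and rebuilds the DAG; may be repeated
    // ... run the concrete scheduling algorithm over the SUnits here ...
    Sched.exitRegion();

    Sched.finishBlock();
  }
}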