//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/TargetSchedule.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
-#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/raw_ostream.h"
using namespace llvm;
// The machine model may explicitly specify an invalid latency, which
// effectively means infinite latency. Since users of the TargetSchedule API
// don't know how to handle this, we convert it to a very large latency that is
// easy to distinguish when debugging the DAG but won't induce overflow.
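// For example, a write latency encoded as -1 is reported as 1000 cycles
// rather than wrapping around when converted to unsigned.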
-static unsigned convertLatency(int Cycles) {
+static unsigned capLatency(int Cycles) {
return Cycles >= 0 ? Cycles : 1000;
}
-/// If we can determine the operand latency from the def only, without machine
-/// model or itinerary lookup, do so. Otherwise return -1.
-int TargetSchedModel::getDefLatency(const MachineInstr *DefMI,
- bool FindMin) const {
-
- // Return a latency based on the itinerary properties and defining instruction
- // if possible. Some common subtargets don't require per-operand latency,
- // especially for minimum latencies.
- if (FindMin) {
- // If MinLatency is invalid, then use the itinerary for MinLatency. If no
- // itinerary exists either, then use single cycle latency.
- if (SchedModel.MinLatency < 0 && !hasInstrItineraries()) {
- return 1;
- }
- return SchedModel.MinLatency;
- }
- else if (!hasInstrSchedModel() && !hasInstrItineraries()) {
- return TII->defaultDefLatency(&SchedModel, DefMI);
- }
- // ...operand lookup required
- return -1;
-}
-
/// Return the MCSchedClassDesc for this instruction. Some SchedClasses require
/// evaluation of predicates that depend on instruction operands or flags.
const MCSchedClassDesc *TargetSchedModel::
resolveSchedClass(const MachineInstr *MI) const {
// Get the definition's scheduling class descriptor from this machine model.
unsigned SchedClass = MI->getDesc().getSchedClass();
const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass);
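+ // Bail out early for instructions the machine model says nothing about.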
+ if (!SCDesc->isValid())
+ return SCDesc;
#ifndef NDEBUG
unsigned NIter = 0;
// Top-level API for clients that know the operand indices.
unsigned TargetSchedModel::computeOperandLatency(
const MachineInstr *DefMI, unsigned DefOperIdx,
- const MachineInstr *UseMI, unsigned UseOperIdx,
- bool FindMin) const {
+ const MachineInstr *UseMI, unsigned UseOperIdx) const {
- int DefLatency = getDefLatency(DefMI, FindMin);
- if (DefLatency >= 0)
- return DefLatency;
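+ // Without a per-operand machine model or an itinerary there is nothing to
+ // look up, so fall back to the target hook's default def latency.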
+ if (!hasInstrSchedModel() && !hasInstrItineraries())
+ return TII->defaultDefLatency(SchedModel, DefMI);
if (hasInstrItineraries()) {
int OperLatency = 0;
if (UseMI) {
- OperLatency =
- TII->getOperandLatency(&InstrItins, DefMI, DefOperIdx, UseMI, UseOperIdx);
+ OperLatency = TII->getOperandLatency(&InstrItins, DefMI, DefOperIdx,
+ UseMI, UseOperIdx);
}
else {
unsigned DefClass = DefMI->getDesc().getSchedClass();
// hook to allow subtargets to specialize latency. This hook is only
// applicable to the InstrItins model. InstrSchedModel should model all
// special cases without TII hooks.
- if (!FindMin)
- InstrLatency = std::max(InstrLatency,
- TII->defaultDefLatency(&SchedModel, DefMI));
+ InstrLatency = std::max(InstrLatency,
+ TII->defaultDefLatency(SchedModel, DefMI));
return InstrLatency;
}
- assert(!FindMin && hasInstrSchedModel() &&
- "Expected a SchedModel for this cpu");
+ // hasInstrSchedModel()
const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
unsigned DefIdx = findDefIdx(DefMI, DefOperIdx);
if (DefIdx < SCDesc->NumWriteLatencyEntries) {
const MCWriteLatencyEntry *WLEntry =
STI->getWriteLatencyEntry(SCDesc, DefIdx);
unsigned WriteID = WLEntry->WriteResourceID;
- unsigned Latency = convertLatency(WLEntry->Cycles);
+ unsigned Latency = capLatency(WLEntry->Cycles);
if (!UseMI)
return Latency;
// Lookup the use's latency adjustment in SubtargetInfo.
const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);
if (UseDesc->NumReadAdvanceEntries == 0)
return Latency;
unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);
- return Latency - STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
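+ // A ReadAdvance lets the use read its operand early. For example, a write
+ // latency of 4 with an advance of 2 gives an effective operand latency of 2,
+ // and an advance larger than the latency clamps to 0 instead of wrapping.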
+ int Advance = STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
+ if (Advance > 0 && (unsigned)Advance > Latency) // unsigned wrap
+ return 0;
+ return Latency - Advance;
}
// If DefIdx does not exist in the model (e.g. implicit defs), then return
// unit latency (defaultDefLatency may be too conservative).
#ifndef NDEBUG
if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit()
- && !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef()) {
- std::string Err;
- raw_string_ostream ss(Err);
- ss << "DefIdx " << DefIdx << " exceeds machine model writes for "
- << *DefMI;
- report_fatal_error(ss.str());
+ && !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef()
+ && SchedModel.isComplete()) {
+ errs() << "DefIdx " << DefIdx << " exceeds machine model writes for "
+ << *DefMI << " (Try with MCSchedModel.CompleteModel set to false)";
+ llvm_unreachable("incomplete machine model");
}
#endif
- return DefMI->isTransient() ? 0 : 1;
+ // FIXME: Automatically giving all implicit defs defaultDefLatency is
+ // undesirable. We should only do it for defs that are known to the MC
+ // desc like flags. Truly implicit defs should get 1 cycle latency.
+ return DefMI->isTransient() ? 0 : TII->defaultDefLatency(SchedModel, DefMI);
}
-unsigned TargetSchedModel::computeInstrLatency(const MachineInstr *MI) const {
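+// Model an instruction's latency as the worst case over its defs: the maximum
+// write latency among the scheduling class's entries.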
+unsigned
+TargetSchedModel::computeInstrLatency(const MCSchedClassDesc &SCDesc) const {
+ unsigned Latency = 0;
+ for (unsigned DefIdx = 0, DefEnd = SCDesc.NumWriteLatencyEntries;
+ DefIdx != DefEnd; ++DefIdx) {
+ // Lookup the definition's write latency in SubtargetInfo.
+ const MCWriteLatencyEntry *WLEntry =
+ STI->getWriteLatencyEntry(&SCDesc, DefIdx);
+ Latency = std::max(Latency, capLatency(WLEntry->Cycles));
+ }
+ return Latency;
+}
+
+unsigned TargetSchedModel::computeInstrLatency(unsigned Opcode) const {
+ assert(hasInstrSchedModel() && "Only call this function with a SchedModel");
+
+ unsigned SCIdx = TII->get(Opcode).getSchedClass();
+ const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SCIdx);
+
+ if (SCDesc->isValid() && !SCDesc->isVariant())
+ return computeInstrLatency(*SCDesc);
+
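+ // Variant scheduling classes need a MachineInstr to resolve their
+ // predicates, so an opcode-only query cannot compute a latency for them.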
+ llvm_unreachable("No MI sched latency");
+}
+
+unsigned
+TargetSchedModel::computeInstrLatency(const MachineInstr *MI,
+ bool UseDefaultDefLatency) const {
// For the itinerary model, fall back to the old subtarget hook.
// Allow subtargets to compute Bundle latencies outside the machine model.
- if (hasInstrItineraries() || MI->isBundle())
+ if (hasInstrItineraries() || MI->isBundle() ||
+ (!hasInstrSchedModel() && !UseDefaultDefLatency))
return TII->getInstrLatency(&InstrItins, MI);
if (hasInstrSchedModel()) {
const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
- if (SCDesc->isValid()) {
- unsigned Latency = 0;
- for (unsigned DefIdx = 0, DefEnd = SCDesc->NumWriteLatencyEntries;
- DefIdx != DefEnd; ++DefIdx) {
- // Lookup the definition's write latency in SubtargetInfo.
- const MCWriteLatencyEntry *WLEntry =
- STI->getWriteLatencyEntry(SCDesc, DefIdx);
- Latency = std::max(Latency, convertLatency(WLEntry->Cycles));
- }
- return Latency;
- }
+ if (SCDesc->isValid())
+ return computeInstrLatency(*SCDesc);
}
- return TII->defaultDefLatency(&SchedModel, MI);
+ return TII->defaultDefLatency(SchedModel, MI);
}
unsigned TargetSchedModel::
computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
const MachineInstr *DepMI) const {
- // MinLatency == -1 is for in-order processors that always have unit
- // MinLatency. MinLatency > 0 is for in-order processors with varying min
- // latencies, but since this is not a RAW dep, we always use unit latency.
- if (SchedModel.MinLatency != 0)
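+ // A micro-op buffer of at most one entry means ops effectively issue in
+ // order, so keep the conservative unit latency for this output dependence.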
+ if (SchedModel.MicroOpBufferSize <= 1)
return 1;
- // MinLatency == 0 indicates an out-of-order processor that can dispatch
+ // MicroOpBufferSize > 1 indicates an out-of-order processor that can dispatch
// WAW dependencies in the same cycle.
// Treat predication as a data dependency for out-of-order cpus. In-order
// cpus do not need to treat predicated writes specially. The extra
// isPredicated() check below works around readsRegister() returning false
// for predicated defs.
unsigned Reg = DefMI->getOperand(DefOperIdx).getReg();
const MachineFunction &MF = *DefMI->getParent()->getParent();
- const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!DepMI->readsRegister(Reg, TRI) && TII->isPredicated(DepMI))
return computeInstrLatency(DefMI);
if (SCDesc->isValid()) {
for (const MCWriteProcResEntry *PRI = STI->getWriteProcResBegin(SCDesc),
*PRE = STI->getWriteProcResEnd(SCDesc); PRI != PRE; ++PRI) {
- if (!SchedModel.getProcResource(PRI->ProcResourceIdx)->IsBuffered)
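+ // A BufferSize of zero marks an unbuffered resource; writes to it behave
+ // in order, so fall back to unit output latency.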
+ if (!SchedModel.getProcResource(PRI->ProcResourceIdx)->BufferSize)
return 1;
}
}