//===-- llvm/Target/TargetSchedule.cpp - Sched Machine Model ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a wrapper around MCSchedModel that allows the interface
// to benefit from information currently only available in TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool> EnableSchedModel("schedmodel", cl::Hidden, cl::init(true),
  cl::desc("Use TargetSchedModel for latency lookup"));

static cl::opt<bool> EnableSchedItins("scheditins", cl::Hidden, cl::init(true),
  cl::desc("Use InstrItineraryData for latency lookup"));
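
// Both options default to true. Assuming the standard cl::opt handling in
// tools that parse LLVM command-line options (e.g. llc), -schedmodel=false or
// -scheditins=false disables the corresponding lookup and forces the fallback
// paths used below.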

bool TargetSchedModel::hasInstrSchedModel() const {
  return EnableSchedModel && SchedModel.hasInstrSchedModel();
}

bool TargetSchedModel::hasInstrItineraries() const {
  return EnableSchedItins && !InstrItins.isEmpty();
}

void TargetSchedModel::init(const MCSchedModel &sm,
                            const TargetSubtargetInfo *sti,
                            const TargetInstrInfo *tii) {
  SchedModel = sm;
  STI = sti;
  TII = tii;
  STI->initInstrItins(InstrItins);
}

unsigned TargetSchedModel::getNumMicroOps(MachineInstr *MI) const {
  if (hasInstrItineraries()) {
    int UOps = InstrItins.getNumMicroOps(MI->getDesc().getSchedClass());
    return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, MI);
  }
  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
    if (SCDesc->isValid())
      return SCDesc->NumMicroOps;
  }
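  // Transient instructions (e.g. copy-like pseudos expected to disappear
  // before final emission) are modeled here as consuming no micro-ops.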
  return MI->isTransient() ? 0 : 1;
}

// The machine model may explicitly specify an invalid latency, which
// effectively means infinite latency. Since users of the TargetSchedule API
// don't know how to handle this, we convert it to a very large latency that is
// easy to distinguish when debugging the DAG but won't induce overflow.
static unsigned convertLatency(int Cycles) {
  return Cycles >= 0 ? Cycles : 1000;
}

/// If we can determine the operand latency from the def only, without machine
/// model or itinerary lookup, do so. Otherwise return -1.
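///
/// Callers treat a non-negative result as final and fall back to a full
/// per-operand lookup on -1, as computeOperandLatency() does below:
///   int DefLatency = getDefLatency(DefMI, FindMin);
///   if (DefLatency >= 0)
///     return DefLatency;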
int TargetSchedModel::getDefLatency(const MachineInstr *DefMI,
                                    bool FindMin) const {

  // Return a latency based on the itinerary properties and defining instruction
  // if possible. Some common subtargets don't require per-operand latency,
  // especially for minimum latencies.
  if (FindMin) {
    // If MinLatency is invalid, then use the itinerary for MinLatency. If no
    // itinerary exists either, then use single cycle latency.
    if (SchedModel.MinLatency < 0 && !hasInstrItineraries()) {
      return 1;
    }
    return SchedModel.MinLatency;
  }
  else if (!hasInstrSchedModel() && !hasInstrItineraries()) {
    return TII->defaultDefLatency(&SchedModel, DefMI);
  }
  // ...operand lookup required
  return -1;
}

/// Return the MCSchedClassDesc for this instruction. Some SchedClasses require
/// evaluation of predicates that depend on instruction operands or flags.
const MCSchedClassDesc *TargetSchedModel::
resolveSchedClass(const MachineInstr *MI) const {

  // Get the definition's scheduling class descriptor from this machine model.
  unsigned SchedClass = MI->getDesc().getSchedClass();
  const MCSchedClassDesc *SCDesc = SchedModel.getSchedClassDesc(SchedClass);

#ifndef NDEBUG
  unsigned NIter = 0;
#endif
  while (SCDesc->isVariant()) {
    assert(++NIter < 6 && "Variants are nested deeper than the magic number");

    SchedClass = STI->resolveSchedClass(SchedClass, MI, this);
    SCDesc = SchedModel.getSchedClassDesc(SchedClass);
  }
  return SCDesc;
}

/// Find the def index of this operand. This index maps to the machine model and
/// is independent of use operands. Def operands may be reordered with uses or
/// merged with uses without affecting the def index (e.g. before/after
/// regalloc). However, an instruction's def operands must never be reordered
/// with respect to each other.
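///
/// For example, given a hypothetical operand list [def %d0, use %u0, def %d1],
/// DefOperIdx 2 (%d1) maps to DefIdx 1: only preceding def operands are
/// counted.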
static unsigned findDefIdx(const MachineInstr *MI, unsigned DefOperIdx) {
  unsigned DefIdx = 0;
  for (unsigned i = 0; i != DefOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef())
      ++DefIdx;
  }
  return DefIdx;
}

/// Find the use index of this operand. This is independent of the instruction's
/// def operands.
///
/// Note that uses are not determined by the operand's isUse property, which
/// is simply the inverse of isDef. Here we consider any readsReg operand to be
/// a "use". The machine model allows an operand to be both a Def and Use.
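///
/// For example, given a hypothetical operand list [def %d0, use %u0, use %u1],
/// UseOperIdx 2 (%u1) maps to UseIdx 1: only preceding readsReg() operands are
/// counted.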
static unsigned findUseIdx(const MachineInstr *MI, unsigned UseOperIdx) {
  unsigned UseIdx = 0;
  for (unsigned i = 0; i != UseOperIdx; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.readsReg())
      ++UseIdx;
  }
  return UseIdx;
}

// Top-level API for clients that know the operand indices.
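//
// A minimal caller sketch (names here are illustrative, not part of this API):
// a scheduler that knows the def/use operand indices of a data edge would do
//   unsigned Lat = SchedModel.computeOperandLatency(DefMI, DefOpIdx,
//                                                   UseMI, UseOpIdx,
//                                                   /*FindMin=*/false);
// where SchedModel is a TargetSchedModel initialized via init() above.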
unsigned TargetSchedModel::computeOperandLatency(
  const MachineInstr *DefMI, unsigned DefOperIdx,
  const MachineInstr *UseMI, unsigned UseOperIdx,
  bool FindMin) const {

  int DefLatency = getDefLatency(DefMI, FindMin);
  if (DefLatency >= 0)
    return DefLatency;

  if (hasInstrItineraries()) {
    int OperLatency = 0;
    if (UseMI) {
      OperLatency =
        TII->getOperandLatency(&InstrItins, DefMI, DefOperIdx, UseMI, UseOperIdx);
    }
    else {
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      OperLatency = InstrItins.getOperandCycle(DefClass, DefOperIdx);
    }
    if (OperLatency >= 0)
      return OperLatency;

    // No operand latency was found.
    unsigned InstrLatency = TII->getInstrLatency(&InstrItins, DefMI);

    // Expected latency is the max of the stage latency and itinerary props.
    // Rather than directly querying InstrItins stage latency, we call a TII
    // hook to allow subtargets to specialize latency. This hook is only
    // applicable to the InstrItins model. InstrSchedModel should model all
    // special cases without TII hooks.
    if (!FindMin)
      InstrLatency = std::max(InstrLatency,
                              TII->defaultDefLatency(&SchedModel, DefMI));
    return InstrLatency;
  }

  assert(!FindMin && hasInstrSchedModel() &&
         "Expected a SchedModel for this cpu");
  const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
  unsigned DefIdx = findDefIdx(DefMI, DefOperIdx);
  if (DefIdx < SCDesc->NumWriteLatencyEntries) {
    // Lookup the definition's write latency in SubtargetInfo.
    const MCWriteLatencyEntry *WLEntry =
      STI->getWriteLatencyEntry(SCDesc, DefIdx);
    unsigned WriteID = WLEntry->WriteResourceID;
    unsigned Latency = convertLatency(WLEntry->Cycles);
    if (!UseMI)
      return Latency;

    // Lookup the use's latency adjustment in SubtargetInfo.
    const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);
    if (UseDesc->NumReadAdvanceEntries == 0)
      return Latency;
    unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);
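    // getReadAdvanceCycles() models operand forwarding: the returned number of
    // cycles is subtracted from the write's latency for this particular use.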
    return Latency - STI->getReadAdvanceCycles(UseDesc, UseIdx, WriteID);
  }

  // If DefIdx does not exist in the model (e.g. implicit defs), then return
  // unit latency (defaultDefLatency may be too conservative).
#ifndef NDEBUG
  if (SCDesc->isValid() && !DefMI->getOperand(DefOperIdx).isImplicit()
      && !DefMI->getDesc().OpInfo[DefOperIdx].isOptionalDef()) {
    std::string Err;
    raw_string_ostream ss(Err);
    ss << "DefIdx " << DefIdx << " exceeds machine model writes for "
       << *DefMI;
    report_fatal_error(ss.str());
  }
#endif
  return DefMI->isTransient() ? 0 : 1;
}

unsigned TargetSchedModel::computeInstrLatency(const MachineInstr *MI) const {
  // For the itinerary model, fall back to the old subtarget hook.
  // Allow subtargets to compute Bundle latencies outside the machine model.
  if (hasInstrItineraries() || MI->isBundle())
    return TII->getInstrLatency(&InstrItins, MI);

  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
    if (SCDesc->isValid()) {
      unsigned Latency = 0;
      for (unsigned DefIdx = 0, DefEnd = SCDesc->NumWriteLatencyEntries;
           DefIdx != DefEnd; ++DefIdx) {
        // Lookup the definition's write latency in SubtargetInfo.
        const MCWriteLatencyEntry *WLEntry =
          STI->getWriteLatencyEntry(SCDesc, DefIdx);
        Latency = std::max(Latency, convertLatency(WLEntry->Cycles));
      }
      return Latency;
    }
  }
  return TII->defaultDefLatency(&SchedModel, MI);
}

unsigned TargetSchedModel::
computeOutputLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                     const MachineInstr *DepMI) const {
  // MinLatency == -1 is for in-order processors that always have unit
  // MinLatency. MinLatency > 0 is for in-order processors with varying min
  // latencies, but since this is not a RAW dep, we always use unit latency.
  if (SchedModel.MinLatency != 0)
    return 1;

  // MinLatency == 0 indicates an out-of-order processor that can dispatch
  // WAW dependencies in the same cycle.
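  //
  // To summarize the convention used by this function:
  //   MinLatency == -1: in-order cpu, unit min latency everywhere.
  //   MinLatency  >  0: in-order cpu with varying min latencies.
  //   MinLatency ==  0: out-of-order cpu; WAW deps may dispatch together.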

  // Treat predication as a data dependency for out-of-order cpus. In-order
  // cpus do not need to treat predicated writes specially.
  //
  // TODO: The following hack exists because predication passes do not
  // correctly append imp-use operands, and readsReg() strangely returns false
  // for predicated defs.
  unsigned Reg = DefMI->getOperand(DefOperIdx).getReg();
  const MachineFunction &MF = *DefMI->getParent()->getParent();
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
  if (!DepMI->readsRegister(Reg, TRI) && TII->isPredicated(DepMI))
    return computeInstrLatency(DefMI);

  // If we have a per operand scheduling model, check if this def is writing
  // an unbuffered resource. If so, it is treated like an in-order cpu.
  if (hasInstrSchedModel()) {
    const MCSchedClassDesc *SCDesc = resolveSchedClass(DefMI);
    if (SCDesc->isValid()) {
      for (const MCWriteProcResEntry *PRI = STI->getWriteProcResBegin(SCDesc),
             *PRE = STI->getWriteProcResEnd(SCDesc); PRI != PRE; ++PRI) {
        if (!SchedModel.getProcResource(PRI->ProcResourceIdx)->IsBuffered)
          return 1;
      }
    }
  }
  return 0;
}