//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;
ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt,
                                     bool IsPostRAFlag,
                                     LiveIntervals *lis)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
    InstrItins(mf.getTarget().getInstrItineraryData()), IsPostRA(IsPostRAFlag),
    LIS(lis), UnitLatencies(false), LoopRegs(MLI, MDT), FirstDbgValue(0) {
  assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals");
  DbgValues.clear();
  assert(!(IsPostRA && MRI.getNumVirtRegs()) &&
         "Virtual registers must be removed prior to PostRA scheduling");
}
/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  // Check to see if the scheduler cares about latencies.
  UnitLatencies = ForceUnitLatencies();

  ScheduleDAG::Run(bb, end);
}
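
// A scheduling region here is the half-open range [begin, end) within a
// single basic block; endcount is the instruction count at the region's end,
// recorded above in InsertPosIndex.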
/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (1);
}
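
// Illustrative IR (not from this file): starting from %a below, the walk
// steps through the add to the ptrtoint and returns %obj:
//   %i = ptrtoint i8* %obj to i64
//   %a = add i64 %i, 16
//   %p = inttoptr i64 %a to i8*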
/// getUnderlyingObject - This is a wrapper around GetUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call Value::getUnderlyingObject to let it do what it does.
  do {
    V = GetUnderlyingObject(V);
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!O->getType()->isPointerTy())
      break;
    V = O;
  } while (1);
  return V;
}
/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
                                                const MachineFrameInfo *MFI,
                                                bool &MayAlias) {
  MayAlias = true;
  if (!MI->hasOneMemOperand() ||
      !(*MI->memoperands_begin())->getValue() ||
      (*MI->memoperands_begin())->isVolatile())
    return 0;

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
    // For now, ignore PseudoSourceValues which may alias LLVM IR values
    // because the code that uses this function has no way to cope with
    // such aliases.
    if (PSV->isAliased(MFI))
      return 0;

    MayAlias = PSV->mayAlias(MFI);
    return V;
  }

  if (isIdentifiedObject(V))
    return V;

  return 0;
}
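
// For example, a reload from a spill slot has a FixedStack PseudoSourceValue
// rather than an IR Value; if the frame info says the slot is not aliased,
// the slot is returned with MayAlias reflecting PSV->mayAlias(MFI), which
// lets BuildSchedGraph use the cheaper NonAlias tracking below.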
void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  LoopRegs.Deps.clear();
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch())
      LoopRegs.VisitLoop(ML);
}
/// AddSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and use list. This is because we want to
/// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s)
/// is too high to be hidden by the branch or when the liveout registers
/// are used by instructions in the fallthrough block.
void ScheduleDAGInstrs::AddSchedBarrierDeps() {
  MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
  ExitSU.setInstr(ExitMI);
  bool AllDepKnown = ExitMI &&
    (ExitMI->isCall() || ExitMI->isBarrier());
  if (ExitMI && AllDepKnown) {
    // If it's a call or a barrier, add dependencies on the defs and uses of
    // the instruction.
    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = ExitMI->getOperand(i);
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (TRI->isPhysicalRegister(Reg))
        Uses[Reg].SUnits.push_back(&ExitSU);
      else
        assert(!IsPostRA && "Virtual register encountered after regalloc.");
    }
  } else {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    SmallSet<unsigned, 8> Seen;
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        if (Seen.insert(Reg))
          Uses[Reg].SUnits.push_back(&ExitSU);
      }
  }
}
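
// For example, if the region ends in a fallthrough rather than a call or
// barrier, a copy into a register that is live-in to a successor block gets
// a use entry for ExitSU here, so the def of that register is forced to
// schedule before the block exit.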
/// MO is an operand of SU's instruction that defines a physical register. Add
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU,
                                           const MachineOperand &MO) {
  assert(MO.isDef() && "expect physreg def");

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
  unsigned DataLatency = SU->Latency;

  for (const unsigned *Alias = TRI->getOverlaps(MO.getReg()); *Alias; ++Alias) {
    Reg2SUnitsMap::iterator UsesI = Uses.find(*Alias);
    if (UsesI == Uses.end())
      continue;
    std::vector<SUnit*> &UseList = UsesI->SUnits;
    for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
      SUnit *UseSU = UseList[i];
      if (UseSU == SU)
        continue;
      unsigned LDataLatency = DataLatency;
      // Optionally add in a special extra latency for nodes that
      // feed addresses.
      // TODO: Perhaps we should get rid of SpecialAddressLatency and just
      // move this into adjustSchedDependency for the targets that care
      // about it.
      if (SpecialAddressLatency != 0 && !UnitLatencies &&
          UseSU != &ExitSU) {
        MachineInstr *UseMI = UseSU->getInstr();
        const MCInstrDesc &UseMCID = UseMI->getDesc();
        int RegUseIndex = UseMI->findRegisterUseOperandIdx(*Alias);
        assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
        if (RegUseIndex >= 0 &&
            (UseMI->mayLoad() || UseMI->mayStore()) &&
            (unsigned)RegUseIndex < UseMCID.getNumOperands() &&
            UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
          LDataLatency += SpecialAddressLatency;
      }
      // Adjust the dependence latency using operand def/use
      // information (if any), and then allow the target to
      // perform its own adjustments.
      const SDep &dep = SDep(SU, SDep::Data, LDataLatency, *Alias);
      if (!UnitLatencies) {
        ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
        ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
      }
      UseSU->addPred(dep);
    }
  }
}
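
// Hypothetical x86 illustration: if SU defines %EAX and a later instruction
// reads %AX, getOverlaps(EAX) visits AX, the AX entry in Uses yields that
// reader, and a data edge from SU to the reader is added with the adjusted
// latency.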
/// addPhysRegDeps - Add register dependencies (data, anti, and output) from
/// this SUnit to following instructions in the same scheduling region that
/// depend on the physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  const MachineOperand &MO = MI->getOperand(OperIdx);

  // Optionally add output and anti dependencies. For anti
  // dependencies we use a latency of 0 because for a multi-issue
  // target we want to allow the defining instruction to issue
  // in the same cycle as the using instruction.
  // TODO: Using a latency of 1 here for output dependencies assumes
  //       there's no cost for reusing registers.
  SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
  for (const unsigned *Alias = TRI->getOverlaps(MO.getReg()); *Alias; ++Alias) {
    Reg2SUnitsMap::iterator DefI = Defs.find(*Alias);
    if (DefI == Defs.end())
      continue;
    std::vector<SUnit *> &DefList = DefI->SUnits;
    for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
      SUnit *DefSU = DefList[i];
      if (DefSU == &ExitSU)
        continue;
      if (DefSU != SU &&
          (Kind != SDep::Output || !MO.isDead() ||
           !DefSU->getInstr()->registerDefIsDead(*Alias))) {
        if (Kind == SDep::Anti)
          DefSU->addPred(SDep(SU, Kind, 0, /*Reg=*/*Alias));
        else {
          unsigned AOLat = TII->getOutputLatency(InstrItins, MI, OperIdx,
                                                 DefSU->getInstr());
          DefSU->addPred(SDep(SU, Kind, AOLat, /*Reg=*/*Alias));
        }
      }
    }
  }

  if (MO.isUse()) {
    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
    // retrieve the existing SUnits list for this register's uses.
    // Push this SUnit on the use list.
    Uses[MO.getReg()].SUnits.push_back(SU);
  }
  else {
    addPhysRegDataDeps(SU, MO);

    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
    // retrieve the existing SUnits list for this register's defs.
    std::vector<SUnit *> &DefList = Defs[MO.getReg()].SUnits;

    // If a def is going to wrap back around to the top of the loop,
    // backschedule it.
    if (!UnitLatencies && DefList.empty()) {
      LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(MO.getReg());
      if (I != LoopRegs.Deps.end()) {
        const MachineOperand *UseMO = I->second.first;
        unsigned Count = I->second.second;
        const MachineInstr *UseMI = UseMO->getParent();
        unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
        const MCInstrDesc &UseMCID = UseMI->getDesc();
        const TargetSubtargetInfo &ST =
          TM.getSubtarget<TargetSubtargetInfo>();
        unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
        // TODO: If we knew the total depth of the region here, we could
        // handle the case where the whole loop is inside the region but
        // is large enough that the isScheduleHigh trick isn't needed.
        if (UseMOIdx < UseMCID.getNumOperands()) {
          // Currently, we only support scheduling regions consisting of
          // single basic blocks. Check to see if the instruction is in
          // the same region by checking to see if it has the same parent.
          if (UseMI->getParent() != MI->getParent()) {
            unsigned Latency = SU->Latency;
            if (UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass())
              Latency += SpecialAddressLatency;
            // This is a wild guess as to the portion of the latency which
            // will be overlapped by work done outside the current
            // scheduling region.
            Latency -= std::min(Latency, Count);
            // Add the artificial edge.
            ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                /*Reg=*/0, /*isNormalMemory=*/false,
                                /*isMustAlias=*/false,
                                /*isArtificial=*/true));
          } else if (SpecialAddressLatency > 0 &&
                     UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
            // The entire loop body is within the current scheduling region
            // and the latency of this operation is assumed to be greater
            // than the latency of the loop.
            // TODO: Recursively mark data-edge predecessors as
            //       isScheduleHigh too.
            SU->isScheduleHigh = true;
          }
        }
        LoopRegs.Deps.erase(I);
      }
    }

    // Clear this register's use list.
    Reg2SUnitsMap::iterator UsesI = Uses.find(MO.getReg());
    if (UsesI != Uses.end())
      UsesI->SUnits.clear();

    if (!MO.isDead())
      DefList.clear();

    // Calls will not be reordered because of chain dependencies (see
    // below). Since call operands are dead, calls may continue to be added
    // to the DefList making dependence checking quadratic in the size of
    // the block. Instead, we leave only one call at the back of the
    // DefList.
    if (SU->isCall) {
      while (!DefList.empty() && DefList.back()->isCall)
        DefList.pop_back();
    }

    // Defs are pushed in the order they are visited and never reordered.
    DefList.push_back(SU);
  }
}
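
// Illustration with a hypothetical physreg: given "A: def EAX" followed by
// "B: def EAX", the bottom-up walk visits B first, so when A is visited it
// finds B in Defs[EAX] and adds an output edge A -> B. A use followed by a
// def gets a zero-latency anti edge instead, letting both issue in the same
// cycle on a multi-issue target.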
/// addVRegDefDeps - Add register output and data dependencies from this SUnit
/// to instructions that occur later in the same scheduling region if they read
/// from or write to the virtual register defined at OperIdx.
///
/// TODO: Hoist loop induction variable increments. This has to be
/// reevaluated. Generally, IV scheduling should be done before coalescing.
void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  unsigned Reg = MI->getOperand(OperIdx).getReg();

  // SSA defs do not have output/anti dependencies.
  // The current operand is a def, so we have at least one.
  if (llvm::next(MRI.def_begin(Reg)) == MRI.def_end())
    return;

  // Add output dependence to the next nearest def of this vreg.
  //
  // Unless this definition is dead, the output dependence should be
  // transitively redundant with antidependencies from this definition's
  // uses. We're conservative for now until we have a way to guarantee the uses
  // are not eliminated sometime during scheduling. The output dependence edge
  // is also useful if output latency exceeds def-use latency.
  VReg2SUnitMap::iterator DefI = findVRegDef(Reg);
  if (DefI == VRegDefs.end())
    VRegDefs.insert(VReg2SUnit(Reg, SU));
  else {
    SUnit *DefSU = DefI->SU;
    if (DefSU != SU && DefSU != &ExitSU) {
      unsigned OutLatency = TII->getOutputLatency(InstrItins, MI, OperIdx,
                                                  DefSU->getInstr());
      DefSU->addPred(SDep(SU, SDep::Output, OutLatency, Reg));
    }
    DefI->SU = SU;
  }
}
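
// In pure SSA a vreg has exactly one def and the early return above fires;
// the VRegDefs update matters once two-address rewriting or coalescing has
// introduced multiple defs of the same vreg, e.g. tied-operand
// redefinitions of the "%v = add %v, 1" form.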
/// addVRegUseDeps - Add a register data dependency if the instruction that
/// defines the virtual register used at OperIdx is mapped to an SUnit. Add a
/// register antidependency from this SUnit to instructions that occur later in
/// the same scheduling region if they write the virtual register.
///
/// TODO: Handle ExitSU "uses" properly.
void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  unsigned Reg = MI->getOperand(OperIdx).getReg();

  // Lookup this operand's reaching definition.
  assert(LIS && "vreg dependencies require LiveIntervals");
  SlotIndex UseIdx = LIS->getSlotIndexes()->getInstructionIndex(MI);
  LiveInterval *LI = &LIS->getInterval(Reg);
  VNInfo *VNI = LI->getVNInfoAt(UseIdx);
  MachineInstr *Def = LIS->getInstructionFromIndex(VNI->def);
  if (Def) {
    SUnit *DefSU = getSUnit(Def);
    if (DefSU) {
      // The reaching Def lives within this scheduling region.
      // Create a data dependence.
      //
      // TODO: Handle "special" address latencies cleanly.
      const SDep &dep = SDep(DefSU, SDep::Data, DefSU->Latency, Reg);
      if (!UnitLatencies) {
        // Adjust the dependence latency using operand def/use information,
        // then allow the target to perform its own adjustments.
        ComputeOperandLatency(DefSU, SU, const_cast<SDep &>(dep));
        const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
        ST.adjustSchedDependency(DefSU, SU, const_cast<SDep &>(dep));
      }
      SU->addPred(dep);
    }
  }

  // Add antidependence to the following def of the vreg it uses.
  VReg2SUnitMap::iterator DefI = findVRegDef(Reg);
  if (DefI != VRegDefs.end() && DefI->SU != SU)
    DefI->SU->addPred(SDep(SU, SDep::Anti, 0, Reg));
}
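
// For example, with "%v = ...; use %v; %v = ..." in one region, the use gets
// a data edge from the first def (found through LiveIntervals) and a
// zero-latency anti edge to the second def (found through VRegDefs).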
/// Create an SUnit for each real instruction, numbered in top-down topological
/// order. The instruction order A < B implies that no edge exists from B to A.
///
/// Map each real instruction to its SUnit.
///
/// After initSUnits, the SUnits vector cannot be resized and the scheduler may
/// hang onto SUnit pointers. We may relax this in the future by using SUnit
/// IDs instead of pointers.
void ScheduleDAGInstrs::initSUnits() {
  // We'll be allocating one SUnit for each real instruction in the region,
  // which is contained within a basic block.
  SUnits.reserve(BB->size());

  for (MachineBasicBlock::iterator I = Begin; I != InsertPos; ++I) {
    MachineInstr *MI = I;
    if (MI->isDebugValue())
      continue;

    SUnit *SU = NewSUnit(MI);
    MISUnitMap[MI] = SU;

    SU->isCall = MI->isCall();
    SU->isCommutable = MI->isCommutable();

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);
  }
}
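
// After initSUnits, BuildSchedGraph can look up the node for any real
// (non-debug) instruction in the region via MISUnitMap, and clients may
// cache SUnit pointers since the reserve() above prevents reallocation.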
void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
  // Create an SUnit for each real instruction.
  initSUnits();

  // We build scheduling units by walking a block's instruction list from
  // bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed.
  SUnit *BarrierChain = 0, *AliasChain = 0;

  // Memory references to specific known memory locations are tracked
  // so that they can be given more precise dependencies. We track
  // separately the known memory locations that may alias and those
  // that are known not to alias.
  std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
  std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValues.clear();
  FirstDbgValue = NULL;

  assert(Defs.empty() && Uses.empty() &&
         "Only BuildGraph should update Defs/Uses");
  Defs.setUniverse(TRI->getNumRegs());
  Uses.setUniverse(TRI->getNumRegs());

  assert(VRegDefs.empty() && "Only BuildSchedGraph may access VRegDefs");
  // FIXME: Allow SparseSet to reserve space for the creation of virtual
  // registers during scheduling. Don't artificially inflate the Universe
  // because we want to assert that vregs are not created during DAG building.
  VRegDefs.setUniverse(MRI.getNumVirtRegs());

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  AddSchedBarrierDeps();
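
  // A note on edge direction for the walk below: dependencies are recorded
  // as Later->addPred(SDep(SU, ...)). Because the walk is bottom-up, SU is
  // always the earlier instruction, so each addPred says "the later
  // instruction depends on SU".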
  // Walk the list of instructions, from bottom moving up.
  MachineInstr *PrevMI = NULL;
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    if (MI && PrevMI) {
      DbgValues.push_back(std::make_pair(PrevMI, MI));
      PrevMI = NULL;
    }

    if (MI->isDebugValue()) {
      PrevMI = MI;
      continue;
    }

    assert(!MI->isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");

    SUnit *SU = MISUnitMap[MI];
    assert(SU && "No SUnit mapped to this MI");

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (TRI->isPhysicalRegister(Reg))
        addPhysRegDeps(SU, j);
      else {
        assert(!IsPostRA && "Virtual register encountered!");
        if (MO.isDef())
          addVRegDefDeps(SU, j);
        else if (MO.readsReg()) // ignore undef operands
          addVRegUseDeps(SU, j);
      }
    }
    // Add chain dependencies.
    // Chain dependencies used to enforce memory order should have
    // latency of 0 (except for true dependency of Store followed by
    // aliased Load... we estimate that with a single cycle of latency
    // assuming the hardware will bypass).
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
#define STORE_LOAD_LATENCY 1
    unsigned TrueMemOrderLatency = 0;
    if (MI->isCall() || MI->hasUnmodeledSideEffects() ||
        (MI->hasVolatileMemoryRef() &&
         (!MI->mayLoad() || !MI->isInvariantLoad(AA)))) {
      // Be conservative with these and add dependencies on all memory
      // references, even those that are known to not alias.
      for (std::map<const Value *, SUnit *>::iterator I =
             NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      NonAliasMemDefs.clear();
      NonAliasMemUses.clear();
      // Add SU to the barrier chain.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      BarrierChain = SU;

      // fall-through
    new_alias_chain:
      // Chain all possibly aliasing memory references through SU.
      if (AliasChain)
        AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      AliasChain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      for (std::map<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
             E = AliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      PendingLoads.clear();
      AliasMemDefs.clear();
      AliasMemUses.clear();
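
      // For example, a call with unmodeled side effects lands here and
      // becomes the new BarrierChain: all memory accesses already visited
      // (below it in the block) were just chained after it, and accesses
      // visited later (above it) will chain before it via the BarrierChain
      // checks that follow.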
    } else if (MI->mayStore()) {
      bool MayAlias = true;
      TrueMemOrderLatency = STORE_LOAD_LATENCY;
      if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Record the def in MemDefs, first adding a dep if there is
        // already one.
        std::map<const Value *, SUnit *>::iterator I =
          ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
        std::map<const Value *, SUnit *>::iterator IE =
          ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
        if (I != IE) {
          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          if (MayAlias)
            AliasMemDefs[V] = SU;
          else
            NonAliasMemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          ((MayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
        std::map<const Value *, std::vector<SUnit *> >::iterator JE =
          ((MayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
        if (J != JE) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
                                       /*Reg=*/0, /*isNormalMemory=*/true));
          J->second.clear();
        }
        if (MayAlias) {
          // Add dependencies from all the PendingLoads, i.e. loads
          // with no underlying object.
          for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
            PendingLoads[k]->addPred(SDep(SU, SDep::Order,
                                          TrueMemOrderLatency));
          // Add dependence on alias chain, if needed.
          if (AliasChain)
            AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        }
        // Add dependence on barrier chain, if needed.
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      } else {
        // Treat all other stores conservatively.
        goto new_alias_chain;
      }

      if (!ExitSU.isPred(SU))
        // Push stores up a bit to avoid them getting in between cmp
        // and branches.
        ExitSU.addPred(SDep(SU, SDep::Order, 0,
                            /*Reg=*/0, /*isNormalMemory=*/false,
                            /*isMustAlias=*/false,
                            /*isArtificial=*/true));
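
      // For example, two stores to the same IR object are serialized with a
      // zero-latency order edge through the MemDefs entry, while a store
      // followed by a load of the same object gets STORE_LOAD_LATENCY,
      // modeling a likely store-to-load forwarding bypass.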
    } else if (MI->mayLoad()) {
      bool MayAlias = true;
      TrueMemOrderLatency = 0;
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else {
        if (const Value *V =
            getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
          // A load from a specific PseudoSourceValue. Add precise
          // dependencies.
          std::map<const Value *, SUnit *>::iterator I =
            ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
          std::map<const Value *, SUnit *>::iterator IE =
            ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
          if (I != IE)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                    /*isNormalMemory=*/true));
          if (MayAlias)
            AliasMemUses[V].push_back(SU);
          else
            NonAliasMemUses[V].push_back(SU);
        } else {
          // A load with no underlying object. Depend on all
          // potentially aliasing stores.
          for (std::map<const Value *, SUnit *>::iterator I =
                 AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));

          PendingLoads.push_back(SU);
          MayAlias = true;
        }

        // Add dependencies on alias and barrier chains, if needed.
        if (MayAlias && AliasChain)
          AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
    }
  }
  if (PrevMI)
    FirstDbgValue = PrevMI;

  Defs.clear();
  Uses.clear();
  VRegDefs.clear();
  PendingLoads.clear();
}

void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}
void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  // Compute the latency for the node.
  if (!InstrItins || InstrItins->isEmpty()) {
    SU->Latency = 1;

    // Simplistic target-independent heuristic: assume that loads take
    // extra time.
    if (SU->getInstr()->mayLoad())
      SU->Latency += 2;
  } else {
    SU->Latency = TII->getInstrLatency(InstrItins, SU->getInstr());
  }
}
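
// For example, without itinerary data an ordinary ALU instruction gets
// Latency = 1, while a load gets 1 + 2 = 3; with itineraries the target's
// own getInstrLatency numbers are used instead.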
void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
                                              SDep &dep) const {
  if (!InstrItins || InstrItins->isEmpty())
    return;

  // For a data dependency with a known register...
  if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
    return;

  const unsigned Reg = dep.getReg();

  // ... find the definition of the register in the defining
  // instruction.
  MachineInstr *DefMI = Def->getInstr();
  int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
  if (DefIdx != -1) {
    const MachineOperand &MO = DefMI->getOperand(DefIdx);
    if (MO.isReg() && MO.isImplicit() &&
        DefIdx >= (int)DefMI->getDesc().getNumOperands()) {
      // This is an implicit def, getOperandLatency() won't return the correct
      // latency. e.g.
      //   %D6<def>, %D7<def> = VLD1q16 %R2<kill>, 0, ..., %Q3<imp-def>
      //   %Q1<def> = VMULv8i16 %Q1<kill>, %Q3<kill>, ...
      // What we want is to compute latency between def of %D6/%D7 and use of
      // %Q3 instead.
      unsigned Op2 = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
      if (DefMI->getOperand(Op2).isReg())
        DefIdx = Op2;
    }

    MachineInstr *UseMI = Use->getInstr();
    // For all uses of the register, calculate the maximum latency.
    int Latency = -1;
    if (UseMI) {
      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
        const MachineOperand &MO = UseMI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned MOReg = MO.getReg();
        if (MOReg != Reg)
          continue;

        int UseCycle = TII->getOperandLatency(InstrItins, DefMI, DefIdx,
                                              UseMI, i);
        Latency = std::max(Latency, UseCycle);
      }
    } else {
      // If UseMI is null, then it must be a scheduling barrier.
      if (!InstrItins || InstrItins->isEmpty())
        return;
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      Latency = InstrItins->getOperandCycle(DefClass, DefIdx);
    }

    // If we found a latency, then replace the existing dependence latency.
    if (Latency >= 0)
      dep.setLatency(Latency);
  }
}
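
// In the ARM example above, the def of interest is the implicit %Q3 def of
// VLD1q16, so DefIdx is redirected to the operand returned by the second
// findRegisterDefOperandIdx query before the itinerary computes the
// def-to-use cycle count.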
void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}
// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  Begin = InsertPos;

  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->splice(InsertPos, BB, FirstDbgValue);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(InsertPos, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      EmitNoop();
  }

  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!Sequence.empty())
    Begin = prior(InsertPos);

  // Reinsert any remaining debug_values.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    BB->splice(++OrigPrevMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
  return BB;
}
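
// Note: each DbgValues entry pairs a DBG_VALUE with the instruction that
// originally preceded it, so splicing the DBG_VALUE to ++OrigPrevMI restores
// its position immediately after that instruction once scheduling is done.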