//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//
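
// In outline (a simplified sketch of the pass below): for each basic block,
// initialize register live-range state (StartBlock), split the block at
// scheduling boundaries, list-schedule each region top-down (Schedule and
// ListScheduleTopDown), emit the new instruction order, and finally repair
// kill flags invalidated by reordering (FixupKills).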

#define DEBUG_TYPE "post-RA-sched"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/AntiDepBreaker.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                      cl::desc("Enable exact hazard avoidance"),
                      cl::init(true), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
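
// For example, post-RA scheduling can be forced on from the command line
// (these are hidden llc options):
//   llc -post-RA-scheduler -break-anti-dependencies=critical input.bc
// and, in debug builds, limited to particular blocks with
// -postra-sched-debugdiv / -postra-sched-debugmod.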

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    CodeGenOpt::Level OptLevel;

  public:
    static char ID;
    PostRAScheduler(CodeGenOpt::Level ol) :
      MachineFunctionPass(&ID), OptLevel(ol) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none.
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR,
                         AntiDepBreaker *ADB,
                         AliasAnalysis *aa)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        HazardRec(HR), AntiDepBreak(ADB), AA(aa) {}

    ~SchedulePostRATDList() {
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  AA = &getAnalysis<AliasAnalysis>();

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all") ? TargetSubtarget::ANTIDEP_ALL :
      (EnableAntiDepBreaking == "critical") ? TargetSubtarget::ANTIDEP_CRITICAL :
      TargetSubtarget::ANTIDEP_NONE;
  }

  DEBUG(errs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();
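
  // Pick an anti-dependence breaker to match the requested mode: the
  // aggressive breaker for ANTIDEP_ALL, the critical-path-only breaker for
  // ANTIDEP_CRITICAL, or none at all.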
  AntiDepBreaker *ADB =
    ((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn) :
     ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(Fn) : NULL));

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, ADB, AA);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      errs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":MBB ID#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule(0);
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule(0);

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills.
    Scheduler.FixupKills(MBB);
  }

  delete HR;
  delete ADB;

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  DEBUG(errs() << "********** List Scheduling **********\n");

  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
                                          InsertPosIndex);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      Sequence.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, InsertPosIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// StartBlockForKills - Initialize register live-range state for updating kills.
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
           E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = BB->size();
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          KillIndices[*Subreg] = BB->size();
        }
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}

/// FixupKills - Fix the register kill flags, they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(errs() << "Fixup kills for BB ID#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction.
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        bool removed = ToggleKillFlag(MI, MO);
        if (removed)
          DEBUG(errs() << "Fixed <removed> in ");
        else
          DEBUG(errs() << "Fixed " << MO << " in ");
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    errs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    errs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(errs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            errs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });
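
    // Probe the hazard recognizer for the highest-priority node that can
    // issue in the current cycle; anything that would cause a hazard is set
    // aside in NotReady and pushed back onto the available queue afterwards.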
    SUnit *FoundSUnit = 0;

    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;

      // If we are using the target-specific hazards, then don't
      // advance the cycle time just because we schedule a node. If
      // the target allows it we can schedule multiple nodes in the
      // same cycle.
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(errs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(errs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(errs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  return new PostRAScheduler(OptLevel);
}