//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
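//
// As a rough sketch of that loop (illustrative pseudocode only; the exact
// control flow is in ListScheduleTopDown below):
//
//   while AvailableQueue or PendingQueue is non-empty:
//     move pending nodes whose depth <= CurCycle to Available
//     pop Available until a node with no hazard is found; schedule it
//     otherwise stall (advance the cycle) or emit a noop
//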
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                            cl::desc("Enable exact hazard avoidance"),
                            cl::init(true), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
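//
// For example (hypothetical invocation; exact driver flags depend on your
// build), scheduling only blocks where (ID % 3) == 1:
//
//   llc -post-RA-scheduler -postra-sched-debugdiv=3 -postra-sched-debugmod=1
//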

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    CodeGenOpt::Level OptLevel;

  public:
    static char ID;
    PostRAScheduler(CodeGenOpt::Level ol) :
      MachineFunctionPass(&ID), OptLevel(ol) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none.
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR,
                         AntiDepBreaker *ADB,
                         AliasAnalysis *aa)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        HazardRec(HR), AntiDepBreak(ADB), AA(aa) {}

    ~SchedulePostRATDList() {
      delete HazardRec;
      delete AntiDepBreak;
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}
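
// For example (illustrative, target-specific): on x86 an "addl $N, %esp"
// emitted for a stack adjustment modifies the stack pointer, so it is
// treated as a boundary and scheduling restarts on either side of it.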

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  AA = &getAnalysis<AliasAnalysis>();

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all") ?
      TargetSubtarget::ANTIDEP_ALL :
      ((EnableAntiDepBreaking == "critical") ?
       TargetSubtarget::ANTIDEP_CRITICAL : TargetSubtarget::ANTIDEP_NONE);
  }

  DEBUG(errs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();
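  // ExactHazardRecognizer models the target's scheduling itineraries
  // precisely; SimpleHazardRecognizer is a cheaper approximation.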
  AntiDepBreaker *ADB =
    ((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn) :
     ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(Fn) : NULL));
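  // "critical" breaks anti-dependences only along the critical path; "all"
  // uses the aggressive breaker on every anti-dependence. A NULL breaker
  // means anti-dependences are left alone.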

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, ADB, AA);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      errs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":MBB ID#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule(0);
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule(0);

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills.
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  DEBUG(errs() << "********** List Scheduling **********\n");

  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
                                          InsertPosIndex);
    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      Sequence.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}
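
// Anti-dependence example (hypothetical physical registers): in
//   r1 = r0 + 1
//   r0 = r2 + r3    <- anti-dependence (WAR) on r0
// renaming the second def of r0 to a free register removes the edge and
// lets the scheduler reorder the two instructions.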

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, InsertPosIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// StartBlockForKills - Initialize register live-range state for updating kills.
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
           E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        KillIndices[*Subreg] = BB->size();
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg)
          KillIndices[*Subreg] = BB->size();
      }
    }
  }
}
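
// Note: KillIndices[Reg] == BB->size() marks Reg as live past the last
// instruction in the block (i.e., live-out), while ~0u means not live.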

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg, true /*IsDef*/,
                                               true /*IsImp*/, false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}

/// FixupKills - Fix the register kill flags, they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(errs() << "Fixup kills for BB ID#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        KillIndices[*Subreg] = ~0u;
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction.
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        bool removed = ToggleKillFlag(MI, MO);
        if (removed) {
          DEBUG(errs() << "Fixed <removed> in ");
        } else {
          DEBUG(errs() << "Fixed " << MO << " in ");
        }
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        KillIndices[*Subreg] = Count;
    }
  }
}
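
// Illustrative example (hypothetical registers): if scheduling moved
//   ... = use %r1<kill>
// above another use of %r1, the kill flag is now on the wrong (earlier)
// instruction. The bottom-up walk above clears the stale flag and sets a
// kill on what is now the last use.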

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    errs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    errs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}
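
// A node thus enters PendingQueue as soon as its last predecessor is
// scheduled; it only moves to AvailableQueue once CurCycle reaches its
// depth (see the drain loop in ListScheduleTopDown).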

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // All leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        // Swap-with-back removal; PendingQueue order doesn't matter.
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(errs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            errs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

    SUnit *FoundSUnit = 0;

    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;

      // If we are using the target-specific hazards, then don't
      // advance the cycle time just because we schedule a node. If
      // the target allows it we can schedule multiple nodes in the
      // same cycle.
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(errs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(errs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(errs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  return new PostRAScheduler(OptLevel);
}