//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

#define DEBUG_TYPE "machine-scheduler"

namespace llvm {

cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));

} // end namespace llvm
#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph; provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessors/successors than cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG
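
// These debug knobs compose on the llc command line in asserts builds; for
// example, to narrow scheduling down to one block of one function (flags
// defined above):
//
//   llc -misched-only-func=foo -misched-only-block=3 -view-misched-dags ...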
/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
  cl::desc("Limit ready list to N instructions"), cl::init(256));

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;
// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}

void ScheduleDAGMutation::anchor() {}
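
// MachineSchedStrategy is the intended customization point: ScheduleDAGMI does
// the region and queue bookkeeping while the strategy only picks nodes. A
// minimal, hypothetical strategy (not part of this file) that ignores latency
// and register pressure could be sketched as:
//
//   struct NaiveStrategy : public MachineSchedStrategy {
//     std::vector<SUnit *> ReadyQ;
//     void initialize(ScheduleDAGMI *DAG) override { ReadyQ.clear(); }
//     void releaseTopNode(SUnit *SU) override { ReadyQ.push_back(SU); }
//     void releaseBottomNode(SUnit *SU) override {}
//     SUnit *pickNode(bool &IsTopNode) override {
//       if (ReadyQ.empty()) return nullptr;
//       IsTopNode = true;            // schedule strictly top-down
//       SUnit *SU = ReadyQ.back();
//       ReadyQ.pop_back();
//       return SU;
//     }
//     void schedNode(SUnit *SU, bool IsTopNode) override {}
//   };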
//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext() {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}
namespace {

/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};

} // end anonymous namespace
char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, DEBUG_TYPE,
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, DEBUG_TYPE,
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}
void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler()
: MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry>>
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);
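
// An out-of-tree or experimental scheduler can hook in the same way as the
// "default" entry above; a minimal sketch (all names hypothetical):
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, llvm::make_unique<GenericScheduler>(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "My experimental scheduler.", createMySched);
//
// which would then be selectable with -misched=my-sched.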
static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);
/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return priorNonDebug(MachineBasicBlock::const_iterator(I), Beg)
      .getNonConstIterator();
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  return nextIfDebug(MachineBasicBlock::const_iterator(I), End)
      .getNonConstIterator();
}
/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}
/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}
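
// A target opts in by overriding the TargetPassConfig hook, roughly as follows
// (a sketch; MyPassConfig is hypothetical, and the subtarget must also return
// true from enablePostRAScheduler()):
//
//   ScheduleDAGInstrs *
//   MyPassConfig::createPostMachineScheduler(MachineSchedContext *C) const {
//     return new ScheduleDAGMI(C, llvm::make_unique<PostGenericScheduler>(C),
//                              /*RemoveKillFlags=*/true);
//   }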
/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(*mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}
bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(*mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAScheduler()) {
    DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}
/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
}
/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif
    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    //
    // MBB::size() uses instr_iterator to count. Here we need a bundle to count
    // as a single instruction.
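    //
    // For example, a block of the form "load; add; call; mul; ret" splits at
    // the call into the regions [load, add] and [mul, ret]; the call itself is
    // a boundary that is never reordered, and the bottom region [mul, ret] is
    // visited first.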
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
        RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end() ||
          isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
        --RegionEnd;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I) {
        MachineInstr &MI = *std::prev(I);
        if (isSchedBoundary(&MI, &*MBB, MF, TII))
          break;
        if (!MI.isDebugValue())
          ++NumRegionInstrs;
      }

      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);
      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":BB# " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }
      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(*MBB);
  }
  Scheduler.finalizeSchedule();
}
void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ReadyQueue::dump() const {
  dbgs() << "Queue " << Name << ": ";
  for (const SUnit *SU : Queue)
    dbgs() << SU->NodeNum << " ";
  dbgs() << "\n";
}
#endif
//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() = default;
bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}
/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SDep &Succ : SU->Succs)
    releaseSucc(SU, &Succ);
}
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SDep &Pred : SU->Preds)
    releasePred(SU, &Pred);
}
/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}
/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = InsertPos;
}
bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}
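
// The cutoff is a debugging aid: bisecting over -misched-cutoff can be used to
// home in on the first mis-scheduled instruction in a failing case.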
/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(
    if (EntrySU.getInstr() != nullptr)
      EntrySU.dumpAll(this);
    for (const SUnit &SU : SUnits)
      SU.dumpAll(this);
    if (ExitSU.getInstr() != nullptr)
      ExitSU.dumpAll(this);
  );
  if (ViewMISchedDAGs) viewGraph();
  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
      }
    }

    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
  placeDebugValues();

  DEBUG({
    unsigned BBNum = begin()->getParent()->getNumber();
    dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (auto &m : Mutations)
    m->apply(this);
}
void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (SUnit &SU : SUnits) {
    assert(!SU.isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU.biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!SU.NumPredsLeft)
      TopRoots.push_back(&SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!SU.NumSuccsLeft)
      BotRoots.push_back(&SU);
  }
  ExitSU.biasCriticalPath();
}
/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SUnit *SU : TopRoots)
    SchedImpl->releaseTopNode(SU);

  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}
/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}
/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *>>::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif
//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}
void ScheduleDAGMILive::collectVRegUses(SUnit &SU) {
  const MachineInstr &MI = *SU.getInstr();
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (!MO.readsReg())
      continue;
    if (TrackLaneMasks && !MO.isUse())
      continue;

    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;

    // Ignore re-defs.
    if (TrackLaneMasks) {
      bool FoundDef = false;
      for (const MachineOperand &MO2 : MI.operands()) {
        if (MO2.isReg() && MO2.isDef() && MO2.getReg() == Reg && !MO2.isDead()) {
          FoundDef = true;
          break;
        }
      }
      if (FoundDef)
        continue;
    }

    // Record this local VReg use.
    VReg2SUnitMultiMap::iterator UI = VRegUses.find(Reg);
    for (; UI != VRegUses.end(); ++UI) {
      if (UI->SU == &SU)
        break;
    }
    if (UI == VRegUses.end())
      VRegUses.insert(VReg2SUnit(Reg, LaneBitmask::getNone(), &SU));
  }
}
/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  assert((!ShouldTrackLaneMasks || ShouldTrackPressure) &&
         "ShouldTrackLaneMasks requires ShouldTrackPressure");
}
// Setup the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  VRegUses.clear();
  VRegUses.setUniverse(MRI.getNumVirtRegs());
  for (SUnit &SU : SUnits)
    collectVRegUses(SU);

  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  DEBUG(
    dbgs() << "Top Pressure:\n";
    dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
    dbgs() << "Bottom Pressure:\n";
    dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
  );

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (const PressureChange &RCPS : RegionCriticalPSets)
          dbgs() << TRI->getRegPressureSetName(RCPS.getPSet()) << " ";
        dbgs() << "\n");
}
void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (const PressureChange &PC : PDiff) {
    if (!PC.isValid())
      break;
    unsigned ID = PC.getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= (unsigned)std::numeric_limits<int16_t>::max())
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
            << NewMaxPressure[ID]
            << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") << Limit
            << "(+ " << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
    }
  }
}
/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    unsigned Reg = P.RegUnit;
    /// FIXME: Currently assuming single-use physregs.
    if (!TRI->isVirtualRegister(Reg))
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask.any();

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        DEBUG(
          dbgs() << "  UpdateRegP: SU(" << SU.NodeNum << ") "
                 << PrintReg(Reg, TRI) << ':' << PrintLaneMask(P.LaneMask)
                 << ' ' << *SU.getInstr();
          dbgs() << "              to ";
          PDiff.dump(*TRI);
        );
      }
    } else {
      assert(P.LaneMask.any());
      DEBUG(dbgs() << "  LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized. However,
      // BotRPTracker must have a valid position. We want the value live into the
      // instruction or live out of the block, so ask for the previous
      // instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
        nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ =
            LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            DEBUG(
              dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
                     << *SU->getInstr();
              dbgs() << "              to ";
              PDiff.dump(*TRI);
            );
          }
        }
      }
    }
  }
}
/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(
    if (EntrySU.getInstr() != nullptr)
      EntrySU.dumpAll(this);
    for (const SUnit &SU : SUnits) {
      SU.dumpAll(this);
      if (ShouldTrackPressure) {
        dbgs() << "  Pressure Diff      : ";
        getPressureDiff(&SU).dump(*TRI);
      }
      dbgs() << "  Single Issue       : ";
      if (SchedModel.mustBeginGroup(SU.getInstr()) &&
          SchedModel.mustEndGroup(SU.getInstr()))
        dbgs() << "true;";
      else
        dbgs() << "false;";
      dbgs() << '\n';
    }
    if (ExitSU.getInstr() != nullptr)
      ExitSU.dumpAll(this);
  );
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
    unsigned BBNum = begin()->getParent()->getNumber();
    dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}
/// Build the DAG and setup three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}
void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
  DFSResult->clear();
  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}
/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
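///
/// In general, for each live-out def/use pair the estimate below computes
///   min(LiveOutDepth - LiveInDepth, LiveInHeight - LiveOutHeight)
/// and the cyclic critical path is the maximum of that slack over all pairs.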
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to single block loop.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
    unsigned Reg = P.RegUnit;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      if (SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > SU->getDepth())
        CyclicLatency = LiveOutDepth - SU->getDepth();

      unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      } else
        CyclicLatency = 0;

      DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
            << SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}
/// Release ExitSU predecessors and setup scheduler queues. Re-position
/// the Top RP tracker in case the region beginning has changed.
void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
                                   ArrayRef<SUnit*> BotRoots) {
  ScheduleDAGMI::initQueues(TopRoots, BotRoots);
  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }
}
/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      TopRPTracker.advance(RegOpers);
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      DEBUG(
        dbgs() << "Top Pressure:\n";
        dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
      );

      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  } else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
      priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
    }

    if (ShouldTrackPressure) {
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      BotRPTracker.recedeSkipDebugValues();
      SmallVector<RegisterMaskPair, 8> LiveUses;
      BotRPTracker.recede(RegOpers, &LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      DEBUG(
        dbgs() << "Bottom Pressure:\n";
        dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
      );

      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}
//===----------------------------------------------------------------------===//
// BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
//===----------------------------------------------------------------------===//
namespace {

/// \brief Post-process the DAG to create cluster edges between neighboring
/// loads or between neighboring stores.
class BaseMemOpClusterMutation : public ScheduleDAGMutation {
  struct MemOpInfo {
    SUnit *SU;
    unsigned BaseReg;
    int64_t Offset;

    MemOpInfo(SUnit *su, unsigned reg, int64_t ofs)
        : SU(su), BaseReg(reg), Offset(ofs) {}

    bool operator<(const MemOpInfo &RHS) const {
      return std::tie(BaseReg, Offset, SU->NodeNum) <
             std::tie(RHS.BaseReg, RHS.Offset, RHS.SU->NodeNum);
    }
  };
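
  // Note: sorting MemOpInfo records by (BaseReg, Offset, NodeNum) groups
  // accesses to the same base register contiguously and orders them by offset,
  // so clusterNeighboringMemOps below only has to compare adjacent records.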
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  bool IsLoad;

public:
  BaseMemOpClusterMutation(const TargetInstrInfo *tii,
                           const TargetRegisterInfo *tri, bool IsLoad)
      : TII(tii), TRI(tri), IsLoad(IsLoad) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void clusterNeighboringMemOps(ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG);
};

class StoreClusterMutation : public BaseMemOpClusterMutation {
public:
  StoreClusterMutation(const TargetInstrInfo *tii,
                       const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, false) {}
};

class LoadClusterMutation : public BaseMemOpClusterMutation {
public:
  LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, true) {}
};

} // end anonymous namespace
namespace llvm {

std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI) {
  return EnableMemOpCluster ? llvm::make_unique<LoadClusterMutation>(TII, TRI)
                            : nullptr;
}

std::unique_ptr<ScheduleDAGMutation>
createStoreClusterDAGMutation(const TargetInstrInfo *TII,
                              const TargetRegisterInfo *TRI) {
  return EnableMemOpCluster ? llvm::make_unique<StoreClusterMutation>(TII, TRI)
                            : nullptr;
}

} // end namespace llvm
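
// A target typically attaches these mutations when it constructs its scheduler
// instance; a sketch modeled on in-tree TargetPassConfig::createMachineScheduler
// overrides (TII and TRI are the public ScheduleDAG members):
//
//   ScheduleDAGMILive *DAG = createGenericSchedLive(C);
//   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
//   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
//   return DAG;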
void BaseMemOpClusterMutation::clusterNeighboringMemOps(
    ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG) {
  SmallVector<MemOpInfo, 32> MemOpRecords;
  for (SUnit *SU : MemOps) {
    unsigned BaseReg;
    int64_t Offset;
    if (TII->getMemOpBaseRegImmOfs(*SU->getInstr(), BaseReg, Offset, TRI))
      MemOpRecords.push_back(MemOpInfo(SU, BaseReg, Offset));
  }
  if (MemOpRecords.size() < 2)
    return;

  std::sort(MemOpRecords.begin(), MemOpRecords.end());
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
    if (MemOpRecords[Idx].BaseReg != MemOpRecords[Idx+1].BaseReg) {
      ClusterLength = 1;
      continue;
    }

    SUnit *SUa = MemOpRecords[Idx].SU;
    SUnit *SUb = MemOpRecords[Idx+1].SU;
    if (TII->shouldClusterMemOps(*SUa->getInstr(), *SUb->getInstr(),
                                 ClusterLength) &&
        DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
      DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
            << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since nearby
      // loads should have effectively the same inputs.
      for (const SDep &Succ : SUa->Succs) {
        if (Succ.getSUnit() == SUb)
          continue;
        DEBUG(dbgs() << "  Copy Succ SU(" << Succ.getSUnit()->NodeNum << ")\n");
        DAG->addEdge(Succ.getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    } else
      ClusterLength = 1;
  }
}
/// \brief Callback from DAG postProcessing to create cluster edges for loads.
void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
  ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);

  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent MemOps.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (SUnit &SU : DAG->SUnits) {
    if ((IsLoad && !SU.getInstr()->mayLoad()) ||
        (!IsLoad && !SU.getInstr()->mayStore()))
      continue;

    unsigned ChainPredID = DAG->SUnits.size();
    for (const SDep &Pred : SU.Preds) {
      if (Pred.isCtrl()) {
        ChainPredID = Pred.getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
      StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(&SU);
  }

  // Iterate over the store chains.
  for (auto &SCD : StoreChainDependents)
    clusterNeighboringMemOps(SCD, DAG);
}
//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {

/// \brief Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;
  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;

public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};

} // end anonymous namespace
namespace llvm {

std::unique_ptr<ScheduleDAGMutation>
createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
                               const TargetRegisterInfo *TRI) {
  return llvm::make_unique<CopyConstrain>(TII, TRI);
}

} // end namespace llvm
/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
  LiveIntervals *LIS = DAG->getLIS();
  MachineInstr *Copy = CopySU->getInstr();

  // Check for pure vreg copies.
  const MachineOperand &SrcOp = Copy->getOperand(1);
  unsigned SrcReg = SrcOp.getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
    return;

  const MachineOperand &DstOp = Copy->getOperand(0);
  unsigned DstReg = DstOp.getReg();
  if (!TargetRegisterInfo::isVirtualRegister(DstReg) || DstOp.isDead())
    return;

  // Check if either the dest or source is local. If it's live across a back
  // edge, it's not local. Note that if both vregs are live across the back
  // edge, we cannot successfully constrain the copy without cyclic scheduling.
  // If both the copy's source and dest are local live intervals, then we
  // should treat the dest as the global for the purpose of adding
  // constraints. This adds edges from source's other uses to the copy.
  unsigned LocalReg = SrcReg;
  unsigned GlobalReg = DstReg;
  LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
  if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
    LocalReg = DstReg;
    GlobalReg = SrcReg;
    LocalLI = &LIS->getInterval(LocalReg);
    if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
      return;
  }
  LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
  // Find the global segment after the start of the local LI.
  LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
  // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
  // local live range. We could create edges from other global uses to the local
  // start, but the coalescer should have already eliminated these cases, so
  // don't bother dealing with it.
  if (GlobalSegment == GlobalLI->end())
    return;

  // If GlobalSegment is killed at the LocalLI->start, the call to find()
  // returned the next global segment. But if GlobalSegment overlaps with
  // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
  // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
  if (GlobalSegment->contains(LocalLI->beginIndex()))
    ++GlobalSegment;

  if (GlobalSegment == GlobalLI->end())
    return;

  // Check if GlobalLI contains a hole in the vicinity of LocalLI.
  if (GlobalSegment != GlobalLI->begin()) {
    // Two address defs have no hole.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
                               GlobalSegment->start)) {
      return;
    }
    // If the prior global segment may be defined by the same two-address
    // instruction that also defines LocalLI, then we can't make a hole here.
    if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
                               LocalLI->beginIndex())) {
      return;
    }
    // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
    // it would be a disconnected component in the live range.
    assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
           "Disconnected LRG within the scheduling region.");
  }
  MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
  if (!GlobalDef)
    return;

  SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
  if (!GlobalSU)
    return;

  // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
  // constraining the uses of the last local def to precede GlobalDef.
  SmallVector<SUnit*,8> LocalUses;
  const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
  MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
  SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
  for (const SDep &Succ : LastLocalSU->Succs) {
    if (Succ.getKind() != SDep::Data || Succ.getReg() != LocalReg)
      continue;
    if (Succ.getSUnit() == GlobalSU)
      continue;
    if (!DAG->canAddEdge(GlobalSU, Succ.getSUnit()))
      return;
    LocalUses.push_back(Succ.getSUnit());
  }
  // Open the top of the GlobalLI hole by constraining any earlier global uses
  // to precede the start of LocalLI.
  SmallVector<SUnit*,8> GlobalUses;
  MachineInstr *FirstLocalDef =
    LIS->getInstructionFromIndex(LocalLI->beginIndex());
  SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
  for (const SDep &Pred : GlobalSU->Preds) {
    if (Pred.getKind() != SDep::Anti || Pred.getReg() != GlobalReg)
      continue;
    if (Pred.getSUnit() == FirstLocalSU)
      continue;
    if (!DAG->canAddEdge(FirstLocalSU, Pred.getSUnit()))
      return;
    GlobalUses.push_back(Pred.getSUnit());
  }
  DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
  // Add the weak edges.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Local use SU(" << (*I)->NodeNum << ") -> SU("
          << GlobalSU->NodeNum << ")\n");
    DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
  }
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
    DEBUG(dbgs() << "  Global use SU(" << (*I)->NodeNum << ") -> SU("
          << FirstLocalSU->NodeNum << ")\n");
    DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
  }
}
1758 /// \brief Callback from DAG postProcessing to create weak edges to encourage
1759 /// copy elimination.
1760 void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
1761 ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1762 assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
1764 MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
1765 if (FirstPos == DAG->end())
1767 RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
1768 RegionEndIdx = DAG->getLIS()->getInstructionIndex(
1769 *priorNonDebug(DAG->end(), DAG->begin()));
1771 for (SUnit &SU : DAG->SUnits) {
1772 if (!SU.getInstr()->isCopy())
1775 constrainLocalCopy(&SU, static_cast<ScheduleDAGMILive*>(DAG));
1779 //===----------------------------------------------------------------------===//
1780 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
1781 // and possibly other custom schedulers.
1782 //===----------------------------------------------------------------------===//
1784 static const unsigned InvalidCycle = ~0U;
1786 SchedBoundary::~SchedBoundary() { delete HazardRec; }
1788 void SchedBoundary::reset() {
1789 // A new HazardRec is created for each DAG and owned by SchedBoundary.
// Destroying and reconstructing it is very expensive though, so we keep
// the invalid placeholder HazardRecs around.
1792 if (HazardRec && HazardRec->isEnabled()) {
1794 HazardRec = nullptr;
1798 CheckPending = false;
1801 MinReadyCycle = std::numeric_limits<unsigned>::max();
1802 ExpectedLatency = 0;
1803 DependentLatency = 0;
1805 MaxExecutedResCount = 0;
1807 IsResourceLimited = false;
1808 ReservedCycles.clear();
1810 // Track the maximum number of stall cycles that could arise either from the
1811 // latency of a DAG edge or the number of cycles that a processor resource is
1812 // reserved (SchedBoundary::ReservedCycles).
1813 MaxObservedStall = 0;
1815 // Reserve a zero-count for invalid CritResIdx.
1816 ExecutedResCounts.resize(1);
1817 assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
1820 void SchedRemainder::
1821 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
1823 if (!SchedModel->hasInstrSchedModel())
1825 RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
1826 for (SUnit &SU : DAG->SUnits) {
1827 const MCSchedClassDesc *SC = DAG->getSchedClass(&SU);
1828 RemIssueCount += SchedModel->getNumMicroOps(SU.getInstr(), SC)
1829 * SchedModel->getMicroOpFactor();
1830 for (TargetSchedModel::ProcResIter
1831 PI = SchedModel->getWriteProcResBegin(SC),
1832 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1833 unsigned PIdx = PI->ProcResourceIdx;
1834 unsigned Factor = SchedModel->getResourceFactor(PIdx);
1835 RemainingCounts[PIdx] += (Factor * PI->Cycles);
1840 void SchedBoundary::
1841 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
1844 SchedModel = smodel;
1846 if (SchedModel->hasInstrSchedModel()) {
1847 ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
1848 ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
1852 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
1853 /// these "soft stalls" differently than the hard stall cycles based on CPU
1854 /// resources and computed by checkHazard(). A fully in-order model
1855 /// (MicroOpBufferSize==0) will not make use of this since instructions are not
1856 /// available for scheduling until they are ready. However, a weaker in-order
1857 /// model may use this for heuristics. For example, if a processor has in-order
1858 /// behavior when reading certain resources, this may come into play.
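///
/// For example (hypothetical numbers), on an in-order core: if an unbuffered
/// SU becomes ready at cycle 10 while the zone's CurrCycle is still 7,
/// heuristics see a soft stall of 10 - 7 = 3 cycles.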
1859 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
1860 if (!SU->isUnbuffered)
1863 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1864 if (ReadyCycle > CurrCycle)
1865 return ReadyCycle - CurrCycle;
/// Compute the next cycle at which the given processor resource can be
/// scheduled.
1871 unsigned SchedBoundary::
1872 getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
1873 unsigned NextUnreserved = ReservedCycles[PIdx];
1874 // If this resource has never been used, always return cycle zero.
1875 if (NextUnreserved == InvalidCycle)
1877 // For bottom-up scheduling add the cycles needed for the current operation.
1879 NextUnreserved += Cycles;
1880 return NextUnreserved;
1883 /// Does this SU have a hazard within the current instruction group.
1885 /// The scheduler supports two modes of hazard recognition. The first is the
1886 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1887 /// supports highly complicated in-order reservation tables
/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1890 /// The second is a streamlined mechanism that checks for hazards based on
1891 /// simple counters that the scheduler itself maintains. It explicitly checks
1892 /// for instruction dispatch limitations, including the number of micro-ops that
1893 /// can dispatch per cycle.
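///
/// For example (hypothetical numbers), with an issue width of 4 micro-ops:
/// if 3 micro-ops have already issued in the current cycle and the candidate
/// decodes to 2 micro-ops, then 3 + 2 > 4, so the candidate is a hazard and
/// must wait for the next cycle.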
1895 /// TODO: Also check whether the SU must start a new group.
1896 bool SchedBoundary::checkHazard(SUnit *SU) {
1897 if (HazardRec->isEnabled()
1898 && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
1902 unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
1903 if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
1904 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops="
1905 << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
1910 ((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) ||
1911 (!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) {
1912 DEBUG(dbgs() << " hazard: SU(" << SU->NodeNum << ") must "
1913 << (isTop()? "begin" : "end") << " group\n");
1917 if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
1918 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1919 for (TargetSchedModel::ProcResIter
1920 PI = SchedModel->getWriteProcResBegin(SC),
1921 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1922 unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
1923 if (NRCycle > CurrCycle) {
1925 MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
1927 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") "
1928 << SchedModel->getResourceName(PI->ProcResourceIdx)
1929 << "=" << NRCycle << "c\n");
1937 // Find the unscheduled node in ReadySUs with the highest latency.
1938 unsigned SchedBoundary::
1939 findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
1940 SUnit *LateSU = nullptr;
1941 unsigned RemLatency = 0;
1942 for (SUnit *SU : ReadySUs) {
1943 unsigned L = getUnscheduledLatency(SU);
1944 if (L > RemLatency) {
1950 DEBUG(dbgs() << Available.getName() << " RemLatency SU("
1951 << LateSU->NodeNum << ") " << RemLatency << "c\n");
// Count resources in this zone and the remaining unscheduled
// instructions. Return the max count, scaled. Set OtherCritIdx to the critical
1958 // resource index, or zero if the zone is issue limited.
1959 unsigned SchedBoundary::
1960 getOtherResourceCount(unsigned &OtherCritIdx) {
1962 if (!SchedModel->hasInstrSchedModel())
1965 unsigned OtherCritCount = Rem->RemIssueCount
1966 + (RetiredMOps * SchedModel->getMicroOpFactor());
1967 DEBUG(dbgs() << " " << Available.getName() << " + Remain MOps: "
1968 << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
1969 for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
1970 PIdx != PEnd; ++PIdx) {
1971 unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
1972 if (OtherCount > OtherCritCount) {
1973 OtherCritCount = OtherCount;
1974 OtherCritIdx = PIdx;
1978 DEBUG(dbgs() << " " << Available.getName() << " + Remain CritRes: "
1979 << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
1980 << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
1982 return OtherCritCount;
1985 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
1986 assert(SU->getInstr() && "Scheduled SUnit must have instr");
// ReadyCycle was bumped up to CurrCycle when this node was
1990 // scheduled, but CurrCycle may have been eagerly advanced immediately after
1991 // scheduling, so may now be greater than ReadyCycle.
1992 if (ReadyCycle > CurrCycle)
1993 MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
1996 if (ReadyCycle < MinReadyCycle)
1997 MinReadyCycle = ReadyCycle;
1999 // Check for interlocks first. For the purpose of other heuristics, an
2000 // instruction that cannot issue appears as if it's not in the ReadyQueue.
2001 bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2002 if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU) ||
2003 Available.size() >= ReadyListLimit)
2009 /// Move the boundary of scheduled code by one cycle.
2010 void SchedBoundary::bumpCycle(unsigned NextCycle) {
2011 if (SchedModel->getMicroOpBufferSize() == 0) {
2012 assert(MinReadyCycle < std::numeric_limits<unsigned>::max() &&
2013 "MinReadyCycle uninitialized");
2014 if (MinReadyCycle > NextCycle)
2015 NextCycle = MinReadyCycle;
2017 // Update the current micro-ops, which will issue in the next cycle.
2018 unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
2019 CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
2021 // Decrement DependentLatency based on the next cycle.
2022 if ((NextCycle - CurrCycle) > DependentLatency)
2023 DependentLatency = 0;
2025 DependentLatency -= (NextCycle - CurrCycle);
2027 if (!HazardRec->isEnabled()) {
2028 // Bypass HazardRec virtual calls.
2029 CurrCycle = NextCycle;
2031 // Bypass getHazardType calls in case of long latency.
2032 for (; CurrCycle != NextCycle; ++CurrCycle) {
2034 HazardRec->AdvanceCycle();
2036 HazardRec->RecedeCycle();
2039 CheckPending = true;
2040 unsigned LFactor = SchedModel->getLatencyFactor();
2042 (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2045 DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
2048 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
2049 ExecutedResCounts[PIdx] += Count;
2050 if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
2051 MaxExecutedResCount = ExecutedResCounts[PIdx];
2054 /// Add the given processor resource to this scheduled zone.
2056 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles
2057 /// during which this resource is consumed.
2059 /// \return the next cycle at which the instruction may execute without
2060 /// oversubscribing resources.
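///
/// For example (hypothetical numbers): a resource with a scaling factor of 2
/// consumed for 3 consecutive cycles is charged 2 * 3 = 6 scaled units, which
/// are added to this zone's executed counts and subtracted from the remaining
/// counts.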
2061 unsigned SchedBoundary::
2062 countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
2063 unsigned Factor = SchedModel->getResourceFactor(PIdx);
2064 unsigned Count = Factor * Cycles;
2065 DEBUG(dbgs() << " " << SchedModel->getResourceName(PIdx)
2066 << " +" << Cycles << "x" << Factor << "u\n");
// Update the executed resource counts.
2069 incExecutedResources(PIdx, Count);
2070 assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
2071 Rem->RemainingCounts[PIdx] -= Count;
2073 // Check if this resource exceeds the current critical resource. If so, it
2074 // becomes the critical resource.
2075 if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
2076 ZoneCritResIdx = PIdx;
2077 DEBUG(dbgs() << " *** Critical resource "
2078 << SchedModel->getResourceName(PIdx) << ": "
2079 << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
2081 // For reserved resources, record the highest cycle using the resource.
2082 unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
2083 if (NextAvailable > CurrCycle) {
2084 DEBUG(dbgs() << " Resource conflict: "
2085 << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
2086 << NextAvailable << "\n");
2088 return NextAvailable;
2091 /// Move the boundary of scheduled code by one SUnit.
2092 void SchedBoundary::bumpNode(SUnit *SU) {
2093 // Update the reservation table.
2094 if (HazardRec->isEnabled()) {
2095 if (!isTop() && SU->isCall) {
2096 // Calls are scheduled with their preceding instructions. For bottom-up
2097 // scheduling, clear the pipeline state before emitting.
2100 HazardRec->EmitInstruction(SU);
2102 // checkHazard should prevent scheduling multiple instructions per cycle that
2103 // exceed the issue width.
2104 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2105 unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
2107 (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
2108 "Cannot schedule this instruction's MicroOps in the current cycle.");
2110 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
2111 DEBUG(dbgs() << " Ready @" << ReadyCycle << "c\n");
2113 unsigned NextCycle = CurrCycle;
2114 switch (SchedModel->getMicroOpBufferSize()) {
2116 assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
2119 if (ReadyCycle > NextCycle) {
2120 NextCycle = ReadyCycle;
2121 DEBUG(dbgs() << " *** Stall until: " << ReadyCycle << "\n");
2125 // We don't currently model the OOO reorder buffer, so consider all
2126 // scheduled MOps to be "retired". We do loosely model in-order resource
2127 // latency. If this instruction uses an in-order resource, account for any
2128 // likely stall cycles.
2129 if (SU->isUnbuffered && ReadyCycle > NextCycle)
2130 NextCycle = ReadyCycle;
2133 RetiredMOps += IncMOps;
2135 // Update resource counts and critical resource.
2136 if (SchedModel->hasInstrSchedModel()) {
2137 unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
2138 assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
2139 Rem->RemIssueCount -= DecRemIssue;
2140 if (ZoneCritResIdx) {
2141 // Scale scheduled micro-ops for comparing with the critical resource.
2142 unsigned ScaledMOps =
2143 RetiredMOps * SchedModel->getMicroOpFactor();
2145 // If scaled micro-ops are now more than the previous critical resource by
2146 // a full cycle, then micro-ops issue becomes critical.
2147 if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
2148 >= (int)SchedModel->getLatencyFactor()) {
2150 DEBUG(dbgs() << " *** Critical resource NumMicroOps: "
2151 << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
2154 for (TargetSchedModel::ProcResIter
2155 PI = SchedModel->getWriteProcResBegin(SC),
2156 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2158 countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
2159 if (RCycle > NextCycle)
2162 if (SU->hasReservedResource) {
2163 // For reserved resources, record the highest cycle using the resource.
2164 // For top-down scheduling, this is the cycle in which we schedule this
// instruction plus the number of cycles the operation reserves the
// resource. For bottom-up, it is simply the instruction's cycle.
2167 for (TargetSchedModel::ProcResIter
2168 PI = SchedModel->getWriteProcResBegin(SC),
2169 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2170 unsigned PIdx = PI->ProcResourceIdx;
2171 if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
2173 ReservedCycles[PIdx] =
2174 std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
2177 ReservedCycles[PIdx] = NextCycle;
2182 // Update ExpectedLatency and DependentLatency.
2183 unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
2184 unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
2185 if (SU->getDepth() > TopLatency) {
2186 TopLatency = SU->getDepth();
2187 DEBUG(dbgs() << " " << Available.getName()
2188 << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
2190 if (SU->getHeight() > BotLatency) {
2191 BotLatency = SU->getHeight();
2192 DEBUG(dbgs() << " " << Available.getName()
2193 << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
2195 // If we stall for any reason, bump the cycle.
2196 if (NextCycle > CurrCycle) {
2197 bumpCycle(NextCycle);
2199 // After updating ZoneCritResIdx and ExpectedLatency, check if we're
2200 // resource limited. If a stall occurred, bumpCycle does this.
2201 unsigned LFactor = SchedModel->getLatencyFactor();
2203 (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2206 // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
// resets CurrMOps. Loop to handle instructions with more MOps than can issue
// in one cycle. Since we commonly reach the max MOps here, opportunistically
2209 // bump the cycle to avoid uselessly checking everything in the readyQ.
2210 CurrMOps += IncMOps;
2212 // Bump the cycle count for issue group constraints.
// This must be done after NextCycle has been adjusted for all other stalls.
// Calling bumpCycle(X) will reduce CurrMOps by one issue group and set the
// current cycle to X.
2216 if ((isTop() && SchedModel->mustEndGroup(SU->getInstr())) ||
2217 (!isTop() && SchedModel->mustBeginGroup(SU->getInstr()))) {
2218 DEBUG(dbgs() << " Bump cycle to "
2219 << (isTop() ? "end" : "begin") << " group\n");
2220 bumpCycle(++NextCycle);
2223 while (CurrMOps >= SchedModel->getIssueWidth()) {
2224 DEBUG(dbgs() << " *** Max MOps " << CurrMOps
2225 << " at cycle " << CurrCycle << '\n');
2226 bumpCycle(++NextCycle);
2228 DEBUG(dumpScheduledState());
/// Release pending ready nodes into the available queue. This makes them
2232 /// visible to heuristics.
2233 void SchedBoundary::releasePending() {
2234 // If the available queue is empty, it is safe to reset MinReadyCycle.
2235 if (Available.empty())
2236 MinReadyCycle = std::numeric_limits<unsigned>::max();
2238 // Check to see if any of the pending instructions are ready to issue. If
2239 // so, add them to the available queue.
2240 bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2241 for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
2242 SUnit *SU = *(Pending.begin()+i);
2243 unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
2245 if (ReadyCycle < MinReadyCycle)
2246 MinReadyCycle = ReadyCycle;
2248 if (!IsBuffered && ReadyCycle > CurrCycle)
2251 if (checkHazard(SU))
2254 if (Available.size() >= ReadyListLimit)
2258 Pending.remove(Pending.begin()+i);
2261 CheckPending = false;
2264 /// Remove SU from the ready set for this boundary.
2265 void SchedBoundary::removeReady(SUnit *SU) {
2266 if (Available.isInQueue(SU))
2267 Available.remove(Available.find(SU));
2269 assert(Pending.isInQueue(SU) && "bad ready count");
2270 Pending.remove(Pending.find(SU));
2274 /// If this queue only has one ready candidate, return it. As a side effect,
2275 /// defer any nodes that now hit a hazard, and advance the cycle until at least
2276 /// one node is ready. If multiple instructions are ready, return NULL.
2277 SUnit *SchedBoundary::pickOnlyChoice() {
2282 // Defer any ready instrs that now have a hazard.
2283 for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
2284 if (checkHazard(*I)) {
2286 I = Available.remove(I);
2292 for (unsigned i = 0; Available.empty(); ++i) {
2293 // FIXME: Re-enable assert once PR20057 is resolved.
2294 // assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
2295 // "permanent hazard");
2297 bumpCycle(CurrCycle + 1);
2301 DEBUG(Pending.dump());
2302 DEBUG(Available.dump());
2304 if (Available.size() == 1)
2305 return *Available.begin();
2309 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2310 // This is useful information to dump after bumpNode.
2311 // Note that the Queue contents are more useful before pickNodeFromQueue.
2312 LLVM_DUMP_METHOD void SchedBoundary::dumpScheduledState() const {
2315 if (ZoneCritResIdx) {
2316 ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
2317 ResCount = getResourceCount(ZoneCritResIdx);
2319 ResFactor = SchedModel->getMicroOpFactor();
2320 ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
2322 unsigned LFactor = SchedModel->getLatencyFactor();
2323 dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
2324 << " Retired: " << RetiredMOps;
2325 dbgs() << "\n Executed: " << getExecutedCount() / LFactor << "c";
2326 dbgs() << "\n Critical: " << ResCount / LFactor << "c, "
2327 << ResCount / ResFactor << " "
2328 << SchedModel->getResourceName(ZoneCritResIdx)
2329 << "\n ExpectedLatency: " << ExpectedLatency << "c\n"
2330 << (IsResourceLimited ? " - Resource" : " - Latency")
2335 //===----------------------------------------------------------------------===//
2336 // GenericScheduler - Generic implementation of MachineSchedStrategy.
2337 //===----------------------------------------------------------------------===//
2339 void GenericSchedulerBase::SchedCandidate::
2340 initResourceDelta(const ScheduleDAGMI *DAG,
2341 const TargetSchedModel *SchedModel) {
2342 if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
2345 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2346 for (TargetSchedModel::ProcResIter
2347 PI = SchedModel->getWriteProcResBegin(SC),
2348 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2349 if (PI->ProcResourceIdx == Policy.ReduceResIdx)
2350 ResDelta.CritResources += PI->Cycles;
2351 if (PI->ProcResourceIdx == Policy.DemandResIdx)
2352 ResDelta.DemandedResources += PI->Cycles;
/// Set the CandPolicy for a scheduling zone given the current resources and
2357 /// latencies inside and outside the zone.
2358 void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
2359 SchedBoundary &CurrZone,
2360 SchedBoundary *OtherZone) {
2361 // Apply preemptive heuristics based on the total latency and resources
2362 // inside and outside this zone. Potential stalls should be considered before
2363 // following this policy.
2365 // Compute remaining latency. We need this both to determine whether the
2366 // overall schedule has become latency-limited and whether the instructions
2367 // outside this zone are resource or latency limited.
// The "dependent" latency is updated incrementally during scheduling as the
// max height/depth of scheduled nodes minus the cycles since it was
// scheduled:
//   DLat = max { N.depth - (CurrCycle - N.ReadyCycle) | N in Zone }
//
// The "independent" latency is the max ready queue depth:
//   ILat = max { N.depth | N in Available|Pending }
2377 // RemainingLatency is the greater of independent and dependent latency.
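// For example (hypothetical numbers): with a dependent latency of 8c, a
// maximum unscheduled latency of 10c in Available and 6c in Pending,
// RemLatency = max(8, 10, 6) = 10c.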
2378 unsigned RemLatency = CurrZone.getDependentLatency();
2379 RemLatency = std::max(RemLatency,
2380 CurrZone.findMaxLatency(CurrZone.Available.elements()));
2381 RemLatency = std::max(RemLatency,
2382 CurrZone.findMaxLatency(CurrZone.Pending.elements()));
2384 // Compute the critical resource outside the zone.
2385 unsigned OtherCritIdx = 0;
2386 unsigned OtherCount =
2387 OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
2389 bool OtherResLimited = false;
2390 if (SchedModel->hasInstrSchedModel()) {
2391 unsigned LFactor = SchedModel->getLatencyFactor();
2392 OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
2394 // Schedule aggressively for latency in PostRA mode. We don't check for
2395 // acyclic latency during PostRA, and highly out-of-order processors will
2396 // skip PostRA scheduling.
2397 if (!OtherResLimited) {
2398 if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
2399 Policy.ReduceLatency |= true;
2400 DEBUG(dbgs() << " " << CurrZone.Available.getName()
2401 << " RemainingLatency " << RemLatency << " + "
2402 << CurrZone.getCurrCycle() << "c > CritPath "
2403 << Rem.CriticalPath << "\n");
2406 // If the same resource is limiting inside and outside the zone, do nothing.
2407 if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
2411 if (CurrZone.isResourceLimited()) {
2412 dbgs() << " " << CurrZone.Available.getName() << " ResourceLimited: "
2413 << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
2416 if (OtherResLimited)
2417 dbgs() << " RemainingLimit: "
2418 << SchedModel->getResourceName(OtherCritIdx) << "\n";
2419 if (!CurrZone.isResourceLimited() && !OtherResLimited)
2420 dbgs() << " Latency limited both directions.\n");
2422 if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
2423 Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
2425 if (OtherResLimited)
2426 Policy.DemandResIdx = OtherCritIdx;
2430 const char *GenericSchedulerBase::getReasonStr(
2431 GenericSchedulerBase::CandReason Reason) {
2433 case NoCand: return "NOCAND ";
2434 case Only1: return "ONLY1 ";
2435 case PhysRegCopy: return "PREG-COPY ";
2436 case RegExcess: return "REG-EXCESS";
2437 case RegCritical: return "REG-CRIT ";
2438 case Stall: return "STALL ";
2439 case Cluster: return "CLUSTER ";
2440 case Weak: return "WEAK ";
2441 case RegMax: return "REG-MAX ";
2442 case ResourceReduce: return "RES-REDUCE";
2443 case ResourceDemand: return "RES-DEMAND";
2444 case TopDepthReduce: return "TOP-DEPTH ";
2445 case TopPathReduce: return "TOP-PATH ";
2446 case BotHeightReduce:return "BOT-HEIGHT";
2447 case BotPathReduce: return "BOT-PATH ";
2448 case NextDefUse: return "DEF-USE ";
2449 case NodeOrder: return "ORDER ";
2451 llvm_unreachable("Unknown reason!");
2454 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
2456 unsigned ResIdx = 0;
2457 unsigned Latency = 0;
2458 switch (Cand.Reason) {
2462 P = Cand.RPDelta.Excess;
2465 P = Cand.RPDelta.CriticalMax;
2468 P = Cand.RPDelta.CurrentMax;
2470 case ResourceReduce:
2471 ResIdx = Cand.Policy.ReduceResIdx;
2473 case ResourceDemand:
2474 ResIdx = Cand.Policy.DemandResIdx;
2476 case TopDepthReduce:
2477 Latency = Cand.SU->getDepth();
2480 Latency = Cand.SU->getHeight();
2482 case BotHeightReduce:
2483 Latency = Cand.SU->getHeight();
2486 Latency = Cand.SU->getDepth();
2489 dbgs() << " Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
2491 dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
2492 << ":" << P.getUnitInc() << " ";
2496 dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
2500 dbgs() << " " << Latency << " cycles ";
2507 /// Return true if this heuristic determines order.
2508 static bool tryLess(int TryVal, int CandVal,
2509 GenericSchedulerBase::SchedCandidate &TryCand,
2510 GenericSchedulerBase::SchedCandidate &Cand,
2511 GenericSchedulerBase::CandReason Reason) {
2512 if (TryVal < CandVal) {
2513 TryCand.Reason = Reason;
2516 if (TryVal > CandVal) {
2517 if (Cand.Reason > Reason)
2518 Cand.Reason = Reason;
2524 static bool tryGreater(int TryVal, int CandVal,
2525 GenericSchedulerBase::SchedCandidate &TryCand,
2526 GenericSchedulerBase::SchedCandidate &Cand,
2527 GenericSchedulerBase::CandReason Reason) {
2528 if (TryVal > CandVal) {
2529 TryCand.Reason = Reason;
2532 if (TryVal < CandVal) {
2533 if (Cand.Reason > Reason)
2534 Cand.Reason = Reason;
2540 static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
2541 GenericSchedulerBase::SchedCandidate &Cand,
2542 SchedBoundary &Zone) {
2544 if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
2545 if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2546 TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
2549 if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2550 TryCand, Cand, GenericSchedulerBase::TopPathReduce))
2553 if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
2554 if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2555 TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
2558 if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2559 TryCand, Cand, GenericSchedulerBase::BotPathReduce))
2565 static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) {
2566 DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2567 << GenericSchedulerBase::getReasonStr(Reason) << '\n');
2570 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) {
2571 tracePick(Cand.Reason, Cand.AtTop);
2574 void GenericScheduler::initialize(ScheduleDAGMI *dag) {
2575 assert(dag->hasVRegLiveness() &&
2576 "(PreRA)GenericScheduler needs vreg liveness");
2577 DAG = static_cast<ScheduleDAGMILive*>(dag);
2578 SchedModel = DAG->getSchedModel();
2581 Rem.init(DAG, SchedModel);
2582 Top.init(DAG, SchedModel, &Rem);
2583 Bot.init(DAG, SchedModel, &Rem);
2585 // Initialize resource counts.
2587 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
2588 // are disabled, then these HazardRecs will be disabled.
2589 const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
2590 if (!Top.HazardRec) {
2592 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2595 if (!Bot.HazardRec) {
2597 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2600 TopCand.SU = nullptr;
2601 BotCand.SU = nullptr;
2604 /// Initialize the per-region scheduling policy.
2605 void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
2606 MachineBasicBlock::iterator End,
2607 unsigned NumRegionInstrs) {
2608 const MachineFunction &MF = *Begin->getParent()->getParent();
2609 const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
2611 // Avoid setting up the register pressure tracker for small regions to save
2612 // compile time. As a rough heuristic, only track pressure when the number of
2613 // schedulable instructions exceeds half the integer register file.
2614 RegionPolicy.ShouldTrackPressure = true;
2615 for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
2616 MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
2617 if (TLI->isTypeLegal(LegalIntVT)) {
2618 unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
2619 TLI->getRegClassFor(LegalIntVT));
2620 RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
2624 // For generic targets, we default to bottom-up, because it's simpler and more
2625 // compile-time optimizations have been implemented in that direction.
2626 RegionPolicy.OnlyBottomUp = true;
2628 // Allow the subtarget to override default policy.
2629 MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs);
2631 // After subtarget overrides, apply command line options.
2632 if (!EnableRegPressure)
2633 RegionPolicy.ShouldTrackPressure = false;
// Check whether -misched-topdown/bottomup can force or unforce the scheduling direction.
2636 // e.g. -misched-bottomup=false allows scheduling in both directions.
2637 assert((!ForceTopDown || !ForceBottomUp) &&
2638 "-misched-topdown incompatible with -misched-bottomup");
2639 if (ForceBottomUp.getNumOccurrences() > 0) {
2640 RegionPolicy.OnlyBottomUp = ForceBottomUp;
2641 if (RegionPolicy.OnlyBottomUp)
2642 RegionPolicy.OnlyTopDown = false;
2644 if (ForceTopDown.getNumOccurrences() > 0) {
2645 RegionPolicy.OnlyTopDown = ForceTopDown;
2646 if (RegionPolicy.OnlyTopDown)
2647 RegionPolicy.OnlyBottomUp = false;
2651 void GenericScheduler::dumpPolicy() const {
2652 // Cannot completely remove virtual function even in release mode.
2653 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2654 dbgs() << "GenericScheduler RegionPolicy: "
2655 << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
2656 << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
2657 << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
2662 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
2663 /// critical path by more cycles than it takes to drain the instruction buffer.
/// We estimate an upper bound on in-flight instructions as:
2666 /// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
2667 /// InFlightIterations = AcyclicPath / CyclesPerIteration
2668 /// InFlightResources = InFlightIterations * LoopResources
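///
/// For example (hypothetical numbers): with CyclicPath = 10c, AcyclicPath =
/// 40c, and 8 micro-ops issued per iteration, roughly 40 / 10 = 4 iterations
/// are in flight, requiring about 4 * 8 = 32 micro-ops of buffering; if the
/// subtarget's micro-op buffer holds fewer, the loop is acyclic-latency
/// limited.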
2670 /// TODO: Check execution resources in addition to IssueCount.
2671 void GenericScheduler::checkAcyclicLatency() {
2672 if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
2675 // Scaled number of cycles per loop iteration.
2676 unsigned IterCount =
2677 std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
2679 // Scaled acyclic critical path.
2680 unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
2681 // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
2682 unsigned InFlightCount =
2683 (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
2684 unsigned BufferLimit =
2685 SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
2687 Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
2689 DEBUG(dbgs() << "IssueCycles="
2690 << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
2691 << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
2692 << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
2693 << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
2694 << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
2695 if (Rem.IsAcyclicLatencyLimited)
2696 dbgs() << " ACYCLIC LATENCY LIMIT\n");
2699 void GenericScheduler::registerRoots() {
2700 Rem.CriticalPath = DAG->ExitSU.getDepth();
// Some roots may not feed into ExitSU. Check all of them just in case.
2703 for (const SUnit *SU : Bot.Available) {
2704 if (SU->getDepth() > Rem.CriticalPath)
2705 Rem.CriticalPath = SU->getDepth();
2707 DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
2708 if (DumpCriticalPathLength) {
2709 errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
2712 if (EnableCyclicPath && SchedModel->getMicroOpBufferSize() > 0) {
2713 Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
2714 checkAcyclicLatency();
2718 static bool tryPressure(const PressureChange &TryP,
2719 const PressureChange &CandP,
2720 GenericSchedulerBase::SchedCandidate &TryCand,
2721 GenericSchedulerBase::SchedCandidate &Cand,
2722 GenericSchedulerBase::CandReason Reason,
2723 const TargetRegisterInfo *TRI,
2724 const MachineFunction &MF) {
2725 // If one candidate decreases and the other increases, go with it.
2726 // Invalid candidates have UnitInc==0.
2727 if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
// Do not compare the magnitude of pressure changes between the top and
// bottom boundaries.
2733 if (Cand.AtTop != TryCand.AtTop)
2736 // If both candidates affect the same set in the same boundary, go with the
2737 // smallest increase.
2738 unsigned TryPSet = TryP.getPSetOrMax();
2739 unsigned CandPSet = CandP.getPSetOrMax();
2740 if (TryPSet == CandPSet) {
2741 return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
2745 int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
2746 std::numeric_limits<int>::max();
2748 int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
2749 std::numeric_limits<int>::max();
2751 // If the candidates are decreasing pressure, reverse priority.
2752 if (TryP.getUnitInc() < 0)
2753 std::swap(TryRank, CandRank);
2754 return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
2757 static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2758 return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2761 /// Minimize physical register live ranges. Regalloc wants them adjacent to
2762 /// their physreg def/use.
2764 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2765 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2766 /// with the operation that produces or consumes the physreg. We'll do this when
2767 /// regalloc has support for parallel copies.
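///
/// For example (hypothetical operands), when scheduling top-down:
/// %vreg = COPY %physreg is picked as soon as its physreg def has been
/// scheduled, keeping the copy adjacent to it, while %physreg = COPY %vreg
/// with no remaining successors sits at the region boundary and is deferred.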
2768 static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
2769 const MachineInstr *MI = SU->getInstr();
2773 unsigned ScheduledOper = isTop ? 1 : 0;
2774 unsigned UnscheduledOper = isTop ? 0 : 1;
// If we have already scheduled the physreg producer/consumer, immediately
2776 // schedule the copy.
2777 if (TargetRegisterInfo::isPhysicalRegister(
2778 MI->getOperand(ScheduledOper).getReg()))
2780 // If the physreg is at the boundary, defer it. Otherwise schedule it
2781 // immediately to free the dependent. We can hoist the copy later.
2782 bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2783 if (TargetRegisterInfo::isPhysicalRegister(
2784 MI->getOperand(UnscheduledOper).getReg()))
2785 return AtBoundary ? -1 : 1;
2789 void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
2791 const RegPressureTracker &RPTracker,
2792 RegPressureTracker &TempTracker) {
2795 if (DAG->isTrackingPressure()) {
2797 TempTracker.getMaxDownwardPressureDelta(
2798 Cand.SU->getInstr(),
2800 DAG->getRegionCriticalPSets(),
2801 DAG->getRegPressure().MaxSetPressure);
2803 if (VerifyScheduling) {
2804 TempTracker.getMaxUpwardPressureDelta(
2805 Cand.SU->getInstr(),
2806 &DAG->getPressureDiff(Cand.SU),
2808 DAG->getRegionCriticalPSets(),
2809 DAG->getRegPressure().MaxSetPressure);
2811 RPTracker.getUpwardPressureDelta(
2812 Cand.SU->getInstr(),
2813 DAG->getPressureDiff(Cand.SU),
2815 DAG->getRegionCriticalPSets(),
2816 DAG->getRegPressure().MaxSetPressure);
2820 DEBUG(if (Cand.RPDelta.Excess.isValid())
2821 dbgs() << " Try SU(" << Cand.SU->NodeNum << ") "
2822 << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet())
2823 << ":" << Cand.RPDelta.Excess.getUnitInc() << "\n");
/// Apply a set of heuristics to a new candidate. Heuristics are currently
2827 /// hierarchical. This may be more efficient than a graduated cost model because
2828 /// we don't need to evaluate all aspects of the model for each node in the
2829 /// queue. But it's really done to make the heuristics easier to debug and
2830 /// statistically analyze.
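///
/// For example, because the checks below are ordered, a candidate that avoids
/// exceeding the target's register limit (REG-EXCESS) wins outright over one
/// that would merely keep clustered nodes together (CLUSTER), and none of the
/// lower-priority heuristics are evaluated for that pair.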
2832 /// \param Cand provides the policy and current best candidate.
2833 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
2834 /// \param Zone describes the scheduled zone that we are extending, or nullptr
/// if Cand is from a different zone than TryCand.
2836 void GenericScheduler::tryCandidate(SchedCandidate &Cand,
2837 SchedCandidate &TryCand,
2838 SchedBoundary *Zone) {
2839 // Initialize the candidate if needed.
2840 if (!Cand.isValid()) {
2841 TryCand.Reason = NodeOrder;
2845 if (tryGreater(biasPhysRegCopy(TryCand.SU, TryCand.AtTop),
2846 biasPhysRegCopy(Cand.SU, Cand.AtTop),
2847 TryCand, Cand, PhysRegCopy))
2850 // Avoid exceeding the target's limit.
2851 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
2852 Cand.RPDelta.Excess,
2853 TryCand, Cand, RegExcess, TRI,
2857 // Avoid increasing the max critical pressure in the scheduled region.
2858 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
2859 Cand.RPDelta.CriticalMax,
2860 TryCand, Cand, RegCritical, TRI,
// We only compare a subset of features when comparing nodes between the
// Top and Bottom boundaries. Some properties are simply incomparable; in many
// other instances we should only override the other boundary if something
// is a clearly good pick on one boundary. Skip heuristics that are more
2868 // "tie-breaking" in nature.
2869 bool SameBoundary = Zone != nullptr;
2871 // For loops that are acyclic path limited, aggressively schedule for
// latency. Within a single cycle, whenever CurrMOps > 0, allow normal
2873 // heuristics to take precedence.
2874 if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
2875 tryLatency(TryCand, Cand, *Zone))
2878 // Prioritize instructions that read unbuffered resources by stall cycles.
2879 if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
2880 Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
2884 // Keep clustered nodes together to encourage downstream peephole
2885 // optimizations which may reduce resource requirements.
2887 // This is a best effort to set things up for a post-RA pass. Optimizations
2888 // like generating loads of multiple registers should ideally be done within
2889 // the scheduler pass by combining the loads during DAG postprocessing.
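//
// For example (hypothetical target), two adjacent loads that a DAG mutation
// marked as a cluster are picked back-to-back here, so a post-RA pass can
// combine them into a single load-multiple instruction.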
2890 const SUnit *CandNextClusterSU =
2891 Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
2892 const SUnit *TryCandNextClusterSU =
2893 TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
2894 if (tryGreater(TryCand.SU == TryCandNextClusterSU,
2895 Cand.SU == CandNextClusterSU,
2896 TryCand, Cand, Cluster))
2900 // Weak edges are for clustering and other constraints.
2901 if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
2902 getWeakLeft(Cand.SU, Cand.AtTop),
2903 TryCand, Cand, Weak))
2907 // Avoid increasing the max pressure of the entire region.
2908 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
2909 Cand.RPDelta.CurrentMax,
2910 TryCand, Cand, RegMax, TRI,
2915 // Avoid critical resource consumption and balance the schedule.
2916 TryCand.initResourceDelta(DAG, SchedModel);
2917 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
2918 TryCand, Cand, ResourceReduce))
2920 if (tryGreater(TryCand.ResDelta.DemandedResources,
2921 Cand.ResDelta.DemandedResources,
2922 TryCand, Cand, ResourceDemand))
2925 // Avoid serializing long latency dependence chains.
2926 // For acyclic path limited loops, latency was already checked above.
2927 if (!RegionPolicy.DisableLatencyHeuristic && TryCand.Policy.ReduceLatency &&
2928 !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone))
2931 // Fall through to original instruction order.
2932 if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
2933 || (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
2934 TryCand.Reason = NodeOrder;
2939 /// Pick the best candidate from the queue.
2941 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
2942 /// DAG building. To adjust for the current scheduling location we need to
2943 /// maintain the number of vreg uses remaining to be top-scheduled.
2944 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
2945 const CandPolicy &ZonePolicy,
2946 const RegPressureTracker &RPTracker,
2947 SchedCandidate &Cand) {
2948 // getMaxPressureDelta temporarily modifies the tracker.
2949 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
2951 ReadyQueue &Q = Zone.Available;
2952 for (SUnit *SU : Q) {
2954 SchedCandidate TryCand(ZonePolicy);
2955 initCandidate(TryCand, SU, Zone.isTop(), RPTracker, TempTracker);
2956 // Pass SchedBoundary only when comparing nodes from the same boundary.
2957 SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
2958 tryCandidate(Cand, TryCand, ZoneArg);
2959 if (TryCand.Reason != NoCand) {
2960 // Initialize resource delta if needed in case future heuristics query it.
2961 if (TryCand.ResDelta == SchedResourceDelta())
2962 TryCand.initResourceDelta(DAG, SchedModel);
2963 Cand.setBest(TryCand);
2964 DEBUG(traceCandidate(Cand));
2969 /// Pick the best candidate node from either the top or bottom queue.
2970 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
2971 // Schedule as far as possible in the direction of no choice. This is most
2972 // efficient, but also provides the best heuristics for CriticalPSets.
2973 if (SUnit *SU = Bot.pickOnlyChoice()) {
2975 tracePick(Only1, false);
2978 if (SUnit *SU = Top.pickOnlyChoice()) {
2980 tracePick(Only1, true);
2983 // Set the bottom-up policy based on the state of the current bottom zone and
2984 // the instructions outside the zone, including the top zone.
2985 CandPolicy BotPolicy;
2986 setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
2987 // Set the top-down policy based on the state of the current top zone and
2988 // the instructions outside the zone, including the bottom zone.
2989 CandPolicy TopPolicy;
2990 setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);
2992 // See if BotCand is still valid (because we previously scheduled from Top).
2993 DEBUG(dbgs() << "Picking from Bot:\n");
2994 if (!BotCand.isValid() || BotCand.SU->isScheduled ||
2995 BotCand.Policy != BotPolicy) {
2996 BotCand.reset(CandPolicy());
2997 pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
2998 assert(BotCand.Reason != NoCand && "failed to find the first candidate");
3000 DEBUG(traceCandidate(BotCand));
3002 if (VerifyScheduling) {
3003 SchedCandidate TCand;
3004 TCand.reset(CandPolicy());
3005 pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
3006 assert(TCand.SU == BotCand.SU &&
3007 "Last pick result should correspond to re-picking right now");
3012 // Check if the top Q has a better candidate.
3013 DEBUG(dbgs() << "Picking from Top:\n");
3014 if (!TopCand.isValid() || TopCand.SU->isScheduled ||
3015 TopCand.Policy != TopPolicy) {
3016 TopCand.reset(CandPolicy());
3017 pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
3018 assert(TopCand.Reason != NoCand && "failed to find the first candidate");
3020 DEBUG(traceCandidate(TopCand));
3022 if (VerifyScheduling) {
3023 SchedCandidate TCand;
3024 TCand.reset(CandPolicy());
3025 pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
3026 assert(TCand.SU == TopCand.SU &&
3027 "Last pick result should correspond to re-picking right now");
3032 // Pick best from BotCand and TopCand.
3033 assert(BotCand.isValid());
3034 assert(TopCand.isValid());
3035 SchedCandidate Cand = BotCand;
3036 TopCand.Reason = NoCand;
3037 tryCandidate(Cand, TopCand, nullptr);
3038 if (TopCand.Reason != NoCand) {
3039 Cand.setBest(TopCand);
3040 DEBUG(traceCandidate(Cand));
3043 IsTopNode = Cand.AtTop;
3048 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
3049 SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
3050 if (DAG->top() == DAG->bottom()) {
3051 assert(Top.Available.empty() && Top.Pending.empty() &&
3052 Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
3057 if (RegionPolicy.OnlyTopDown) {
3058 SU = Top.pickOnlyChoice();
3060 CandPolicy NoPolicy;
3061 TopCand.reset(NoPolicy);
3062 pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
3063 assert(TopCand.Reason != NoCand && "failed to find a candidate");
3068 } else if (RegionPolicy.OnlyBottomUp) {
3069 SU = Bot.pickOnlyChoice();
3071 CandPolicy NoPolicy;
3072 BotCand.reset(NoPolicy);
3073 pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
3074 assert(BotCand.Reason != NoCand && "failed to find a candidate");
3080 SU = pickNodeBidirectional(IsTopNode);
3082 } while (SU->isScheduled);
3084 if (SU->isTopReady())
3085 Top.removeReady(SU);
3086 if (SU->isBottomReady())
3087 Bot.removeReady(SU);
3089 DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3093 void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
3094 MachineBasicBlock::iterator InsertPos = SU->getInstr();
3097 SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
3099 // Find already scheduled copies with a single physreg dependence and move
3100 // them just above the scheduled instruction.
3101 for (SDep &Dep : Deps) {
3102 if (Dep.getKind() != SDep::Data || !TRI->isPhysicalRegister(Dep.getReg()))
3104 SUnit *DepSU = Dep.getSUnit();
3105 if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
3107 MachineInstr *Copy = DepSU->getInstr();
3108 if (!Copy->isCopy())
3110 DEBUG(dbgs() << " Rescheduling physreg copy ";
3111 Dep.getSUnit()->dump(DAG));
3112 DAG->moveInstruction(Copy, InsertPos);
3116 /// Update the scheduler's state after scheduling a node. This is the same node
3117 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
/// update its state based on the current cycle before MachineSchedStrategy
3121 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
3122 /// them here. See comments in biasPhysRegCopy.
3123 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3125 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3127 if (SU->hasPhysRegUses)
3128 reschedulePhysRegCopies(SU, true);
3130 SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
3132 if (SU->hasPhysRegDefs)
3133 reschedulePhysRegCopies(SU, false);
3137 /// Create the standard converging machine scheduler. This will be used as the
3138 /// default scheduler if the target does not set a default.
3139 ScheduleDAGMILive *llvm::createGenericSchedLive(MachineSchedContext *C) {
3140 ScheduleDAGMILive *DAG =
3141 new ScheduleDAGMILive(C, llvm::make_unique<GenericScheduler>(C));
3142 // Register DAG post-processors.
3144 // FIXME: extend the mutation API to allow earlier mutations to instantiate
3145 // data and pass it to later mutations. Have a single mutation that gathers
3146 // the interesting nodes in one pass.
3147 DAG->addMutation(createCopyConstrainDAGMutation(DAG->TII, DAG->TRI));
static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
3152 return createGenericSchedLive(C);
3155 static MachineSchedRegistry
3156 GenericSchedRegistry("converge", "Standard converging scheduler.",
createConvergingSched);
3159 //===----------------------------------------------------------------------===//
3160 // PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
3161 //===----------------------------------------------------------------------===//
3163 void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
3165 SchedModel = DAG->getSchedModel();
3168 Rem.init(DAG, SchedModel);
3169 Top.init(DAG, SchedModel, &Rem);
3172 // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
3173 // or are disabled, then these HazardRecs will be disabled.
3174 const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
3175 if (!Top.HazardRec) {
3177 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
3182 void PostGenericScheduler::registerRoots() {
3183 Rem.CriticalPath = DAG->ExitSU.getDepth();
// Some roots may not feed into ExitSU. Check all of them just in case.
3186 for (const SUnit *SU : BotRoots) {
3187 if (SU->getDepth() > Rem.CriticalPath)
3188 Rem.CriticalPath = SU->getDepth();
DEBUG(dbgs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << '\n');
3191 if (DumpCriticalPathLength) {
3192 errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
/// Apply a set of heuristics to a new candidate for PostRA scheduling.
3198 /// \param Cand provides the policy and current best candidate.
3199 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3200 void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
3201 SchedCandidate &TryCand) {
3203 // Initialize the candidate if needed.
3204 if (!Cand.isValid()) {
3205 TryCand.Reason = NodeOrder;
3209 // Prioritize instructions that read unbuffered resources by stall cycles.
3210 if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
3211 Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3214 // Keep clustered nodes together.
3215 if (tryGreater(TryCand.SU == DAG->getNextClusterSucc(),
3216 Cand.SU == DAG->getNextClusterSucc(),
3217 TryCand, Cand, Cluster))
3220 // Avoid critical resource consumption and balance the schedule.
3221 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
3222 TryCand, Cand, ResourceReduce))
3224 if (tryGreater(TryCand.ResDelta.DemandedResources,
3225 Cand.ResDelta.DemandedResources,
3226 TryCand, Cand, ResourceDemand))
3229 // Avoid serializing long latency dependence chains.
3230 if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
3234 // Fall through to original instruction order.
3235 if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
3236 TryCand.Reason = NodeOrder;
3239 void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
3240 ReadyQueue &Q = Top.Available;
3241 for (SUnit *SU : Q) {
3242 SchedCandidate TryCand(Cand.Policy);
3244 TryCand.AtTop = true;
3245 TryCand.initResourceDelta(DAG, SchedModel);
3246 tryCandidate(Cand, TryCand);
3247 if (TryCand.Reason != NoCand) {
3248 Cand.setBest(TryCand);
3249 DEBUG(traceCandidate(Cand));
3254 /// Pick the next node to schedule.
3255 SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
3256 if (DAG->top() == DAG->bottom()) {
3257 assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
3262 SU = Top.pickOnlyChoice();
3264 tracePick(Only1, true);
3266 CandPolicy NoPolicy;
3267 SchedCandidate TopCand(NoPolicy);
3268 // Set the top-down policy based on the state of the current top zone and
3269 // the instructions outside the zone, including the bottom zone.
3270 setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
3271 pickNodeFromQueue(TopCand);
3272 assert(TopCand.Reason != NoCand && "failed to find a candidate");
3276 } while (SU->isScheduled);
3279 Top.removeReady(SU);
3281 DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3285 /// Called after ScheduleDAGMI has scheduled an instruction and updated
3286 /// scheduled/remaining flags in the DAG nodes.
3287 void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3288 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3292 ScheduleDAGMI *llvm::createGenericSchedPostRA(MachineSchedContext *C) {
3293 return new ScheduleDAGMI(C, llvm::make_unique<PostGenericScheduler>(C),
3294 /*RemoveKillFlags=*/true);
3297 //===----------------------------------------------------------------------===//
3298 // ILP Scheduler. Currently for experimental analysis of heuristics.
3299 //===----------------------------------------------------------------------===//
3303 /// \brief Order nodes by the ILP metric.
3305 const SchedDFSResult *DFSResult = nullptr;
3306 const BitVector *ScheduledTrees = nullptr;
3309 ILPOrder(bool MaxILP) : MaximizeILP(MaxILP) {}
3311 /// \brief Apply a less-than relation on node priority.
3313 /// (Return true if A comes after B in the Q.)
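///
/// For example (hypothetical values): when maximizing ILP, a node from a
/// subtree with ILP 4 pops before one from a subtree with ILP 2; when
/// minimizing, the order reverses.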
3314 bool operator()(const SUnit *A, const SUnit *B) const {
3315 unsigned SchedTreeA = DFSResult->getSubtreeID(A);
3316 unsigned SchedTreeB = DFSResult->getSubtreeID(B);
3317 if (SchedTreeA != SchedTreeB) {
3318 // Unscheduled trees have lower priority.
3319 if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
3320 return ScheduledTrees->test(SchedTreeB);
// Trees with shallower connections have lower priority.
3323 if (DFSResult->getSubtreeLevel(SchedTreeA)
3324 != DFSResult->getSubtreeLevel(SchedTreeB)) {
3325 return DFSResult->getSubtreeLevel(SchedTreeA)
3326 < DFSResult->getSubtreeLevel(SchedTreeB);
3330 return DFSResult->getILP(A) < DFSResult->getILP(B);
3332 return DFSResult->getILP(A) > DFSResult->getILP(B);
/// \brief Schedule based on the ILP metric.
class ILPScheduler : public MachineSchedStrategy {
  ScheduleDAGMILive *DAG = nullptr;
  ILPOrder Cmp;

  std::vector<SUnit*> ReadyQ;

public:
  ILPScheduler(bool MaximizeILP) : Cmp(MaximizeILP) {}

  void initialize(ScheduleDAGMI *dag) override {
    assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
    DAG = static_cast<ScheduleDAGMILive*>(dag);
    DAG->computeDFSResult();
    Cmp.DFSResult = DAG->getDFSResult();
    Cmp.ScheduledTrees = &DAG->getScheduledTrees();
    ReadyQ.clear();
  }

  void registerRoots() override {
    // Restore the heap in ReadyQ with the updated DFS results.
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  /// Callback to select the highest priority node from the ready Q.
  SUnit *pickNode(bool &IsTopNode) override {
    if (ReadyQ.empty()) return nullptr;
    std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
    SUnit *SU = ReadyQ.back();
    ReadyQ.pop_back();
    IsTopNode = false;
    DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
          << " ILP: " << DAG->getDFSResult()->getILP(SU)
          << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
          << DAG->getDFSResult()->getSubtreeLevel(
            DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
          << "Scheduling " << *SU->getInstr());
    return SU;
  }

  /// \brief Scheduler callback to notify that a new subtree is scheduled.
  void scheduleTree(unsigned SubtreeID) override {
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Callback after a node is scheduled. Mark a newly scheduled tree, notify
  /// DFSResults, and resort the priority Q.
  void schedNode(SUnit *SU, bool IsTopNode) override {
    assert(!IsTopNode && "SchedDFSResult needs bottom-up");
  }

  void releaseTopNode(SUnit *) override { /*only called for top roots*/ }

  void releaseBottomNode(SUnit *SU) override {
    ReadyQ.push_back(SU);
    std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }
};

} // end anonymous namespace

static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, llvm::make_unique<ILPScheduler>(true));
}

static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, llvm::make_unique<ILPScheduler>(false));
}

static MachineSchedRegistry ILPMaxRegistry(
  "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
static MachineSchedRegistry ILPMinRegistry(
  "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);

//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace {

/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};

/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
  bool IsAlternating;
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false>>
    TopQ;

  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true>>
    BottomQ;

public:
  InstructionShuffler(bool alternate, bool topdown)
    : IsAlternating(alternate), IsTopDown(topdown) {}

  void initialize(ScheduleDAGMI*) override {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  SUnit *pickNode(bool &IsTopNode) override {
    SUnit *SU;
    if (IsTopDown) {
      do {
        if (TopQ.empty()) return nullptr;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    } else {
      do {
        if (BottomQ.empty()) return nullptr;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  void schedNode(SUnit *SU, bool IsTopNode) override {}

  void releaseTopNode(SUnit *SU) override {
    TopQ.push(SU);
  }
  void releaseBottomNode(SUnit *SU) override {
    BottomQ.push(SU);
  }
};

} // end anonymous namespace

static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMILive(
      C, llvm::make_unique<InstructionShuffler>(Alternate, TopDown));
}

static MachineSchedRegistry ShufflerRegistry(
    "shuffle", "Shuffle machine instructions alternating directions",
    createInstructionShuffler);
#endif // !NDEBUG

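// Usage sketch: in builds with asserts enabled (the shuffler is compiled out
// under NDEBUG), the same -misched mechanism selects it:
//
//   llc -misched=shuffle foo.ll                   // alternate directions
//   llc -misched=shuffle -misched-topdown foo.ll  // shuffle top-down only
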
//===----------------------------------------------------------------------===//
// GraphWriter support for ScheduleDAGMILive.
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace llvm {

template<> struct GraphTraits<
  ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};

template<>
struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const ScheduleDAG *G) {
    return G->MF.getName();
  }

  static bool renderGraphFromBottomUp() {
    return true;
  }

  static bool isNodeHidden(const SUnit *Node) {
    if (ViewMISchedCutoff == 0)
      return false;
    return (Node->Preds.size() > ViewMISchedCutoff
           || Node->Succs.size() > ViewMISchedCutoff);
  }

  /// If you want to override the dot attributes printed for a particular
  /// edge, override this method.
  static std::string getEdgeAttributes(const SUnit *Node,
                                       SUnitIterator EI,
                                       const ScheduleDAG *Graph) {
    if (EI.isArtificialDep())
      return "color=cyan,style=dashed";
    if (EI.isCtrlDep())
      return "color=blue,style=dashed";
    return "";
  }

  static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
    std::string Str;
    raw_string_ostream SS(Str);
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    SS << "SU:" << SU->NodeNum;
    if (DFS)
      SS << " I:" << DFS->getNumInstrs(SU);
    return SS.str();
  }

  static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
    return G->getGraphNodeLabel(SU);
  }

  static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
    std::string Str("shape=Mrecord");
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    if (DFS) {
      Str += ",style=filled,fillcolor=\"#";
      Str += DOT::getColorString(DFS->getSubtreeID(N));
      Str += '"';
    }
    return Str;
  }
};

} // end namespace llvm
#endif // NDEBUG

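// Example (illustrative; the exact color string is an assumption) of the dot
// attributes these traits produce for one node of a DAG with DFS results:
//
//   label:      "SU:5 I:2"   // node number plus instruction count
//   attributes: shape=Mrecord,style=filled,fillcolor="#<getColorString(ID)>"
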
/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
  ViewGraph(this, Name, false, Title);
#else
  errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif  // NDEBUG
}

/// Out-of-line implementation with no arguments is handy for gdb.
void ScheduleDAGMI::viewGraph() {
  viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
}
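
// Debugging sketch: in a debug build the no-argument form can be invoked from
// a debugger while stopped inside the scheduler, e.g. in gdb (assuming a
// ScheduleDAGMI* named DAG is in scope at the breakpoint):
//
//   (gdb) call DAG->viewGraph()
//
// Alternatively, -view-misched-dags renders every region as it is scheduled,
// and -view-misched-cutoff=N hides nodes with more than N preds/succs.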