//===- MachineScheduler.cpp - Machine Instruction Scheduler --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "machine-scheduler"

namespace llvm {

cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));

} // end namespace llvm

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph; provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessors/successors than cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
static cl::opt<bool> PrintDAGs("misched-print-dags", cl::Hidden,
                               cl::desc("Print schedule DAGs"));

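// Illustrative use of the debug filters above (the function name and block
// number are hypothetical): restrict scheduling to one block of one function
// while dumping its DAG:
//
//   llc -misched-print-dags -misched-only-func=foo -misched-only-block=2 ...
//
// scheduleRegions() below consults both options before scheduling a block.
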
#else
static const bool ViewMISchedDAGs = false;
static const bool PrintDAGs = false;
#endif // NDEBUG

/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
  cl::desc("Limit ready list to N instructions"), cl::init(256));

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}

void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext() {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {

/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};

} // end anonymous namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, DEBUG_TYPE,
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, DEBUG_TYPE,
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler() : MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler() : MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry<MachineSchedRegistry::ScheduleDAGCtor>
    MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry>>
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugInstr())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return priorNonDebug(MachineBasicBlock::const_iterator(I), Beg)
      .getNonConstIterator();
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugInstr())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  return nextIfDebug(MachineBasicBlock::const_iterator(I), End)
      .getNonConstIterator();
}

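// Illustrative behavior of the two helpers above, assuming a region
// containing [%a = ..., DBG_VALUE %a, %b = ...]: priorNonDebug() starting at
// %b steps over the DBG_VALUE and stops at %a, while nextIfDebug() starting
// at the DBG_VALUE advances to %b. Debug instructions therefore never act as
// scheduling positions.
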
/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}

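// A minimal sketch of how an alternative scheduler can hook into the
// selection logic above via MachineSchedRegistry; the "my-sched" name and
// factory below are hypothetical:
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return createGenericSchedLive(C); // substitute a custom ScheduleDAGMI
//   }
//   static MachineSchedRegistry
//       MySchedRegistry("my-sched", "Hypothetical scheduler.", createMySched);
//
// The scheduler then becomes selectable with -misched=my-sched, taking
// precedence over the target default from createMachineScheduler().
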
/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  LLVM_DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    LLVM_DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  LLVM_DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAScheduler()) {
    LLVM_DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  LLVM_DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
}

/// A region of an MBB for scheduling.
namespace {
struct SchedRegion {
  /// RegionBegin is the first instruction in the scheduling region, and
  /// RegionEnd is either MBB->end() or the scheduling boundary after the
  /// last instruction in the scheduling region. These iterators cannot refer
  /// to instructions outside of the identified scheduling region because
  /// those may be reordered before scheduling this region.
  MachineBasicBlock::iterator RegionBegin;
  MachineBasicBlock::iterator RegionEnd;
  unsigned NumRegionInstrs;

  SchedRegion(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E,
              unsigned N) :
    RegionBegin(B), RegionEnd(E), NumRegionInstrs(N) {}
};
} // end anonymous namespace

using MBBRegionsVector = SmallVector<SchedRegion, 16>;

static void
getSchedRegions(MachineBasicBlock *MBB,
                MBBRegionsVector &Regions,
                bool RegionsTopDown) {
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  MachineBasicBlock::iterator I = nullptr;
  for (MachineBasicBlock::iterator RegionEnd = MBB->end();
       RegionEnd != MBB->begin(); RegionEnd = I) {

    // Avoid decrementing RegionEnd for blocks with no terminator.
    if (RegionEnd != MBB->end() ||
        isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
      --RegionEnd;
    }

    // The next region starts above the previous region. Look backward in the
    // instruction stream until we find the nearest boundary.
    unsigned NumRegionInstrs = 0;
    I = RegionEnd;
    for (; I != MBB->begin(); --I) {
      MachineInstr &MI = *std::prev(I);
      if (isSchedBoundary(&MI, &*MBB, MF, TII))
        break;
      if (!MI.isDebugInstr())
        // MBB::size() uses instr_iterator to count. Here we need a bundle to
        // count as a single instruction.
        ++NumRegionInstrs;
    }

    Regions.push_back(SchedRegion(I, RegionEnd, NumRegionInstrs));
  }

  if (RegionsTopDown)
    std::reverse(Regions.begin(), Regions.end());
}

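// Worked example for getSchedRegions() (illustrative block contents): given
//
//   %1 = ...
//   %2 = ...
//   call @f
//   %3 = ...
//   <terminator>
//
// the call and the terminator are scheduling boundaries, so the block splits
// into the regions [%1, call) and [%3, terminator); no instruction may move
// across either boundary.
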
/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd). RegionEnd
    // points to the scheduling boundary at the bottom of the region. The DAG
    // does not include RegionEnd, but the region does (i.e. the next
    // RegionEnd is above the previous RegionBegin). If the current block has
    // no terminator then RegionEnd == MBB->end() for the bottom region.
    //
    // All the regions of MBB are first found and stored in MBBRegions, which
    // will be processed top-down if RegionsTopDown was initialized with true.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls. Instructions must not be
    // added to other regions than the current one without updating MBBRegions.

    MBBRegionsVector MBBRegions;
    getSchedRegions(&*MBB, MBBRegions, Scheduler.doMBBSchedRegionsTopDown());
    for (MBBRegionsVector::iterator R = MBBRegions.begin();
         R != MBBRegions.end(); ++R) {
      MachineBasicBlock::iterator I = R->RegionBegin;
      MachineBasicBlock::iterator RegionEnd = R->RegionEnd;
      unsigned NumRegionInstrs = R->NumRegionInstrs;

      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
      LLVM_DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB)
                        << " " << MBB->getName() << "\n  From: " << *I
                        << "    To: ";
                 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
                 else dbgs() << "End";
                 dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":%bb. " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates the original region iterators.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();
    }
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(*MBB);
  }
  Scheduler.finalizeSchedule();
}

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ReadyQueue::dump() const {
  dbgs() << "Queue " << Name << ": ";
  for (const SUnit *SU : Queue)
    dbgs() << SU->NodeNum << " ";
  dbgs() << "\n";
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() = default;

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

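// For instance, the memop clustering mutation below calls
// DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)); if SUa is already reachable
// from SUb, the Topo check above rejects the edge instead of creating a
// cycle.
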
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*SuccSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

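// Example of the ready-cycle update above (illustrative numbers): if SU
// became ready at TopReadyCycle 2 and the edge latency is 3, SuccSU cannot
// issue before cycle 5, so its TopReadyCycle is raised to at least 5.
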
/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SDep &Succ : SU->Succs)
    releaseSucc(SU, &Succ);
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*PredSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SDep &Pred : SU->Preds)
    releasePred(SU, &Pred);
}

void ScheduleDAGMI::startBlock(MachineBasicBlock *bb) {
  ScheduleDAGInstrs::startBlock(bb);
  SchedImpl->enterMBB(bb);
}

void ScheduleDAGMI::finishBlock() {
  SchedImpl->leaveMBB();
  ScheduleDAGInstrs::finishBlock();
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals.
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

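// The cutoff is a debugging aid: one illustrative workflow is to bisect over
// -misched-cutoff=N, increasing N until a miscompile first appears, which
// isolates the scheduling decision that changed program behavior.
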
/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  LLVM_DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  LLVM_DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  LLVM_DEBUG(dump());
  if (PrintDAGs) dump();
  if (ViewMISchedDAGs) viewGraph();

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (auto &m : Mutations)
    m->apply(this);
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (SUnit &SU : SUnits) {
    assert(!SU.isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU.biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!SU.NumPredsLeft)
      TopRoots.push_back(&SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!SU.NumSuccsLeft)
      BotRoots.push_back(&SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and set up scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SUnit *SU : TopRoots)
    SchedImpl->releaseTopNode(SU);

  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *>>::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      dumpNode(*SU);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

void ScheduleDAGMILive::collectVRegUses(SUnit &SU) {
  const MachineInstr &MI = *SU.getInstr();
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (!MO.readsReg())
      continue;
    if (TrackLaneMasks && !MO.isUse())
      continue;

    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;

    // Ignore re-defs.
    if (TrackLaneMasks) {
      bool FoundDef = false;
      for (const MachineOperand &MO2 : MI.operands()) {
        if (MO2.isReg() && MO2.isDef() && MO2.getReg() == Reg && !MO2.isDead()) {
          FoundDef = true;
          break;
        }
      }
      if (FoundDef)
        continue;
    }

    // Record this local VReg use.
    VReg2SUnitMultiMap::iterator UI = VRegUses.find(Reg);
    for (; UI != VRegUses.end(); ++UI) {
      if (UI->SU == &SU)
        break;
    }
    if (UI == VRegUses.end())
      VRegUses.insert(VReg2SUnit(Reg, LaneBitmask::getNone(), &SU));
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  assert((!ShouldTrackLaneMasks || ShouldTrackPressure) &&
         "ShouldTrackLaneMasks requires ShouldTrackPressure");
}

// Set up the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  VRegUses.clear();
  VRegUses.setUniverse(MRI.getNumVirtRegs());
  for (SUnit &SU : SUnits)
    collectVRegUses(SU);

  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  LLVM_DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    LLVM_DEBUG(dbgs() << "Live Thru: ";
               dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  LLVM_DEBUG(dbgs() << "Top Pressure:\n";
             dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
             dbgs() << "Bottom Pressure:\n";
             dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI););

  assert((BotRPTracker.getPos() == RegionEnd ||
          (RegionEnd->isDebugInstr() &&
           BotRPTracker.getPos() == priorNonDebug(RegionEnd, RegionBegin))) &&
         "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also
  // track the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
      RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      LLVM_DEBUG(dbgs() << TRI->getRegPressureSetName(i) << " Limit " << Limit
                        << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  LLVM_DEBUG(dbgs() << "Excess PSets: ";
             for (const PressureChange &RCPS
                  : RegionCriticalPSets) dbgs()
                 << TRI->getRegPressureSetName(RCPS.getPSet()) << " ";
             dbgs() << "\n");
}

void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (const PressureChange &PC : PDiff) {
    if (!PC.isValid())
      break;
    unsigned ID = PC.getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= (unsigned)std::numeric_limits<int16_t>::max())
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      LLVM_DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
                        << NewMaxPressure[ID]
                        << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ")
                        << Limit << "(+ " << BotRPTracker.getLiveThru()[ID]
                        << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    unsigned Reg = P.RegUnit;
    /// FIXME: Currently assuming single-use physregs.
    if (!TRI->isVirtualRegister(Reg))
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask.any();

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        LLVM_DEBUG(dbgs() << "  UpdateRegP: SU(" << SU.NodeNum << ") "
                          << printReg(Reg, TRI) << ':'
                          << PrintLaneMask(P.LaneMask) << ' ' << *SU.getInstr();
                   dbgs() << "              to "; PDiff.dump(*TRI););
      }
    } else {
      assert(P.LaneMask.any());
      LLVM_DEBUG(dbgs() << "  LiveReg: " << printVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized. However,
      // BotRPTracker must have a valid position. We want the value live into
      // the instruction or live out of the block, so ask for the previous
      // instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
          nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ =
              LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            LLVM_DEBUG(dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
                              << *SU->getInstr();
                       dbgs() << "              to "; PDiff.dump(*TRI););
          }
        }
      }
    }
  }
}

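// Illustrative effect of the lane-mask path above: when vreg %v has just
// become live (nonzero LaneMask), the remaining unscheduled uses of %v can no
// longer end its live range, so their PressureDiffs are decremented for %v's
// pressure sets; when %v has just become dead, the inverse adjustment is
// applied.
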
void ScheduleDAGMILive::dump() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  if (EntrySU.getInstr() != nullptr)
    dumpNodeAll(EntrySU);
  for (const SUnit &SU : SUnits) {
    dumpNodeAll(SU);
    if (ShouldTrackPressure) {
      dbgs() << "  Pressure Diff      : ";
      getPressureDiff(&SU).dump(*TRI);
    }
    dbgs() << "  Single Issue       : ";
    if (SchedModel.mustBeginGroup(SU.getInstr()) &&
        SchedModel.mustEndGroup(SU.getInstr()))
      dbgs() << "true;";
    else
      dbgs() << "false;";
    dbgs() << '\n';
  }
  if (ExitSU.getInstr() != nullptr)
    dumpNodeAll(ExitSU);
#endif
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  LLVM_DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  LLVM_DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  LLVM_DEBUG(dump());
  if (PrintDAGs) dump();
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}

/// Build the DAG and set up three register pressure trackers.
void ScheduleDAGMILive::buildDAGWithRegPressure() {
  if (!ShouldTrackPressure) {
    RPTracker.reset();
    RegionCriticalPSets.clear();
    buildSchedGraph(AA);
    return;
  }

  // Initialize the register pressure tracker used by buildSchedGraph.
  RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd)
    RPTracker.recede();

  // Build the DAG, and compute current register pressure.
  buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);

  // Initialize top/bottom trackers after computing region pressure.
  initRegPressure();
}

void ScheduleDAGMILive::computeDFSResult() {
  if (!DFSResult)
    DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);

  ScheduledTrees.clear();
  DFSResult->resize(SUnits.size());
  DFSResult->compute(SUnits);
  ScheduledTrees.resize(DFSResult->getNumSubtrees());
}

/// Compute the max cyclic critical path through the DAG. The scheduling DAG
/// only provides the critical path for single block loops. To handle loops that
/// span blocks, we could use the vreg path latencies provided by
/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
/// available for use in the scheduler.
///
/// The cyclic path estimation identifies a def-use pair that crosses the back
/// edge and considers the depth and height of the nodes. For example, consider
/// the following instruction sequence where each instruction has unit latency
/// and defines an eponymous virtual register:
///
/// a->b(a,c)->c(b)->d(c)->exit
///
/// The cyclic critical path is two cycles: b->c->b
/// The acyclic critical path is four cycles: a->b->c->d->exit
/// LiveOutHeight = height(c) = len(c->d->exit) = 2
/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
/// LiveInDepth = depth(b) = len(a->b) = 1
///
/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
/// CyclicCriticalPath = min(2, 2) = 2
///
/// This could be relevant to PostRA scheduling, but is currently implemented
/// assuming LiveIntervals.
unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
  // This only applies to a single-block loop.
  if (!BB->isSuccessor(BB))
    return 0;

  unsigned MaxCyclicLatency = 0;
  // Visit each live out vreg def to find def/use pairs that cross iterations.
  for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
    unsigned Reg = P.RegUnit;
    if (!TRI->isVirtualRegister(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
    if (!DefVNI)
      continue;

    MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
    const SUnit *DefSU = getSUnit(DefMI);
    if (!DefSU)
      continue;

    unsigned LiveOutHeight = DefSU->getHeight();
    unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
    // Visit all local users of the vreg def.
    for (const VReg2SUnit &V2SU
         : make_range(VRegUses.find(Reg), VRegUses.end())) {
      SUnit *SU = V2SU.SU;
      if (SU == &ExitSU)
        continue;

      // Only consider uses of the phi.
      LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
      if (!LRQ.valueIn()->isPHIDef())
        continue;

      // Assume that a path spanning two iterations is a cycle, which could
      // overestimate in strange cases. This allows cyclic latency to be
      // estimated as the minimum slack of the vreg's depth or height.
      unsigned CyclicLatency = 0;
      if (LiveOutDepth > SU->getDepth())
        CyclicLatency = LiveOutDepth - SU->getDepth();

      unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
      if (LiveInHeight > LiveOutHeight) {
        if (LiveInHeight - LiveOutHeight < CyclicLatency)
          CyclicLatency = LiveInHeight - LiveOutHeight;
      } else
        CyclicLatency = 0;

      LLVM_DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
                        << SU->NodeNum << ") = " << CyclicLatency << "c\n");
      if (CyclicLatency > MaxCyclicLatency)
        MaxCyclicLatency = CyclicLatency;
    }
  }
  LLVM_DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
  return MaxCyclicLatency;
}

/// Release ExitSU predecessors and set up scheduler queues. Re-position
/// the Top RP tracker in case the region beginning has changed.
void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
                                   ArrayRef<SUnit*> BotRoots) {
  ScheduleDAGMI::initQueues(TopRoots, BotRoots);
  if (ShouldTrackPressure) {
    assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
    TopRPTracker.setPos(CurrentTop);
  }
}

/// Move an instruction and update register pressure.
void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
  // Move the instruction to its new location in the instruction stream.
  MachineInstr *MI = SU->getInstr();

  if (IsTopNode) {
    assert(SU->isTopReady() && "node still has unscheduled dependencies");
    if (&*CurrentTop == MI)
      CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
    else {
      moveInstruction(MI, CurrentTop);
      TopRPTracker.setPos(MI);
    }

    if (ShouldTrackPressure) {
      // Update top scheduled pressure.
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      TopRPTracker.advance(RegOpers);
      assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
      LLVM_DEBUG(dbgs() << "Top Pressure:\n"; dumpRegSetPressure(
                     TopRPTracker.getRegSetPressureAtPos(), TRI););

      updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
    }
  } else {
    assert(SU->isBottomReady() && "node still has unscheduled dependencies");
    MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
    if (&*priorII == MI)
      CurrentBottom = priorII;
    else {
      if (&*CurrentTop == MI) {
        CurrentTop = nextIfDebug(++CurrentTop, priorII);
        TopRPTracker.setPos(CurrentTop);
      }
      moveInstruction(MI, CurrentBottom);
      CurrentBottom = MI;
      BotRPTracker.setPos(CurrentBottom);
    }
    if (ShouldTrackPressure) {
      RegisterOperands RegOpers;
      RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }

      if (BotRPTracker.getPos() != CurrentBottom)
        BotRPTracker.recedeSkipDebugValues();
      SmallVector<RegisterMaskPair, 8> LiveUses;
      BotRPTracker.recede(RegOpers, &LiveUses);
      assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
      LLVM_DEBUG(dbgs() << "Bottom Pressure:\n"; dumpRegSetPressure(
                     BotRPTracker.getRegSetPressureAtPos(), TRI););

      updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
      updatePressureDiffs(LiveUses);
    }
  }
}

//===----------------------------------------------------------------------===//
// BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
//===----------------------------------------------------------------------===//

namespace {

/// Post-process the DAG to create cluster edges between neighboring
/// loads or between neighboring stores.
class BaseMemOpClusterMutation : public ScheduleDAGMutation {
  struct MemOpInfo {
    SUnit *SU;
    MachineOperand *BaseOp;
    int64_t Offset;

    MemOpInfo(SUnit *su, MachineOperand *Op, int64_t ofs)
        : SU(su), BaseOp(Op), Offset(ofs) {}

    bool operator<(const MemOpInfo &RHS) const {
      if (BaseOp->getType() != RHS.BaseOp->getType())
        return BaseOp->getType() < RHS.BaseOp->getType();

      if (BaseOp->isReg())
        return std::make_tuple(BaseOp->getReg(), Offset, SU->NodeNum) <
               std::make_tuple(RHS.BaseOp->getReg(), RHS.Offset,
                               RHS.SU->NodeNum);
      if (BaseOp->isFI()) {
        const MachineFunction &MF =
            *BaseOp->getParent()->getParent()->getParent();
        const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
        bool StackGrowsDown = TFI.getStackGrowthDirection() ==
                              TargetFrameLowering::StackGrowsDown;
        // Can't use tuple comparison here since we might need to use a
        // different order when the stack grows down.
        if (BaseOp->getIndex() != RHS.BaseOp->getIndex())
          return StackGrowsDown ? BaseOp->getIndex() > RHS.BaseOp->getIndex()
                                : BaseOp->getIndex() < RHS.BaseOp->getIndex();

        if (Offset != RHS.Offset)
          return StackGrowsDown ? Offset > RHS.Offset : Offset < RHS.Offset;

        return SU->NodeNum < RHS.SU->NodeNum;
      }

      llvm_unreachable("MemOpClusterMutation only supports register or frame "
                       "base operands");
    }
  };

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  bool IsLoad;

public:
  BaseMemOpClusterMutation(const TargetInstrInfo *tii,
                           const TargetRegisterInfo *tri, bool IsLoad)
      : TII(tii), TRI(tri), IsLoad(IsLoad) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void clusterNeighboringMemOps(ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG);
};

class StoreClusterMutation : public BaseMemOpClusterMutation {
public:
  StoreClusterMutation(const TargetInstrInfo *tii,
                       const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, false) {}
};

class LoadClusterMutation : public BaseMemOpClusterMutation {
public:
  LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
      : BaseMemOpClusterMutation(tii, tri, true) {}
};

} // end anonymous namespace

namespace llvm {

std::unique_ptr<ScheduleDAGMutation>
createLoadClusterDAGMutation(const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI) {
  return EnableMemOpCluster ? llvm::make_unique<LoadClusterMutation>(TII, TRI)
                            : nullptr;
}

std::unique_ptr<ScheduleDAGMutation>
createStoreClusterDAGMutation(const TargetInstrInfo *TII,
                              const TargetRegisterInfo *TRI) {
  return EnableMemOpCluster ? llvm::make_unique<StoreClusterMutation>(TII, TRI)
                            : nullptr;
}

} // end namespace llvm

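// Targets typically attach these mutations when constructing their scheduler,
// e.g. (sketch, inside a target's createMachineScheduler() hook where DAG is
// a freshly created ScheduleDAGMILive):
//
//   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
//   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
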
void BaseMemOpClusterMutation::clusterNeighboringMemOps(
    ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG) {
  SmallVector<MemOpInfo, 32> MemOpRecords;
  for (SUnit *SU : MemOps) {
    MachineOperand *BaseOp;
    int64_t Offset;
    if (TII->getMemOperandWithOffset(*SU->getInstr(), BaseOp, Offset, TRI))
      MemOpRecords.push_back(MemOpInfo(SU, BaseOp, Offset));
  }
  if (MemOpRecords.size() < 2)
    return;

  llvm::sort(MemOpRecords);
  unsigned ClusterLength = 1;
  for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
    SUnit *SUa = MemOpRecords[Idx].SU;
    SUnit *SUb = MemOpRecords[Idx+1].SU;
    if (TII->shouldClusterMemOps(*MemOpRecords[Idx].BaseOp,
                                 *MemOpRecords[Idx + 1].BaseOp,
                                 ClusterLength) &&
        DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
      LLVM_DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
                        << SUb->NodeNum << ")\n");
      // Copy successor edges from SUa to SUb. Interleaving computation
      // dependent on SUa can prevent load combining due to register reuse.
      // Predecessor edges do not need to be copied from SUb to SUa since
      // nearby loads should have effectively the same inputs.
      for (const SDep &Succ : SUa->Succs) {
        if (Succ.getSUnit() == SUb)
          continue;
        LLVM_DEBUG(dbgs() << "  Copy Succ SU(" << Succ.getSUnit()->NodeNum
                          << ")\n");
        DAG->addEdge(Succ.getSUnit(), SDep(SUb, SDep::Artificial));
      }
      ++ClusterLength;
    } else
      ClusterLength = 1;
  }
}

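// Worked example (illustrative base register and offsets): three loads with
// MemOpInfo records (%x0, 0), (%x0, 8) and (%x0, 4) sort to offsets 0, 4, 8
// on the same base, so clustering is attempted for the pairs (0,4) and (4,8);
// each accepted pair also copies SUa's successor edges onto SUb so the pair
// stays adjacent in the final schedule.
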
/// Callback from DAG postProcessing to create cluster edges for loads.
void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
  ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);

  // Map DAG NodeNum to store chain ID.
  DenseMap<unsigned, unsigned> StoreChainIDs;
  // Map each store chain to a set of dependent MemOps.
  SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
  for (SUnit &SU : DAG->SUnits) {
    if ((IsLoad && !SU.getInstr()->mayLoad()) ||
        (!IsLoad && !SU.getInstr()->mayStore()))
      continue;

    unsigned ChainPredID = DAG->SUnits.size();
    for (const SDep &Pred : SU.Preds) {
      if (Pred.isCtrl()) {
        ChainPredID = Pred.getSUnit()->NodeNum;
        break;
      }
    }
    // Check if this chain-like pred has been seen
    // before. ChainPredID==MaxNodeID at the top of the schedule.
    unsigned NumChains = StoreChainDependents.size();
    std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
        StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
    if (Result.second)
      StoreChainDependents.resize(NumChains + 1);
    StoreChainDependents[Result.first->second].push_back(&SU);
  }

  // Iterate over the store chains.
  for (auto &SCD : StoreChainDependents)
    clusterNeighboringMemOps(SCD, DAG);
}

//===----------------------------------------------------------------------===//
// CopyConstrain - DAG post-processing to encourage copy elimination.
//===----------------------------------------------------------------------===//

namespace {

/// Post-process the DAG to create weak edges from all uses of a copy to
/// the one use that defines the copy's source vreg, most likely an induction
/// variable increment.
class CopyConstrain : public ScheduleDAGMutation {
  // Transient state.
  SlotIndex RegionBeginIdx;

  // RegionEndIdx is the slot index of the last non-debug instruction in the
  // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
  SlotIndex RegionEndIdx;

public:
  CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override;

protected:
  void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
};

} // end anonymous namespace

namespace llvm {

std::unique_ptr<ScheduleDAGMutation>
createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
                               const TargetRegisterInfo *TRI) {
  return llvm::make_unique<CopyConstrain>(TII, TRI);
}

} // end namespace llvm

/// constrainLocalCopy handles two possibilities:
/// 1) Local src:
/// I0:     = dst
/// I1: src = ...
/// I2:     = dst
/// I3: dst = src (copy)
/// (create pred->succ edges I0->I1, I2->I1)
///
/// 2) Local copy:
/// I0: dst = src (copy)
/// I1:     = dst
/// I2: src = ...
/// I3:     = dst
/// (create pred->succ edges I1->I2, I3->I2)
///
/// Although the MachineScheduler is currently constrained to single blocks,
/// this algorithm should handle extended blocks. An EBB is a set of
/// contiguously numbered blocks such that the previous block in the EBB is
/// always the single predecessor.
1704 void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
1705 LiveIntervals *LIS = DAG->getLIS();
1706 MachineInstr *Copy = CopySU->getInstr();
1708 // Check for pure vreg copies.
1709 const MachineOperand &SrcOp = Copy->getOperand(1);
1710 unsigned SrcReg = SrcOp.getReg();
1711 if (!TargetRegisterInfo::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
1712 return;
1714 const MachineOperand &DstOp = Copy->getOperand(0);
1715 unsigned DstReg = DstOp.getReg();
1716 if (!TargetRegisterInfo::isVirtualRegister(DstReg) || DstOp.isDead())
1717 return;
1719 // Check if either the dest or source is local. If it's live across a back
1720 // edge, it's not local. Note that if both vregs are live across the back
1721 // edge, we cannot successfully constrain the copy without cyclic scheduling.
1722 // If both the copy's source and dest are local live intervals, then we
1723 // should treat the dest as the global for the purpose of adding
1724 // constraints. This adds edges from source's other uses to the copy.
1725 unsigned LocalReg = SrcReg;
1726 unsigned GlobalReg = DstReg;
1727 LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
1728 if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
1729 LocalReg = DstReg;
1730 GlobalReg = SrcReg;
1731 LocalLI = &LIS->getInterval(LocalReg);
1732 if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
1733 return;
1734 }
1735 LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
1737 // Find the global segment after the start of the local LI.
1738 LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
1739 // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
1740 // local live range. We could create edges from other global uses to the local
1741 // start, but the coalescer should have already eliminated these cases, so
1742 // don't bother dealing with it.
1743 if (GlobalSegment == GlobalLI->end())
1744 return;
1746 // If GlobalSegment is killed at the LocalLI->start, the call to find()
1747 // returned the next global segment. But if GlobalSegment overlaps with
1748 // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
1749 // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
1750 if (GlobalSegment->contains(LocalLI->beginIndex()))
1751 ++GlobalSegment;
1753 if (GlobalSegment == GlobalLI->end())
1754 return;
1756 // Check if GlobalLI contains a hole in the vicinity of LocalLI.
1757 if (GlobalSegment != GlobalLI->begin()) {
1758 // Two address defs have no hole.
1759 if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
1760 GlobalSegment->start)) {
1761 return;
1762 }
1763 // If the prior global segment may be defined by the same two-address
1764 // instruction that also defines LocalLI, then we can't make a hole here.
1765 if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
1766 LocalLI->beginIndex())) {
1767 return;
1768 }
1769 // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
1770 // it would be a disconnected component in the live range.
1771 assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
1772 "Disconnected LRG within the scheduling region.");
1774 MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
1775 if (!GlobalDef)
1776 return;
1778 SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
1779 if (!GlobalSU)
1780 return;
1782 // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
1783 // constraining the uses of the last local def to precede GlobalDef.
1784 SmallVector<SUnit*,8> LocalUses;
1785 const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
1786 MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
1787 SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
1788 for (const SDep &Succ : LastLocalSU->Succs) {
1789 if (Succ.getKind() != SDep::Data || Succ.getReg() != LocalReg)
1790 continue;
1791 if (Succ.getSUnit() == GlobalSU)
1792 continue;
1793 if (!DAG->canAddEdge(GlobalSU, Succ.getSUnit()))
1794 return;
1795 LocalUses.push_back(Succ.getSUnit());
1796 }
1797 // Open the top of the GlobalLI hole by constraining any earlier global uses
1798 // to precede the start of LocalLI.
1799 SmallVector<SUnit*,8> GlobalUses;
1800 MachineInstr *FirstLocalDef =
1801 LIS->getInstructionFromIndex(LocalLI->beginIndex());
1802 SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
1803 for (const SDep &Pred : GlobalSU->Preds) {
1804 if (Pred.getKind() != SDep::Anti || Pred.getReg() != GlobalReg)
1805 continue;
1806 if (Pred.getSUnit() == FirstLocalSU)
1807 continue;
1808 if (!DAG->canAddEdge(FirstLocalSU, Pred.getSUnit()))
1809 return;
1810 GlobalUses.push_back(Pred.getSUnit());
1811 }
1812 LLVM_DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
1813 // Add the weak edges.
1814 for (SmallVectorImpl<SUnit*>::const_iterator
1815 I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
1816 LLVM_DEBUG(dbgs() << " Local use SU(" << (*I)->NodeNum << ") -> SU("
1817 << GlobalSU->NodeNum << ")\n");
1818 DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
1819 }
1820 for (SmallVectorImpl<SUnit*>::const_iterator
1821 I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
1822 LLVM_DEBUG(dbgs() << " Global use SU(" << (*I)->NodeNum << ") -> SU("
1823 << FirstLocalSU->NodeNum << ")\n");
1824 DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
1825 }
1826 }
1828 /// Callback from DAG postProcessing to create weak edges to encourage
1829 /// copy elimination.
1830 void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
1831 ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1832 assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
1834 MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
1835 if (FirstPos == DAG->end())
1836 return;
1837 RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
1838 RegionEndIdx = DAG->getLIS()->getInstructionIndex(
1839 *priorNonDebug(DAG->end(), DAG->begin()));
1841 for (SUnit &SU : DAG->SUnits) {
1842 if (!SU.getInstr()->isCopy())
1843 continue;
1845 constrainLocalCopy(&SU, static_cast<ScheduleDAGMILive*>(DAG));
1846 }
1847 }
1849 //===----------------------------------------------------------------------===//
1850 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
1851 // and possibly other custom schedulers.
1852 //===----------------------------------------------------------------------===//
1854 static const unsigned InvalidCycle = ~0U;
1856 SchedBoundary::~SchedBoundary() { delete HazardRec; }
1858 /// Given a Count of resource usage and a Latency value, return true if a
1859 /// SchedBoundary becomes resource limited.
1860 static bool checkResourceLimit(unsigned LFactor, unsigned Count,
1861 unsigned Latency) {
1862 return (int)(Count - (Latency * LFactor)) > (int)LFactor;
1863 }
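// Illustrative worked example (numbers invented for this sketch, not taken
// from any target model): with LFactor = 2, Count = 10 and Latency = 3,
// Count - Latency * LFactor = 10 - 6 = 4 > 2, so the boundary counts as
// resource limited: critical-resource usage exceeds what the elapsed latency
// alone could issue by more than one cycle's worth of work.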
1865 void SchedBoundary::reset() {
1866 // A new HazardRec is created for each DAG and owned by SchedBoundary.
1867 // Destroying and reconstructing it is very expensive though. So keep
1868 // invalid, placeholder HazardRecs.
1869 if (HazardRec && HazardRec->isEnabled()) {
1870 delete HazardRec;
1871 HazardRec = nullptr;
1872 }
1873 Available.clear();
1874 Pending.clear();
1875 CheckPending = false;
1876 CurrCycle = 0;
1877 CurrMOps = 0;
1878 MinReadyCycle = std::numeric_limits<unsigned>::max();
1879 ExpectedLatency = 0;
1880 DependentLatency = 0;
1881 RetiredMOps = 0;
1882 MaxExecutedResCount = 0;
1883 ZoneCritResIdx = 0;
1884 IsResourceLimited = false;
1885 ReservedCycles.clear();
1887 // Track the maximum number of stall cycles that could arise either from the
1888 // latency of a DAG edge or the number of cycles that a processor resource is
1889 // reserved (SchedBoundary::ReservedCycles).
1890 MaxObservedStall = 0;
1892 // Reserve a zero-count for invalid CritResIdx.
1893 ExecutedResCounts.resize(1);
1894 assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
1895 }
1897 void SchedRemainder::
1898 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
1899 reset();
1900 if (!SchedModel->hasInstrSchedModel())
1901 return;
1902 RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
1903 for (SUnit &SU : DAG->SUnits) {
1904 const MCSchedClassDesc *SC = DAG->getSchedClass(&SU);
1905 RemIssueCount += SchedModel->getNumMicroOps(SU.getInstr(), SC)
1906 * SchedModel->getMicroOpFactor();
1907 for (TargetSchedModel::ProcResIter
1908 PI = SchedModel->getWriteProcResBegin(SC),
1909 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1910 unsigned PIdx = PI->ProcResourceIdx;
1911 unsigned Factor = SchedModel->getResourceFactor(PIdx);
1912 RemainingCounts[PIdx] += (Factor * PI->Cycles);
1913 }
1914 }
1915 }
1917 void SchedBoundary::
1918 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
1919 reset();
1920 DAG = dag;
1921 SchedModel = smodel;
1922 Rem = rem;
1923 if (SchedModel->hasInstrSchedModel()) {
1924 ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
1925 ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
1926 }
1927 }
1929 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
1930 /// these "soft stalls" differently than the hard stall cycles based on CPU
1931 /// resources and computed by checkHazard(). A fully in-order model
1932 /// (MicroOpBufferSize==0) will not make use of this since instructions are not
1933 /// available for scheduling until they are ready. However, a weaker in-order
1934 /// model may use this for heuristics. For example, if a processor has in-order
1935 /// behavior when reading certain resources, this may come into play.
1936 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
1937 if (!SU->isUnbuffered)
1938 return 0;
1940 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1941 if (ReadyCycle > CurrCycle)
1942 return ReadyCycle - CurrCycle;
1943 return 0;
1944 }
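// Illustrative example (hypothetical values): an unbuffered SU with
// BotReadyCycle = 7 while CurrCycle = 5 reports a soft stall of
// 7 - 5 = 2 cycles; any buffered SU reports 0.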
1946 /// Compute the next cycle at which the given processor resource can be
1947 /// scheduled.
1948 unsigned SchedBoundary::
1949 getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
1950 unsigned NextUnreserved = ReservedCycles[PIdx];
1951 // If this resource has never been used, always return cycle zero.
1952 if (NextUnreserved == InvalidCycle)
1953 return 0;
1954 // For bottom-up scheduling add the cycles needed for the current operation.
1955 if (!isTop())
1956 NextUnreserved += Cycles;
1957 return NextUnreserved;
1958 }
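// Illustrative example (hypothetical values): with ReservedCycles[PIdx] == 8,
// a top-down zone may next use the resource at cycle 8, while a bottom-up
// zone requesting Cycles = 2 gets 8 + 2 = 10. A never-used resource
// (InvalidCycle) is available at cycle 0.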
1960 /// Does this SU have a hazard within the current instruction group.
1962 /// The scheduler supports two modes of hazard recognition. The first is the
1963 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1964 /// supports highly complicated in-order reservation tables
1965 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1967 /// The second is a streamlined mechanism that checks for hazards based on
1968 /// simple counters that the scheduler itself maintains. It explicitly checks
1969 /// for instruction dispatch limitations, including the number of micro-ops that
1970 /// can dispatch per cycle.
1972 /// TODO: Also check whether the SU must start a new group.
1973 bool SchedBoundary::checkHazard(SUnit *SU) {
1974 if (HazardRec->isEnabled()
1975 && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
1976 return true;
1977 }
1979 unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
1980 if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
1981 LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops="
1982 << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
1983 return true;
1984 }
1986 if (CurrMOps > 0 &&
1987 ((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) ||
1988 (!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) {
1989 LLVM_DEBUG(dbgs() << " hazard: SU(" << SU->NodeNum << ") must "
1990 << (isTop() ? "begin" : "end") << " group\n");
1994 if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
1995 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1996 for (const MCWriteProcResEntry &PE :
1997 make_range(SchedModel->getWriteProcResBegin(SC),
1998 SchedModel->getWriteProcResEnd(SC))) {
1999 unsigned ResIdx = PE.ProcResourceIdx;
2000 unsigned Cycles = PE.Cycles;
2001 unsigned NRCycle = getNextResourceCycle(ResIdx, Cycles);
2002 if (NRCycle > CurrCycle) {
2003 #ifndef NDEBUG
2004 MaxObservedStall = std::max(Cycles, MaxObservedStall);
2005 #endif
2006 LLVM_DEBUG(dbgs() << " SU(" << SU->NodeNum << ") "
2007 << SchedModel->getResourceName(ResIdx) << "="
2008 << NRCycle << "c\n");
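// Illustrative example of the micro-op check above (numbers invented):
// with IssueWidth = 4 and CurrMOps = 3, an SU that decodes to 2 micro-ops
// gives 3 + 2 > 4, so it is reported as a hazard and must wait a cycle.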
2016 // Find the unscheduled node in ReadySUs with the highest latency.
2017 unsigned SchedBoundary::
2018 findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
2019 SUnit *LateSU = nullptr;
2020 unsigned RemLatency = 0;
2021 for (SUnit *SU : ReadySUs) {
2022 unsigned L = getUnscheduledLatency(SU);
2023 if (L > RemLatency) {
2024 LateSU = SU;
2025 RemLatency = L;
2026 }
2027 }
2028 if (LateSU) {
2029 LLVM_DEBUG(dbgs() << Available.getName() << " RemLatency SU("
2030 << LateSU->NodeNum << ") " << RemLatency << "c\n");
2035 // Count resources in this zone and the remaining unscheduled
2036 // instruction. Return the max count, scaled. Set OtherCritIdx to the critical
2037 // resource index, or zero if the zone is issue limited.
2038 unsigned SchedBoundary::
2039 getOtherResourceCount(unsigned &OtherCritIdx) {
2040 OtherCritIdx = 0;
2041 if (!SchedModel->hasInstrSchedModel())
2042 return 0;
2044 unsigned OtherCritCount = Rem->RemIssueCount
2045 + (RetiredMOps * SchedModel->getMicroOpFactor());
2046 LLVM_DEBUG(dbgs() << " " << Available.getName() << " + Remain MOps: "
2047 << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
2048 for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
2049 PIdx != PEnd; ++PIdx) {
2050 unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
2051 if (OtherCount > OtherCritCount) {
2052 OtherCritCount = OtherCount;
2053 OtherCritIdx = PIdx;
2054 }
2055 }
2056 if (OtherCritIdx) {
2057 LLVM_DEBUG(
2058 dbgs() << " " << Available.getName() << " + Remain CritRes: "
2059 << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
2060 << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
2062 return OtherCritCount;
2065 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
2066 assert(SU->getInstr() && "Scheduled SUnit must have instr");
2068 #ifndef NDEBUG
2069 // ReadyCycle was bumped up to CurrCycle when this node was scheduled, but
2070 // CurrCycle may have been eagerly advanced immediately after scheduling, so
2071 // may now be greater than ReadyCycle.
2072 if (ReadyCycle > CurrCycle)
2073 MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
2074 #endif
2076 if (ReadyCycle < MinReadyCycle)
2077 MinReadyCycle = ReadyCycle;
2079 // Check for interlocks first. For the purpose of other heuristics, an
2080 // instruction that cannot issue appears as if it's not in the ReadyQueue.
2081 bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2082 if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU) ||
2083 Available.size() >= ReadyListLimit)
2084 Pending.push(SU);
2085 else
2086 Available.push(SU);
2087 }
2089 /// Move the boundary of scheduled code by one cycle.
2090 void SchedBoundary::bumpCycle(unsigned NextCycle) {
2091 if (SchedModel->getMicroOpBufferSize() == 0) {
2092 assert(MinReadyCycle < std::numeric_limits<unsigned>::max() &&
2093 "MinReadyCycle uninitialized");
2094 if (MinReadyCycle > NextCycle)
2095 NextCycle = MinReadyCycle;
2096 }
2097 // Update the current micro-ops, which will issue in the next cycle.
2098 unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
2099 CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
2101 // Decrement DependentLatency based on the next cycle.
2102 if ((NextCycle - CurrCycle) > DependentLatency)
2103 DependentLatency = 0;
2104 else
2105 DependentLatency -= (NextCycle - CurrCycle);
2107 if (!HazardRec->isEnabled()) {
2108 // Bypass HazardRec virtual calls.
2109 CurrCycle = NextCycle;
2110 } else {
2111 // Bypass getHazardType calls in case of long latency.
2112 for (; CurrCycle != NextCycle; ++CurrCycle) {
2113 if (isTop())
2114 HazardRec->AdvanceCycle();
2115 else
2116 HazardRec->RecedeCycle();
2117 }
2118 }
2119 CheckPending = true;
2120 IsResourceLimited =
2121 checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
2122 getScheduledLatency());
2124 LLVM_DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName()
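// Illustrative example (numbers invented): advancing from CurrCycle = 10
// to NextCycle = 12 with IssueWidth = 4 gives DecMOps = 4 * 2 = 8, so a
// pending CurrMOps = 3 <= 8 is cleared to 0: the current issue group fully
// drains across the skipped cycles.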
2128 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
2129 ExecutedResCounts[PIdx] += Count;
2130 if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
2131 MaxExecutedResCount = ExecutedResCounts[PIdx];
2132 }
2134 /// Add the given processor resource to this scheduled zone.
2136 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles
2137 /// during which this resource is consumed.
2139 /// \return the next cycle at which the instruction may execute without
2140 /// oversubscribing resources.
2141 unsigned SchedBoundary::
2142 countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
2143 unsigned Factor = SchedModel->getResourceFactor(PIdx);
2144 unsigned Count = Factor * Cycles;
2145 LLVM_DEBUG(dbgs() << " " << SchedModel->getResourceName(PIdx) << " +"
2146 << Cycles << "x" << Factor << "u\n");
2148 // Update Executed resources counts.
2149 incExecutedResources(PIdx, Count);
2150 assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
2151 Rem->RemainingCounts[PIdx] -= Count;
2153 // Check if this resource exceeds the current critical resource. If so, it
2154 // becomes the critical resource.
2155 if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
2156 ZoneCritResIdx = PIdx;
2157 LLVM_DEBUG(dbgs() << " *** Critical resource "
2158 << SchedModel->getResourceName(PIdx) << ": "
2159 << getResourceCount(PIdx) / SchedModel->getLatencyFactor()
2160 << "c\n");
2161 }
2162 // For reserved resources, record the highest cycle using the resource.
2163 unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
2164 if (NextAvailable > CurrCycle) {
2165 LLVM_DEBUG(dbgs() << " Resource conflict: "
2166 << SchedModel->getProcResource(PIdx)->Name
2167 << " reserved until @" << NextAvailable << "\n");
2169 return NextAvailable;
2170 }
2172 /// Move the boundary of scheduled code by one SUnit.
2173 void SchedBoundary::bumpNode(SUnit *SU) {
2174 // Update the reservation table.
2175 if (HazardRec->isEnabled()) {
2176 if (!isTop() && SU->isCall) {
2177 // Calls are scheduled with their preceding instructions. For bottom-up
2178 // scheduling, clear the pipeline state before emitting.
2179 HazardRec->Reset();
2180 }
2181 HazardRec->EmitInstruction(SU);
2182 }
2183 // checkHazard should prevent scheduling multiple instructions per cycle that
2184 // exceed the issue width.
2185 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2186 unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
2187 assert(
2188 (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
2189 "Cannot schedule this instruction's MicroOps in the current cycle.");
2191 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
2192 LLVM_DEBUG(dbgs() << " Ready @" << ReadyCycle << "c\n");
2194 unsigned NextCycle = CurrCycle;
2195 switch (SchedModel->getMicroOpBufferSize()) {
2197 assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
2200 if (ReadyCycle > NextCycle) {
2201 NextCycle = ReadyCycle;
2202 LLVM_DEBUG(dbgs() << " *** Stall until: " << ReadyCycle << "\n");
2206 // We don't currently model the OOO reorder buffer, so consider all
2207 // scheduled MOps to be "retired". We do loosely model in-order resource
2208 // latency. If this instruction uses an in-order resource, account for any
2209 // likely stall cycles.
2210 if (SU->isUnbuffered && ReadyCycle > NextCycle)
2211 NextCycle = ReadyCycle;
2212 break;
2213 }
2214 RetiredMOps += IncMOps;
2216 // Update resource counts and critical resource.
2217 if (SchedModel->hasInstrSchedModel()) {
2218 unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
2219 assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
2220 Rem->RemIssueCount -= DecRemIssue;
2221 if (ZoneCritResIdx) {
2222 // Scale scheduled micro-ops for comparing with the critical resource.
2223 unsigned ScaledMOps =
2224 RetiredMOps * SchedModel->getMicroOpFactor();
2226 // If scaled micro-ops are now more than the previous critical resource by
2227 // a full cycle, then micro-ops issue becomes critical.
2228 if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
2229 >= (int)SchedModel->getLatencyFactor()) {
2230 ZoneCritResIdx = 0;
2231 LLVM_DEBUG(dbgs() << " *** Critical resource NumMicroOps: "
2232 << ScaledMOps / SchedModel->getLatencyFactor()
2233 << "c\n");
2234 }
2235 }
2236 for (TargetSchedModel::ProcResIter
2237 PI = SchedModel->getWriteProcResBegin(SC),
2238 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2239 unsigned RCycle =
2240 countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
2241 if (RCycle > NextCycle)
2242 NextCycle = RCycle;
2243 }
2244 if (SU->hasReservedResource) {
2245 // For reserved resources, record the highest cycle using the resource.
2246 // For top-down scheduling, this is the cycle in which we schedule this
2247 // instruction plus the number of cycles the operation reserves the
2248 // resource. For bottom-up, it is simply the instruction's cycle.
2249 for (TargetSchedModel::ProcResIter
2250 PI = SchedModel->getWriteProcResBegin(SC),
2251 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2252 unsigned PIdx = PI->ProcResourceIdx;
2253 if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
2254 if (isTop()) {
2255 ReservedCycles[PIdx] =
2256 std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
2257 }
2258 else
2259 ReservedCycles[PIdx] = NextCycle;
2260 }
2261 }
2262 }
2264 // Update ExpectedLatency and DependentLatency.
2265 unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
2266 unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
2267 if (SU->getDepth() > TopLatency) {
2268 TopLatency = SU->getDepth();
2269 LLVM_DEBUG(dbgs() << " " << Available.getName() << " TopLatency SU("
2270 << SU->NodeNum << ") " << TopLatency << "c\n");
2272 if (SU->getHeight() > BotLatency) {
2273 BotLatency = SU->getHeight();
2274 LLVM_DEBUG(dbgs() << " " << Available.getName() << " BotLatency SU("
2275 << SU->NodeNum << ") " << BotLatency << "c\n");
2277 // If we stall for any reason, bump the cycle.
2278 if (NextCycle > CurrCycle)
2279 bumpCycle(NextCycle);
2281 // After updating ZoneCritResIdx and ExpectedLatency, check if we're
2282 // resource limited. If a stall occurred, bumpCycle does this.
2283 IsResourceLimited =
2284 checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
2285 getScheduledLatency());
2287 // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
2288 // resets CurrMOps. Loop to handle instructions with more MOps than issue in
2289 // one cycle. Since we commonly reach the max MOps here, opportunistically
2290 // bump the cycle to avoid uselessly checking everything in the readyQ.
2291 CurrMOps += IncMOps;
2293 // Bump the cycle count for issue group constraints.
2294 // This must be done after NextCycle has been adjusted for all other stalls.
2295 // Calling bumpCycle(X) will reduce CurrMOps by one issue group and set
2296 // CurrCycle to X.
2297 if ((isTop() && SchedModel->mustEndGroup(SU->getInstr())) ||
2298 (!isTop() && SchedModel->mustBeginGroup(SU->getInstr()))) {
2299 LLVM_DEBUG(dbgs() << " Bump cycle to " << (isTop() ? "end" : "begin")
2301 bumpCycle(++NextCycle);
2304 while (CurrMOps >= SchedModel->getIssueWidth()) {
2305 LLVM_DEBUG(dbgs() << " *** Max MOps " << CurrMOps << " at cycle "
2306 << CurrCycle << '\n');
2307 bumpCycle(++NextCycle);
2308 }
2309 LLVM_DEBUG(dumpScheduledState());
2310 }
2312 /// Release pending ready nodes in to the available queue. This makes them
2313 /// visible to heuristics.
2314 void SchedBoundary::releasePending() {
2315 // If the available queue is empty, it is safe to reset MinReadyCycle.
2316 if (Available.empty())
2317 MinReadyCycle = std::numeric_limits<unsigned>::max();
2319 // Check to see if any of the pending instructions are ready to issue. If
2320 // so, add them to the available queue.
2321 bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2322 for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
2323 SUnit *SU = *(Pending.begin()+i);
2324 unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
2326 if (ReadyCycle < MinReadyCycle)
2327 MinReadyCycle = ReadyCycle;
2329 if (!IsBuffered && ReadyCycle > CurrCycle)
2330 continue;
2332 if (checkHazard(SU))
2333 continue;
2335 if (Available.size() >= ReadyListLimit)
2336 break;
2338 Available.push(SU);
2339 Pending.remove(Pending.begin()+i);
2340 --i; --e;
2341 }
2342 CheckPending = false;
2343 }
2345 /// Remove SU from the ready set for this boundary.
2346 void SchedBoundary::removeReady(SUnit *SU) {
2347 if (Available.isInQueue(SU))
2348 Available.remove(Available.find(SU));
2349 else {
2350 assert(Pending.isInQueue(SU) && "bad ready count");
2351 Pending.remove(Pending.find(SU));
2352 }
2353 }
2355 /// If this queue only has one ready candidate, return it. As a side effect,
2356 /// defer any nodes that now hit a hazard, and advance the cycle until at least
2357 /// one node is ready. If multiple instructions are ready, return NULL.
2358 SUnit *SchedBoundary::pickOnlyChoice() {
2359 if (CheckPending)
2360 releasePending();
2362 if (CurrMOps > 0) {
2363 // Defer any ready instrs that now have a hazard.
2364 for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
2365 if (checkHazard(*I)) {
2366 Pending.push(*I);
2367 I = Available.remove(I);
2368 continue;
2369 }
2370 ++I;
2371 }
2372 }
2373 for (unsigned i = 0; Available.empty(); ++i) {
2374 // FIXME: Re-enable assert once PR20057 is resolved.
2375 // assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
2376 // "permanent hazard");
2377 (void)i;
2378 bumpCycle(CurrCycle + 1);
2379 releasePending();
2380 }
2382 LLVM_DEBUG(Pending.dump());
2383 LLVM_DEBUG(Available.dump());
2385 if (Available.size() == 1)
2386 return *Available.begin();
2387 return nullptr;
2388 }
2390 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2391 // This is useful information to dump after bumpNode.
2392 // Note that the Queue contents are more useful before pickNodeFromQueue.
2393 LLVM_DUMP_METHOD void SchedBoundary::dumpScheduledState() const {
2394 unsigned ResFactor;
2395 unsigned ResCount;
2396 if (ZoneCritResIdx) {
2397 ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
2398 ResCount = getResourceCount(ZoneCritResIdx);
2399 } else {
2400 ResFactor = SchedModel->getMicroOpFactor();
2401 ResCount = RetiredMOps * ResFactor;
2402 }
2403 unsigned LFactor = SchedModel->getLatencyFactor();
2404 dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
2405 << " Retired: " << RetiredMOps;
2406 dbgs() << "\n Executed: " << getExecutedCount() / LFactor << "c";
2407 dbgs() << "\n Critical: " << ResCount / LFactor << "c, "
2408 << ResCount / ResFactor << " "
2409 << SchedModel->getResourceName(ZoneCritResIdx)
2410 << "\n ExpectedLatency: " << ExpectedLatency << "c\n"
2411 << (IsResourceLimited ? " - Resource" : " - Latency")
2416 //===----------------------------------------------------------------------===//
2417 // GenericScheduler - Generic implementation of MachineSchedStrategy.
2418 //===----------------------------------------------------------------------===//
2420 void GenericSchedulerBase::SchedCandidate::
2421 initResourceDelta(const ScheduleDAGMI *DAG,
2422 const TargetSchedModel *SchedModel) {
2423 if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
2424 return;
2426 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2427 for (TargetSchedModel::ProcResIter
2428 PI = SchedModel->getWriteProcResBegin(SC),
2429 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2430 if (PI->ProcResourceIdx == Policy.ReduceResIdx)
2431 ResDelta.CritResources += PI->Cycles;
2432 if (PI->ProcResourceIdx == Policy.DemandResIdx)
2433 ResDelta.DemandedResources += PI->Cycles;
2434 }
2435 }
2437 /// Compute remaining latency. We need this both to determine whether the
2438 /// overall schedule has become latency-limited and whether the instructions
2439 /// outside this zone are resource or latency limited.
2441 /// The "dependent" latency is updated incrementally during scheduling as the
2442 /// max height/depth of scheduled nodes minus the cycles since it was
2443 /// scheduled:
2444 /// DLat = max (N.depth - (CurrCycle - N.ReadyCycle) for N in Zone
2446 /// The "independent" latency is the max ready queue depth:
2447 /// ILat = max N.depth for N in Available|Pending
2449 /// RemainingLatency is the greater of independent and dependent latency.
2451 /// These computations are expensive, especially in DAGs with many edges, so
2452 /// only do them if necessary.
2453 static unsigned computeRemLatency(SchedBoundary &CurrZone) {
2454 unsigned RemLatency = CurrZone.getDependentLatency();
2455 RemLatency = std::max(RemLatency,
2456 CurrZone.findMaxLatency(CurrZone.Available.elements()));
2457 RemLatency = std::max(RemLatency,
2458 CurrZone.findMaxLatency(CurrZone.Pending.elements()));
2459 return RemLatency;
2460 }
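// Illustrative example (hypothetical values): with getDependentLatency() = 4,
// a deepest Available node of unscheduled latency 6 and a deepest Pending
// node of 5, RemainingLatency = max(4, 6, 5) = 6 cycles.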
2462 /// Returns true if the current cycle plus remaining latency is greater than
2463 /// the critical path in the scheduling region.
2464 bool GenericSchedulerBase::shouldReduceLatency(const CandPolicy &Policy,
2465 SchedBoundary &CurrZone,
2466 bool ComputeRemLatency,
2467 unsigned &RemLatency) const {
2468 // The current cycle is already greater than the critical path, so we are
2469 // already latency limited and don't need to compute the remaining latency.
2470 if (CurrZone.getCurrCycle() > Rem.CriticalPath)
2471 return true;
2473 // If we haven't scheduled anything yet, then we aren't latency limited.
2474 if (CurrZone.getCurrCycle() == 0)
2475 return false;
2477 if (ComputeRemLatency)
2478 RemLatency = computeRemLatency(CurrZone);
2480 return RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath;
2481 }
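// Illustrative example (hypothetical values): with Rem.CriticalPath = 20,
// CurrCycle = 12 and RemLatency = 10, 12 + 10 = 22 > 20, so the zone is
// latency limited and the policy should favor latency reduction.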
2483 /// Set the CandPolicy given a scheduling zone given the current resources and
2484 /// latencies inside and outside the zone.
2485 void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
2486 SchedBoundary &CurrZone,
2487 SchedBoundary *OtherZone) {
2488 // Apply preemptive heuristics based on the total latency and resources
2489 // inside and outside this zone. Potential stalls should be considered before
2490 // following this policy.
2492 // Compute the critical resource outside the zone.
2493 unsigned OtherCritIdx = 0;
2494 unsigned OtherCount =
2495 OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
2497 bool OtherResLimited = false;
2498 unsigned RemLatency = 0;
2499 bool RemLatencyComputed = false;
2500 if (SchedModel->hasInstrSchedModel() && OtherCount != 0) {
2501 RemLatency = computeRemLatency(CurrZone);
2502 RemLatencyComputed = true;
2503 OtherResLimited = checkResourceLimit(SchedModel->getLatencyFactor(),
2504 OtherCount, RemLatency);
2505 }
2507 // Schedule aggressively for latency in PostRA mode. We don't check for
2508 // acyclic latency during PostRA, and highly out-of-order processors will
2509 // skip PostRA scheduling.
2510 if (!OtherResLimited &&
2511 (IsPostRA || shouldReduceLatency(Policy, CurrZone, !RemLatencyComputed,
2512 RemLatency))) {
2513 Policy.ReduceLatency |= true;
2514 LLVM_DEBUG(dbgs() << " " << CurrZone.Available.getName()
2515 << " RemainingLatency " << RemLatency << " + "
2516 << CurrZone.getCurrCycle() << "c > CritPath "
2517 << Rem.CriticalPath << "\n");
2519 // If the same resource is limiting inside and outside the zone, do nothing.
2520 if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
2521 return;
2523 LLVM_DEBUG(if (CurrZone.isResourceLimited()) {
2524 dbgs() << " " << CurrZone.Available.getName() << " ResourceLimited: "
2525 << SchedModel->getResourceName(CurrZone.getZoneCritResIdx()) << "\n";
2526 } if (OtherResLimited) dbgs()
2527 << " RemainingLimit: "
2528 << SchedModel->getResourceName(OtherCritIdx) << "\n";
2529 if (!CurrZone.isResourceLimited() && !OtherResLimited) dbgs()
2530 << " Latency limited both directions.\n");
2532 if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
2533 Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
2535 if (OtherResLimited)
2536 Policy.DemandResIdx = OtherCritIdx;
2537 }
2540 const char *GenericSchedulerBase::getReasonStr(
2541 GenericSchedulerBase::CandReason Reason) {
2543 case NoCand: return "NOCAND ";
2544 case Only1: return "ONLY1 ";
2545 case PhysReg: return "PHYS-REG ";
2546 case RegExcess: return "REG-EXCESS";
2547 case RegCritical: return "REG-CRIT ";
2548 case Stall: return "STALL ";
2549 case Cluster: return "CLUSTER ";
2550 case Weak: return "WEAK ";
2551 case RegMax: return "REG-MAX ";
2552 case ResourceReduce: return "RES-REDUCE";
2553 case ResourceDemand: return "RES-DEMAND";
2554 case TopDepthReduce: return "TOP-DEPTH ";
2555 case TopPathReduce: return "TOP-PATH ";
2556 case BotHeightReduce:return "BOT-HEIGHT";
2557 case BotPathReduce: return "BOT-PATH ";
2558 case NextDefUse: return "DEF-USE ";
2559 case NodeOrder: return "ORDER ";
2561 llvm_unreachable("Unknown reason!");
2564 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
2565 PressureChange P;
2566 unsigned ResIdx = 0;
2567 unsigned Latency = 0;
2568 switch (Cand.Reason) {
2569 default:
2570 break;
2571 case RegExcess:
2572 P = Cand.RPDelta.Excess;
2573 break;
2574 case RegCritical:
2575 P = Cand.RPDelta.CriticalMax;
2576 break;
2577 case RegMax:
2578 P = Cand.RPDelta.CurrentMax;
2579 break;
2580 case ResourceReduce:
2581 ResIdx = Cand.Policy.ReduceResIdx;
2582 break;
2583 case ResourceDemand:
2584 ResIdx = Cand.Policy.DemandResIdx;
2585 break;
2586 case TopDepthReduce:
2587 Latency = Cand.SU->getDepth();
2588 break;
2589 case TopPathReduce:
2590 Latency = Cand.SU->getHeight();
2591 break;
2592 case BotHeightReduce:
2593 Latency = Cand.SU->getHeight();
2594 break;
2595 case BotPathReduce:
2596 Latency = Cand.SU->getDepth();
2597 break;
2598 }
2599 dbgs() << " Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
2601 dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
2602 << ":" << P.getUnitInc() << " ";
2606 dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
2610 dbgs() << " " << Latency << " cycles ";
2618 /// Return true if this heuristic determines order.
2619 bool tryLess(int TryVal, int CandVal,
2620 GenericSchedulerBase::SchedCandidate &TryCand,
2621 GenericSchedulerBase::SchedCandidate &Cand,
2622 GenericSchedulerBase::CandReason Reason) {
2623 if (TryVal < CandVal) {
2624 TryCand.Reason = Reason;
2625 return true;
2626 }
2627 if (TryVal > CandVal) {
2628 if (Cand.Reason > Reason)
2629 Cand.Reason = Reason;
2630 return true;
2631 }
2632 return false;
2633 }
2635 bool tryGreater(int TryVal, int CandVal,
2636 GenericSchedulerBase::SchedCandidate &TryCand,
2637 GenericSchedulerBase::SchedCandidate &Cand,
2638 GenericSchedulerBase::CandReason Reason) {
2639 if (TryVal > CandVal) {
2640 TryCand.Reason = Reason;
2641 return true;
2642 }
2643 if (TryVal < CandVal) {
2644 if (Cand.Reason > Reason)
2645 Cand.Reason = Reason;
2646 return true;
2647 }
2648 return false;
2649 }
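// Usage sketch (invented values): tryLess(2, 5, TryCand, Cand, Stall) sets
// TryCand.Reason = Stall and returns true, so TryCand wins; with the values
// swapped the current Cand is kept and true is still returned; a 5 vs. 5
// tie returns false and the caller falls through to the next heuristic.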
2651 bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
2652 GenericSchedulerBase::SchedCandidate &Cand,
2653 SchedBoundary &Zone) {
2654 if (Zone.isTop()) {
2655 if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
2656 if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2657 TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
2658 return true;
2659 }
2660 if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2661 TryCand, Cand, GenericSchedulerBase::TopPathReduce))
2662 return true;
2663 } else {
2664 if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
2665 if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2666 TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
2667 return true;
2668 }
2669 if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2670 TryCand, Cand, GenericSchedulerBase::BotPathReduce))
2671 return true;
2672 }
2673 return false;
2674 }
2675 } // end namespace llvm
2677 static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) {
2678 LLVM_DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2679 << GenericSchedulerBase::getReasonStr(Reason) << '\n');
2680 }
2682 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) {
2683 tracePick(Cand.Reason, Cand.AtTop);
2684 }
2686 void GenericScheduler::initialize(ScheduleDAGMI *dag) {
2687 assert(dag->hasVRegLiveness() &&
2688 "(PreRA)GenericScheduler needs vreg liveness");
2689 DAG = static_cast<ScheduleDAGMILive*>(dag);
2690 SchedModel = DAG->getSchedModel();
2691 TRI = DAG->TRI;
2693 Rem.init(DAG, SchedModel);
2694 Top.init(DAG, SchedModel, &Rem);
2695 Bot.init(DAG, SchedModel, &Rem);
2697 // Initialize resource counts.
2699 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
2700 // are disabled, then these HazardRecs will be disabled.
2701 const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
2702 if (!Top.HazardRec) {
2703 Top.HazardRec =
2704 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2705 Itin, DAG);
2706 }
2707 if (!Bot.HazardRec) {
2708 Bot.HazardRec =
2709 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
2710 Itin, DAG);
2711 }
2712 TopCand.SU = nullptr;
2713 BotCand.SU = nullptr;
2714 }
2716 /// Initialize the per-region scheduling policy.
2717 void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
2718 MachineBasicBlock::iterator End,
2719 unsigned NumRegionInstrs) {
2720 const MachineFunction &MF = *Begin->getMF();
2721 const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
2723 // Avoid setting up the register pressure tracker for small regions to save
2724 // compile time. As a rough heuristic, only track pressure when the number of
2725 // schedulable instructions exceeds half the integer register file.
2726 RegionPolicy.ShouldTrackPressure = true;
2727 for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
2728 MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
2729 if (TLI->isTypeLegal(LegalIntVT)) {
2730 unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
2731 TLI->getRegClassFor(LegalIntVT));
2732 RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
2733 break;
2734 }
2735 }
2736 // For generic targets, we default to bottom-up, because it's simpler and more
2737 // compile-time optimizations have been implemented in that direction.
2738 RegionPolicy.OnlyBottomUp = true;
2740 // Allow the subtarget to override default policy.
2741 MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs);
2743 // After subtarget overrides, apply command line options.
2744 if (!EnableRegPressure)
2745 RegionPolicy.ShouldTrackPressure = false;
2747 // Check -misched-topdown/bottomup can force or unforce scheduling direction.
2748 // e.g. -misched-bottomup=false allows scheduling in both directions.
2749 assert((!ForceTopDown || !ForceBottomUp) &&
2750 "-misched-topdown incompatible with -misched-bottomup");
2751 if (ForceBottomUp.getNumOccurrences() > 0) {
2752 RegionPolicy.OnlyBottomUp = ForceBottomUp;
2753 if (RegionPolicy.OnlyBottomUp)
2754 RegionPolicy.OnlyTopDown = false;
2755 }
2756 if (ForceTopDown.getNumOccurrences() > 0) {
2757 RegionPolicy.OnlyTopDown = ForceTopDown;
2758 if (RegionPolicy.OnlyTopDown)
2759 RegionPolicy.OnlyBottomUp = false;
2760 }
2761 }
2763 void GenericScheduler::dumpPolicy() const {
2764 // Cannot completely remove virtual function even in release mode.
2765 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2766 dbgs() << "GenericScheduler RegionPolicy: "
2767 << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
2768 << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
2769 << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
2774 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
2775 /// critical path by more cycles than it takes to drain the instruction buffer.
2776 /// We estimate an upper bound on in-flight instructions as:
2778 /// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
2779 /// InFlightIterations = AcyclicPath / CyclesPerIteration
2780 /// InFlightResources = InFlightIterations * LoopResources
2782 /// TODO: Check execution resources in addition to IssueCount.
2783 void GenericScheduler::checkAcyclicLatency() {
2784 if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
2785 return;
2787 // Scaled number of cycles per loop iteration.
2788 unsigned IterCount =
2789 std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
2790 Rem.RemIssueCount);
2791 // Scaled acyclic critical path.
2792 unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
2793 // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
2794 unsigned InFlightCount =
2795 (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
2796 unsigned BufferLimit =
2797 SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
2799 Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
2802 dbgs() << "IssueCycles="
2803 << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
2804 << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
2805 << "c NumIters=" << (AcyclicCount + IterCount - 1) / IterCount
2806 << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
2807 << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
2808 if (Rem.IsAcyclicLatencyLimited) dbgs() << " ACYCLIC LATENCY LIMIT\n");
2809 }
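// Illustrative example (numbers invented; latency and micro-op factors taken
// as 1): CyclicCritPath = 4 and RemIssueCount = 8 give
// IterCount = max(4, 8) = 8; CriticalPath = 12 gives AcyclicCount = 12 and
// InFlightCount = ceil(12 * 8 / 8) = 12 micro-ops. With MicroOpBufferSize = 2
// the buffer limit is 2 < 12, so the loop is flagged acyclic latency limited.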
2811 void GenericScheduler::registerRoots() {
2812 Rem.CriticalPath = DAG->ExitSU.getDepth();
2814 // Some roots may not feed into ExitSU. Check all of them in case.
2815 for (const SUnit *SU : Bot.Available) {
2816 if (SU->getDepth() > Rem.CriticalPath)
2817 Rem.CriticalPath = SU->getDepth();
2818 }
2819 LLVM_DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
2820 if (DumpCriticalPathLength) {
2821 errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
2824 if (EnableCyclicPath && SchedModel->getMicroOpBufferSize() > 0) {
2825 Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
2826 checkAcyclicLatency();
2827 }
2828 }
2830 namespace llvm {
2831 bool tryPressure(const PressureChange &TryP,
2832 const PressureChange &CandP,
2833 GenericSchedulerBase::SchedCandidate &TryCand,
2834 GenericSchedulerBase::SchedCandidate &Cand,
2835 GenericSchedulerBase::CandReason Reason,
2836 const TargetRegisterInfo *TRI,
2837 const MachineFunction &MF) {
2838 // If one candidate decreases and the other increases, go with it.
2839 // Invalid candidates have UnitInc==0.
2840 if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
2841 Reason))
2842 return true;
2844 // Do not compare the magnitude of pressure changes between top and bottom
2845 // boundary.
2846 if (Cand.AtTop != TryCand.AtTop)
2847 return false;
2849 // If both candidates affect the same set in the same boundary, go with the
2850 // smallest increase.
2851 unsigned TryPSet = TryP.getPSetOrMax();
2852 unsigned CandPSet = CandP.getPSetOrMax();
2853 if (TryPSet == CandPSet) {
2854 return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
2855 Reason);
2856 }
2858 int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
2859 std::numeric_limits<int>::max();
2861 int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
2862 std::numeric_limits<int>::max();
2864 // If the candidates are decreasing pressure, reverse priority.
2865 if (TryP.getUnitInc() < 0)
2866 std::swap(TryRank, CandRank);
2867 return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
2868 }
2870 unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2871 return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2872 }
2874 /// Minimize physical register live ranges. Regalloc wants them adjacent to
2875 /// their physreg def/use.
2877 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2878 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2879 /// with the operation that produces or consumes the physreg. We'll do this when
2880 /// regalloc has support for parallel copies.
2881 int biasPhysReg(const SUnit *SU, bool isTop) {
2882 const MachineInstr *MI = SU->getInstr();
2884 if (MI->isCopy()) {
2885 unsigned ScheduledOper = isTop ? 1 : 0;
2886 unsigned UnscheduledOper = isTop ? 0 : 1;
2887 // If we have already scheduled the physreg producer/consumer, immediately
2888 // schedule the copy.
2889 if (TargetRegisterInfo::isPhysicalRegister(
2890 MI->getOperand(ScheduledOper).getReg()))
2891 return 1;
2892 // If the physreg is at the boundary, defer it. Otherwise schedule it
2893 // immediately to free the dependent. We can hoist the copy later.
2894 bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2895 if (TargetRegisterInfo::isPhysicalRegister(
2896 MI->getOperand(UnscheduledOper).getReg()))
2897 return AtBoundary ? -1 : 1;
2898 }
2900 if (MI->isMoveImmediate()) {
2901 // If we have a move immediate and all successors have been assigned, bias
2902 // towards scheduling this later. Make sure all register defs are to
2903 // physical registers.
2904 bool DoBias = true;
2905 for (const MachineOperand &Op : MI->defs()) {
2906 if (Op.isReg() && !TargetRegisterInfo::isPhysicalRegister(Op.getReg())) {
2907 DoBias = false;
2908 break;
2909 }
2910 }
2912 if (DoBias)
2913 return isTop ? -1 : 1;
2914 }
2916 return 0;
2917 }
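// Reading the result, per the logic above: +1 biases the node toward being
// scheduled immediately, -1 defers it, and 0 expresses no preference. For
// example, in a bottom-up zone a COPY defining a physreg whose consumer is
// already scheduled returns 1, keeping the copy adjacent to that consumer.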
2918 } // end namespace llvm
2920 void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
2921 bool AtTop,
2922 const RegPressureTracker &RPTracker,
2923 RegPressureTracker &TempTracker) {
2924 Cand.SU = SU;
2925 Cand.AtTop = AtTop;
2926 if (DAG->isTrackingPressure()) {
2927 if (AtTop) {
2928 TempTracker.getMaxDownwardPressureDelta(
2929 Cand.SU->getInstr(),
2930 Cand.RPDelta,
2931 DAG->getRegionCriticalPSets(),
2932 DAG->getRegPressure().MaxSetPressure);
2933 } else {
2934 if (VerifyScheduling) {
2935 TempTracker.getMaxUpwardPressureDelta(
2936 Cand.SU->getInstr(),
2937 &DAG->getPressureDiff(Cand.SU),
2938 Cand.RPDelta,
2939 DAG->getRegionCriticalPSets(),
2940 DAG->getRegPressure().MaxSetPressure);
2941 } else {
2942 RPTracker.getUpwardPressureDelta(
2943 Cand.SU->getInstr(),
2944 DAG->getPressureDiff(Cand.SU),
2945 Cand.RPDelta,
2946 DAG->getRegionCriticalPSets(),
2947 DAG->getRegPressure().MaxSetPressure);
2948 }
2949 }
2950 }
2951 LLVM_DEBUG(if (Cand.RPDelta.Excess.isValid()) dbgs()
2952 << " Try SU(" << Cand.SU->NodeNum << ") "
2953 << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet()) << ":"
2954 << Cand.RPDelta.Excess.getUnitInc() << "\n");
2957 /// Apply a set of heuristics to a new candidate. Heuristics are currently
2958 /// hierarchical. This may be more efficient than a graduated cost model because
2959 /// we don't need to evaluate all aspects of the model for each node in the
2960 /// queue. But it's really done to make the heuristics easier to debug and
2961 /// statistically analyze.
2963 /// \param Cand provides the policy and current best candidate.
2964 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
2965 /// \param Zone describes the scheduled zone that we are extending, or nullptr
2966 /// if Cand is from a different zone than TryCand.
2967 void GenericScheduler::tryCandidate(SchedCandidate &Cand,
2968 SchedCandidate &TryCand,
2969 SchedBoundary *Zone) const {
2970 // Initialize the candidate if needed.
2971 if (!Cand.isValid()) {
2972 TryCand.Reason = NodeOrder;
2973 return;
2974 }
2976 // Bias physreg defs and copies toward their uses and definitions, respectively.
2977 if (tryGreater(biasPhysReg(TryCand.SU, TryCand.AtTop),
2978 biasPhysReg(Cand.SU, Cand.AtTop), TryCand, Cand, PhysReg))
2979 return;
2981 // Avoid exceeding the target's limit.
2982 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
2983 Cand.RPDelta.Excess,
2984 TryCand, Cand, RegExcess, TRI,
2985 DAG->MF))
2986 return;
2988 // Avoid increasing the max critical pressure in the scheduled region.
2989 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
2990 Cand.RPDelta.CriticalMax,
2991 TryCand, Cand, RegCritical, TRI,
2992 DAG->MF))
2993 return;
2995 // We only compare a subset of features when comparing nodes between
2996 // Top and Bottom boundary. Some properties are simply incomparable, in many
2997 // other instances we should only override the other boundary if something
2998 // is a clear good pick on one boundary. Skip heuristics that are more
2999 // "tie-breaking" in nature.
3000 bool SameBoundary = Zone != nullptr;
3001 if (SameBoundary) {
3002 // For loops that are acyclic path limited, aggressively schedule for
3003 // latency. Within a single cycle, whenever CurrMOps > 0, allow normal
3004 // heuristics to take precedence.
3005 if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
3006 tryLatency(TryCand, Cand, *Zone))
3007 return;
3009 // Prioritize instructions that read unbuffered resources by stall cycles.
3010 if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
3011 Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3012 return;
3013 }
3015 // Keep clustered nodes together to encourage downstream peephole
3016 // optimizations which may reduce resource requirements.
3018 // This is a best effort to set things up for a post-RA pass. Optimizations
3019 // like generating loads of multiple registers should ideally be done within
3020 // the scheduler pass by combining the loads during DAG postprocessing.
3021 const SUnit *CandNextClusterSU =
3022 Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
3023 const SUnit *TryCandNextClusterSU =
3024 TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
3025 if (tryGreater(TryCand.SU == TryCandNextClusterSU,
3026 Cand.SU == CandNextClusterSU,
3027 TryCand, Cand, Cluster))
3028 return;
3030 if (SameBoundary) {
3031 // Weak edges are for clustering and other constraints.
3032 if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
3033 getWeakLeft(Cand.SU, Cand.AtTop),
3034 TryCand, Cand, Weak))
3035 return;
3036 }
3038 // Avoid increasing the max pressure of the entire region.
3039 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
3040 Cand.RPDelta.CurrentMax,
3041 TryCand, Cand, RegMax, TRI,
3042 DAG->MF))
3043 return;
3045 if (SameBoundary) {
3046 // Avoid critical resource consumption and balance the schedule.
3047 TryCand.initResourceDelta(DAG, SchedModel);
3048 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
3049 TryCand, Cand, ResourceReduce))
3050 return;
3051 if (tryGreater(TryCand.ResDelta.DemandedResources,
3052 Cand.ResDelta.DemandedResources,
3053 TryCand, Cand, ResourceDemand))
3054 return;
3056 // Avoid serializing long latency dependence chains.
3057 // For acyclic path limited loops, latency was already checked above.
3058 if (!RegionPolicy.DisableLatencyHeuristic && TryCand.Policy.ReduceLatency &&
3059 !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone))
3060 return;
3062 // Fall through to original instruction order.
3063 if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
3064 || (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
3065 TryCand.Reason = NodeOrder;
3066 }
3067 }
3068 }
3070 /// Pick the best candidate from the queue.
3072 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
3073 /// DAG building. To adjust for the current scheduling location we need to
3074 /// maintain the number of vreg uses remaining to be top-scheduled.
3075 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
3076 const CandPolicy &ZonePolicy,
3077 const RegPressureTracker &RPTracker,
3078 SchedCandidate &Cand) {
3079 // getMaxPressureDelta temporarily modifies the tracker.
3080 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
3082 ReadyQueue &Q = Zone.Available;
3083 for (SUnit *SU : Q) {
3085 SchedCandidate TryCand(ZonePolicy);
3086 initCandidate(TryCand, SU, Zone.isTop(), RPTracker, TempTracker);
3087 // Pass SchedBoundary only when comparing nodes from the same boundary.
3088 SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
3089 tryCandidate(Cand, TryCand, ZoneArg);
3090 if (TryCand.Reason != NoCand) {
3091 // Initialize resource delta if needed in case future heuristics query it.
3092 if (TryCand.ResDelta == SchedResourceDelta())
3093 TryCand.initResourceDelta(DAG, SchedModel);
3094 Cand.setBest(TryCand);
3095 LLVM_DEBUG(traceCandidate(Cand));
3096 }
3097 }
3098 }
3100 /// Pick the best candidate node from either the top or bottom queue.
3101 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
3102 // Schedule as far as possible in the direction of no choice. This is most
3103 // efficient, but also provides the best heuristics for CriticalPSets.
3104 if (SUnit *SU = Bot.pickOnlyChoice()) {
3105 IsTopNode = false;
3106 tracePick(Only1, false);
3107 return SU;
3108 }
3109 if (SUnit *SU = Top.pickOnlyChoice()) {
3110 IsTopNode = true;
3111 tracePick(Only1, true);
3112 return SU;
3113 }
3114 // Set the bottom-up policy based on the state of the current bottom zone and
3115 // the instructions outside the zone, including the top zone.
3116 CandPolicy BotPolicy;
3117 setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
3118 // Set the top-down policy based on the state of the current top zone and
3119 // the instructions outside the zone, including the bottom zone.
3120 CandPolicy TopPolicy;
3121 setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);
3123 // See if BotCand is still valid (because we previously scheduled from Top).
3124 LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
3125 if (!BotCand.isValid() || BotCand.SU->isScheduled ||
3126 BotCand.Policy != BotPolicy) {
3127 BotCand.reset(CandPolicy());
3128 pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
3129 assert(BotCand.Reason != NoCand && "failed to find the first candidate");
3130 } else {
3131 LLVM_DEBUG(traceCandidate(BotCand));
3132 #ifndef NDEBUG
3133 if (VerifyScheduling) {
3134 SchedCandidate TCand;
3135 TCand.reset(CandPolicy());
3136 pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
3137 assert(TCand.SU == BotCand.SU &&
3138 "Last pick result should correspond to re-picking right now");
3143 // Check if the top Q has a better candidate.
3144 LLVM_DEBUG(dbgs() << "Picking from Top:\n");
3145 if (!TopCand.isValid() || TopCand.SU->isScheduled ||
3146 TopCand.Policy != TopPolicy) {
3147 TopCand.reset(CandPolicy());
3148 pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
3149 assert(TopCand.Reason != NoCand && "failed to find the first candidate");
3150 } else {
3151 LLVM_DEBUG(traceCandidate(TopCand));
3152 #ifndef NDEBUG
3153 if (VerifyScheduling) {
3154 SchedCandidate TCand;
3155 TCand.reset(CandPolicy());
3156 pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
3157 assert(TCand.SU == TopCand.SU &&
3158 "Last pick result should correspond to re-picking right now");
3163 // Pick best from BotCand and TopCand.
3164 assert(BotCand.isValid());
3165 assert(TopCand.isValid());
3166 SchedCandidate Cand = BotCand;
3167 TopCand.Reason = NoCand;
3168 tryCandidate(Cand, TopCand, nullptr);
3169 if (TopCand.Reason != NoCand) {
3170 Cand.setBest(TopCand);
3171 LLVM_DEBUG(traceCandidate(Cand));
3172 }
3174 IsTopNode = Cand.AtTop;
3175 tracePick(Cand);
3176 return Cand.SU;
3177 }
3179 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
3180 SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
3181 if (DAG->top() == DAG->bottom()) {
3182 assert(Top.Available.empty() && Top.Pending.empty() &&
3183 Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
3188 if (RegionPolicy.OnlyTopDown) {
3189 SU = Top.pickOnlyChoice();
3190 if (!SU) {
3191 CandPolicy NoPolicy;
3192 TopCand.reset(NoPolicy);
3193 pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
3194 assert(TopCand.Reason != NoCand && "failed to find a candidate");
3199 } else if (RegionPolicy.OnlyBottomUp) {
3200 SU = Bot.pickOnlyChoice();
3201 if (!SU) {
3202 CandPolicy NoPolicy;
3203 BotCand.reset(NoPolicy);
3204 pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
3205 assert(BotCand.Reason != NoCand && "failed to find a candidate");
3211 SU = pickNodeBidirectional(IsTopNode);
3212 }
3213 } while (SU->isScheduled);
3215 if (SU->isTopReady())
3216 Top.removeReady(SU);
3217 if (SU->isBottomReady())
3218 Bot.removeReady(SU);
3220 LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
3221 << *SU->getInstr());
3222 return SU;
3223 }
3225 void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) {
3226 MachineBasicBlock::iterator InsertPos = SU->getInstr();
3227 if (!isTop)
3228 ++InsertPos;
3229 SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
3231 // Find already scheduled copies with a single physreg dependence and move
3232 // them just above the scheduled instruction.
3233 for (SDep &Dep : Deps) {
3234 if (Dep.getKind() != SDep::Data || !TRI->isPhysicalRegister(Dep.getReg()))
3235 continue;
3236 SUnit *DepSU = Dep.getSUnit();
3237 if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
3238 continue;
3239 MachineInstr *Copy = DepSU->getInstr();
3240 if (!Copy->isCopy() && !Copy->isMoveImmediate())
3241 continue;
3242 LLVM_DEBUG(dbgs() << " Rescheduling physreg copy ";
3243 DAG->dumpNode(*Dep.getSUnit()));
3244 DAG->moveInstruction(Copy, InsertPos);
3245 }
3246 }
3248 /// Update the scheduler's state after scheduling a node. This is the same node
3249 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
3250 /// update its state based on the current cycle before MachineSchedStrategy
3251 /// does.
3253 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
3254 /// them here. See comments in biasPhysReg.
3255 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3256 if (IsTopNode) {
3257 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3258 Top.bumpNode(SU);
3259 if (SU->hasPhysRegUses)
3260 reschedulePhysReg(SU, true);
3261 } else {
3262 SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
3263 Bot.bumpNode(SU);
3264 if (SU->hasPhysRegDefs)
3265 reschedulePhysReg(SU, false);
3266 }
3267 }
3269 /// Create the standard converging machine scheduler. This will be used as the
3270 /// default scheduler if the target does not set a default.
3271 ScheduleDAGMILive *llvm::createGenericSchedLive(MachineSchedContext *C) {
3272 ScheduleDAGMILive *DAG =
3273 new ScheduleDAGMILive(C, llvm::make_unique<GenericScheduler>(C));
3274 // Register DAG post-processors.
3276 // FIXME: extend the mutation API to allow earlier mutations to instantiate
3277 // data and pass it to later mutations. Have a single mutation that gathers
3278 // the interesting nodes in one pass.
3279 DAG->addMutation(createCopyConstrainDAGMutation(DAG->TII, DAG->TRI));
3280 return DAG;
3281 }
3283 static ScheduleDAGInstrs *createConveringSched(MachineSchedContext *C) {
3284 return createGenericSchedLive(C);
3285 }
3287 static MachineSchedRegistry
3288 GenericSchedRegistry("converge", "Standard converging scheduler.",
3289 createConveringSched);
3291 //===----------------------------------------------------------------------===//
3292 // PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
3293 //===----------------------------------------------------------------------===//
3295 void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
3296 DAG = Dag;
3297 SchedModel = DAG->getSchedModel();
3298 TRI = DAG->TRI;
3300 Rem.init(DAG, SchedModel);
3301 Top.init(DAG, SchedModel, &Rem);
3302 BotRoots.clear();
3304 // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
3305 // or are disabled, then these HazardRecs will be disabled.
3306 const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
3307 if (!Top.HazardRec) {
3308 Top.HazardRec =
3309 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
3310 Itin, DAG);
3311 }
3312 }
3314 void PostGenericScheduler::registerRoots() {
3315 Rem.CriticalPath = DAG->ExitSU.getDepth();
3317 // Some roots may not feed into ExitSU. Check all of them in case.
3318 for (const SUnit *SU : BotRoots) {
3319 if (SU->getDepth() > Rem.CriticalPath)
3320 Rem.CriticalPath = SU->getDepth();
3321 }
3322 LLVM_DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
3323 if (DumpCriticalPathLength) {
3324 errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
/// Apply a set of heuristics to a new candidate for PostRA scheduling.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
                                        SchedCandidate &TryCand) {
  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  // Prioritize instructions that read unbuffered resources by stall cycles.
  if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
              Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
    return;

  // Keep clustered nodes together.
  if (tryGreater(TryCand.SU == DAG->getNextClusterSucc(),
                 Cand.SU == DAG->getNextClusterSucc(),
                 TryCand, Cand, Cluster))
    return;

  // Avoid critical resource consumption and balance the schedule.
  if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
              TryCand, Cand, ResourceReduce))
    return;
  if (tryGreater(TryCand.ResDelta.DemandedResources,
                 Cand.ResDelta.DemandedResources,
                 TryCand, Cand, ResourceDemand))
    return;

  // Avoid serializing long latency dependence chains.
  if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
    return;
  }

  // Fall through to original instruction order.
  if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
    TryCand.Reason = NodeOrder;
}
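
// The try* helpers above share a contract: each call either decides between
// the two candidates and returns true, or reports a tie and returns false, so
// the heuristics apply strictly in the order written. A simplified sketch of
// how a caller consumes that contract (see GenericSchedulerBase's tryLess and
// tryGreater for the real helpers; SomeReason is a placeholder):
//
//   if (tryLess(TryVal, CandVal, TryCand, Cand, SomeReason))
//     return; // decided: for SomeReason, the lower value wins
//   // otherwise fall through to the next, lower-priority heuristic
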
void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
  ReadyQueue &Q = Top.Available;
  for (SUnit *SU : Q) {
    SchedCandidate TryCand(Cand.Policy);
    TryCand.SU = SU;
    TryCand.AtTop = true;
    TryCand.initResourceDelta(DAG, SchedModel);
    tryCandidate(Cand, TryCand);
    if (TryCand.Reason != NoCand) {
      Cand.setBest(TryCand);
      LLVM_DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the next node to schedule.
SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    SU = Top.pickOnlyChoice();
    if (SU) {
      tracePick(Only1, true);
    } else {
      CandPolicy NoPolicy;
      SchedCandidate TopCand(NoPolicy);
      // Set the top-down policy based on the state of the current top zone and
      // the instructions outside the zone, including the bottom zone.
      setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
      pickNodeFromQueue(TopCand);
      assert(TopCand.Reason != NoCand && "failed to find a candidate");
      tracePick(TopCand);
      SU = TopCand.SU;
    }
  } while (SU->isScheduled);

  IsTopNode = true;
  Top.removeReady(SU);

  LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
                    << *SU->getInstr());
  return SU;
}

/// Called after ScheduleDAGMI has scheduled an instruction and updated
/// scheduled/remaining flags in the DAG nodes.
void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
  Top.bumpNode(SU);
}

ScheduleDAGMI *llvm::createGenericSchedPostRA(MachineSchedContext *C) {
  return new ScheduleDAGMI(C, llvm::make_unique<PostGenericScheduler>(C),
                           /*RemoveKillFlags=*/true);
}
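
// A target can opt into this PostRA strategy by overriding
// TargetPassConfig::createPostMachineScheduler(). A minimal sketch, assuming
// a hypothetical MyTargetPassConfig subclass:
//
//   ScheduleDAGInstrs *
//   MyTargetPassConfig::createPostMachineScheduler(
//       MachineSchedContext *C) const {
//     return createGenericSchedPostRA(C);
//   }
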
//===----------------------------------------------------------------------===//
// ILP Scheduler. Currently for experimental analysis of heuristics.
//===----------------------------------------------------------------------===//

namespace {

/// Order nodes by the ILP metric.
struct ILPOrder {
  const SchedDFSResult *DFSResult = nullptr;
  const BitVector *ScheduledTrees = nullptr;
  bool MaximizeILP;

  ILPOrder(bool MaxILP) : MaximizeILP(MaxILP) {}

  /// Apply a less-than relation on node priority.
  ///
  /// (Return true if A comes after B in the Q.)
  bool operator()(const SUnit *A, const SUnit *B) const {
    unsigned SchedTreeA = DFSResult->getSubtreeID(A);
    unsigned SchedTreeB = DFSResult->getSubtreeID(B);
    if (SchedTreeA != SchedTreeB) {
      // Unscheduled trees have lower priority.
      if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
        return ScheduledTrees->test(SchedTreeB);

      // Trees with shallower connections have lower priority.
      if (DFSResult->getSubtreeLevel(SchedTreeA)
          != DFSResult->getSubtreeLevel(SchedTreeB)) {
        return DFSResult->getSubtreeLevel(SchedTreeA)
            < DFSResult->getSubtreeLevel(SchedTreeB);
      }
    }
    if (MaximizeILP)
      return DFSResult->getILP(A) < DFSResult->getILP(B);
    else
      return DFSResult->getILP(A) > DFSResult->getILP(B);
  }
};
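
// For example, with MaximizeILP set, two nodes in the same subtree compare by
// their ILP values directly: operator() returns true when A's ILP is lower,
// so A sinks in the heap and the higher-ILP node is popped first.
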
/// Schedule based on the ILP metric.
class ILPScheduler : public MachineSchedStrategy {
  ScheduleDAGMILive *DAG = nullptr;
  ILPOrder Cmp;

  std::vector<SUnit*> ReadyQ;

public:
  ILPScheduler(bool MaximizeILP) : Cmp(MaximizeILP) {}

  void initialize(ScheduleDAGMI *dag) override {
    assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
    DAG = static_cast<ScheduleDAGMILive*>(dag);
    DAG->computeDFSResult();
    Cmp.DFSResult = DAG->getDFSResult();
    Cmp.ScheduledTrees = &DAG->getScheduledTrees();
    ReadyQ.clear();
  }

  void registerRoots() override {
    // Restore the heap in ReadyQ with the updated DFS results.
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  /// Callback to select the highest priority node from the ready Q.
  SUnit *pickNode(bool &IsTopNode) override {
    if (ReadyQ.empty()) return nullptr;
    std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
    SUnit *SU = ReadyQ.back();
    ReadyQ.pop_back();
    IsTopNode = false;
    LLVM_DEBUG(dbgs() << "Pick node "
                      << "SU(" << SU->NodeNum << ") "
                      << " ILP: " << DAG->getDFSResult()->getILP(SU)
                      << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU)
                      << " @"
                      << DAG->getDFSResult()->getSubtreeLevel(
                             DAG->getDFSResult()->getSubtreeID(SU))
                      << '\n'
                      << "Scheduling " << *SU->getInstr());
    return SU;
  }

  /// Scheduler callback to notify that a new subtree is scheduled.
  void scheduleTree(unsigned SubtreeID) override {
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Callback after a node is scheduled. Mark a newly scheduled tree, notify
  /// DFSResults, and resort the priority Q.
  void schedNode(SUnit *SU, bool IsTopNode) override {
    assert(!IsTopNode && "SchedDFSResult needs bottom-up");
  }

  void releaseTopNode(SUnit *) override { /*only called for top roots*/ }

  void releaseBottomNode(SUnit *SU) override {
    ReadyQ.push_back(SU);
    std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }
};

} // end anonymous namespace

static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, llvm::make_unique<ILPScheduler>(true));
}

static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, llvm::make_unique<ILPScheduler>(false));
}

static MachineSchedRegistry ILPMaxRegistry(
    "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
static MachineSchedRegistry ILPMinRegistry(
    "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
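
// Both ILP strategies are selectable by name; an illustrative invocation (the
// input file name is hypothetical):
//
//   llc -enable-misched -misched=ilpmax foo.ll
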
//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG

namespace {

/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};

/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
  bool IsAlternating;
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority, causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false>>
    TopQ;

  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true>>
    BottomQ;

public:
  InstructionShuffler(bool alternate, bool topdown)
    : IsAlternating(alternate), IsTopDown(topdown) {}

  void initialize(ScheduleDAGMI*) override {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  SUnit *pickNode(bool &IsTopNode) override {
    SUnit *SU;
    if (IsTopDown) {
      do {
        if (TopQ.empty()) return nullptr;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    } else {
      do {
        if (BottomQ.empty()) return nullptr;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  void schedNode(SUnit *SU, bool IsTopNode) override {}

  void releaseTopNode(SUnit *SU) override {
    TopQ.push(SU);
  }

  void releaseBottomNode(SUnit *SU) override {
    BottomQ.push(SU);
  }
};

} // end anonymous namespace

static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMILive(
      C, llvm::make_unique<InstructionShuffler>(Alternate, TopDown));
}

static MachineSchedRegistry ShufflerRegistry(
    "shuffle", "Shuffle machine instructions alternating directions",
    createInstructionShuffler);
#endif // !NDEBUG
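
// The shuffler is only compiled into assert builds (!NDEBUG). An illustrative
// invocation (the input file name is hypothetical):
//
//   llc -enable-misched -misched=shuffle foo.ll
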
//===----------------------------------------------------------------------===//
// GraphWriter support for ScheduleDAGMILive.
//===----------------------------------------------------------------------===//

#ifndef NDEBUG

namespace llvm {

template<> struct GraphTraits<
  ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};

template<>
struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const ScheduleDAG *G) {
    return G->MF.getName();
  }

  static bool renderGraphFromBottomUp() {
    return true;
  }

  static bool isNodeHidden(const SUnit *Node) {
    if (ViewMISchedCutoff == 0)
      return false;
    return (Node->Preds.size() > ViewMISchedCutoff
           || Node->Succs.size() > ViewMISchedCutoff);
  }

  /// If you want to override the dot attributes printed for a particular
  /// edge, override this method.
  static std::string getEdgeAttributes(const SUnit *Node,
                                       SUnitIterator EI,
                                       const ScheduleDAG *Graph) {
    if (EI.isArtificialDep())
      return "color=cyan,style=dashed";
    if (EI.isCtrlDep())
      return "color=blue,style=dashed";
    return "";
  }

  static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
    std::string Str;
    raw_string_ostream SS(Str);
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    SS << "SU:" << SU->NodeNum;
    if (DFS)
      SS << " I:" << DFS->getNumInstrs(SU);
    return SS.str();
  }

  static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
    return G->getGraphNodeLabel(SU);
  }

  static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
    std::string Str("shape=Mrecord");
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    if (DFS) {
      Str += ",style=filled,fillcolor=\"#";
      Str += DOT::getColorString(DFS->getSubtreeID(N));
      Str += '"';
    }
    return Str;
  }
};

} // end namespace llvm
#endif // NDEBUG

/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
  ViewGraph(this, Name, false, Title);
#else
  errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif  // NDEBUG
}

/// Out-of-line implementation with no arguments is handy for gdb.
void ScheduleDAGMI::viewGraph() {
  viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
}
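
// Illustrative command lines for the graph support above (debug builds with
// Graphviz/gv installed; the input file name is hypothetical):
//
//   llc -enable-misched -view-misched-dags foo.ll
//   llc -enable-misched -view-misched-dags -view-misched-cutoff=10 foo.ll
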