//===- ScheduleDAG.cpp - Implement the ScheduleDAG class -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file Implements the ScheduleDAG class, which is a base class used by
/// scheduling implementation classes.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <limits>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "pre-RA-sched"

#ifndef NDEBUG
static cl::opt<bool> StressSchedOpt(
    "stress-sched", cl::Hidden, cl::init(false),
    cl::desc("Stress test instruction scheduling"));
#endif

void SchedulingPriorityQueue::anchor() {}

ScheduleDAG::ScheduleDAG(MachineFunction &mf)
    : TM(mf.getTarget()), TII(mf.getSubtarget().getInstrInfo()),
      TRI(mf.getSubtarget().getRegisterInfo()), MF(mf),
      MRI(mf.getRegInfo()) {
#ifndef NDEBUG
  StressSched = StressSchedOpt;
#endif
}

ScheduleDAG::~ScheduleDAG() = default;

void ScheduleDAG::clearDAG() {
  SUnits.clear();
  EntrySU = SUnit();
  ExitSU = SUnit();
}

const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
  if (!Node || !Node->isMachineOpcode()) return nullptr;
  return &TII->get(Node->getMachineOpcode());
}

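/// Adds the specified edge as a predecessor of the current node if it does
/// not already exist. It also adds the current node as a successor of the
/// specified node. Returns true if the edge was added, false otherwise.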
bool SUnit::addPred(const SDep &D, bool Required) {
  // If this node already has this dependence, don't add a redundant one.
  for (SDep &PredDep : Preds) {
    // Zero-latency weak edges may be added purely for heuristic ordering.
    // Don't add them if another kind of edge already exists.
    if (!Required && PredDep.getSUnit() == D.getSUnit())
      return false;
    if (PredDep.overlaps(D)) {
      // Extend the latency if needed. Equivalent to
      // removePred(PredDep) + addPred(D).
      if (PredDep.getLatency() < D.getLatency()) {
        SUnit *PredSU = PredDep.getSUnit();
        // Find the corresponding successor in N.
        SDep ForwardD = PredDep;
        ForwardD.setSUnit(this);
        for (SDep &SuccDep : PredSU->Succs) {
          if (SuccDep == ForwardD) {
            SuccDep.setLatency(D.getLatency());
            break;
          }
        }
        PredDep.setLatency(D.getLatency());
      }
      return false;
    }
  }
  // Now add a corresponding succ to N.
  SDep P = D;
  P.setSUnit(this);
  SUnit *N = D.getSUnit();
  // Update the bookkeeping.
  if (D.getKind() == SDep::Data) {
    assert(NumPreds < std::numeric_limits<unsigned>::max() &&
           "NumPreds will overflow!");
    assert(N->NumSuccs < std::numeric_limits<unsigned>::max() &&
           "NumSuccs will overflow!");
    ++NumPreds;
    ++N->NumSuccs;
  }
  if (!N->isScheduled) {
    if (D.isWeak()) {
      ++WeakPredsLeft;
    } else {
      assert(NumPredsLeft < std::numeric_limits<unsigned>::max() &&
             "NumPredsLeft will overflow!");
      ++NumPredsLeft;
    }
  }
  if (!isScheduled) {
    if (D.isWeak()) {
      ++N->WeakSuccsLeft;
    } else {
      assert(N->NumSuccsLeft < std::numeric_limits<unsigned>::max() &&
             "NumSuccsLeft will overflow!");
      ++N->NumSuccsLeft;
    }
  }
  Preds.push_back(D);
  N->Succs.push_back(P);
  if (P.getLatency() != 0) {
    this->setDepthDirty();
    N->setHeightDirty();
  }
  return true;
}

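/// Removes the specified edge as a predecessor of the current node if it
/// exists. It also removes the current node as a successor of the specified
/// node.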
void SUnit::removePred(const SDep &D) {
  // Find the matching predecessor.
  SmallVectorImpl<SDep>::iterator I = llvm::find(Preds, D);
  if (I == Preds.end())
    return;
  // Find the corresponding successor in N.
  SDep P = D;
  P.setSUnit(this);
  SUnit *N = D.getSUnit();
  SmallVectorImpl<SDep>::iterator Succ = llvm::find(N->Succs, P);
  assert(Succ != N->Succs.end() && "Mismatching preds / succs lists!");
  N->Succs.erase(Succ);
  Preds.erase(I);
  // Update the bookkeeping.
  if (P.getKind() == SDep::Data) {
    assert(NumPreds > 0 && "NumPreds will underflow!");
    assert(N->NumSuccs > 0 && "NumSuccs will underflow!");
    --NumPreds;
    --N->NumSuccs;
  }
  if (!N->isScheduled) {
    if (D.isWeak())
      --WeakPredsLeft;
    else {
      assert(NumPredsLeft > 0 && "NumPredsLeft will underflow!");
      --NumPredsLeft;
    }
  }
  if (!isScheduled) {
    if (D.isWeak())
      --N->WeakSuccsLeft;
    else {
      assert(N->NumSuccsLeft > 0 && "NumSuccsLeft will underflow!");
      --N->NumSuccsLeft;
    }
  }
  if (P.getLatency() != 0) {
    this->setDepthDirty();
    N->setHeightDirty();
  }
}

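/// Marks this node's cached depth as stale and transitively invalidates the
/// cached depth of every successor that depends on it.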
void SUnit::setDepthDirty() {
  if (!isDepthCurrent) return;
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *SU = WorkList.pop_back_val();
    SU->isDepthCurrent = false;
    for (SDep &SuccDep : SU->Succs) {
      SUnit *SuccSU = SuccDep.getSUnit();
      if (SuccSU->isDepthCurrent)
        WorkList.push_back(SuccSU);
    }
  } while (!WorkList.empty());
}

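/// Marks this node's cached height as stale and transitively invalidates the
/// cached height of every predecessor that depends on it.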
void SUnit::setHeightDirty() {
  if (!isHeightCurrent) return;
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *SU = WorkList.pop_back_val();
    SU->isHeightCurrent = false;
    for (SDep &PredDep : SU->Preds) {
      SUnit *PredSU = PredDep.getSUnit();
      if (PredSU->isHeightCurrent)
        WorkList.push_back(PredSU);
    }
  } while (!WorkList.empty());
}

void SUnit::setDepthToAtLeast(unsigned NewDepth) {
  if (NewDepth <= getDepth())
    return;
  setDepthDirty();
  Depth = NewDepth;
  isDepthCurrent = true;
}

void SUnit::setHeightToAtLeast(unsigned NewHeight) {
  if (NewHeight <= getHeight())
    return;
  setHeightDirty();
  Height = NewHeight;
  isHeightCurrent = true;
}

/// Calculates the maximal path from the node back to the entry.
void SUnit::ComputeDepth() {
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *Cur = WorkList.back();

    bool Done = true;
    unsigned MaxPredDepth = 0;
    for (const SDep &PredDep : Cur->Preds) {
      SUnit *PredSU = PredDep.getSUnit();
      if (PredSU->isDepthCurrent)
        MaxPredDepth = std::max(MaxPredDepth,
                                PredSU->Depth + PredDep.getLatency());
      else {
        Done = false;
        WorkList.push_back(PredSU);
      }
    }

    if (Done) {
      WorkList.pop_back();
      if (MaxPredDepth != Cur->Depth) {
        Cur->setDepthDirty();
        Cur->Depth = MaxPredDepth;
      }
      Cur->isDepthCurrent = true;
    }
  } while (!WorkList.empty());
}

/// Calculates the maximal path from the node to the exit.
void SUnit::ComputeHeight() {
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *Cur = WorkList.back();

    bool Done = true;
    unsigned MaxSuccHeight = 0;
    for (const SDep &SuccDep : Cur->Succs) {
      SUnit *SuccSU = SuccDep.getSUnit();
      if (SuccSU->isHeightCurrent)
        MaxSuccHeight = std::max(MaxSuccHeight,
                                 SuccSU->Height + SuccDep.getLatency());
      else {
        Done = false;
        WorkList.push_back(SuccSU);
      }
    }

    if (Done) {
      WorkList.pop_back();
      if (MaxSuccHeight != Cur->Height) {
        Cur->setHeightDirty();
        Cur->Height = MaxSuccHeight;
      }
      Cur->isHeightCurrent = true;
    }
  } while (!WorkList.empty());
}

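/// Orders this node's predecessor edges such that the critical path edge
/// occurs first.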
void SUnit::biasCriticalPath() {
  if (NumPreds < 2)
    return;

  SUnit::pred_iterator BestI = Preds.begin();
  unsigned MaxDepth = BestI->getSUnit()->getDepth();
  for (SUnit::pred_iterator I = std::next(BestI), E = Preds.end(); I != E;
       ++I) {
    if (I->getKind() == SDep::Data && I->getSUnit()->getDepth() > MaxDepth)
      BestI = I;
  }
  if (BestI != Preds.begin())
    std::swap(*Preds.begin(), *BestI);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void SUnit::print(raw_ostream &OS, const ScheduleDAG *DAG) const {
  if (this == &DAG->ExitSU)
    OS << "ExitSU";
  else if (this == &DAG->EntrySU)
    OS << "EntrySU";
  else
    OS << "SU(" << NodeNum << ")";
}

LLVM_DUMP_METHOD void SUnit::dump(const ScheduleDAG *G) const {
  print(dbgs(), G);
  dbgs() << ": ";
  G->dumpNode(this);
}

LLVM_DUMP_METHOD void SUnit::dumpAll(const ScheduleDAG *G) const {
  dump(G);

  dbgs() << "  # preds left       : " << NumPredsLeft << "\n";
  dbgs() << "  # succs left       : " << NumSuccsLeft << "\n";
  if (WeakPredsLeft)
    dbgs() << "  # weak preds left  : " << WeakPredsLeft << "\n";
  if (WeakSuccsLeft)
    dbgs() << "  # weak succs left  : " << WeakSuccsLeft << "\n";
  dbgs() << "  # rdefs left       : " << NumRegDefsLeft << "\n";
  dbgs() << "  Latency            : " << Latency << "\n";
  dbgs() << "  Depth              : " << getDepth() << "\n";
  dbgs() << "  Height             : " << getHeight() << "\n";

  if (Preds.size() != 0) {
    dbgs() << "  Predecessors:\n";
    for (const SDep &PredDep : Preds) {
      dbgs() << "    ";
      switch (PredDep.getKind()) {
      case SDep::Data:   dbgs() << "data "; break;
      case SDep::Anti:   dbgs() << "anti "; break;
      case SDep::Output: dbgs() << "out  "; break;
      case SDep::Order:  dbgs() << "ord  "; break;
      }
      PredDep.getSUnit()->print(dbgs(), G);
      if (PredDep.isArtificial())
        dbgs() << " *";
      dbgs() << ": Latency=" << PredDep.getLatency();
      if (PredDep.isAssignedRegDep())
        dbgs() << " Reg=" << PrintReg(PredDep.getReg(), G->TRI);
      dbgs() << "\n";
    }
  }
  if (Succs.size() != 0) {
    dbgs() << "  Successors:\n";
    for (const SDep &SuccDep : Succs) {
      dbgs() << "    ";
      switch (SuccDep.getKind()) {
      case SDep::Data:   dbgs() << "data "; break;
      case SDep::Anti:   dbgs() << "anti "; break;
      case SDep::Output: dbgs() << "out  "; break;
      case SDep::Order:  dbgs() << "ord  "; break;
      }
      SuccDep.getSUnit()->print(dbgs(), G);
      if (SuccDep.isArtificial())
        dbgs() << " *";
      dbgs() << ": Latency=" << SuccDep.getLatency();
      if (SuccDep.isAssignedRegDep())
        dbgs() << " Reg=" << PrintReg(SuccDep.getReg(), G->TRI);
      dbgs() << "\n";
    }
  }
}
#endif

#ifndef NDEBUG
unsigned ScheduleDAG::VerifyScheduledDAG(bool isBottomUp) {
  bool AnyNotSched = false;
  unsigned DeadNodes = 0;
  for (const SUnit &SUnit : SUnits) {
    if (!SUnit.isScheduled) {
      if (SUnit.NumPreds == 0 && SUnit.NumSuccs == 0) {
        ++DeadNodes;
        continue;
      }
      if (!AnyNotSched)
        dbgs() << "*** Scheduling failed! ***\n";
      SUnit.dump(this);
      dbgs() << "has not been scheduled!\n";
      AnyNotSched = true;
    }
    if (SUnit.isScheduled &&
        (isBottomUp ? SUnit.getHeight() : SUnit.getDepth()) >
          unsigned(std::numeric_limits<int>::max())) {
      if (!AnyNotSched)
        dbgs() << "*** Scheduling failed! ***\n";
      SUnit.dump(this);
      dbgs() << "has an unexpected "
             << (isBottomUp ? "Height" : "Depth") << " value!\n";
      AnyNotSched = true;
    }
    if (isBottomUp) {
      if (SUnit.NumSuccsLeft != 0) {
        if (!AnyNotSched)
          dbgs() << "*** Scheduling failed! ***\n";
        SUnit.dump(this);
        dbgs() << "has successors left!\n";
        AnyNotSched = true;
      }
    } else {
      if (SUnit.NumPredsLeft != 0) {
        if (!AnyNotSched)
          dbgs() << "*** Scheduling failed! ***\n";
        SUnit.dump(this);
        dbgs() << "has predecessors left!\n";
        AnyNotSched = true;
      }
    }
  }
  assert(!AnyNotSched);
  return SUnits.size() - DeadNodes;
}
#endif

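/// Creates the initial topological ordering from the DAG to be scheduled.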
void ScheduleDAGTopologicalSort::InitDAGTopologicalSorting() {
  // The idea of the algorithm is taken from
  // "Online algorithms for managing the topological order of
  // a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly.
  // This is the MNR algorithm, which was first introduced by
  // A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
  // "Maintaining a topological order under edge insertions".
  //
  // Short description of the algorithm:
  //
  // Topological ordering, ord, of a DAG maps each node to a topological
  // index so that for all edges X->Y it is the case that ord(X) < ord(Y).
  //
  // This means that if there is a path from the node X to the node Z,
  // then ord(X) < ord(Z).
  //
  // This property can be used to check for reachability of nodes:
  // if Z is reachable from X, then an insertion of the edge Z->X would
  // create a cycle.
  //
  // The algorithm first computes a topological ordering for the DAG by
  // initializing the Index2Node and Node2Index arrays and then tries to keep
  // the ordering up-to-date after edge insertions by reordering the DAG.
  //
  // On insertion of the edge X->Y, the algorithm first marks by calling DFS
  // the nodes reachable from Y, and then shifts them using Shift to lie
  // immediately after X in Index2Node.
  unsigned DAGSize = SUnits.size();
  std::vector<SUnit*> WorkList;
  WorkList.reserve(DAGSize);

  Index2Node.resize(DAGSize);
  Node2Index.resize(DAGSize);

  // Initialize the data structures.
  if (ExitSU)
    WorkList.push_back(ExitSU);
  for (SUnit &SU : SUnits) {
    int NodeNum = SU.NodeNum;
    unsigned Degree = SU.Succs.size();
    // Temporarily use the Node2Index array as scratch space for degree counts.
    Node2Index[NodeNum] = Degree;

    // Is it a node without dependencies?
    if (Degree == 0) {
      assert(SU.Succs.empty() && "SUnit should have no successors");
      // Collect leaf nodes.
      WorkList.push_back(&SU);
    }
  }

  int Id = DAGSize;
  while (!WorkList.empty()) {
    SUnit *SU = WorkList.back();
    WorkList.pop_back();
    if (SU->NodeNum < DAGSize)
      Allocate(SU->NodeNum, --Id);
    for (const SDep &PredDep : SU->Preds) {
      SUnit *PredSU = PredDep.getSUnit();
      if (PredSU->NodeNum < DAGSize && !--Node2Index[PredSU->NodeNum])
        // If all dependencies of the node are processed already,
        // then the node can be computed now.
        WorkList.push_back(PredSU);
    }
  }

  Visited.resize(DAGSize);

#ifndef NDEBUG
  // Check correctness of the ordering.
  for (SUnit &SU : SUnits) {
    for (const SDep &PD : SU.Preds) {
      assert(Node2Index[SU.NodeNum] > Node2Index[PD.getSUnit()->NodeNum] &&
             "Wrong topological sorting");
    }
  }
#endif
}

void ScheduleDAGTopologicalSort::AddPred(SUnit *Y, SUnit *X) {
  int UpperBound, LowerBound;
  LowerBound = Node2Index[Y->NodeNum];
  UpperBound = Node2Index[X->NodeNum];
  bool HasLoop = false;
  // Is Ord(X) < Ord(Y) ?
  if (LowerBound < UpperBound) {
    // Update the topological order.
    Visited.reset();
    DFS(Y, UpperBound, HasLoop);
    assert(!HasLoop && "Inserted edge creates a loop!");
    // Recompute topological indexes.
    Shift(Visited, LowerBound, UpperBound);
  }
}

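/// Updates the topological ordering to accommodate the removal of the edge
/// from node N (predecessor) to node M. Removing an edge never invalidates a
/// topological order, so the ordering is left unchanged.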
void ScheduleDAGTopologicalSort::RemovePred(SUnit *M, SUnit *N) {
  // InitDAGTopologicalSorting();
}

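/// Worklist-based depth-first search over the successor edges of SU. Marks
/// every reachable node with a topological index below UpperBound in Visited,
/// and sets HasLoop if a node with index equal to UpperBound is reached.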
void ScheduleDAGTopologicalSort::DFS(const SUnit *SU, int UpperBound,
                                     bool &HasLoop) {
  std::vector<const SUnit*> WorkList;
  WorkList.reserve(SUnits.size());

  WorkList.push_back(SU);
  do {
    SU = WorkList.back();
    WorkList.pop_back();
    Visited.set(SU->NodeNum);
    for (const SDep &SuccDep
         : make_range(SU->Succs.rbegin(), SU->Succs.rend())) {
      unsigned s = SuccDep.getSUnit()->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
      if (s >= Node2Index.size())
        continue;
      if (Node2Index[s] == UpperBound) {
        HasLoop = true;
        return;
      }
      // Visit successors if not already visited and in affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        WorkList.push_back(SuccDep.getSUnit());
      }
    }
  } while (!WorkList.empty());
}

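/// Returns an array of SUs that are both in the successor subtree of StartSU
/// and in the predecessor subtree of TargetSU. StartSU and TargetSU are not
/// included in the array. Success is set to false if there is no path between
/// the two nodes.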
std::vector<int> ScheduleDAGTopologicalSort::GetSubGraph(const SUnit &StartSU,
                                                         const SUnit &TargetSU,
                                                         bool &Success) {
  std::vector<const SUnit*> WorkList;
  int LowerBound = Node2Index[StartSU.NodeNum];
  int UpperBound = Node2Index[TargetSU.NodeNum];
  bool Found = false;
  BitVector VisitedBack;
  std::vector<int> Nodes;

  if (LowerBound > UpperBound) {
    Success = false;
    return Nodes;
  }

  WorkList.reserve(SUnits.size());
  Visited.reset();

  // Starting from StartSU, visit all successors up
  // to UpperBound.
  WorkList.push_back(&StartSU);
  do {
    const SUnit *SU = WorkList.back();
    WorkList.pop_back();
    for (int I = SU->Succs.size()-1; I >= 0; --I) {
      const SUnit *Succ = SU->Succs[I].getSUnit();
      unsigned s = Succ->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
      if (Succ->isBoundaryNode())
        continue;
      if (Node2Index[s] == UpperBound) {
        Found = true;
        continue;
      }
      // Visit successors if not already visited and in affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        Visited.set(s);
        WorkList.push_back(Succ);
      }
    }
  } while (!WorkList.empty());

  if (!Found) {
    Success = false;
    return Nodes;
  }

  WorkList.clear();
  VisitedBack.resize(SUnits.size());
  Found = false;

  // Starting from TargetSU, visit all predecessors up
  // to LowerBound. SUs that are visited by the two
  // passes are added to Nodes.
  WorkList.push_back(&TargetSU);
  do {
    const SUnit *SU = WorkList.back();
    WorkList.pop_back();
    for (int I = SU->Preds.size()-1; I >= 0; --I) {
      const SUnit *Pred = SU->Preds[I].getSUnit();
      unsigned s = Pred->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. EntrySU).
      if (Pred->isBoundaryNode())
        continue;
      if (Node2Index[s] == LowerBound) {
        Found = true;
        continue;
      }
      if (!VisitedBack.test(s) && Visited.test(s)) {
        VisitedBack.set(s);
        WorkList.push_back(Pred);
        Nodes.push_back(s);
      }
    }
  } while (!WorkList.empty());

  assert(Found && "Error in SUnit Graph!");
  Success = true;
  return Nodes;
}

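/// Reassigns topological indexes within the window [LowerBound, UpperBound]
/// so that the nodes marked in Visited are moved to the upper end of the
/// window while the remaining nodes are compacted downward, preserving a
/// valid topological ordering.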
void ScheduleDAGTopologicalSort::Shift(BitVector& Visited, int LowerBound,
                                       int UpperBound) {
  std::vector<int> L;
  int shift = 0;
  int i;

  for (i = LowerBound; i <= UpperBound; ++i) {
    // w is node at topological index i.
    int w = Index2Node[i];
    if (Visited.test(w)) {
      // Unmark.
      Visited.reset(w);
      L.push_back(w);
      shift = shift + 1;
    } else {
      Allocate(w, i - shift);
    }
  }

  for (unsigned LI : L) {
    Allocate(LI, i - shift);
    i = i + 1;
  }
}

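/// Returns true if addPred(TargetSU, SU) creates a cycle, i.e. if SU is
/// already reachable from TargetSU (also checked through TargetSU's
/// assigned-register predecessors).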
bool ScheduleDAGTopologicalSort::WillCreateCycle(SUnit *TargetSU, SUnit *SU) {
  // Is SU reachable from TargetSU via successor edges?
  if (IsReachable(SU, TargetSU))
    return true;
  for (const SDep &PredDep : TargetSU->Preds)
    if (PredDep.isAssignedRegDep() &&
        IsReachable(SU, PredDep.getSUnit()))
      return true;
  return false;
}

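/// Checks if SU is reachable from TargetSU.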
bool ScheduleDAGTopologicalSort::IsReachable(const SUnit *SU,
                                             const SUnit *TargetSU) {
  // If insertion of the edge SU->TargetSU would create a cycle
  // then there is a path from TargetSU to SU.
  int UpperBound, LowerBound;
  LowerBound = Node2Index[TargetSU->NodeNum];
  UpperBound = Node2Index[SU->NodeNum];
  bool HasLoop = false;
  // Is Ord(TargetSU) < Ord(SU) ?
  if (LowerBound < UpperBound) {
    Visited.reset();
    // There may be a path from TargetSU to SU. Check for it.
    DFS(TargetSU, UpperBound, HasLoop);
  }
  return HasLoop;
}

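/// Assigns topological index 'index' to node n, keeping Node2Index and
/// Index2Node in sync.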
void ScheduleDAGTopologicalSort::Allocate(int n, int index) {
  Node2Index[n] = index;
  Index2Node[index] = n;
}

ScheduleDAGTopologicalSort::
ScheduleDAGTopologicalSort(std::vector<SUnit> &sunits, SUnit *exitsu)
    : SUnits(sunits), ExitSU(exitsu) {}

ScheduleHazardRecognizer::~ScheduleHazardRecognizer() = default;