1 //===------- llvm/CodeGen/ScheduleDAG.h - Common Base Class------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the ScheduleDAG class, which is used as the common
11 // base class for instruction schedulers. This encapsulates the scheduling DAG,
12 // which is shared between SelectionDAG and MachineInstr scheduling.
14 //===----------------------------------------------------------------------===//
16 #ifndef LLVM_CODEGEN_SCHEDULEDAG_H
17 #define LLVM_CODEGEN_SCHEDULEDAG_H
19 #include "llvm/ADT/BitVector.h"
20 #include "llvm/ADT/GraphTraits.h"
21 #include "llvm/ADT/PointerIntPair.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/CodeGen/MachineInstr.h"
24 #include "llvm/Target/TargetLowering.h"
29 class MachineConstantPool;
30 class MachineFunction;
31 class MachineRegisterInfo;
33 struct MCSchedClassDesc;
34 class TargetRegisterInfo;
37 class TargetInstrInfo;
40 class TargetRegisterClass;
41 template<class Graph> class GraphWriter;
43 /// SDep - Scheduling dependency. This represents one direction of an
44 /// edge in the scheduling DAG.
47 /// Kind - These are the different kinds of scheduling dependencies.
49 Data, ///< Regular data dependence (aka true-dependence).
50 Anti, ///< A register anti-dependence (aka WAR).
51 Output, ///< A register output-dependence (aka WAW).
52 Order ///< Any other ordering dependency.
55 // Strong dependencies must be respected by the scheduler. Artificial
56 // dependencies may be removed only if they are redundant with another
59 // Weak dependencies may be violated by the scheduling strategy, but only if
60 // the strategy can prove it is correct to do so.
62 // Strong OrderKinds must occur before "Weak".
63 // Weak OrderKinds must occur after "Weak".
65 Barrier, ///< An unknown scheduling barrier.
66 MayAliasMem, ///< Nonvolatile load/Store instructions that may alias.
67 MustAliasMem, ///< Nonvolatile load/Store instructions that must alias.
68 Artificial, ///< Arbitrary strong DAG edge (no real dependence).
69 Weak, ///< Arbitrary weak DAG edge.
70 Cluster ///< Weak DAG edge linking a chain of clustered instrs.
74 /// Dep - A pointer to the depending/depended-on SUnit, and an enum
75 /// indicating the kind of the dependency.
76 PointerIntPair<SUnit *, 2, Kind> Dep;
78 /// Contents - A union discriminated by the dependence kind.
80 /// Reg - For Data, Anti, and Output dependencies, the associated
81 /// register. For Data dependencies that don't currently have a register
82 /// assigned, this is set to zero.
85 /// Order - Additional information about Order dependencies.
86 unsigned OrdKind; // enum OrderKind
89 /// Latency - The time associated with this edge. Often this is just
90 /// the value of the Latency field of the predecessor, however advanced
91 /// models may provide additional information about specific edges.
93 /// Record MinLatency separately from "expected" Latency.
95 /// FIXME: this field is not packed on LP64. Convert to 16-bit DAG edge
96 /// latency after introducing saturating truncation.
100 /// SDep - Construct a null SDep. This is only for use by container
101 /// classes which require default constructors. SUnits may not
102 /// have null SDep edges.
103 SDep() : Dep(0, Data) {}
105 /// SDep - Construct an SDep with the specified values.
106 SDep(SUnit *S, Kind kind, unsigned Reg)
107 : Dep(S, kind), Contents() {
110 llvm_unreachable("Reg given for non-register dependence!");
114 "SDep::Anti and SDep::Output must use a non-zero Reg!");
123 MinLatency = Latency;
125 SDep(SUnit *S, OrderKind kind)
126 : Dep(S, Order), Contents(), Latency(0), MinLatency(0) {
127 Contents.OrdKind = kind;
130 /// Return true if the specified SDep is equivalent except for latency.
131 bool overlaps(const SDep &Other) const {
132 if (Dep != Other.Dep) return false;
133 switch (Dep.getInt()) {
137 return Contents.Reg == Other.Contents.Reg;
139 return Contents.OrdKind == Other.Contents.OrdKind;
141 llvm_unreachable("Invalid dependency kind!");
144 bool operator==(const SDep &Other) const {
145 return overlaps(Other)
146 && Latency == Other.Latency && MinLatency == Other.MinLatency;
149 bool operator!=(const SDep &Other) const {
150 return !operator==(Other);
153 /// getLatency - Return the latency value for this edge, which roughly
154 /// means the minimum number of cycles that must elapse between the
155 /// predecessor and the successor, given that they have this edge
157 unsigned getLatency() const {
161 /// setLatency - Set the latency for this edge.
162 void setLatency(unsigned Lat) {
166 /// getMinLatency - Return the minimum latency for this edge. Minimum
167 /// latency is used for scheduling groups, while normal (expected) latency
168 /// is for instruction cost and critical path.
169 unsigned getMinLatency() const {
173 /// setMinLatency - Set the minimum latency for this edge.
174 void setMinLatency(unsigned Lat) {
178 /// getSUnit - Return the SUnit to which this edge points.
179 SUnit *getSUnit() const {
180 return Dep.getPointer();
183 /// setSUnit - Assign the SUnit to which this edge points.
184 void setSUnit(SUnit *SU) {
188 /// getKind - Return an enum value representing the kind of the dependence.
189 Kind getKind() const {
193 /// isCtrl - Shorthand for getKind() != SDep::Data.
194 bool isCtrl() const {
195 return getKind() != Data;
198 /// isNormalMemory - Test if this is an Order dependence between two
199 /// memory accesses where both sides of the dependence access memory
200 /// in non-volatile and fully modeled ways.
201 bool isNormalMemory() const {
202 return getKind() == Order && (Contents.OrdKind == MayAliasMem
203 || Contents.OrdKind == MustAliasMem);
206 /// isMustAlias - Test if this is an Order dependence that is marked
207 /// as "must alias", meaning that the SUnits at either end of the edge
208 /// have a memory dependence on a known memory location.
209 bool isMustAlias() const {
210 return getKind() == Order && Contents.OrdKind == MustAliasMem;
213 /// isWeak - Test if this is a weak dependence. Weak dependencies are
214 /// considered DAG edges for height computation and other heuristics, but do
215 /// not force ordering. Breaking a weak edge may require the scheduler to
216 /// compensate, for example by inserting a copy.
217 bool isWeak() const {
218 return getKind() == Order && Contents.OrdKind >= Weak;
221 /// isArtificial - Test if this is an Order dependence that is marked
222 /// as "artificial", meaning it isn't necessary for correctness.
223 bool isArtificial() const {
224 return getKind() == Order && Contents.OrdKind == Artificial;
227 /// isCluster - Test if this is an Order dependence that is marked
228 /// as "cluster", meaning it is artificial and wants to be adjacent.
229 bool isCluster() const {
230 return getKind() == Order && Contents.OrdKind == Cluster;
233 /// isAssignedRegDep - Test if this is a Data dependence that is
234 /// associated with a register.
235 bool isAssignedRegDep() const {
236 return getKind() == Data && Contents.Reg != 0;
239 /// getReg - Return the register associated with this edge. This is
240 /// only valid on Data, Anti, and Output edges. On Data edges, this
241 /// value may be zero, meaning there is no associated register.
242 unsigned getReg() const {
243 assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
244 "getReg called on non-register dependence edge!");
248 /// setReg - Assign the associated register for this edge. This is
249 /// only valid on Data, Anti, and Output edges. On Anti and Output
250 /// edges, this value must not be zero. On Data edges, the value may
251 /// be zero, which would mean that no specific register is associated
253 void setReg(unsigned Reg) {
254 assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
255 "setReg called on non-register dependence edge!");
256 assert((getKind() != Anti || Reg != 0) &&
257 "SDep::Anti edge cannot use the zero register!");
258 assert((getKind() != Output || Reg != 0) &&
259 "SDep::Output edge cannot use the zero register!");
// Tell LLVM's ADT containers that SDep may be treated as plain old data
// (enables memcpy-style relocation inside SmallVector and friends).
265 struct isPodLike<SDep> { static const bool value = true; };
267 /// SUnit - Scheduling unit. This is a node in the scheduling DAG.
270 enum { BoundaryID = ~0u };
272 SDNode *Node; // Representative node.
273 MachineInstr *Instr; // Alternatively, a MachineInstr.
275 SUnit *OrigNode; // If not this, the node from which
276 // this node was cloned.
277 // (SD scheduling only)
279 const MCSchedClassDesc *SchedClass; // NULL or resolved SchedClass.
281 // Preds/Succs - The SUnits before/after us in the graph.
282 SmallVector<SDep, 4> Preds; // All sunit predecessors.
283 SmallVector<SDep, 4> Succs; // All sunit successors.
285 typedef SmallVector<SDep, 4>::iterator pred_iterator;
286 typedef SmallVector<SDep, 4>::iterator succ_iterator;
287 typedef SmallVector<SDep, 4>::const_iterator const_pred_iterator;
288 typedef SmallVector<SDep, 4>::const_iterator const_succ_iterator;
290 unsigned NodeNum; // Entry # of node in the node vector.
291 unsigned NodeQueueId; // Queue id of node.
292 unsigned NumPreds; // # of SDep::Data preds.
293 unsigned NumSuccs; // # of SDep::Data succs.
294 unsigned NumPredsLeft; // # of preds not scheduled.
295 unsigned NumSuccsLeft; // # of succs not scheduled.
296 unsigned WeakPredsLeft; // # of weak preds not scheduled.
297 unsigned WeakSuccsLeft; // # of weak succs not scheduled.
298 unsigned short NumRegDefsLeft; // # of reg defs with no scheduled use.
299 unsigned short Latency; // Node latency.
300 bool isVRegCycle : 1; // May use and def the same vreg.
301 bool isCall : 1; // Is a function call.
302 bool isCallOp : 1; // Is a function call operand.
303 bool isTwoAddress : 1; // Is a two-address instruction.
304 bool isCommutable : 1; // Is a commutable instruction.
305 bool hasPhysRegUses : 1; // Has physreg uses.
306 bool hasPhysRegDefs : 1; // Has physreg defs that are being used.
307 bool hasPhysRegClobbers : 1; // Has any physreg defs, used or not.
308 bool isPending : 1; // True once pending.
309 bool isAvailable : 1; // True once available.
310 bool isScheduled : 1; // True once scheduled.
311 bool isScheduleHigh : 1; // True if preferable to schedule high.
312 bool isScheduleLow : 1; // True if preferable to schedule low.
313 bool isCloned : 1; // True if this node has been cloned.
314 Sched::Preference SchedulingPref; // Scheduling preference.
317 bool isDepthCurrent : 1; // True if Depth is current.
318 bool isHeightCurrent : 1; // True if Height is current.
319 unsigned Depth; // Node depth.
320 unsigned Height; // Node height.
322 unsigned TopReadyCycle; // Cycle relative to start when node is ready.
323 unsigned BotReadyCycle; // Cycle relative to end when node is ready.
325 const TargetRegisterClass *CopyDstRC; // Is a special copy node if not null.
326 const TargetRegisterClass *CopySrcRC;
328 /// SUnit - Construct an SUnit for pre-regalloc scheduling to represent
329 /// an SDNode and any nodes flagged to it.
330 SUnit(SDNode *node, unsigned nodenum)
331 : Node(node), Instr(0), OrigNode(0), SchedClass(0), NodeNum(nodenum),
332 NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
333 NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
334 Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
335 isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
336 hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
337 isAvailable(false), isScheduled(false), isScheduleHigh(false),
338 isScheduleLow(false), isCloned(false), SchedulingPref(Sched::None),
339 isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
340 TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
342 /// SUnit - Construct an SUnit for post-regalloc scheduling to represent
344 SUnit(MachineInstr *instr, unsigned nodenum)
345 : Node(0), Instr(instr), OrigNode(0), SchedClass(0), NodeNum(nodenum),
346 NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
347 NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
348 Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
349 isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
350 hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
351 isAvailable(false), isScheduled(false), isScheduleHigh(false),
352 isScheduleLow(false), isCloned(false), SchedulingPref(Sched::None),
353 isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
354 TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
356 /// SUnit - Construct a placeholder SUnit.
358 : Node(0), Instr(0), OrigNode(0), SchedClass(0), NodeNum(BoundaryID),
359 NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
360 NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
361 Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
362 isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
363 hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
364 isAvailable(false), isScheduled(false), isScheduleHigh(false),
365 isScheduleLow(false), isCloned(false), SchedulingPref(Sched::None),
366 isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
367 TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
369 /// \brief Boundary nodes are placeholders for the boundary of the
370 /// scheduling region.
372 /// BoundaryNodes can have DAG edges, including Data edges, but they do not
373 /// correspond to schedulable entities (e.g. instructions) and do not have a
374 /// valid ID. Consequently, always check for boundary nodes before accessing
375 /// an associative data structure keyed on node ID.
376 bool isBoundaryNode() const { return NodeNum == BoundaryID; };
378 /// setNode - Assign the representative SDNode for this SUnit.
379 /// This may be used during pre-regalloc scheduling.
380 void setNode(SDNode *N) {
381 assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
385 /// getNode - Return the representative SDNode for this SUnit.
386 /// This may be used during pre-regalloc scheduling.
387 SDNode *getNode() const {
388 assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
392 /// isInstr - Return true if this SUnit refers to a machine instruction as
393 /// opposed to an SDNode.
394 bool isInstr() const { return Instr; }
396 /// setInstr - Assign the instruction for the SUnit.
397 /// This may be used during post-regalloc scheduling.
398 void setInstr(MachineInstr *MI) {
399 assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
403 /// getInstr - Return the representative MachineInstr for this SUnit.
404 /// This may be used during post-regalloc scheduling.
405 MachineInstr *getInstr() const {
406 assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
410 /// addPred - This adds the specified edge as a pred of the current node if
411 /// not already. It also adds the current node as a successor of the
413 bool addPred(const SDep &D, bool Required = true);
415 /// removePred - This removes the specified edge as a pred of the current
416 /// node if it exists. It also removes the current node as a successor of
417 /// the specified node.
418 void removePred(const SDep &D);
420 /// getDepth - Return the depth of this node, which is the length of the
421 /// maximum path up to any node which has no predecessors.
422 unsigned getDepth() const {
424 const_cast<SUnit *>(this)->ComputeDepth();
428 /// getHeight - Return the height of this node, which is the length of the
429 /// maximum path down to any node which has no successors.
430 unsigned getHeight() const {
431 if (!isHeightCurrent)
432 const_cast<SUnit *>(this)->ComputeHeight();
436 /// setDepthToAtLeast - If NewDepth is greater than this node's
437 /// depth value, set it to be the new depth value. This also
438 /// recursively marks successor nodes dirty.
439 void setDepthToAtLeast(unsigned NewDepth);
441 /// setHeightToAtLeast - If NewHeight is greater than this node's
442 /// height value, set it to be the new height value. This also
443 /// recursively marks predecessor nodes dirty.
444 void setHeightToAtLeast(unsigned NewHeight);
446 /// setDepthDirty - Set a flag in this node to indicate that its
447 /// stored Depth value will require recomputation the next time
448 /// getDepth() is called.
449 void setDepthDirty();
451 /// setHeightDirty - Set a flag in this node to indicate that its
452 /// stored Height value will require recomputation the next time
453 /// getHeight() is called.
454 void setHeightDirty();
456 /// isPred - Test if node N is a predecessor of this node.
457 bool isPred(SUnit *N) {
458 for (unsigned i = 0, e = (unsigned)Preds.size(); i != e; ++i)
459 if (Preds[i].getSUnit() == N)
464 /// isSucc - Test if node N is a successor of this node.
465 bool isSucc(SUnit *N) {
466 for (unsigned i = 0, e = (unsigned)Succs.size(); i != e; ++i)
467 if (Succs[i].getSUnit() == N)
472 bool isTopReady() const {
473 return NumPredsLeft == 0;
475 bool isBottomReady() const {
476 return NumSuccsLeft == 0;
479 /// \brief Order this node's predecessor edges such that the critical path
480 /// edge occurs first.
481 void biasCriticalPath();
483 void dump(const ScheduleDAG *G) const;
484 void dumpAll(const ScheduleDAG *G) const;
485 void print(raw_ostream &O, const ScheduleDAG *G) const;
489 void ComputeHeight();
492 //===--------------------------------------------------------------------===//
493 /// SchedulingPriorityQueue - This interface is used to plug different
494 /// priorities computation algorithms into the list scheduler. It implements
495 /// the interface of a standard priority queue, where nodes are inserted in
496 /// arbitrary order and returned in priority order. The computation of the
497 /// priority and the representation of the queue are totally up to the
498 /// implementation to decide.
500 class SchedulingPriorityQueue {
501 virtual void anchor();
505 SchedulingPriorityQueue(bool rf = false):
506 CurCycle(0), HasReadyFilter(rf) {}
507 virtual ~SchedulingPriorityQueue() {}
509 virtual bool isBottomUp() const = 0;
511 virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
512 virtual void addNode(const SUnit *SU) = 0;
513 virtual void updateNode(const SUnit *SU) = 0;
514 virtual void releaseState() = 0;
516 virtual bool empty() const = 0;
518 bool hasReadyFilter() const { return HasReadyFilter; }
520 virtual bool tracksRegPressure() const { return false; }
522 virtual bool isReady(SUnit *) const {
523 assert(!HasReadyFilter && "The ready filter must override isReady()");
526 virtual void push(SUnit *U) = 0;
528 void push_all(const std::vector<SUnit *> &Nodes) {
529 for (std::vector<SUnit *>::const_iterator I = Nodes.begin(),
530 E = Nodes.end(); I != E; ++I)
534 virtual SUnit *pop() = 0;
536 virtual void remove(SUnit *SU) = 0;
538 virtual void dump(ScheduleDAG *) const {}
540 /// scheduledNode - As each node is scheduled, this method is invoked. This
541 /// allows the priority function to adjust the priority of related
542 /// unscheduled nodes, for example.
544 virtual void scheduledNode(SUnit *) {}
// NOTE(review): presumably the counterpart of scheduledNode, invoked when a
// node is removed from the schedule (e.g. backtracking) — confirm in callers.
546 virtual void unscheduledNode(SUnit *) {}
548 void setCurCycle(unsigned Cycle) {
552 unsigned getCurCycle() const {
559 const TargetMachine &TM; // Target processor
560 const TargetInstrInfo *TII; // Target instruction information
561 const TargetRegisterInfo *TRI; // Target processor register info
562 MachineFunction &MF; // Machine function
563 MachineRegisterInfo &MRI; // Virtual/real register map
564 std::vector<SUnit> SUnits; // The scheduling units.
565 SUnit EntrySU; // Special node for the region entry.
566 SUnit ExitSU; // Special node for the region exit.
569 static const bool StressSched = false;
574 explicit ScheduleDAG(MachineFunction &mf);
576 virtual ~ScheduleDAG();
578 /// clearDAG - clear the DAG state (between regions).
581 /// getInstrDesc - Return the MCInstrDesc of this SUnit.
582 /// Return NULL for SDNodes without a machine opcode.
583 const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
584 if (SU->isInstr()) return &SU->getInstr()->getDesc();
585 return getNodeDesc(SU->getNode());
588 /// viewGraph - Pop up a GraphViz/gv window with the ScheduleDAG rendered
591 virtual void viewGraph(const Twine &Name, const Twine &Title);
592 virtual void viewGraph();
594 virtual void dumpNode(const SUnit *SU) const = 0;
596 /// getGraphNodeLabel - Return a label for an SUnit node in a visualization
597 /// of the ScheduleDAG.
598 virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;
600 /// getDAGName - Return a label for the region of code covered by the DAG.
601 virtual std::string getDAGName() const = 0;
603 /// addCustomGraphFeatures - Add custom features for a visualization of
605 virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}
608 /// VerifyScheduledDAG - Verify that all SUnits were scheduled and that
609 /// their state is consistent. Return the number of scheduled SUnits.
610 unsigned VerifyScheduledDAG(bool isBottomUp);
614 // Return the MCInstrDesc of this SDNode or NULL.
615 const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
/// SUnitIterator - Forward iterator over an SUnit's predecessor edges
/// (it dereferences to Node->Preds[Operand]), used by the GraphTraits
/// specializations below to walk the scheduling DAG.
618 class SUnitIterator : public std::iterator<std::forward_iterator_tag,
623 SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}
625 bool operator==(const SUnitIterator& x) const {
626 return Operand == x.Operand;
628 bool operator!=(const SUnitIterator& x) const { return !operator==(x); }
630 const SUnitIterator &operator=(const SUnitIterator &I) {
631 assert(I.Node==Node && "Cannot assign iterators to two different nodes!");
636 pointer operator*() const {
637 return Node->Preds[Operand].getSUnit();
639 pointer operator->() const { return operator*(); }
641 SUnitIterator& operator++() { // Preincrement
645 SUnitIterator operator++(int) { // Postincrement
646 SUnitIterator tmp = *this; ++*this; return tmp;
649 static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
650 static SUnitIterator end (SUnit *N) {
651 return SUnitIterator(N, (unsigned)N->Preds.size());
654 unsigned getOperand() const { return Operand; }
655 const SUnit *getNode() const { return Node; }
656 /// isCtrlDep - Test if this is not an SDep::Data dependence.
657 bool isCtrlDep() const {
658 return getSDep().isCtrl();
660 bool isArtificialDep() const {
661 return getSDep().isArtificial();
663 const SDep &getSDep() const {
664 return Node->Preds[Operand];
// GraphTraits specialization letting generic graph algorithms (and
// GraphWriter) traverse a single SUnit via its predecessor edges.
668 template <> struct GraphTraits<SUnit*> {
669 typedef SUnit NodeType;
670 typedef SUnitIterator ChildIteratorType;
671 static inline NodeType *getEntryNode(SUnit *N) { return N; }
672 static inline ChildIteratorType child_begin(NodeType *N) {
673 return SUnitIterator::begin(N);
675 static inline ChildIteratorType child_end(NodeType *N) {
676 return SUnitIterator::end(N);
// GraphTraits specialization exposing a whole ScheduleDAG as a graph:
// node iteration is over the DAG's SUnits vector.
680 template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
681 typedef std::vector<SUnit>::iterator nodes_iterator;
682 static nodes_iterator nodes_begin(ScheduleDAG *G) {
683 return G->SUnits.begin();
685 static nodes_iterator nodes_end(ScheduleDAG *G) {
686 return G->SUnits.end();
690 /// ScheduleDAGTopologicalSort is a class that computes a topological
691 /// ordering for SUnits and provides methods for dynamically updating
692 /// the ordering as new edges are added.
694 /// This allows a very fast implementation of IsReachable, for example.
696 class ScheduleDAGTopologicalSort {
697 /// SUnits - A reference to the ScheduleDAG's SUnits.
698 std::vector<SUnit> &SUnits;
701 /// Index2Node - Maps topological index to the node number.
702 std::vector<int> Index2Node;
703 /// Node2Index - Maps the node number to its topological index.
704 std::vector<int> Node2Index;
705 /// Visited - a set of nodes visited during a DFS traversal.
708 /// DFS - make a DFS traversal and mark all nodes affected by the
709 /// edge insertion. These nodes will later get new topological indexes
710 /// by means of the Shift method.
711 void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);
713 /// Shift - reassign topological indexes for the nodes in the DAG
714 /// to preserve the topological ordering.
715 void Shift(BitVector& Visited, int LowerBound, int UpperBound);
717 /// Allocate - assign the topological index to the node n.
718 void Allocate(int n, int index);
721 ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);
723 /// InitDAGTopologicalSorting - create the initial topological
724 /// ordering from the DAG to be scheduled.
725 void InitDAGTopologicalSorting();
727 /// IsReachable - Checks if SU is reachable from TargetSU.
728 bool IsReachable(const SUnit *SU, const SUnit *TargetSU);
730 /// WillCreateCycle - Return true if addPred(TargetSU, SU) creates a cycle.
731 bool WillCreateCycle(SUnit *TargetSU, SUnit *SU);
733 /// AddPred - Updates the topological ordering to accommodate an edge
734 /// to be added from SUnit X to SUnit Y.
735 void AddPred(SUnit *Y, SUnit *X);
737 /// RemovePred - Updates the topological ordering to accommodate an
738 /// edge to be removed from the specified node N from the predecessors
739 /// of the current node M.
740 void RemovePred(SUnit *M, SUnit *N);
742 typedef std::vector<int>::iterator iterator;
743 typedef std::vector<int>::const_iterator const_iterator;
744 iterator begin() { return Index2Node.begin(); }
745 const_iterator begin() const { return Index2Node.begin(); }
746 iterator end() { return Index2Node.end(); }
747 const_iterator end() const { return Index2Node.end(); }
749 typedef std::vector<int>::reverse_iterator reverse_iterator;
750 typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
751 reverse_iterator rbegin() { return Index2Node.rbegin(); }
752 const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
753 reverse_iterator rend() { return Index2Node.rend(); }
754 const_reverse_iterator rend() const { return Index2Node.rend(); }