1 //===------- llvm/CodeGen/ScheduleDAG.h - Common Base Class------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the ScheduleDAG class, which is used as the common
11 // base class for instruction schedulers. This encapsulates the scheduling DAG,
12 // which is shared between SelectionDAG and MachineInstr scheduling.
14 //===----------------------------------------------------------------------===//
16 #ifndef LLVM_CODEGEN_SCHEDULEDAG_H
17 #define LLVM_CODEGEN_SCHEDULEDAG_H
19 #include "llvm/ADT/BitVector.h"
20 #include "llvm/ADT/GraphTraits.h"
21 #include "llvm/ADT/PointerIntPair.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/CodeGen/MachineInstr.h"
24 #include "llvm/Target/TargetLowering.h"
29 class MachineConstantPool;
30 class MachineFunction;
31 class MachineRegisterInfo;
33 struct MCSchedClassDesc;
34 class TargetRegisterInfo;
37 class TargetInstrInfo;
40 class TargetRegisterClass;
41 template<class Graph> class GraphWriter;
43 /// SDep - Scheduling dependency. This represents one direction of an
44 /// edge in the scheduling DAG.
47 /// Kind - These are the different kinds of scheduling dependencies.
49 Data, ///< Regular data dependence (aka true-dependence).
50 Anti, ///< A register anti-dependence (aka WAR).
51 Output, ///< A register output-dependence (aka WAW).
52 Order ///< Any other ordering dependency.
55 // Strong dependencies must be respected by the scheduler. Artificial
56 // dependencies may be removed only if they are redundant with another
59 // Weak dependencies may be violated by the scheduling strategy, but only if
60 // the strategy can prove it is correct to do so.
62 // Strong OrderKinds must occur before "Weak".
63 // Weak OrderKinds must occur after "Weak".
65 Barrier, ///< An unknown scheduling barrier.
66 MayAliasMem, ///< Nonvolatile load/store instructions that may alias.
67 MustAliasMem, ///< Nonvolatile load/store instructions that must alias.
68 Artificial, ///< Arbitrary strong DAG edge (no real dependence).
69 Weak, ///< Arbitrary weak DAG edge.
70 Cluster ///< Weak DAG edge linking a chain of clustered instrs.
74 /// Dep - A pointer to the depending/depended-on SUnit, and an enum
75 /// indicating the kind of the dependency.
76 PointerIntPair<SUnit *, 2, Kind> Dep;
78 /// Contents - A union discriminated by the dependence kind.
80 /// Reg - For Data, Anti, and Output dependencies, the associated
81 /// register. For Data dependencies that don't currently have a register
82 /// assigned, this is set to zero.
85 /// Order - Additional information about Order dependencies.
86 unsigned OrdKind; // enum OrderKind
89 /// Latency - The time associated with this edge. Often this is just
90 /// the value of the Latency field of the predecessor, however advanced
91 /// models may provide additional information about specific edges.
93 /// Record MinLatency separately from "expected" Latency.
95 /// FIXME: this field is not packed on LP64. Convert to 16-bit DAG edge
96 /// latency after introducing saturating truncation.
100 /// SDep - Construct a null SDep. This is only for use by container
101 /// classes which require default constructors. SUnits may not
102 /// have null SDep edges.
103 SDep() : Dep(0, Data) {}
105 /// SDep - Construct an SDep with the specified values.
106 SDep(SUnit *S, Kind kind, unsigned Reg)
107 : Dep(S, kind), Contents() {
110 llvm_unreachable("Reg given for non-register dependence!");
114 "SDep::Anti and SDep::Output must use a non-zero Reg!");
123 MinLatency = Latency;
125 SDep(SUnit *S, OrderKind kind)
126 : Dep(S, Order), Contents(), Latency(0), MinLatency(0) {
127 Contents.OrdKind = kind;
130 /// Return true if the specified SDep is equivalent except for latency.
131 bool overlaps(const SDep &Other) const {
132 if (Dep != Other.Dep) return false;
133 switch (Dep.getInt()) {
137 return Contents.Reg == Other.Contents.Reg;
139 return Contents.OrdKind == Other.Contents.OrdKind;
141 llvm_unreachable("Invalid dependency kind!");
144 bool operator==(const SDep &Other) const {
145 return overlaps(Other)
146 && Latency == Other.Latency && MinLatency == Other.MinLatency;
149 bool operator!=(const SDep &Other) const {
150 return !operator==(Other);
153 /// getLatency - Return the latency value for this edge, which roughly
154 /// means the minimum number of cycles that must elapse between the
155 /// predecessor and the successor, given that they have this edge
157 unsigned getLatency() const {
161 /// setLatency - Set the latency for this edge.
162 void setLatency(unsigned Lat) {
166 /// getMinLatency - Return the minimum latency for this edge. Minimum
167 /// latency is used for scheduling groups, while normal (expected) latency
168 /// is for instruction cost and critical path.
169 unsigned getMinLatency() const {
173 /// setMinLatency - Set the minimum latency for this edge.
174 void setMinLatency(unsigned Lat) {
178 /// getSUnit - Return the SUnit to which this edge points.
179 SUnit *getSUnit() const {
180 return Dep.getPointer();
183 /// setSUnit - Assign the SUnit to which this edge points.
184 void setSUnit(SUnit *SU) {
188 /// getKind - Return an enum value representing the kind of the dependence.
189 Kind getKind() const {
193 /// isCtrl - Shorthand for getKind() != SDep::Data.
194 bool isCtrl() const {
195 return getKind() != Data;
198 /// isNormalMemory - Test if this is an Order dependence between two
199 /// memory accesses where both sides of the dependence access memory
200 /// in non-volatile and fully modeled ways.
201 bool isNormalMemory() const {
202 return getKind() == Order && (Contents.OrdKind == MayAliasMem
203 || Contents.OrdKind == MustAliasMem);
206 /// isMustAlias - Test if this is an Order dependence that is marked
207 /// as "must alias", meaning that the SUnits at either end of the edge
208 /// have a memory dependence on a known memory location.
209 bool isMustAlias() const {
210 return getKind() == Order && Contents.OrdKind == MustAliasMem;
213 /// isWeak - Test if this a weak dependence. Weak dependencies are
214 /// considered DAG edges for height computation and other heuristics, but do
215 /// not force ordering. Breaking a weak edge may require the scheduler to
216 /// compensate, for example by inserting a copy.
217 bool isWeak() const {
218 return getKind() == Order && Contents.OrdKind >= Weak;
221 /// isArtificial - Test if this is an Order dependence that is marked
222 /// as "artificial", meaning it isn't necessary for correctness.
223 bool isArtificial() const {
224 return getKind() == Order && Contents.OrdKind == Artificial;
227 /// isCluster - Test if this is an Order dependence that is marked
228 /// as "cluster", meaning it is artificial and wants to be adjacent.
229 bool isCluster() const {
230 return getKind() == Order && Contents.OrdKind == Cluster;
233 /// isAssignedRegDep - Test if this is a Data dependence that is
234 /// associated with a register.
235 bool isAssignedRegDep() const {
236 return getKind() == Data && Contents.Reg != 0;
239 /// getReg - Return the register associated with this edge. This is
240 /// only valid on Data, Anti, and Output edges. On Data edges, this
241 /// value may be zero, meaning there is no associated register.
242 unsigned getReg() const {
243 assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
244 "getReg called on non-register dependence edge!");
248 /// setReg - Assign the associated register for this edge. This is
249 /// only valid on Data, Anti, and Output edges. On Anti and Output
250 /// edges, this value must not be zero. On Data edges, the value may
251 /// be zero, which would mean that no specific register is associated
253 void setReg(unsigned Reg) {
254 assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
255 "setReg called on non-register dependence edge!");
256 assert((getKind() != Anti || Reg != 0) &&
257 "SDep::Anti edge cannot use the zero register!");
258 assert((getKind() != Output || Reg != 0) &&
259 "SDep::Output edge cannot use the zero register!");
265 struct isPodLike<SDep> { static const bool value = true; };
267 /// SUnit - Scheduling unit. This is a node in the scheduling DAG.
270 enum { BoundaryID = ~0u };
272 SDNode *Node; // Representative node.
273 MachineInstr *Instr; // Alternatively, a MachineInstr.
275 SUnit *OrigNode; // If not this, the node from which
276 // this node was cloned.
277 // (SD scheduling only)
279 const MCSchedClassDesc *SchedClass; // NULL or resolved SchedClass.
281 // Preds/Succs - The SUnits before/after us in the graph.
282 SmallVector<SDep, 4> Preds; // All sunit predecessors.
283 SmallVector<SDep, 4> Succs; // All sunit successors.
285 typedef SmallVector<SDep, 4>::iterator pred_iterator;
286 typedef SmallVector<SDep, 4>::iterator succ_iterator;
287 typedef SmallVector<SDep, 4>::const_iterator const_pred_iterator;
288 typedef SmallVector<SDep, 4>::const_iterator const_succ_iterator;
290 unsigned NodeNum; // Entry # of node in the node vector.
291 unsigned NodeQueueId; // Queue id of node.
292 unsigned NumPreds; // # of SDep::Data preds.
293 unsigned NumSuccs; // # of SDep::Data succs.
294 unsigned NumPredsLeft; // # of preds not scheduled.
295 unsigned NumSuccsLeft; // # of succs not scheduled.
296 unsigned WeakPredsLeft; // # of weak preds not scheduled.
297 unsigned WeakSuccsLeft; // # of weak succs not scheduled.
298 unsigned short NumRegDefsLeft; // # of reg defs with no scheduled use.
299 unsigned short Latency; // Node latency.
300 bool isVRegCycle : 1; // May use and def the same vreg.
301 bool isCall : 1; // Is a function call.
302 bool isCallOp : 1; // Is a function call operand.
303 bool isTwoAddress : 1; // Is a two-address instruction.
304 bool isCommutable : 1; // Is a commutable instruction.
305 bool hasPhysRegDefs : 1; // Has physreg defs that are being used.
306 bool hasPhysRegClobbers : 1; // Has any physreg defs, used or not.
307 bool isPending : 1; // True once pending.
308 bool isAvailable : 1; // True once available.
309 bool isScheduled : 1; // True once scheduled.
310 bool isScheduleHigh : 1; // True if preferable to schedule high.
311 bool isScheduleLow : 1; // True if preferable to schedule low.
312 bool isCloned : 1; // True if this node has been cloned.
313 Sched::Preference SchedulingPref; // Scheduling preference.
316 bool isDepthCurrent : 1; // True if Depth is current.
317 bool isHeightCurrent : 1; // True if Height is current.
318 unsigned Depth; // Node depth.
319 unsigned Height; // Node height.
321 unsigned TopReadyCycle; // Cycle relative to start when node is ready.
322 unsigned BotReadyCycle; // Cycle relative to end when node is ready.
324 const TargetRegisterClass *CopyDstRC; // Is a special copy node if not null.
325 const TargetRegisterClass *CopySrcRC;
327 /// SUnit - Construct an SUnit for pre-regalloc scheduling to represent
328 /// an SDNode and any nodes flagged to it.
329 SUnit(SDNode *node, unsigned nodenum)
330 : Node(node), Instr(0), OrigNode(0), SchedClass(0), NodeNum(nodenum),
331 NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
332 NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
333 Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
334 isTwoAddress(false), isCommutable(false), hasPhysRegDefs(false),
335 hasPhysRegClobbers(false), isPending(false), isAvailable(false),
336 isScheduled(false), isScheduleHigh(false), isScheduleLow(false),
337 isCloned(false), SchedulingPref(Sched::None),
338 isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
339 TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
341 /// SUnit - Construct an SUnit for post-regalloc scheduling to represent
343 SUnit(MachineInstr *instr, unsigned nodenum)
344 : Node(0), Instr(instr), OrigNode(0), SchedClass(0), NodeNum(nodenum),
345 NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
346 NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
347 Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
348 isTwoAddress(false), isCommutable(false), hasPhysRegDefs(false),
349 hasPhysRegClobbers(false), isPending(false), isAvailable(false),
350 isScheduled(false), isScheduleHigh(false), isScheduleLow(false),
351 isCloned(false), SchedulingPref(Sched::None),
352 isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
353 TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
355 /// SUnit - Construct a placeholder SUnit.
357 : Node(0), Instr(0), OrigNode(0), SchedClass(0), NodeNum(BoundaryID),
358 NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
359 NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
360 Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
361 isTwoAddress(false), isCommutable(false), hasPhysRegDefs(false),
362 hasPhysRegClobbers(false), isPending(false), isAvailable(false),
363 isScheduled(false), isScheduleHigh(false), isScheduleLow(false),
364 isCloned(false), SchedulingPref(Sched::None),
365 isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
366 TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
368 /// \brief Boundary nodes are placeholders for the boundary of the
369 /// scheduling region.
371 /// BoundaryNodes can have DAG edges, including Data edges, but they do not
372 /// correspond to schedulable entities (e.g. instructions) and do not have a
373 /// valid ID. Consequently, always check for boundary nodes before accessing
374 /// an associative data structure keyed on node ID.
375 bool isBoundaryNode() const { return NodeNum == BoundaryID; };
377 /// setNode - Assign the representative SDNode for this SUnit.
378 /// This may be used during pre-regalloc scheduling.
379 void setNode(SDNode *N) {
380 assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
384 /// getNode - Return the representative SDNode for this SUnit.
385 /// This may be used during pre-regalloc scheduling.
386 SDNode *getNode() const {
387 assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
391 /// isInstr - Return true if this SUnit refers to a machine instruction as
392 /// opposed to an SDNode.
393 bool isInstr() const { return Instr; }
395 /// setInstr - Assign the instruction for the SUnit.
396 /// This may be used during post-regalloc scheduling.
397 void setInstr(MachineInstr *MI) {
398 assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
402 /// getInstr - Return the representative MachineInstr for this SUnit.
403 /// This may be used during post-regalloc scheduling.
404 MachineInstr *getInstr() const {
405 assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
409 /// addPred - This adds the specified edge as a pred of the current node if
410 /// not already. It also adds the current node as a successor of the
412 bool addPred(const SDep &D, bool Required = true);
414 /// removePred - This removes the specified edge as a pred of the current
415 /// node if it exists. It also removes the current node as a successor of
416 /// the specified node.
417 void removePred(const SDep &D);
419 /// getDepth - Return the depth of this node, which is the length of the
420 /// maximum path up to any node which has no predecessors.
421 unsigned getDepth() const {
423 const_cast<SUnit *>(this)->ComputeDepth();
427 /// getHeight - Return the height of this node, which is the length of the
428 /// maximum path down to any node which has no successors.
429 unsigned getHeight() const {
430 if (!isHeightCurrent)
431 const_cast<SUnit *>(this)->ComputeHeight();
435 /// setDepthToAtLeast - If NewDepth is greater than this node's
436 /// depth value, set it to be the new depth value. This also
437 /// recursively marks successor nodes dirty.
438 void setDepthToAtLeast(unsigned NewDepth);
440 /// setHeightToAtLeast - If NewHeight is greater than this node's
441 /// height value, set it to be the new height value. This also
442 /// recursively marks predecessor nodes dirty.
443 void setHeightToAtLeast(unsigned NewHeight);
445 /// setDepthDirty - Set a flag in this node to indicate that its
446 /// stored Depth value will require recomputation the next time
447 /// getDepth() is called.
448 void setDepthDirty();
450 /// setHeightDirty - Set a flag in this node to indicate that its
451 /// stored Height value will require recomputation the next time
452 /// getHeight() is called.
453 void setHeightDirty();
455 /// isPred - Test if node N is a predecessor of this node.
456 bool isPred(SUnit *N) {
457 for (unsigned i = 0, e = (unsigned)Preds.size(); i != e; ++i)
458 if (Preds[i].getSUnit() == N)
463 /// isSucc - Test if node N is a successor of this node.
464 bool isSucc(SUnit *N) {
465 for (unsigned i = 0, e = (unsigned)Succs.size(); i != e; ++i)
466 if (Succs[i].getSUnit() == N)
471 bool isTopReady() const {
472 return NumPredsLeft == 0;
474 bool isBottomReady() const {
475 return NumSuccsLeft == 0;
478 /// \brief Order this node's predecessor edges such that the critical path
479 /// edge occurs first.
480 void biasCriticalPath();
482 void dump(const ScheduleDAG *G) const;
483 void dumpAll(const ScheduleDAG *G) const;
484 void print(raw_ostream &O, const ScheduleDAG *G) const;
488 void ComputeHeight();
491 //===--------------------------------------------------------------------===//
492 /// SchedulingPriorityQueue - This interface is used to plug different
493 /// priorities computation algorithms into the list scheduler. It implements
494 /// the interface of a standard priority queue, where nodes are inserted in
495 /// arbitrary order and returned in priority order. The computation of the
496 /// priority and the representation of the queue are totally up to the
497 /// implementation to decide.
499 class SchedulingPriorityQueue {
500 virtual void anchor();
504 SchedulingPriorityQueue(bool rf = false):
505 CurCycle(0), HasReadyFilter(rf) {}
506 virtual ~SchedulingPriorityQueue() {}
508 virtual bool isBottomUp() const = 0;
510 virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
511 virtual void addNode(const SUnit *SU) = 0;
512 virtual void updateNode(const SUnit *SU) = 0;
513 virtual void releaseState() = 0;
515 virtual bool empty() const = 0;
517 bool hasReadyFilter() const { return HasReadyFilter; }
519 virtual bool tracksRegPressure() const { return false; }
521 virtual bool isReady(SUnit *) const {
522 assert(!HasReadyFilter && "The ready filter must override isReady()");
525 virtual void push(SUnit *U) = 0;
527 void push_all(const std::vector<SUnit *> &Nodes) {
528 for (std::vector<SUnit *>::const_iterator I = Nodes.begin(),
529 E = Nodes.end(); I != E; ++I)
533 virtual SUnit *pop() = 0;
535 virtual void remove(SUnit *SU) = 0;
537 virtual void dump(ScheduleDAG *) const {}
539 /// scheduledNode - As each node is scheduled, this method is invoked. This
540 /// allows the priority function to adjust the priority of related
541 /// unscheduled nodes, for example.
543 virtual void scheduledNode(SUnit *) {}
545 virtual void unscheduledNode(SUnit *) {}
547 void setCurCycle(unsigned Cycle) {
551 unsigned getCurCycle() const {
558 const TargetMachine &TM; // Target processor
559 const TargetInstrInfo *TII; // Target instruction information
560 const TargetRegisterInfo *TRI; // Target processor register info
561 MachineFunction &MF; // Machine function
562 MachineRegisterInfo &MRI; // Virtual/real register map
563 std::vector<SUnit> SUnits; // The scheduling units.
564 SUnit EntrySU; // Special node for the region entry.
565 SUnit ExitSU; // Special node for the region exit.
568 static const bool StressSched = false;
573 explicit ScheduleDAG(MachineFunction &mf);
575 virtual ~ScheduleDAG();
577 /// clearDAG - clear the DAG state (between regions).
580 /// getInstrDesc - Return the MCInstrDesc of this SUnit.
581 /// Return NULL for SDNodes without a machine opcode.
582 const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
583 if (SU->isInstr()) return &SU->getInstr()->getDesc();
584 return getNodeDesc(SU->getNode());
587 /// viewGraph - Pop up a GraphViz/gv window with the ScheduleDAG rendered
590 virtual void viewGraph(const Twine &Name, const Twine &Title);
591 virtual void viewGraph();
593 virtual void dumpNode(const SUnit *SU) const = 0;
595 /// getGraphNodeLabel - Return a label for an SUnit node in a visualization
596 /// of the ScheduleDAG.
597 virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;
599 /// getDAGName - Return a label for the region of code covered by the DAG.
600 virtual std::string getDAGName() const = 0;
602 /// addCustomGraphFeatures - Add custom features for a visualization of
604 virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}
607 /// VerifyScheduledDAG - Verify that all SUnits were scheduled and that
608 /// their state is consistent. Return the number of scheduled SUnits.
609 unsigned VerifyScheduledDAG(bool isBottomUp);
613 // Return the MCInstrDesc of this SDNode or NULL.
614 const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
617 class SUnitIterator : public std::iterator<std::forward_iterator_tag,
622 SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}
624 bool operator==(const SUnitIterator& x) const {
625 return Operand == x.Operand;
627 bool operator!=(const SUnitIterator& x) const { return !operator==(x); }
629 const SUnitIterator &operator=(const SUnitIterator &I) {
630 assert(I.Node==Node && "Cannot assign iterators to two different nodes!");
635 pointer operator*() const {
636 return Node->Preds[Operand].getSUnit();
638 pointer operator->() const { return operator*(); }
640 SUnitIterator& operator++() { // Preincrement
644 SUnitIterator operator++(int) { // Postincrement
645 SUnitIterator tmp = *this; ++*this; return tmp;
648 static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
649 static SUnitIterator end (SUnit *N) {
650 return SUnitIterator(N, (unsigned)N->Preds.size());
653 unsigned getOperand() const { return Operand; }
654 const SUnit *getNode() const { return Node; }
655 /// isCtrlDep - Test if this is not an SDep::Data dependence.
656 bool isCtrlDep() const {
657 return getSDep().isCtrl();
659 bool isArtificialDep() const {
660 return getSDep().isArtificial();
662 const SDep &getSDep() const {
663 return Node->Preds[Operand];
667 template <> struct GraphTraits<SUnit*> {
668 typedef SUnit NodeType;
669 typedef SUnitIterator ChildIteratorType;
670 static inline NodeType *getEntryNode(SUnit *N) { return N; }
671 static inline ChildIteratorType child_begin(NodeType *N) {
672 return SUnitIterator::begin(N);
674 static inline ChildIteratorType child_end(NodeType *N) {
675 return SUnitIterator::end(N);
679 template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
680 typedef std::vector<SUnit>::iterator nodes_iterator;
681 static nodes_iterator nodes_begin(ScheduleDAG *G) {
682 return G->SUnits.begin();
684 static nodes_iterator nodes_end(ScheduleDAG *G) {
685 return G->SUnits.end();
689 /// ScheduleDAGTopologicalSort is a class that computes a topological
690 /// ordering for SUnits and provides methods for dynamically updating
691 /// the ordering as new edges are added.
693 /// This allows a very fast implementation of IsReachable, for example.
695 class ScheduleDAGTopologicalSort {
696 /// SUnits - A reference to the ScheduleDAG's SUnits.
697 std::vector<SUnit> &SUnits;
700 /// Index2Node - Maps topological index to the node number.
701 std::vector<int> Index2Node;
702 /// Node2Index - Maps the node number to its topological index.
703 std::vector<int> Node2Index;
704 /// Visited - a set of nodes visited during a DFS traversal.
707 /// DFS - make a DFS traversal and mark all nodes affected by the
708 /// edge insertion. These nodes will later get new topological indexes
709 /// by means of the Shift method.
710 void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);
712 /// Shift - reassign topological indexes for the nodes in the DAG
713 /// to preserve the topological ordering.
714 void Shift(BitVector& Visited, int LowerBound, int UpperBound);
716 /// Allocate - assign the topological index to the node n.
717 void Allocate(int n, int index);
720 ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);
722 /// InitDAGTopologicalSorting - create the initial topological
723 /// ordering from the DAG to be scheduled.
724 void InitDAGTopologicalSorting();
726 /// IsReachable - Checks if SU is reachable from TargetSU.
727 bool IsReachable(const SUnit *SU, const SUnit *TargetSU);
729 /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU
730 /// will create a cycle.
731 bool WillCreateCycle(SUnit *SU, SUnit *TargetSU);
733 /// AddPred - Updates the topological ordering to accommodate an edge
734 /// to be added from SUnit X to SUnit Y.
735 void AddPred(SUnit *Y, SUnit *X);
737 /// RemovePred - Updates the topological ordering to accommodate
738 /// the removal of the edge from the specified node N to the current
739 /// node M (N is removed from M's predecessors).
740 void RemovePred(SUnit *M, SUnit *N);
742 typedef std::vector<int>::iterator iterator;
743 typedef std::vector<int>::const_iterator const_iterator;
744 iterator begin() { return Index2Node.begin(); }
745 const_iterator begin() const { return Index2Node.begin(); }
746 iterator end() { return Index2Node.end(); }
747 const_iterator end() const { return Index2Node.end(); }
749 typedef std::vector<int>::reverse_iterator reverse_iterator;
750 typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
751 reverse_iterator rbegin() { return Index2Node.rbegin(); }
752 const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
753 reverse_iterator rend() { return Index2Node.rend(); }
754 const_reverse_iterator rend() const { return Index2Node.rend(); }