1 //===- HexagonMachineScheduler.cpp - MI Scheduler for Hexagon -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // MachineScheduler schedules machine instructions after phi elimination. It
11 // preserves LiveIntervals so it can be invoked before register allocation.
13 //===----------------------------------------------------------------------===//
#include "HexagonMachineScheduler.h"
#include "HexagonInstrInfo.h"
#include "HexagonSubtarget.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iomanip>
#include <limits>
#include <memory>
#include <sstream>
46 #define DEBUG_TYPE "machine-scheduler"
48 static cl::opt<bool> IgnoreBBRegPressure("ignore-bb-reg-pressure",
49 cl::Hidden, cl::ZeroOrMore, cl::init(false));
51 static cl::opt<bool> UseNewerCandidate("use-newer-candidate",
52 cl::Hidden, cl::ZeroOrMore, cl::init(true));
54 static cl::opt<unsigned> SchedDebugVerboseLevel("misched-verbose-level",
55 cl::Hidden, cl::ZeroOrMore, cl::init(1));
57 // Check if the scheduler should penalize instructions that are available to
58 // early due to a zero-latency dependence.
59 static cl::opt<bool> CheckEarlyAvail("check-early-avail", cl::Hidden,
60 cl::ZeroOrMore, cl::init(true));
62 // This value is used to determine if a register class is a high pressure set.
63 // We compute the maximum number of registers needed and divided by the total
64 // available. Then, we compare the result to this value.
65 static cl::opt<float> RPThreshold("hexagon-reg-pressure", cl::Hidden,
66 cl::init(0.75f), cl::desc("High register pressure threhold."));
68 /// Return true if there is a dependence between SUd and SUu.
69 static bool hasDependence(const SUnit *SUd, const SUnit *SUu,
70 const HexagonInstrInfo &QII) {
71 if (SUd->Succs.size() == 0)
74 // Enable .cur formation.
75 if (QII.mayBeCurLoad(*SUd->getInstr()))
78 if (QII.canExecuteInBundle(*SUd->getInstr(), *SUu->getInstr()))
81 for (const auto &S : SUd->Succs) {
82 // Since we do not add pseudos to packets, might as well
83 // ignore order dependencies.
87 if (S.getSUnit() == SUu && S.getLatency() > 0)
93 /// Check if scheduling of this SU is possible
94 /// in the current packet.
95 /// It is _not_ precise (statefull), it is more like
96 /// another heuristic. Many corner cases are figured
98 bool VLIWResourceModel::isResourceAvailable(SUnit *SU, bool IsTop) {
99 if (!SU || !SU->getInstr())
102 // First see if the pipeline could receive this instruction
103 // in the current cycle.
104 switch (SU->getInstr()->getOpcode()) {
106 if (!ResourcesModel->canReserveResources(*SU->getInstr()))
108 case TargetOpcode::EXTRACT_SUBREG:
109 case TargetOpcode::INSERT_SUBREG:
110 case TargetOpcode::SUBREG_TO_REG:
111 case TargetOpcode::REG_SEQUENCE:
112 case TargetOpcode::IMPLICIT_DEF:
113 case TargetOpcode::COPY:
114 case TargetOpcode::INLINEASM:
118 MachineBasicBlock *MBB = SU->getInstr()->getParent();
119 auto &QST = MBB->getParent()->getSubtarget<HexagonSubtarget>();
120 const auto &QII = *QST.getInstrInfo();
122 // Now see if there are no other dependencies to instructions already
125 for (unsigned i = 0, e = Packet.size(); i != e; ++i)
126 if (hasDependence(Packet[i], SU, QII))
129 for (unsigned i = 0, e = Packet.size(); i != e; ++i)
130 if (hasDependence(SU, Packet[i], QII))
136 /// Keep track of available resources.
137 bool VLIWResourceModel::reserveResources(SUnit *SU, bool IsTop) {
138 bool startNewCycle = false;
139 // Artificially reset state.
141 ResourcesModel->clearResources();
146 // If this SU does not fit in the packet or the packet is now full
148 if (!isResourceAvailable(SU, IsTop) ||
149 Packet.size() >= SchedModel->getIssueWidth()) {
150 ResourcesModel->clearResources();
153 startNewCycle = true;
156 switch (SU->getInstr()->getOpcode()) {
158 ResourcesModel->reserveResources(*SU->getInstr());
160 case TargetOpcode::EXTRACT_SUBREG:
161 case TargetOpcode::INSERT_SUBREG:
162 case TargetOpcode::SUBREG_TO_REG:
163 case TargetOpcode::REG_SEQUENCE:
164 case TargetOpcode::IMPLICIT_DEF:
165 case TargetOpcode::KILL:
166 case TargetOpcode::CFI_INSTRUCTION:
167 case TargetOpcode::EH_LABEL:
168 case TargetOpcode::COPY:
169 case TargetOpcode::INLINEASM:
172 Packet.push_back(SU);
175 LLVM_DEBUG(dbgs() << "Packet[" << TotalPackets << "]:\n");
176 for (unsigned i = 0, e = Packet.size(); i != e; ++i) {
177 LLVM_DEBUG(dbgs() << "\t[" << i << "] SU(");
178 LLVM_DEBUG(dbgs() << Packet[i]->NodeNum << ")\t");
179 LLVM_DEBUG(Packet[i]->getInstr()->dump());
183 return startNewCycle;
186 /// schedule - Called back from MachineScheduler::runOnMachineFunction
187 /// after setting up the current scheduling region. [RegionBegin, RegionEnd)
188 /// only includes instructions that have DAG nodes, not scheduling boundaries.
189 void VLIWMachineScheduler::schedule() {
190 LLVM_DEBUG(dbgs() << "********** MI Converging Scheduling VLIW "
191 << printMBBReference(*BB) << " " << BB->getName()
192 << " in_func " << BB->getParent()->getName()
193 << " at loop depth " << MLI->getLoopDepth(BB) << " \n");
195 buildDAGWithRegPressure();
197 Topo.InitDAGTopologicalSorting();
199 // Postprocess the DAG to add platform-specific artificial dependencies.
202 SmallVector<SUnit*, 8> TopRoots, BotRoots;
203 findRootsAndBiasEdges(TopRoots, BotRoots);
205 // Initialize the strategy before modifying the DAG.
206 SchedImpl->initialize(this);
208 LLVM_DEBUG(unsigned maxH = 0;
209 for (unsigned su = 0, e = SUnits.size(); su != e;
210 ++su) if (SUnits[su].getHeight() > maxH) maxH =
211 SUnits[su].getHeight();
212 dbgs() << "Max Height " << maxH << "\n";);
213 LLVM_DEBUG(unsigned maxD = 0;
214 for (unsigned su = 0, e = SUnits.size(); su != e;
215 ++su) if (SUnits[su].getDepth() > maxD) maxD =
216 SUnits[su].getDepth();
217 dbgs() << "Max Depth " << maxD << "\n";);
218 LLVM_DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su) SUnits[su]
221 initQueues(TopRoots, BotRoots);
223 bool IsTopNode = false;
226 dbgs() << "** VLIWMachineScheduler::schedule picking next node\n");
227 SUnit *SU = SchedImpl->pickNode(IsTopNode);
230 if (!checkSchedLimit())
233 scheduleMI(SU, IsTopNode);
235 // Notify the scheduling strategy after updating the DAG.
236 SchedImpl->schedNode(SU, IsTopNode);
238 updateQueues(SU, IsTopNode);
240 assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
245 dbgs() << "*** Final schedule for "
246 << printMBBReference(*begin()->getParent()) << " ***\n";
252 void ConvergingVLIWScheduler::initialize(ScheduleDAGMI *dag) {
253 DAG = static_cast<VLIWMachineScheduler*>(dag);
254 SchedModel = DAG->getSchedModel();
256 Top.init(DAG, SchedModel);
257 Bot.init(DAG, SchedModel);
259 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
260 // are disabled, then these HazardRecs will be disabled.
261 const InstrItineraryData *Itin = DAG->getSchedModel()->getInstrItineraries();
262 const TargetSubtargetInfo &STI = DAG->MF.getSubtarget();
263 const TargetInstrInfo *TII = STI.getInstrInfo();
264 delete Top.HazardRec;
265 delete Bot.HazardRec;
266 Top.HazardRec = TII->CreateTargetMIHazardRecognizer(Itin, DAG);
267 Bot.HazardRec = TII->CreateTargetMIHazardRecognizer(Itin, DAG);
269 delete Top.ResourceModel;
270 delete Bot.ResourceModel;
271 Top.ResourceModel = new VLIWResourceModel(STI, DAG->getSchedModel());
272 Bot.ResourceModel = new VLIWResourceModel(STI, DAG->getSchedModel());
274 const std::vector<unsigned> &MaxPressure =
275 DAG->getRegPressure().MaxSetPressure;
276 HighPressureSets.assign(MaxPressure.size(), 0);
277 for (unsigned i = 0, e = MaxPressure.size(); i < e; ++i) {
278 unsigned Limit = DAG->getRegClassInfo()->getRegPressureSetLimit(i);
279 HighPressureSets[i] =
280 ((float) MaxPressure[i] > ((float) Limit * RPThreshold));
283 assert((!ForceTopDown || !ForceBottomUp) &&
284 "-misched-topdown incompatible with -misched-bottomup");
287 void ConvergingVLIWScheduler::releaseTopNode(SUnit *SU) {
291 for (const SDep &PI : SU->Preds) {
292 unsigned PredReadyCycle = PI.getSUnit()->TopReadyCycle;
293 unsigned MinLatency = PI.getLatency();
295 Top.MaxMinLatency = std::max(MinLatency, Top.MaxMinLatency);
297 if (SU->TopReadyCycle < PredReadyCycle + MinLatency)
298 SU->TopReadyCycle = PredReadyCycle + MinLatency;
300 Top.releaseNode(SU, SU->TopReadyCycle);
303 void ConvergingVLIWScheduler::releaseBottomNode(SUnit *SU) {
307 assert(SU->getInstr() && "Scheduled SUnit must have instr");
309 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
311 unsigned SuccReadyCycle = I->getSUnit()->BotReadyCycle;
312 unsigned MinLatency = I->getLatency();
314 Bot.MaxMinLatency = std::max(MinLatency, Bot.MaxMinLatency);
316 if (SU->BotReadyCycle < SuccReadyCycle + MinLatency)
317 SU->BotReadyCycle = SuccReadyCycle + MinLatency;
319 Bot.releaseNode(SU, SU->BotReadyCycle);
322 /// Does this SU have a hazard within the current instruction group.
324 /// The scheduler supports two modes of hazard recognition. The first is the
325 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
326 /// supports highly complicated in-order reservation tables
327 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
329 /// The second is a streamlined mechanism that checks for hazards based on
330 /// simple counters that the scheduler itself maintains. It explicitly checks
331 /// for instruction dispatch limitations, including the number of micro-ops that
332 /// can dispatch per cycle.
334 /// TODO: Also check whether the SU must start a new group.
335 bool ConvergingVLIWScheduler::VLIWSchedBoundary::checkHazard(SUnit *SU) {
336 if (HazardRec->isEnabled())
337 return HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard;
339 unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
340 if (IssueCount + uops > SchedModel->getIssueWidth())
346 void ConvergingVLIWScheduler::VLIWSchedBoundary::releaseNode(SUnit *SU,
347 unsigned ReadyCycle) {
348 if (ReadyCycle < MinReadyCycle)
349 MinReadyCycle = ReadyCycle;
351 // Check for interlocks first. For the purpose of other heuristics, an
352 // instruction that cannot issue appears as if it's not in the ReadyQueue.
353 if (ReadyCycle > CurrCycle || checkHazard(SU))
360 /// Move the boundary of scheduled code by one cycle.
361 void ConvergingVLIWScheduler::VLIWSchedBoundary::bumpCycle() {
362 unsigned Width = SchedModel->getIssueWidth();
363 IssueCount = (IssueCount <= Width) ? 0 : IssueCount - Width;
365 assert(MinReadyCycle < std::numeric_limits<unsigned>::max() &&
366 "MinReadyCycle uninitialized");
367 unsigned NextCycle = std::max(CurrCycle + 1, MinReadyCycle);
369 if (!HazardRec->isEnabled()) {
370 // Bypass HazardRec virtual calls.
371 CurrCycle = NextCycle;
373 // Bypass getHazardType calls in case of long latency.
374 for (; CurrCycle != NextCycle; ++CurrCycle) {
376 HazardRec->AdvanceCycle();
378 HazardRec->RecedeCycle();
383 LLVM_DEBUG(dbgs() << "*** Next cycle " << Available.getName() << " cycle "
384 << CurrCycle << '\n');
387 /// Move the boundary of scheduled code by one SUnit.
388 void ConvergingVLIWScheduler::VLIWSchedBoundary::bumpNode(SUnit *SU) {
389 bool startNewCycle = false;
391 // Update the reservation table.
392 if (HazardRec->isEnabled()) {
393 if (!isTop() && SU->isCall) {
394 // Calls are scheduled with their preceding instructions. For bottom-up
395 // scheduling, clear the pipeline state before emitting.
398 HazardRec->EmitInstruction(SU);
402 startNewCycle = ResourceModel->reserveResources(SU, isTop());
404 // Check the instruction group dispatch limit.
405 // TODO: Check if this SU must end a dispatch group.
406 IssueCount += SchedModel->getNumMicroOps(SU->getInstr());
408 LLVM_DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle << '\n');
412 LLVM_DEBUG(dbgs() << "*** IssueCount " << IssueCount << " at cycle "
413 << CurrCycle << '\n');
416 /// Release pending ready nodes in to the available queue. This makes them
417 /// visible to heuristics.
418 void ConvergingVLIWScheduler::VLIWSchedBoundary::releasePending() {
419 // If the available queue is empty, it is safe to reset MinReadyCycle.
420 if (Available.empty())
421 MinReadyCycle = std::numeric_limits<unsigned>::max();
423 // Check to see if any of the pending instructions are ready to issue. If
424 // so, add them to the available queue.
425 for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
426 SUnit *SU = *(Pending.begin()+i);
427 unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
429 if (ReadyCycle < MinReadyCycle)
430 MinReadyCycle = ReadyCycle;
432 if (ReadyCycle > CurrCycle)
439 Pending.remove(Pending.begin()+i);
442 CheckPending = false;
445 /// Remove SU from the ready set for this boundary.
446 void ConvergingVLIWScheduler::VLIWSchedBoundary::removeReady(SUnit *SU) {
447 if (Available.isInQueue(SU))
448 Available.remove(Available.find(SU));
450 assert(Pending.isInQueue(SU) && "bad ready count");
451 Pending.remove(Pending.find(SU));
455 /// If this queue only has one ready candidate, return it. As a side effect,
456 /// advance the cycle until at least one node is ready. If multiple instructions
457 /// are ready, return NULL.
458 SUnit *ConvergingVLIWScheduler::VLIWSchedBoundary::pickOnlyChoice() {
462 auto AdvanceCycle = [this]() {
463 if (Available.empty())
465 if (Available.size() == 1 && Pending.size() > 0)
466 return !ResourceModel->isResourceAvailable(*Available.begin(), isTop()) ||
467 getWeakLeft(*Available.begin(), isTop()) != 0;
470 for (unsigned i = 0; AdvanceCycle(); ++i) {
471 assert(i <= (HazardRec->getMaxLookAhead() + MaxMinLatency) &&
472 "permanent hazard"); (void)i;
473 ResourceModel->reserveResources(nullptr, isTop());
477 if (Available.size() == 1)
478 return *Available.begin();
483 void ConvergingVLIWScheduler::traceCandidate(const char *Label,
484 const ReadyQueue &Q, SUnit *SU, int Cost, PressureChange P) {
485 dbgs() << Label << " " << Q.getName() << " ";
487 dbgs() << DAG->TRI->getRegPressureSetName(P.getPSet()) << ":"
488 << P.getUnitInc() << " ";
491 dbgs() << "cost(" << Cost << ")\t";
495 // Very detailed queue dump, to be used with higher verbosity levels.
496 void ConvergingVLIWScheduler::readyQueueVerboseDump(
497 const RegPressureTracker &RPTracker, SchedCandidate &Candidate,
499 RegPressureTracker &TempTracker = const_cast<RegPressureTracker &>(RPTracker);
501 dbgs() << ">>> " << Q.getName() << "\n";
502 for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
503 RegPressureDelta RPDelta;
504 TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
505 DAG->getRegionCriticalPSets(),
506 DAG->getRegPressure().MaxSetPressure);
507 std::stringstream dbgstr;
508 dbgstr << "SU(" << std::setw(3) << (*I)->NodeNum << ")";
509 dbgs() << dbgstr.str();
510 SchedulingCost(Q, *I, Candidate, RPDelta, true);
512 (*I)->getInstr()->dump();
518 /// isSingleUnscheduledPred - If SU2 is the only unscheduled predecessor
519 /// of SU, return true (we may have duplicates)
520 static inline bool isSingleUnscheduledPred(SUnit *SU, SUnit *SU2) {
521 if (SU->NumPredsLeft == 0)
524 for (auto &Pred : SU->Preds) {
525 // We found an available, but not scheduled, predecessor.
526 if (!Pred.getSUnit()->isScheduled && (Pred.getSUnit() != SU2))
533 /// isSingleUnscheduledSucc - If SU2 is the only unscheduled successor
534 /// of SU, return true (we may have duplicates)
535 static inline bool isSingleUnscheduledSucc(SUnit *SU, SUnit *SU2) {
536 if (SU->NumSuccsLeft == 0)
539 for (auto &Succ : SU->Succs) {
540 // We found an available, but not scheduled, successor.
541 if (!Succ.getSUnit()->isScheduled && (Succ.getSUnit() != SU2))
547 /// Check if the instruction changes the register pressure of a register in the
548 /// high pressure set. The function returns a negative value if the pressure
549 /// decreases and a positive value is the pressure increases. If the instruction
550 /// doesn't use a high pressure register or doesn't change the register
551 /// pressure, then return 0.
552 int ConvergingVLIWScheduler::pressureChange(const SUnit *SU, bool isBotUp) {
553 PressureDiff &PD = DAG->getPressureDiff(SU);
557 // The pressure differences are computed bottom-up, so the comparision for
558 // an increase is positive in the bottom direction, but negative in the
559 // top-down direction.
560 if (HighPressureSets[P.getPSet()])
561 return (isBotUp ? P.getUnitInc() : -P.getUnitInc());
// Constants used to denote relative importance of
// heuristic components for cost computation.
static const unsigned PriorityOne = 200;  // Dominant weight: forced-high priority, reg-pressure penalties.
static const unsigned PriorityTwo = 50;   // Small weight: .cur bonus, current-max pressure penalty.
static const unsigned PriorityThree = 75; // Medium weight: zero-latency chaining into the current packet.
static const unsigned ScaleTwo = 10;      // Multiplier for height/depth and blocked-node counts.
573 /// Single point to compute overall scheduling cost.
574 /// TODO: More heuristics will be used soon.
575 int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue &Q, SUnit *SU,
576 SchedCandidate &Candidate,
577 RegPressureDelta &Delta,
579 // Initial trivial priority.
582 // Do not waste time on a node that is already scheduled.
583 if (!SU || SU->isScheduled)
586 LLVM_DEBUG(if (verbose) dbgs()
587 << ((Q.getID() == TopQID) ? "(top|" : "(bot|"));
588 // Forced priority is high.
589 if (SU->isScheduleHigh) {
590 ResCount += PriorityOne;
591 LLVM_DEBUG(dbgs() << "H|");
594 unsigned IsAvailableAmt = 0;
595 // Critical path first.
596 if (Q.getID() == TopQID) {
597 if (Top.isLatencyBound(SU)) {
598 LLVM_DEBUG(if (verbose) dbgs() << "LB|");
599 ResCount += (SU->getHeight() * ScaleTwo);
602 LLVM_DEBUG(if (verbose) {
603 std::stringstream dbgstr;
604 dbgstr << "h" << std::setw(3) << SU->getHeight() << "|";
605 dbgs() << dbgstr.str();
608 // If resources are available for it, multiply the
609 // chance of scheduling.
610 if (Top.ResourceModel->isResourceAvailable(SU, true)) {
611 IsAvailableAmt = (PriorityTwo + PriorityThree);
612 ResCount += IsAvailableAmt;
613 LLVM_DEBUG(if (verbose) dbgs() << "A|");
615 LLVM_DEBUG(if (verbose) dbgs() << " |");
617 if (Bot.isLatencyBound(SU)) {
618 LLVM_DEBUG(if (verbose) dbgs() << "LB|");
619 ResCount += (SU->getDepth() * ScaleTwo);
622 LLVM_DEBUG(if (verbose) {
623 std::stringstream dbgstr;
624 dbgstr << "d" << std::setw(3) << SU->getDepth() << "|";
625 dbgs() << dbgstr.str();
628 // If resources are available for it, multiply the
629 // chance of scheduling.
630 if (Bot.ResourceModel->isResourceAvailable(SU, false)) {
631 IsAvailableAmt = (PriorityTwo + PriorityThree);
632 ResCount += IsAvailableAmt;
633 LLVM_DEBUG(if (verbose) dbgs() << "A|");
635 LLVM_DEBUG(if (verbose) dbgs() << " |");
638 unsigned NumNodesBlocking = 0;
639 if (Q.getID() == TopQID) {
640 // How many SUs does it block from scheduling?
641 // Look at all of the successors of this node.
642 // Count the number of nodes that
643 // this node is the sole unscheduled node for.
644 if (Top.isLatencyBound(SU))
645 for (const SDep &SI : SU->Succs)
646 if (isSingleUnscheduledPred(SI.getSUnit(), SU))
649 // How many unscheduled predecessors block this node?
650 if (Bot.isLatencyBound(SU))
651 for (const SDep &PI : SU->Preds)
652 if (isSingleUnscheduledSucc(PI.getSUnit(), SU))
655 ResCount += (NumNodesBlocking * ScaleTwo);
657 LLVM_DEBUG(if (verbose) {
658 std::stringstream dbgstr;
659 dbgstr << "blk " << std::setw(2) << NumNodesBlocking << ")|";
660 dbgs() << dbgstr.str();
663 // Factor in reg pressure as a heuristic.
664 if (!IgnoreBBRegPressure) {
665 // Decrease priority by the amount that register pressure exceeds the limit.
666 ResCount -= (Delta.Excess.getUnitInc()*PriorityOne);
667 // Decrease priority if register pressure exceeds the limit.
668 ResCount -= (Delta.CriticalMax.getUnitInc()*PriorityOne);
669 // Decrease priority slightly if register pressure would increase over the
671 ResCount -= (Delta.CurrentMax.getUnitInc()*PriorityTwo);
672 // If there are register pressure issues, then we remove the value added for
673 // the instruction being available. The rationale is that we really don't
674 // want to schedule an instruction that causes a spill.
675 if (IsAvailableAmt && pressureChange(SU, Q.getID() != TopQID) > 0 &&
676 (Delta.Excess.getUnitInc() || Delta.CriticalMax.getUnitInc() ||
677 Delta.CurrentMax.getUnitInc()))
678 ResCount -= IsAvailableAmt;
679 LLVM_DEBUG(if (verbose) {
680 dbgs() << "RP " << Delta.Excess.getUnitInc() << "/"
681 << Delta.CriticalMax.getUnitInc() << "/"
682 << Delta.CurrentMax.getUnitInc() << ")|";
686 // Give a little extra priority to a .cur instruction if there is a resource
688 auto &QST = DAG->MF.getSubtarget<HexagonSubtarget>();
689 auto &QII = *QST.getInstrInfo();
690 if (SU->isInstr() && QII.mayBeCurLoad(*SU->getInstr())) {
691 if (Q.getID() == TopQID &&
692 Top.ResourceModel->isResourceAvailable(SU, true)) {
693 ResCount += PriorityTwo;
694 LLVM_DEBUG(if (verbose) dbgs() << "C|");
695 } else if (Q.getID() == BotQID &&
696 Bot.ResourceModel->isResourceAvailable(SU, false)) {
697 ResCount += PriorityTwo;
698 LLVM_DEBUG(if (verbose) dbgs() << "C|");
702 // Give preference to a zero latency instruction if the dependent
703 // instruction is in the current packet.
704 if (Q.getID() == TopQID && getWeakLeft(SU, true) == 0) {
705 for (const SDep &PI : SU->Preds) {
706 if (!PI.getSUnit()->getInstr()->isPseudo() && PI.isAssignedRegDep() &&
707 PI.getLatency() == 0 &&
708 Top.ResourceModel->isInPacket(PI.getSUnit())) {
709 ResCount += PriorityThree;
710 LLVM_DEBUG(if (verbose) dbgs() << "Z|");
713 } else if (Q.getID() == BotQID && getWeakLeft(SU, false) == 0) {
714 for (const SDep &SI : SU->Succs) {
715 if (!SI.getSUnit()->getInstr()->isPseudo() && SI.isAssignedRegDep() &&
716 SI.getLatency() == 0 &&
717 Bot.ResourceModel->isInPacket(SI.getSUnit())) {
718 ResCount += PriorityThree;
719 LLVM_DEBUG(if (verbose) dbgs() << "Z|");
724 // If the instruction has a non-zero latency dependence with an instruction in
725 // the current packet, then it should not be scheduled yet. The case occurs
726 // when the dependent instruction is scheduled in a new packet, so the
727 // scheduler updates the current cycle and pending instructions become
729 if (CheckEarlyAvail) {
730 if (Q.getID() == TopQID) {
731 for (const auto &PI : SU->Preds) {
732 if (PI.getLatency() > 0 &&
733 Top.ResourceModel->isInPacket(PI.getSUnit())) {
734 ResCount -= PriorityOne;
735 LLVM_DEBUG(if (verbose) dbgs() << "D|");
739 for (const auto &SI : SU->Succs) {
740 if (SI.getLatency() > 0 &&
741 Bot.ResourceModel->isInPacket(SI.getSUnit())) {
742 ResCount -= PriorityOne;
743 LLVM_DEBUG(if (verbose) dbgs() << "D|");
749 LLVM_DEBUG(if (verbose) {
750 std::stringstream dbgstr;
751 dbgstr << "Total " << std::setw(4) << ResCount << ")";
752 dbgs() << dbgstr.str();
758 /// Pick the best candidate from the top queue.
760 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
761 /// DAG building. To adjust for the current scheduling location we need to
762 /// maintain the number of vreg uses remaining to be top-scheduled.
763 ConvergingVLIWScheduler::CandResult ConvergingVLIWScheduler::
764 pickNodeFromQueue(VLIWSchedBoundary &Zone, const RegPressureTracker &RPTracker,
765 SchedCandidate &Candidate) {
766 ReadyQueue &Q = Zone.Available;
767 LLVM_DEBUG(if (SchedDebugVerboseLevel > 1)
768 readyQueueVerboseDump(RPTracker, Candidate, Q);
771 // getMaxPressureDelta temporarily modifies the tracker.
772 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
774 // BestSU remains NULL if no top candidates beat the best existing candidate.
775 CandResult FoundCandidate = NoCand;
776 for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
777 RegPressureDelta RPDelta;
778 TempTracker.getMaxPressureDelta((*I)->getInstr(), RPDelta,
779 DAG->getRegionCriticalPSets(),
780 DAG->getRegPressure().MaxSetPressure);
782 int CurrentCost = SchedulingCost(Q, *I, Candidate, RPDelta, false);
784 // Initialize the candidate if needed.
786 LLVM_DEBUG(traceCandidate("DCAND", Q, *I, CurrentCost));
788 Candidate.RPDelta = RPDelta;
789 Candidate.SCost = CurrentCost;
790 FoundCandidate = NodeOrder;
794 // Choose node order for negative cost candidates. There is no good
795 // candidate in this case.
796 if (CurrentCost < 0 && Candidate.SCost < 0) {
797 if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
798 || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
799 LLVM_DEBUG(traceCandidate("NCAND", Q, *I, CurrentCost));
801 Candidate.RPDelta = RPDelta;
802 Candidate.SCost = CurrentCost;
803 FoundCandidate = NodeOrder;
809 if (CurrentCost > Candidate.SCost) {
810 LLVM_DEBUG(traceCandidate("CCAND", Q, *I, CurrentCost));
812 Candidate.RPDelta = RPDelta;
813 Candidate.SCost = CurrentCost;
814 FoundCandidate = BestCost;
818 // Choose an instruction that does not depend on an artificial edge.
819 unsigned CurrWeak = getWeakLeft(*I, (Q.getID() == TopQID));
820 unsigned CandWeak = getWeakLeft(Candidate.SU, (Q.getID() == TopQID));
821 if (CurrWeak != CandWeak) {
822 if (CurrWeak < CandWeak) {
823 LLVM_DEBUG(traceCandidate("WCAND", Q, *I, CurrentCost));
825 Candidate.RPDelta = RPDelta;
826 Candidate.SCost = CurrentCost;
827 FoundCandidate = Weak;
832 if (CurrentCost == Candidate.SCost && Zone.isLatencyBound(*I)) {
833 unsigned CurrSize, CandSize;
834 if (Q.getID() == TopQID) {
835 CurrSize = (*I)->Succs.size();
836 CandSize = Candidate.SU->Succs.size();
838 CurrSize = (*I)->Preds.size();
839 CandSize = Candidate.SU->Preds.size();
841 if (CurrSize > CandSize) {
842 LLVM_DEBUG(traceCandidate("SPCAND", Q, *I, CurrentCost));
844 Candidate.RPDelta = RPDelta;
845 Candidate.SCost = CurrentCost;
846 FoundCandidate = BestCost;
848 // Keep the old candidate if it's a better candidate. That is, don't use
849 // the subsequent tie breaker.
850 if (CurrSize != CandSize)
855 // To avoid scheduling indeterminism, we need a tie breaker
856 // for the case when cost is identical for two nodes.
857 if (UseNewerCandidate && CurrentCost == Candidate.SCost) {
858 if ((Q.getID() == TopQID && (*I)->NodeNum < Candidate.SU->NodeNum)
859 || (Q.getID() == BotQID && (*I)->NodeNum > Candidate.SU->NodeNum)) {
860 LLVM_DEBUG(traceCandidate("TCAND", Q, *I, CurrentCost));
862 Candidate.RPDelta = RPDelta;
863 Candidate.SCost = CurrentCost;
864 FoundCandidate = NodeOrder;
869 // Fall through to original instruction order.
870 // Only consider node order if Candidate was chosen from this Q.
871 if (FoundCandidate == NoCand)
874 return FoundCandidate;
877 /// Pick the best candidate node from either the top or bottom queue.
878 SUnit *ConvergingVLIWScheduler::pickNodeBidrectional(bool &IsTopNode) {
879 // Schedule as far as possible in the direction of no choice. This is most
880 // efficient, but also provides the best heuristics for CriticalPSets.
881 if (SUnit *SU = Bot.pickOnlyChoice()) {
882 LLVM_DEBUG(dbgs() << "Picked only Bottom\n");
886 if (SUnit *SU = Top.pickOnlyChoice()) {
887 LLVM_DEBUG(dbgs() << "Picked only Top\n");
891 SchedCandidate BotCand;
892 // Prefer bottom scheduling when heuristics are silent.
893 CandResult BotResult = pickNodeFromQueue(Bot,
894 DAG->getBotRPTracker(), BotCand);
895 assert(BotResult != NoCand && "failed to find the first candidate");
897 // If either Q has a single candidate that provides the least increase in
898 // Excess pressure, we can immediately schedule from that Q.
900 // RegionCriticalPSets summarizes the pressure within the scheduled region and
901 // affects picking from either Q. If scheduling in one direction must
902 // increase pressure for one of the excess PSets, then schedule in that
903 // direction first to provide more freedom in the other direction.
904 if (BotResult == SingleExcess || BotResult == SingleCritical) {
905 LLVM_DEBUG(dbgs() << "Prefered Bottom Node\n");
909 // Check if the top Q has a better candidate.
910 SchedCandidate TopCand;
911 CandResult TopResult = pickNodeFromQueue(Top,
912 DAG->getTopRPTracker(), TopCand);
913 assert(TopResult != NoCand && "failed to find the first candidate");
915 if (TopResult == SingleExcess || TopResult == SingleCritical) {
916 LLVM_DEBUG(dbgs() << "Prefered Top Node\n");
920 // If either Q has a single candidate that minimizes pressure above the
921 // original region's pressure pick it.
922 if (BotResult == SingleMax) {
923 LLVM_DEBUG(dbgs() << "Prefered Bottom Node SingleMax\n");
927 if (TopResult == SingleMax) {
928 LLVM_DEBUG(dbgs() << "Prefered Top Node SingleMax\n");
932 if (TopCand.SCost > BotCand.SCost) {
933 LLVM_DEBUG(dbgs() << "Prefered Top Node Cost\n");
937 // Otherwise prefer the bottom candidate in node order.
938 LLVM_DEBUG(dbgs() << "Prefered Bottom in Node order\n");
943 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
944 SUnit *ConvergingVLIWScheduler::pickNode(bool &IsTopNode) {
945 if (DAG->top() == DAG->bottom()) {
946 assert(Top.Available.empty() && Top.Pending.empty() &&
947 Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
952 SU = Top.pickOnlyChoice();
954 SchedCandidate TopCand;
955 CandResult TopResult =
956 pickNodeFromQueue(Top, DAG->getTopRPTracker(), TopCand);
957 assert(TopResult != NoCand && "failed to find the first candidate");
962 } else if (ForceBottomUp) {
963 SU = Bot.pickOnlyChoice();
965 SchedCandidate BotCand;
966 CandResult BotResult =
967 pickNodeFromQueue(Bot, DAG->getBotRPTracker(), BotCand);
968 assert(BotResult != NoCand && "failed to find the first candidate");
974 SU = pickNodeBidrectional(IsTopNode);
976 if (SU->isTopReady())
978 if (SU->isBottomReady())
981 LLVM_DEBUG(dbgs() << "*** " << (IsTopNode ? "Top" : "Bottom")
982 << " Scheduling instruction in cycle "
983 << (IsTopNode ? Top.CurrCycle : Bot.CurrCycle) << " ("
984 << reportPackets() << ")\n";
989 /// Update the scheduler's state after scheduling a node. This is the same node
990 /// that was just returned by pickNode(). However, VLIWMachineScheduler needs
991 /// to update it's state based on the current cycle before MachineSchedStrategy
993 void ConvergingVLIWScheduler::schedNode(SUnit *SU, bool IsTopNode) {
996 SU->TopReadyCycle = Top.CurrCycle;
999 SU->BotReadyCycle = Bot.CurrCycle;