1 //===-- SIMachineScheduler.cpp - SI Scheduler Interface -*- C++ -*-----===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// \brief SI Machine Scheduler interface
13 //===----------------------------------------------------------------------===//
15 #include "SIMachineScheduler.h"
16 #include "AMDGPUSubtarget.h"
17 #include "llvm/CodeGen/LiveInterval.h"
18 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/CodeGen/MachineScheduler.h"
21 #include "llvm/CodeGen/RegisterPressure.h"
25 #define DEBUG_TYPE "misched"
// This scheduler implements a different scheduling algorithm than
// GenericScheduler.
// There are several specific architecture behaviours that can't be modelled
// by GenericScheduler:
// . When accessing the result of an SGPR load instruction, you have to wait
// for all the SGPR load instructions before your current instruction to
// finish.
// . When accessing the result of a VGPR load instruction, you have to wait
// for all the VGPR load instructions previous to the VGPR load instruction
// you are interested in to finish.
// . The lower the register pressure, the better load latencies are hidden.
// Moreover some specificities (like the fact a lot of instructions in the
// shader have few dependencies) make the generic scheduler have some
// unpredictable behaviours. For example when register pressure becomes high,
// it can either manage to prevent register pressure from going too high, or
// it can increase register pressure even more than if it hadn't taken
// register pressure into account.
// Also some other bad behaviours are generated, like loading at the beginning
// of the shader a constant into a VGPR that you won't need until the end of
// the shader.
// The scheduling problem for SI can be split into three main parts:
// . Hiding high latencies (texture sampling, etc)
// . Hiding low latencies (SGPR constant loading, etc)
// . Keeping register usage low for better latency hiding and general
// performance
// Some other things can also affect performance, but are hard to predict
// (cache usage, the fact the HW can issue several instructions from different
// wavefronts if they are of different types, etc)
// This scheduler tries to solve the scheduling problem by dividing it into
// simpler sub-problems. It divides the instructions into blocks, schedules
// locally inside the blocks where it takes care of low latencies, and then
// chooses the order of the blocks by taking care of high latencies.
// Dividing the instructions into blocks helps to keep register usage low.
// First the instructions are put into blocks.
// We want the blocks to help control register usage and hide high latencies
// later. To help control register usage, we typically want all local
// computations, when for example you create a result that can be consumed
// right away, to be contained in a block. Block inputs and outputs would
// typically be important results that are needed in several locations of
// the shader. Since we do want blocks to help hide high latencies, we want
// the instructions inside the block to have a minimal set of dependencies
// on high latencies. It will make it easy to pick blocks to hide specific
// high latencies.
77 // The block creation algorithm is divided into several steps, and several
78 // variants can be tried during the scheduling process.
// Second the order of the instructions inside the blocks is chosen.
// At that step we take into account only register usage and hiding
// low latency instructions.
//
// Third the block order is chosen; there we try to hide high latencies
// and keep register usage low.
//
// After the third step, a pass is done to improve the hiding of low
// latencies.
// Note: when talking about 'low latency' or 'high latency' it includes
// both the latency to get the data from the cache (or global mem) into the
// register, and the bandwidth limitations.
// Increasing the number of active wavefronts helps hide the former, but it
// doesn't solve the latter, which is why even if the wavefront count is high,
// we have to try to have as many instructions hiding high latencies as
// possible.
// The OpenCL doc gives for example a latency of 400 cycles for a global mem
// access, which is hidden by 10 instructions if the wavefront count is 10.
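// (Rough reasoning behind that figure, as an illustration only and not an
// exact model: a wavefront issues an instruction every 4 cycles, and with 10
// wavefronts resident the SIMD interleaves them, so roughly 40 cycles
// separate two consecutive instructions of the same wavefront; 10 independent
// instructions per wavefront then cover about 10 * 40 = 400 cycles.)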
// Some figures taken from AMD docs:
// Both texture and constant L1 caches are 4-way associative with 64 bytes
// per line.
// Constant cache is shared with 4 CUs.
// For texture sampling, the address generation unit receives 4 texture
// addresses per cycle, thus we could expect texture sampling latency to be
// equivalent to 4 instructions in the very best case (a VGPR is 64 work items,
// instructions in a wavefront group are executed every 4 cycles),
// or 16 instructions if the other wavefronts associated with the 3 other VALUs
// of the CU do texture sampling too. (Don't take these figures too seriously,
// as I'm not 100% sure of the computation.)
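// (Again only a rough sketch of where those numbers come from: one wavefront
// needs 64 addresses, at 4 addresses per cycle that is 16 cycles, i.e. about
// 4 instruction slots of 4 cycles each; if the 4 SIMDs of the CU all sample
// and share the address generation unit, that becomes about 4 * 4 = 16
// instruction slots.)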
// Data exports should get similar latency.
// For constant loading, the cache is shared with 4 CUs.
// The doc says "a throughput of 16B/cycle for each of the 4 Compute Unit".
// I guess if the other CUs don't read the cache, it can go up to 64B/cycle.
// It means a simple s_buffer_load should take one instruction to hide, as
// well as a s_buffer_loadx2 and potentially a s_buffer_loadx8 if on the same
// cache line.
// As of today the driver doesn't preload the constants in cache, thus the
// first loads get extra latency. The doc says global memory access can be
// 300-600 cycles. We do not specifically take that into account when
// scheduling, as we expect the driver to be able to preload the constants
// soon.
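//
// Informal outline of how the steps described above map onto the code below
// (a summary, not an additional pass):
//   SIScheduleBlockCreator::createBlocksForVariant() - color the SUnits and
//     group SUs of the same color into SIScheduleBlocks.
//   SIScheduleBlockCreator::scheduleInsideBlocks()   - fast schedule, then
//     the real intra-block schedule (low latencies, register usage).
//   SIScheduleBlockScheduler                         - choose the block order
//     (high latencies, register usage).
//   SIScheduleDAGMI::moveLowLatencies()              - final pass moving low
//     latency loads away from their users.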
128 static const char *getReasonStr(SIScheduleCandReason Reason) {
130 case NoCand: return "NOCAND";
131 case RegUsage: return "REGUSAGE";
132 case Latency: return "LATENCY";
133 case Successor: return "SUCCESSOR";
134 case Depth: return "DEPTH";
135 case NodeOrder: return "ORDER";
137 llvm_unreachable("Unknown reason!");
142 static bool tryLess(int TryVal, int CandVal,
143 SISchedulerCandidate &TryCand,
144 SISchedulerCandidate &Cand,
145 SIScheduleCandReason Reason) {
146 if (TryVal < CandVal) {
147 TryCand.Reason = Reason;
150 if (TryVal > CandVal) {
151 if (Cand.Reason > Reason)
152 Cand.Reason = Reason;
155 Cand.setRepeat(Reason);
159 static bool tryGreater(int TryVal, int CandVal,
160 SISchedulerCandidate &TryCand,
161 SISchedulerCandidate &Cand,
162 SIScheduleCandReason Reason) {
163 if (TryVal > CandVal) {
164 TryCand.Reason = Reason;
167 if (TryVal < CandVal) {
168 if (Cand.Reason > Reason)
169 Cand.Reason = Reason;
172 Cand.setRepeat(Reason);
176 // SIScheduleBlock //
178 void SIScheduleBlock::addUnit(SUnit *SU) {
179 NodeNum2Index[SU->NodeNum] = SUnits.size();
180 SUnits.push_back(SU);
185 void SIScheduleBlock::traceCandidate(const SISchedCandidate &Cand) {
187 dbgs() << " SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
192 void SIScheduleBlock::tryCandidateTopDown(SISchedCandidate &Cand,
193 SISchedCandidate &TryCand) {
194 // Initialize the candidate if needed.
195 if (!Cand.isValid()) {
196 TryCand.Reason = NodeOrder;
200 if (Cand.SGPRUsage > 60 &&
201 tryLess(TryCand.SGPRUsage, Cand.SGPRUsage, TryCand, Cand, RegUsage))
  // Schedule low latency instructions as early (top) as possible.
  // Order of priority is:
  // . Low latency instructions which do not depend on other low latency
  //   instructions we haven't waited for
  // . Other instructions which do not depend on low latency instructions
  //   we haven't waited for
  // . All other instructions
  // Goal is to get: low latency instructions - independent instructions
  //   - (eventually some more low latency instructions)
  //   - instructions that depend on the first low latency instructions.
  // If in the block there are a lot of constant loads, the SGPR usage
  // could go quite high, thus going above the arbitrary limit of 60 will
  // encourage using the already loaded constants (in order to release some
  // SGPRs) before loading more.
219 if (tryLess(TryCand.HasLowLatencyNonWaitedParent,
220 Cand.HasLowLatencyNonWaitedParent,
221 TryCand, Cand, SIScheduleCandReason::Depth))
224 if (tryGreater(TryCand.IsLowLatency, Cand.IsLowLatency,
225 TryCand, Cand, SIScheduleCandReason::Depth))
228 if (TryCand.IsLowLatency &&
229 tryLess(TryCand.LowLatencyOffset, Cand.LowLatencyOffset,
230 TryCand, Cand, SIScheduleCandReason::Depth))
233 if (tryLess(TryCand.VGPRUsage, Cand.VGPRUsage, TryCand, Cand, RegUsage))
236 // Fall through to original instruction order.
237 if (TryCand.SU->NodeNum < Cand.SU->NodeNum) {
238 TryCand.Reason = NodeOrder;
242 SUnit* SIScheduleBlock::pickNode() {
243 SISchedCandidate TopCand;
245 for (SUnit* SU : TopReadySUs) {
246 SISchedCandidate TryCand;
247 std::vector<unsigned> pressure;
248 std::vector<unsigned> MaxPressure;
249 // Predict register usage after this instruction.
251 TopRPTracker.getDownwardPressure(SU->getInstr(), pressure, MaxPressure);
252 TryCand.SGPRUsage = pressure[DAG->getSGPRSetID()];
253 TryCand.VGPRUsage = pressure[DAG->getVGPRSetID()];
254 TryCand.IsLowLatency = DAG->IsLowLatencySU[SU->NodeNum];
255 TryCand.LowLatencyOffset = DAG->LowLatencyOffset[SU->NodeNum];
256 TryCand.HasLowLatencyNonWaitedParent =
257 HasLowLatencyNonWaitedParent[NodeNum2Index[SU->NodeNum]];
258 tryCandidateTopDown(TopCand, TryCand);
259 if (TryCand.Reason != NoCand)
260 TopCand.setBest(TryCand);
267 // Schedule something valid.
268 void SIScheduleBlock::fastSchedule() {
273 for (SUnit* SU : SUnits) {
274 if (!SU->NumPredsLeft)
275 TopReadySUs.push_back(SU);
278 while (!TopReadySUs.empty()) {
279 SUnit *SU = TopReadySUs[0];
280 ScheduledSUnits.push_back(SU);
// Returns whether the register was set between First and Last.
288 static bool isDefBetween(unsigned Reg,
289 SlotIndex First, SlotIndex Last,
290 const MachineRegisterInfo *MRI,
291 const LiveIntervals *LIS) {
292 for (MachineRegisterInfo::def_instr_iterator
293 UI = MRI->def_instr_begin(Reg),
294 UE = MRI->def_instr_end(); UI != UE; ++UI) {
295 const MachineInstr* MI = &*UI;
296 if (MI->isDebugValue())
298 SlotIndex InstSlot = LIS->getInstructionIndex(MI).getRegSlot();
299 if (InstSlot >= First && InstSlot <= Last)
305 void SIScheduleBlock::initRegPressure(MachineBasicBlock::iterator BeginBlock,
306 MachineBasicBlock::iterator EndBlock) {
307 IntervalPressure Pressure, BotPressure;
308 RegPressureTracker RPTracker(Pressure), BotRPTracker(BotPressure);
309 LiveIntervals *LIS = DAG->getLIS();
310 MachineRegisterInfo *MRI = DAG->getMRI();
311 DAG->initRPTracker(TopRPTracker);
312 DAG->initRPTracker(BotRPTracker);
313 DAG->initRPTracker(RPTracker);
  // Goes through all SUs. RPTracker captures what had to be alive for the SUs
  // to execute, and what is still alive at the end.
317 for (SUnit* SU : ScheduledSUnits) {
318 RPTracker.setPos(SU->getInstr());
322 // Close the RPTracker to finalize live ins/outs.
323 RPTracker.closeRegion();
325 // Initialize the live ins and live outs.
326 TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
327 BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);
  // Do not track physical registers, because doing so messes up the tracking.
330 for (unsigned Reg : RPTracker.getPressure().LiveInRegs) {
331 if (TargetRegisterInfo::isVirtualRegister(Reg))
332 LiveInRegs.insert(Reg);
  // There are several possibilities to distinguish:
  // 1) Reg is not input to any instruction in the block, but is output of one
  // 2) 1) + read in the block and not needed after it
  // 3) 1) + read in the block but needed in another block
  // 4) Reg is input of an instruction but another block will read it too
  // 5) Reg is input of an instruction and then rewritten in the block.
  //    result is not read in the block (implies used in another block)
  // 6) Reg is input of an instruction and then rewritten in the block.
  //    result is read in the block and not needed in another block
  // 7) Reg is input of an instruction and then rewritten in the block.
  //    result is read in the block but also needed in another block
  // LiveInRegs will contain all the regs in situations 4, 5, 6, 7
  // We want LiveOutRegs to contain only Regs whose content will be read after
  // in another block, and whose content was written in the current block,
  // that is we want it to get 1, 3, 5, 7
  // Since we made the MIs of a block be packed all together before
  // scheduling, the LiveIntervals were correct, and the RPTracker was
  // able to correctly handle 5 vs 6, and 2 vs 3.
  // (Note: This is not sufficient for the RPTracker to avoid mistakes for
  // case 4.)
  // The RPTracker's LiveOutRegs has 1, 3, (some correct or incorrect) 4, 5, 7
  // Comparing to LiveInRegs is not sufficient to differentiate 4 vs 5, 7
  // The use of isDefBetween removes case 4.
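  // For instance (case 5 above): a register is defined before the block, an
  // instruction of the block reads it and another instruction of the block
  // overwrites it with a value only read by a later block; the register then
  // ends up in both LiveInRegs and LiveOutRegs.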
357 for (unsigned Reg : RPTracker.getPressure().LiveOutRegs) {
358 if (TargetRegisterInfo::isVirtualRegister(Reg) &&
359 isDefBetween(Reg, LIS->getInstructionIndex(BeginBlock).getRegSlot(),
360 LIS->getInstructionIndex(EndBlock).getRegSlot(),
362 LiveOutRegs.insert(Reg);
  // Pressure = sum_alive_registers register size
  // Internally llvm will represent some registers as big 128-bit registers
  // for example, but they actually correspond to 4 actual 32-bit registers.
  // Thus Pressure is not equal to num_alive_registers * constant.
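  // As a rough illustration based on the pressure set weights (see
  // MachineRegisterInfo::getPressureSets): a live 128-bit VGPR tuple
  // contributes 4 to the VGPR pressure set, while a live 32-bit VGPR
  // contributes only 1.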
370 LiveInPressure = TopPressure.MaxSetPressure;
371 LiveOutPressure = BotPressure.MaxSetPressure;
373 // Prepares TopRPTracker for top down scheduling.
374 TopRPTracker.closeTop();
377 void SIScheduleBlock::schedule(MachineBasicBlock::iterator BeginBlock,
378 MachineBasicBlock::iterator EndBlock) {
382 // PreScheduling phase to set LiveIn and LiveOut.
383 initRegPressure(BeginBlock, EndBlock);
386 // Schedule for real now.
390 for (SUnit* SU : SUnits) {
391 if (!SU->NumPredsLeft)
392 TopReadySUs.push_back(SU);
395 while (!TopReadySUs.empty()) {
396 SUnit *SU = pickNode();
397 ScheduledSUnits.push_back(SU);
398 TopRPTracker.setPos(SU->getInstr());
399 TopRPTracker.advance();
403 // TODO: compute InternalAdditionnalPressure.
404 InternalAdditionnalPressure.resize(TopPressure.MaxSetPressure.size());
406 // Check everything is right.
408 assert(SUnits.size() == ScheduledSUnits.size() &&
409 TopReadySUs.empty());
410 for (SUnit* SU : SUnits) {
411 assert(SU->isScheduled &&
412 SU->NumPredsLeft == 0);
419 void SIScheduleBlock::undoSchedule() {
420 for (SUnit* SU : SUnits) {
421 SU->isScheduled = false;
422 for (SDep& Succ : SU->Succs) {
423 if (BC->isSUInBlock(Succ.getSUnit(), ID))
424 undoReleaseSucc(SU, &Succ);
427 HasLowLatencyNonWaitedParent.assign(SUnits.size(), 0);
428 ScheduledSUnits.clear();
432 void SIScheduleBlock::undoReleaseSucc(SUnit *SU, SDep *SuccEdge) {
433 SUnit *SuccSU = SuccEdge->getSUnit();
435 if (SuccEdge->isWeak()) {
436 ++SuccSU->WeakPredsLeft;
439 ++SuccSU->NumPredsLeft;
442 void SIScheduleBlock::releaseSucc(SUnit *SU, SDep *SuccEdge) {
443 SUnit *SuccSU = SuccEdge->getSUnit();
445 if (SuccEdge->isWeak()) {
446 --SuccSU->WeakPredsLeft;
450 if (SuccSU->NumPredsLeft == 0) {
451 dbgs() << "*** Scheduling failed! ***\n";
453 dbgs() << " has been released too many times!\n";
454 llvm_unreachable(nullptr);
458 --SuccSU->NumPredsLeft;
/// Release the successors of SU that are inside the block (InOrOutBlock true)
/// or outside the block (InOrOutBlock false).
462 void SIScheduleBlock::releaseSuccessors(SUnit *SU, bool InOrOutBlock) {
463 for (SDep& Succ : SU->Succs) {
464 SUnit *SuccSU = Succ.getSUnit();
466 if (BC->isSUInBlock(SuccSU, ID) != InOrOutBlock)
469 releaseSucc(SU, &Succ);
470 if (SuccSU->NumPredsLeft == 0 && InOrOutBlock)
471 TopReadySUs.push_back(SuccSU);
475 void SIScheduleBlock::nodeScheduled(SUnit *SU) {
477 assert (!SU->NumPredsLeft);
478 std::vector<SUnit*>::iterator I =
479 std::find(TopReadySUs.begin(), TopReadySUs.end(), SU);
480 if (I == TopReadySUs.end()) {
481 dbgs() << "Data Structure Bug in SI Scheduler\n";
482 llvm_unreachable(nullptr);
484 TopReadySUs.erase(I);
486 releaseSuccessors(SU, true);
487 // Scheduling this node will trigger a wait,
488 // thus propagate to other instructions that they do not need to wait either.
489 if (HasLowLatencyNonWaitedParent[NodeNum2Index[SU->NodeNum]])
490 HasLowLatencyNonWaitedParent.assign(SUnits.size(), 0);
492 if (DAG->IsLowLatencySU[SU->NodeNum]) {
493 for (SDep& Succ : SU->Succs) {
494 std::map<unsigned, unsigned>::iterator I =
495 NodeNum2Index.find(Succ.getSUnit()->NodeNum);
496 if (I != NodeNum2Index.end())
497 HasLowLatencyNonWaitedParent[I->second] = 1;
500 SU->isScheduled = true;
503 void SIScheduleBlock::finalizeUnits() {
504 // We remove links from outside blocks to enable scheduling inside the block.
505 for (SUnit* SU : SUnits) {
506 releaseSuccessors(SU, false);
507 if (DAG->IsHighLatencySU[SU->NodeNum])
508 HighLatencyBlock = true;
510 HasLowLatencyNonWaitedParent.resize(SUnits.size(), 0);
// We maintain ascending order of IDs.
514 void SIScheduleBlock::addPred(SIScheduleBlock *Pred) {
515 unsigned PredID = Pred->getID();
517 // Check if not already predecessor.
518 for (SIScheduleBlock* P : Preds) {
519 if (PredID == P->getID())
522 Preds.push_back(Pred);
525 for (SIScheduleBlock* S : Succs) {
526 if (PredID == S->getID())
527 assert(!"Loop in the Block Graph!\n");
532 void SIScheduleBlock::addSucc(SIScheduleBlock *Succ) {
533 unsigned SuccID = Succ->getID();
  // Check if not already successor.
536 for (SIScheduleBlock* S : Succs) {
537 if (SuccID == S->getID())
540 if (Succ->isHighLatencyBlock())
541 ++NumHighLatencySuccessors;
542 Succs.push_back(Succ);
544 for (SIScheduleBlock* P : Preds) {
545 if (SuccID == P->getID())
      assert(!"Loop in the Block Graph!\n");
552 void SIScheduleBlock::printDebug(bool full) {
553 dbgs() << "Block (" << ID << ")\n";
557 dbgs() << "\nContains High Latency Instruction: "
558 << HighLatencyBlock << '\n';
559 dbgs() << "\nDepends On:\n";
560 for (SIScheduleBlock* P : Preds) {
561 P->printDebug(false);
564 dbgs() << "\nSuccessors:\n";
565 for (SIScheduleBlock* S : Succs) {
566 S->printDebug(false);
570 dbgs() << "LiveInPressure " << LiveInPressure[DAG->getSGPRSetID()] << ' '
571 << LiveInPressure[DAG->getVGPRSetID()] << '\n';
572 dbgs() << "LiveOutPressure " << LiveOutPressure[DAG->getSGPRSetID()] << ' '
573 << LiveOutPressure[DAG->getVGPRSetID()] << "\n\n";
574 dbgs() << "LiveIns:\n";
575 for (unsigned Reg : LiveInRegs)
576 dbgs() << PrintVRegOrUnit(Reg, DAG->getTRI()) << ' ';
578 dbgs() << "\nLiveOuts:\n";
579 for (unsigned Reg : LiveOutRegs)
580 dbgs() << PrintVRegOrUnit(Reg, DAG->getTRI()) << ' ';
583 dbgs() << "\nInstructions:\n";
585 for (SUnit* SU : SUnits) {
589 for (SUnit* SU : SUnits) {
594 dbgs() << "///////////////////////\n";
599 // SIScheduleBlockCreator //
601 SIScheduleBlockCreator::SIScheduleBlockCreator(SIScheduleDAGMI *DAG) :
605 SIScheduleBlockCreator::~SIScheduleBlockCreator() {
609 SIScheduleBlockCreator::getBlocks(SISchedulerBlockCreatorVariant BlockVariant) {
610 std::map<SISchedulerBlockCreatorVariant, SIScheduleBlocks>::iterator B =
611 Blocks.find(BlockVariant);
612 if (B == Blocks.end()) {
613 SIScheduleBlocks Res;
614 createBlocksForVariant(BlockVariant);
616 scheduleInsideBlocks();
618 Res.Blocks = CurrentBlocks;
619 Res.TopDownIndex2Block = TopDownIndex2Block;
620 Res.TopDownBlock2Index = TopDownBlock2Index;
621 Blocks[BlockVariant] = Res;
628 bool SIScheduleBlockCreator::isSUInBlock(SUnit *SU, unsigned ID) {
629 if (SU->NodeNum >= DAG->SUnits.size())
631 return CurrentBlocks[Node2CurrentBlock[SU->NodeNum]]->getID() == ID;
634 void SIScheduleBlockCreator::colorHighLatenciesAlone() {
635 unsigned DAGSize = DAG->SUnits.size();
637 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
638 SUnit *SU = &DAG->SUnits[i];
639 if (DAG->IsHighLatencySU[SU->NodeNum]) {
640 CurrentColoring[SU->NodeNum] = NextReservedID++;
645 void SIScheduleBlockCreator::colorHighLatenciesGroups() {
646 unsigned DAGSize = DAG->SUnits.size();
647 unsigned NumHighLatencies = 0;
649 unsigned Color = NextReservedID;
651 std::set<unsigned> FormingGroup;
653 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
654 SUnit *SU = &DAG->SUnits[i];
655 if (DAG->IsHighLatencySU[SU->NodeNum])
659 if (NumHighLatencies == 0)
662 if (NumHighLatencies <= 6)
664 else if (NumHighLatencies <= 12)
669 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
670 SUnit *SU = &DAG->SUnits[i];
671 if (DAG->IsHighLatencySU[SU->NodeNum]) {
      bool CompatibleGroup = true;
673 unsigned ProposedColor = Color;
674 for (unsigned j : FormingGroup) {
        // TODO: Currently CompatibleGroup will always be false,
        // because the graph enforces the load order. This
        // can be fixed, but since keeping the load order is often
        // good for performance, changing it causes a performance hit (for
        // both the default scheduler and this scheduler).
        // When this scheduler determines a good load order,
        // this can be fixed.
682 if (!DAG->canAddEdge(SU, &DAG->SUnits[j]) ||
683 !DAG->canAddEdge(&DAG->SUnits[j], SU))
684 CompatibleGroup = false;
686 if (!CompatibleGroup || ++Count == GroupSize) {
687 FormingGroup.clear();
688 Color = ++NextReservedID;
689 if (!CompatibleGroup) {
690 ProposedColor = Color;
691 FormingGroup.insert(SU->NodeNum);
695 FormingGroup.insert(SU->NodeNum);
697 CurrentColoring[SU->NodeNum] = ProposedColor;
702 void SIScheduleBlockCreator::colorComputeReservedDependencies() {
703 unsigned DAGSize = DAG->SUnits.size();
704 std::map<std::set<unsigned>, unsigned> ColorCombinations;
706 CurrentTopDownReservedDependencyColoring.clear();
707 CurrentBottomUpReservedDependencyColoring.clear();
709 CurrentTopDownReservedDependencyColoring.resize(DAGSize, 0);
710 CurrentBottomUpReservedDependencyColoring.resize(DAGSize, 0);
712 // Traverse TopDown, and give different colors to SUs depending
713 // on which combination of High Latencies they depend on.
715 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
716 SUnit *SU = &DAG->SUnits[DAG->TopDownIndex2SU[i]];
717 std::set<unsigned> SUColors;
720 if (CurrentColoring[SU->NodeNum]) {
721 CurrentTopDownReservedDependencyColoring[SU->NodeNum] =
722 CurrentColoring[SU->NodeNum];
726 for (SDep& PredDep : SU->Preds) {
727 SUnit *Pred = PredDep.getSUnit();
728 if (PredDep.isWeak() || Pred->NodeNum >= DAGSize)
730 if (CurrentTopDownReservedDependencyColoring[Pred->NodeNum] > 0)
731 SUColors.insert(CurrentTopDownReservedDependencyColoring[Pred->NodeNum]);
733 // Color 0 by default.
734 if (SUColors.empty())
    // Same color as its parents.
737 if (SUColors.size() == 1 && *SUColors.begin() > DAGSize)
738 CurrentTopDownReservedDependencyColoring[SU->NodeNum] =
741 std::map<std::set<unsigned>, unsigned>::iterator Pos =
742 ColorCombinations.find(SUColors);
743 if (Pos != ColorCombinations.end()) {
744 CurrentTopDownReservedDependencyColoring[SU->NodeNum] = Pos->second;
746 CurrentTopDownReservedDependencyColoring[SU->NodeNum] =
748 ColorCombinations[SUColors] = NextNonReservedID++;
753 ColorCombinations.clear();
755 // Same as before, but BottomUp.
757 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
758 SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
759 std::set<unsigned> SUColors;
762 if (CurrentColoring[SU->NodeNum]) {
763 CurrentBottomUpReservedDependencyColoring[SU->NodeNum] =
764 CurrentColoring[SU->NodeNum];
768 for (SDep& SuccDep : SU->Succs) {
769 SUnit *Succ = SuccDep.getSUnit();
770 if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
772 if (CurrentBottomUpReservedDependencyColoring[Succ->NodeNum] > 0)
773 SUColors.insert(CurrentBottomUpReservedDependencyColoring[Succ->NodeNum]);
776 if (SUColors.empty())
    // Same color as its successors.
779 if (SUColors.size() == 1 && *SUColors.begin() > DAGSize)
780 CurrentBottomUpReservedDependencyColoring[SU->NodeNum] =
783 std::map<std::set<unsigned>, unsigned>::iterator Pos =
784 ColorCombinations.find(SUColors);
785 if (Pos != ColorCombinations.end()) {
786 CurrentBottomUpReservedDependencyColoring[SU->NodeNum] = Pos->second;
788 CurrentBottomUpReservedDependencyColoring[SU->NodeNum] =
790 ColorCombinations[SUColors] = NextNonReservedID++;
796 void SIScheduleBlockCreator::colorAccordingToReservedDependencies() {
797 unsigned DAGSize = DAG->SUnits.size();
798 std::map<std::pair<unsigned, unsigned>, unsigned> ColorCombinations;
  // Every combination of colors given by the top down
  // and bottom up Reserved node dependency coloring
  // gets a new color.
803 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
804 SUnit *SU = &DAG->SUnits[i];
805 std::pair<unsigned, unsigned> SUColors;
807 // High latency instructions: already given.
808 if (CurrentColoring[SU->NodeNum])
811 SUColors.first = CurrentTopDownReservedDependencyColoring[SU->NodeNum];
812 SUColors.second = CurrentBottomUpReservedDependencyColoring[SU->NodeNum];
814 std::map<std::pair<unsigned, unsigned>, unsigned>::iterator Pos =
815 ColorCombinations.find(SUColors);
816 if (Pos != ColorCombinations.end()) {
817 CurrentColoring[SU->NodeNum] = Pos->second;
819 CurrentColoring[SU->NodeNum] = NextNonReservedID;
820 ColorCombinations[SUColors] = NextNonReservedID++;
825 void SIScheduleBlockCreator::colorEndsAccordingToDependencies() {
826 unsigned DAGSize = DAG->SUnits.size();
827 std::vector<int> PendingColoring = CurrentColoring;
829 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
830 SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
831 std::set<unsigned> SUColors;
832 std::set<unsigned> SUColorsPending;
834 if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
837 if (CurrentBottomUpReservedDependencyColoring[SU->NodeNum] > 0 ||
838 CurrentTopDownReservedDependencyColoring[SU->NodeNum] > 0)
841 for (SDep& SuccDep : SU->Succs) {
842 SUnit *Succ = SuccDep.getSUnit();
843 if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
845 if (CurrentBottomUpReservedDependencyColoring[Succ->NodeNum] > 0 ||
846 CurrentTopDownReservedDependencyColoring[Succ->NodeNum] > 0)
847 SUColors.insert(CurrentColoring[Succ->NodeNum]);
848 SUColorsPending.insert(PendingColoring[Succ->NodeNum]);
850 if (SUColors.size() == 1 && SUColorsPending.size() == 1)
851 PendingColoring[SU->NodeNum] = *SUColors.begin();
852 else // TODO: Attribute new colors depending on color
853 // combination of children.
854 PendingColoring[SU->NodeNum] = NextNonReservedID++;
856 CurrentColoring = PendingColoring;
860 void SIScheduleBlockCreator::colorForceConsecutiveOrderInGroup() {
861 unsigned DAGSize = DAG->SUnits.size();
862 unsigned PreviousColor;
863 std::set<unsigned> SeenColors;
868 PreviousColor = CurrentColoring[0];
870 for (unsigned i = 1, e = DAGSize; i != e; ++i) {
871 SUnit *SU = &DAG->SUnits[i];
872 unsigned CurrentColor = CurrentColoring[i];
873 unsigned PreviousColorSave = PreviousColor;
874 assert(i == SU->NodeNum);
876 if (CurrentColor != PreviousColor)
877 SeenColors.insert(PreviousColor);
878 PreviousColor = CurrentColor;
880 if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
883 if (SeenColors.find(CurrentColor) == SeenColors.end())
886 if (PreviousColorSave != CurrentColor)
887 CurrentColoring[i] = NextNonReservedID++;
889 CurrentColoring[i] = CurrentColoring[i-1];
893 void SIScheduleBlockCreator::colorMergeConstantLoadsNextGroup() {
894 unsigned DAGSize = DAG->SUnits.size();
896 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
897 SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
898 std::set<unsigned> SUColors;
900 if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
903 // No predecessor: Vgpr constant loading.
904 // Low latency instructions usually have a predecessor (the address)
905 if (SU->Preds.size() > 0 && !DAG->IsLowLatencySU[SU->NodeNum])
908 for (SDep& SuccDep : SU->Succs) {
909 SUnit *Succ = SuccDep.getSUnit();
910 if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
912 SUColors.insert(CurrentColoring[Succ->NodeNum]);
914 if (SUColors.size() == 1)
915 CurrentColoring[SU->NodeNum] = *SUColors.begin();
919 void SIScheduleBlockCreator::colorMergeIfPossibleNextGroup() {
920 unsigned DAGSize = DAG->SUnits.size();
922 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
923 SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
924 std::set<unsigned> SUColors;
926 if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
929 for (SDep& SuccDep : SU->Succs) {
930 SUnit *Succ = SuccDep.getSUnit();
931 if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
933 SUColors.insert(CurrentColoring[Succ->NodeNum]);
935 if (SUColors.size() == 1)
936 CurrentColoring[SU->NodeNum] = *SUColors.begin();
940 void SIScheduleBlockCreator::colorMergeIfPossibleNextGroupOnlyForReserved() {
941 unsigned DAGSize = DAG->SUnits.size();
943 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
944 SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
945 std::set<unsigned> SUColors;
947 if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
950 for (SDep& SuccDep : SU->Succs) {
951 SUnit *Succ = SuccDep.getSUnit();
952 if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
954 SUColors.insert(CurrentColoring[Succ->NodeNum]);
956 if (SUColors.size() == 1 && *SUColors.begin() <= DAGSize)
957 CurrentColoring[SU->NodeNum] = *SUColors.begin();
961 void SIScheduleBlockCreator::colorMergeIfPossibleSmallGroupsToNextGroup() {
962 unsigned DAGSize = DAG->SUnits.size();
963 std::map<unsigned, unsigned> ColorCount;
965 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
966 SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
967 unsigned color = CurrentColoring[SU->NodeNum];
968 std::map<unsigned, unsigned>::iterator Pos = ColorCount.find(color);
969 if (Pos != ColorCount.end()) {
972 ColorCount[color] = 1;
976 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
977 SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
978 unsigned color = CurrentColoring[SU->NodeNum];
979 std::set<unsigned> SUColors;
981 if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
984 if (ColorCount[color] > 1)
987 for (SDep& SuccDep : SU->Succs) {
988 SUnit *Succ = SuccDep.getSUnit();
989 if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
991 SUColors.insert(CurrentColoring[Succ->NodeNum]);
993 if (SUColors.size() == 1 && *SUColors.begin() != color) {
995 CurrentColoring[SU->NodeNum] = *SUColors.begin();
996 ++ColorCount[*SUColors.begin()];
1001 void SIScheduleBlockCreator::cutHugeBlocks() {
1005 void SIScheduleBlockCreator::regroupNoUserInstructions() {
1006 unsigned DAGSize = DAG->SUnits.size();
1007 int GroupID = NextNonReservedID++;
1009 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1010 SUnit *SU = &DAG->SUnits[DAG->BottomUpIndex2SU[i]];
1011 bool hasSuccessor = false;
1013 if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
1016 for (SDep& SuccDep : SU->Succs) {
1017 SUnit *Succ = SuccDep.getSUnit();
1018 if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
1020 hasSuccessor = true;
1023 CurrentColoring[SU->NodeNum] = GroupID;
1027 void SIScheduleBlockCreator::createBlocksForVariant(SISchedulerBlockCreatorVariant BlockVariant) {
1028 unsigned DAGSize = DAG->SUnits.size();
1029 std::map<unsigned,unsigned> RealID;
1031 CurrentBlocks.clear();
1032 CurrentColoring.clear();
1033 CurrentColoring.resize(DAGSize, 0);
1034 Node2CurrentBlock.clear();
1036 // Restore links previous scheduling variant has overridden.
1037 DAG->restoreSULinksLeft();
1040 NextNonReservedID = DAGSize + 1;
1042 DEBUG(dbgs() << "Coloring the graph\n");
1044 if (BlockVariant == SISchedulerBlockCreatorVariant::LatenciesGrouped)
1045 colorHighLatenciesGroups();
1047 colorHighLatenciesAlone();
1048 colorComputeReservedDependencies();
1049 colorAccordingToReservedDependencies();
1050 colorEndsAccordingToDependencies();
1051 if (BlockVariant == SISchedulerBlockCreatorVariant::LatenciesAlonePlusConsecutive)
1052 colorForceConsecutiveOrderInGroup();
1053 regroupNoUserInstructions();
1054 colorMergeConstantLoadsNextGroup();
1055 colorMergeIfPossibleNextGroupOnlyForReserved();
1057 // Put SUs of same color into same block
1058 Node2CurrentBlock.resize(DAGSize, -1);
1059 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1060 SUnit *SU = &DAG->SUnits[i];
1061 unsigned Color = CurrentColoring[SU->NodeNum];
1062 if (RealID.find(Color) == RealID.end()) {
1063 int ID = CurrentBlocks.size();
1064 BlockPtrs.push_back(
1065 make_unique<SIScheduleBlock>(DAG, this, ID));
1066 CurrentBlocks.push_back(BlockPtrs.rbegin()->get());
1069 CurrentBlocks[RealID[Color]]->addUnit(SU);
1070 Node2CurrentBlock[SU->NodeNum] = RealID[Color];
1073 // Build dependencies between blocks.
1074 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1075 SUnit *SU = &DAG->SUnits[i];
1076 int SUID = Node2CurrentBlock[i];
1077 for (SDep& SuccDep : SU->Succs) {
1078 SUnit *Succ = SuccDep.getSUnit();
1079 if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
1081 if (Node2CurrentBlock[Succ->NodeNum] != SUID)
1082 CurrentBlocks[SUID]->addSucc(CurrentBlocks[Node2CurrentBlock[Succ->NodeNum]]);
1084 for (SDep& PredDep : SU->Preds) {
1085 SUnit *Pred = PredDep.getSUnit();
1086 if (PredDep.isWeak() || Pred->NodeNum >= DAGSize)
1088 if (Node2CurrentBlock[Pred->NodeNum] != SUID)
1089 CurrentBlocks[SUID]->addPred(CurrentBlocks[Node2CurrentBlock[Pred->NodeNum]]);
  // Free the roots and leaves of all blocks to enable scheduling inside them.
1094 for (unsigned i = 0, e = CurrentBlocks.size(); i != e; ++i) {
1095 SIScheduleBlock *Block = CurrentBlocks[i];
1096 Block->finalizeUnits();
1099 dbgs() << "Blocks created:\n\n";
1100 for (unsigned i = 0, e = CurrentBlocks.size(); i != e; ++i) {
1101 SIScheduleBlock *Block = CurrentBlocks[i];
1102 Block->printDebug(true);
// Two functions taken from CodeGen/MachineScheduler.cpp
1109 /// If this iterator is a debug value, increment until reaching the End or a
1110 /// non-debug instruction.
1111 static MachineBasicBlock::const_iterator
1112 nextIfDebug(MachineBasicBlock::const_iterator I,
1113 MachineBasicBlock::const_iterator End) {
1114 for(; I != End; ++I) {
1115 if (!I->isDebugValue())
1121 /// Non-const version.
1122 static MachineBasicBlock::iterator
1123 nextIfDebug(MachineBasicBlock::iterator I,
1124 MachineBasicBlock::const_iterator End) {
  // Cast the return value to nonconst MachineInstr, then cast to an
  // instr_iterator, which does not check for null, finally return a
  // bundle_iterator.
1128 return MachineBasicBlock::instr_iterator(
1129 const_cast<MachineInstr*>(
1130 &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
1133 void SIScheduleBlockCreator::topologicalSort() {
1134 unsigned DAGSize = CurrentBlocks.size();
1135 std::vector<int> WorkList;
1137 DEBUG(dbgs() << "Topological Sort\n");
1139 WorkList.reserve(DAGSize);
1140 TopDownIndex2Block.resize(DAGSize);
1141 TopDownBlock2Index.resize(DAGSize);
1142 BottomUpIndex2Block.resize(DAGSize);
1144 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1145 SIScheduleBlock *Block = CurrentBlocks[i];
1146 unsigned Degree = Block->getSuccs().size();
1147 TopDownBlock2Index[i] = Degree;
1149 WorkList.push_back(i);
1154 while (!WorkList.empty()) {
1155 int i = WorkList.back();
1156 SIScheduleBlock *Block = CurrentBlocks[i];
1157 WorkList.pop_back();
1158 TopDownBlock2Index[i] = --Id;
1159 TopDownIndex2Block[Id] = i;
1160 for (SIScheduleBlock* Pred : Block->getPreds()) {
1161 if (!--TopDownBlock2Index[Pred->getID()])
1162 WorkList.push_back(Pred->getID());
1167 // Check correctness of the ordering.
1168 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1169 SIScheduleBlock *Block = CurrentBlocks[i];
1170 for (SIScheduleBlock* Pred : Block->getPreds()) {
1171 assert(TopDownBlock2Index[i] > TopDownBlock2Index[Pred->getID()] &&
1172 "Wrong Top Down topological sorting");
1177 BottomUpIndex2Block = std::vector<int>(TopDownIndex2Block.rbegin(),
1178 TopDownIndex2Block.rend());
1181 void SIScheduleBlockCreator::scheduleInsideBlocks() {
1182 unsigned DAGSize = CurrentBlocks.size();
1184 DEBUG(dbgs() << "\nScheduling Blocks\n\n");
  // We first produce a valid schedule such that each Block corresponds
  // to a contiguous range of instructions.
1188 DEBUG(dbgs() << "First phase: Fast scheduling for Reg Liveness\n");
1189 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1190 SIScheduleBlock *Block = CurrentBlocks[i];
1191 Block->fastSchedule();
  // Note: the following code, and the part restoring the previous position,
  // is by far the most expensive operation of the Scheduler.
1197 // Do not update CurrentTop.
1198 MachineBasicBlock::iterator CurrentTopFastSched = DAG->getCurrentTop();
1199 std::vector<MachineBasicBlock::iterator> PosOld;
1200 std::vector<MachineBasicBlock::iterator> PosNew;
1201 PosOld.reserve(DAG->SUnits.size());
1202 PosNew.reserve(DAG->SUnits.size());
1204 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1205 int BlockIndice = TopDownIndex2Block[i];
1206 SIScheduleBlock *Block = CurrentBlocks[BlockIndice];
1207 std::vector<SUnit*> SUs = Block->getScheduledUnits();
1209 for (SUnit* SU : SUs) {
1210 MachineInstr *MI = SU->getInstr();
1211 MachineBasicBlock::iterator Pos = MI;
1212 PosOld.push_back(Pos);
1213 if (&*CurrentTopFastSched == MI) {
1214 PosNew.push_back(Pos);
1215 CurrentTopFastSched = nextIfDebug(++CurrentTopFastSched,
1216 DAG->getCurrentBottom());
1218 // Update the instruction stream.
1219 DAG->getBB()->splice(CurrentTopFastSched, DAG->getBB(), MI);
1221 // Update LiveIntervals.
      // Note: Moving all instructions and calling handleMove every time
      // is the most CPU intensive operation of the scheduler.
      // It would gain a lot if there was a way to recompute the
      // LiveIntervals for the entire scheduling region.
1226 DAG->getLIS()->handleMove(MI, /*UpdateFlags=*/true);
1227 PosNew.push_back(CurrentTopFastSched);
1232 // Now we have Block of SUs == Block of MI.
1233 // We do the final schedule for the instructions inside the block.
1234 // The property that all the SUs of the Block are grouped together as MI
1235 // is used for correct reg usage tracking.
1236 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1237 SIScheduleBlock *Block = CurrentBlocks[i];
1238 std::vector<SUnit*> SUs = Block->getScheduledUnits();
1239 Block->schedule((*SUs.begin())->getInstr(), (*SUs.rbegin())->getInstr());
1242 DEBUG(dbgs() << "Restoring MI Pos\n");
1243 // Restore old ordering (which prevents a LIS->handleMove bug).
1244 for (unsigned i = PosOld.size(), e = 0; i != e; --i) {
1245 MachineBasicBlock::iterator POld = PosOld[i-1];
1246 MachineBasicBlock::iterator PNew = PosNew[i-1];
1248 // Update the instruction stream.
1249 DAG->getBB()->splice(POld, DAG->getBB(), PNew);
1251 // Update LiveIntervals.
1252 DAG->getLIS()->handleMove(POld, /*UpdateFlags=*/true);
1257 for (unsigned i = 0, e = CurrentBlocks.size(); i != e; ++i) {
1258 SIScheduleBlock *Block = CurrentBlocks[i];
1259 Block->printDebug(true);
1264 void SIScheduleBlockCreator::fillStats() {
1265 unsigned DAGSize = CurrentBlocks.size();
1267 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1268 int BlockIndice = TopDownIndex2Block[i];
1269 SIScheduleBlock *Block = CurrentBlocks[BlockIndice];
1270 if (Block->getPreds().size() == 0)
1274 for (SIScheduleBlock *Pred : Block->getPreds()) {
1275 if (Depth < Pred->Depth + 1)
1276 Depth = Pred->Depth + 1;
1278 Block->Depth = Depth;
1282 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1283 int BlockIndice = BottomUpIndex2Block[i];
1284 SIScheduleBlock *Block = CurrentBlocks[BlockIndice];
1285 if (Block->getSuccs().size() == 0)
1288 unsigned Height = 0;
1289 for (SIScheduleBlock *Succ : Block->getSuccs()) {
1290 if (Height < Succ->Height + 1)
1291 Height = Succ->Height + 1;
1293 Block->Height = Height;
1298 // SIScheduleBlockScheduler //
1300 SIScheduleBlockScheduler::SIScheduleBlockScheduler(SIScheduleDAGMI *DAG,
1301 SISchedulerBlockSchedulerVariant Variant,
1302 SIScheduleBlocks BlocksStruct) :
1303 DAG(DAG), Variant(Variant), Blocks(BlocksStruct.Blocks),
1304 LastPosWaitedHighLatency(0), NumBlockScheduled(0), VregCurrentUsage(0),
1305 SregCurrentUsage(0), maxVregUsage(0), maxSregUsage(0) {
1307 // Fill the usage of every output
1308 // Warning: while by construction we always have a link between two blocks
1309 // when one needs a result from the other, the number of users of an output
1310 // is not the sum of child blocks having as input the same virtual register.
1311 // Here is an example. A produces x and y. B eats x and produces x'.
1312 // C eats x' and y. The register coalescer may have attributed the same
1313 // virtual register to x and x'.
1314 // To count accurately, we do a topological sort. In case the register is
1315 // found for several parents, we increment the usage of the one with the
1316 // highest topological index.
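  // In the A/B/C example above, when processing the input x of C (the same
  // virtual register as x' after coalescing), both A and B export it; B has
  // the higher topological index, so only B's LiveOutRegsNumUsages entry for
  // that register is incremented.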
1317 LiveOutRegsNumUsages.resize(Blocks.size());
1318 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
1319 SIScheduleBlock *Block = Blocks[i];
1320 for (unsigned Reg : Block->getInRegs()) {
1323 for (SIScheduleBlock* Pred: Block->getPreds()) {
1324 std::set<unsigned> PredOutRegs = Pred->getOutRegs();
1325 std::set<unsigned>::iterator RegPos = PredOutRegs.find(Reg);
1327 if (RegPos != PredOutRegs.end()) {
1329 if (topoInd < BlocksStruct.TopDownBlock2Index[Pred->getID()]) {
1330 topoInd = BlocksStruct.TopDownBlock2Index[Pred->getID()];
1338 int PredID = BlocksStruct.TopDownIndex2Block[topoInd];
1339 std::map<unsigned, unsigned>::iterator RegPos =
1340 LiveOutRegsNumUsages[PredID].find(Reg);
1341 if (RegPos != LiveOutRegsNumUsages[PredID].end()) {
1342 ++LiveOutRegsNumUsages[PredID][Reg];
1344 LiveOutRegsNumUsages[PredID][Reg] = 1;
1349 LastPosHighLatencyParentScheduled.resize(Blocks.size(), 0);
1350 BlockNumPredsLeft.resize(Blocks.size());
1351 BlockNumSuccsLeft.resize(Blocks.size());
1353 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
1354 SIScheduleBlock *Block = Blocks[i];
1355 BlockNumPredsLeft[i] = Block->getPreds().size();
1356 BlockNumSuccsLeft[i] = Block->getSuccs().size();
1360 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
1361 SIScheduleBlock *Block = Blocks[i];
1362 assert(Block->getID() == i);
1366 std::set<unsigned> InRegs = DAG->getInRegs();
1367 addLiveRegs(InRegs);
1369 // Fill LiveRegsConsumers for regs that were already
1370 // defined before scheduling.
1371 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
1372 SIScheduleBlock *Block = Blocks[i];
1373 for (unsigned Reg : Block->getInRegs()) {
1375 for (SIScheduleBlock* Pred: Block->getPreds()) {
1376 std::set<unsigned> PredOutRegs = Pred->getOutRegs();
1377 std::set<unsigned>::iterator RegPos = PredOutRegs.find(Reg);
1379 if (RegPos != PredOutRegs.end()) {
1386 if (LiveRegsConsumers.find(Reg) == LiveRegsConsumers.end())
1387 LiveRegsConsumers[Reg] = 1;
1389 ++LiveRegsConsumers[Reg];
1394 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
1395 SIScheduleBlock *Block = Blocks[i];
1396 if (BlockNumPredsLeft[i] == 0) {
1397 ReadyBlocks.push_back(Block);
1401 while (SIScheduleBlock *Block = pickBlock()) {
1402 BlocksScheduled.push_back(Block);
1403 blockScheduled(Block);
1407 dbgs() << "Block Order:";
1408 for (SIScheduleBlock* Block : BlocksScheduled) {
1409 dbgs() << ' ' << Block->getID();
1414 bool SIScheduleBlockScheduler::tryCandidateLatency(SIBlockSchedCandidate &Cand,
1415 SIBlockSchedCandidate &TryCand) {
1416 if (!Cand.isValid()) {
1417 TryCand.Reason = NodeOrder;
1421 // Try to hide high latencies.
1422 if (tryLess(TryCand.LastPosHighLatParentScheduled,
1423 Cand.LastPosHighLatParentScheduled, TryCand, Cand, Latency))
1425 // Schedule high latencies early so you can hide them better.
1426 if (tryGreater(TryCand.IsHighLatency, Cand.IsHighLatency,
1427 TryCand, Cand, Latency))
1429 if (TryCand.IsHighLatency && tryGreater(TryCand.Height, Cand.Height,
1430 TryCand, Cand, Depth))
1432 if (tryGreater(TryCand.NumHighLatencySuccessors,
1433 Cand.NumHighLatencySuccessors,
1434 TryCand, Cand, Successor))
1439 bool SIScheduleBlockScheduler::tryCandidateRegUsage(SIBlockSchedCandidate &Cand,
1440 SIBlockSchedCandidate &TryCand) {
1441 if (!Cand.isValid()) {
1442 TryCand.Reason = NodeOrder;
1446 if (tryLess(TryCand.VGPRUsageDiff > 0, Cand.VGPRUsageDiff > 0,
1447 TryCand, Cand, RegUsage))
1449 if (tryGreater(TryCand.NumSuccessors > 0,
1450 Cand.NumSuccessors > 0,
1451 TryCand, Cand, Successor))
1453 if (tryGreater(TryCand.Height, Cand.Height, TryCand, Cand, Depth))
1455 if (tryLess(TryCand.VGPRUsageDiff, Cand.VGPRUsageDiff,
1456 TryCand, Cand, RegUsage))
1461 SIScheduleBlock *SIScheduleBlockScheduler::pickBlock() {
1462 SIBlockSchedCandidate Cand;
1463 std::vector<SIScheduleBlock*>::iterator Best;
1464 SIScheduleBlock *Block;
1465 if (ReadyBlocks.empty())
1468 DAG->fillVgprSgprCost(LiveRegs.begin(), LiveRegs.end(),
1469 VregCurrentUsage, SregCurrentUsage);
  if (VregCurrentUsage > maxVregUsage)
    maxVregUsage = VregCurrentUsage;
  if (SregCurrentUsage > maxSregUsage)
    maxSregUsage = SregCurrentUsage;
1475 dbgs() << "Picking New Blocks\n";
1476 dbgs() << "Available: ";
1477 for (SIScheduleBlock* Block : ReadyBlocks)
1478 dbgs() << Block->getID() << ' ';
1479 dbgs() << "\nCurrent Live:\n";
1480 for (unsigned Reg : LiveRegs)
1481 dbgs() << PrintVRegOrUnit(Reg, DAG->getTRI()) << ' ';
1483 dbgs() << "Current VGPRs: " << VregCurrentUsage << '\n';
1484 dbgs() << "Current SGPRs: " << SregCurrentUsage << '\n';
1487 Cand.Block = nullptr;
1488 for (std::vector<SIScheduleBlock*>::iterator I = ReadyBlocks.begin(),
1489 E = ReadyBlocks.end(); I != E; ++I) {
1490 SIBlockSchedCandidate TryCand;
1492 TryCand.IsHighLatency = TryCand.Block->isHighLatencyBlock();
1493 TryCand.VGPRUsageDiff =
1494 checkRegUsageImpact(TryCand.Block->getInRegs(),
1495 TryCand.Block->getOutRegs())[DAG->getVGPRSetID()];
1496 TryCand.NumSuccessors = TryCand.Block->getSuccs().size();
1497 TryCand.NumHighLatencySuccessors =
1498 TryCand.Block->getNumHighLatencySuccessors();
1499 TryCand.LastPosHighLatParentScheduled =
1500 (unsigned int) std::max<int> (0,
1501 LastPosHighLatencyParentScheduled[TryCand.Block->getID()] -
1502 LastPosWaitedHighLatency);
1503 TryCand.Height = TryCand.Block->Height;
1504 // Try not to increase VGPR usage too much, else we may spill.
1505 if (VregCurrentUsage > 120 ||
1506 Variant != SISchedulerBlockSchedulerVariant::BlockLatencyRegUsage) {
1507 if (!tryCandidateRegUsage(Cand, TryCand) &&
1508 Variant != SISchedulerBlockSchedulerVariant::BlockRegUsage)
1509 tryCandidateLatency(Cand, TryCand);
1511 if (!tryCandidateLatency(Cand, TryCand))
1512 tryCandidateRegUsage(Cand, TryCand);
1514 if (TryCand.Reason != NoCand) {
1515 Cand.setBest(TryCand);
1517 DEBUG(dbgs() << "Best Current Choice: " << Cand.Block->getID() << ' '
1518 << getReasonStr(Cand.Reason) << '\n');
1523 dbgs() << "Picking: " << Cand.Block->getID() << '\n';
1524 dbgs() << "Is a block with high latency instruction: "
1525 << (Cand.IsHighLatency ? "yes\n" : "no\n");
1526 dbgs() << "Position of last high latency dependency: "
1527 << Cand.LastPosHighLatParentScheduled << '\n';
1528 dbgs() << "VGPRUsageDiff: " << Cand.VGPRUsageDiff << '\n';
1533 ReadyBlocks.erase(Best);
1537 // Tracking of currently alive registers to determine VGPR Usage.
1539 void SIScheduleBlockScheduler::addLiveRegs(std::set<unsigned> &Regs) {
1540 for (unsigned Reg : Regs) {
1541 // For now only track virtual registers.
1542 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1544 // If not already in the live set, then add it.
1545 (void) LiveRegs.insert(Reg);
1549 void SIScheduleBlockScheduler::decreaseLiveRegs(SIScheduleBlock *Block,
1550 std::set<unsigned> &Regs) {
1551 for (unsigned Reg : Regs) {
1552 // For now only track virtual registers.
1553 std::set<unsigned>::iterator Pos = LiveRegs.find(Reg);
1554 assert (Pos != LiveRegs.end() && // Reg must be live.
1555 LiveRegsConsumers.find(Reg) != LiveRegsConsumers.end() &&
1556 LiveRegsConsumers[Reg] >= 1);
1557 --LiveRegsConsumers[Reg];
1558 if (LiveRegsConsumers[Reg] == 0)
1559 LiveRegs.erase(Pos);
1563 void SIScheduleBlockScheduler::releaseBlockSuccs(SIScheduleBlock *Parent) {
1564 for (SIScheduleBlock* Block : Parent->getSuccs()) {
1565 --BlockNumPredsLeft[Block->getID()];
1566 if (BlockNumPredsLeft[Block->getID()] == 0) {
1567 ReadyBlocks.push_back(Block);
    // TODO: Improve check. When the dependencies between the high latency
    // instructions and the instructions of the other blocks are WAR or WAW,
    // no wait will be triggered. We would like these cases to not
    // update LastPosHighLatencyParentScheduled.
1573 if (Parent->isHighLatencyBlock())
1574 LastPosHighLatencyParentScheduled[Block->getID()] = NumBlockScheduled;
1578 void SIScheduleBlockScheduler::blockScheduled(SIScheduleBlock *Block) {
1579 decreaseLiveRegs(Block, Block->getInRegs());
1580 addLiveRegs(Block->getOutRegs());
1581 releaseBlockSuccs(Block);
1582 for (std::map<unsigned, unsigned>::iterator RegI =
1583 LiveOutRegsNumUsages[Block->getID()].begin(),
1584 E = LiveOutRegsNumUsages[Block->getID()].end(); RegI != E; ++RegI) {
1585 std::pair<unsigned, unsigned> RegP = *RegI;
1586 if (LiveRegsConsumers.find(RegP.first) == LiveRegsConsumers.end())
1587 LiveRegsConsumers[RegP.first] = RegP.second;
1589 assert(LiveRegsConsumers[RegP.first] == 0);
1590 LiveRegsConsumers[RegP.first] += RegP.second;
1593 if (LastPosHighLatencyParentScheduled[Block->getID()] >
1594 (unsigned)LastPosWaitedHighLatency)
1595 LastPosWaitedHighLatency =
1596 LastPosHighLatencyParentScheduled[Block->getID()];
1597 ++NumBlockScheduled;
1601 SIScheduleBlockScheduler::checkRegUsageImpact(std::set<unsigned> &InRegs,
1602 std::set<unsigned> &OutRegs) {
1603 std::vector<int> DiffSetPressure;
1604 DiffSetPressure.assign(DAG->getTRI()->getNumRegPressureSets(), 0);
1606 for (unsigned Reg : InRegs) {
1607 // For now only track virtual registers.
1608 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1610 if (LiveRegsConsumers[Reg] > 1)
1612 PSetIterator PSetI = DAG->getMRI()->getPressureSets(Reg);
1613 for (; PSetI.isValid(); ++PSetI) {
1614 DiffSetPressure[*PSetI] -= PSetI.getWeight();
1618 for (unsigned Reg : OutRegs) {
1619 // For now only track virtual registers.
1620 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1622 PSetIterator PSetI = DAG->getMRI()->getPressureSets(Reg);
1623 for (; PSetI.isValid(); ++PSetI) {
1624 DiffSetPressure[*PSetI] += PSetI.getWeight();
1628 return DiffSetPressure;
1633 struct SIScheduleBlockResult
1634 SIScheduler::scheduleVariant(SISchedulerBlockCreatorVariant BlockVariant,
1635 SISchedulerBlockSchedulerVariant ScheduleVariant) {
1636 SIScheduleBlocks Blocks = BlockCreator.getBlocks(BlockVariant);
1637 SIScheduleBlockScheduler Scheduler(DAG, ScheduleVariant, Blocks);
1638 std::vector<SIScheduleBlock*> ScheduledBlocks;
1639 struct SIScheduleBlockResult Res;
1641 ScheduledBlocks = Scheduler.getBlocks();
1643 for (unsigned b = 0; b < ScheduledBlocks.size(); ++b) {
1644 SIScheduleBlock *Block = ScheduledBlocks[b];
1645 std::vector<SUnit*> SUs = Block->getScheduledUnits();
1647 for (SUnit* SU : SUs)
1648 Res.SUs.push_back(SU->NodeNum);
1651 Res.MaxSGPRUsage = Scheduler.getSGPRUsage();
1652 Res.MaxVGPRUsage = Scheduler.getVGPRUsage();
1656 // SIScheduleDAGMI //
1658 SIScheduleDAGMI::SIScheduleDAGMI(MachineSchedContext *C) :
1659 ScheduleDAGMILive(C, make_unique<GenericScheduler>(C)) {
1660 SITII = static_cast<const SIInstrInfo*>(TII);
1661 SITRI = static_cast<const SIRegisterInfo*>(TRI);
1663 VGPRSetID = SITRI->getVGPR32PressureSet();
1664 SGPRSetID = SITRI->getSGPR32PressureSet();
1667 SIScheduleDAGMI::~SIScheduleDAGMI() {
1670 ScheduleDAGInstrs *llvm::createSIMachineScheduler(MachineSchedContext *C) {
1671 return new SIScheduleDAGMI(C);
// Code adapted from ScheduleDAG.cpp.
// Does a topological sort over the SUs.
// Both TopDown and BottomUp.
1677 void SIScheduleDAGMI::topologicalSort() {
1678 std::vector<int> TopDownSU2Index;
1679 unsigned DAGSize = SUnits.size();
1680 std::vector<SUnit*> WorkList;
1682 DEBUG(dbgs() << "Topological Sort\n");
1683 WorkList.reserve(DAGSize);
1685 TopDownIndex2SU.resize(DAGSize);
1686 TopDownSU2Index.resize(DAGSize);
1687 BottomUpIndex2SU.resize(DAGSize);
1689 WorkList.push_back(&getExitSU());
1690 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1691 SUnit *SU = &SUnits[i];
1692 int NodeNum = SU->NodeNum;
1693 unsigned Degree = SU->Succs.size();
1694 TopDownSU2Index[NodeNum] = Degree;
1696 assert(SU->Succs.empty() && "SUnit should have no successors");
1697 WorkList.push_back(SU);
1702 while (!WorkList.empty()) {
1703 SUnit *SU = WorkList.back();
1704 WorkList.pop_back();
1705 if (SU->NodeNum < DAGSize) {
1706 TopDownSU2Index[SU->NodeNum] = --Id;
1707 TopDownIndex2SU[Id] = SU->NodeNum;
1709 for (SDep& Pred : SU->Preds) {
1710 SUnit *SU = Pred.getSUnit();
1711 if (SU->NodeNum < DAGSize && !--TopDownSU2Index[SU->NodeNum])
1712 WorkList.push_back(SU);
1716 BottomUpIndex2SU = std::vector<int>(TopDownIndex2SU.rbegin(),
1717 TopDownIndex2SU.rend());
1720 // Check correctness of the ordering
1721 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1722 SUnit *SU = &SUnits[i];
1723 for (SDep& Pred : SU->Preds) {
1724 if (Pred.getSUnit()->NodeNum >= DAGSize)
1726 assert(TopDownSU2Index[SU->NodeNum] >
1727 TopDownSU2Index[Pred.getSUnit()->NodeNum] &&
1728 "Wrong Top Down topological sorting");
1731 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1732 SUnit *SU = &SUnits[i];
1733 for (SDep& Succ : SU->Succs) {
1734 if (Succ.getSUnit()->NodeNum >= DAGSize)
1736 assert(TopDownSU2Index[SU->NodeNum] <
1737 TopDownSU2Index[Succ.getSUnit()->NodeNum] &&
1738 "Wrong Bottom Up topological sorting");
// Move low latencies further from their user without
// increasing SGPR usage (in general).
// This is to be replaced by a better pass that would
// take into account SGPR usage (based on VGPR usage
// and the corresponding wavefront count), that would
// try to merge groups of loads if it makes sense, etc.
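// Rough illustration of the intended effect (a simplified example, not an
// exact trace): given a scheduled order [addr, other, other, load, user],
// where 'load' is a low latency load whose only predecessor is 'addr', the
// pass moves 'load' to just after 'addr', giving
// [addr, load, other, other, user]; the independent 'other' instructions now
// sit between the load and its user and help hide its latency.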
1750 void SIScheduleDAGMI::moveLowLatencies() {
1751 unsigned DAGSize = SUnits.size();
1752 int LastLowLatencyUser = -1;
1753 int LastLowLatencyPos = -1;
1755 for (unsigned i = 0, e = ScheduledSUnits.size(); i != e; ++i) {
1756 SUnit *SU = &SUnits[ScheduledSUnits[i]];
1757 bool IsLowLatencyUser = false;
1758 unsigned MinPos = 0;
1760 for (SDep& PredDep : SU->Preds) {
1761 SUnit *Pred = PredDep.getSUnit();
1762 if (SITII->isLowLatencyInstruction(Pred->getInstr())) {
1763 IsLowLatencyUser = true;
1765 if (Pred->NodeNum >= DAGSize)
1767 unsigned PredPos = ScheduledSUnitsInv[Pred->NodeNum];
1768 if (PredPos >= MinPos)
1769 MinPos = PredPos + 1;
1772 if (SITII->isLowLatencyInstruction(SU->getInstr())) {
1773 unsigned BestPos = LastLowLatencyUser + 1;
1774 if ((int)BestPos <= LastLowLatencyPos)
1775 BestPos = LastLowLatencyPos + 1;
1776 if (BestPos < MinPos)
1779 for (unsigned u = i; u > BestPos; --u) {
1780 ++ScheduledSUnitsInv[ScheduledSUnits[u-1]];
1781 ScheduledSUnits[u] = ScheduledSUnits[u-1];
1783 ScheduledSUnits[BestPos] = SU->NodeNum;
1784 ScheduledSUnitsInv[SU->NodeNum] = BestPos;
1786 LastLowLatencyPos = BestPos;
1787 if (IsLowLatencyUser)
1788 LastLowLatencyUser = BestPos;
1789 } else if (IsLowLatencyUser) {
1790 LastLowLatencyUser = i;
    // Also move the COPY instructions on which
    // the low latency instructions depend.
1793 } else if (SU->getInstr()->getOpcode() == AMDGPU::COPY) {
1794 bool CopyForLowLat = false;
1795 for (SDep& SuccDep : SU->Succs) {
1796 SUnit *Succ = SuccDep.getSUnit();
1797 if (SITII->isLowLatencyInstruction(Succ->getInstr())) {
1798 CopyForLowLat = true;
1804 for (unsigned u = i; u > MinPos; --u) {
1805 ++ScheduledSUnitsInv[ScheduledSUnits[u-1]];
1806 ScheduledSUnits[u] = ScheduledSUnits[u-1];
1808 ScheduledSUnits[MinPos] = SU->NodeNum;
1809 ScheduledSUnitsInv[SU->NodeNum] = MinPos;
1815 void SIScheduleDAGMI::restoreSULinksLeft() {
1816 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1817 SUnits[i].isScheduled = false;
1818 SUnits[i].WeakPredsLeft = SUnitsLinksBackup[i].WeakPredsLeft;
1819 SUnits[i].NumPredsLeft = SUnitsLinksBackup[i].NumPredsLeft;
1820 SUnits[i].WeakSuccsLeft = SUnitsLinksBackup[i].WeakSuccsLeft;
1821 SUnits[i].NumSuccsLeft = SUnitsLinksBackup[i].NumSuccsLeft;
1825 // Return the Vgpr and Sgpr usage corresponding to some virtual registers.
template<typename Iterator> void
SIScheduleDAGMI::fillVgprSgprCost(Iterator First, Iterator End,
                                  unsigned &VgprUsage, unsigned &SgprUsage) {
  for (Iterator RegI = First; RegI != End; ++RegI) {
1832 unsigned Reg = *RegI;
1833 // For now only track virtual registers
1834 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1836 PSetIterator PSetI = MRI.getPressureSets(Reg);
1837 for (; PSetI.isValid(); ++PSetI) {
1838 if (*PSetI == VGPRSetID)
1839 VgprUsage += PSetI.getWeight();
1840 else if (*PSetI == SGPRSetID)
1841 SgprUsage += PSetI.getWeight();
1846 void SIScheduleDAGMI::schedule()
1848 SmallVector<SUnit*, 8> TopRoots, BotRoots;
1849 SIScheduleBlockResult Best, Temp;
1850 DEBUG(dbgs() << "Preparing Scheduling\n");
1852 buildDAGWithRegPressure();
1854 for(SUnit& SU : SUnits)
1858 Topo.InitDAGTopologicalSorting();
1860 findRootsAndBiasEdges(TopRoots, BotRoots);
  // We reuse several ScheduleDAGMI and ScheduleDAGMILive
  // functions, but to make them happy we must initialize
  // the default Scheduler implementation (even if we do not
  // run it at all).
1865 SchedImpl->initialize(this);
1866 initQueues(TopRoots, BotRoots);
1868 // Fill some stats to help scheduling.
1870 SUnitsLinksBackup = SUnits;
1871 IsLowLatencySU.clear();
1872 LowLatencyOffset.clear();
1873 IsHighLatencySU.clear();
1875 IsLowLatencySU.resize(SUnits.size(), 0);
1876 LowLatencyOffset.resize(SUnits.size(), 0);
1877 IsHighLatencySU.resize(SUnits.size(), 0);
1879 for (unsigned i = 0, e = (unsigned)SUnits.size(); i != e; ++i) {
1880 SUnit *SU = &SUnits[i];
1881 unsigned BaseLatReg, OffLatReg;
1882 if (SITII->isLowLatencyInstruction(SU->getInstr())) {
1883 IsLowLatencySU[i] = 1;
1884 if (SITII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseLatReg,
1886 LowLatencyOffset[i] = OffLatReg;
1887 } else if (SITII->isHighLatencyInstruction(SU->getInstr()))
1888 IsHighLatencySU[i] = 1;
1891 SIScheduler Scheduler(this);
1892 Best = Scheduler.scheduleVariant(SISchedulerBlockCreatorVariant::LatenciesAlone,
1893 SISchedulerBlockSchedulerVariant::BlockLatencyRegUsage);
#if 0 // To enable when the handleMove fix lands.
  // If VGPR usage is extremely high, try other well performing variants
  // which could lead to lower VGPR usage.
1897 if (Best.MaxVGPRUsage > 180) {
1898 std::vector<std::pair<SISchedulerBlockCreatorVariant, SISchedulerBlockSchedulerVariant>> Variants = {
1899 { LatenciesAlone, BlockRegUsageLatency },
1900 // { LatenciesAlone, BlockRegUsage },
1901 { LatenciesGrouped, BlockLatencyRegUsage },
1902 // { LatenciesGrouped, BlockRegUsageLatency },
1903 // { LatenciesGrouped, BlockRegUsage },
1904 { LatenciesAlonePlusConsecutive, BlockLatencyRegUsage },
1905 // { LatenciesAlonePlusConsecutive, BlockRegUsageLatency },
1906 // { LatenciesAlonePlusConsecutive, BlockRegUsage }
1908 for (std::pair<SISchedulerBlockCreatorVariant, SISchedulerBlockSchedulerVariant> v : Variants) {
1909 Temp = Scheduler.scheduleVariant(v.first, v.second);
1910 if (Temp.MaxVGPRUsage < Best.MaxVGPRUsage)
  // If VGPR usage is still extremely high, we may spill. Try other variants
  // which perform less well, but could lead to lower VGPR usage.
1916 if (Best.MaxVGPRUsage > 200) {
1917 std::vector<std::pair<SISchedulerBlockCreatorVariant, SISchedulerBlockSchedulerVariant>> Variants = {
1918 // { LatenciesAlone, BlockRegUsageLatency },
1919 { LatenciesAlone, BlockRegUsage },
1920 // { LatenciesGrouped, BlockLatencyRegUsage },
1921 { LatenciesGrouped, BlockRegUsageLatency },
1922 { LatenciesGrouped, BlockRegUsage },
1923 // { LatenciesAlonePlusConsecutive, BlockLatencyRegUsage },
1924 { LatenciesAlonePlusConsecutive, BlockRegUsageLatency },
1925 { LatenciesAlonePlusConsecutive, BlockRegUsage }
1927 for (std::pair<SISchedulerBlockCreatorVariant, SISchedulerBlockSchedulerVariant> v : Variants) {
1928 Temp = Scheduler.scheduleVariant(v.first, v.second);
1929 if (Temp.MaxVGPRUsage < Best.MaxVGPRUsage)
1934 ScheduledSUnits = Best.SUs;
1935 ScheduledSUnitsInv.resize(SUnits.size());
1937 for (unsigned i = 0, e = (unsigned)SUnits.size(); i != e; ++i) {
1938 ScheduledSUnitsInv[ScheduledSUnits[i]] = i;
1943 // Tell the outside world about the result of the scheduling.
1945 assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
1946 TopRPTracker.setPos(CurrentTop);
1948 for (std::vector<unsigned>::iterator I = ScheduledSUnits.begin(),
1949 E = ScheduledSUnits.end(); I != E; ++I) {
1950 SUnit *SU = &SUnits[*I];
1952 scheduleMI(SU, true);
1954 DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
1955 << *SU->getInstr());
1958 assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
1963 unsigned BBNum = begin()->getParent()->getNumber();
1964 dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";