1 //===-- SIMachineScheduler.cpp - SI Scheduler Interface -*- C++ -*-----===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
11 /// \brief SI Machine Scheduler interface
13 //===----------------------------------------------------------------------===//
16 #include "SIMachineScheduler.h"
17 #include "llvm/CodeGen/LiveInterval.h"
18 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/CodeGen/MachineScheduler.h"
21 #include "llvm/CodeGen/RegisterPressure.h"
25 #define DEBUG_TYPE "misched"
27 // This scheduler implements a different scheduling algorithm than
30 // There are several specific architecture behaviours that can't be modelled
31 // for GenericScheduler:
32 // . When accessing the result of an SGPR load instruction, you have to wait
33 // for all the SGPR load instructions before your current instruction to finish.
35 // . When accessing the result of a VGPR load instruction, you have to wait
36 // for all the VGPR load instructions previous to the VGPR load instruction
37 // you are interested in to finish.
38 // . The lower the register pressure, the better load latencies are hidden.
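// (This matches the hardware wait counters: scalar memory loads can return
// out of order, so using any SGPR load result means waiting for all
// outstanding scalar loads, while vector memory loads return in order and
// can be waited for individually.)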
40 // Moreover some specificities (like the fact that a lot of instructions in the shader
41 // have few dependencies) make the generic scheduler have some unpredictable
42 // behaviours. For example when register pressure becomes high, it can either
43 // manage to prevent register pressure from going too high, or it can
44 // increase register pressure even more than if it hadn't taken register
45 // pressure into account.
47 // Also some other bad behaviours are generated, like loading a constant into
48 // a VGPR at the beginning of the shader when it won't be needed until the end.
50 // The scheduling problem for SI can be divided into three main parts:
51 // . Hiding high latencies (texture sampling, etc)
52 // . Hiding low latencies (SGPR constant loading, etc)
53 // . Keeping register usage low for better latency hiding and general performance.
56 // Some other things can also affect performance, but are hard to predict
57 // (cache usage, the fact that the HW can issue several instructions from different
58 // wavefronts if they are of different types, etc.)
60 // This scheduler tries to solve the scheduling problem by dividing it into
61 // simpler sub-problems. It divides the instructions into blocks, schedules
62 // locally inside the blocks where it takes care of low latencies, and then
63 // chooses the order of the blocks by taking care of high latencies.
64 // Dividing the instructions into blocks helps keep register usage low.
67 // First the instructions are put into blocks.
68 // We want the blocks to help control register usage and hide high latencies
69 // later. To help control register usage, we typically want all local
70 // computations, when for example you create a result that can be consumed
71 // right away, to be contained in a block. Block inputs and outputs would
72 // typically be important results that are needed in several locations of
73 // the shader. Since we do want blocks to help hide high latencies, we want
74 // the instructions inside the block to have a minimal set of dependencies
75 // on high latencies. It will make it easy to pick blocks to hide specific high latencies.
77 // The block creation algorithm is divided into several steps, and several
78 // variants can be tried during the scheduling process.
80 // Second the order of the instructions inside the blocks is chosen.
81 // At that step we only take into account register usage and hiding
82 // low latency instructions.
84 // Third the block order is chosen; there we try to hide high latencies
85 // and keep register usage low.
87 // After the third step, a pass is done to improve the hiding of low latencies.
90 // Actually when talking about 'low latency' or 'high latency' it includes
91 // both the latency for the cache (or global memory) data to reach the register,
92 // and the bandwidth limitations.
93 // Increasing the number of active wavefronts helps hide the former, but it
94 // doesn't solve the latter, which is why even if the wavefront count is high,
95 // we have to try to have as many instructions hiding high latencies as possible.
96 // The OpenCL doc gives for example a latency of 400 cycles for a global memory access,
97 // which is hidden by 10 instructions if the wavefront count is 10.
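// (That is: 10 wavefronts x 10 independent instructions each x 4 cycles per
// instruction issue = 400 cycles covered while the access completes.)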
99 // Some figures taken from AMD docs:
100 // Both texture and constant L1 caches are 4-way associative with 64 byte lines.
102 // Constant cache is shared with 4 CUs.
103 // For texture sampling, the address generation unit receives 4 texture
104 // addresses per cycle, thus we could expect texture sampling latency to be
105 // equivalent to 4 instructions in the very best case (a VGPR is 64 work items,
106 // instructions in a wavefront group are executed every 4 cycles),
107 // or 16 instructions if the other wavefronts associated with the 3 other VALUs
108 // of the CU do texture sampling too. (Don't take these figures too seriously,
109 // as I'm not 100% sure of the computation)
110 // Data exports should get similar latency.
111 // For constant loading, the cache is shared with 4 CUs.
112 // The doc says "a throughput of 16B/cycle for each of the 4 Compute Unit"
113 // I guess if the other CUs don't read the cache, it can go up to 64B/cycle.
114 // It means a simple s_buffer_load should take one instruction to hide, as
115 // well as a s_buffer_loadx2, and potentially a s_buffer_loadx8 if on the same cache line.
118 // As of today the driver doesn't preload the constants in cache, thus the
119 // first loads get extra latency. The doc says global memory access can be
120 // 300-600 cycles. We do not specially take that into account when scheduling,
121 // as we expect the driver to be able to preload the constants soon.
128 static const char *getReasonStr(SIScheduleCandReason Reason) {
130 case NoCand: return "NOCAND";
131 case RegUsage: return "REGUSAGE";
132 case Latency: return "LATENCY";
133 case Successor: return "SUCCESSOR";
134 case Depth: return "DEPTH";
135 case NodeOrder: return "ORDER";
137 llvm_unreachable("Unknown reason!");
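// Candidate comparison helpers (similar in spirit to GenericScheduler's):
// prefer the candidate with the smaller (tryLess) or greater (tryGreater)
// value for the given criterion and record the reason; on a tie the reason
// is marked as repeated and the caller falls through to the next criterion.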
142 static bool tryLess(int TryVal, int CandVal,
143 SISchedulerCandidate &TryCand,
144 SISchedulerCandidate &Cand,
145 SIScheduleCandReason Reason) {
146 if (TryVal < CandVal) {
147 TryCand.Reason = Reason;
150 if (TryVal > CandVal) {
151 if (Cand.Reason > Reason)
152 Cand.Reason = Reason;
155 Cand.setRepeat(Reason);
159 static bool tryGreater(int TryVal, int CandVal,
160 SISchedulerCandidate &TryCand,
161 SISchedulerCandidate &Cand,
162 SIScheduleCandReason Reason) {
163 if (TryVal > CandVal) {
164 TryCand.Reason = Reason;
167 if (TryVal < CandVal) {
168 if (Cand.Reason > Reason)
169 Cand.Reason = Reason;
172 Cand.setRepeat(Reason);
176 // SIScheduleBlock //
178 void SIScheduleBlock::addUnit(SUnit *SU) {
179 NodeNum2Index[SU->NodeNum] = SUnits.size();
180 SUnits.push_back(SU);
185 void SIScheduleBlock::traceCandidate(const SISchedCandidate &Cand) {
187 dbgs() << " SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
192 void SIScheduleBlock::tryCandidateTopDown(SISchedCandidate &Cand,
193 SISchedCandidate &TryCand) {
194 // Initialize the candidate if needed.
195 if (!Cand.isValid()) {
196 TryCand.Reason = NodeOrder;
200 if (Cand.SGPRUsage > 60 &&
201 tryLess(TryCand.SGPRUsage, Cand.SGPRUsage, TryCand, Cand, RegUsage))
204 // Schedule low latency instructions as close to the top as possible.
205 // Order of priority is:
206 // . Low latency instructions which do not depend on other low latency
207 // instructions we haven't waited for
208 // . Other instructions which do not depend on low latency instructions
209 // we haven't waited for
211 // . All other instructions
212 // Goal is to get: low latency instructions - independent instructions
213 // - (eventually some more low latency instructions)
214 // - instructions that depend on the first low latency instructions.
215 // If the block contains a lot of constant loads, the SGPR usage
216 // could go quite high, thus the arbitrary limit of 60 above encourages
217 // using the already loaded constants (in order to release some SGPRs) before loading more.
219 if (tryLess(TryCand.HasLowLatencyNonWaitedParent,
220 Cand.HasLowLatencyNonWaitedParent,
221 TryCand, Cand, SIScheduleCandReason::Depth))
224 if (tryGreater(TryCand.IsLowLatency, Cand.IsLowLatency,
225 TryCand, Cand, SIScheduleCandReason::Depth))
228 if (TryCand.IsLowLatency &&
229 tryLess(TryCand.LowLatencyOffset, Cand.LowLatencyOffset,
230 TryCand, Cand, SIScheduleCandReason::Depth))
233 if (tryLess(TryCand.VGPRUsage, Cand.VGPRUsage, TryCand, Cand, RegUsage))
236 // Fall through to original instruction order.
237 if (TryCand.SU->NodeNum < Cand.SU->NodeNum) {
238 TryCand.Reason = NodeOrder;
242 SUnit* SIScheduleBlock::pickNode() {
243 SISchedCandidate TopCand;
245 for (SUnit* SU : TopReadySUs) {
246 SISchedCandidate TryCand;
247 std::vector<unsigned> pressure;
248 std::vector<unsigned> MaxPressure;
249 // Predict register usage after this instruction.
251 TopRPTracker.getDownwardPressure(SU->getInstr(), pressure, MaxPressure);
252 TryCand.SGPRUsage = pressure[DAG->getSGPRSetID()];
253 TryCand.VGPRUsage = pressure[DAG->getVGPRSetID()];
254 TryCand.IsLowLatency = DAG->IsLowLatencySU[SU->NodeNum];
255 TryCand.LowLatencyOffset = DAG->LowLatencyOffset[SU->NodeNum];
256 TryCand.HasLowLatencyNonWaitedParent =
257 HasLowLatencyNonWaitedParent[NodeNum2Index[SU->NodeNum]];
258 tryCandidateTopDown(TopCand, TryCand);
259 if (TryCand.Reason != NoCand)
260 TopCand.setBest(TryCand);
267 // Schedule something valid.
268 void SIScheduleBlock::fastSchedule() {
273 for (SUnit* SU : SUnits) {
274 if (!SU->NumPredsLeft)
275 TopReadySUs.push_back(SU);
278 while (!TopReadySUs.empty()) {
279 SUnit *SU = TopReadySUs[0];
280 ScheduledSUnits.push_back(SU);
287 // Returns whether the register was defined between First and Last.
288 static bool isDefBetween(unsigned Reg,
289 SlotIndex First, SlotIndex Last,
290 const MachineRegisterInfo *MRI,
291 const LiveIntervals *LIS) {
292 for (MachineRegisterInfo::def_instr_iterator
293 UI = MRI->def_instr_begin(Reg),
294 UE = MRI->def_instr_end(); UI != UE; ++UI) {
295 const MachineInstr* MI = &*UI;
296 if (MI->isDebugValue())
298 SlotIndex InstSlot = LIS->getInstructionIndex(*MI).getRegSlot();
299 if (InstSlot >= First && InstSlot <= Last)
305 void SIScheduleBlock::initRegPressure(MachineBasicBlock::iterator BeginBlock,
306 MachineBasicBlock::iterator EndBlock) {
307 IntervalPressure Pressure, BotPressure;
308 RegPressureTracker RPTracker(Pressure), BotRPTracker(BotPressure);
309 LiveIntervals *LIS = DAG->getLIS();
310 MachineRegisterInfo *MRI = DAG->getMRI();
311 DAG->initRPTracker(TopRPTracker);
312 DAG->initRPTracker(BotRPTracker);
313 DAG->initRPTracker(RPTracker);
315 // Goes through all SUs. The RPTracker captures what had to be alive for the SUs
316 // to execute, and what is still alive at the end.
317 for (SUnit* SU : ScheduledSUnits) {
318 RPTracker.setPos(SU->getInstr());
322 // Close the RPTracker to finalize live ins/outs.
323 RPTracker.closeRegion();
325 // Initialize the live ins and live outs.
326 TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
327 BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);
329 // Do not track physical registers, because doing so messes up the tracking.
330 for (const auto &RegMaskPair : RPTracker.getPressure().LiveInRegs) {
331 if (TargetRegisterInfo::isVirtualRegister(RegMaskPair.RegUnit))
332 LiveInRegs.insert(RegMaskPair.RegUnit);
335 // There are several possibilities to distinguish:
336 // 1) Reg is not input to any instruction in the block, but is output of one
337 // 2) 1) + read in the block and not needed after it
338 // 3) 1) + read in the block but needed in another block
339 // 4) Reg is input of an instruction but another block will read it too
340 // 5) Reg is input of an instruction and then rewritten in the block.
341 // result is not read in the block (implies used in another block)
342 // 6) Reg is input of an instruction and then rewritten in the block.
343 // result is read in the block and not needed in another block
344 // 7) Reg is input of an instruction and then rewritten in the block.
345 // result is read in the block but also needed in another block
346 // LiveInRegs will contain all the regs in situations 4, 5, 6 and 7.
347 // We want LiveOutRegs to contain only regs whose content will be read later
348 // in another block, and whose content was written in the current block,
349 // that is we want it to contain cases 1, 3, 5 and 7.
350 // Since we packed the MIs of a block all together before scheduling,
351 // the LiveIntervals were correct, and the RPTracker was
352 // able to correctly handle 5 vs 6, and 2 vs 3.
353 // (Note: this is not sufficient for the RPTracker to avoid mistakes for case 4.)
354 // The RPTracker's LiveOutRegs has 1, 3, (some correct and some incorrect) 4, 5, 7.
355 // Comparing to LiveInRegs is not sufficient to differentiate 4 vs 5 and 7.
356 // The use of isDefBetween removes case 4.
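// (Example of case 4: a register defined in an earlier block is read both by
// an instruction of this block and by a later block; it belongs to LiveInRegs
// but must not end up in LiveOutRegs, since this block does not define it.)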
357 for (const auto &RegMaskPair : RPTracker.getPressure().LiveOutRegs) {
358 unsigned Reg = RegMaskPair.RegUnit;
359 if (TargetRegisterInfo::isVirtualRegister(Reg) &&
360 isDefBetween(Reg, LIS->getInstructionIndex(*BeginBlock).getRegSlot(),
361 LIS->getInstructionIndex(*EndBlock).getRegSlot(), MRI,
363 LiveOutRegs.insert(Reg);
367 // Pressure = sum_alive_registers register size
368 // Internally LLVM will represent some registers as big 128-bit registers,
369 // for example, but they actually correspond to 4 actual 32-bit registers.
370 // Thus Pressure is not equal to num_alive_registers * constant.
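// (For example a single live 128-bit register contributes 4 to the VGPR
// pressure set, just like four live 32-bit VGPRs would.)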
371 LiveInPressure = TopPressure.MaxSetPressure;
372 LiveOutPressure = BotPressure.MaxSetPressure;
374 // Prepares TopRPTracker for top down scheduling.
375 TopRPTracker.closeTop();
378 void SIScheduleBlock::schedule(MachineBasicBlock::iterator BeginBlock,
379 MachineBasicBlock::iterator EndBlock) {
383 // PreScheduling phase to set LiveIn and LiveOut.
384 initRegPressure(BeginBlock, EndBlock);
387 // Schedule for real now.
391 for (SUnit* SU : SUnits) {
392 if (!SU->NumPredsLeft)
393 TopReadySUs.push_back(SU);
396 while (!TopReadySUs.empty()) {
397 SUnit *SU = pickNode();
398 ScheduledSUnits.push_back(SU);
399 TopRPTracker.setPos(SU->getInstr());
400 TopRPTracker.advance();
404 // TODO: compute InternalAdditionnalPressure.
405 InternalAdditionnalPressure.resize(TopPressure.MaxSetPressure.size());
407 // Check everything is right.
409 assert(SUnits.size() == ScheduledSUnits.size() &&
410 TopReadySUs.empty());
411 for (SUnit* SU : SUnits) {
412 assert(SU->isScheduled &&
413 SU->NumPredsLeft == 0);
420 void SIScheduleBlock::undoSchedule() {
421 for (SUnit* SU : SUnits) {
422 SU->isScheduled = false;
423 for (SDep& Succ : SU->Succs) {
424 if (BC->isSUInBlock(Succ.getSUnit(), ID))
425 undoReleaseSucc(SU, &Succ);
428 HasLowLatencyNonWaitedParent.assign(SUnits.size(), 0);
429 ScheduledSUnits.clear();
433 void SIScheduleBlock::undoReleaseSucc(SUnit *SU, SDep *SuccEdge) {
434 SUnit *SuccSU = SuccEdge->getSUnit();
436 if (SuccEdge->isWeak()) {
437 ++SuccSU->WeakPredsLeft;
440 ++SuccSU->NumPredsLeft;
443 void SIScheduleBlock::releaseSucc(SUnit *SU, SDep *SuccEdge) {
444 SUnit *SuccSU = SuccEdge->getSUnit();
446 if (SuccEdge->isWeak()) {
447 --SuccSU->WeakPredsLeft;
451 if (SuccSU->NumPredsLeft == 0) {
452 dbgs() << "*** Scheduling failed! ***\n";
454 dbgs() << " has been released too many times!\n";
455 llvm_unreachable(nullptr);
459 --SuccSU->NumPredsLeft;
462 /// Release the successors of the SU that are in the block if InOrOutBlock is true, or those outside the block otherwise.
463 void SIScheduleBlock::releaseSuccessors(SUnit *SU, bool InOrOutBlock) {
464 for (SDep& Succ : SU->Succs) {
465 SUnit *SuccSU = Succ.getSUnit();
467 if (SuccSU->NodeNum >= DAG->SUnits.size())
470 if (BC->isSUInBlock(SuccSU, ID) != InOrOutBlock)
473 releaseSucc(SU, &Succ);
474 if (SuccSU->NumPredsLeft == 0 && InOrOutBlock)
475 TopReadySUs.push_back(SuccSU);
479 void SIScheduleBlock::nodeScheduled(SUnit *SU) {
481 assert (!SU->NumPredsLeft);
482 std::vector<SUnit*>::iterator I =
483 std::find(TopReadySUs.begin(), TopReadySUs.end(), SU);
484 if (I == TopReadySUs.end()) {
485 dbgs() << "Data Structure Bug in SI Scheduler\n";
486 llvm_unreachable(nullptr);
488 TopReadySUs.erase(I);
490 releaseSuccessors(SU, true);
491 // Scheduling this node will trigger a wait,
492 // thus propagate to other instructions that they do not need to wait either.
493 if (HasLowLatencyNonWaitedParent[NodeNum2Index[SU->NodeNum]])
494 HasLowLatencyNonWaitedParent.assign(SUnits.size(), 0);
496 if (DAG->IsLowLatencySU[SU->NodeNum]) {
497 for (SDep& Succ : SU->Succs) {
498 std::map<unsigned, unsigned>::iterator I =
499 NodeNum2Index.find(Succ.getSUnit()->NodeNum);
500 if (I != NodeNum2Index.end())
501 HasLowLatencyNonWaitedParent[I->second] = 1;
504 SU->isScheduled = true;
507 void SIScheduleBlock::finalizeUnits() {
508 // We remove links from outside blocks to enable scheduling inside the block.
509 for (SUnit* SU : SUnits) {
510 releaseSuccessors(SU, false);
511 if (DAG->IsHighLatencySU[SU->NodeNum])
512 HighLatencyBlock = true;
514 HasLowLatencyNonWaitedParent.resize(SUnits.size(), 0);
517 // We maintain ascending order of IDs.
518 void SIScheduleBlock::addPred(SIScheduleBlock *Pred) {
519 unsigned PredID = Pred->getID();
521 // Check if not already predecessor.
522 for (SIScheduleBlock* P : Preds) {
523 if (PredID == P->getID())
526 Preds.push_back(Pred);
528 assert(none_of(Succs,
529 [=](SIScheduleBlock *S) { return PredID == S->getID(); }) &&
530 "Loop in the Block Graph!");
533 void SIScheduleBlock::addSucc(SIScheduleBlock *Succ) {
534 unsigned SuccID = Succ->getID();
536 // Check if not already a successor.
537 for (SIScheduleBlock* S : Succs) {
538 if (SuccID == S->getID())
541 if (Succ->isHighLatencyBlock())
542 ++NumHighLatencySuccessors;
543 Succs.push_back(Succ);
544 assert(none_of(Preds,
545 [=](SIScheduleBlock *P) { return SuccID == P->getID(); }) &&
546 "Loop in the Block Graph!");
550 void SIScheduleBlock::printDebug(bool full) {
551 dbgs() << "Block (" << ID << ")\n";
555 dbgs() << "\nContains High Latency Instruction: "
556 << HighLatencyBlock << '\n';
557 dbgs() << "\nDepends On:\n";
558 for (SIScheduleBlock* P : Preds) {
559 P->printDebug(false);
562 dbgs() << "\nSuccessors:\n";
563 for (SIScheduleBlock* S : Succs) {
564 S->printDebug(false);
568 dbgs() << "LiveInPressure " << LiveInPressure[DAG->getSGPRSetID()] << ' '
569 << LiveInPressure[DAG->getVGPRSetID()] << '\n';
570 dbgs() << "LiveOutPressure " << LiveOutPressure[DAG->getSGPRSetID()] << ' '
571 << LiveOutPressure[DAG->getVGPRSetID()] << "\n\n";
572 dbgs() << "LiveIns:\n";
573 for (unsigned Reg : LiveInRegs)
574 dbgs() << PrintVRegOrUnit(Reg, DAG->getTRI()) << ' ';
576 dbgs() << "\nLiveOuts:\n";
577 for (unsigned Reg : LiveOutRegs)
578 dbgs() << PrintVRegOrUnit(Reg, DAG->getTRI()) << ' ';
581 dbgs() << "\nInstructions:\n";
583 for (SUnit* SU : SUnits) {
587 for (SUnit* SU : SUnits) {
592 dbgs() << "///////////////////////\n";
597 // SIScheduleBlockCreator //
599 SIScheduleBlockCreator::SIScheduleBlockCreator(SIScheduleDAGMI *DAG) :
603 SIScheduleBlockCreator::~SIScheduleBlockCreator() {
607 SIScheduleBlockCreator::getBlocks(SISchedulerBlockCreatorVariant BlockVariant) {
608 std::map<SISchedulerBlockCreatorVariant, SIScheduleBlocks>::iterator B =
609 Blocks.find(BlockVariant);
610 if (B == Blocks.end()) {
611 SIScheduleBlocks Res;
612 createBlocksForVariant(BlockVariant);
614 scheduleInsideBlocks();
616 Res.Blocks = CurrentBlocks;
617 Res.TopDownIndex2Block = TopDownIndex2Block;
618 Res.TopDownBlock2Index = TopDownBlock2Index;
619 Blocks[BlockVariant] = Res;
626 bool SIScheduleBlockCreator::isSUInBlock(SUnit *SU, unsigned ID) {
627 if (SU->NodeNum >= DAG->SUnits.size())
629 return CurrentBlocks[Node2CurrentBlock[SU->NodeNum]]->getID() == ID;
632 void SIScheduleBlockCreator::colorHighLatenciesAlone() {
633 unsigned DAGSize = DAG->SUnits.size();
635 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
636 SUnit *SU = &DAG->SUnits[i];
637 if (DAG->IsHighLatencySU[SU->NodeNum]) {
638 CurrentColoring[SU->NodeNum] = NextReservedID++;
643 void SIScheduleBlockCreator::colorHighLatenciesGroups() {
644 unsigned DAGSize = DAG->SUnits.size();
645 unsigned NumHighLatencies = 0;
647 unsigned Color = NextReservedID;
649 std::set<unsigned> FormingGroup;
651 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
652 SUnit *SU = &DAG->SUnits[i];
653 if (DAG->IsHighLatencySU[SU->NodeNum])
657 if (NumHighLatencies == 0)
660 if (NumHighLatencies <= 6)
662 else if (NumHighLatencies <= 12)
667 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
668 SUnit *SU = &DAG->SUnits[i];
669 if (DAG->IsHighLatencySU[SU->NodeNum]) {
670 bool CompatibleGroup = true;
671 unsigned ProposedColor = Color;
672 for (unsigned j : FormingGroup) {
673 // TODO: Currently CompatibleGroup will always be false,
674 // because the graph enforces the load order. This
675 // can be fixed, but since keeping the load order is often
676 // good for performance, relaxing it causes a performance hit
677 // (for both the default scheduler and this scheduler).
678 // When this scheduler determines a good load order,
679 // this can be fixed.
680 if (!DAG->canAddEdge(SU, &DAG->SUnits[j]) ||
681 !DAG->canAddEdge(&DAG->SUnits[j], SU))
682 CompatibleGroup = false;
684 if (!CompatibleGroup || ++Count == GroupSize) {
685 FormingGroup.clear();
686 Color = ++NextReservedID;
687 if (!CompatibleGroup) {
688 ProposedColor = Color;
689 FormingGroup.insert(SU->NodeNum);
693 FormingGroup.insert(SU->NodeNum);
695 CurrentColoring[SU->NodeNum] = ProposedColor;
700 void SIScheduleBlockCreator::colorComputeReservedDependencies() {
701 unsigned DAGSize = DAG->SUnits.size();
702 std::map<std::set<unsigned>, unsigned> ColorCombinations;
704 CurrentTopDownReservedDependencyColoring.clear();
705 CurrentBottomUpReservedDependencyColoring.clear();
707 CurrentTopDownReservedDependencyColoring.resize(DAGSize, 0);
708 CurrentBottomUpReservedDependencyColoring.resize(DAGSize, 0);
710 // Traverse TopDown, and give different colors to SUs depending
711 // on which combination of High Latencies they depend on.
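// (For example, two SUs that both depend, directly or transitively, on the
// same pair of high latency loads get the same color, while an SU depending
// on only one of those loads gets a different color.)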
713 for (unsigned SUNum : DAG->TopDownIndex2SU) {
714 SUnit *SU = &DAG->SUnits[SUNum];
715 std::set<unsigned> SUColors;
718 if (CurrentColoring[SU->NodeNum]) {
719 CurrentTopDownReservedDependencyColoring[SU->NodeNum] =
720 CurrentColoring[SU->NodeNum];
724 for (SDep& PredDep : SU->Preds) {
725 SUnit *Pred = PredDep.getSUnit();
726 if (PredDep.isWeak() || Pred->NodeNum >= DAGSize)
728 if (CurrentTopDownReservedDependencyColoring[Pred->NodeNum] > 0)
729 SUColors.insert(CurrentTopDownReservedDependencyColoring[Pred->NodeNum]);
731 // Color 0 by default.
732 if (SUColors.empty())
734 // Same color as parents.
735 if (SUColors.size() == 1 && *SUColors.begin() > DAGSize)
736 CurrentTopDownReservedDependencyColoring[SU->NodeNum] =
739 std::map<std::set<unsigned>, unsigned>::iterator Pos =
740 ColorCombinations.find(SUColors);
741 if (Pos != ColorCombinations.end()) {
742 CurrentTopDownReservedDependencyColoring[SU->NodeNum] = Pos->second;
744 CurrentTopDownReservedDependencyColoring[SU->NodeNum] =
746 ColorCombinations[SUColors] = NextNonReservedID++;
751 ColorCombinations.clear();
753 // Same as before, but BottomUp.
755 for (unsigned SUNum : DAG->BottomUpIndex2SU) {
756 SUnit *SU = &DAG->SUnits[SUNum];
757 std::set<unsigned> SUColors;
760 if (CurrentColoring[SU->NodeNum]) {
761 CurrentBottomUpReservedDependencyColoring[SU->NodeNum] =
762 CurrentColoring[SU->NodeNum];
766 for (SDep& SuccDep : SU->Succs) {
767 SUnit *Succ = SuccDep.getSUnit();
768 if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
770 if (CurrentBottomUpReservedDependencyColoring[Succ->NodeNum] > 0)
771 SUColors.insert(CurrentBottomUpReservedDependencyColoring[Succ->NodeNum]);
774 if (SUColors.empty())
776 // Same color as parents.
777 if (SUColors.size() == 1 && *SUColors.begin() > DAGSize)
778 CurrentBottomUpReservedDependencyColoring[SU->NodeNum] =
781 std::map<std::set<unsigned>, unsigned>::iterator Pos =
782 ColorCombinations.find(SUColors);
783 if (Pos != ColorCombinations.end()) {
784 CurrentBottomUpReservedDependencyColoring[SU->NodeNum] = Pos->second;
786 CurrentBottomUpReservedDependencyColoring[SU->NodeNum] =
788 ColorCombinations[SUColors] = NextNonReservedID++;
794 void SIScheduleBlockCreator::colorAccordingToReservedDependencies() {
795 unsigned DAGSize = DAG->SUnits.size();
796 std::map<std::pair<unsigned, unsigned>, unsigned> ColorCombinations;
798 // Every combination of colors given by the top down
799 // and bottom up reserved node dependency colorings gives a new color.
801 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
802 SUnit *SU = &DAG->SUnits[i];
803 std::pair<unsigned, unsigned> SUColors;
805 // High latency instructions: already given.
806 if (CurrentColoring[SU->NodeNum])
809 SUColors.first = CurrentTopDownReservedDependencyColoring[SU->NodeNum];
810 SUColors.second = CurrentBottomUpReservedDependencyColoring[SU->NodeNum];
812 std::map<std::pair<unsigned, unsigned>, unsigned>::iterator Pos =
813 ColorCombinations.find(SUColors);
814 if (Pos != ColorCombinations.end()) {
815 CurrentColoring[SU->NodeNum] = Pos->second;
817 CurrentColoring[SU->NodeNum] = NextNonReservedID;
818 ColorCombinations[SUColors] = NextNonReservedID++;
823 void SIScheduleBlockCreator::colorEndsAccordingToDependencies() {
824 unsigned DAGSize = DAG->SUnits.size();
825 std::vector<int> PendingColoring = CurrentColoring;
827 for (unsigned SUNum : DAG->BottomUpIndex2SU) {
828 SUnit *SU = &DAG->SUnits[SUNum];
829 std::set<unsigned> SUColors;
830 std::set<unsigned> SUColorsPending;
832 if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
835 if (CurrentBottomUpReservedDependencyColoring[SU->NodeNum] > 0 ||
836 CurrentTopDownReservedDependencyColoring[SU->NodeNum] > 0)
839 for (SDep& SuccDep : SU->Succs) {
840 SUnit *Succ = SuccDep.getSUnit();
841 if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
843 if (CurrentBottomUpReservedDependencyColoring[Succ->NodeNum] > 0 ||
844 CurrentTopDownReservedDependencyColoring[Succ->NodeNum] > 0)
845 SUColors.insert(CurrentColoring[Succ->NodeNum]);
846 SUColorsPending.insert(PendingColoring[Succ->NodeNum]);
848 if (SUColors.size() == 1 && SUColorsPending.size() == 1)
849 PendingColoring[SU->NodeNum] = *SUColors.begin();
850 else // TODO: Attribute new colors depending on color
851 // combination of children.
852 PendingColoring[SU->NodeNum] = NextNonReservedID++;
854 CurrentColoring = PendingColoring;
858 void SIScheduleBlockCreator::colorForceConsecutiveOrderInGroup() {
859 unsigned DAGSize = DAG->SUnits.size();
860 unsigned PreviousColor;
861 std::set<unsigned> SeenColors;
866 PreviousColor = CurrentColoring[0];
868 for (unsigned i = 1, e = DAGSize; i != e; ++i) {
869 SUnit *SU = &DAG->SUnits[i];
870 unsigned CurrentColor = CurrentColoring[i];
871 unsigned PreviousColorSave = PreviousColor;
872 assert(i == SU->NodeNum);
874 if (CurrentColor != PreviousColor)
875 SeenColors.insert(PreviousColor);
876 PreviousColor = CurrentColor;
878 if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
881 if (SeenColors.find(CurrentColor) == SeenColors.end())
884 if (PreviousColorSave != CurrentColor)
885 CurrentColoring[i] = NextNonReservedID++;
887 CurrentColoring[i] = CurrentColoring[i-1];
891 void SIScheduleBlockCreator::colorMergeConstantLoadsNextGroup() {
892 unsigned DAGSize = DAG->SUnits.size();
894 for (unsigned SUNum : DAG->BottomUpIndex2SU) {
895 SUnit *SU = &DAG->SUnits[SUNum];
896 std::set<unsigned> SUColors;
898 if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
901 // No predecessor: Vgpr constant loading.
902 // Low latency instructions usually have a predecessor (the address)
903 if (SU->Preds.size() > 0 && !DAG->IsLowLatencySU[SU->NodeNum])
906 for (SDep& SuccDep : SU->Succs) {
907 SUnit *Succ = SuccDep.getSUnit();
908 if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
910 SUColors.insert(CurrentColoring[Succ->NodeNum]);
912 if (SUColors.size() == 1)
913 CurrentColoring[SU->NodeNum] = *SUColors.begin();
917 void SIScheduleBlockCreator::colorMergeIfPossibleNextGroup() {
918 unsigned DAGSize = DAG->SUnits.size();
920 for (unsigned SUNum : DAG->BottomUpIndex2SU) {
921 SUnit *SU = &DAG->SUnits[SUNum];
922 std::set<unsigned> SUColors;
924 if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
927 for (SDep& SuccDep : SU->Succs) {
928 SUnit *Succ = SuccDep.getSUnit();
929 if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
931 SUColors.insert(CurrentColoring[Succ->NodeNum]);
933 if (SUColors.size() == 1)
934 CurrentColoring[SU->NodeNum] = *SUColors.begin();
938 void SIScheduleBlockCreator::colorMergeIfPossibleNextGroupOnlyForReserved() {
939 unsigned DAGSize = DAG->SUnits.size();
941 for (unsigned SUNum : DAG->BottomUpIndex2SU) {
942 SUnit *SU = &DAG->SUnits[SUNum];
943 std::set<unsigned> SUColors;
945 if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
948 for (SDep& SuccDep : SU->Succs) {
949 SUnit *Succ = SuccDep.getSUnit();
950 if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
952 SUColors.insert(CurrentColoring[Succ->NodeNum]);
954 if (SUColors.size() == 1 && *SUColors.begin() <= DAGSize)
955 CurrentColoring[SU->NodeNum] = *SUColors.begin();
959 void SIScheduleBlockCreator::colorMergeIfPossibleSmallGroupsToNextGroup() {
960 unsigned DAGSize = DAG->SUnits.size();
961 std::map<unsigned, unsigned> ColorCount;
963 for (unsigned SUNum : DAG->BottomUpIndex2SU) {
964 SUnit *SU = &DAG->SUnits[SUNum];
965 unsigned color = CurrentColoring[SU->NodeNum];
966 std::map<unsigned, unsigned>::iterator Pos = ColorCount.find(color);
967 if (Pos != ColorCount.end()) {
970 ColorCount[color] = 1;
974 for (unsigned SUNum : DAG->BottomUpIndex2SU) {
975 SUnit *SU = &DAG->SUnits[SUNum];
976 unsigned color = CurrentColoring[SU->NodeNum];
977 std::set<unsigned> SUColors;
979 if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
982 if (ColorCount[color] > 1)
985 for (SDep& SuccDep : SU->Succs) {
986 SUnit *Succ = SuccDep.getSUnit();
987 if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
989 SUColors.insert(CurrentColoring[Succ->NodeNum]);
991 if (SUColors.size() == 1 && *SUColors.begin() != color) {
993 CurrentColoring[SU->NodeNum] = *SUColors.begin();
994 ++ColorCount[*SUColors.begin()];
999 void SIScheduleBlockCreator::cutHugeBlocks() {
1003 void SIScheduleBlockCreator::regroupNoUserInstructions() {
1004 unsigned DAGSize = DAG->SUnits.size();
1005 int GroupID = NextNonReservedID++;
1007 for (unsigned SUNum : DAG->BottomUpIndex2SU) {
1008 SUnit *SU = &DAG->SUnits[SUNum];
1009 bool hasSuccessor = false;
1011 if (CurrentColoring[SU->NodeNum] <= (int)DAGSize)
1014 for (SDep& SuccDep : SU->Succs) {
1015 SUnit *Succ = SuccDep.getSUnit();
1016 if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
1018 hasSuccessor = true;
1021 CurrentColoring[SU->NodeNum] = GroupID;
1025 void SIScheduleBlockCreator::createBlocksForVariant(SISchedulerBlockCreatorVariant BlockVariant) {
1026 unsigned DAGSize = DAG->SUnits.size();
1027 std::map<unsigned,unsigned> RealID;
1029 CurrentBlocks.clear();
1030 CurrentColoring.clear();
1031 CurrentColoring.resize(DAGSize, 0);
1032 Node2CurrentBlock.clear();
1034 // Restore links previous scheduling variant has overridden.
1035 DAG->restoreSULinksLeft();
1038 NextNonReservedID = DAGSize + 1;
1040 DEBUG(dbgs() << "Coloring the graph\n");
1042 if (BlockVariant == SISchedulerBlockCreatorVariant::LatenciesGrouped)
1043 colorHighLatenciesGroups();
1045 colorHighLatenciesAlone();
1046 colorComputeReservedDependencies();
1047 colorAccordingToReservedDependencies();
1048 colorEndsAccordingToDependencies();
1049 if (BlockVariant == SISchedulerBlockCreatorVariant::LatenciesAlonePlusConsecutive)
1050 colorForceConsecutiveOrderInGroup();
1051 regroupNoUserInstructions();
1052 colorMergeConstantLoadsNextGroup();
1053 colorMergeIfPossibleNextGroupOnlyForReserved();
1055 // Put SUs of same color into same block
1056 Node2CurrentBlock.resize(DAGSize, -1);
1057 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1058 SUnit *SU = &DAG->SUnits[i];
1059 unsigned Color = CurrentColoring[SU->NodeNum];
1060 if (RealID.find(Color) == RealID.end()) {
1061 int ID = CurrentBlocks.size();
1062 BlockPtrs.push_back(
1063 make_unique<SIScheduleBlock>(DAG, this, ID));
1064 CurrentBlocks.push_back(BlockPtrs.rbegin()->get());
1067 CurrentBlocks[RealID[Color]]->addUnit(SU);
1068 Node2CurrentBlock[SU->NodeNum] = RealID[Color];
1071 // Build dependencies between blocks.
1072 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1073 SUnit *SU = &DAG->SUnits[i];
1074 int SUID = Node2CurrentBlock[i];
1075 for (SDep& SuccDep : SU->Succs) {
1076 SUnit *Succ = SuccDep.getSUnit();
1077 if (SuccDep.isWeak() || Succ->NodeNum >= DAGSize)
1079 if (Node2CurrentBlock[Succ->NodeNum] != SUID)
1080 CurrentBlocks[SUID]->addSucc(CurrentBlocks[Node2CurrentBlock[Succ->NodeNum]]);
1082 for (SDep& PredDep : SU->Preds) {
1083 SUnit *Pred = PredDep.getSUnit();
1084 if (PredDep.isWeak() || Pred->NodeNum >= DAGSize)
1086 if (Node2CurrentBlock[Pred->NodeNum] != SUID)
1087 CurrentBlocks[SUID]->addPred(CurrentBlocks[Node2CurrentBlock[Pred->NodeNum]]);
1091 // Free roots and leaves of all blocks to enable scheduling inside them.
1092 for (unsigned i = 0, e = CurrentBlocks.size(); i != e; ++i) {
1093 SIScheduleBlock *Block = CurrentBlocks[i];
1094 Block->finalizeUnits();
1097 dbgs() << "Blocks created:\n\n";
1098 for (unsigned i = 0, e = CurrentBlocks.size(); i != e; ++i) {
1099 SIScheduleBlock *Block = CurrentBlocks[i];
1100 Block->printDebug(true);
1105 // Two functions taken from Codegen/MachineScheduler.cpp
1107 /// If this iterator is a debug value, increment until reaching the End or a
1108 /// non-debug instruction.
1109 static MachineBasicBlock::const_iterator
1110 nextIfDebug(MachineBasicBlock::const_iterator I,
1111 MachineBasicBlock::const_iterator End) {
1112 for(; I != End; ++I) {
1113 if (!I->isDebugValue())
1119 /// Non-const version.
1120 static MachineBasicBlock::iterator
1121 nextIfDebug(MachineBasicBlock::iterator I,
1122 MachineBasicBlock::const_iterator End) {
1123 // Cast the return value to nonconst MachineInstr, then cast to an
1124 // instr_iterator, which does not check for null, and finally return a bundle_iterator.
1126 return MachineBasicBlock::instr_iterator(
1127 const_cast<MachineInstr*>(
1128 &*nextIfDebug(MachineBasicBlock::const_iterator(I), End)));
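// Topological sort of the block graph: assigns top-down indices such that
// every block gets a strictly higher index than all of its predecessors
// (blocks with no successor end up with the highest indices), which the
// assert below verifies.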
1131 void SIScheduleBlockCreator::topologicalSort() {
1132 unsigned DAGSize = CurrentBlocks.size();
1133 std::vector<int> WorkList;
1135 DEBUG(dbgs() << "Topological Sort\n");
1137 WorkList.reserve(DAGSize);
1138 TopDownIndex2Block.resize(DAGSize);
1139 TopDownBlock2Index.resize(DAGSize);
1140 BottomUpIndex2Block.resize(DAGSize);
1142 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1143 SIScheduleBlock *Block = CurrentBlocks[i];
1144 unsigned Degree = Block->getSuccs().size();
1145 TopDownBlock2Index[i] = Degree;
1147 WorkList.push_back(i);
1152 while (!WorkList.empty()) {
1153 int i = WorkList.back();
1154 SIScheduleBlock *Block = CurrentBlocks[i];
1155 WorkList.pop_back();
1156 TopDownBlock2Index[i] = --Id;
1157 TopDownIndex2Block[Id] = i;
1158 for (SIScheduleBlock* Pred : Block->getPreds()) {
1159 if (!--TopDownBlock2Index[Pred->getID()])
1160 WorkList.push_back(Pred->getID());
1165 // Check correctness of the ordering.
1166 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1167 SIScheduleBlock *Block = CurrentBlocks[i];
1168 for (SIScheduleBlock* Pred : Block->getPreds()) {
1169 assert(TopDownBlock2Index[i] > TopDownBlock2Index[Pred->getID()] &&
1170 "Wrong Top Down topological sorting");
1175 BottomUpIndex2Block = std::vector<int>(TopDownIndex2Block.rbegin(),
1176 TopDownIndex2Block.rend());
1179 void SIScheduleBlockCreator::scheduleInsideBlocks() {
1180 unsigned DAGSize = CurrentBlocks.size();
1182 DEBUG(dbgs() << "\nScheduling Blocks\n\n");
1184 // We first compute a valid scheduling such that a Block corresponds
1185 // to a contiguous range of instructions.
1186 DEBUG(dbgs() << "First phase: Fast scheduling for Reg Liveness\n");
1187 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1188 SIScheduleBlock *Block = CurrentBlocks[i];
1189 Block->fastSchedule();
1192 // Note: the following code, and the part restoring previous position
1193 // is by far the most expensive operation of the Scheduler.
1195 // Do not update CurrentTop.
1196 MachineBasicBlock::iterator CurrentTopFastSched = DAG->getCurrentTop();
1197 std::vector<MachineBasicBlock::iterator> PosOld;
1198 std::vector<MachineBasicBlock::iterator> PosNew;
1199 PosOld.reserve(DAG->SUnits.size());
1200 PosNew.reserve(DAG->SUnits.size());
1202 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1203 int BlockIndice = TopDownIndex2Block[i];
1204 SIScheduleBlock *Block = CurrentBlocks[BlockIndice];
1205 std::vector<SUnit*> SUs = Block->getScheduledUnits();
1207 for (SUnit* SU : SUs) {
1208 MachineInstr *MI = SU->getInstr();
1209 MachineBasicBlock::iterator Pos = MI;
1210 PosOld.push_back(Pos);
1211 if (&*CurrentTopFastSched == MI) {
1212 PosNew.push_back(Pos);
1213 CurrentTopFastSched = nextIfDebug(++CurrentTopFastSched,
1214 DAG->getCurrentBottom());
1216 // Update the instruction stream.
1217 DAG->getBB()->splice(CurrentTopFastSched, DAG->getBB(), MI);
1219 // Update LiveIntervals.
1220 // Note: Moving all instructions and calling handleMove every time
1221 // is the most CPU intensive operation of the scheduler.
1222 // It would gain a lot if there was a way to recompute the
1223 // LiveIntervals for the entire scheduling region.
1224 DAG->getLIS()->handleMove(*MI, /*UpdateFlags=*/true);
1225 PosNew.push_back(CurrentTopFastSched);
1230 // Now each Block of SUs corresponds to a contiguous block of MIs.
1231 // We do the final schedule for the instructions inside the block.
1232 // The property that all the SUs of the Block are grouped together as MIs
1233 // is used for correct reg usage tracking.
1234 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1235 SIScheduleBlock *Block = CurrentBlocks[i];
1236 std::vector<SUnit*> SUs = Block->getScheduledUnits();
1237 Block->schedule((*SUs.begin())->getInstr(), (*SUs.rbegin())->getInstr());
1240 DEBUG(dbgs() << "Restoring MI Pos\n");
1241 // Restore old ordering (which prevents a LIS->handleMove bug).
1242 for (unsigned i = PosOld.size(), e = 0; i != e; --i) {
1243 MachineBasicBlock::iterator POld = PosOld[i-1];
1244 MachineBasicBlock::iterator PNew = PosNew[i-1];
1246 // Update the instruction stream.
1247 DAG->getBB()->splice(POld, DAG->getBB(), PNew);
1249 // Update LiveIntervals.
1250 DAG->getLIS()->handleMove(*POld, /*UpdateFlags=*/true);
1255 for (unsigned i = 0, e = CurrentBlocks.size(); i != e; ++i) {
1256 SIScheduleBlock *Block = CurrentBlocks[i];
1257 Block->printDebug(true);
1262 void SIScheduleBlockCreator::fillStats() {
1263 unsigned DAGSize = CurrentBlocks.size();
1265 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1266 int BlockIndice = TopDownIndex2Block[i];
1267 SIScheduleBlock *Block = CurrentBlocks[BlockIndice];
1268 if (Block->getPreds().size() == 0)
1272 for (SIScheduleBlock *Pred : Block->getPreds()) {
1273 if (Depth < Pred->Depth + 1)
1274 Depth = Pred->Depth + 1;
1276 Block->Depth = Depth;
1280 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
1281 int BlockIndice = BottomUpIndex2Block[i];
1282 SIScheduleBlock *Block = CurrentBlocks[BlockIndice];
1283 if (Block->getSuccs().size() == 0)
1286 unsigned Height = 0;
1287 for (SIScheduleBlock *Succ : Block->getSuccs()) {
1288 if (Height < Succ->Height + 1)
1289 Height = Succ->Height + 1;
1291 Block->Height = Height;
1296 // SIScheduleBlockScheduler //
1298 SIScheduleBlockScheduler::SIScheduleBlockScheduler(SIScheduleDAGMI *DAG,
1299 SISchedulerBlockSchedulerVariant Variant,
1300 SIScheduleBlocks BlocksStruct) :
1301 DAG(DAG), Variant(Variant), Blocks(BlocksStruct.Blocks),
1302 LastPosWaitedHighLatency(0), NumBlockScheduled(0), VregCurrentUsage(0),
1303 SregCurrentUsage(0), maxVregUsage(0), maxSregUsage(0) {
1305 // Fill the usage of every output
1306 // Warning: while by construction we always have a link between two blocks
1307 // when one needs a result from the other, the number of users of an output
1308 // is not the sum of child blocks having as input the same virtual register.
1309 // Here is an example. A produces x and y. B consumes x and produces x'.
1310 // C consumes x' and y. The register coalescer may have assigned the same
1311 // virtual register to x and x'.
1312 // To count accurately, we do a topological sort. In case the register is
1313 // found for several parents, we increment the usage of the one with the
1314 // highest topological index.
1315 LiveOutRegsNumUsages.resize(Blocks.size());
1316 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
1317 SIScheduleBlock *Block = Blocks[i];
1318 for (unsigned Reg : Block->getInRegs()) {
1321 for (SIScheduleBlock* Pred: Block->getPreds()) {
1322 std::set<unsigned> PredOutRegs = Pred->getOutRegs();
1323 std::set<unsigned>::iterator RegPos = PredOutRegs.find(Reg);
1325 if (RegPos != PredOutRegs.end()) {
1327 if (topoInd < BlocksStruct.TopDownBlock2Index[Pred->getID()]) {
1328 topoInd = BlocksStruct.TopDownBlock2Index[Pred->getID()];
1336 int PredID = BlocksStruct.TopDownIndex2Block[topoInd];
1337 std::map<unsigned, unsigned>::iterator RegPos =
1338 LiveOutRegsNumUsages[PredID].find(Reg);
1339 if (RegPos != LiveOutRegsNumUsages[PredID].end()) {
1340 ++LiveOutRegsNumUsages[PredID][Reg];
1342 LiveOutRegsNumUsages[PredID][Reg] = 1;
1347 LastPosHighLatencyParentScheduled.resize(Blocks.size(), 0);
1348 BlockNumPredsLeft.resize(Blocks.size());
1349 BlockNumSuccsLeft.resize(Blocks.size());
1351 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
1352 SIScheduleBlock *Block = Blocks[i];
1353 BlockNumPredsLeft[i] = Block->getPreds().size();
1354 BlockNumSuccsLeft[i] = Block->getSuccs().size();
1358 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
1359 SIScheduleBlock *Block = Blocks[i];
1360 assert(Block->getID() == i);
1364 std::set<unsigned> InRegs = DAG->getInRegs();
1365 addLiveRegs(InRegs);
1367 // Fill LiveRegsConsumers for regs that were already
1368 // defined before scheduling.
1369 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
1370 SIScheduleBlock *Block = Blocks[i];
1371 for (unsigned Reg : Block->getInRegs()) {
1373 for (SIScheduleBlock* Pred: Block->getPreds()) {
1374 std::set<unsigned> PredOutRegs = Pred->getOutRegs();
1375 std::set<unsigned>::iterator RegPos = PredOutRegs.find(Reg);
1377 if (RegPos != PredOutRegs.end()) {
1384 if (LiveRegsConsumers.find(Reg) == LiveRegsConsumers.end())
1385 LiveRegsConsumers[Reg] = 1;
1387 ++LiveRegsConsumers[Reg];
1392 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
1393 SIScheduleBlock *Block = Blocks[i];
1394 if (BlockNumPredsLeft[i] == 0) {
1395 ReadyBlocks.push_back(Block);
1399 while (SIScheduleBlock *Block = pickBlock()) {
1400 BlocksScheduled.push_back(Block);
1401 blockScheduled(Block);
1405 dbgs() << "Block Order:";
1406 for (SIScheduleBlock* Block : BlocksScheduled) {
1407 dbgs() << ' ' << Block->getID();
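// Candidate comparison when latency hiding has priority: prefer blocks whose
// last scheduled high latency parent is furthest in the past, then blocks
// that themselves contain high latency instructions (preferring the ones
// with the greatest height), then blocks with more high latency successors.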
1412 bool SIScheduleBlockScheduler::tryCandidateLatency(SIBlockSchedCandidate &Cand,
1413 SIBlockSchedCandidate &TryCand) {
1414 if (!Cand.isValid()) {
1415 TryCand.Reason = NodeOrder;
1419 // Try to hide high latencies.
1420 if (tryLess(TryCand.LastPosHighLatParentScheduled,
1421 Cand.LastPosHighLatParentScheduled, TryCand, Cand, Latency))
1423 // Schedule high latencies early so you can hide them better.
1424 if (tryGreater(TryCand.IsHighLatency, Cand.IsHighLatency,
1425 TryCand, Cand, Latency))
1427 if (TryCand.IsHighLatency && tryGreater(TryCand.Height, Cand.Height,
1428 TryCand, Cand, Depth))
1430 if (tryGreater(TryCand.NumHighLatencySuccessors,
1431 Cand.NumHighLatencySuccessors,
1432 TryCand, Cand, Successor))
1437 bool SIScheduleBlockScheduler::tryCandidateRegUsage(SIBlockSchedCandidate &Cand,
1438 SIBlockSchedCandidate &TryCand) {
1439 if (!Cand.isValid()) {
1440 TryCand.Reason = NodeOrder;
1444 if (tryLess(TryCand.VGPRUsageDiff > 0, Cand.VGPRUsageDiff > 0,
1445 TryCand, Cand, RegUsage))
1447 if (tryGreater(TryCand.NumSuccessors > 0,
1448 Cand.NumSuccessors > 0,
1449 TryCand, Cand, Successor))
1451 if (tryGreater(TryCand.Height, Cand.Height, TryCand, Cand, Depth))
1453 if (tryLess(TryCand.VGPRUsageDiff, Cand.VGPRUsageDiff,
1454 TryCand, Cand, RegUsage))
1459 SIScheduleBlock *SIScheduleBlockScheduler::pickBlock() {
1460 SIBlockSchedCandidate Cand;
1461 std::vector<SIScheduleBlock*>::iterator Best;
1462 SIScheduleBlock *Block;
1463 if (ReadyBlocks.empty())
1466 DAG->fillVgprSgprCost(LiveRegs.begin(), LiveRegs.end(),
1467 VregCurrentUsage, SregCurrentUsage);
1468 if (VregCurrentUsage > maxVregUsage)
1469 maxVregUsage = VregCurrentUsage;
1470 if (SregCurrentUsage > maxSregUsage)
1471 maxSregUsage = SregCurrentUsage;
1473 dbgs() << "Picking New Blocks\n";
1474 dbgs() << "Available: ";
1475 for (SIScheduleBlock* Block : ReadyBlocks)
1476 dbgs() << Block->getID() << ' ';
1477 dbgs() << "\nCurrent Live:\n";
1478 for (unsigned Reg : LiveRegs)
1479 dbgs() << PrintVRegOrUnit(Reg, DAG->getTRI()) << ' ';
1481 dbgs() << "Current VGPRs: " << VregCurrentUsage << '\n';
1482 dbgs() << "Current SGPRs: " << SregCurrentUsage << '\n';
1485 Cand.Block = nullptr;
1486 for (std::vector<SIScheduleBlock*>::iterator I = ReadyBlocks.begin(),
1487 E = ReadyBlocks.end(); I != E; ++I) {
1488 SIBlockSchedCandidate TryCand;
1490 TryCand.IsHighLatency = TryCand.Block->isHighLatencyBlock();
1491 TryCand.VGPRUsageDiff =
1492 checkRegUsageImpact(TryCand.Block->getInRegs(),
1493 TryCand.Block->getOutRegs())[DAG->getVGPRSetID()];
1494 TryCand.NumSuccessors = TryCand.Block->getSuccs().size();
1495 TryCand.NumHighLatencySuccessors =
1496 TryCand.Block->getNumHighLatencySuccessors();
1497 TryCand.LastPosHighLatParentScheduled =
1498 (unsigned int) std::max<int> (0,
1499 LastPosHighLatencyParentScheduled[TryCand.Block->getID()] -
1500 LastPosWaitedHighLatency);
1501 TryCand.Height = TryCand.Block->Height;
1502 // Try not to increase VGPR usage too much, else we may spill.
1503 if (VregCurrentUsage > 120 ||
1504 Variant != SISchedulerBlockSchedulerVariant::BlockLatencyRegUsage) {
1505 if (!tryCandidateRegUsage(Cand, TryCand) &&
1506 Variant != SISchedulerBlockSchedulerVariant::BlockRegUsage)
1507 tryCandidateLatency(Cand, TryCand);
1509 if (!tryCandidateLatency(Cand, TryCand))
1510 tryCandidateRegUsage(Cand, TryCand);
1512 if (TryCand.Reason != NoCand) {
1513 Cand.setBest(TryCand);
1515 DEBUG(dbgs() << "Best Current Choice: " << Cand.Block->getID() << ' '
1516 << getReasonStr(Cand.Reason) << '\n');
1521 dbgs() << "Picking: " << Cand.Block->getID() << '\n';
1522 dbgs() << "Is a block with high latency instruction: "
1523 << (Cand.IsHighLatency ? "yes\n" : "no\n");
1524 dbgs() << "Position of last high latency dependency: "
1525 << Cand.LastPosHighLatParentScheduled << '\n';
1526 dbgs() << "VGPRUsageDiff: " << Cand.VGPRUsageDiff << '\n';
1531 ReadyBlocks.erase(Best);
1535 // Tracking of currently alive registers to determine VGPR Usage.
1537 void SIScheduleBlockScheduler::addLiveRegs(std::set<unsigned> &Regs) {
1538 for (unsigned Reg : Regs) {
1539 // For now only track virtual registers.
1540 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1542 // If not already in the live set, then add it.
1543 (void) LiveRegs.insert(Reg);
1547 void SIScheduleBlockScheduler::decreaseLiveRegs(SIScheduleBlock *Block,
1548 std::set<unsigned> &Regs) {
1549 for (unsigned Reg : Regs) {
1550 // For now only track virtual registers.
1551 std::set<unsigned>::iterator Pos = LiveRegs.find(Reg);
1552 assert (Pos != LiveRegs.end() && // Reg must be live.
1553 LiveRegsConsumers.find(Reg) != LiveRegsConsumers.end() &&
1554 LiveRegsConsumers[Reg] >= 1);
1555 --LiveRegsConsumers[Reg];
1556 if (LiveRegsConsumers[Reg] == 0)
1557 LiveRegs.erase(Pos);
1561 void SIScheduleBlockScheduler::releaseBlockSuccs(SIScheduleBlock *Parent) {
1562 for (SIScheduleBlock* Block : Parent->getSuccs()) {
1563 --BlockNumPredsLeft[Block->getID()];
1564 if (BlockNumPredsLeft[Block->getID()] == 0) {
1565 ReadyBlocks.push_back(Block);
1567 // TODO: Improve check. When the dependency between the high latency
1568 // instructions and the instructions of the other blocks is WAR or WAW,
1569 // there will be no wait triggered. We would like these cases not to
1570 // update LastPosHighLatencyParentScheduled.
1571 if (Parent->isHighLatencyBlock())
1572 LastPosHighLatencyParentScheduled[Block->getID()] = NumBlockScheduled;
1576 void SIScheduleBlockScheduler::blockScheduled(SIScheduleBlock *Block) {
1577 decreaseLiveRegs(Block, Block->getInRegs());
1578 addLiveRegs(Block->getOutRegs());
1579 releaseBlockSuccs(Block);
1580 for (std::map<unsigned, unsigned>::iterator RegI =
1581 LiveOutRegsNumUsages[Block->getID()].begin(),
1582 E = LiveOutRegsNumUsages[Block->getID()].end(); RegI != E; ++RegI) {
1583 std::pair<unsigned, unsigned> RegP = *RegI;
1584 if (LiveRegsConsumers.find(RegP.first) == LiveRegsConsumers.end())
1585 LiveRegsConsumers[RegP.first] = RegP.second;
1587 assert(LiveRegsConsumers[RegP.first] == 0);
1588 LiveRegsConsumers[RegP.first] += RegP.second;
1591 if (LastPosHighLatencyParentScheduled[Block->getID()] >
1592 (unsigned)LastPosWaitedHighLatency)
1593 LastPosWaitedHighLatency =
1594 LastPosHighLatencyParentScheduled[Block->getID()];
1595 ++NumBlockScheduled;
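// Estimate, for each register pressure set, how much scheduling a block with
// the given input and output registers would change the register pressure:
// inputs whose last consumer is this block decrease the pressure, while the
// block's outputs increase it.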
1599 SIScheduleBlockScheduler::checkRegUsageImpact(std::set<unsigned> &InRegs,
1600 std::set<unsigned> &OutRegs) {
1601 std::vector<int> DiffSetPressure;
1602 DiffSetPressure.assign(DAG->getTRI()->getNumRegPressureSets(), 0);
1604 for (unsigned Reg : InRegs) {
1605 // For now only track virtual registers.
1606 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1608 if (LiveRegsConsumers[Reg] > 1)
1610 PSetIterator PSetI = DAG->getMRI()->getPressureSets(Reg);
1611 for (; PSetI.isValid(); ++PSetI) {
1612 DiffSetPressure[*PSetI] -= PSetI.getWeight();
1616 for (unsigned Reg : OutRegs) {
1617 // For now only track virtual registers.
1618 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1620 PSetIterator PSetI = DAG->getMRI()->getPressureSets(Reg);
1621 for (; PSetI.isValid(); ++PSetI) {
1622 DiffSetPressure[*PSetI] += PSetI.getWeight();
1626 return DiffSetPressure;
1631 struct SIScheduleBlockResult
1632 SIScheduler::scheduleVariant(SISchedulerBlockCreatorVariant BlockVariant,
1633 SISchedulerBlockSchedulerVariant ScheduleVariant) {
1634 SIScheduleBlocks Blocks = BlockCreator.getBlocks(BlockVariant);
1635 SIScheduleBlockScheduler Scheduler(DAG, ScheduleVariant, Blocks);
1636 std::vector<SIScheduleBlock*> ScheduledBlocks;
1637 struct SIScheduleBlockResult Res;
1639 ScheduledBlocks = Scheduler.getBlocks();
1641 for (unsigned b = 0; b < ScheduledBlocks.size(); ++b) {
1642 SIScheduleBlock *Block = ScheduledBlocks[b];
1643 std::vector<SUnit*> SUs = Block->getScheduledUnits();
1645 for (SUnit* SU : SUs)
1646 Res.SUs.push_back(SU->NodeNum);
1649 Res.MaxSGPRUsage = Scheduler.getSGPRUsage();
1650 Res.MaxVGPRUsage = Scheduler.getVGPRUsage();
1654 // SIScheduleDAGMI //
1656 SIScheduleDAGMI::SIScheduleDAGMI(MachineSchedContext *C) :
1657 ScheduleDAGMILive(C, make_unique<GenericScheduler>(C)) {
1658 SITII = static_cast<const SIInstrInfo*>(TII);
1659 SITRI = static_cast<const SIRegisterInfo*>(TRI);
1661 VGPRSetID = SITRI->getVGPR32PressureSet();
1662 SGPRSetID = SITRI->getSGPR32PressureSet();
1665 SIScheduleDAGMI::~SIScheduleDAGMI() {
1668 ScheduleDAGInstrs *llvm::createSIMachineScheduler(MachineSchedContext *C) {
1669 return new SIScheduleDAGMI(C);
1672 // Code adapted from scheduleDAG.cpp
1673 // Does a topological sort over the SUs.
1674 // Both TopDown and BottomUp
1675 void SIScheduleDAGMI::topologicalSort() {
1676 Topo.InitDAGTopologicalSorting();
1678 TopDownIndex2SU = std::vector<int>(Topo.begin(), Topo.end());
1679 BottomUpIndex2SU = std::vector<int>(Topo.rbegin(), Topo.rend());
1682 // Move low latencies further from their user without
1683 // increasing SGPR usage (in general).
1684 // This is to be replaced by a better pass that would
1685 // take into account SGPR usage (based on VGPR Usage
1686 // and the corresponding wavefront count), that would
1687 // try to merge groups of loads if it makes sense, etc.
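// (For example, a low latency load whose result is first used several
// instructions later gets moved up to just after the last low latency
// instruction or low latency user, so that the wait before its user
// covers as many independent instructions as possible.)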
1688 void SIScheduleDAGMI::moveLowLatencies() {
1689 unsigned DAGSize = SUnits.size();
1690 int LastLowLatencyUser = -1;
1691 int LastLowLatencyPos = -1;
1693 for (unsigned i = 0, e = ScheduledSUnits.size(); i != e; ++i) {
1694 SUnit *SU = &SUnits[ScheduledSUnits[i]];
1695 bool IsLowLatencyUser = false;
1696 unsigned MinPos = 0;
1698 for (SDep& PredDep : SU->Preds) {
1699 SUnit *Pred = PredDep.getSUnit();
1700 if (SITII->isLowLatencyInstruction(*Pred->getInstr())) {
1701 IsLowLatencyUser = true;
1703 if (Pred->NodeNum >= DAGSize)
1705 unsigned PredPos = ScheduledSUnitsInv[Pred->NodeNum];
1706 if (PredPos >= MinPos)
1707 MinPos = PredPos + 1;
1710 if (SITII->isLowLatencyInstruction(*SU->getInstr())) {
1711 unsigned BestPos = LastLowLatencyUser + 1;
1712 if ((int)BestPos <= LastLowLatencyPos)
1713 BestPos = LastLowLatencyPos + 1;
1714 if (BestPos < MinPos)
1717 for (unsigned u = i; u > BestPos; --u) {
1718 ++ScheduledSUnitsInv[ScheduledSUnits[u-1]];
1719 ScheduledSUnits[u] = ScheduledSUnits[u-1];
1721 ScheduledSUnits[BestPos] = SU->NodeNum;
1722 ScheduledSUnitsInv[SU->NodeNum] = BestPos;
1724 LastLowLatencyPos = BestPos;
1725 if (IsLowLatencyUser)
1726 LastLowLatencyUser = BestPos;
1727 } else if (IsLowLatencyUser) {
1728 LastLowLatencyUser = i;
1729 // Also move the COPY instructions on which
1730 // the low latency instructions depend.
1731 } else if (SU->getInstr()->getOpcode() == AMDGPU::COPY) {
1732 bool CopyForLowLat = false;
1733 for (SDep& SuccDep : SU->Succs) {
1734 SUnit *Succ = SuccDep.getSUnit();
1735 if (SITII->isLowLatencyInstruction(*Succ->getInstr())) {
1736 CopyForLowLat = true;
1742 for (unsigned u = i; u > MinPos; --u) {
1743 ++ScheduledSUnitsInv[ScheduledSUnits[u-1]];
1744 ScheduledSUnits[u] = ScheduledSUnits[u-1];
1746 ScheduledSUnits[MinPos] = SU->NodeNum;
1747 ScheduledSUnitsInv[SU->NodeNum] = MinPos;
1753 void SIScheduleDAGMI::restoreSULinksLeft() {
1754 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1755 SUnits[i].isScheduled = false;
1756 SUnits[i].WeakPredsLeft = SUnitsLinksBackup[i].WeakPredsLeft;
1757 SUnits[i].NumPredsLeft = SUnitsLinksBackup[i].NumPredsLeft;
1758 SUnits[i].WeakSuccsLeft = SUnitsLinksBackup[i].WeakSuccsLeft;
1759 SUnits[i].NumSuccsLeft = SUnitsLinksBackup[i].NumSuccsLeft;
1763 // Return the Vgpr and Sgpr usage corresponding to some virtual registers.
1764 template<typename _Iterator> void
1765 SIScheduleDAGMI::fillVgprSgprCost(_Iterator First, _Iterator End,
1766 unsigned &VgprUsage, unsigned &SgprUsage) {
1769 for (_Iterator RegI = First; RegI != End; ++RegI) {
1770 unsigned Reg = *RegI;
1771 // For now only track virtual registers
1772 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1774 PSetIterator PSetI = MRI.getPressureSets(Reg);
1775 for (; PSetI.isValid(); ++PSetI) {
1776 if (*PSetI == VGPRSetID)
1777 VgprUsage += PSetI.getWeight();
1778 else if (*PSetI == SGPRSetID)
1779 SgprUsage += PSetI.getWeight();
1784 void SIScheduleDAGMI::schedule()
1786 SmallVector<SUnit*, 8> TopRoots, BotRoots;
1787 SIScheduleBlockResult Best, Temp;
1788 DEBUG(dbgs() << "Preparing Scheduling\n");
1790 buildDAGWithRegPressure();
1792 for(SUnit& SU : SUnits)
1797 findRootsAndBiasEdges(TopRoots, BotRoots);
1798 // We reuse several ScheduleDAGMI and ScheduleDAGMILive
1799 // functions, but to make them happy we must initialize
1800 // the default Scheduler implementation (even if we do not run it).
1802 SchedImpl->initialize(this);
1803 initQueues(TopRoots, BotRoots);
1805 // Fill some stats to help scheduling.
1807 SUnitsLinksBackup = SUnits;
1808 IsLowLatencySU.clear();
1809 LowLatencyOffset.clear();
1810 IsHighLatencySU.clear();
1812 IsLowLatencySU.resize(SUnits.size(), 0);
1813 LowLatencyOffset.resize(SUnits.size(), 0);
1814 IsHighLatencySU.resize(SUnits.size(), 0);
1816 for (unsigned i = 0, e = (unsigned)SUnits.size(); i != e; ++i) {
1817 SUnit *SU = &SUnits[i];
1818 unsigned BaseLatReg;
1820 if (SITII->isLowLatencyInstruction(*SU->getInstr())) {
1821 IsLowLatencySU[i] = 1;
1822 if (SITII->getMemOpBaseRegImmOfs(*SU->getInstr(), BaseLatReg, OffLatReg,
1824 LowLatencyOffset[i] = OffLatReg;
1825 } else if (SITII->isHighLatencyInstruction(*SU->getInstr()))
1826 IsHighLatencySU[i] = 1;
1829 SIScheduler Scheduler(this);
1830 Best = Scheduler.scheduleVariant(SISchedulerBlockCreatorVariant::LatenciesAlone,
1831 SISchedulerBlockSchedulerVariant::BlockLatencyRegUsage);
1833 // If VGPR usage is extremely high, try other well performing variants
1834 // which could lead to lower VGPR usage.
1835 if (Best.MaxVGPRUsage > 180) {
1836 std::vector<std::pair<SISchedulerBlockCreatorVariant, SISchedulerBlockSchedulerVariant>> Variants = {
1837 { LatenciesAlone, BlockRegUsageLatency },
1838 // { LatenciesAlone, BlockRegUsage },
1839 { LatenciesGrouped, BlockLatencyRegUsage },
1840 // { LatenciesGrouped, BlockRegUsageLatency },
1841 // { LatenciesGrouped, BlockRegUsage },
1842 { LatenciesAlonePlusConsecutive, BlockLatencyRegUsage },
1843 // { LatenciesAlonePlusConsecutive, BlockRegUsageLatency },
1844 // { LatenciesAlonePlusConsecutive, BlockRegUsage }
1846 for (std::pair<SISchedulerBlockCreatorVariant, SISchedulerBlockSchedulerVariant> v : Variants) {
1847 Temp = Scheduler.scheduleVariant(v.first, v.second);
1848 if (Temp.MaxVGPRUsage < Best.MaxVGPRUsage)
1852 // If VGPR usage is still extremely high, we may spill. Try other variants
1853 // which perform worse, but could lead to lower VGPR usage.
1854 if (Best.MaxVGPRUsage > 200) {
1855 std::vector<std::pair<SISchedulerBlockCreatorVariant, SISchedulerBlockSchedulerVariant>> Variants = {
1856 // { LatenciesAlone, BlockRegUsageLatency },
1857 { LatenciesAlone, BlockRegUsage },
1858 // { LatenciesGrouped, BlockLatencyRegUsage },
1859 { LatenciesGrouped, BlockRegUsageLatency },
1860 { LatenciesGrouped, BlockRegUsage },
1861 // { LatenciesAlonePlusConsecutive, BlockLatencyRegUsage },
1862 { LatenciesAlonePlusConsecutive, BlockRegUsageLatency },
1863 { LatenciesAlonePlusConsecutive, BlockRegUsage }
1865 for (std::pair<SISchedulerBlockCreatorVariant, SISchedulerBlockSchedulerVariant> v : Variants) {
1866 Temp = Scheduler.scheduleVariant(v.first, v.second);
1867 if (Temp.MaxVGPRUsage < Best.MaxVGPRUsage)
1872 ScheduledSUnits = Best.SUs;
1873 ScheduledSUnitsInv.resize(SUnits.size());
1875 for (unsigned i = 0, e = (unsigned)SUnits.size(); i != e; ++i) {
1876 ScheduledSUnitsInv[ScheduledSUnits[i]] = i;
1881 // Tell the outside world about the result of the scheduling.
1883 assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
1884 TopRPTracker.setPos(CurrentTop);
1886 for (std::vector<unsigned>::iterator I = ScheduledSUnits.begin(),
1887 E = ScheduledSUnits.end(); I != E; ++I) {
1888 SUnit *SU = &SUnits[*I];
1890 scheduleMI(SU, true);
1892 DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
1893 << *SU->getInstr());
1896 assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
1901 unsigned BBNum = begin()->getParent()->getNumber();
1902 dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";