//===-- GCNSchedStrategy.cpp - GCN Scheduler Strategy ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This contains a MachineSchedStrategy implementation for maximizing wave
/// occupancy on GCN hardware.
//===----------------------------------------------------------------------===//

#include "GCNSchedStrategy.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/Support/MathExtras.h"

#define DEBUG_TYPE "machine-scheduler"

using namespace llvm;

GCNMaxOccupancySchedStrategy::GCNMaxOccupancySchedStrategy(
    const MachineSchedContext *C) :
    GenericScheduler(C), TargetOccupancy(0), MF(nullptr) { }
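
// Compute the register pressure limits used by the heuristics below: the
// "excess" limits are the numbers of allocatable SGPRs/VGPRs minus a safety
// margin, and the "critical" limits are the pressure values beyond which the
// target occupancy can no longer be sustained.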
void GCNMaxOccupancySchedStrategy::initialize(ScheduleDAGMI *DAG) {
  GenericScheduler::initialize(DAG);

  const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI);

  MF = &DAG->MF;

  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();

  // FIXME: This is also necessary because some passes that run after
  // scheduling and before regalloc increase register pressure.
  const int ErrorMargin = 3;

  SGPRExcessLimit = Context->RegClassInfo
    ->getNumAllocatableRegs(&AMDGPU::SGPR_32RegClass) - ErrorMargin;
  VGPRExcessLimit = Context->RegClassInfo
    ->getNumAllocatableRegs(&AMDGPU::VGPR_32RegClass) - ErrorMargin;
  if (TargetOccupancy) {
    SGPRCriticalLimit = ST.getMaxNumSGPRs(TargetOccupancy, true);
    VGPRCriticalLimit = ST.getMaxNumVGPRs(TargetOccupancy);
  } else {
    SGPRCriticalLimit = SRI->getRegPressureSetLimit(DAG->MF,
                                                    SRI->getSGPRPressureSet());
    VGPRCriticalLimit = SRI->getRegPressureSetLimit(DAG->MF,
                                                    SRI->getVGPRPressureSet());
  }

  SGPRCriticalLimit -= ErrorMargin;
  VGPRCriticalLimit -= ErrorMargin;
}
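
// Fill Cand with the candidate SU's SGPR/VGPR pressure deltas so that
// tryCandidate() can weigh it against the excess and critical limits.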
void GCNMaxOccupancySchedStrategy::initCandidate(SchedCandidate &Cand, SUnit *SU,
                                     bool AtTop, const RegPressureTracker &RPTracker,
                                     const SIRegisterInfo *SRI,
                                     unsigned SGPRPressure,
                                     unsigned VGPRPressure) {

  Cand.SU = SU;
  Cand.AtTop = AtTop;

  // getDownwardPressure() and getUpwardPressure() make temporary changes to
  // the tracker, so we need to pass those functions a non-const copy.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  std::vector<unsigned> Pressure;
  std::vector<unsigned> MaxPressure;

  if (AtTop)
    TempTracker.getDownwardPressure(SU->getInstr(), Pressure, MaxPressure);
  else {
    // FIXME: I think for bottom up scheduling, the register pressure is cached
    // and can be retrieved by DAG->getPressureDiff(SU).
    TempTracker.getUpwardPressure(SU->getInstr(), Pressure, MaxPressure);
  }

  unsigned NewSGPRPressure = Pressure[SRI->getSGPRPressureSet()];
  unsigned NewVGPRPressure = Pressure[SRI->getVGPRPressureSet()];

  // If two instructions increase the pressure of different register sets
  // by the same amount, the generic scheduler will prefer to schedule the
  // instruction that increases the set with the least amount of registers,
  // which in our case would be SGPRs. This is rarely what we want, so
  // when we report excess/critical register pressure, we do it either
  // only for VGPRs or only for SGPRs.

  // FIXME: Better heuristics to determine whether to prefer SGPRs or VGPRs.
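  // Note: 16 appears to correspond to the largest register tuple (512 bits,
  // i.e. 16 VGPRs), a conservative bound on how much a single instruction
  // can raise VGPR pressure.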
  const unsigned MaxVGPRPressureInc = 16;
  bool ShouldTrackVGPRs = VGPRPressure + MaxVGPRPressureInc >= VGPRExcessLimit;
  bool ShouldTrackSGPRs = !ShouldTrackVGPRs && SGPRPressure >= SGPRExcessLimit;

  // FIXME: We have to enter REG-EXCESS before we reach the actual threshold
  // to increase the likelihood we don't go over the limits. We should improve
  // the analysis to look through dependencies to find the path with the least
  // register pressure.

  // We only need to update the RPDelta for instructions that increase
  // register pressure. Instructions that decrease or keep reg pressure
  // the same will be marked as RegExcess in tryCandidate() when they
  // are compared with instructions that increase the register pressure.
  if (ShouldTrackVGPRs && NewVGPRPressure >= VGPRExcessLimit) {
    Cand.RPDelta.Excess = PressureChange(SRI->getVGPRPressureSet());
    Cand.RPDelta.Excess.setUnitInc(NewVGPRPressure - VGPRExcessLimit);
  }

  if (ShouldTrackSGPRs && NewSGPRPressure >= SGPRExcessLimit) {
    Cand.RPDelta.Excess = PressureChange(SRI->getSGPRPressureSet());
    Cand.RPDelta.Excess.setUnitInc(NewSGPRPressure - SGPRExcessLimit);
  }

  // Register pressure is considered 'CRITICAL' if it is approaching a value
  // that would reduce the wave occupancy for the execution unit. When
  // register pressure is 'CRITICAL', increasing SGPR and VGPR pressure both
  // have the same cost, so we don't need to prefer one over the other.

  int SGPRDelta = NewSGPRPressure - SGPRCriticalLimit;
  int VGPRDelta = NewVGPRPressure - VGPRCriticalLimit;

  if (SGPRDelta >= 0 || VGPRDelta >= 0) {
    if (SGPRDelta > VGPRDelta) {
      Cand.RPDelta.CriticalMax = PressureChange(SRI->getSGPRPressureSet());
      Cand.RPDelta.CriticalMax.setUnitInc(SGPRDelta);
    } else {
      Cand.RPDelta.CriticalMax = PressureChange(SRI->getVGPRPressureSet());
      Cand.RPDelta.CriticalMax.setUnitInc(VGPRDelta);
    }
  }
}

// This function is mostly cut and pasted from
// GenericScheduler::pickNodeFromQueue()
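// The main difference is the call to initCandidate() above, which computes
// the GCN-specific SGPR/VGPR pressure deltas for each candidate.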
void GCNMaxOccupancySchedStrategy::pickNodeFromQueue(SchedBoundary &Zone,
                                         const CandPolicy &ZonePolicy,
                                         const RegPressureTracker &RPTracker,
                                         SchedCandidate &Cand) {
  const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI);
  ArrayRef<unsigned> Pressure = RPTracker.getRegSetPressureAtPos();
  unsigned SGPRPressure = Pressure[SRI->getSGPRPressureSet()];
  unsigned VGPRPressure = Pressure[SRI->getVGPRPressureSet()];
  ReadyQueue &Q = Zone.Available;
  for (SUnit *SU : Q) {

    SchedCandidate TryCand(ZonePolicy);
    initCandidate(TryCand, SU, Zone.isTop(), RPTracker, SRI,
                  SGPRPressure, VGPRPressure);
    // Pass SchedBoundary only when comparing nodes from the same boundary.
    SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
    GenericScheduler::tryCandidate(Cand, TryCand, ZoneArg);
    if (TryCand.Reason != NoCand) {
      // Initialize resource delta if needed in case future heuristics query it.
      if (TryCand.ResDelta == SchedResourceDelta())
        TryCand.initResourceDelta(Zone.DAG, SchedModel);
      Cand.setBest(TryCand);
      LLVM_DEBUG(traceCandidate(Cand));
    }
  }
}

// This function is mostly cut and pasted from
// GenericScheduler::pickNodeBidirectional()
SUnit *GCNMaxOccupancySchedStrategy::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    return SU;
  }
  // Set the bottom-up policy based on the state of the current bottom zone and
  // the instructions outside the zone, including the top zone.
  CandPolicy BotPolicy;
  setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  CandPolicy TopPolicy;
  setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);

  // See if BotCand is still valid (because we previously scheduled from Top).
  LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
  if (!BotCand.isValid() || BotCand.SU->isScheduled ||
      BotCand.Policy != BotPolicy) {
    BotCand.reset(CandPolicy());
    pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
    assert(BotCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(BotCand));
  }

  // Check if the top Q has a better candidate.
  LLVM_DEBUG(dbgs() << "Picking from Top:\n");
  if (!TopCand.isValid() || TopCand.SU->isScheduled ||
      TopCand.Policy != TopPolicy) {
    TopCand.reset(CandPolicy());
    pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
    assert(TopCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(TopCand));
  }

  // Pick best from BotCand and TopCand.
  LLVM_DEBUG(dbgs() << "Top Cand: "; traceCandidate(TopCand);
             dbgs() << "Bot Cand: "; traceCandidate(BotCand););
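  // If the reasons from both zones tie, fall back to the generic comparison;
  // otherwise prefer the candidate that does not increase excess or critical
  // register pressure.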
  SchedCandidate Cand;
  if (TopCand.Reason == BotCand.Reason) {
    Cand = BotCand;
    GenericSchedulerBase::CandReason TopReason = TopCand.Reason;
    TopCand.Reason = NoCand;
    GenericScheduler::tryCandidate(Cand, TopCand, nullptr);
    if (TopCand.Reason != NoCand) {
      Cand.setBest(TopCand);
    } else {
      TopCand.Reason = TopReason;
    }
  } else {
    if (TopCand.Reason == RegExcess && TopCand.RPDelta.Excess.getUnitInc() <= 0) {
      Cand = TopCand;
    } else if (BotCand.Reason == RegExcess && BotCand.RPDelta.Excess.getUnitInc() <= 0) {
      Cand = BotCand;
    } else if (TopCand.Reason == RegCritical && TopCand.RPDelta.CriticalMax.getUnitInc() <= 0) {
      Cand = TopCand;
    } else if (BotCand.Reason == RegCritical && BotCand.RPDelta.CriticalMax.getUnitInc() <= 0) {
      Cand = BotCand;
    } else {
      if (BotCand.Reason > TopCand.Reason) {
        Cand = TopCand;
      } else {
        Cand = BotCand;
      }
    }
  }
  LLVM_DEBUG(dbgs() << "Picking: "; traceCandidate(Cand););

  IsTopNode = Cand.AtTop;
  return Cand.SU;
}

// This function is mostly cut and pasted from
// GenericScheduler::pickNode()
SUnit *GCNMaxOccupancySchedStrategy::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        TopCand.reset(NoPolicy);
        pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        SU = TopCand.SU;
      }
      IsTopNode = true;
    } else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        BotCand.reset(NoPolicy);
        pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        SU = BotCand.SU;
      }
      IsTopNode = false;
    } else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);
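
  // Remove the picked node from whichever ready queues still hold it.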
  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
                    << *SU->getInstr());
  return SU;
}

GCNScheduleDAGMILive::GCNScheduleDAGMILive(MachineSchedContext *C,
                        std::unique_ptr<MachineSchedStrategy> S) :
  ScheduleDAGMILive(C, std::move(S)),
  ST(MF.getSubtarget<GCNSubtarget>()),
  MFI(*MF.getInfo<SIMachineFunctionInfo>()),
  StartingOccupancy(MFI.getOccupancy()),
  MinOccupancy(StartingOccupancy), Stage(0), RegionIdx(0) {

  LLVM_DEBUG(dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n");
}

void GCNScheduleDAGMILive::schedule() {
  if (Stage == 0) {
    // Just record regions at the first pass.
    Regions.push_back(std::make_pair(RegionBegin, RegionEnd));
    return;
  }
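
  // Remember the current instruction order so the schedule can be reverted
  // if it turns out to hurt occupancy.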
  std::vector<MachineInstr*> Unsched;
  Unsched.reserve(NumRegionInstrs);
  for (auto &I : *this) {
    Unsched.push_back(&I);
  }

  GCNRegPressure PressureBefore;
  if (LIS) {
    PressureBefore = Pressure[RegionIdx];

    LLVM_DEBUG(dbgs() << "Pressure before scheduling:\nRegion live-ins:";
               GCNRPTracker::printLiveRegs(dbgs(), LiveIns[RegionIdx], MRI);
               dbgs() << "Region live-in pressure:  ";
               llvm::getRegPressure(MRI, LiveIns[RegionIdx]).print(dbgs());
               dbgs() << "Region register pressure: ";
               PressureBefore.print(dbgs()));
  }

  ScheduleDAGMILive::schedule();
  Regions[RegionIdx] = std::make_pair(RegionBegin, RegionEnd);

  if (!LIS)
    return;

  // Check the results of scheduling.
  GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl;
  auto PressureAfter = getRealRegPressure();

  LLVM_DEBUG(dbgs() << "Pressure after scheduling: ";
             PressureAfter.print(dbgs()));

  if (PressureAfter.getSGPRNum() <= S.SGPRCriticalLimit &&
      PressureAfter.getVGPRNum() <= S.VGPRCriticalLimit) {
    Pressure[RegionIdx] = PressureAfter;
    LLVM_DEBUG(dbgs() << "Pressure in desired limits, done.\n");
    return;
  }
  unsigned Occ = MFI.getOccupancy();
  unsigned WavesAfter = std::min(Occ, PressureAfter.getOccupancy(ST));
  unsigned WavesBefore = std::min(Occ, PressureBefore.getOccupancy(ST));
  LLVM_DEBUG(dbgs() << "Occupancy before scheduling: " << WavesBefore
                    << ", after " << WavesAfter << ".\n");

  // We could not keep current target occupancy because of the just scheduled
  // region. Record new occupancy for next scheduling cycle.
  unsigned NewOccupancy = std::max(WavesAfter, WavesBefore);
  // Allow memory bound functions to drop to 4 waves if not limited by an
  // attribute.
  if (WavesAfter < WavesBefore && WavesAfter < MinOccupancy &&
      WavesAfter >= MFI.getMinAllowedOccupancy()) {
    LLVM_DEBUG(dbgs() << "Function is memory bound, allow occupancy drop up to "
                      << MFI.getMinAllowedOccupancy() << " waves\n");
    NewOccupancy = WavesAfter;
  }
  if (NewOccupancy < MinOccupancy) {
    MinOccupancy = NewOccupancy;
    MFI.limitOccupancy(MinOccupancy);
    LLVM_DEBUG(dbgs() << "Occupancy lowered for the function to "
                      << MinOccupancy << ".\n");
  }

  if (WavesAfter >= MinOccupancy) {
    Pressure[RegionIdx] = PressureAfter;
    return;
  }

  LLVM_DEBUG(dbgs() << "Attempting to revert scheduling.\n");
  RegionEnd = RegionBegin;
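  // Move every instruction back to its original position, incrementally
  // repairing liveness and register flags as we go.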
  for (MachineInstr *MI : Unsched) {
    if (MI->isDebugInstr())
      continue;

    if (MI->getIterator() != RegionEnd) {
      BB->remove(MI);
      BB->insert(RegionEnd, MI);
      if (!MI->isDebugInstr())
        LIS->handleMove(*MI, true);
    }
    // Reset read-undef flags and update them later.
    for (auto &Op : MI->operands())
      if (Op.isReg() && Op.isDef())
        Op.setIsUndef(false);
    RegisterOperands RegOpers;
    RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
    if (!MI->isDebugInstr()) {
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }
    }
    RegionEnd = MI->getIterator();
    ++RegionEnd;
    LLVM_DEBUG(dbgs() << "Scheduling " << *MI);
  }
  RegionBegin = Unsched.front()->getIterator();
  Regions[RegionIdx] = std::make_pair(RegionBegin, RegionEnd);

  placeDebugValues();
}

GCNRegPressure GCNScheduleDAGMILive::getRealRegPressure() const {
  GCNDownwardRPTracker RPTracker(*LIS);
  RPTracker.advance(begin(), end(), &LiveIns[RegionIdx]);
  return RPTracker.moveMaxPressure();
}
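
// Walk the whole block once with a downward pressure tracker, recording the
// live-in set and the maximum pressure of every scheduling region it contains.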
void GCNScheduleDAGMILive::computeBlockPressure(const MachineBasicBlock *MBB) {
  GCNDownwardRPTracker RPTracker(*LIS);

  // If the block has only one successor then live-ins of that successor are
  // live-outs of the current block. We can reuse the calculated live set if
  // the successor will be scheduled after the current block.
  const MachineBasicBlock *OnlySucc = nullptr;
  if (MBB->succ_size() == 1 && !(*MBB->succ_begin())->empty()) {
    SlotIndexes *Ind = LIS->getSlotIndexes();
    if (Ind->getMBBStartIdx(MBB) < Ind->getMBBStartIdx(*MBB->succ_begin()))
      OnlySucc = *MBB->succ_begin();
  }

  // Scheduler sends regions from the end of the block upwards.
  size_t CurRegion = RegionIdx;
  for (size_t E = Regions.size(); CurRegion != E; ++CurRegion)
    if (Regions[CurRegion].first->getParent() != MBB)
      break;
  --CurRegion;

  auto I = MBB->begin();
  auto LiveInIt = MBBLiveIns.find(MBB);
  if (LiveInIt != MBBLiveIns.end()) {
    auto LiveIn = std::move(LiveInIt->second);
    RPTracker.reset(*MBB->begin(), &LiveIn);
    MBBLiveIns.erase(LiveInIt);
  } else {
    I = Regions[CurRegion].first;
    RPTracker.reset(*I);
  }

  for ( ; ; ) {
    I = RPTracker.getNext();

    if (Regions[CurRegion].first == I) {
      LiveIns[CurRegion] = RPTracker.getLiveRegs();
      RPTracker.clearMaxPressure();
    }

    if (Regions[CurRegion].second == I) {
      Pressure[CurRegion] = RPTracker.moveMaxPressure();
      if (CurRegion-- == RegionIdx)
        break;
    }
    RPTracker.advanceToNext();
    RPTracker.advanceBeforeNext();
  }

  if (OnlySucc) {
    if (I != MBB->end()) {
      RPTracker.advanceToNext();
      RPTracker.advance(MBB->end());
    }
    RPTracker.reset(*OnlySucc->begin(), &RPTracker.getLiveRegs());
    RPTracker.advanceBeforeNext();
    MBBLiveIns[OnlySucc] = RPTracker.moveLiveRegs();
  }
}
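
// All regions have been recorded by the initial pass; now run the actual
// scheduling, and retry the whole function with a lowered occupancy target if
// the first real pass had to give up some waves.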
void GCNScheduleDAGMILive::finalizeSchedule() {
  GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl;
  LLVM_DEBUG(dbgs() << "All regions recorded, starting actual scheduling.\n");

  LiveIns.resize(Regions.size());
  Pressure.resize(Regions.size());

  do {
    Stage++;
    RegionIdx = 0;
    MachineBasicBlock *MBB = nullptr;

    if (Stage > 1) {
      // Retry function scheduling if the resulting occupancy is lower than
      // the one assumed for first pass scheduling. This will give more
      // freedom to schedule low register pressure blocks.
      // Code is partially copied from MachineSchedulerBase::scheduleRegions().

      if (!LIS || StartingOccupancy <= MinOccupancy)
        break;

      LLVM_DEBUG(
          dbgs()
          << "Retrying function scheduling with lowest recorded occupancy "
          << MinOccupancy << ".\n");

      S.setTargetOccupancy(MinOccupancy);
    }

    for (auto Region : Regions) {
      RegionBegin = Region.first;
      RegionEnd = Region.second;

      if (RegionBegin->getParent() != MBB) {
        if (MBB) finishBlock();
        MBB = RegionBegin->getParent();
        startBlock(MBB);
        if (Stage == 1)
          computeBlockPressure(MBB);
      }

      unsigned NumRegionInstrs = std::distance(begin(), end());
      enterRegion(MBB, begin(), end(), NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (begin() == end() || begin() == std::prev(end())) {
        exitRegion();
        continue;
      }

      LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
      LLVM_DEBUG(dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " "
                        << MBB->getName() << "\n  From: " << *begin()
                        << "    To: ";
                 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
                 else dbgs() << "End";
                 dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');