//===-- GCNSchedStrategy.cpp - GCN Scheduler Strategy ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This contains a MachineSchedStrategy implementation for maximizing wave
/// occupancy on GCN hardware.
//===----------------------------------------------------------------------===//

#include "GCNSchedStrategy.h"
#include "SIMachineFunctionInfo.h"

#define DEBUG_TYPE "machine-scheduler"

using namespace llvm;

GCNMaxOccupancySchedStrategy::GCNMaxOccupancySchedStrategy(
    const MachineSchedContext *C) :
    GenericScheduler(C), TargetOccupancy(0), HasClusteredNodes(false),
    HasExcessPressure(false), MF(nullptr) { }

void GCNMaxOccupancySchedStrategy::initialize(ScheduleDAGMI *DAG) {
  GenericScheduler::initialize(DAG);

  MF = &DAG->MF;

  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();

  // FIXME: This is also necessary, because some passes that run after
  // scheduling and before regalloc increase register pressure.
  const unsigned ErrorMargin = 3;

  SGPRExcessLimit =
      Context->RegClassInfo->getNumAllocatableRegs(&AMDGPU::SGPR_32RegClass);
  VGPRExcessLimit =
      Context->RegClassInfo->getNumAllocatableRegs(&AMDGPU::VGPR_32RegClass);

  SIMachineFunctionInfo &MFI = *MF->getInfo<SIMachineFunctionInfo>();
  // Set the initial TargetOccupancy to the maximum occupancy that we can
  // achieve for this function. This effectively sets a lower bound on the
  // 'Critical' register limits in the scheduler.
  TargetOccupancy = MFI.getOccupancy();
  SGPRCriticalLimit =
      std::min(ST.getMaxNumSGPRs(TargetOccupancy, true), SGPRExcessLimit);
  VGPRCriticalLimit =
      std::min(ST.getMaxNumVGPRs(TargetOccupancy), VGPRExcessLimit);

  // Subtract the error margin from the register limits; std::min guards
  // against unsigned wraparound if a limit is smaller than the margin.
  SGPRCriticalLimit =
      std::min(SGPRCriticalLimit - ErrorMargin, SGPRCriticalLimit);
  VGPRCriticalLimit =
      std::min(VGPRCriticalLimit - ErrorMargin, VGPRCriticalLimit);
  SGPRExcessLimit = std::min(SGPRExcessLimit - ErrorMargin, SGPRExcessLimit);
  VGPRExcessLimit = std::min(VGPRExcessLimit - ErrorMargin, VGPRExcessLimit);
}
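
// Illustrative arithmetic (added commentary with hypothetical numbers, not
// from upstream): with an SGPRExcessLimit of 104 and a critical limit that
// also comes out to 104, the margin gives std::min(104u - 3u, 104u) == 101.
// If a limit were ever smaller than the margin, the unsigned subtraction
// would wrap around to a huge value and std::min would keep the original
// limit, which is the wraparound guard the comment above refers to.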

void GCNMaxOccupancySchedStrategy::initCandidate(SchedCandidate &Cand, SUnit *SU,
                                     bool AtTop, const RegPressureTracker &RPTracker,
                                     const SIRegisterInfo *SRI,
                                     unsigned SGPRPressure,
                                     unsigned VGPRPressure) {

  Cand.SU = SU;
  Cand.AtTop = AtTop;

  // getDownwardPressure() and getUpwardPressure() make temporary changes to
  // the tracker, so we need to pass those functions a non-const copy.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  Pressure.clear();
  MaxPressure.clear();

  if (AtTop)
    TempTracker.getDownwardPressure(SU->getInstr(), Pressure, MaxPressure);
  else {
    // FIXME: I think for bottom up scheduling, the register pressure is cached
    // and can be retrieved by DAG->getPressureDiff(SU).
    TempTracker.getUpwardPressure(SU->getInstr(), Pressure, MaxPressure);
  }

  unsigned NewSGPRPressure = Pressure[AMDGPU::RegisterPressureSets::SReg_32];
  unsigned NewVGPRPressure = Pressure[AMDGPU::RegisterPressureSets::VGPR_32];

  // If two instructions increase the pressure of different register sets
  // by the same amount, the generic scheduler will prefer to schedule the
  // instruction that increases the set with the least amount of registers,
  // which in our case would be SGPRs. This is rarely what we want, so
  // when we report excess/critical register pressure, we do it either
  // only for VGPRs or only for SGPRs.

  // FIXME: Better heuristics to determine whether to prefer SGPRs or VGPRs.
  const unsigned MaxVGPRPressureInc = 16;
  bool ShouldTrackVGPRs = VGPRPressure + MaxVGPRPressureInc >= VGPRExcessLimit;
  bool ShouldTrackSGPRs = !ShouldTrackVGPRs && SGPRPressure >= SGPRExcessLimit;
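
  // Illustrative example (added commentary with hypothetical numbers, not
  // from upstream): with VGPRExcessLimit == 253 after the error margin, a
  // node considered while VGPRPressure == 240 gives 240 + 16 >= 253, so only
  // VGPR excess is reported below and SGPR excess is deliberately ignored.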

  // FIXME: We have to enter REG-EXCESS before we reach the actual threshold
  // to increase the likelihood we don't go over the limits. We should improve
  // the analysis to look through dependencies to find the path with the least
  // register pressure.

  // We only need to update the RPDelta for instructions that increase register
  // pressure. Instructions that decrease or keep reg pressure the same will be
  // marked as RegExcess in tryCandidate() when they are compared with
  // instructions that increase the register pressure.
  if (ShouldTrackVGPRs && NewVGPRPressure >= VGPRExcessLimit) {
    HasExcessPressure = true;
    Cand.RPDelta.Excess = PressureChange(AMDGPU::RegisterPressureSets::VGPR_32);
    Cand.RPDelta.Excess.setUnitInc(NewVGPRPressure - VGPRExcessLimit);
  }

  if (ShouldTrackSGPRs && NewSGPRPressure >= SGPRExcessLimit) {
    HasExcessPressure = true;
    Cand.RPDelta.Excess = PressureChange(AMDGPU::RegisterPressureSets::SReg_32);
    Cand.RPDelta.Excess.setUnitInc(NewSGPRPressure - SGPRExcessLimit);
  }

  // Register pressure is considered 'CRITICAL' if it is approaching a value
  // that would reduce the wave occupancy for the execution unit. When
  // register pressure is 'CRITICAL', increasing SGPR and VGPR pressure both
  // have the same cost, so we don't need to prefer one over the other.

  int SGPRDelta = NewSGPRPressure - SGPRCriticalLimit;
  int VGPRDelta = NewVGPRPressure - VGPRCriticalLimit;

  if (SGPRDelta >= 0 || VGPRDelta >= 0) {
    HasExcessPressure = true;
    if (SGPRDelta > VGPRDelta) {
      Cand.RPDelta.CriticalMax =
        PressureChange(AMDGPU::RegisterPressureSets::SReg_32);
      Cand.RPDelta.CriticalMax.setUnitInc(SGPRDelta);
    } else {
      Cand.RPDelta.CriticalMax =
        PressureChange(AMDGPU::RegisterPressureSets::VGPR_32);
      Cand.RPDelta.CriticalMax.setUnitInc(VGPRDelta);
    }
  }
}
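
// Worked example for the CRITICAL case (added commentary with hypothetical
// numbers, not from upstream): with SGPRCriticalLimit == 101 and
// VGPRCriticalLimit == 253, a node reaching NewSGPRPressure == 105 and
// NewVGPRPressure == 250 yields SGPRDelta == 4 and VGPRDelta == -3;
// SGPRDelta > VGPRDelta, so CriticalMax reports the SReg_32 set with a unit
// increase of 4.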

// This function is mostly cut and pasted from
// GenericScheduler::pickNodeFromQueue()
void GCNMaxOccupancySchedStrategy::pickNodeFromQueue(SchedBoundary &Zone,
                                         const CandPolicy &ZonePolicy,
                                         const RegPressureTracker &RPTracker,
                                         SchedCandidate &Cand) {
  const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI);
  ArrayRef<unsigned> Pressure = RPTracker.getRegSetPressureAtPos();
  unsigned SGPRPressure = Pressure[AMDGPU::RegisterPressureSets::SReg_32];
  unsigned VGPRPressure = Pressure[AMDGPU::RegisterPressureSets::VGPR_32];
  ReadyQueue &Q = Zone.Available;
  for (SUnit *SU : Q) {

    SchedCandidate TryCand(ZonePolicy);
    initCandidate(TryCand, SU, Zone.isTop(), RPTracker, SRI,
                  SGPRPressure, VGPRPressure);
    // Pass SchedBoundary only when comparing nodes from the same boundary.
    SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
    GenericScheduler::tryCandidate(Cand, TryCand, ZoneArg);
    if (TryCand.Reason != NoCand) {
      // Initialize resource delta if needed in case future heuristics query it.
      if (TryCand.ResDelta == SchedResourceDelta())
        TryCand.initResourceDelta(Zone.DAG, SchedModel);
      Cand.setBest(TryCand);
      LLVM_DEBUG(traceCandidate(Cand));
    }
  }
}
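
// Note (added commentary, not in upstream): GenericScheduler::tryCandidate()
// leaves TryCand.Reason == NoCand when the incumbent Cand is at least as
// good, so the Reason check above is how the loop detects that TryCand won
// the comparison.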

// This function is mostly cut and pasted from
// GenericScheduler::pickNodeBidirectional()
SUnit *GCNMaxOccupancySchedStrategy::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    return SU;
  }
  // Set the bottom-up policy based on the state of the current bottom zone and
  // the instructions outside the zone, including the top zone.
  CandPolicy BotPolicy;
  setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  CandPolicy TopPolicy;
  setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);

  // See if BotCand is still valid (because we previously scheduled from Top).
  LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
  if (!BotCand.isValid() || BotCand.SU->isScheduled ||
      BotCand.Policy != BotPolicy) {
    BotCand.reset(CandPolicy());
    pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
    assert(BotCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(BotCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
      assert(TCand.SU == BotCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Check if the top Q has a better candidate.
  LLVM_DEBUG(dbgs() << "Picking from Top:\n");
  if (!TopCand.isValid() || TopCand.SU->isScheduled ||
      TopCand.Policy != TopPolicy) {
    TopCand.reset(CandPolicy());
    pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
    assert(TopCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(TopCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
      assert(TCand.SU == TopCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Pick best from BotCand and TopCand.
  LLVM_DEBUG(dbgs() << "Top Cand: "; traceCandidate(TopCand);
             dbgs() << "Bot Cand: "; traceCandidate(BotCand););
  SchedCandidate Cand = BotCand;
  TopCand.Reason = NoCand;
  GenericScheduler::tryCandidate(Cand, TopCand, nullptr);
  if (TopCand.Reason != NoCand) {
    Cand.setBest(TopCand);
  }

  LLVM_DEBUG(dbgs() << "Picking: "; traceCandidate(Cand););

  IsTopNode = Cand.AtTop;
  return Cand.SU;
}
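
// Note (added commentary, not in upstream): the bottom candidate is adopted
// as the incumbent and TopCand.Reason is cleared before the final
// tryCandidate() call, so the top candidate only wins when some heuristic
// actively prefers it over the bottom pick.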

// This function is mostly cut and pasted from
// GenericScheduler::pickNode()
SUnit *GCNMaxOccupancySchedStrategy::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        TopCand.reset(NoPolicy);
        pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        SU = TopCand.SU;
      }
      IsTopNode = true;
    } else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        BotCand.reset(NoPolicy);
        pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        SU = BotCand.SU;
      }
      IsTopNode = false;
    } else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  if (!HasClusteredNodes && SU->getInstr()->mayLoadOrStore()) {
    for (SDep &Dep : SU->Preds) {
      if (Dep.isCluster()) {
        HasClusteredNodes = true;
        break;
      }
    }
  }

  LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
                    << *SU->getInstr());
  return SU;
}
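
// Note (added commentary, not in upstream): scanning only predecessor edges
// of memory operations is enough to detect clustering, because every cluster
// edge produced by the load/store clustering mutation connects two memory
// operations and is therefore seen from its successor endpoint; once found,
// HasClusteredNodes latches for the remainder of the region.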

GCNScheduleDAGMILive::GCNScheduleDAGMILive(MachineSchedContext *C,
                        std::unique_ptr<MachineSchedStrategy> S) :
  ScheduleDAGMILive(C, std::move(S)),
  ST(MF.getSubtarget<GCNSubtarget>()),
  MFI(*MF.getInfo<SIMachineFunctionInfo>()),
  StartingOccupancy(MFI.getOccupancy()),
  MinOccupancy(StartingOccupancy), Stage(Collect), RegionIdx(0) {

  LLVM_DEBUG(dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n");
}

void GCNScheduleDAGMILive::schedule() {
  if (Stage == Collect) {
    // Just record regions at the first pass.
    Regions.push_back(std::make_pair(RegionBegin, RegionEnd));
    return;
  }

  std::vector<MachineInstr*> Unsched;
  Unsched.reserve(NumRegionInstrs);
  for (auto &I : *this) {
    Unsched.push_back(&I);
  }

  GCNRegPressure PressureBefore;
  if (LIS) {
    PressureBefore = Pressure[RegionIdx];

    LLVM_DEBUG(dbgs() << "Pressure before scheduling:\nRegion live-ins:";
               GCNRPTracker::printLiveRegs(dbgs(), LiveIns[RegionIdx], MRI);
               dbgs() << "Region live-in pressure:  ";
               llvm::getRegPressure(MRI, LiveIns[RegionIdx]).print(dbgs());
               dbgs() << "Region register pressure: ";
               PressureBefore.print(dbgs()));
  }

  GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl;
  // Set HasClusteredNodes to true for late stages where we have already
  // collected it. That way pickNode() will not scan SDeps when not needed.
  S.HasClusteredNodes = Stage > InitialSchedule;
  S.HasExcessPressure = false;
  ScheduleDAGMILive::schedule();
  Regions[RegionIdx] = std::make_pair(RegionBegin, RegionEnd);
  RescheduleRegions[RegionIdx] = false;
  if (Stage == InitialSchedule && S.HasClusteredNodes)
    RegionsWithClusters[RegionIdx] = true;
  if (S.HasExcessPressure)
    RegionsWithHighRP[RegionIdx] = true;

  if (!LIS)
    return;

  // Check the results of scheduling.
  auto PressureAfter = getRealRegPressure();

  LLVM_DEBUG(dbgs() << "Pressure after scheduling: ";
             PressureAfter.print(dbgs()));

  if (PressureAfter.getSGPRNum() <= S.SGPRCriticalLimit &&
      PressureAfter.getVGPRNum(ST.hasGFX90AInsts()) <= S.VGPRCriticalLimit) {
    Pressure[RegionIdx] = PressureAfter;
    LLVM_DEBUG(dbgs() << "Pressure in desired limits, done.\n");
    return;
  }

  unsigned WavesAfter =
      std::min(S.TargetOccupancy, PressureAfter.getOccupancy(ST));
  unsigned WavesBefore =
      std::min(S.TargetOccupancy, PressureBefore.getOccupancy(ST));
  LLVM_DEBUG(dbgs() << "Occupancy before scheduling: " << WavesBefore
                    << ", after " << WavesAfter << ".\n");

  // We may not be able to keep the current target occupancy because of the just
  // scheduled region. We might still be able to revert scheduling if the
  // occupancy before was higher, or if the current schedule has register
  // pressure higher than the excess limits which could lead to more spilling.
  unsigned NewOccupancy = std::max(WavesAfter, WavesBefore);
  // Allow memory bound functions to drop to 4 waves if not limited by an
  // attribute.
  if (WavesAfter < WavesBefore && WavesAfter < MinOccupancy &&
      WavesAfter >= MFI.getMinAllowedOccupancy()) {
    LLVM_DEBUG(dbgs() << "Function is memory bound, allow occupancy drop up to "
                      << MFI.getMinAllowedOccupancy() << " waves\n");
    NewOccupancy = WavesAfter;
  }

  if (NewOccupancy < MinOccupancy) {
    MinOccupancy = NewOccupancy;
    MFI.limitOccupancy(MinOccupancy);
    LLVM_DEBUG(dbgs() << "Occupancy lowered for the function to "
                      << MinOccupancy << ".\n");
  }
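
  // Worked example (added commentary with hypothetical numbers, not from
  // upstream): WavesBefore == 6 and WavesAfter == 4 gives NewOccupancy == 6,
  // so the function's occupancy is not lowered; but if the function is memory
  // bound, 4 is below the running MinOccupancy, and
  // MFI.getMinAllowedOccupancy() <= 4, the branch above instead accepts
  // NewOccupancy == 4 and MinOccupancy is lowered to match.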

  unsigned MaxVGPRs = ST.getMaxNumVGPRs(MF);
  unsigned MaxSGPRs = ST.getMaxNumSGPRs(MF);
  if (PressureAfter.getVGPRNum(false) > MaxVGPRs ||
      PressureAfter.getAGPRNum() > MaxVGPRs ||
      PressureAfter.getSGPRNum() > MaxSGPRs) {
    RescheduleRegions[RegionIdx] = true;
    RegionsWithHighRP[RegionIdx] = true;
  }

  // If this condition is true, then either the occupancy before and after
  // scheduling is the same, or we are allowing the occupancy to drop because
  // the function is memory bound. Even if we are OK with the current occupancy,
  // we still need to verify that we will not introduce any extra chance of
  // spilling.
  if (WavesAfter >= MinOccupancy) {
    if (Stage == UnclusteredReschedule &&
        !PressureAfter.less(ST, PressureBefore)) {
      LLVM_DEBUG(dbgs() << "Unclustered reschedule did not help.\n");
    } else if (WavesAfter > MFI.getMinWavesPerEU() ||
               PressureAfter.less(ST, PressureBefore) ||
               !RescheduleRegions[RegionIdx]) {
      Pressure[RegionIdx] = PressureAfter;
      if (!RegionsWithClusters[RegionIdx] &&
          (Stage + 1) == UnclusteredReschedule)
        RescheduleRegions[RegionIdx] = false;
      return;
    } else {
      LLVM_DEBUG(dbgs() << "New pressure will result in more spilling.\n");
    }
  }

  LLVM_DEBUG(dbgs() << "Attempting to revert scheduling.\n");
  RescheduleRegions[RegionIdx] = RegionsWithClusters[RegionIdx] ||
                                 (Stage + 1) != UnclusteredReschedule;
  RegionEnd = RegionBegin;
  for (MachineInstr *MI : Unsched) {
    if (MI->isDebugInstr())
      continue;

    if (MI->getIterator() != RegionEnd) {
      BB->remove(MI);
      BB->insert(RegionEnd, MI);
      if (!MI->isDebugInstr())
        LIS->handleMove(*MI, true);
    }
    // Reset read-undef flags and update them later.
    for (auto &Op : MI->operands())
      if (Op.isReg() && Op.isDef())
        Op.setIsUndef(false);
    RegisterOperands RegOpers;
    RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
    if (!MI->isDebugInstr()) {
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }
    }
    RegionEnd = MI->getIterator();
    ++RegionEnd;
    LLVM_DEBUG(dbgs() << "Scheduling " << *MI);
  }
  RegionBegin = Unsched.front()->getIterator();
  Regions[RegionIdx] = std::make_pair(RegionBegin, RegionEnd);
}

GCNRegPressure GCNScheduleDAGMILive::getRealRegPressure() const {
  GCNDownwardRPTracker RPTracker(*LIS);
  RPTracker.advance(begin(), end(), &LiveIns[RegionIdx]);
  return RPTracker.moveMaxPressure();
}
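
// Note (added commentary, not in upstream): moveMaxPressure() returns the
// maximum pressure recorded while advancing across the region and resets the
// tracker's running maximum, so each call yields a fresh measurement.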

void GCNScheduleDAGMILive::computeBlockPressure(const MachineBasicBlock *MBB) {
  GCNDownwardRPTracker RPTracker(*LIS);

  // If the block has only one successor then live-ins of that successor are
  // live-outs of the current block. We can reuse the calculated live set if
  // the successor will be sent to scheduling past the current block.
  const MachineBasicBlock *OnlySucc = nullptr;
  if (MBB->succ_size() == 1 && !(*MBB->succ_begin())->empty()) {
    SlotIndexes *Ind = LIS->getSlotIndexes();
    if (Ind->getMBBStartIdx(MBB) < Ind->getMBBStartIdx(*MBB->succ_begin()))
      OnlySucc = *MBB->succ_begin();
  }

  // Scheduler sends regions from the end of the block upwards.
  size_t CurRegion = RegionIdx;
  for (size_t E = Regions.size(); CurRegion != E; ++CurRegion)
    if (Regions[CurRegion].first->getParent() != MBB)
      break;
  --CurRegion;

  auto I = MBB->begin();
  auto LiveInIt = MBBLiveIns.find(MBB);
  if (LiveInIt != MBBLiveIns.end()) {
    auto LiveIn = std::move(LiveInIt->second);
    RPTracker.reset(*MBB->begin(), &LiveIn);
    MBBLiveIns.erase(LiveInIt);
  } else {
    auto &Rgn = Regions[CurRegion];
    I = Rgn.first;
    auto *NonDbgMI = &*skipDebugInstructionsForward(Rgn.first, Rgn.second);
    auto LRS = BBLiveInMap.lookup(NonDbgMI);
#ifdef EXPENSIVE_CHECKS
    assert(isEqual(getLiveRegsBefore(*NonDbgMI, *LIS), LRS));
#endif
    RPTracker.reset(*I, &LRS);
  }

  for ( ; ; ) {
    I = RPTracker.getNext();

    if (Regions[CurRegion].first == I) {
      LiveIns[CurRegion] = RPTracker.getLiveRegs();
      RPTracker.clearMaxPressure();
    }

    if (Regions[CurRegion].second == I) {
      Pressure[CurRegion] = RPTracker.moveMaxPressure();
      if (CurRegion-- == RegionIdx)
        break;
    }
    RPTracker.advanceToNext();
    RPTracker.advanceBeforeNext();
  }

  if (OnlySucc) {
    if (I != MBB->end()) {
      RPTracker.advanceToNext();
      RPTracker.advance(MBB->end());
    }
    RPTracker.reset(*OnlySucc->begin(), &RPTracker.getLiveRegs());
    RPTracker.advanceBeforeNext();
    MBBLiveIns[OnlySucc] = RPTracker.moveLiveRegs();
  }
}
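
// Note (added commentary, not in upstream): the OnlySucc fast path above
// caches this block's live-out set in MBBLiveIns so that
// computeBlockPressure() for the successor can start from the cached set and
// skip the more expensive BBLiveInMap lookup.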

DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet>
GCNScheduleDAGMILive::getBBLiveInMap() const {
  assert(!Regions.empty());
  std::vector<MachineInstr *> BBStarters;
  BBStarters.reserve(Regions.size());
  auto I = Regions.rbegin(), E = Regions.rend();
  auto *BB = I->first->getParent();
  do {
    auto *MI = &*skipDebugInstructionsForward(I->first, I->second);
    BBStarters.push_back(MI);
    do {
      ++I;
    } while (I != E && I->first->getParent() == BB);
  } while (I != E);
  return getLiveRegMap(BBStarters, false /*After*/, *LIS);
}
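
// Note (added commentary, not in upstream): because regions within a block
// are recorded bottom-up, the reverse traversal visits each block's topmost
// region first, so BBStarters collects one representative instruction near
// the start of every block for the getLiveRegMap() query.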

void GCNScheduleDAGMILive::finalizeSchedule() {
  LLVM_DEBUG(dbgs() << "All regions recorded, starting actual scheduling.\n");

  LiveIns.resize(Regions.size());
  Pressure.resize(Regions.size());
  RescheduleRegions.resize(Regions.size());
  RegionsWithClusters.resize(Regions.size());
  RegionsWithHighRP.resize(Regions.size());
  RescheduleRegions.set();
  RegionsWithClusters.reset();
  RegionsWithHighRP.reset();

  if (!Regions.empty())
    BBLiveInMap = getBBLiveInMap();

  std::vector<std::unique_ptr<ScheduleDAGMutation>> SavedMutations;

  do {
    Stage++;
    RegionIdx = 0;
    MachineBasicBlock *MBB = nullptr;

    if (Stage > InitialSchedule) {
      if (!LIS)
        break;

      // Retry function scheduling if the resulting occupancy is lower than
      // the one used for the first pass of scheduling. This gives more
      // freedom to schedule low register pressure blocks.
      // Code is partially copied from MachineSchedulerBase::scheduleRegions().

      if (Stage == UnclusteredReschedule) {
        if (RescheduleRegions.none())
          continue;
        LLVM_DEBUG(dbgs() <<
          "Retrying function scheduling without clustering.\n");
      }

      if (Stage == ClusteredLowOccupancyReschedule) {
        if (StartingOccupancy <= MinOccupancy)
          break;

        LLVM_DEBUG(
            dbgs()
            << "Retrying function scheduling with lowest recorded occupancy "
            << MinOccupancy << ".\n");
      }
    }

    if (Stage == UnclusteredReschedule)
      SavedMutations.swap(Mutations);

    for (auto Region : Regions) {
      if ((Stage == UnclusteredReschedule && !RescheduleRegions[RegionIdx]) ||
          (Stage == ClusteredLowOccupancyReschedule &&
           !RegionsWithClusters[RegionIdx] && !RegionsWithHighRP[RegionIdx])) {

        ++RegionIdx;
        continue;
      }

      RegionBegin = Region.first;
      RegionEnd = Region.second;

      if (RegionBegin->getParent() != MBB) {
        if (MBB) finishBlock();
        MBB = RegionBegin->getParent();
        startBlock(MBB);
        if (Stage == InitialSchedule)
          computeBlockPressure(MBB);
      }

      unsigned NumRegionInstrs = std::distance(begin(), end());
      enterRegion(MBB, begin(), end(), NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (begin() == end() || begin() == std::prev(end())) {
        exitRegion();
        continue;
      }

      LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
      LLVM_DEBUG(dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " "
                        << MBB->getName() << "\n  From: " << *begin()
                        << "    To: ";
                 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
                 else dbgs() << "End";
                 dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');

      schedule();

      exitRegion();
      ++RegionIdx;
    }
    finishBlock();

    if (Stage == UnclusteredReschedule)
      SavedMutations.swap(Mutations);
  } while (Stage != LastStage);
}
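
// Overview (added commentary, not in upstream; stage names are assumed to
// match the enum in GCNSchedStrategy.h): scheduling runs in stages. Collect
// only records regions; InitialSchedule performs the normal pass and flags
// regions with cluster edges or excess pressure; UnclusteredReschedule
// retries flagged regions with the clustering mutations swapped out; and
// ClusteredLowOccupancyReschedule retries regions using the lowest recorded
// occupancy as the new target.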