//===-- GCNSchedStrategy.cpp - GCN Scheduler Strategy ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This contains a MachineSchedStrategy implementation for maximizing wave
/// occupancy on GCN hardware.
//===----------------------------------------------------------------------===//

#include "GCNSchedStrategy.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/Support/MathExtras.h"

#define DEBUG_TYPE "machine-scheduler"

using namespace llvm;

GCNMaxOccupancySchedStrategy::GCNMaxOccupancySchedStrategy(
    const MachineSchedContext *C) :
    GenericScheduler(C), TargetOccupancy(0), MF(nullptr) { }

void GCNMaxOccupancySchedStrategy::initialize(ScheduleDAGMI *DAG) {
  GenericScheduler::initialize(DAG);

  const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI);

  MF = &DAG->MF;

  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();

  // FIXME: This is also necessary, because some passes that run after
  // scheduling and before regalloc increase register pressure.
  const int ErrorMargin = 3;

  SGPRExcessLimit = Context->RegClassInfo
    ->getNumAllocatableRegs(&AMDGPU::SGPR_32RegClass) - ErrorMargin;
  VGPRExcessLimit = Context->RegClassInfo
    ->getNumAllocatableRegs(&AMDGPU::VGPR_32RegClass) - ErrorMargin;
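  // The excess limits above mark where allocation can no longer succeed
  // without spilling; the critical limits below mark where occupancy (the
  // number of simultaneously resident waves) starts to drop.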
  if (TargetOccupancy) {
    SGPRCriticalLimit = ST.getMaxNumSGPRs(TargetOccupancy, true);
    VGPRCriticalLimit = ST.getMaxNumVGPRs(TargetOccupancy);
  } else {
    SGPRCriticalLimit = SRI->getRegPressureSetLimit(DAG->MF,
        AMDGPU::RegisterPressureSets::SReg_32);
    VGPRCriticalLimit = SRI->getRegPressureSetLimit(DAG->MF,
        AMDGPU::RegisterPressureSets::VGPR_32);
  }

  SGPRCriticalLimit -= ErrorMargin;
  VGPRCriticalLimit -= ErrorMargin;
}

void GCNMaxOccupancySchedStrategy::initCandidate(SchedCandidate &Cand, SUnit *SU,
                                     bool AtTop, const RegPressureTracker &RPTracker,
                                     const SIRegisterInfo *SRI,
                                     unsigned SGPRPressure,
                                     unsigned VGPRPressure) {

  Cand.SU = SU;
  Cand.AtTop = AtTop;

  // getDownwardPressure() and getUpwardPressure() make temporary changes to
  // the tracker, so we need to pass those functions a non-const copy.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  Pressure.clear();
  MaxPressure.clear();

  if (AtTop)
    TempTracker.getDownwardPressure(SU->getInstr(), Pressure, MaxPressure);
  else {
    // FIXME: I think for bottom up scheduling, the register pressure is cached
    // and can be retrieved by DAG->getPressureDiff(SU).
    TempTracker.getUpwardPressure(SU->getInstr(), Pressure, MaxPressure);
  }

  unsigned NewSGPRPressure = Pressure[AMDGPU::RegisterPressureSets::SReg_32];
  unsigned NewVGPRPressure = Pressure[AMDGPU::RegisterPressureSets::VGPR_32];

  // If two instructions increase the pressure of different register sets
  // by the same amount, the generic scheduler will prefer to schedule the
  // instruction that increases the set with the smallest number of registers,
  // which in our case would be SGPRs. This is rarely what we want, so
  // when we report excess/critical register pressure, we do it either
  // only for VGPRs or only for SGPRs.

  // FIXME: Better heuristics to determine whether to prefer SGPRs or VGPRs.
  const unsigned MaxVGPRPressureInc = 16;
  bool ShouldTrackVGPRs = VGPRPressure + MaxVGPRPressureInc >= VGPRExcessLimit;
  bool ShouldTrackSGPRs = !ShouldTrackVGPRs && SGPRPressure >= SGPRExcessLimit;

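  // The 16-register headroom presumably approximates the largest pressure
  // bump a single instruction can cause (a full 512-bit tuple def is 16
  // VGPRs), so tracking starts before the excess limit is actually crossed.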
  // FIXME: We have to enter REG-EXCESS before we reach the actual threshold
  // to increase the likelihood we don't go over the limits. We should improve
  // the analysis to look through dependencies to find the path with the least
  // register pressure.

  // We only need to update the RPDelta for instructions that increase register
  // pressure. Instructions that decrease or keep reg pressure the same will be
  // marked as RegExcess in tryCandidate() when they are compared with
  // instructions that increase the register pressure.
  if (ShouldTrackVGPRs && NewVGPRPressure >= VGPRExcessLimit) {
    Cand.RPDelta.Excess = PressureChange(AMDGPU::RegisterPressureSets::VGPR_32);
    Cand.RPDelta.Excess.setUnitInc(NewVGPRPressure - VGPRExcessLimit);
  }

  if (ShouldTrackSGPRs && NewSGPRPressure >= SGPRExcessLimit) {
    Cand.RPDelta.Excess = PressureChange(AMDGPU::RegisterPressureSets::SReg_32);
    Cand.RPDelta.Excess.setUnitInc(NewSGPRPressure - SGPRExcessLimit);
  }

  // Register pressure is considered 'CRITICAL' if it is approaching a value
  // that would reduce the wave occupancy for the execution unit. When
  // register pressure is 'CRITICAL', increasing SGPR and VGPR pressure both
  // have the same cost, so we don't need to prefer one over the other.

  int SGPRDelta = NewSGPRPressure - SGPRCriticalLimit;
  int VGPRDelta = NewVGPRPressure - VGPRCriticalLimit;

  if (SGPRDelta >= 0 || VGPRDelta >= 0) {
    if (SGPRDelta > VGPRDelta) {
      Cand.RPDelta.CriticalMax =
        PressureChange(AMDGPU::RegisterPressureSets::SReg_32);
      Cand.RPDelta.CriticalMax.setUnitInc(SGPRDelta);
    } else {
      Cand.RPDelta.CriticalMax =
        PressureChange(AMDGPU::RegisterPressureSets::VGPR_32);
      Cand.RPDelta.CriticalMax.setUnitInc(VGPRDelta);
    }
  }
}

// This function is mostly cut and pasted from
// GenericScheduler::pickNodeFromQueue()
void GCNMaxOccupancySchedStrategy::pickNodeFromQueue(SchedBoundary &Zone,
                                         const CandPolicy &ZonePolicy,
                                         const RegPressureTracker &RPTracker,
                                         SchedCandidate &Cand) {
  const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI);
  ArrayRef<unsigned> Pressure = RPTracker.getRegSetPressureAtPos();
  unsigned SGPRPressure = Pressure[AMDGPU::RegisterPressureSets::SReg_32];
  unsigned VGPRPressure = Pressure[AMDGPU::RegisterPressureSets::VGPR_32];
  ReadyQueue &Q = Zone.Available;
  for (SUnit *SU : Q) {

    SchedCandidate TryCand(ZonePolicy);
    initCandidate(TryCand, SU, Zone.isTop(), RPTracker, SRI,
                  SGPRPressure, VGPRPressure);
    // Pass SchedBoundary only when comparing nodes from the same boundary.
    SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
    GenericScheduler::tryCandidate(Cand, TryCand, ZoneArg);
    if (TryCand.Reason != NoCand) {
      // Initialize resource delta if needed in case future heuristics query it.
      if (TryCand.ResDelta == SchedResourceDelta())
        TryCand.initResourceDelta(Zone.DAG, SchedModel);
      Cand.setBest(TryCand);
      LLVM_DEBUG(traceCandidate(Cand));
    }
  }
}

// This function is mostly cut and pasted from
// GenericScheduler::pickNodeBidirectional()
SUnit *GCNMaxOccupancySchedStrategy::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    return SU;
  }
  // Set the bottom-up policy based on the state of the current bottom zone and
  // the instructions outside the zone, including the top zone.
  CandPolicy BotPolicy;
  setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  CandPolicy TopPolicy;
  setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);

  // See if BotCand is still valid (because we previously scheduled from Top).
  LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
  if (!BotCand.isValid() || BotCand.SU->isScheduled ||
      BotCand.Policy != BotPolicy) {
    BotCand.reset(CandPolicy());
    pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
    assert(BotCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(BotCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
      assert(TCand.SU == BotCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Check if the top Q has a better candidate.
  LLVM_DEBUG(dbgs() << "Picking from Top:\n");
  if (!TopCand.isValid() || TopCand.SU->isScheduled ||
      TopCand.Policy != TopPolicy) {
    TopCand.reset(CandPolicy());
    pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
    assert(TopCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(TopCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
      assert(TCand.SU == TopCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Pick best from BotCand and TopCand.
  LLVM_DEBUG(dbgs() << "Top Cand: "; traceCandidate(TopCand);
             dbgs() << "Bot Cand: "; traceCandidate(BotCand););
  SchedCandidate Cand = BotCand;
  TopCand.Reason = NoCand;
  GenericScheduler::tryCandidate(Cand, TopCand, nullptr);
  if (TopCand.Reason != NoCand) {
    Cand.setBest(TopCand);
  }
  LLVM_DEBUG(dbgs() << "Picking: "; traceCandidate(Cand););

  IsTopNode = Cand.AtTop;
  return Cand.SU;
}

// This function is mostly cut and pasted from
// GenericScheduler::pickNode()
SUnit *GCNMaxOccupancySchedStrategy::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        TopCand.reset(NoPolicy);
        pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        SU = TopCand.SU;
      }
      IsTopNode = true;
    } else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        BotCand.reset(NoPolicy);
        pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        SU = BotCand.SU;
      }
      IsTopNode = false;
    } else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

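  // An SU scheduled from one boundary can still be sitting in the other
  // boundary's ready queue, so keep picking until an unscheduled SU is found.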
  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
                    << *SU->getInstr());
  return SU;
}

GCNScheduleDAGMILive::GCNScheduleDAGMILive(MachineSchedContext *C,
                        std::unique_ptr<MachineSchedStrategy> S) :
  ScheduleDAGMILive(C, std::move(S)),
  ST(MF.getSubtarget<GCNSubtarget>()),
  MFI(*MF.getInfo<SIMachineFunctionInfo>()),
  StartingOccupancy(MFI.getOccupancy()),
  MinOccupancy(StartingOccupancy), Stage(Collect), RegionIdx(0) {

  LLVM_DEBUG(dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n");
}

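// Occupancy is the number of waves resident per execution unit; more waves
// hide memory latency better, but each additional wave shrinks the register
// budget available to every wave.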
void GCNScheduleDAGMILive::schedule() {
  if (Stage == Collect) {
    // Just record regions at the first pass.
    Regions.push_back(std::make_pair(RegionBegin, RegionEnd));
    return;
  }

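  // Save the original instruction order; the schedule is reverted below if it
  // turns out to hurt occupancy or register pressure.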
  std::vector<MachineInstr*> Unsched;
  Unsched.reserve(NumRegionInstrs);
  for (auto &I : *this) {
    Unsched.push_back(&I);
  }

  GCNRegPressure PressureBefore;
  if (LIS) {
    PressureBefore = Pressure[RegionIdx];

    LLVM_DEBUG(dbgs() << "Pressure before scheduling:\nRegion live-ins:";
               GCNRPTracker::printLiveRegs(dbgs(), LiveIns[RegionIdx], MRI);
               dbgs() << "Region live-in pressure:  ";
               llvm::getRegPressure(MRI, LiveIns[RegionIdx]).print(dbgs());
               dbgs() << "Region register pressure: ";
               PressureBefore.print(dbgs()));
  }

  ScheduleDAGMILive::schedule();
  Regions[RegionIdx] = std::make_pair(RegionBegin, RegionEnd);
  RescheduleRegions[RegionIdx] = false;

  if (!LIS)
    return;

  // Check the results of scheduling.
  GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl;
  auto PressureAfter = getRealRegPressure();

  LLVM_DEBUG(dbgs() << "Pressure after scheduling: ";
             PressureAfter.print(dbgs()));

  if (PressureAfter.getSGPRNum() <= S.SGPRCriticalLimit &&
      PressureAfter.getVGPRNum() <= S.VGPRCriticalLimit) {
    Pressure[RegionIdx] = PressureAfter;
    LLVM_DEBUG(dbgs() << "Pressure in desired limits, done.\n");
    return;
  }
  unsigned Occ = MFI.getOccupancy();
  unsigned WavesAfter = std::min(Occ, PressureAfter.getOccupancy(ST));
  unsigned WavesBefore = std::min(Occ, PressureBefore.getOccupancy(ST));
  LLVM_DEBUG(dbgs() << "Occupancy before scheduling: " << WavesBefore
                    << ", after " << WavesAfter << ".\n");

  // We could not keep current target occupancy because of the just scheduled
  // region. Record new occupancy for next scheduling cycle.
  unsigned NewOccupancy = std::max(WavesAfter, WavesBefore);
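  // Note: getMinAllowedOccupancy() only drops below the current occupancy
  // (to at most 4 waves) for functions marked memory bound or needing a wave
  // limiter, so the exception below is limited to such functions.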
  // Allow memory bound functions to drop to 4 waves if not limited by an
  // attribute.
  if (WavesAfter < WavesBefore && WavesAfter < MinOccupancy &&
      WavesAfter >= MFI.getMinAllowedOccupancy()) {
    LLVM_DEBUG(dbgs() << "Function is memory bound, allow occupancy drop up to "
                      << MFI.getMinAllowedOccupancy() << " waves\n");
    NewOccupancy = WavesAfter;
  }
  if (NewOccupancy < MinOccupancy) {
    MinOccupancy = NewOccupancy;
    MFI.limitOccupancy(MinOccupancy);
    LLVM_DEBUG(dbgs() << "Occupancy lowered for the function to "
                      << MinOccupancy << ".\n");
  }

  unsigned MaxVGPRs = ST.getMaxNumVGPRs(MF);
  unsigned MaxSGPRs = ST.getMaxNumSGPRs(MF);
  if (PressureAfter.getVGPRNum() > MaxVGPRs ||
      PressureAfter.getSGPRNum() > MaxSGPRs)
    RescheduleRegions[RegionIdx] = true;

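  // Pressure above the function's absolute register budget guarantees
  // spilling, so such regions are always queued for another attempt.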
  if (WavesAfter >= MinOccupancy) {
    if (Stage == UnclusteredReschedule &&
        !PressureAfter.less(ST, PressureBefore)) {
      LLVM_DEBUG(dbgs() << "Unclustered reschedule did not help.\n");
    } else if (WavesAfter > MFI.getMinWavesPerEU() ||
        PressureAfter.less(ST, PressureBefore) ||
        !RescheduleRegions[RegionIdx]) {
      Pressure[RegionIdx] = PressureAfter;
      return;
    } else {
      LLVM_DEBUG(dbgs() << "New pressure will result in more spilling.\n");
    }
  }

  LLVM_DEBUG(dbgs() << "Attempting to revert scheduling.\n");
  RescheduleRegions[RegionIdx] = true;
  RegionEnd = RegionBegin;
  for (MachineInstr *MI : Unsched) {
    if (MI->isDebugInstr())
      continue;

    if (MI->getIterator() != RegionEnd) {
      BB->remove(MI);
      BB->insert(RegionEnd, MI);
      if (!MI->isDebugInstr())
        LIS->handleMove(*MI, true);
    }
    // Reset read-undef flags and update them later.
    for (auto &Op : MI->operands())
      if (Op.isReg() && Op.isDef())
        Op.setIsUndef(false);
    RegisterOperands RegOpers;
    RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
    if (!MI->isDebugInstr()) {
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }
    }
    RegionEnd = MI->getIterator();
    ++RegionEnd;
    LLVM_DEBUG(dbgs() << "Scheduling " << *MI);
  }
  RegionBegin = Unsched.front()->getIterator();
  Regions[RegionIdx] = std::make_pair(RegionBegin, RegionEnd);

  placeDebugValues();
}

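// Compute the region's actual maximum pressure by replaying it with the GCN
// downward RP tracker instead of relying on the generic tracker's estimate.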
GCNRegPressure GCNScheduleDAGMILive::getRealRegPressure() const {
  GCNDownwardRPTracker RPTracker(*LIS);
  RPTracker.advance(begin(), end(), &LiveIns[RegionIdx]);
  return RPTracker.moveMaxPressure();
}

void GCNScheduleDAGMILive::computeBlockPressure(const MachineBasicBlock *MBB) {
  GCNDownwardRPTracker RPTracker(*LIS);

  // If the block has only one successor then live-ins of that successor are
  // live-outs of the current block. We can reuse the calculated live set if
  // the successor will be sent to scheduling past the current block.
  const MachineBasicBlock *OnlySucc = nullptr;
  if (MBB->succ_size() == 1 && !(*MBB->succ_begin())->empty()) {
    SlotIndexes *Ind = LIS->getSlotIndexes();
    if (Ind->getMBBStartIdx(MBB) < Ind->getMBBStartIdx(*MBB->succ_begin()))
      OnlySucc = *MBB->succ_begin();
  }

  // Scheduler sends regions from the end of the block upwards.
  size_t CurRegion = RegionIdx;
  for (size_t E = Regions.size(); CurRegion != E; ++CurRegion)
    if (Regions[CurRegion].first->getParent() != MBB)
      break;
  --CurRegion;

  auto I = MBB->begin();
  auto LiveInIt = MBBLiveIns.find(MBB);
  if (LiveInIt != MBBLiveIns.end()) {
    auto LiveIn = std::move(LiveInIt->second);
    RPTracker.reset(*MBB->begin(), &LiveIn);
    MBBLiveIns.erase(LiveInIt);
  } else {
    auto &Rgn = Regions[CurRegion];
    I = Rgn.first;
    auto *NonDbgMI = &*skipDebugInstructionsForward(Rgn.first, Rgn.second);
    auto LRS = BBLiveInMap.lookup(NonDbgMI);
    assert(isEqual(getLiveRegsBefore(*NonDbgMI, *LIS), LRS));
    RPTracker.reset(*I, &LRS);
  }

  for ( ; ; ) {
    I = RPTracker.getNext();

    if (Regions[CurRegion].first == I) {
      LiveIns[CurRegion] = RPTracker.getLiveRegs();
      RPTracker.clearMaxPressure();
    }

    if (Regions[CurRegion].second == I) {
      Pressure[CurRegion] = RPTracker.moveMaxPressure();
      if (CurRegion-- == RegionIdx)
        break;
    }
    RPTracker.advanceToNext();
    RPTracker.advanceBeforeNext();
  }

  if (OnlySucc) {
    if (I != MBB->end()) {
      RPTracker.advanceToNext();
      RPTracker.advance(MBB->end());
    }
    RPTracker.reset(*OnlySucc->begin(), &RPTracker.getLiveRegs());
    RPTracker.advanceBeforeNext();
    MBBLiveIns[OnlySucc] = RPTracker.moveLiveRegs();
  }
}

DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet>
GCNScheduleDAGMILive::getBBLiveInMap() const {
  assert(!Regions.empty());
  std::vector<MachineInstr *> BBStarters;
  BBStarters.reserve(Regions.size());
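  // Regions within a block are recorded bottom-up, so a reverse walk visits
  // each block's first region first; its first non-debug instruction keys
  // the live-in map.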
  auto I = Regions.rbegin(), E = Regions.rend();
  auto *BB = I->first->getParent();
  do {
    auto *MI = &*skipDebugInstructionsForward(I->first, I->second);
    BBStarters.push_back(MI);
    do {
      ++I;
    } while (I != E && I->first->getParent() == BB);
  } while (I != E);
  return getLiveRegMap(BBStarters, false /*After*/, *LIS);
}

void GCNScheduleDAGMILive::finalizeSchedule() {
  GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl;
  LLVM_DEBUG(dbgs() << "All regions recorded, starting actual scheduling.\n");

  LiveIns.resize(Regions.size());
  Pressure.resize(Regions.size());
  RescheduleRegions.resize(Regions.size());
  RescheduleRegions.set();

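  // Every region is conservatively marked as needing a reschedule; the bit is
  // cleared once a region has been scheduled without problems.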
  if (!Regions.empty())
    BBLiveInMap = getBBLiveInMap();

  std::vector<std::unique_ptr<ScheduleDAGMutation>> SavedMutations;

  do {
    Stage++;
    RegionIdx = 0;
    MachineBasicBlock *MBB = nullptr;

    if (Stage > InitialSchedule) {
      if (!LIS)
        break;

      // Retry function scheduling if the resulting occupancy turned out to be
      // lower than the occupancy used for the first scheduling pass. This
      // gives more freedom when scheduling low register pressure blocks.
      // Code is partially copied from MachineSchedulerBase::scheduleRegions().

      if (Stage == UnclusteredReschedule) {
        if (RescheduleRegions.none())
          continue;
        LLVM_DEBUG(dbgs() <<
          "Retrying function scheduling without clustering.\n");
      }

      if (Stage == ClusteredLowOccupancyReschedule) {
        if (StartingOccupancy <= MinOccupancy)
          break;

        LLVM_DEBUG(
            dbgs()
            << "Retrying function scheduling with lowest recorded occupancy "
            << MinOccupancy << ".\n");

        S.setTargetOccupancy(MinOccupancy);
      }
    }

    if (Stage == UnclusteredReschedule)
      SavedMutations.swap(Mutations);

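    // The unclustered pass runs with the DAG mutations (e.g. load/store
    // clustering) swapped out, so pressure-reducing schedules become
    // reachable; the mutations are swapped back in after the region loop.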
    for (auto Region : Regions) {
      if (Stage == UnclusteredReschedule && !RescheduleRegions[RegionIdx])
        continue;

      RegionBegin = Region.first;
      RegionEnd = Region.second;

      if (RegionBegin->getParent() != MBB) {
        if (MBB) finishBlock();
        MBB = RegionBegin->getParent();
        startBlock(MBB);
        if (Stage == InitialSchedule)
          computeBlockPressure(MBB);
      }

      unsigned NumRegionInstrs = std::distance(begin(), end());
      enterRegion(MBB, begin(), end(), NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (begin() == end() || begin() == std::prev(end())) {
        exitRegion();
        continue;
      }

      LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
      LLVM_DEBUG(dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " "
                        << MBB->getName() << "\n  From: " << *begin()
                        << "    To: ";
                 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
                 else dbgs() << "End";
                 dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');

      schedule();

      exitRegion();
      ++RegionIdx;
    }
    finishBlock();

    if (Stage == UnclusteredReschedule)
      SavedMutations.swap(Mutations);
  } while (Stage != LastStage);
}