1 //===- RegAllocGreedy.cpp - greedy register allocator ---------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the RAGreedy function pass for register allocation in optimized builds.
13 //===----------------------------------------------------------------------===//
15 #include "AllocationOrder.h"
16 #include "InterferenceCache.h"
17 #include "LiveDebugVariables.h"
18 #include "RegAllocBase.h"
19 #include "SpillPlacement.h"
22 #include "llvm/ADT/ArrayRef.h"
23 #include "llvm/ADT/BitVector.h"
24 #include "llvm/ADT/DenseMap.h"
25 #include "llvm/ADT/IndexedMap.h"
26 #include "llvm/ADT/MapVector.h"
27 #include "llvm/ADT/SetVector.h"
28 #include "llvm/ADT/SmallPtrSet.h"
29 #include "llvm/ADT/SmallSet.h"
30 #include "llvm/ADT/SmallVector.h"
31 #include "llvm/ADT/Statistic.h"
32 #include "llvm/ADT/StringRef.h"
33 #include "llvm/Analysis/AliasAnalysis.h"
34 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
35 #include "llvm/CodeGen/CalcSpillWeights.h"
36 #include "llvm/CodeGen/EdgeBundles.h"
37 #include "llvm/CodeGen/LiveInterval.h"
38 #include "llvm/CodeGen/LiveIntervalUnion.h"
39 #include "llvm/CodeGen/LiveIntervals.h"
40 #include "llvm/CodeGen/LiveRangeEdit.h"
41 #include "llvm/CodeGen/LiveRegMatrix.h"
42 #include "llvm/CodeGen/LiveStacks.h"
43 #include "llvm/CodeGen/MachineBasicBlock.h"
44 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
45 #include "llvm/CodeGen/MachineDominators.h"
46 #include "llvm/CodeGen/MachineFrameInfo.h"
47 #include "llvm/CodeGen/MachineFunction.h"
48 #include "llvm/CodeGen/MachineFunctionPass.h"
49 #include "llvm/CodeGen/MachineInstr.h"
50 #include "llvm/CodeGen/MachineLoopInfo.h"
51 #include "llvm/CodeGen/MachineOperand.h"
52 #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
53 #include "llvm/CodeGen/MachineRegisterInfo.h"
54 #include "llvm/CodeGen/RegAllocRegistry.h"
55 #include "llvm/CodeGen/RegisterClassInfo.h"
56 #include "llvm/CodeGen/SlotIndexes.h"
57 #include "llvm/CodeGen/TargetInstrInfo.h"
58 #include "llvm/CodeGen/TargetRegisterInfo.h"
59 #include "llvm/CodeGen/TargetSubtargetInfo.h"
60 #include "llvm/CodeGen/VirtRegMap.h"
61 #include "llvm/IR/Function.h"
62 #include "llvm/IR/LLVMContext.h"
63 #include "llvm/MC/MCRegisterInfo.h"
64 #include "llvm/Pass.h"
65 #include "llvm/Support/BlockFrequency.h"
66 #include "llvm/Support/BranchProbability.h"
67 #include "llvm/Support/CommandLine.h"
68 #include "llvm/Support/Debug.h"
69 #include "llvm/Support/MathExtras.h"
70 #include "llvm/Support/Timer.h"
71 #include "llvm/Support/raw_ostream.h"
72 #include "llvm/Target/TargetMachine.h"
83 #define DEBUG_TYPE "regalloc"
85 STATISTIC(NumGlobalSplits, "Number of split global live ranges");
86 STATISTIC(NumLocalSplits, "Number of split local live ranges");
87 STATISTIC(NumEvicted, "Number of interferences evicted");
89 static cl::opt<SplitEditor::ComplementSpillMode> SplitSpillMode(
90 "split-spill-mode", cl::Hidden,
91 cl::desc("Spill mode for splitting live ranges"),
92 cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
93 clEnumValN(SplitEditor::SM_Size, "size", "Optimize for size"),
94 clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed")),
95 cl::init(SplitEditor::SM_Speed));
97 static cl::opt<unsigned>
98 LastChanceRecoloringMaxDepth("lcr-max-depth", cl::Hidden,
99 cl::desc("Last chance recoloring max depth"),
102 static cl::opt<unsigned> LastChanceRecoloringMaxInterference(
103 "lcr-max-interf", cl::Hidden,
104 cl::desc("Last chance recoloring maximum number of considered"
105 " interference at a time"),
108 static cl::opt<bool> ExhaustiveSearch(
109 "exhaustive-register-search", cl::NotHidden,
110 cl::desc("Exhaustive Search for registers bypassing the depth "
111 "and interference cutoffs of last chance recoloring"),
114 static cl::opt<bool> EnableLocalReassignment(
115 "enable-local-reassign", cl::Hidden,
116 cl::desc("Local reassignment can yield better allocation decisions, but "
117 "may be compile time intensive"),
120 static cl::opt<bool> EnableDeferredSpilling(
121 "enable-deferred-spilling", cl::Hidden,
122 cl::desc("Instead of spilling a variable right away, defer the actual "
123 "code insertion to the end of the allocation. That way the "
124 "allocator might still find a suitable coloring for this "
125 "variable because of other evicted variables."),
128 static cl::opt<unsigned>
129 HugeSizeForSplit("huge-size-for-split", cl::Hidden,
130 cl::desc("A threshold of live range size which may cause "
131 "high compile time cost in global splitting."),
134 // FIXME: Find a good default for this flag and remove the flag.
135 static cl::opt<unsigned>
136 CSRFirstTimeCost("regalloc-csr-first-time-cost",
137 cl::desc("Cost for first time use of callee-saved register."),
138 cl::init(0), cl::Hidden);
140 static cl::opt<bool> ConsiderLocalIntervalCost(
141 "condsider-local-interval-cost", cl::Hidden,
142 cl::desc("Consider the cost of local intervals created by a split "
143 "candidate when choosing the best split candidate."),
146 static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
147 createGreedyRegisterAllocator);
151 class RAGreedy : public MachineFunctionPass,
153 private LiveRangeEdit::Delegate {
154 // Convenient shortcuts.
155 using PQueue = std::priority_queue<std::pair<unsigned, unsigned>>;
156 using SmallLISet = SmallPtrSet<LiveInterval *, 4>;
157 using SmallVirtRegSet = SmallSet<unsigned, 16>;
162 // Shortcuts to some useful interface.
163 const TargetInstrInfo *TII;
164 const TargetRegisterInfo *TRI;
165 RegisterClassInfo RCI;
168 SlotIndexes *Indexes;
169 MachineBlockFrequencyInfo *MBFI;
170 MachineDominatorTree *DomTree;
171 MachineLoopInfo *Loops;
172 MachineOptimizationRemarkEmitter *ORE;
173 EdgeBundles *Bundles;
174 SpillPlacement *SpillPlacer;
175 LiveDebugVariables *DebugVars;
179 std::unique_ptr<Spiller> SpillerInstance;
181 unsigned NextCascade;
183 // Live ranges pass through a number of stages as we try to allocate them.
184 // Some of the stages may also create new live ranges:
186 // - Region splitting.
187 // - Per-block splitting.
188 // - Local splitting.
191 // Ranges produced by one of the stages skip the previous stages when they are
192 // dequeued. This improves performance because we can skip interference checks
193 // that are unlikely to give any results. It also guarantees that the live
194 // range splitting algorithm terminates, something that is otherwise hard to ensure.
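// Illustrative progression (a sketch of the common path, not an exhaustive
// state machine): a hard-to-color live range typically moves through
//   RS_New -> RS_Assign -> RS_Split -> RS_Spill -> RS_Memory or RS_Done
// with each splitting stage producing new, smaller ranges that re-enter the
// queue at a later stage than the range they were split from.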
196 enum LiveRangeStage {
197 /// Newly created live range that has never been queued.
200 /// Only attempt assignment and eviction. Then requeue as RS_Split.
203 /// Attempt live range splitting if assignment is impossible.
206 /// Attempt more aggressive live range splitting that is guaranteed to make
207 /// progress. This is used for split products that may not be making
211 /// Live range will be spilled. No more splitting will be attempted.
215 /// Live range is in memory. Because of other evictions, it might get moved
216 /// into a register in the end.
219 /// There is nothing more we can do to this live range. Abort compilation
220 /// if it can't be assigned.
224 // Enum CutOffStage to keep track of whether the register allocation failed
225 // because of the cutoffs encountered in last chance recoloring.
226 // Note: This is used as a bitmask. A new value should be the next power of 2.
228 // No cutoffs encountered
231 // lcr-max-depth cutoff encountered
234 // lcr-max-interf cutoff encountered
241 static const char *const StageName[];
244 // RegInfo - Keep additional information about each live range.
246 LiveRangeStage Stage = RS_New;
248 // Cascade - Eviction loop prevention. See canEvictInterference().
249 unsigned Cascade = 0;
254 IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;
256 LiveRangeStage getStage(const LiveInterval &VirtReg) const {
257 return ExtraRegInfo[VirtReg.reg].Stage;
260 void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
261 ExtraRegInfo.resize(MRI->getNumVirtRegs());
262 ExtraRegInfo[VirtReg.reg].Stage = Stage;
265 template<typename Iterator>
266 void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
267 ExtraRegInfo.resize(MRI->getNumVirtRegs());
268 for (;Begin != End; ++Begin) {
269 unsigned Reg = *Begin;
270 if (ExtraRegInfo[Reg].Stage == RS_New)
271 ExtraRegInfo[Reg].Stage = NewStage;
275 /// Cost of evicting interference.
276 struct EvictionCost {
277 unsigned BrokenHints = 0; ///< Total number of broken hints.
278 float MaxWeight = 0; ///< Maximum spill weight evicted.
280 EvictionCost() = default;
282 bool isMax() const { return BrokenHints == ~0u; }
284 void setMax() { BrokenHints = ~0u; }
286 void setBrokenHints(unsigned NHints) { BrokenHints = NHints; }
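// Example (a sketch, not a requirement of the allocator): the comparison
// below is lexicographic on (BrokenHints, MaxWeight), so an eviction that
// breaks no hints but removes a heavy interference, say {0, 5.0f}, is still
// considered cheaper than one that breaks a single hint while removing a very
// light interference, say {1, 0.1f}.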
288 bool operator<(const EvictionCost &O) const {
289 return std::tie(BrokenHints, MaxWeight) <
290 std::tie(O.BrokenHints, O.MaxWeight);
294 /// EvictionTrack - Keeps track of past evictions in order to optimize region
296 class EvictionTrack {
300 std::pair<unsigned /* evictor */, unsigned /* physreg */>;
301 using EvicteeInfo = llvm::DenseMap<unsigned /* evictee */, EvictorInfo>;
304 /// Each Vreg that has been evicted in the last stage of selectOrSplit will
305 /// be mapped to the evictor Vreg and the PhysReg it was evicted from.
306 EvicteeInfo Evictees;
309 /// Clear all eviction information.
310 void clear() { Evictees.clear(); }
312 /// Clear eviction information for the given evictee Vreg.
313 /// E.g. when a Vreg gets a new allocation, the old eviction info is no
315 /// \param Evictee The evictee Vreg for whom we want to clear collected
317 void clearEvicteeInfo(unsigned Evictee) { Evictees.erase(Evictee); }
319 /// Track new eviction.
320 /// The Evictor vreg has evicted the Evictee vreg from Physreg.
321 /// \param PhysReg The physical register Evictee was evicted from.
322 /// \param Evictor The evictor Vreg that evicted Evictee.
323 /// \param Evictee The evictee Vreg.
324 void addEviction(unsigned PhysReg, unsigned Evictor, unsigned Evictee) {
325 Evictees[Evictee].first = Evictor;
326 Evictees[Evictee].second = PhysReg;
329 /// Return the Evictor Vreg which evicted Evictee Vreg from PhysReg.
330 /// \param Evictee The evictee vreg.
331 /// \return The Evictor vreg which evicted Evictee vreg from PhysReg. 0 if
332 /// nobody has evicted Evictee from PhysReg.
333 EvictorInfo getEvictor(unsigned Evictee) {
334 if (Evictees.count(Evictee)) {
335 return Evictees[Evictee];
338 return EvictorInfo(0, 0);
342 // Keeps track of past evictions in order to optimize region split decision.
343 EvictionTrack LastEvicted;
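// Usage sketch (illustrative, with made-up names): when vreg %1 evicts vreg
// %0 from physreg0, evictInterference() records the event via
//   LastEvicted.addEviction(physreg0, /*Evictor=*/%1, /*Evictee=*/%0);
// and a later split-cost query for %0 can recover the pair with
//   LastEvicted.getEvictor(%0);  // -> {%1, physreg0}, or {0, 0} if unknown
// which is how splitCanCauseEvictionChain() spots potential eviction chains.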
346 std::unique_ptr<SplitAnalysis> SA;
347 std::unique_ptr<SplitEditor> SE;
349 /// Cached per-block interference maps
350 InterferenceCache IntfCache;
352 /// All basic blocks where the current register has uses.
353 SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
355 /// Global live range splitting candidate info.
356 struct GlobalSplitCandidate {
357 // Register intended for assignment, or 0.
360 // SplitKit interval index for this candidate.
363 // Interference for PhysReg.
364 InterferenceCache::Cursor Intf;
366 // Bundles where this candidate should be live.
367 BitVector LiveBundles;
368 SmallVector<unsigned, 8> ActiveBlocks;
370 void reset(InterferenceCache &Cache, unsigned Reg) {
373 Intf.setPhysReg(Cache, Reg);
375 ActiveBlocks.clear();
378 // Set B[i] = C for every live bundle where B[i] was NoCand.
379 unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
381 for (unsigned i : LiveBundles.set_bits())
382 if (B[i] == NoCand) {
390 /// Candidate info for each PhysReg in AllocationOrder.
391 /// This vector never shrinks, but grows to the size of the largest register
393 SmallVector<GlobalSplitCandidate, 32> GlobalCand;
395 enum : unsigned { NoCand = ~0u };
397 /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
398 /// NoCand which indicates the stack interval.
399 SmallVector<unsigned, 32> BundleCand;
401 /// Callee-save register cost, calculated once per machine function.
402 BlockFrequency CSRCost;
404 /// Whether to run the local reassignment heuristic. This information is
405 /// obtained from the TargetSubtargetInfo.
406 bool EnableLocalReassign;
408 /// Whether to consider the cost of local intervals created by a split
409 /// candidate when choosing the best split candidate.
410 bool EnableAdvancedRASplitCost;
412 /// Set of broken hints that may be reconciled later because of eviction.
413 SmallSetVector<LiveInterval *, 8> SetOfBrokenHints;
418 /// Return the pass name.
419 StringRef getPassName() const override { return "Greedy Register Allocator"; }
421 /// RAGreedy analysis usage.
422 void getAnalysisUsage(AnalysisUsage &AU) const override;
423 void releaseMemory() override;
424 Spiller &spiller() override { return *SpillerInstance; }
425 void enqueue(LiveInterval *LI) override;
426 LiveInterval *dequeue() override;
427 unsigned selectOrSplit(LiveInterval&, SmallVectorImpl<unsigned>&) override;
428 void aboutToRemoveInterval(LiveInterval &) override;
430 /// Perform register allocation.
431 bool runOnMachineFunction(MachineFunction &mf) override;
433 MachineFunctionProperties getRequiredProperties() const override {
434 return MachineFunctionProperties().set(
435 MachineFunctionProperties::Property::NoPHIs);
441 unsigned selectOrSplitImpl(LiveInterval &, SmallVectorImpl<unsigned> &,
442 SmallVirtRegSet &, unsigned = 0);
444 bool LRE_CanEraseVirtReg(unsigned) override;
445 void LRE_WillShrinkVirtReg(unsigned) override;
446 void LRE_DidCloneVirtReg(unsigned, unsigned) override;
447 void enqueue(PQueue &CurQueue, LiveInterval *LI);
448 LiveInterval *dequeue(PQueue &CurQueue);
450 BlockFrequency calcSpillCost();
451 bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency&);
452 bool addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
453 bool growRegion(GlobalSplitCandidate &Cand);
454 bool splitCanCauseEvictionChain(unsigned Evictee, GlobalSplitCandidate &Cand,
456 const AllocationOrder &Order);
457 bool splitCanCauseLocalSpill(unsigned VirtRegToSplit,
458 GlobalSplitCandidate &Cand, unsigned BBNumber,
459 const AllocationOrder &Order);
460 BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate &,
461 const AllocationOrder &Order,
462 bool *CanCauseEvictionChain);
463 bool calcCompactRegion(GlobalSplitCandidate&);
464 void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
465 void calcGapWeights(unsigned, SmallVectorImpl<float>&);
466 unsigned canReassign(LiveInterval &VirtReg, unsigned PrevReg);
467 bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
468 bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
469 bool canEvictInterferenceInRange(LiveInterval &VirtReg, unsigned PhysReg,
470 SlotIndex Start, SlotIndex End,
471 EvictionCost &MaxCost);
472 unsigned getCheapestEvicteeWeight(const AllocationOrder &Order,
473 LiveInterval &VirtReg, SlotIndex Start,
474 SlotIndex End, float *BestEvictWeight);
475 void evictInterference(LiveInterval&, unsigned,
476 SmallVectorImpl<unsigned>&);
477 bool mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
478 SmallLISet &RecoloringCandidates,
479 const SmallVirtRegSet &FixedRegisters);
481 unsigned tryAssign(LiveInterval&, AllocationOrder&,
482 SmallVectorImpl<unsigned>&);
483 unsigned tryEvict(LiveInterval&, AllocationOrder&,
484 SmallVectorImpl<unsigned>&, unsigned = ~0u);
485 unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
486 SmallVectorImpl<unsigned>&);
487 unsigned isSplitBenefitWorthCost(LiveInterval &VirtReg);
488 /// Calculate cost of region splitting.
489 unsigned calculateRegionSplitCost(LiveInterval &VirtReg,
490 AllocationOrder &Order,
491 BlockFrequency &BestCost,
492 unsigned &NumCands, bool IgnoreCSR,
493 bool *CanCauseEvictionChain = nullptr);
494 /// Perform region splitting.
495 unsigned doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
497 SmallVectorImpl<unsigned> &NewVRegs);
498 /// Check other options before using a callee-saved register for the first
500 unsigned tryAssignCSRFirstTime(LiveInterval &VirtReg, AllocationOrder &Order,
501 unsigned PhysReg, unsigned &CostPerUseLimit,
502 SmallVectorImpl<unsigned> &NewVRegs);
503 void initializeCSRCost();
504 unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
505 SmallVectorImpl<unsigned>&);
506 unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
507 SmallVectorImpl<unsigned>&);
508 unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
509 SmallVectorImpl<unsigned>&);
510 unsigned trySplit(LiveInterval&, AllocationOrder&,
511 SmallVectorImpl<unsigned>&);
512 unsigned tryLastChanceRecoloring(LiveInterval &, AllocationOrder &,
513 SmallVectorImpl<unsigned> &,
514 SmallVirtRegSet &, unsigned);
515 bool tryRecoloringCandidates(PQueue &, SmallVectorImpl<unsigned> &,
516 SmallVirtRegSet &, unsigned);
517 void tryHintRecoloring(LiveInterval &);
518 void tryHintsRecoloring();
520 /// Model the information carried by one end of a copy.
522 /// The frequency of the copy.
524 /// The virtual register or physical register.
526 /// Its currently assigned register.
527 /// In case of a physical register Reg == PhysReg.
530 HintInfo(BlockFrequency Freq, unsigned Reg, unsigned PhysReg)
531 : Freq(Freq), Reg(Reg), PhysReg(PhysReg) {}
533 using HintsInfo = SmallVector<HintInfo, 4>;
535 BlockFrequency getBrokenHintFreq(const HintsInfo &, unsigned);
536 void collectHintInfo(unsigned, HintsInfo &);
538 bool isUnusedCalleeSavedReg(unsigned PhysReg) const;
540 /// Compute and report the number of spills and reloads for a loop.
541 void reportNumberOfSplillsReloads(MachineLoop *L, unsigned &Reloads,
542 unsigned &FoldedReloads, unsigned &Spills,
543 unsigned &FoldedSpills);
545 /// Report the number of spills and reloads for each loop.
546 void reportNumberOfSplillsReloads() {
547 for (MachineLoop *L : *Loops) {
548 unsigned Reloads, FoldedReloads, Spills, FoldedSpills;
549 reportNumberOfSplillsReloads(L, Reloads, FoldedReloads, Spills,
555 } // end anonymous namespace
557 char RAGreedy::ID = 0;
558 char &llvm::RAGreedyID = RAGreedy::ID;
560 INITIALIZE_PASS_BEGIN(RAGreedy, "greedy",
561 "Greedy Register Allocator", false, false)
562 INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables)
563 INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
564 INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
565 INITIALIZE_PASS_DEPENDENCY(RegisterCoalescer)
566 INITIALIZE_PASS_DEPENDENCY(MachineScheduler)
567 INITIALIZE_PASS_DEPENDENCY(LiveStacks)
568 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
569 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
570 INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
571 INITIALIZE_PASS_DEPENDENCY(LiveRegMatrix)
572 INITIALIZE_PASS_DEPENDENCY(EdgeBundles)
573 INITIALIZE_PASS_DEPENDENCY(SpillPlacement)
574 INITIALIZE_PASS_DEPENDENCY(MachineOptimizationRemarkEmitterPass)
575 INITIALIZE_PASS_END(RAGreedy, "greedy",
576 "Greedy Register Allocator", false, false)
579 const char *const RAGreedy::StageName[] = {
590 // Hysteresis to use when comparing floats.
591 // This helps stabilize decisions based on float comparisons.
592 const float Hysteresis = (2007 / 2048.0f); // 0.97998046875
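// Sketch of the intended use (assumed, not quoted from the code below): when
// two floating-point costs are compared, the challenger has to beat the
// incumbent by roughly 2%, e.g.
//   if (NewCost < BestCost * Hysteresis) { /* prefer the new candidate */ }
// so that tiny rounding differences do not flip earlier decisions.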
594 FunctionPass* llvm::createGreedyRegisterAllocator() {
595 return new RAGreedy();
598 RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
601 void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
602 AU.setPreservesCFG();
603 AU.addRequired<MachineBlockFrequencyInfo>();
604 AU.addPreserved<MachineBlockFrequencyInfo>();
605 AU.addRequired<AAResultsWrapperPass>();
606 AU.addPreserved<AAResultsWrapperPass>();
607 AU.addRequired<LiveIntervals>();
608 AU.addPreserved<LiveIntervals>();
609 AU.addRequired<SlotIndexes>();
610 AU.addPreserved<SlotIndexes>();
611 AU.addRequired<LiveDebugVariables>();
612 AU.addPreserved<LiveDebugVariables>();
613 AU.addRequired<LiveStacks>();
614 AU.addPreserved<LiveStacks>();
615 AU.addRequired<MachineDominatorTree>();
616 AU.addPreserved<MachineDominatorTree>();
617 AU.addRequired<MachineLoopInfo>();
618 AU.addPreserved<MachineLoopInfo>();
619 AU.addRequired<VirtRegMap>();
620 AU.addPreserved<VirtRegMap>();
621 AU.addRequired<LiveRegMatrix>();
622 AU.addPreserved<LiveRegMatrix>();
623 AU.addRequired<EdgeBundles>();
624 AU.addRequired<SpillPlacement>();
625 AU.addRequired<MachineOptimizationRemarkEmitterPass>();
626 MachineFunctionPass::getAnalysisUsage(AU);
629 //===----------------------------------------------------------------------===//
630 // LiveRangeEdit delegate methods
631 //===----------------------------------------------------------------------===//
633 bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
634 LiveInterval &LI = LIS->getInterval(VirtReg);
635 if (VRM->hasPhys(VirtReg)) {
636 Matrix->unassign(LI);
637 aboutToRemoveInterval(LI);
640 // Unassigned virtreg is probably in the priority queue.
641 // RegAllocBase will erase it after dequeueing.
642 // Nonetheless, clear the live-range so that the debug
643 // dump will show the right state for that VirtReg.
648 void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
649 if (!VRM->hasPhys(VirtReg))
652 // Register is assigned, put it back on the queue for reassignment.
653 LiveInterval &LI = LIS->getInterval(VirtReg);
654 Matrix->unassign(LI);
658 void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
659 // Cloning a register we haven't even heard about yet? Just ignore it.
660 if (!ExtraRegInfo.inBounds(Old))
663 // LRE may clone a virtual register because dead code elimination causes it to
664 // be split into connected components. The new components are much smaller
665 // than the original, so they should get a new chance at being assigned.
666 // The clone inherits the same stage as the parent, which is reset to RS_Assign.
667 ExtraRegInfo[Old].Stage = RS_Assign;
668 ExtraRegInfo.grow(New);
669 ExtraRegInfo[New] = ExtraRegInfo[Old];
672 void RAGreedy::releaseMemory() {
673 SpillerInstance.reset();
674 ExtraRegInfo.clear();
678 void RAGreedy::enqueue(LiveInterval *LI) { enqueue(Queue, LI); }
680 void RAGreedy::enqueue(PQueue &CurQueue, LiveInterval *LI) {
681 // Prioritize live ranges by size, assigning larger ranges first.
682 // The queue holds (size, reg) pairs.
683 const unsigned Size = LI->getSize();
684 const unsigned Reg = LI->reg;
685 assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
686 "Can only enqueue virtual registers");
689 ExtraRegInfo.grow(Reg);
690 if (ExtraRegInfo[Reg].Stage == RS_New)
691 ExtraRegInfo[Reg].Stage = RS_Assign;
693 if (ExtraRegInfo[Reg].Stage == RS_Split) {
694 // Unsplit ranges that couldn't be allocated immediately are deferred until
695 // everything else has been allocated.
697 } else if (ExtraRegInfo[Reg].Stage == RS_Memory) {
698 // Memory operands should be considered last.
699 // Change the priority such that memory operands are assigned in
700 // the reverse order in which they came in.
701 // TODO: Make this a member variable and probably do something about hints.
702 static unsigned MemOp = 0;
705 // Giant live ranges fall back to the global assignment heuristic, which
706 // prevents excessive spilling in pathological cases.
707 bool ReverseLocal = TRI->reverseLocalAssignment();
708 const TargetRegisterClass &RC = *MRI->getRegClass(Reg);
709 bool ForceGlobal = !ReverseLocal &&
710 (Size / SlotIndex::InstrDist) > (2 * RC.getNumRegs());
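// For example (a rough reading of the formula above): with a register class
// containing 32 registers, a single-block range covering more than about
// 64 instructions (Size / SlotIndex::InstrDist > 2 * 32) is treated as giant
// and forced into the global heuristic even though it is local.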
712 if (ExtraRegInfo[Reg].Stage == RS_Assign && !ForceGlobal && !LI->empty() &&
713 LIS->intervalIsInOneMBB(*LI)) {
714 // Allocate original local ranges in linear instruction order. Since they
715 // are singly defined, this produces optimal coloring in the absence of
716 // global interference and other constraints.
718 Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
720 // Allocating bottom up may allow many short live ranges to be assigned first
721 // to one of the cheap registers. This could be much faster for very
722 // large blocks on targets with many physical registers.
723 Prio = Indexes->getZeroIndex().getInstrDistance(LI->endIndex());
725 Prio |= RC.AllocationPriority << 24;
727 // Allocate global and split ranges in long->short order. Long ranges that
728 // don't fit should be spilled (or split) ASAP so they don't create
729 // interference. Mark a bit to prioritize global above local ranges.
730 Prio = (1u << 29) + Size;
732 // Mark a higher bit to prioritize global and local above RS_Split.
735 // Boost ranges that have a physical register hint.
736 if (VRM->hasKnownPreference(Reg))
739 // The virtual register number is a tie breaker for same-sized ranges.
740 // Give lower vreg numbers higher priority to assign them first.
741 CurQueue.push(std::make_pair(Prio, ~Reg));
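// Sketch of the priority word assembled above (only the shifts visible in
// this function are cited; the remaining boost bits are set by the statements
// the comments refer to): the low bits hold either an instruction distance
// (local ranges) or the range size plus bit 29 (global ranges), bits 24 and
// up carry the register class AllocationPriority, and the stage/hint boosts
// occupy still higher bits. Pushing ~Reg as the pair's second element makes
// lower vreg numbers compare greater, so they are dequeued first among
// entries of equal priority.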
744 LiveInterval *RAGreedy::dequeue() { return dequeue(Queue); }
746 LiveInterval *RAGreedy::dequeue(PQueue &CurQueue) {
747 if (CurQueue.empty())
749 LiveInterval *LI = &LIS->getInterval(~CurQueue.top().second);
754 //===----------------------------------------------------------------------===//
756 //===----------------------------------------------------------------------===//
758 /// tryAssign - Try to assign VirtReg to an available register.
759 unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
760 AllocationOrder &Order,
761 SmallVectorImpl<unsigned> &NewVRegs) {
764 while ((PhysReg = Order.next()))
765 if (!Matrix->checkInterference(VirtReg, PhysReg))
767 if (!PhysReg || Order.isHint())
770 // PhysReg is available, but there may be a better choice.
772 // If we missed a simple hint, try to cheaply evict interference from the
773 // preferred register.
774 if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
775 if (Order.isHint(Hint)) {
776 LLVM_DEBUG(dbgs() << "missed hint " << printReg(Hint, TRI) << '\n');
777 EvictionCost MaxCost;
778 MaxCost.setBrokenHints(1);
779 if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
780 evictInterference(VirtReg, Hint, NewVRegs);
783 // Record the missed hint; we may be able to recover
784 // at the end if the surrounding allocation changed.
785 SetOfBrokenHints.insert(&VirtReg);
788 // Try to evict interference from a cheaper alternative.
789 unsigned Cost = TRI->getCostPerUse(PhysReg);
791 // Most registers have 0 additional cost.
795 LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << " is available at cost "
797 unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
798 return CheapReg ? CheapReg : PhysReg;
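// Illustrative outcome of the logic above (a sketch): if the first available
// register in the order carries a nonzero per-use cost, tryEvict() is asked
// for an alternative using that cost as its limit; a strictly cheaper
// register wins if one is found, otherwise the originally found PhysReg is
// kept.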
801 //===----------------------------------------------------------------------===//
802 // Interference eviction
803 //===----------------------------------------------------------------------===//
805 unsigned RAGreedy::canReassign(LiveInterval &VirtReg, unsigned PrevReg) {
806 AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo, Matrix);
808 while ((PhysReg = Order.next())) {
809 if (PhysReg == PrevReg)
812 MCRegUnitIterator Units(PhysReg, TRI);
813 for (; Units.isValid(); ++Units) {
814 // Instantiate a "subquery", not to be confused with the Queries array.
815 LiveIntervalUnion::Query subQ(VirtReg, Matrix->getLiveUnions()[*Units]);
816 if (subQ.checkInterference())
819 // If no units have interference, break out with the current PhysReg.
820 if (!Units.isValid())
824 LLVM_DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
825 << printReg(PrevReg, TRI) << " to "
826 << printReg(PhysReg, TRI) << '\n');
830 /// shouldEvict - determine if A should evict the assigned live range B. The
831 /// eviction policy defined by this function together with the allocation order
832 /// defined by enqueue() decides which registers ultimately end up being split
835 /// Cascade numbers are used to prevent infinite loops if this function is a cyclic relation.
838 /// @param A The live range to be assigned.
839 /// @param IsHint True when A is about to be assigned to its preferred
841 /// @param B The live range to be evicted.
842 /// @param BreaksHint True when B is already assigned to its preferred register.
843 bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
844 LiveInterval &B, bool BreaksHint) {
845 bool CanSplit = getStage(B) < RS_Spill;
847 // Be fairly aggressive about following hints as long as the evictee can be
849 if (CanSplit && IsHint && !BreaksHint)
852 if (A.weight > B.weight) {
853 LLVM_DEBUG(dbgs() << "should evict: " << B << " w= " << B.weight << '\n');
859 /// canEvictInterference - Return true if all interferences between VirtReg and
860 /// PhysReg can be evicted.
862 /// @param VirtReg Live range that is about to be assigned.
863 /// @param PhysReg Desired register for assignment.
864 /// @param IsHint True when PhysReg is VirtReg's preferred register.
865 /// @param MaxCost Only look for cheaper candidates and update with new cost
866 /// when returning true.
867 /// @returns True when interference can be evicted cheaper than MaxCost.
868 bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
869 bool IsHint, EvictionCost &MaxCost) {
870 // It is only possible to evict virtual register interference.
871 if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
874 bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);
876 // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
877 // involved in an eviction before. If a cascade number was assigned, deny
878 // evicting anything with the same or a newer cascade number. This prevents
879 // infinite eviction loops.
881 // This works out so a register without a cascade number is allowed to evict
882 // anything, and it can be evicted by anything.
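// Worked example (illustrative numbers): if %a is handed cascade 3 when it
// evicts %b, evictInterference() stamps %b with cascade 3 as well. Should %b
// later try to evict %a (or any range whose cascade is >= 3), the
// "Cascade <= IntfCascade" test below rejects it (barring the urgent-eviction
// escape hatch), so the same register cannot ping-pong between the two.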
883 unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
885 Cascade = NextCascade;
888 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
889 LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
890 // If there are 10 or more interferences, chances are one is heavier.
891 if (Q.collectInterferingVRegs(10) >= 10)
894 // Check if any interfering live range is heavier than MaxWeight.
895 for (unsigned i = Q.interferingVRegs().size(); i; --i) {
896 LiveInterval *Intf = Q.interferingVRegs()[i - 1];
897 assert(TargetRegisterInfo::isVirtualRegister(Intf->reg) &&
898 "Only expecting virtual register interference from query");
899 // Never evict spill products. They cannot split or spill.
900 if (getStage(*Intf) == RS_Done)
902 // Once a live range becomes small enough, it is urgent that we find a
903 // register for it. This is indicated by an infinite spill weight. These
904 // urgent live ranges get to evict almost anything.
906 // Also allow urgent evictions of unspillable ranges from a strictly
907 // larger allocation order.
908 bool Urgent = !VirtReg.isSpillable() &&
909 (Intf->isSpillable() ||
910 RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) <
911 RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg)));
912 // Only evict older cascades or live ranges without a cascade.
913 unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
914 if (Cascade <= IntfCascade) {
917 // We permit breaking cascades for urgent evictions. It should be the
918 // last resort, though, so make it really expensive.
919 Cost.BrokenHints += 10;
921 // Would this break a satisfied hint?
922 bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
923 // Update eviction cost.
924 Cost.BrokenHints += BreaksHint;
925 Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
926 // Abort if this would be too expensive.
927 if (!(Cost < MaxCost))
931 // Apply the eviction policy for non-urgent evictions.
932 if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
934 // If !MaxCost.isMax(), then we're just looking for a cheap register.
935 // Evicting another local live range in this case could lead to suboptimal
937 if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
938 (!EnableLocalReassign || !canReassign(*Intf, PhysReg))) {
947 /// Return true if all interferences between VirtReg and PhysReg between
948 /// Start and End can be evicted.
950 /// \param VirtReg Live range that is about to be assigned.
951 /// \param PhysReg Desired register for assignment.
952 /// \param Start Start of range to look for interferences.
953 /// \param End End of range to look for interferences.
954 /// \param MaxCost Only look for cheaper candidates and update with new cost
955 /// when returning true.
956 /// \return True when interference can be evicted cheaper than MaxCost.
957 bool RAGreedy::canEvictInterferenceInRange(LiveInterval &VirtReg,
958 unsigned PhysReg, SlotIndex Start,
960 EvictionCost &MaxCost) {
963 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
964 LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
966 // Check if any interfering live range is heavier than MaxWeight.
967 for (unsigned i = Q.interferingVRegs().size(); i; --i) {
968 LiveInterval *Intf = Q.interferingVRegs()[i - 1];
970 // Check if the interference overlaps the segment of interest.
971 if (!Intf->overlaps(Start, End))
974 // Cannot evict non-virtual register interference.
975 if (!TargetRegisterInfo::isVirtualRegister(Intf->reg))
977 // Never evict spill products. They cannot split or spill.
978 if (getStage(*Intf) == RS_Done)
981 // Would this break a satisfied hint?
982 bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
983 // Update eviction cost.
984 Cost.BrokenHints += BreaksHint;
985 Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
986 // Abort if this would be too expensive.
987 if (!(Cost < MaxCost))
992 if (Cost.MaxWeight == 0)
999 /// Return the physical register that will be best
1000 /// candidate for eviction by a local split interval that will be created
1001 /// between Start and End.
1003 /// \param Order The allocation order
1004 /// \param VirtReg Live range that is about to be assigned.
1005 /// \param Start Start of range to look for interferences
1006 /// \param End End of range to look for interferences
1007 /// \param BestEvictWeight The weight of the cheapest evictee (output).
1008 /// \return The PhysReg which is the best candidate for eviction and the
1009 /// eviction weight in BestEvictWeight
1010 unsigned RAGreedy::getCheapestEvicteeWeight(const AllocationOrder &Order,
1011 LiveInterval &VirtReg,
1012 SlotIndex Start, SlotIndex End,
1013 float *BestEvictWeight) {
1014 EvictionCost BestEvictCost;
1015 BestEvictCost.setMax();
1016 BestEvictCost.MaxWeight = VirtReg.weight;
1017 unsigned BestEvicteePhys = 0;
1019 // Go over all physical registers and find the best candidate for eviction
1020 for (auto PhysReg : Order.getOrder()) {
1022 if (!canEvictInterferenceInRange(VirtReg, PhysReg, Start, End,
1027 BestEvicteePhys = PhysReg;
1029 *BestEvictWeight = BestEvictCost.MaxWeight;
1030 return BestEvicteePhys;
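// Usage sketch: splitCanCauseEvictionChain() and splitCanCauseLocalSpill()
// call this with the boundaries of the prospective local interval
// (Cand.Intf.first() .. Cand.Intf.last()) to predict what a split artifact
// placed there would have to evict, and at what weight.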
1033 /// evictInterference - Evict any interfering registers that prevent VirtReg
1034 /// from being assigned to PhysReg. This assumes that canEvictInterference returned true.
1036 void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
1037 SmallVectorImpl<unsigned> &NewVRegs) {
1038 // Make sure that VirtReg has a cascade number, and assign that cascade
1039 // number to every evicted register. These live ranges can then only be
1040 // evicted by a newer cascade, preventing infinite loops.
1041 unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
1043 Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;
1045 LLVM_DEBUG(dbgs() << "evicting " << printReg(PhysReg, TRI)
1046 << " interference: Cascade " << Cascade << '\n');
1048 // Collect all interfering virtregs first.
1049 SmallVector<LiveInterval*, 8> Intfs;
1050 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
1051 LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
1052 // We usually have the interfering VRegs cached, so collectInterferingVRegs()
1053 // should be fast. We may need to recalculate when different physregs
1054 // overlap the same register unit, because then different SubRanges were queried
1056 Q.collectInterferingVRegs();
1057 ArrayRef<LiveInterval*> IVR = Q.interferingVRegs();
1058 Intfs.append(IVR.begin(), IVR.end());
1061 // Evict them second. This will invalidate the queries.
1062 for (unsigned i = 0, e = Intfs.size(); i != e; ++i) {
1063 LiveInterval *Intf = Intfs[i];
1064 // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
1065 if (!VRM->hasPhys(Intf->reg))
1068 LastEvicted.addEviction(PhysReg, VirtReg.reg, Intf->reg);
1070 Matrix->unassign(*Intf);
1071 assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
1072 VirtReg.isSpillable() < Intf->isSpillable()) &&
1073 "Cannot decrease cascade number, illegal eviction");
1074 ExtraRegInfo[Intf->reg].Cascade = Cascade;
1076 NewVRegs.push_back(Intf->reg);
1080 /// Returns true if the given \p PhysReg is a callee saved register and has not
1081 /// been used for allocation yet.
1082 bool RAGreedy::isUnusedCalleeSavedReg(unsigned PhysReg) const {
1083 unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg);
1087 return !Matrix->isPhysRegUsed(PhysReg);
1090 /// tryEvict - Try to evict all interferences for a physreg.
1091 /// @param VirtReg Currently unassigned virtual register.
1092 /// @param Order Physregs to try.
1093 /// @return Physreg to assign VirtReg, or 0.
1094 unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
1095 AllocationOrder &Order,
1096 SmallVectorImpl<unsigned> &NewVRegs,
1097 unsigned CostPerUseLimit) {
1098 NamedRegionTimer T("evict", "Evict", TimerGroupName, TimerGroupDescription,
1099 TimePassesIsEnabled);
1101 // Keep track of the cheapest interference seen so far.
1102 EvictionCost BestCost;
1104 unsigned BestPhys = 0;
1105 unsigned OrderLimit = Order.getOrder().size();
1107 // When we are just looking for a reduced cost per use, don't break any
1108 // hints, and only evict smaller spill weights.
1109 if (CostPerUseLimit < ~0u) {
1110 BestCost.BrokenHints = 0;
1111 BestCost.MaxWeight = VirtReg.weight;
1113 // Check if any registers in RC are below CostPerUseLimit.
1114 const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg);
1115 unsigned MinCost = RegClassInfo.getMinCost(RC);
1116 if (MinCost >= CostPerUseLimit) {
1117 LLVM_DEBUG(dbgs() << TRI->getRegClassName(RC) << " minimum cost = "
1118 << MinCost << ", no cheaper registers to be found.\n");
1122 // It is normal for register classes to have a long tail of registers with
1123 // the same cost. We don't need to look at them if they're too expensive.
1124 if (TRI->getCostPerUse(Order.getOrder().back()) >= CostPerUseLimit) {
1125 OrderLimit = RegClassInfo.getLastCostChange(RC);
1126 LLVM_DEBUG(dbgs() << "Only trying the first " << OrderLimit
1132 while (unsigned PhysReg = Order.next(OrderLimit)) {
1133 if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
1135 // The first use of a callee-saved register in a function has cost 1.
1136 // Don't start using a CSR when the CostPerUseLimit is low.
1137 if (CostPerUseLimit == 1 && isUnusedCalleeSavedReg(PhysReg)) {
1139 dbgs() << printReg(PhysReg, TRI) << " would clobber CSR "
1140 << printReg(RegClassInfo.getLastCalleeSavedAlias(PhysReg), TRI)
1145 if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
1151 // Stop if the hint can be used.
1159 evictInterference(VirtReg, BestPhys, NewVRegs);
1163 //===----------------------------------------------------------------------===//
1165 //===----------------------------------------------------------------------===//
1167 /// addSplitConstraints - Fill out the SplitConstraints vector based on the
1168 /// interference pattern in Physreg and its aliases. Add the constraints to
1169 /// SpillPlacement and return the static cost of this split in Cost, assuming
1170 /// that all preferences in SplitConstraints are met.
1171 /// Return false if there are no bundles with positive bias.
1172 bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
1173 BlockFrequency &Cost) {
1174 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
1176 // Reset interference dependent info.
1177 SplitConstraints.resize(UseBlocks.size());
1178 BlockFrequency StaticCost = 0;
1179 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
1180 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
1181 SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
1183 BC.Number = BI.MBB->getNumber();
1184 Intf.moveToBlock(BC.Number);
1185 BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
1186 BC.Exit = (BI.LiveOut &&
1187 !LIS->getInstructionFromIndex(BI.LastInstr)->isImplicitDef())
1188 ? SpillPlacement::PrefReg
1189 : SpillPlacement::DontCare;
1190 BC.ChangesValue = BI.FirstDef.isValid();
1192 if (!Intf.hasInterference())
1195 // Number of spill code instructions to insert.
1198 // Interference for the live-in value.
1200 if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number)) {
1201 BC.Entry = SpillPlacement::MustSpill;
1203 } else if (Intf.first() < BI.FirstInstr) {
1204 BC.Entry = SpillPlacement::PrefSpill;
1206 } else if (Intf.first() < BI.LastInstr) {
1210 // Abort if the spill cannot be inserted at the MBB's start
1211 if (((BC.Entry == SpillPlacement::MustSpill) ||
1212 (BC.Entry == SpillPlacement::PrefSpill)) &&
1213 SlotIndex::isEarlierInstr(BI.FirstInstr,
1214 SA->getFirstSplitPoint(BC.Number)))
1218 // Interference for the live-out value.
1220 if (Intf.last() >= SA->getLastSplitPoint(BC.Number)) {
1221 BC.Exit = SpillPlacement::MustSpill;
1223 } else if (Intf.last() > BI.LastInstr) {
1224 BC.Exit = SpillPlacement::PrefSpill;
1226 } else if (Intf.last() > BI.FirstInstr) {
1231 // Accumulate the total frequency of inserted spill code.
1233 StaticCost += SpillPlacer->getBlockFrequency(BC.Number);
1237 // Add constraints for use-blocks. Note that these are the only constraints
1238 // that may add a positive bias; it is downhill from here.
1239 SpillPlacer->addConstraints(SplitConstraints);
1240 return SpillPlacer->scanActiveBundles();
1243 /// addThroughConstraints - Add constraints and links to SpillPlacer from the
1244 /// live-through blocks in Blocks.
1245 bool RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
1246 ArrayRef<unsigned> Blocks) {
1247 const unsigned GroupSize = 8;
1248 SpillPlacement::BlockConstraint BCS[GroupSize];
1249 unsigned TBS[GroupSize];
1250 unsigned B = 0, T = 0;
1252 for (unsigned i = 0; i != Blocks.size(); ++i) {
1253 unsigned Number = Blocks[i];
1254 Intf.moveToBlock(Number);
1256 if (!Intf.hasInterference()) {
1257 assert(T < GroupSize && "Array overflow");
1259 if (++T == GroupSize) {
1260 SpillPlacer->addLinks(makeArrayRef(TBS, T));
1266 assert(B < GroupSize && "Array overflow");
1267 BCS[B].Number = Number;
1269 // Abort if the spill cannot be inserted at the MBB's start
1270 MachineBasicBlock *MBB = MF->getBlockNumbered(Number);
1271 if (!MBB->empty() &&
1272 SlotIndex::isEarlierInstr(LIS->getInstructionIndex(MBB->instr_front()),
1273 SA->getFirstSplitPoint(Number)))
1275 // Interference for the live-in value.
1276 if (Intf.first() <= Indexes->getMBBStartIdx(Number))
1277 BCS[B].Entry = SpillPlacement::MustSpill;
1279 BCS[B].Entry = SpillPlacement::PrefSpill;
1281 // Interference for the live-out value.
1282 if (Intf.last() >= SA->getLastSplitPoint(Number))
1283 BCS[B].Exit = SpillPlacement::MustSpill;
1285 BCS[B].Exit = SpillPlacement::PrefSpill;
1287 if (++B == GroupSize) {
1288 SpillPlacer->addConstraints(makeArrayRef(BCS, B));
1293 SpillPlacer->addConstraints(makeArrayRef(BCS, B));
1294 SpillPlacer->addLinks(makeArrayRef(TBS, T));
1298 bool RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
1299 // Keep track of through blocks that have not been added to SpillPlacer.
1300 BitVector Todo = SA->getThroughBlocks();
1301 SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
1302 unsigned AddedTo = 0;
1304 unsigned Visited = 0;
1308 ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
1309 // Find new through blocks in the periphery of PrefRegBundles.
1310 for (int i = 0, e = NewBundles.size(); i != e; ++i) {
1311 unsigned Bundle = NewBundles[i];
1312 // Look at all blocks connected to Bundle in the full graph.
1313 ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
1314 for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
1316 unsigned Block = *I;
1317 if (!Todo.test(Block))
1320 // This is a new through block. Add it to SpillPlacer later.
1321 ActiveBlocks.push_back(Block);
1327 // Any new blocks to add?
1328 if (ActiveBlocks.size() == AddedTo)
1331 // Compute through constraints from the interference, or assume that all
1332 // through blocks prefer spilling when forming compact regions.
1333 auto NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
1335 if (!addThroughConstraints(Cand.Intf, NewBlocks))
1338 // Provide a strong negative bias on through blocks to prevent unwanted
1339 // liveness on loop backedges.
1340 SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
1341 AddedTo = ActiveBlocks.size();
1343 // Perhaps iterating can enable more bundles?
1344 SpillPlacer->iterate();
1346 LLVM_DEBUG(dbgs() << ", v=" << Visited);
1350 /// calcCompactRegion - Compute the set of edge bundles that should be live
1351 /// when splitting the current live range into compact regions. Compact
1352 /// regions can be computed without looking at interference. They are the
1353 /// regions formed by removing all the live-through blocks from the live range.
1355 /// Returns false if the current live range is already compact, or if the
1356 /// compact regions would form single block regions anyway.
1357 bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
1358 // Without any through blocks, the live range is already compact.
1359 if (!SA->getNumThroughBlocks())
1362 // Compact regions don't correspond to any physreg.
1363 Cand.reset(IntfCache, 0);
1365 LLVM_DEBUG(dbgs() << "Compact region bundles");
1367 // Use the spill placer to determine the live bundles. GrowRegion pretends
1368 // that all the through blocks have interference when PhysReg is unset.
1369 SpillPlacer->prepare(Cand.LiveBundles);
1371 // The static split cost will be zero since Cand.Intf reports no interference.
1372 BlockFrequency Cost;
1373 if (!addSplitConstraints(Cand.Intf, Cost)) {
1374 LLVM_DEBUG(dbgs() << ", none.\n");
1378 if (!growRegion(Cand)) {
1379 LLVM_DEBUG(dbgs() << ", cannot spill all interferences.\n");
1383 SpillPlacer->finish();
1385 if (!Cand.LiveBundles.any()) {
1386 LLVM_DEBUG(dbgs() << ", none.\n");
1391 for (int i : Cand.LiveBundles.set_bits())
1392 dbgs() << " EB#" << i;
1398 /// calcSpillCost - Compute how expensive it would be to split the live range in
1399 /// SA around all use blocks instead of forming bundle regions.
1400 BlockFrequency RAGreedy::calcSpillCost() {
1401 BlockFrequency Cost = 0;
1402 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
1403 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
1404 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
1405 unsigned Number = BI.MBB->getNumber();
1406 // We normally only need one spill instruction - a load or a store.
1407 Cost += SpillPlacer->getBlockFrequency(Number);
1409 // Unless the value is redefined in the block.
1410 if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
1411 Cost += SpillPlacer->getBlockFrequency(Number);
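// Example (a reading of the two additions above): a block that is live-in,
// live-out, and also redefines the value needs both a reload before the first
// use and a store after the new definition, so its frequency is counted
// twice; a block with a single use pays for only one spill instruction.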
1416 /// Check if splitting Evictee will create a local split interval in
1417 /// basic block number BBNumber that may cause a bad eviction chain. This is
1418 /// intended to prevent bad eviction sequences like:
1419 /// movl %ebp, 8(%esp) # 4-byte Spill
1430 /// movl 16(%esp), %ebp # 4-byte Reload
1432 /// Such sequences are created in 2 scenarios:
1435 /// %0 is evicted from physreg0 by %1.
1436 /// Evictee %0 is intended for region splitting with split candidate
1437 /// physreg0 (the reg %0 was evicted from).
1438 /// Region splitting creates a local interval because of interference with the
1439 /// evictor %1 (normally region splitting creates 2 intervals, the "by reg"
1440 /// and "by stack" intervals, and a local interval created when interference
1442 /// One of the split intervals ends up evicting %2 from physreg1.
1443 /// Evictee %2 is intended for region splitting with split candidate
1445 /// One of the split intervals ends up evicting %3 from physreg2, etc.
1448 /// %0 is evicted from physreg0 by %1.
1449 /// %2 is evicted from physreg2 by %3 etc.
1450 /// Evictee %0 is intended for region splitting with split candidate
1452 /// Region splitting creates a local interval because of interference with the
1454 /// One of the split intervals ends up evicting back original evictor %1
1455 /// from physreg0 (the reg %0 was evicted from).
1456 /// Another evictee %2 is intended for region splitting with split candidate
1458 /// One of the split intervals ends up evicting %3 from physreg2, etc.
1460 /// \param Evictee The register considered to be split.
1461 /// \param Cand The split candidate that determines the physical register
1462 /// we are splitting for and the interferences.
1463 /// \param BBNumber The number of a BB for which the region split process will
1464 /// create a local split interval.
1465 /// \param Order The physical registers that may get evicted by a split
1466 /// artifact of Evictee.
1467 /// \return True if splitting Evictee may cause a bad eviction chain, false
1469 bool RAGreedy::splitCanCauseEvictionChain(unsigned Evictee,
1470 GlobalSplitCandidate &Cand,
1472 const AllocationOrder &Order) {
1473 EvictionTrack::EvictorInfo VregEvictorInfo = LastEvicted.getEvictor(Evictee);
1474 unsigned Evictor = VregEvictorInfo.first;
1475 unsigned PhysReg = VregEvictorInfo.second;
1477 // No actual evictor.
1478 if (!Evictor || !PhysReg)
1481 float MaxWeight = 0;
1482 unsigned FutureEvictedPhysReg =
1483 getCheapestEvicteeWeight(Order, LIS->getInterval(Evictee),
1484 Cand.Intf.first(), Cand.Intf.last(), &MaxWeight);
1486 // The bad eviction chain occurs when either the split candidate is the
1487 // evicting reg or one of the split artifacts will evict the evicting reg.
1488 if ((PhysReg != Cand.PhysReg) && (PhysReg != FutureEvictedPhysReg))
1491 Cand.Intf.moveToBlock(BBNumber);
1493 // Check to see if the Evictor contains interference (with Evictee) in the
1494 // given BB. If so, this interference caused the eviction of Evictee from
1495 // PhysReg. This suggests that we will create a local interval during the
1496 // region split to avoid this interference. This local interval may cause a bad
1498 if (!LIS->hasInterval(Evictor))
1500 LiveInterval &EvictorLI = LIS->getInterval(Evictor);
1501 if (EvictorLI.FindSegmentContaining(Cand.Intf.first()) == EvictorLI.end())
1504 // Now, check to see if the local interval we will create is going to be
1505 // expensive enough to evict somebody. If so, this may cause a bad eviction
1507 VirtRegAuxInfo VRAI(*MF, *LIS, VRM, getAnalysis<MachineLoopInfo>(), *MBFI);
1508 float splitArtifactWeight =
1509 VRAI.futureWeight(LIS->getInterval(Evictee),
1510 Cand.Intf.first().getPrevIndex(), Cand.Intf.last());
1511 if (splitArtifactWeight >= 0 && splitArtifactWeight < MaxWeight)
1517 /// Check if splitting VirtRegToSplit will create a local split interval
1518 /// in basic block number BBNumber that may cause a spill.
1520 /// \param VirtRegToSplit The register considered to be split.
1521 /// \param Cand The split candidate that determines the physical
1522 /// register we are splitting for and the interferences.
1523 /// \param BBNumber The number of a BB for which the region split process
1524 /// will create a local split interval.
1525 /// \param Order The physical registers that may get evicted by a
1526 /// split artifact of VirtRegToSplit.
1527 /// \return True if splitting VirtRegToSplit may cause a spill, false
1529 bool RAGreedy::splitCanCauseLocalSpill(unsigned VirtRegToSplit,
1530 GlobalSplitCandidate &Cand,
1532 const AllocationOrder &Order) {
1533 Cand.Intf.moveToBlock(BBNumber);
1535 // Check if the local interval will find a non-interfering assignment.
1536 for (auto PhysReg : Order.getOrder()) {
1537 if (!Matrix->checkInterference(Cand.Intf.first().getPrevIndex(),
1538 Cand.Intf.last(), PhysReg))
1542 // Check if the local interval will evict a cheaper interval.
1543 float CheapestEvictWeight = 0;
1544 unsigned FutureEvictedPhysReg = getCheapestEvicteeWeight(
1545 Order, LIS->getInterval(VirtRegToSplit), Cand.Intf.first(),
1546 Cand.Intf.last(), &CheapestEvictWeight);
1548 // Have we found an interval that can be evicted?
1549 if (FutureEvictedPhysReg) {
1550 VirtRegAuxInfo VRAI(*MF, *LIS, VRM, getAnalysis<MachineLoopInfo>(), *MBFI);
1551 float splitArtifactWeight =
1552 VRAI.futureWeight(LIS->getInterval(VirtRegToSplit),
1553 Cand.Intf.first().getPrevIndex(), Cand.Intf.last());
1554 // Will the weight of the local interval be higher than the cheapest evictee
1555 // weight? If so, it will evict that interval and will not cause a spill.
1556 if (splitArtifactWeight >= 0 && splitArtifactWeight > CheapestEvictWeight)
1560 // The local interval is not able to find a non-interfering assignment and is
1561 // not able to evict a less worthy interval; therefore, it can cause a spill.
1565 /// calcGlobalSplitCost - Return the global split cost of following the split
1566 /// pattern in LiveBundles. This cost should be added to the local cost of the
1567 /// interference pattern in SplitConstraints.
1569 BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand,
1570 const AllocationOrder &Order,
1571 bool *CanCauseEvictionChain) {
1572 BlockFrequency GlobalCost = 0;
1573 const BitVector &LiveBundles = Cand.LiveBundles;
1574 unsigned VirtRegToSplit = SA->getParent().reg;
1575 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
1576 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
1577 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
1578 SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
1579 bool RegIn = LiveBundles[Bundles->getBundle(BC.Number, false)];
1580 bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, true)];
1583 Cand.Intf.moveToBlock(BC.Number);
1584 // Check whether a local interval is going to be created during the region
1585 // split. Calculate the advanced split cost (cost of local intervals) if that option
1587 if (EnableAdvancedRASplitCost && Cand.Intf.hasInterference() && BI.LiveIn &&
1588 BI.LiveOut && RegIn && RegOut) {
1590 if (CanCauseEvictionChain &&
1591 splitCanCauseEvictionChain(VirtRegToSplit, Cand, BC.Number, Order)) {
1592 // This interference causes our eviction from this assignment; we might
1593 // evict somebody else, and eventually someone will spill, so add that cost.
1594 // See splitCanCauseEvictionChain for a detailed description of the scenarios.
1595 GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
1596 GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
1598 *CanCauseEvictionChain = true;
1600 } else if (splitCanCauseLocalSpill(VirtRegToSplit, Cand, BC.Number,
1602 // This interference causes the local interval to spill, so add that cost.
1603 GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
1604 GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
1609 Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
1611 Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
1613 GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
1616 for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
1617 unsigned Number = Cand.ActiveBlocks[i];
1618 bool RegIn = LiveBundles[Bundles->getBundle(Number, false)];
1619 bool RegOut = LiveBundles[Bundles->getBundle(Number, true)];
1620 if (!RegIn && !RegOut)
1622 if (RegIn && RegOut) {
1623 // We need double spill code if this block has interference.
1624 Cand.Intf.moveToBlock(Number);
1625 if (Cand.Intf.hasInterference()) {
1626 GlobalCost += SpillPlacer->getBlockFrequency(Number);
1627 GlobalCost += SpillPlacer->getBlockFrequency(Number);
1629 // Check whether a local interval is going to be created during the
1631 if (EnableAdvancedRASplitCost && CanCauseEvictionChain &&
1632 splitCanCauseEvictionChain(VirtRegToSplit, Cand, Number, Order)) {
1633 // This interference causes our eviction from this assignment; we might
1634 // evict somebody else, so add that cost.
1635 // See splitCanCauseEvictionChain for a detailed description of
1637 GlobalCost += SpillPlacer->getBlockFrequency(Number);
1638 GlobalCost += SpillPlacer->getBlockFrequency(Number);
1640 *CanCauseEvictionChain = true;
1645 // live-in / stack-out or stack-in / live-out.
1646 GlobalCost += SpillPlacer->getBlockFrequency(Number);
1651 /// splitAroundRegion - Split the current live range around the regions
1652 /// determined by BundleCand and GlobalCand.
1654 /// Before calling this function, GlobalCand and BundleCand must be initialized
1655 /// so each bundle is assigned to a valid candidate, or NoCand for the
1656 /// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
1657 /// objects must be initialized for the current live range, and intervals
1658 /// created for the used candidates.
1660 /// @param LREdit The LiveRangeEdit object handling the current split.
1661 /// @param UsedCands List of used GlobalCand entries. Every BundleCand value
1662 /// must appear in this list.
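/// For illustration (hypothetical values): BundleCand = {3, NoCand, 0, 3}
/// would mean edge bundles 0 and 3 are covered by GlobalCand[3]'s interval,
/// bundle 2 by GlobalCand[0] (the compact region candidate when one was
/// found), and bundle 1 stays on the stack; UsedCands would then have to
/// contain both 0 and 3.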
1663 void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
1664 ArrayRef<unsigned> UsedCands) {
1665 // These are the intervals created for new global ranges. We may create more
1666 // intervals for local ranges.
1667 const unsigned NumGlobalIntvs = LREdit.size();
1668 LLVM_DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs
1669 << " globals.\n");
1670 assert(NumGlobalIntvs && "No global intervals configured");
1672 // Isolate even single instructions when dealing with a proper sub-class.
1673 // That guarantees register class inflation for the stack interval because it
1674 // is all copies.
1675 unsigned Reg = SA->getParent().reg;
1676 bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
1678 // First handle all the blocks with uses.
1679 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
1680 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
1681 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
1682 unsigned Number = BI.MBB->getNumber();
1683 unsigned IntvIn = 0, IntvOut = 0;
1684 SlotIndex IntfIn, IntfOut;
1686 unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)];
1687 if (CandIn != NoCand) {
1688 GlobalSplitCandidate &Cand = GlobalCand[CandIn];
1689 IntvIn = Cand.IntvIdx;
1690 Cand.Intf.moveToBlock(Number);
1691 IntfIn = Cand.Intf.first();
1695 unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)];
1696 if (CandOut != NoCand) {
1697 GlobalSplitCandidate &Cand = GlobalCand[CandOut];
1698 IntvOut = Cand.IntvIdx;
1699 Cand.Intf.moveToBlock(Number);
1700 IntfOut = Cand.Intf.last();
1704 // Create separate intervals for isolated blocks with multiple uses.
1705 if (!IntvIn && !IntvOut) {
1706 LLVM_DEBUG(dbgs() << printMBBReference(*BI.MBB) << " isolated.\n");
1707 if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
1708 SE->splitSingleBlock(BI);
1712 if (IntvIn && IntvOut)
1713 SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
1715 SE->splitRegInBlock(BI, IntvIn, IntfIn);
1717 SE->splitRegOutBlock(BI, IntvOut, IntfOut);
1720 // Handle live-through blocks. The relevant live-through blocks are stored in
1721 // the ActiveBlocks list with each candidate. We need to filter out
1722 // duplicates.
1723 BitVector Todo = SA->getThroughBlocks();
1724 for (unsigned c = 0; c != UsedCands.size(); ++c) {
1725 ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
1726 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
1727 unsigned Number = Blocks[i];
1728 if (!Todo.test(Number))
1732 unsigned IntvIn = 0, IntvOut = 0;
1733 SlotIndex IntfIn, IntfOut;
1735 unsigned CandIn = BundleCand[Bundles->getBundle(Number, false)];
1736 if (CandIn != NoCand) {
1737 GlobalSplitCandidate &Cand = GlobalCand[CandIn];
1738 IntvIn = Cand.IntvIdx;
1739 Cand.Intf.moveToBlock(Number);
1740 IntfIn = Cand.Intf.first();
1743 unsigned CandOut = BundleCand[Bundles->getBundle(Number, true)];
1744 if (CandOut != NoCand) {
1745 GlobalSplitCandidate &Cand = GlobalCand[CandOut];
1746 IntvOut = Cand.IntvIdx;
1747 Cand.Intf.moveToBlock(Number);
1748 IntfOut = Cand.Intf.last();
1750 if (!IntvIn && !IntvOut)
1752 SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
1758 SmallVector<unsigned, 8> IntvMap;
1759 SE->finish(&IntvMap);
1760 DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);
1762 ExtraRegInfo.resize(MRI->getNumVirtRegs());
1763 unsigned OrigBlocks = SA->getNumLiveBlocks();
1765 // Sort out the new intervals created by splitting. We get four kinds:
1766 // - Remainder intervals should not be split again.
1767 // - Candidate intervals can be assigned to Cand.PhysReg.
1768 // - Block-local splits are candidates for local splitting.
1769 // - DCE leftovers should go back on the queue.
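// Concretely, as handled by the loop below: IntvMap[i] == 0 is the remainder
// interval and is staged RS_Spill; indices below NumGlobalIntvs are the
// per-candidate global intervals, which may be region-split again only while
// they cover strictly fewer live blocks than the original; everything else
// (block-local intervals, new ranges from DCE) stays RS_New.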
1770 for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
1771 LiveInterval &Reg = LIS->getInterval(LREdit.get(i));
1773 // Ignore old intervals from DCE.
1774 if (getStage(Reg) != RS_New)
1777 // Remainder interval. Don't try splitting again, spill if it doesn't
1778 // allocate.
1779 if (IntvMap[i] == 0) {
1780 setStage(Reg, RS_Spill);
1784 // Global intervals. Allow repeated splitting as long as the number of live
1785 // blocks is strictly decreasing.
1786 if (IntvMap[i] < NumGlobalIntvs) {
1787 if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
1788 LLVM_DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
1789 << " blocks as original.\n");
1790 // Don't allow repeated splitting as a safe guard against looping.
1791 setStage(Reg, RS_Split2);
1796 // Other intervals are treated as new. This includes local intervals created
1797 // for blocks with multiple uses, and anything created by DCE.
1801 MF->verify(this, "After splitting live range around region");
1804 // Global splitting has a high compile-time cost, especially for a large live
1805 // range. Return false here when the potential benefit will never be worth
1806 // the cost.
1807 bool RAGreedy::isSplitBenefitWorthCost(LiveInterval &VirtReg) {
1808 MachineInstr *MI = MRI->getUniqueVRegDef(VirtReg.reg);
1809 if (MI && TII->isTriviallyReMaterializable(*MI, AA) &&
1810 VirtReg.size() > HugeSizeForSplit)
1815 unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
1816 SmallVectorImpl<unsigned> &NewVRegs) {
1817 if (!isSplitBenefitWorthCost(VirtReg))
1819 unsigned NumCands = 0;
1820 BlockFrequency SpillCost = calcSpillCost();
1821 BlockFrequency BestCost;
1823 // Check if we can split this live range around a compact region.
1824 bool HasCompact = calcCompactRegion(GlobalCand.front());
1826 // Yes, keep GlobalCand[0] as the compact region candidate.
1828 BestCost = BlockFrequency::getMaxFrequency();
1830 // No benefit from the compact region, our fallback will be per-block
1831 // splitting. Make sure we find a solution that is cheaper than spilling.
1832 BestCost = SpillCost;
1833 LLVM_DEBUG(dbgs() << "Cost of isolating all blocks = ";
1834 MBFI->printBlockFreq(dbgs(), BestCost) << '\n');
1837 bool CanCauseEvictionChain = false;
1839 calculateRegionSplitCost(VirtReg, Order, BestCost, NumCands,
1840 false /*IgnoreCSR*/, &CanCauseEvictionChain);
1842 // Split candidates with compact regions can cause a bad eviction sequence.
1843 // See splitCanCauseEvictionChain for detailed description of scenarios.
1844 // To avoid it, we need to compare the cost with the spill cost and not the
1845 // current max frequency.
1846 if (HasCompact && (BestCost > SpillCost) && (BestCand != NoCand) &&
1847 CanCauseEvictionChain) {
1851 // No solutions found, fall back to single block splitting.
1852 if (!HasCompact && BestCand == NoCand)
1855 return doRegionSplit(VirtReg, BestCand, HasCompact, NewVRegs);
1858 unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg,
1859 AllocationOrder &Order,
1860 BlockFrequency &BestCost,
1861 unsigned &NumCands, bool IgnoreCSR,
1862 bool *CanCauseEvictionChain) {
1863 unsigned BestCand = NoCand;
1865 while (unsigned PhysReg = Order.next()) {
1866 if (IgnoreCSR && isUnusedCalleeSavedReg(PhysReg))
1869 // Discard bad candidates before we run out of interference cache cursors.
1870 // This will only affect register classes with a lot of registers (>32).
1871 if (NumCands == IntfCache.getMaxCursors()) {
1872 unsigned WorstCount = ~0u;
1874 for (unsigned i = 0; i != NumCands; ++i) {
1875 if (i == BestCand || !GlobalCand[i].PhysReg)
1877 unsigned Count = GlobalCand[i].LiveBundles.count();
1878 if (Count < WorstCount) {
1884 GlobalCand[Worst] = GlobalCand[NumCands];
1885 if (BestCand == NumCands)
1889 if (GlobalCand.size() <= NumCands)
1890 GlobalCand.resize(NumCands+1);
1891 GlobalSplitCandidate &Cand = GlobalCand[NumCands];
1892 Cand.reset(IntfCache, PhysReg);
1894 SpillPlacer->prepare(Cand.LiveBundles);
1895 BlockFrequency Cost;
1896 if (!addSplitConstraints(Cand.Intf, Cost)) {
1897 LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tno positive bundles\n");
1900 LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << "\tstatic = ";
1901 MBFI->printBlockFreq(dbgs(), Cost));
1902 if (Cost >= BestCost) {
1904 if (BestCand == NoCand)
1905 dbgs() << " worse than no bundles\n";
1907 dbgs() << " worse than "
1908 << printReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
1912 if (!growRegion(Cand)) {
1913 LLVM_DEBUG(dbgs() << ", cannot spill all interferences.\n");
1917 SpillPlacer->finish();
1919 // No live bundles, defer to splitSingleBlocks().
1920 if (!Cand.LiveBundles.any()) {
1921 LLVM_DEBUG(dbgs() << " no bundles.\n");
1925 bool HasEvictionChain = false;
1926 Cost += calcGlobalSplitCost(Cand, Order, &HasEvictionChain);
1928 dbgs() << ", total = ";
1929 MBFI->printBlockFreq(dbgs(), Cost) << " with bundles";
1930 for (int i : Cand.LiveBundles.set_bits())
1931 dbgs() << " EB#" << i;
1934 if (Cost < BestCost) {
1935 BestCand = NumCands;
1937 // See splitCanCauseEvictionChain for detailed description of bad
1938 // eviction chain scenarios.
1939 if (CanCauseEvictionChain)
1940 *CanCauseEvictionChain = HasEvictionChain;
1945 if (CanCauseEvictionChain && BestCand != NoCand) {
1946 // See splitCanCauseEvictionChain for detailed description of bad
1947 // eviction chain scenarios.
1948 LLVM_DEBUG(dbgs() << "Best split candidate of vreg "
1949 << printReg(VirtReg.reg, TRI) << " may ");
1950 if (!(*CanCauseEvictionChain))
1951 LLVM_DEBUG(dbgs() << "not ");
1952 LLVM_DEBUG(dbgs() << "cause bad eviction chain\n");
1958 unsigned RAGreedy::doRegionSplit(LiveInterval &VirtReg, unsigned BestCand,
1960 SmallVectorImpl<unsigned> &NewVRegs) {
1961 SmallVector<unsigned, 8> UsedCands;
1962 // Prepare split editor.
1963 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
1964 SE->reset(LREdit, SplitSpillMode);
1966 // Assign all edge bundles to the preferred candidate, or NoCand.
1967 BundleCand.assign(Bundles->getNumBundles(), NoCand);
1969 // Assign bundles for the best candidate region.
1970 if (BestCand != NoCand) {
1971 GlobalSplitCandidate &Cand = GlobalCand[BestCand];
1972 if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
1973 UsedCands.push_back(BestCand);
1974 Cand.IntvIdx = SE->openIntv();
1975 LLVM_DEBUG(dbgs() << "Split for " << printReg(Cand.PhysReg, TRI) << " in "
1976 << B << " bundles, intv " << Cand.IntvIdx << ".\n");
1981 // Assign bundles for the compact region.
1983 GlobalSplitCandidate &Cand = GlobalCand.front();
1984 assert(!Cand.PhysReg && "Compact region has no physreg");
1985 if (unsigned B = Cand.getBundles(BundleCand, 0)) {
1986 UsedCands.push_back(0);
1987 Cand.IntvIdx = SE->openIntv();
1988 LLVM_DEBUG(dbgs() << "Split for compact region in " << B
1989 << " bundles, intv " << Cand.IntvIdx << ".\n");
1994 splitAroundRegion(LREdit, UsedCands);
1998 //===----------------------------------------------------------------------===//
1999 // Per-Block Splitting
2000 //===----------------------------------------------------------------------===//
2002 /// tryBlockSplit - Split a global live range around every block with uses. This
2003 /// creates a lot of local live ranges that will be split by tryLocalSplit if
2004 /// they don't allocate.
2005 unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
2006 SmallVectorImpl<unsigned> &NewVRegs) {
2007 assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
2008 unsigned Reg = VirtReg.reg;
2009 bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
2010 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
2011 SE->reset(LREdit, SplitSpillMode);
2012 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
2013 for (unsigned i = 0; i != UseBlocks.size(); ++i) {
2014 const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
2015 if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
2016 SE->splitSingleBlock(BI);
2018 // No blocks were split.
2022 // We did split for some blocks.
2023 SmallVector<unsigned, 8> IntvMap;
2024 SE->finish(&IntvMap);
2026 // Tell LiveDebugVariables about the new ranges.
2027 DebugVars->splitRegister(Reg, LREdit.regs(), *LIS);
2029 ExtraRegInfo.resize(MRI->getNumVirtRegs());
2031 // Sort out the new intervals created by splitting. The remainder interval
2032 // goes straight to spilling, the new local ranges get to stay RS_New.
2033 for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
2034 LiveInterval &LI = LIS->getInterval(LREdit.get(i));
2035 if (getStage(LI) == RS_New && IntvMap[i] == 0)
2036 setStage(LI, RS_Spill);
2040 MF->verify(this, "After splitting live range around basic blocks");
2044 //===----------------------------------------------------------------------===//
2045 // Per-Instruction Splitting
2046 //===----------------------------------------------------------------------===//
2048 /// Get the number of allocatable registers that match the constraints of \p Reg
2049 /// on \p MI and that are also in \p SuperRC.
2050 static unsigned getNumAllocatableRegsForConstraints(
2051 const MachineInstr *MI, unsigned Reg, const TargetRegisterClass *SuperRC,
2052 const TargetInstrInfo *TII, const TargetRegisterInfo *TRI,
2053 const RegisterClassInfo &RCI) {
2054 assert(SuperRC && "Invalid register class");
2056 const TargetRegisterClass *ConstrainedRC =
2057 MI->getRegClassConstraintEffectForVReg(Reg, SuperRC, TII, TRI,
2058 /* ExploreBundle */ true);
2061 return RCI.getNumAllocatableRegs(ConstrainedRC);
2064 /// tryInstructionSplit - Split a live range around individual instructions.
2065 /// This is normally not worthwhile since the spiller is doing essentially the
2066 /// same thing. However, when the live range is in a constrained register
2067 /// class, it may help to insert copies such that parts of the live range can
2068 /// be moved to a larger register class.
2070 /// This is similar to spilling to a larger register class.
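/// Illustrative example (hypothetical, x86): a virtual register constrained
/// to GR32_ABCD by a single instruction can be split around that instruction,
/// so only the small interval next to it keeps the GR32_ABCD constraint while
/// the remainder may be inflated to plain GR32 and is easier to allocate.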
2071 unsigned
2072 RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
2073 SmallVectorImpl<unsigned> &NewVRegs) {
2074 const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);
2075 // There is no point to this if there are no larger sub-classes.
2076 if (!RegClassInfo.isProperSubClass(CurRC))
2079 // Always enable split spill mode, since we're effectively spilling to a
2080 // register class.
2081 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
2082 SE->reset(LREdit, SplitEditor::SM_Size);
2084 ArrayRef<SlotIndex> Uses = SA->getUseSlots();
2085 if (Uses.size() <= 1)
2088 LLVM_DEBUG(dbgs() << "Split around " << Uses.size()
2089 << " individual instrs.\n");
2091 const TargetRegisterClass *SuperRC =
2092 TRI->getLargestLegalSuperClass(CurRC, *MF);
2093 unsigned SuperRCNumAllocatableRegs = RCI.getNumAllocatableRegs(SuperRC);
2094 // Split around every non-copy instruction if this split will relax
2095 // the constraints on the virtual register.
2096 // Otherwise, splitting just inserts uncoalescable copies that do not help
2097 // the allocation.
2098 for (unsigned i = 0; i != Uses.size(); ++i) {
2099 if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
2100 if (MI->isFullCopy() ||
2101 SuperRCNumAllocatableRegs ==
2102 getNumAllocatableRegsForConstraints(MI, VirtReg.reg, SuperRC, TII,
2103 TRI, RCI)) {
2104 LLVM_DEBUG(dbgs() << " skip:\t" << Uses[i] << '\t' << *MI);
2108 SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
2109 SlotIndex SegStop = SE->leaveIntvAfter(Uses[i]);
2110 SE->useIntv(SegStart, SegStop);
2113 if (LREdit.empty()) {
2114 LLVM_DEBUG(dbgs() << "All uses were copies.\n");
2118 SmallVector<unsigned, 8> IntvMap;
2119 SE->finish(&IntvMap);
2120 DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);
2121 ExtraRegInfo.resize(MRI->getNumVirtRegs());
2123 // Assign all new registers to RS_Spill. This was the last chance.
2124 setStage(LREdit.begin(), LREdit.end(), RS_Spill);
2128 //===----------------------------------------------------------------------===//
2130 //===----------------------------------------------------------------------===//
2132 /// calcGapWeights - Compute the maximum spill weight that needs to be evicted
2133 /// in order to use PhysReg between two entries in SA->UseSlots.
2135 /// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
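/// Worked example (illustrative): with four use slots u0..u3 there are three
/// gaps; an interfering segment of weight w that overlaps the instruction at
/// u1 raises both GapWeight[0] and GapWeight[1] to at least w, because
/// interference overlapping an instruction is counted in both surrounding
/// gaps.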
2137 void RAGreedy::calcGapWeights(unsigned PhysReg,
2138 SmallVectorImpl<float> &GapWeight) {
2139 assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
2140 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
2141 ArrayRef<SlotIndex> Uses = SA->getUseSlots();
2142 const unsigned NumGaps = Uses.size()-1;
2144 // Start and end points for the interference check.
2145 SlotIndex StartIdx =
2146 BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
2147 SlotIndex StopIdx =
2148 BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;
2150 GapWeight.assign(NumGaps, 0.0f);
2152 // Add interference from each overlapping register.
2153 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
2154 if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
2155 .checkInterference())
2158 // We know that VirtReg is a continuous interval from FirstInstr to
2159 // LastInstr, so we don't need InterferenceQuery.
2161 // Interference that overlaps an instruction is counted in both gaps
2162 // surrounding the instruction. The exception is interference before
2163 // StartIdx and after StopIdx.
2165 LiveIntervalUnion::SegmentIter IntI =
2166 Matrix->getLiveUnions()[*Units].find(StartIdx);
2167 for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
2168 // Skip the gaps before IntI.
2169 while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
2170 if (++Gap == NumGaps)
2175 // Update the gaps covered by IntI.
2176 const float weight = IntI.value()->weight;
2177 for (; Gap != NumGaps; ++Gap) {
2178 GapWeight[Gap] = std::max(GapWeight[Gap], weight);
2179 if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
2187 // Add fixed interference.
2188 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
2189 const LiveRange &LR = LIS->getRegUnit(*Units);
2190 LiveRange::const_iterator I = LR.find(StartIdx);
2191 LiveRange::const_iterator E = LR.end();
2193 // Same loop as above. Mark any overlapped gaps as HUGE_VALF.
2194 for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
2195 while (Uses[Gap+1].getBoundaryIndex() < I->start)
2196 if (++Gap == NumGaps)
2201 for (; Gap != NumGaps; ++Gap) {
2202 GapWeight[Gap] = huge_valf;
2203 if (Uses[Gap+1].getBaseIndex() >= I->end)
2212 /// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
2215 unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
2216 SmallVectorImpl<unsigned> &NewVRegs) {
2217 // TODO: the function currently only handles a single UseBlock; it should be
2218 // possible to generalize.
2219 if (SA->getUseBlocks().size() != 1)
2222 const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
2224 // Note that it is possible to have an interval that is live-in or live-out
2225 // while only covering a single block - A phi-def can use undef values from
2226 // predecessors, and the block could be a single-block loop.
2227 // We don't bother doing anything clever about such a case, we simply assume
2228 // that the interval is continuous from FirstInstr to LastInstr. We should
2229 // make sure that we don't do anything illegal to such an interval, though.
2231 ArrayRef<SlotIndex> Uses = SA->getUseSlots();
2232 if (Uses.size() <= 2)
2234 const unsigned NumGaps = Uses.size()-1;
2237 dbgs() << "tryLocalSplit: ";
2238 for (unsigned i = 0, e = Uses.size(); i != e; ++i)
2239 dbgs() << ' ' << Uses[i];
2243 // If VirtReg is live across any register mask operands, compute a list of
2244 // gaps with register masks.
2245 SmallVector<unsigned, 8> RegMaskGaps;
2246 if (Matrix->checkRegMaskInterference(VirtReg)) {
2247 // Get regmask slots for the whole block.
2248 ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
2249 LLVM_DEBUG(dbgs() << RMS.size() << " regmasks in block:");
2250 // Constrain to VirtReg's live range.
2251 unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
2252 Uses.front().getRegSlot()) - RMS.begin();
2253 unsigned re = RMS.size();
2254 for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
2255 // Look for Uses[i] <= RMS <= Uses[i+1].
2256 assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
2257 if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
2259 // Skip a regmask on the same instruction as the last use. It doesn't
2260 // overlap the live range.
2261 if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
2263 LLVM_DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-'
2264 << Uses[i + 1]);
2265 RegMaskGaps.push_back(i);
2266 // Advance ri to the next gap. A regmask on one of the uses counts in
2268 while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
2271 LLVM_DEBUG(dbgs() << '\n');
2274 // Since we allow local split results to be split again, there is a risk of
2275 // creating infinite loops. It is tempting to require that the new live
2276 // ranges have fewer instructions than the original. That would guarantee
2277 // convergence, but it is too strict. A live range with 3 instructions can be
2278 // split 2+3 (including the COPY), and we want to allow that.
2280 // Instead we use these rules:
2282 // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
2283 // noop split, of course).
2284 // 2. Require progress be made for ranges with getStage() == RS_Split2. All
2285 // the new ranges must have fewer instructions than before the split.
2286 // 3. New ranges with the same number of instructions are marked RS_Split2,
2287 // smaller ranges are marked RS_New.
2289 // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
2290 // excessive splitting and infinite loops.
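// Illustrative numbers: for an RS_Split2 range with 4 uses (NumGaps == 3)
// that is not live-in, the window SplitBefore == 0, SplitAfter == 1 gives
// NewGaps = 0 + 1 - 0 + 1 = 2 < 3 and is therefore legal, while any window
// whose NewGaps equals NumGaps is rejected because it would make no progress.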
2292 bool ProgressRequired = getStage(VirtReg) >= RS_Split2;
2294 // Best split candidate.
2295 unsigned BestBefore = NumGaps;
2296 unsigned BestAfter = 0;
2299 const float blockFreq =
2300 SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
2301 (1.0f / MBFI->getEntryFreq());
2302 SmallVector<float, 8> GapWeight;
2305 while (unsigned PhysReg = Order.next()) {
2306 // Keep track of the largest spill weight that would need to be evicted in
2307 // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
2308 calcGapWeights(PhysReg, GapWeight);
2310 // Remove any gaps with regmask clobbers.
2311 if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
2312 for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
2313 GapWeight[RegMaskGaps[i]] = huge_valf;
2315 // Try to find the best sequence of gaps to close.
2316 // The new spill weight must be larger than any gap interference.
2318 // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
2319 unsigned SplitBefore = 0, SplitAfter = 1;
2321 // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
2322 // It is the spill weight that needs to be evicted.
2323 float MaxGap = GapWeight[0];
2326 // Live before/after split?
2327 const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
2328 const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;
2330 LLVM_DEBUG(dbgs() << printReg(PhysReg, TRI) << ' ' << Uses[SplitBefore]
2331 << '-' << Uses[SplitAfter] << " i=" << MaxGap);
2333 // Stop before the interval gets so big we wouldn't be making progress.
2334 if (!LiveBefore && !LiveAfter) {
2335 LLVM_DEBUG(dbgs() << " all\n");
2338 // Should the interval be extended or shrunk?
2341 // How many gaps would the new range have?
2342 unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;
2344 // Legally, without causing looping?
2345 bool Legal = !ProgressRequired || NewGaps < NumGaps;
2347 if (Legal && MaxGap < huge_valf) {
2348 // Estimate the new spill weight. Each instruction reads or writes the
2349 // register. Conservatively assume there are no read-modify-write
2350 // instructions.
2352 // Try to guess the size of the new interval.
2353 const float EstWeight = normalizeSpillWeight(
2354 blockFreq * (NewGaps + 1),
2355 Uses[SplitBefore].distance(Uses[SplitAfter]) +
2356 (LiveBefore + LiveAfter) * SlotIndex::InstrDist,
2357 1);
2358 // Would this split be possible to allocate?
2359 // Never allocate all gaps, we wouldn't be making progress.
2360 LLVM_DEBUG(dbgs() << " w=" << EstWeight);
2361 if (EstWeight * Hysteresis >= MaxGap) {
2363 float Diff = EstWeight - MaxGap;
2364 if (Diff > BestDiff) {
2365 LLVM_DEBUG(dbgs() << " (best)");
2366 BestDiff = Hysteresis * Diff;
2367 BestBefore = SplitBefore;
2368 BestAfter = SplitAfter;
2375 if (++SplitBefore < SplitAfter) {
2376 LLVM_DEBUG(dbgs() << " shrink\n");
2377 // Recompute the max when necessary.
2378 if (GapWeight[SplitBefore - 1] >= MaxGap) {
2379 MaxGap = GapWeight[SplitBefore];
2380 for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
2381 MaxGap = std::max(MaxGap, GapWeight[i]);
2388 // Try to extend the interval.
2389 if (SplitAfter >= NumGaps) {
2390 LLVM_DEBUG(dbgs() << " end\n");
2394 LLVM_DEBUG(dbgs() << " extend\n");
2395 MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
2399 // Didn't find any candidates?
2400 if (BestBefore == NumGaps)
2403 LLVM_DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore] << '-'
2404 << Uses[BestAfter] << ", " << BestDiff << ", "
2405 << (BestAfter - BestBefore + 1) << " instrs\n");
2407 LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
2411 SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
2412 SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
2413 SE->useIntv(SegStart, SegStop);
2414 SmallVector<unsigned, 8> IntvMap;
2415 SE->finish(&IntvMap);
2416 DebugVars->splitRegister(VirtReg.reg, LREdit.regs(), *LIS);
2418 // If the new range has the same number of instructions as before, mark it as
2419 // RS_Split2 so the next split will be forced to make progress. Otherwise,
2420 // leave the new intervals as RS_New so they can compete.
2421 bool LiveBefore = BestBefore != 0 || BI.LiveIn;
2422 bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
2423 unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
2424 if (NewGaps >= NumGaps) {
2425 LLVM_DEBUG(dbgs() << "Tagging non-progress ranges: ");
2426 assert(!ProgressRequired && "Didn't make progress when it was required.");
2427 for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
2428 if (IntvMap[i] == 1) {
2429 setStage(LIS->getInterval(LREdit.get(i)), RS_Split2);
2430 LLVM_DEBUG(dbgs() << printReg(LREdit.get(i)));
2432 LLVM_DEBUG(dbgs() << '\n');
2439 //===----------------------------------------------------------------------===//
2440 // Live Range Splitting
2441 //===----------------------------------------------------------------------===//
2443 /// trySplit - Try to split VirtReg or one of its interferences, making it
2444 /// assignable.
2445 /// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
2446 unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
2447 SmallVectorImpl<unsigned>&NewVRegs) {
2448 // Ranges must be Split2 or less.
2449 if (getStage(VirtReg) >= RS_Spill)
2452 // Local intervals are handled separately.
2453 if (LIS->intervalIsInOneMBB(VirtReg)) {
2454 NamedRegionTimer T("local_split", "Local Splitting", TimerGroupName,
2455 TimerGroupDescription, TimePassesIsEnabled);
2456 SA->analyze(&VirtReg);
2457 unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
2458 if (PhysReg || !NewVRegs.empty())
2460 return tryInstructionSplit(VirtReg, Order, NewVRegs);
2463 NamedRegionTimer T("global_split", "Global Splitting", TimerGroupName,
2464 TimerGroupDescription, TimePassesIsEnabled);
2466 SA->analyze(&VirtReg);
2468 // FIXME: SplitAnalysis may repair broken live ranges coming from the
2469 // coalescer. That may cause the range to become allocatable which means that
2470 // tryRegionSplit won't be making progress. This check should be replaced with
2471 // an assertion when the coalescer is fixed.
2472 if (SA->didRepairRange()) {
2473 // VirtReg has changed, so all cached queries are invalid.
2474 Matrix->invalidateVirtRegs();
2475 if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
2479 // First try to split around a region spanning multiple blocks. RS_Split2
2480 // ranges already made dubious progress with region splitting, so they go
2481 // straight to single block splitting.
2482 if (getStage(VirtReg) < RS_Split2) {
2483 unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
2484 if (PhysReg || !NewVRegs.empty())
2488 // Then isolate blocks.
2489 return tryBlockSplit(VirtReg, Order, NewVRegs);
2492 //===----------------------------------------------------------------------===//
2493 // Last Chance Recoloring
2494 //===----------------------------------------------------------------------===//
2496 /// Return true if \p reg has any tied def operand.
2497 static bool hasTiedDef(MachineRegisterInfo *MRI, unsigned reg) {
2498 for (const MachineOperand &MO : MRI->def_operands(reg))
2505 /// mayRecolorAllInterferences - Check if the virtual registers that
2506 /// interfere with \p VirtReg on \p PhysReg (or one of its aliases) may be
2507 /// recolored to free \p PhysReg.
2508 /// When true is returned, \p RecoloringCandidates has been augmented with all
2509 /// the live intervals that need to be recolored in order to free \p PhysReg
2510 /// for \p VirtReg.
2511 /// \p FixedRegisters contains all the virtual registers that cannot be
2512 /// recolored.
2513 bool
2514 RAGreedy::mayRecolorAllInterferences(unsigned PhysReg, LiveInterval &VirtReg,
2515 SmallLISet &RecoloringCandidates,
2516 const SmallVirtRegSet &FixedRegisters) {
2517 const TargetRegisterClass *CurRC = MRI->getRegClass(VirtReg.reg);
2519 for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
2520 LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
2521 // If there is LastChanceRecoloringMaxInterference or more interferences,
2522 // chances are one would not be recolorable.
2523 if (Q.collectInterferingVRegs(LastChanceRecoloringMaxInterference) >=
2524 LastChanceRecoloringMaxInterference && !ExhaustiveSearch) {
2525 LLVM_DEBUG(dbgs() << "Early abort: too many interferences.\n");
2526 CutOffInfo |= CO_Interf;
2529 for (unsigned i = Q.interferingVRegs().size(); i; --i) {
2530 LiveInterval *Intf = Q.interferingVRegs()[i - 1];
2531 // If Intf is done and sits on the same register class as VirtReg, it
2532 // would not be recolorable, as it is in the same state as VirtReg.
2533 // However, if VirtReg has tied defs and Intf doesn't, then there is
2534 // still a point in examining whether it can be recolored.
2535 if (((getStage(*Intf) == RS_Done &&
2536 MRI->getRegClass(Intf->reg) == CurRC) &&
2537 !(hasTiedDef(MRI, VirtReg.reg) && !hasTiedDef(MRI, Intf->reg))) ||
2538 FixedRegisters.count(Intf->reg)) {
2540 dbgs() << "Early abort: the interference is not recolorable.\n");
2543 RecoloringCandidates.insert(Intf);
2549 /// tryLastChanceRecoloring - Try to assign a color to \p VirtReg by recoloring
2550 /// its interferences.
2551 /// Last chance recoloring chooses a color for \p VirtReg and recolors every
2552 /// virtual register that was using it. The recoloring process may recursively
2553 /// use the last chance recoloring. Therefore, when a virtual register has been
2554 /// assigned a color by this mechanism, it is marked as Fixed, i.e., it cannot
2555 /// be last-chance-recolored again during this recoloring "session".
2558 /// vA can use {R1, R2 }
2559 /// vB can use { R2, R3}
2560 /// vC can use {R1 }
2561 /// Where vA, vB, and vC cannot be split anymore (they are reloads for
2562 /// instance) and they all interfere.
2564 /// vA is assigned R1
2565 /// vB is assigned R2
2566 /// vC tries to evict vA but vA is already done.
2567 /// Regular register allocation fails.
2569 /// Last chance recoloring kicks in:
2570 /// vC does as if vA was evicted => vC uses R1.
2571 /// vC is marked as fixed.
2572 /// vA needs to find a color.
2573 /// None are available.
2574 /// vA cannot evict vC: vC is a fixed virtual register now.
2575 /// vA does as if vB was evicted => vA uses R2.
2576 /// vB needs to find a color.
2577 /// R3 is available.
2578 /// Recoloring => vC = R1, vA = R2, vB = R3
2580 /// \p Order defines the preferred allocation order for \p VirtReg.
2581 /// \p NewRegs will contain any new virtual registers that have been created
2582 /// (split, spill) during the process and that must be assigned.
2583 /// \p FixedRegisters contains all the virtual registers that cannot be
2584 /// recolored.
2585 /// \p Depth gives the current depth of the last chance recoloring.
2586 /// \return a physical register that can be used for VirtReg or ~0u if none
2587 /// found.
2588 unsigned RAGreedy::tryLastChanceRecoloring(LiveInterval &VirtReg,
2589 AllocationOrder &Order,
2590 SmallVectorImpl<unsigned> &NewVRegs,
2591 SmallVirtRegSet &FixedRegisters,
2593 LLVM_DEBUG(dbgs() << "Try last chance recoloring for " << VirtReg << '\n');
2594 // Ranges must be Done.
2595 assert((getStage(VirtReg) >= RS_Done || !VirtReg.isSpillable()) &&
2596 "Last chance recoloring should really be last chance");
2597 // Set the max depth to LastChanceRecoloringMaxDepth.
2598 // We may want to reconsider that if we end up with a too large search space
2599 // for target with hundreds of registers.
2600 // Indeed, in that case we may want to cut the search space earlier.
2601 if (Depth >= LastChanceRecoloringMaxDepth && !ExhaustiveSearch) {
2602 LLVM_DEBUG(dbgs() << "Abort because max depth has been reached.\n");
2603 CutOffInfo |= CO_Depth;
2607 // Set of Live intervals that will need to be recolored.
2608 SmallLISet RecoloringCandidates;
2609 // Record the original mapping virtual register to physical register in case
2610 // the recoloring fails.
2611 DenseMap<unsigned, unsigned> VirtRegToPhysReg;
2612 // Mark VirtReg as fixed, i.e., it will not be recolored past this point in
2613 // this recoloring "session".
2614 FixedRegisters.insert(VirtReg.reg);
2615 SmallVector<unsigned, 4> CurrentNewVRegs;
2618 while (unsigned PhysReg = Order.next()) {
2619 LLVM_DEBUG(dbgs() << "Try to assign: " << VirtReg << " to "
2620 << printReg(PhysReg, TRI) << '\n');
2621 RecoloringCandidates.clear();
2622 VirtRegToPhysReg.clear();
2623 CurrentNewVRegs.clear();
2625 // It is only possible to recolor virtual register interference.
2626 if (Matrix->checkInterference(VirtReg, PhysReg) >
2627 LiveRegMatrix::IK_VirtReg) {
2629 dbgs() << "Some interferences are not with virtual registers.\n");
2634 // Early give up on this PhysReg if it is obvious we cannot recolor all
2635 // the interferences.
2636 if (!mayRecolorAllInterferences(PhysReg, VirtReg, RecoloringCandidates,
2638 LLVM_DEBUG(dbgs() << "Some interferences cannot be recolored.\n");
2642 // RecoloringCandidates contains all the virtual registers that interfere
2643 // with VirtReg on PhysReg (or one of its aliases).
2644 // Enqueue them for recoloring and perform the actual recoloring.
2645 PQueue RecoloringQueue;
2646 for (SmallLISet::iterator It = RecoloringCandidates.begin(),
2647 EndIt = RecoloringCandidates.end();
2648 It != EndIt; ++It) {
2649 unsigned ItVirtReg = (*It)->reg;
2650 enqueue(RecoloringQueue, *It);
2651 assert(VRM->hasPhys(ItVirtReg) &&
2652 "Interferences are supposed to be with allocated variables");
2654 // Record the current allocation.
2655 VirtRegToPhysReg[ItVirtReg] = VRM->getPhys(ItVirtReg);
2656 // unset the related struct.
2657 Matrix->unassign(**It);
2660 // Do as if VirtReg was assigned to PhysReg so that the underlying
2661 // recoloring has the right information about the interferences and
2662 // available colors.
2663 Matrix->assign(VirtReg, PhysReg);
2665 // Save the current recoloring state.
2666 // If we cannot recolor all the interferences, we will have to start again
2667 // at this point for the next physical register.
2668 SmallVirtRegSet SaveFixedRegisters(FixedRegisters);
2669 if (tryRecoloringCandidates(RecoloringQueue, CurrentNewVRegs,
2670 FixedRegisters, Depth)) {
2671 // Push the queued vregs into the main queue.
2672 for (unsigned NewVReg : CurrentNewVRegs)
2673 NewVRegs.push_back(NewVReg);
2674 // Do not mess up with the global assignment process.
2675 // I.e., VirtReg must be unassigned.
2676 Matrix->unassign(VirtReg);
2680 LLVM_DEBUG(dbgs() << "Fail to assign: " << VirtReg << " to "
2681 << printReg(PhysReg, TRI) << '\n');
2683 // The recoloring attempt failed, undo the changes.
2684 FixedRegisters = SaveFixedRegisters;
2685 Matrix->unassign(VirtReg);
2687 // For a newly created vreg which is also in RecoloringCandidates,
2688 // don't add it to NewVRegs because its physical register will be restored
2689 // below. Other vregs in CurrentNewVRegs are created by calling
2690 // selectOrSplit and should be added into NewVRegs.
2691 for (SmallVectorImpl<unsigned>::iterator Next = CurrentNewVRegs.begin(),
2692 End = CurrentNewVRegs.end();
2693 Next != End; ++Next) {
2694 if (RecoloringCandidates.count(&LIS->getInterval(*Next)))
2696 NewVRegs.push_back(*Next);
2699 for (SmallLISet::iterator It = RecoloringCandidates.begin(),
2700 EndIt = RecoloringCandidates.end();
2701 It != EndIt; ++It) {
2702 unsigned ItVirtReg = (*It)->reg;
2703 if (VRM->hasPhys(ItVirtReg))
2704 Matrix->unassign(**It);
2705 unsigned ItPhysReg = VirtRegToPhysReg[ItVirtReg];
2706 Matrix->assign(**It, ItPhysReg);
2710 // Last chance recoloring did not work either; give up.
2714 /// tryRecoloringCandidates - Try to assign a new color to every register
2715 /// in \RecoloringQueue.
2716 /// \p NewRegs will contain any new virtual register created during the
2717 /// recoloring process.
2718 /// \p FixedRegisters[in/out] contains all the registers that have been
2719 /// recolored.
2720 /// \return true if all virtual registers in RecoloringQueue were successfully
2721 /// recolored, false otherwise.
2722 bool RAGreedy::tryRecoloringCandidates(PQueue &RecoloringQueue,
2723 SmallVectorImpl<unsigned> &NewVRegs,
2724 SmallVirtRegSet &FixedRegisters,
2726 while (!RecoloringQueue.empty()) {
2727 LiveInterval *LI = dequeue(RecoloringQueue);
2728 LLVM_DEBUG(dbgs() << "Try to recolor: " << *LI << '\n');
2729 unsigned PhysReg;
2730 PhysReg = selectOrSplitImpl(*LI, NewVRegs, FixedRegisters, Depth + 1);
2731 // When splitting happens, the live-range may actually be empty.
2732 // In that case, it is okay to continue the recoloring even
2733 // if we did not find an alternative color for it. Indeed,
2734 // there will not be anything to color for LI in the end.
2735 if (PhysReg == ~0u || (!PhysReg && !LI->empty()))
2739 assert(LI->empty() && "Only empty live-range do not require a register");
2740 LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
2741 << " succeeded. Empty LI.\n");
2744 LLVM_DEBUG(dbgs() << "Recoloring of " << *LI
2745 << " succeeded with: " << printReg(PhysReg, TRI) << '\n');
2747 Matrix->assign(*LI, PhysReg);
2748 FixedRegisters.insert(LI->reg);
2753 //===----------------------------------------------------------------------===//
2755 //===----------------------------------------------------------------------===//
2757 unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
2758 SmallVectorImpl<unsigned> &NewVRegs) {
2759 CutOffInfo = CO_None;
2760 LLVMContext &Ctx = MF->getFunction().getContext();
2761 SmallVirtRegSet FixedRegisters;
2762 unsigned Reg = selectOrSplitImpl(VirtReg, NewVRegs, FixedRegisters);
2763 if (Reg == ~0U && (CutOffInfo != CO_None)) {
2764 uint8_t CutOffEncountered = CutOffInfo & (CO_Depth | CO_Interf);
2765 if (CutOffEncountered == CO_Depth)
2766 Ctx.emitError("register allocation failed: maximum depth for recoloring "
2767 "reached. Use -fexhaustive-register-search to skip "
2768 "cutoffs");
2769 else if (CutOffEncountered == CO_Interf)
2770 Ctx.emitError("register allocation failed: maximum interference for "
2771 "recoloring reached. Use -fexhaustive-register-search "
2773 else if (CutOffEncountered == (CO_Depth | CO_Interf))
2774 Ctx.emitError("register allocation failed: maximum interference and "
2775 "depth for recoloring reached. Use "
2776 "-fexhaustive-register-search to skip cutoffs");
2781 /// Using a CSR for the first time has a cost because it causes push|pop
2782 /// to be added to prologue|epilogue. Splitting a cold section of the live
2783 /// range can have lower cost than using the CSR for the first time;
2784 /// Spilling a live range in the cold path can have lower cost than using
2785 /// the CSR for the first time. Returns the physical register if we decide
2786 /// to use the CSR; otherwise return 0.
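/// For example, as implemented below: a spillable range already at the
/// RS_Spill stage whose calcSpillCost() is below CSRCost does not get the
/// CSR; CostPerUseLimit is instead forced to 1 so the subsequent eviction
/// attempt cannot pick a callee-saved register, and the range is left to
/// spill.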
2787 unsigned RAGreedy::tryAssignCSRFirstTime(LiveInterval &VirtReg,
2788 AllocationOrder &Order,
2790 unsigned &CostPerUseLimit,
2791 SmallVectorImpl<unsigned> &NewVRegs) {
2792 if (getStage(VirtReg) == RS_Spill && VirtReg.isSpillable()) {
2793 // We choose spill over using the CSR for the first time if the spill cost
2794 // is lower than CSRCost.
2795 SA->analyze(&VirtReg);
2796 if (calcSpillCost() >= CSRCost)
2799 // We are going to spill, set CostPerUseLimit to 1 to make sure that
2800 // we will not use a callee-saved register in tryEvict.
2801 CostPerUseLimit = 1;
2804 if (getStage(VirtReg) < RS_Split) {
2805 // We choose pre-splitting over using the CSR for the first time if
2806 // the cost of splitting is lower than CSRCost.
2807 SA->analyze(&VirtReg);
2808 unsigned NumCands = 0;
2809 BlockFrequency BestCost = CSRCost; // Don't modify CSRCost.
2810 unsigned BestCand = calculateRegionSplitCost(VirtReg, Order, BestCost,
2811 NumCands, true /*IgnoreCSR*/);
2812 if (BestCand == NoCand)
2813 // Use the CSR if we can't find a region split below CSRCost.
2816 // Perform the actual pre-splitting.
2817 doRegionSplit(VirtReg, BestCand, false/*HasCompact*/, NewVRegs);
2823 void RAGreedy::aboutToRemoveInterval(LiveInterval &LI) {
2824 // Do not keep invalid information around.
2825 SetOfBrokenHints.remove(&LI);
2828 void RAGreedy::initializeCSRCost() {
2829 // We use the larger one out of the command-line option and the value reported
2830 // by TRI.
2831 CSRCost = BlockFrequency(
2832 std::max((unsigned)CSRFirstTimeCost, TRI->getCSRFirstUseCost()));
2833 if (!CSRCost.getFrequency())
2836 // Raw cost is relative to Entry == 2^14; scale it appropriately.
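// For example (illustrative): with the fixed reference entry frequency of
// 2^14, a function whose actual entry frequency is 2^13 scales CSRCost by
// BranchProbability(8192, 16384), i.e. halves it, while an entry frequency of
// 2^15 divides by BranchProbability(16384, 32768) and doubles it.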
2837 uint64_t ActualEntry = MBFI->getEntryFreq();
2842 uint64_t FixedEntry = 1 << 14;
2843 if (ActualEntry < FixedEntry)
2844 CSRCost *= BranchProbability(ActualEntry, FixedEntry);
2845 else if (ActualEntry <= UINT32_MAX)
2846 // Invert the fraction and divide.
2847 CSRCost /= BranchProbability(FixedEntry, ActualEntry);
2849 // Can't use BranchProbability in general, since it takes 32-bit numbers.
2850 CSRCost = CSRCost.getFrequency() * (ActualEntry / FixedEntry);
2853 /// Collect the hint info for \p Reg.
2854 /// The results are stored into \p Out.
2855 /// \p Out is not cleared before being populated.
2856 void RAGreedy::collectHintInfo(unsigned Reg, HintsInfo &Out) {
2857 for (const MachineInstr &Instr : MRI->reg_nodbg_instructions(Reg)) {
2858 if (!Instr.isFullCopy())
2860 // Look for the other end of the copy.
2861 unsigned OtherReg = Instr.getOperand(0).getReg();
2862 if (OtherReg == Reg) {
2863 OtherReg = Instr.getOperand(1).getReg();
2864 if (OtherReg == Reg)
2867 // Get the current assignment.
2868 unsigned OtherPhysReg = TargetRegisterInfo::isPhysicalRegister(OtherReg)
2869 ? OtherReg
2870 : VRM->getPhys(OtherReg);
2871 // Push the collected information.
2872 Out.push_back(HintInfo(MBFI->getBlockFreq(Instr.getParent()), OtherReg,
2873 OtherPhysReg));
2877 /// Using the given \p List, compute the cost of the broken hints if
2878 /// \p PhysReg was used.
2879 /// \return The cost of \p List for \p PhysReg.
2880 BlockFrequency RAGreedy::getBrokenHintFreq(const HintsInfo &List,
2882 BlockFrequency Cost = 0;
2883 for (const HintInfo &Info : List) {
2884 if (Info.PhysReg != PhysReg)
2890 /// Using the register assigned to \p VirtReg, try to recolor
2891 /// all the live ranges that are copy-related with \p VirtReg.
2892 /// The recoloring is then propagated to all the live-ranges that have
2893 /// been recolored and so on, until no more copies can be coalesced or
2894 /// it is not profitable.
2895 /// For a given live range, profitability is determined by the sum of the
2896 /// frequencies of the non-identity copies it would introduce with the old
2897 /// and new register.
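/// Hypothetical example: if Reg is copy-related to one neighbor already
/// assigned PhysReg (copy in a block of frequency 8) and to another neighbor
/// in a different register (frequency 2), keeping the current assignment
/// breaks hints worth 8 + 2 = 10 while moving to PhysReg breaks only 2, so
/// the recoloring is considered profitable.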
2898 void RAGreedy::tryHintRecoloring(LiveInterval &VirtReg) {
2899 // We have a broken hint, check if it is possible to fix it by
2900 // reusing PhysReg for the copy-related live-ranges. Indeed, we evicted
2901 // some register and PhysReg may be available for the other live-ranges.
2902 SmallSet<unsigned, 4> Visited;
2903 SmallVector<unsigned, 2> RecoloringCandidates;
2905 unsigned Reg = VirtReg.reg;
2906 unsigned PhysReg = VRM->getPhys(Reg);
2907 // Start the recoloring algorithm from the input live-interval, then
2908 // it will propagate to the ones that are copy-related with it.
2909 Visited.insert(Reg);
2910 RecoloringCandidates.push_back(Reg);
2912 LLVM_DEBUG(dbgs() << "Trying to reconcile hints for: " << printReg(Reg, TRI)
2913 << '(' << printReg(PhysReg, TRI) << ")\n");
2916 Reg = RecoloringCandidates.pop_back_val();
2918 // We cannot recolor physical registers.
2919 if (TargetRegisterInfo::isPhysicalRegister(Reg))
2922 assert(VRM->hasPhys(Reg) && "We have unallocated variable!!");
2924 // Get the live interval mapped with this virtual register to be able
2925 // to check for the interference with the new color.
2926 LiveInterval &LI = LIS->getInterval(Reg);
2927 unsigned CurrPhys = VRM->getPhys(Reg);
2928 // Check that the new color matches the register class constraints and
2929 // that it is free for this live range.
2930 if (CurrPhys != PhysReg && (!MRI->getRegClass(Reg)->contains(PhysReg) ||
2931 Matrix->checkInterference(LI, PhysReg)))
2934 LLVM_DEBUG(dbgs() << printReg(Reg, TRI) << '(' << printReg(CurrPhys, TRI)
2935 << ") is recolorable.\n");
2937 // Gather the hint info.
2939 collectHintInfo(Reg, Info);
2940 // Check if recoloring the live-range will increase the cost of the
2941 // non-identity copies.
2942 if (CurrPhys != PhysReg) {
2943 LLVM_DEBUG(dbgs() << "Checking profitability:\n");
2944 BlockFrequency OldCopiesCost = getBrokenHintFreq(Info, CurrPhys);
2945 BlockFrequency NewCopiesCost = getBrokenHintFreq(Info, PhysReg);
2946 LLVM_DEBUG(dbgs() << "Old Cost: " << OldCopiesCost.getFrequency()
2947 << "\nNew Cost: " << NewCopiesCost.getFrequency()
2949 if (OldCopiesCost < NewCopiesCost) {
2950 LLVM_DEBUG(dbgs() << "=> Not profitable.\n");
2953 // At this point, the cost is either cheaper or equal. If it is
2954 // equal, we consider it profitable because it may expose
2955 // more recoloring opportunities.
2956 LLVM_DEBUG(dbgs() << "=> Profitable.\n");
2957 // Recolor the live-range.
2958 Matrix->unassign(LI);
2959 Matrix->assign(LI, PhysReg);
2961 // Push all copy-related live-ranges to keep reconciling the broken
2962 // hints.
2963 for (const HintInfo &HI : Info) {
2964 if (Visited.insert(HI.Reg).second)
2965 RecoloringCandidates.push_back(HI.Reg);
2967 } while (!RecoloringCandidates.empty());
2970 /// Try to recolor broken hints.
2971 /// Broken hints may be repaired by recoloring when an evicted variable
2972 /// freed up a register for a larger live-range.
2973 /// Consider the following example:
2981 /// Let us assume b gets split:
2991 /// Because of how the allocation works, b, c, and d may be assigned different
2992 /// colors. Now, if a gets evicted later:
3002 /// e = ld SpillSlot
3004 /// It is likely that we can assign the same register to b, c, and d,
3005 /// getting rid of 2 copies.
3006 void RAGreedy::tryHintsRecoloring() {
3007 for (LiveInterval *LI : SetOfBrokenHints) {
3008 assert(TargetRegisterInfo::isVirtualRegister(LI->reg) &&
3009 "Recoloring is possible only for virtual registers");
3010 // Some dead defs may be around (e.g., because of debug uses).
3012 if (!VRM->hasPhys(LI->reg))
3014 tryHintRecoloring(*LI);
3018 unsigned RAGreedy::selectOrSplitImpl(LiveInterval &VirtReg,
3019 SmallVectorImpl<unsigned> &NewVRegs,
3020 SmallVirtRegSet &FixedRegisters,
3022 unsigned CostPerUseLimit = ~0u;
3023 // First try assigning a free register.
3024 AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo, Matrix);
3025 if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs)) {
3026 // If VirtReg got an assignment, the eviction info is no longer relevant.
3027 LastEvicted.clearEvicteeInfo(VirtReg.reg);
3028 // When NewVRegs is not empty, we may have made decisions such as evicting
3029 // a virtual register, go with the earlier decisions and use the physical
3030 // register.
3031 if (CSRCost.getFrequency() && isUnusedCalleeSavedReg(PhysReg) &&
3033 unsigned CSRReg = tryAssignCSRFirstTime(VirtReg, Order, PhysReg,
3034 CostPerUseLimit, NewVRegs);
3035 if (CSRReg || !NewVRegs.empty())
3036 // Return now if we decide to use a CSR or create new vregs due to
3037 // pre-splitting.
3043 LiveRangeStage Stage = getStage(VirtReg);
3044 LLVM_DEBUG(dbgs() << StageName[Stage] << " Cascade "
3045 << ExtraRegInfo[VirtReg.reg].Cascade << '\n');
3047 // Try to evict a less worthy live range, but only for ranges from the primary
3048 // queue. The RS_Split ranges already failed to do this, and they should not
3049 // get a second chance until they have been split.
3050 if (Stage != RS_Split)
3051 if (unsigned PhysReg =
3052 tryEvict(VirtReg, Order, NewVRegs, CostPerUseLimit)) {
3053 unsigned Hint = MRI->getSimpleHint(VirtReg.reg);
3054 // If VirtReg has a hint and that hint is broken record this
3055 // virtual register as a recoloring candidate for broken hint.
3056 // Indeed, since we evicted a variable in its neighborhood it is
3057 // likely we can at least partially recolor some of the
3058 // copy-related live-ranges.
3059 if (Hint && Hint != PhysReg)
3060 SetOfBrokenHints.insert(&VirtReg);
3061 // If VirtReg evicted someone, the eviction info for it as an evictee is
3062 // no longer relevant.
3063 LastEvicted.clearEvicteeInfo(VirtReg.reg);
3067 assert((NewVRegs.empty() || Depth) && "Cannot append to existing NewVRegs");
3069 // The first time we see a live range, don't try to split or spill.
3070 // Wait until the second time, when all smaller ranges have been allocated.
3071 // This gives a better picture of the interference to split around.
3072 if (Stage < RS_Split) {
3073 setStage(VirtReg, RS_Split);
3074 LLVM_DEBUG(dbgs() << "wait for second round\n");
3075 NewVRegs.push_back(VirtReg.reg);
3079 if (Stage < RS_Spill) {
3080 // Try splitting VirtReg or interferences.
3081 unsigned NewVRegSizeBefore = NewVRegs.size();
3082 unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
3083 if (PhysReg || (NewVRegs.size() - NewVRegSizeBefore)) {
3084 // If VirtReg got split, the eviction info is no longer relevant.
3085 LastEvicted.clearEvicteeInfo(VirtReg.reg);
3090 // If we couldn't allocate a register from spilling, there is probably some
3091 // invalid inline assembly. The base class will report it.
3092 if (Stage >= RS_Done || !VirtReg.isSpillable())
3093 return tryLastChanceRecoloring(VirtReg, Order, NewVRegs, FixedRegisters,
3094 Depth);
3096 // Finally spill VirtReg itself.
3097 if (EnableDeferredSpilling && getStage(VirtReg) < RS_Memory) {
3098 // TODO: This is experimental and in particular, we do not model
3099 // the live range splitting done by spilling correctly.
3100 // We would need a deep integration with the spiller to do the
3101 // right thing here. Anyway, that is still good for early testing.
3102 setStage(VirtReg, RS_Memory);
3103 LLVM_DEBUG(dbgs() << "Do as if this register is in memory\n");
3104 NewVRegs.push_back(VirtReg.reg);
3106 NamedRegionTimer T("spill", "Spiller", TimerGroupName,
3107 TimerGroupDescription, TimePassesIsEnabled);
3108 LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this, &DeadRemats);
3109 spiller().spill(LRE);
3110 setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);
3113 MF->verify(this, "After spilling");
3116 // The live virtual register requesting allocation was spilled, so tell
3117 // the caller not to allocate anything during this round.
3121 void RAGreedy::reportNumberOfSplillsReloads(MachineLoop *L, unsigned &Reloads,
3122 unsigned &FoldedReloads,
3124 unsigned &FoldedSpills) {
3131 // Sum up the spills and reloads in subloops.
3131 for (MachineLoop *SubLoop : *L) {
3132 unsigned SubReloads;
3133 unsigned SubFoldedReloads;
3135 unsigned SubFoldedSpills;
3137 reportNumberOfSplillsReloads(SubLoop, SubReloads, SubFoldedReloads,
3138 SubSpills, SubFoldedSpills);
3139 Reloads += SubReloads;
3140 FoldedReloads += SubFoldedReloads;
3141 Spills += SubSpills;
3142 FoldedSpills += SubFoldedSpills;
3145 const MachineFrameInfo &MFI = MF->getFrameInfo();
3146 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
3149 for (MachineBasicBlock *MBB : L->getBlocks())
3150 // Handle blocks that were not included in subloops.
3151 if (Loops->getLoopFor(MBB) == L)
3152 for (MachineInstr &MI : *MBB) {
3153 SmallVector<const MachineMemOperand *, 2> Accesses;
3154 auto isSpillSlotAccess = [&MFI](const MachineMemOperand *A) {
3155 return MFI.isSpillSlotObjectIndex(
3156 cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
3157 ->getFrameIndex());
3158 };
3160 if (TII->isLoadFromStackSlot(MI, FI) && MFI.isSpillSlotObjectIndex(FI))
3162 else if (TII->hasLoadFromStackSlot(MI, Accesses) &&
3163 llvm::any_of(Accesses, isSpillSlotAccess))
3165 else if (TII->isStoreToStackSlot(MI, FI) &&
3166 MFI.isSpillSlotObjectIndex(FI))
3168 else if (TII->hasStoreToStackSlot(MI, Accesses) &&
3169 llvm::any_of(Accesses, isSpillSlotAccess))
3173 if (Reloads || FoldedReloads || Spills || FoldedSpills) {
3174 using namespace ore;
3177 MachineOptimizationRemarkMissed R(DEBUG_TYPE, "LoopSpillReload",
3178 L->getStartLoc(), L->getHeader());
3180 R << NV("NumSpills", Spills) << " spills ";
3182 R << NV("NumFoldedSpills", FoldedSpills) << " folded spills ";
3184 R << NV("NumReloads", Reloads) << " reloads ";
3186 R << NV("NumFoldedReloads", FoldedReloads) << " folded reloads ";
3187 R << "generated in loop";
3193 bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
3194 LLVM_DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
3195 << "********** Function: " << mf.getName() << '\n');
3198 TRI = MF->getSubtarget().getRegisterInfo();
3199 TII = MF->getSubtarget().getInstrInfo();
3200 RCI.runOnMachineFunction(mf);
3202 EnableLocalReassign = EnableLocalReassignment ||
3203 MF->getSubtarget().enableRALocalReassignment(
3204 MF->getTarget().getOptLevel());
3206 EnableAdvancedRASplitCost = ConsiderLocalIntervalCost ||
3207 MF->getSubtarget().enableAdvancedRASplitCost();
3210 MF->verify(this, "Before greedy register allocator");
3212 RegAllocBase::init(getAnalysis<VirtRegMap>(),
3213 getAnalysis<LiveIntervals>(),
3214 getAnalysis<LiveRegMatrix>());
3215 Indexes = &getAnalysis<SlotIndexes>();
3216 MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
3217 DomTree = &getAnalysis<MachineDominatorTree>();
3218 ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();
3219 SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
3220 Loops = &getAnalysis<MachineLoopInfo>();
3221 Bundles = &getAnalysis<EdgeBundles>();
3222 SpillPlacer = &getAnalysis<SpillPlacement>();
3223 DebugVars = &getAnalysis<LiveDebugVariables>();
3224 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
3226 initializeCSRCost();
3228 calculateSpillWeightsAndHints(*LIS, mf, VRM, *Loops, *MBFI);
3230 LLVM_DEBUG(LIS->dump());
3232 SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
3233 SE.reset(new SplitEditor(*SA, *AA, *LIS, *VRM, *DomTree, *MBFI));
3234 ExtraRegInfo.clear();
3235 ExtraRegInfo.resize(MRI->getNumVirtRegs());
3237 IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
3238 GlobalCand.resize(32); // This will grow as needed.
3239 SetOfBrokenHints.clear();
3240 LastEvicted.clear();
3243 tryHintsRecoloring();
3245 reportNumberOfSplillsReloads();