1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and computes live intervals for
13 // each virtual and physical register.
15 //===----------------------------------------------------------------------===//
17 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
18 #include "LiveRangeCalc.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/Analysis/AliasAnalysis.h"
21 #include "llvm/CodeGen/LiveVariables.h"
22 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
23 #include "llvm/CodeGen/MachineDominators.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/Passes.h"
27 #include "llvm/CodeGen/VirtRegMap.h"
28 #include "llvm/IR/Value.h"
29 #include "llvm/Support/BlockFrequency.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/Support/Debug.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/raw_ostream.h"
34 #include "llvm/Target/TargetInstrInfo.h"
35 #include "llvm/Target/TargetRegisterInfo.h"
36 #include "llvm/Target/TargetSubtargetInfo.h"
41 #define DEBUG_TYPE "regalloc"
// Pass ID and registration boilerplate. The pass declares dependencies on
// alias analysis, the machine dominator tree, and slot indexes.
43 char LiveIntervals::ID = 0;
44 char &llvm::LiveIntervalsID = LiveIntervals::ID;
45 INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
46 "Live Interval Analysis", false, false)
47 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
48 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
49 INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
50 INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
51 "Live Interval Analysis", false, false)
// Stress-testing knob: eagerly compute live ranges for every physreg unit
// (see runOnMachineFunction below).
54 static cl::opt<bool> EnablePrecomputePhysRegs(
55 "precompute-phys-liveness", cl::Hidden,
56 cl::desc("Eagerly compute live intervals for all physreg units."));
// NOTE(review): this re-declares EnablePrecomputePhysRegs. Presumably the two
// definitions sit in opposite arms of an #ifndef NDEBUG / #else block whose
// preprocessor lines are elided in this listing -- confirm against upstream.
58 static bool EnablePrecomputePhysRegs = false;
// Option selecting the segment-set fast path when building physreg live
// ranges (consumed in computeRegUnitRange/computeLiveInRegUnits).
62 cl::opt<bool> UseSegmentSetForPhysRegs(
63 "use-segment-set-for-physregs", cl::Hidden, cl::init(true),
65 "Use segment set for the computation of the live ranges of physregs."));
// Declare the analyses this pass requires and the ones it preserves, then
// defer to the base class for the common MachineFunctionPass setup.
68 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
70 AU.addRequired<AAResultsWrapperPass>();
71 AU.addPreserved<AAResultsWrapperPass>();
72 AU.addPreserved<LiveVariables>();
73 AU.addPreservedID(MachineLoopInfoID);
// Transitive requirements stay alive for as long as this pass is alive.
74 AU.addRequiredTransitiveID(MachineDominatorsID);
75 AU.addPreservedID(MachineDominatorsID);
76 AU.addPreserved<SlotIndexes>();
77 AU.addRequiredTransitive<SlotIndexes>();
78 MachineFunctionPass::getAnalysisUsage(AU);
// Constructor: null-initialize lazily created members and register the pass
// with the global registry.
81 LiveIntervals::LiveIntervals() : MachineFunctionPass(ID),
82 DomTree(nullptr), LRCalc(nullptr) {
83 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
// Destructor. (Body elided in this listing; presumably deletes LRCalc, which
// runOnMachineFunction allocates with raw `new` -- confirm.)
86 LiveIntervals::~LiveIntervals() {
// Drop all computed liveness data so the pass can be rerun on another
// function without leaking the previous results.
90 void LiveIntervals::releaseMemory() {
91 // Free the live intervals themselves.
92 for (unsigned i = 0, e = VirtRegIntervals.size(); i != e; ++i)
93 delete VirtRegIntervals[TargetRegisterInfo::index2VirtReg(i)];
94 VirtRegIntervals.clear();
97 RegMaskBlocks.clear();
// Delete the per-register-unit live ranges (entries may be null).
99 for (unsigned i = 0, e = RegUnitRanges.size(); i != e; ++i)
100 delete RegUnitRanges[i];
101 RegUnitRanges.clear();
103 // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
104 VNInfoAllocator.Reset();
107 /// runOnMachineFunction - calculates LiveIntervals
109 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
// Cache handles to target info and the required analyses for the rest of
// the pass.
111 MRI = &MF->getRegInfo();
112 TRI = MF->getSubtarget().getRegisterInfo();
113 TII = MF->getSubtarget().getInstrInfo();
114 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
115 Indexes = &getAnalysis<SlotIndexes>();
116 DomTree = &getAnalysis<MachineDominatorTree>();
// NOTE(review): raw `new`; ownership presumably released in the destructor
// -- confirm. A unique_ptr member would make this explicit.
119 LRCalc = new LiveRangeCalc();
121 // Allocate space for all virtual registers.
122 VirtRegIntervals.resize(MRI->getNumVirtRegs());
126 computeLiveInRegUnits();
128 if (EnablePrecomputePhysRegs) {
129 // For stress testing, precompute live ranges of all physical register
130 // units, including reserved registers.
131 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
138 /// print - Implement the dump method.
139 void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
140 OS << "********** INTERVALS **********\n";
142 // Dump the regunits.
143 for (unsigned i = 0, e = RegUnitRanges.size(); i != e; ++i)
144 if (LiveRange *LR = RegUnitRanges[i])
145 OS << PrintRegUnit(i, TRI) << ' ' << *LR << '\n';
147 // Dump the virtregs.
148 for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
149 unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
150 if (hasInterval(Reg))
151 OS << getInterval(Reg) << '\n';
// Dump the slot index of every recorded register-mask operand.
155 for (unsigned i = 0, e = RegMaskSlots.size(); i != e; ++i)
156 OS << ' ' << RegMaskSlots[i];
// Print the machine instructions of the current function, annotated with
// slot indexes.
162 void LiveIntervals::printInstrs(raw_ostream &OS) const {
163 OS << "********** MACHINEINSTRS **********\n";
164 MF->print(OS, Indexes);
// Debug/dump builds only: convenience wrapper around printInstrs.
// (Body elided in this listing.)
167 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
168 void LiveIntervals::dumpInstrs() const {
// Allocate a fresh LiveInterval for \p reg. Physical registers get an
// infinite spill weight so the allocator never tries to spill them;
// virtual registers start at weight zero.
173 LiveInterval* LiveIntervals::createInterval(unsigned reg) {
174 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
175 llvm::huge_valf : 0.0F;
176 return new LiveInterval(reg, Weight);
180 /// computeVirtRegInterval - Compute the live interval of a virtual register,
181 /// based on defs and uses.
182 void LiveIntervals::computeVirtRegInterval(LiveInterval &LI) {
183 assert(LRCalc && "LRCalc not initialized.");
184 assert(LI.empty() && "Should only compute empty intervals.");
185 LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
// Track subregister lanes only if the target/MRI asks for it for this vreg.
186 LRCalc->calculate(LI, MRI->shouldTrackSubRegLiveness(LI.reg));
// Mark dead defs; no dead-instruction list is collected here (nullptr).
187 computeDeadValues(LI, nullptr);
// Build a live interval for every virtual register that has at least one
// non-debug use or def.
190 void LiveIntervals::computeVirtRegs() {
191 for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
192 unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
// Skip vregs that only appear in debug instructions (or not at all).
193 if (MRI->reg_nodbg_empty(Reg))
195 createAndComputeVirtRegInterval(Reg);
// Record the slot index and clobber mask of every register-mask operand in
// the function, plus per-block (first, count) ranges into those arrays.
199 void LiveIntervals::computeRegMasks() {
200 RegMaskBlocks.resize(MF->getNumBlockIDs());
202 // Find all instructions with regmask operands.
203 for (MachineBasicBlock &MBB : *MF) {
204 std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB.getNumber()];
205 RMB.first = RegMaskSlots.size();
207 // Some block starts, such as EH funclets, create masks.
208 if (const uint32_t *Mask = MBB.getBeginClobberMask(TRI)) {
209 RegMaskSlots.push_back(Indexes->getMBBStartIdx(&MBB));
210 RegMaskBits.push_back(Mask);
// NOTE(review): the guard skipping non-regmask operands (around original
// line 215) is not visible in this listing -- presumably
// `if (!MO.isRegMask()) continue;`.
213 for (MachineInstr &MI : MBB) {
214 for (const MachineOperand &MO : MI.operands()) {
217 RegMaskSlots.push_back(Indexes->getInstructionIndex(MI).getRegSlot());
218 RegMaskBits.push_back(MO.getRegMask());
222 // Some block ends, such as funclet returns, create masks. Put the mask on
223 // the last instruction of the block, because MBB slot index intervals are
225 if (const uint32_t *Mask = MBB.getEndClobberMask(TRI)) {
226 assert(!MBB.empty() && "empty return block?");
227 RegMaskSlots.push_back(
228 Indexes->getInstructionIndex(MBB.back()).getRegSlot());
229 RegMaskBits.push_back(Mask);
232 // Compute the number of register mask instructions in this block.
233 RMB.second = RegMaskSlots.size() - RMB.first;
237 //===----------------------------------------------------------------------===//
238 // Register Unit Liveness
239 //===----------------------------------------------------------------------===//
241 // Fixed interference typically comes from ABI boundaries: Function arguments
242 // and return values are passed in fixed registers, and so are exception
243 // pointers entering landing pads. Certain instructions require values to be
244 // present in specific registers. That is also represented through fixed
248 /// computeRegUnitInterval - Compute the live range of a register unit, based
249 /// on the uses and defs of aliasing registers. The range should be empty,
250 /// or contain only dead phi-defs from ABI blocks.
251 void LiveIntervals::computeRegUnitRange(LiveRange &LR, unsigned Unit) {
252 assert(LRCalc && "LRCalc not initialized.");
253 LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
255 // The physregs aliasing Unit are the roots and their super-registers.
256 // Create all values as dead defs before extending to uses. Note that roots
257 // may share super-registers. That's OK because createDeadDefs() is
258 // idempotent. It is very rare for a register unit to have multiple roots, so
259 // uniquing super-registers is probably not worthwhile.
260 for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
261 for (MCSuperRegIterator Supers(*Roots, TRI, /*IncludeSelf=*/true);
262 Supers.isValid(); ++Supers) {
// Skip registers with no defs/uses at all -- nothing to seed.
263 if (!MRI->reg_empty(*Supers))
264 LRCalc->createDeadDefs(LR, *Supers);
268 // Now extend LR to reach all uses.
269 // Ignore uses of reserved registers. We only track defs of those.
270 for (MCRegUnitRootIterator Roots(Unit, TRI); Roots.isValid(); ++Roots) {
271 for (MCSuperRegIterator Supers(*Roots, TRI, /*IncludeSelf=*/true);
272 Supers.isValid(); ++Supers) {
273 unsigned Reg = *Supers;
274 if (!MRI->isReserved(Reg) && !MRI->reg_empty(Reg))
275 LRCalc->extendToUses(LR, Reg);
279 // Flush the segment set to the segment vector.
280 if (UseSegmentSetForPhysRegs)
281 LR.flushSegmentSet();
285 /// computeLiveInRegUnits - Precompute the live ranges of any register units
286 /// that are live-in to an ABI block somewhere. Register values can appear
287 /// without a corresponding def when entering the entry block or a landing pad.
289 void LiveIntervals::computeLiveInRegUnits() {
290 RegUnitRanges.resize(TRI->getNumRegUnits());
291 DEBUG(dbgs() << "Computing live-in reg-units in ABI blocks.\n");
293 // Keep track of the live range sets allocated.
294 SmallVector<unsigned, 8> NewRanges;
296 // Check all basic blocks for live-ins.
297 for (MachineFunction::const_iterator MFI = MF->begin(), MFE = MF->end();
299 const MachineBasicBlock *MBB = &*MFI;
301 // We only care about ABI blocks: Entry + landing pads.
302 if ((MFI != MF->begin() && !MBB->isEHPad()) || MBB->livein_empty())
305 // Create phi-defs at Begin for all live-in registers.
306 SlotIndex Begin = Indexes->getMBBStartIdx(MBB);
307 DEBUG(dbgs() << Begin << "\tBB#" << MBB->getNumber());
308 for (const auto &LI : MBB->liveins()) {
309 for (MCRegUnitIterator Units(LI.PhysReg, TRI); Units.isValid(); ++Units) {
310 unsigned Unit = *Units;
// Lazily allocate a range the first time a unit is seen live-in.
311 LiveRange *LR = RegUnitRanges[Unit];
313 // Use segment set to speed-up initial computation of the live range.
314 LR = RegUnitRanges[Unit] = new LiveRange(UseSegmentSetForPhysRegs);
315 NewRanges.push_back(Unit);
// Seed the range with a dead PHI-def at the block start.
317 VNInfo *VNI = LR->createDeadDef(Begin, getVNInfoAllocator());
319 DEBUG(dbgs() << ' ' << PrintRegUnit(Unit, TRI) << '#' << VNI->id);
322 DEBUG(dbgs() << '\n');
324 DEBUG(dbgs() << "Created " << NewRanges.size() << " new intervals.\n");
326 // Compute the 'normal' part of the ranges.
327 for (unsigned i = 0, e = NewRanges.size(); i != e; ++i) {
328 unsigned Unit = NewRanges[i];
329 computeRegUnitRange(*RegUnitRanges[Unit], Unit);
// Seed \p LR with a minimal (def, dead-slot) segment for every value number
// in \p VNIs. extendSegmentsToUses() later grows these to cover real uses.
334 static void createSegmentsForValues(LiveRange &LR,
335 iterator_range<LiveInterval::vni_iterator> VNIs) {
336 for (auto VNI : VNIs) {
339 SlotIndex Def = VNI->def;
340 LR.addSegment(LiveRange::Segment(Def, Def.getDeadSlot(), VNI));
// Worklist of (use slot, value) pairs still to be made live.
344 typedef SmallVector<std::pair<SlotIndex, VNInfo*>, 16> ShrinkToUsesWorkList;
// Grow the minimal segments in \p LR (built by createSegmentsForValues)
// backwards from each use in \p WorkList, walking predecessors as needed.
// \p OldRange supplies the pre-shrink liveness used to find live-out values.
346 static void extendSegmentsToUses(LiveRange &LR, const SlotIndexes &Indexes,
347 ShrinkToUsesWorkList &WorkList,
348 const LiveRange &OldRange) {
349 // Keep track of the PHIs that are in use.
350 SmallPtrSet<VNInfo*, 8> UsedPHIs;
351 // Blocks that have already been added to WorkList as live-out.
352 SmallPtrSet<MachineBasicBlock*, 16> LiveOut;
354 // Extend intervals to reach all uses in WorkList.
355 while (!WorkList.empty()) {
356 SlotIndex Idx = WorkList.back().first;
357 VNInfo *VNI = WorkList.back().second;
359 const MachineBasicBlock *MBB = Indexes.getMBBFromIndex(Idx.getPrevSlot());
360 SlotIndex BlockStart = Indexes.getMBBStartIdx(MBB);
362 // Extend the live range for VNI to be live at Idx.
363 if (VNInfo *ExtVNI = LR.extendInBlock(BlockStart, Idx)) {
364 assert(ExtVNI == VNI && "Unexpected existing value number");
366 // Is this a PHIDef we haven't seen before?
367 if (!VNI->isPHIDef() || VNI->def != BlockStart ||
368 !UsedPHIs.insert(VNI).second)
370 // The PHI is live, make sure the predecessors are live-out.
371 for (auto &Pred : MBB->predecessors()) {
372 if (!LiveOut.insert(Pred).second)
374 SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
375 // A predecessor is not required to have a live-out value for a PHI.
376 if (VNInfo *PVNI = OldRange.getVNInfoBefore(Stop))
377 WorkList.push_back(std::make_pair(Stop, PVNI));
382 // VNI is live-in to MBB.
383 DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
384 LR.addSegment(LiveRange::Segment(BlockStart, Idx, VNI));
386 // Make sure VNI is live-out from the predecessors.
387 for (auto &Pred : MBB->predecessors()) {
388 if (!LiveOut.insert(Pred).second)
390 SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
391 assert(OldRange.getVNInfoBefore(Stop) == VNI &&
392 "Wrong value out of predecessor");
393 WorkList.push_back(std::make_pair(Stop, VNI));
// Shrink \p li so it covers only the slots actually needed by its uses,
// recording instructions whose defs all became dead in \p dead. Returns
// true if the interval may now consist of separate connected components.
398 bool LiveIntervals::shrinkToUses(LiveInterval *li,
399 SmallVectorImpl<MachineInstr*> *dead) {
400 DEBUG(dbgs() << "Shrink: " << *li << '\n');
401 assert(TargetRegisterInfo::isVirtualRegister(li->reg)
402 && "Can only shrink virtual registers");
404 // Shrink subregister live ranges.
405 bool NeedsCleanup = false;
406 for (LiveInterval::SubRange &S : li->subranges()) {
407 shrinkToUses(S, li->reg);
412 li->removeEmptySubRanges();
414 // Find all the values used, including PHI kills.
415 ShrinkToUsesWorkList WorkList;
417 // Visit all instructions reading li->reg.
418 for (MachineRegisterInfo::reg_instr_iterator
419 I = MRI->reg_instr_begin(li->reg), E = MRI->reg_instr_end();
421 MachineInstr *UseMI = &*(I++);
422 if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
424 SlotIndex Idx = getInstructionIndex(*UseMI).getRegSlot();
425 LiveQueryResult LRQ = li->Query(Idx);
426 VNInfo *VNI = LRQ.valueIn();
428 // This shouldn't happen: readsVirtualRegister returns true, but there is
429 // no live value. It is likely caused by a target getting <undef> flags
431 DEBUG(dbgs() << Idx << '\t' << *UseMI
432 << "Warning: Instr claims to read non-existent value in "
436 // Special case: An early-clobber tied operand reads and writes the
437 // register one slot early.
438 if (VNInfo *DefVNI = LRQ.valueDefined())
441 WorkList.push_back(std::make_pair(Idx, VNI));
444 // Create new live ranges with only minimal live segments per def.
446 createSegmentsForValues(NewLR, make_range(li->vni_begin(), li->vni_end()));
447 extendSegmentsToUses(NewLR, *Indexes, WorkList, *li);
449 // Move the trimmed segments back.
450 li->segments.swap(NewLR.segments);
452 // Handle dead values.
453 bool CanSeparate = computeDeadValues(*li, dead);
454 DEBUG(dbgs() << "Shrunk: " << *li << '\n');
// Walk the value numbers of \p LI, flag dead defs on their defining
// instructions, and collect fully dead instructions in \p dead. Returns
// true if removing dead PHIs may have split the interval into components.
458 bool LiveIntervals::computeDeadValues(LiveInterval &LI,
459 SmallVectorImpl<MachineInstr*> *dead) {
460 bool MayHaveSplitComponents = false;
461 for (auto VNI : LI.valnos) {
464 SlotIndex Def = VNI->def;
465 LiveRange::iterator I = LI.FindSegmentContaining(Def);
466 assert(I != LI.end() && "Missing segment for VNI");
468 // Is the register live before? Otherwise we may have to add a read-undef
469 // flag for subregister defs.
470 unsigned VReg = LI.reg;
471 if (MRI->shouldTrackSubRegLiveness(VReg)) {
472 if ((I == LI.begin() || std::prev(I)->end < Def) && !VNI->isPHIDef()) {
473 MachineInstr *MI = getInstructionFromIndex(Def);
474 MI->setRegisterDefReadUndef(VReg);
// A value whose segment extends past the dead slot is live -- skip it.
478 if (I->end != Def.getDeadSlot())
480 if (VNI->isPHIDef()) {
481 // This is a dead PHI. Remove it.
484 DEBUG(dbgs() << "Dead PHI at " << Def << " may separate interval\n");
485 MayHaveSplitComponents = true;
487 // This is a dead def. Make sure the instruction knows.
488 MachineInstr *MI = getInstructionFromIndex(Def);
489 assert(MI && "No instruction defining live value");
490 MI->addRegisterDead(LI.reg, TRI);
491 if (dead && MI->allDefsAreDead()) {
492 DEBUG(dbgs() << "All defs dead: " << Def << '\t' << *MI);
497 return MayHaveSplitComponents;
// Subrange variant of shrinkToUses: trim the lane-masked subrange \p SR of
// virtual register \p Reg down to its actual uses and drop dead PHI values.
500 void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg) {
501 DEBUG(dbgs() << "Shrink: " << SR << '\n');
502 assert(TargetRegisterInfo::isVirtualRegister(Reg)
503 && "Can only shrink virtual registers");
504 // Find all the values used, including PHI kills.
505 ShrinkToUsesWorkList WorkList;
507 // Visit all instructions reading Reg.
509 for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
510 // Skip "undef" uses.
513 // Maybe the operand is for a subregister we don't care about.
514 unsigned SubReg = MO.getSubReg();
// Only uses whose lanes intersect SR's lane mask matter here.
516 LaneBitmask LaneMask = TRI->getSubRegIndexLaneMask(SubReg);
517 if ((LaneMask & SR.LaneMask).none())
520 // We only need to visit each instruction once.
521 MachineInstr *UseMI = MO.getParent();
522 SlotIndex Idx = getInstructionIndex(*UseMI).getRegSlot();
527 LiveQueryResult LRQ = SR.Query(Idx);
528 VNInfo *VNI = LRQ.valueIn();
529 // For Subranges it is possible that only undef values are left in that
530 // part of the subregister, so there is no real liverange at the use
534 // Special case: An early-clobber tied operand reads and writes the
535 // register one slot early.
536 if (VNInfo *DefVNI = LRQ.valueDefined())
539 WorkList.push_back(std::make_pair(Idx, VNI));
542 // Create a new live ranges with only minimal live segments per def.
544 createSegmentsForValues(NewLR, make_range(SR.vni_begin(), SR.vni_end()));
545 extendSegmentsToUses(NewLR, *Indexes, WorkList, SR);
547 // Move the trimmed ranges back.
548 SR.segments.swap(NewLR.segments);
550 // Remove dead PHI value numbers
551 for (auto VNI : SR.valnos) {
554 const LiveRange::Segment *Segment = SR.getSegmentContaining(VNI->def);
555 assert(Segment != nullptr && "Missing segment for VNI");
556 if (Segment->end != VNI->def.getDeadSlot())
558 if (VNI->isPHIDef()) {
559 // This is a dead PHI. Remove it.
560 DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
562 SR.removeSegment(*Segment);
566 DEBUG(dbgs() << "Shrunk: " << SR << '\n');
// Extend \p LR so it is live at every slot in \p Indices, treating the
// slots in \p Undefs as points where the value is explicitly undefined.
569 void LiveIntervals::extendToIndices(LiveRange &LR,
570 ArrayRef<SlotIndex> Indices,
571 ArrayRef<SlotIndex> Undefs) {
572 assert(LRCalc && "LRCalc not initialized.");
573 LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
574 for (unsigned i = 0, e = Indices.size(); i != e; ++i)
575 LRCalc->extend(LR, Indices[i], /*PhysReg=*/0, Undefs);
// Remove liveness of the value live at \p Kill from \p Kill onwards,
// following the CFG through every block the value reaches. Each point where
// liveness actually ended is appended to \p EndPoints (if non-null) so the
// caller can re-extend later.
578 void LiveIntervals::pruneValue(LiveRange &LR, SlotIndex Kill,
579 SmallVectorImpl<SlotIndex> *EndPoints) {
580 LiveQueryResult LRQ = LR.Query(Kill);
581 VNInfo *VNI = LRQ.valueOutOrDead();
585 MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill);
586 SlotIndex MBBEnd = Indexes->getMBBEndIdx(KillMBB);
588 // If VNI isn't live out from KillMBB, the value is trivially pruned.
589 if (LRQ.endPoint() < MBBEnd) {
590 LR.removeSegment(Kill, LRQ.endPoint());
591 if (EndPoints) EndPoints->push_back(LRQ.endPoint());
595 // VNI is live out of KillMBB.
596 LR.removeSegment(Kill, MBBEnd);
597 if (EndPoints) EndPoints->push_back(MBBEnd);
599 // Find all blocks that are reachable from KillMBB without leaving VNI's live
600 // range. It is possible that KillMBB itself is reachable, so start a DFS
601 // from each successor.
602 typedef df_iterator_default_set<MachineBasicBlock*,9> VisitedTy;
604 for (MachineBasicBlock::succ_iterator
605 SuccI = KillMBB->succ_begin(), SuccE = KillMBB->succ_end();
606 SuccI != SuccE; ++SuccI) {
607 for (df_ext_iterator<MachineBasicBlock*, VisitedTy>
608 I = df_ext_begin(*SuccI, Visited), E = df_ext_end(*SuccI, Visited);
610 MachineBasicBlock *MBB = *I;
612 // Check if VNI is live in to MBB.
613 SlotIndex MBBStart, MBBEnd;
614 std::tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB);
615 LiveQueryResult LRQ = LR.Query(MBBStart);
616 if (LRQ.valueIn() != VNI) {
617 // This block isn't part of the VNI segment. Prune the search.
622 // Prune the search if VNI is killed in MBB.
623 if (LRQ.endPoint() < MBBEnd) {
624 LR.removeSegment(MBBStart, LRQ.endPoint());
625 if (EndPoints) EndPoints->push_back(LRQ.endPoint());
630 // VNI is live through MBB.
631 LR.removeSegment(MBBStart, MBBEnd);
632 if (EndPoints) EndPoints->push_back(MBBEnd);
638 //===----------------------------------------------------------------------===//
639 // Register allocator hooks.
// Re-derive <kill> flags for every virtual register from its live interval,
// cancelling kills where the assigned physreg (per \p VRM) or defined
// subregister lanes are still live past the would-be kill point.
642 void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
643 // Keep track of regunit ranges.
644 SmallVector<std::pair<const LiveRange*, LiveRange::const_iterator>, 8> RU;
645 // Keep track of subregister ranges.
646 SmallVector<std::pair<const LiveInterval::SubRange*,
647 LiveRange::const_iterator>, 4> SRs;
649 for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
650 unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
651 if (MRI->reg_nodbg_empty(Reg))
653 const LiveInterval &LI = getInterval(Reg);
657 // Find the regunit intervals for the assigned register. They may overlap
658 // the virtual register live range, cancelling any kills.
660 for (MCRegUnitIterator Units(VRM->getPhys(Reg), TRI); Units.isValid();
662 const LiveRange &RURange = getRegUnit(*Units);
665 RU.push_back(std::make_pair(&RURange, RURange.find(LI.begin()->end)));
// With subreg liveness, cache cursors into each subrange as well.
668 if (MRI->subRegLivenessEnabled()) {
670 for (const LiveInterval::SubRange &SR : LI.subranges()) {
671 SRs.push_back(std::make_pair(&SR, SR.find(LI.begin()->end)));
675 // Every instruction that kills Reg corresponds to a segment range end
677 for (LiveInterval::const_iterator RI = LI.begin(), RE = LI.end(); RI != RE;
679 // A block index indicates an MBB edge.
680 if (RI->end.isBlock())
682 MachineInstr *MI = getInstructionFromIndex(RI->end);
686 // Check if any of the regunits are live beyond the end of RI. That could
687 // happen when a physreg is defined as a copy of a virtreg:
689 // %EAX = COPY %vreg5
690 // FOO %vreg5 <--- MI, cancel kill because %EAX is live.
693 // There should be no kill flag on FOO when %vreg5 is rewritten as %EAX.
694 for (auto &RUP : RU) {
695 const LiveRange &RURange = *RUP.first;
696 LiveRange::const_iterator &I = RUP.second;
697 if (I == RURange.end())
699 I = RURange.advanceTo(I, RI->end);
700 if (I == RURange.end() || I->start >= RI->end)
702 // I is overlapping RI.
706 if (MRI->subRegLivenessEnabled()) {
707 // When reading a partial undefined value we must not add a kill flag.
708 // The regalloc might have used the undef lane for something else.
710 // %vreg1 = ... ; R32: %vreg1
711 // %vreg2:high16 = ... ; R64: %vreg2
712 // = read %vreg2<kill> ; R64: %vreg2
713 // = read %vreg1 ; R32: %vreg1
714 // The <kill> flag is correct for %vreg2, but the register allocator may
715 // assign R0L to %vreg1, and R0 to %vreg2 because the low 32bits of R0
716 // are actually never written by %vreg2. After assignment the <kill>
717 // flag at the read instruction is invalid.
718 LaneBitmask DefinedLanesMask;
720 // Compute a mask of lanes that are defined.
721 DefinedLanesMask = LaneBitmask::getNone();
722 for (auto &SRP : SRs) {
723 const LiveInterval::SubRange &SR = *SRP.first;
724 LiveRange::const_iterator &I = SRP.second;
727 I = SR.advanceTo(I, RI->end);
728 if (I == SR.end() || I->start >= RI->end)
730 // I is overlapping RI
731 DefinedLanesMask |= SR.LaneMask;
// No subranges: conservatively treat every lane as defined.
734 DefinedLanesMask = LaneBitmask::getAll();
736 bool IsFullWrite = false;
737 for (const MachineOperand &MO : MI->operands()) {
738 if (!MO.isReg() || MO.getReg() != Reg)
741 // Reading any undefined lanes?
742 LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
743 if ((UseMask & ~DefinedLanesMask).any())
745 } else if (MO.getSubReg() == 0) {
746 // Writing to the full register?
752 // If an instruction writes to a subregister, a new segment starts in
753 // the LiveInterval. But as this is only overriding part of the register
754 // adding kill-flags is not correct here after registers have been
757 // Next segment has to be adjacent in the subregister write case.
758 LiveRange::const_iterator N = std::next(RI);
759 if (N != LI.end() && N->start == RI->end)
762 MI->addRegisterKilled(Reg, nullptr);
764 MI->clearRegisterKills(Reg, nullptr);
// Return the single basic block containing \p LI, or nullptr if the
// interval spans block boundaries.
773 LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
774 // A local live range must be fully contained inside the block, meaning it is
775 // defined and killed at instructions, not at block boundaries. It is not
776 // live in or out of any block.
778 // It is technically possible to have a PHI-defined live range identical to a
779 // single block, but we are going to return false in that case.
781 SlotIndex Start = LI.beginIndex();
785 SlotIndex Stop = LI.endIndex();
789 // getMBBFromIndex doesn't need to search the MBB table when both indexes
790 // belong to proper instructions.
791 MachineBasicBlock *MBB1 = Indexes->getMBBFromIndex(Start);
792 MachineBasicBlock *MBB2 = Indexes->getMBBFromIndex(Stop);
793 return MBB1 == MBB2 ? MBB1 : nullptr;
// Return true if value \p VNI of \p LI feeds a PHI in some successor block,
// i.e. it is live-out into a PHI-def value.
797 LiveIntervals::hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const {
798 for (const VNInfo *PHI : LI.valnos) {
799 if (PHI->isUnused() || !PHI->isPHIDef())
801 const MachineBasicBlock *PHIMBB = getMBBFromIndex(PHI->def);
802 // Conservatively return true instead of scanning huge predecessor lists.
803 if (PHIMBB->pred_size() > 100)
805 for (MachineBasicBlock::const_pred_iterator
806 PI = PHIMBB->pred_begin(), PE = PHIMBB->pred_end(); PI != PE; ++PI)
// VNI feeds this PHI if it is the value live-out of any predecessor.
807 if (VNI == LI.getVNInfoBefore(Indexes->getMBBEndIdx(*PI)))
// Spill weight contribution of one def/use of an interval at \p MI:
// (def count + use count) scaled by the containing block's execution
// frequency relative to the entry block.
813 float LiveIntervals::getSpillWeight(bool isDef, bool isUse,
814 const MachineBlockFrequencyInfo *MBFI,
815 const MachineInstr &MI) {
816 BlockFrequency Freq = MBFI->getBlockFreq(MI.getParent());
817 const float Scale = 1.0f / MBFI->getEntryFreq();
818 return (isDef + isUse) * (Freq.getFrequency() * Scale);
// Create an empty interval for \p reg and add a segment covering from the
// def slot of \p startInst to the end of its basic block.
822 LiveIntervals::addSegmentToEndOfBlock(unsigned reg, MachineInstr &startInst) {
823 LiveInterval& Interval = createEmptyInterval(reg);
824 VNInfo *VN = Interval.getNextValue(
825 SlotIndex(getInstructionIndex(startInst).getRegSlot()),
826 getVNInfoAllocator());
827 LiveRange::Segment S(SlotIndex(getInstructionIndex(startInst).getRegSlot()),
828 getMBBEndIdx(startInst.getParent()), VN);
829 Interval.addSegment(S);
835 //===----------------------------------------------------------------------===//
836 // Register mask functions
837 //===----------------------------------------------------------------------===//
// Determine whether \p LI overlaps any register-mask (call-clobber) slots.
// On the first overlap, \p UsableRegs is initialized to all-ones and then
// intersected with each overlapping mask.
839 bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
840 BitVector &UsableRegs) {
843 LiveInterval::iterator LiveI = LI.begin(), LiveE = LI.end();
845 // Use a smaller arrays for local live ranges.
846 ArrayRef<SlotIndex> Slots;
847 ArrayRef<const uint32_t*> Bits;
848 if (MachineBasicBlock *MBB = intervalIsInOneMBB(LI)) {
849 Slots = getRegMaskSlotsInBlock(MBB->getNumber());
850 Bits = getRegMaskBitsInBlock(MBB->getNumber());
852 Slots = getRegMaskSlots();
853 Bits = getRegMaskBits();
856 // We are going to enumerate all the register mask slots contained in LI.
857 // Start with a binary search of RegMaskSlots to find a starting point.
858 ArrayRef<SlotIndex>::iterator SlotI =
859 std::lower_bound(Slots.begin(), Slots.end(), LiveI->start);
860 ArrayRef<SlotIndex>::iterator SlotE = Slots.end();
862 // No slots in range, LI begins after the last call.
868 assert(*SlotI >= LiveI->start);
869 // Loop over all slots overlapping this segment.
870 while (*SlotI < LiveI->end) {
871 // *SlotI overlaps LI. Collect mask bits.
873 // This is the first overlap. Initialize UsableRegs to all ones.
875 UsableRegs.resize(TRI->getNumRegs(), true);
878 // Remove usable registers clobbered by this mask.
879 UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]);
880 if (++SlotI == SlotE)
883 // *SlotI is beyond the current LI segment.
884 LiveI = LI.advanceTo(LiveI, *SlotI);
887 // Advance SlotI until it overlaps.
888 while (*SlotI < LiveI->start)
889 if (++SlotI == SlotE)
894 //===----------------------------------------------------------------------===//
895 // IntervalUpdate class.
896 //===----------------------------------------------------------------------===//
898 // HMEditor is a toolkit used by handleMove to trim or extend live intervals.
899 class LiveIntervals::HMEditor {
902 const MachineRegisterInfo& MRI;
903 const TargetRegisterInfo& TRI;
906 SmallPtrSet<LiveRange*, 8> Updated;
910 HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
911 const TargetRegisterInfo& TRI,
912 SlotIndex OldIdx, SlotIndex NewIdx, bool UpdateFlags)
913 : LIS(LIS), MRI(MRI), TRI(TRI), OldIdx(OldIdx), NewIdx(NewIdx),
914 UpdateFlags(UpdateFlags) {}
916 // FIXME: UpdateFlags is a workaround that creates live intervals for all
917 // physregs, even those that aren't needed for regalloc, in order to update
918 // kill flags. This is wasteful. Eventually, LiveVariables will strip all kill
919 // flags, and postRA passes will use a live register utility instead.
920 LiveRange *getRegUnitLI(unsigned Unit) {
922 return &LIS.getRegUnit(Unit);
923 return LIS.getCachedRegUnit(Unit);
926 /// Update all live ranges touched by MI, assuming a move from OldIdx to
928 void updateAllRanges(MachineInstr *MI) {
929 DEBUG(dbgs() << "handleMove " << OldIdx << " -> " << NewIdx << ": " << *MI);
930 bool hasRegMask = false;
931 for (MachineOperand &MO : MI->operands()) {
939 // Aggressively clear all kill flags.
940 // They are reinserted by VirtRegRewriter.
944 unsigned Reg = MO.getReg();
947 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
948 LiveInterval &LI = LIS.getInterval(Reg);
949 if (LI.hasSubRanges()) {
950 unsigned SubReg = MO.getSubReg();
951 LaneBitmask LaneMask = SubReg ? TRI.getSubRegIndexLaneMask(SubReg)
952 : MRI.getMaxLaneMaskForVReg(Reg);
953 for (LiveInterval::SubRange &S : LI.subranges()) {
954 if ((S.LaneMask & LaneMask).none())
956 updateRange(S, Reg, S.LaneMask);
959 updateRange(LI, Reg, LaneBitmask::getNone());
963 // For physregs, only update the regunits that actually have a
964 // precomputed live range.
965 for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
966 if (LiveRange *LR = getRegUnitLI(*Units))
967 updateRange(*LR, *Units, LaneBitmask::getNone());
970 updateRegMaskSlots();
974 /// Update a single live range, assuming an instruction has been moved from
975 /// OldIdx to NewIdx.
976 void updateRange(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
977 if (!Updated.insert(&LR).second)
981 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
982 dbgs() << PrintReg(Reg);
984 dbgs() << " L" << PrintLaneMask(LaneMask);
986 dbgs() << PrintRegUnit(Reg, &TRI);
988 dbgs() << ":\t" << LR << '\n';
990 if (SlotIndex::isEarlierInstr(OldIdx, NewIdx))
993 handleMoveUp(LR, Reg, LaneMask);
994 DEBUG(dbgs() << " -->\t" << LR << '\n');
998 /// Update LR to reflect an instruction has been moved downwards from OldIdx
999 /// to NewIdx (OldIdx < NewIdx).
// Note: unlike handleMoveUp, this takes no Reg/LaneMask — segments are only
// extended or shifted here, never re-scanned for uses of the register.
1000 void handleMoveDown(LiveRange &LR) {
1001 LiveRange::iterator E = LR.end();
1002 // Segment going into OldIdx.
1003 LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());
1005 // No value live before or after OldIdx? Nothing to do.
1006 if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
1009 LiveRange::iterator OldIdxOut;
1010 // Do we have a value live-in to OldIdx?
1011 if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
1012 // If the live-in value already extends to NewIdx, there is nothing to do.
1013 if (SlotIndex::isEarlierEqualInstr(NewIdx, OldIdxIn->end))
1015 // Aggressively remove all kill flags from the old kill point.
1016 // Kill flags shouldn't be used while live intervals exist, they will be
1017 // reinserted by VirtRegRewriter.
1018 if (MachineInstr *KillMI = LIS.getInstructionFromIndex(OldIdxIn->end))
1019 for (MIBundleOperands MO(*KillMI); MO.isValid(); ++MO)
1020 if (MO->isReg() && MO->isUse())
1021 MO->setIsKill(false);
1023 // Is there a def before NewIdx which is not OldIdx?
1024 LiveRange::iterator Next = std::next(OldIdxIn);
1025 if (Next != E && !SlotIndex::isSameInstr(OldIdx, Next->start) &&
1026 SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
1027 // If we are here then OldIdx was just a use but not a def. We only have
1028 // to ensure liveness extends to NewIdx.
1029 LiveRange::iterator NewIdxIn =
1030 LR.advanceTo(Next, NewIdx.getBaseIndex());
1031 // Extend the segment before NewIdx if necessary.
1032 if (NewIdxIn == E ||
1033 !SlotIndex::isEarlierInstr(NewIdxIn->start, NewIdx)) {
1034 LiveRange::iterator Prev = std::prev(NewIdxIn);
1035 Prev->end = NewIdx.getRegSlot();
// Terminate the segment that went into OldIdx at the intervening def.
1038 OldIdxIn->end = Next->start;
1042 // Adjust OldIdxIn->end to reach NewIdx. This may temporarily make LR
1043 // invalid by overlapping ranges.
// isKill: the moved instruction itself was the last user of the live-in
// value at OldIdx (and may additionally define the register there).
1044 bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
1045 OldIdxIn->end = NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber());
1046 // If this was not a kill, then there was no def and we're done.
1050 // Did we have a Def at OldIdx?
1052 if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
1055 OldIdxOut = OldIdxIn;
1058 // If we are here then there is a Definition at OldIdx. OldIdxOut points
1059 // to the segment starting there.
1060 assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
1062 VNInfo *OldIdxVNI = OldIdxOut->valno;
1063 assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");
1065 // If the defined value extends beyond NewIdx, just move the beginning
1066 // of the segment to NewIdx.
1067 SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
1068 if (SlotIndex::isEarlierInstr(NewIdxDef, OldIdxOut->end)) {
1069 OldIdxVNI->def = NewIdxDef;
1070 OldIdxOut->start = OldIdxVNI->def;
1074 // If we are here then we have a Definition at OldIdx which ends before
1077 // Is there an existing Def at NewIdx?
1078 LiveRange::iterator AfterNewIdx
1079 = LR.advanceTo(OldIdxOut, NewIdx.getRegSlot());
// A "dead" end slot means the def at OldIdx had no consumers at all.
1080 bool OldIdxDefIsDead = OldIdxOut->end.isDead();
1081 if (!OldIdxDefIsDead &&
1082 SlotIndex::isEarlierInstr(OldIdxOut->end, NewIdxDef)) {
1083 // OldIdx is not a dead def, and NewIdxDef is inside a new interval.
1085 if (OldIdxOut != LR.begin() &&
1086 !SlotIndex::isEarlierInstr(std::prev(OldIdxOut)->end,
1087 OldIdxOut->start)) {
1088 // There is no gap between OldIdxOut and its predecessor anymore,
1090 LiveRange::iterator IPrev = std::prev(OldIdxOut);
1092 IPrev->end = OldIdxOut->end;
1094 // The value is live in to OldIdx.
1095 LiveRange::iterator INext = std::next(OldIdxOut);
1096 assert(INext != E && "Must have following segment");
1097 // We merge OldIdxOut and its successor. As we're dealing with subreg
1098 // reordering, there is always a successor to OldIdxOut in the same BB.
1099 // We don't need INext->valno anymore and will reuse it for the new segment.
1102 INext->start = OldIdxOut->end;
1103 INext->valno->def = INext->start;
1105 // If NewIdx is behind the last segment, extend that and append a new one.
1106 if (AfterNewIdx == E) {
1107 // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
1109 // |- ?/OldIdxOut -| |- X0 -| ... |- Xn -| end
1110 // => |- X0/OldIdxOut -| ... |- Xn -| |- undef/NewS -| end
1111 std::copy(std::next(OldIdxOut), E, OldIdxOut);
1112 // The last segment is undefined now, reuse it for a dead def.
1113 LiveRange::iterator NewSegment = std::prev(E);
1114 *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
// NOTE(review): DefVNI is declared on a line elided from this excerpt;
// it appears to be the value number reused for the moved def — confirm.
1116 DefVNI->def = NewIdxDef;
1118 LiveRange::iterator Prev = std::prev(NewSegment);
1119 Prev->end = NewIdxDef;
1121 // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
1123 // |- ?/OldIdxOut -| |- X0 -| ... |- Xn/AfterNewIdx -| |- Next -|
1124 // => |- X0/OldIdxOut -| ... |- Xn -| |- Xn/AfterNewIdx -| |- Next -|
1125 std::copy(std::next(OldIdxOut), std::next(AfterNewIdx), OldIdxOut);
1126 LiveRange::iterator Prev = std::prev(AfterNewIdx);
1127 // We have two cases:
1128 if (SlotIndex::isEarlierInstr(Prev->start, NewIdxDef)) {
1129 // Case 1: NewIdx is inside a liverange. Split this liverange at
1130 // NewIdxDef into the segment "Prev" followed by "NewSegment".
1131 LiveRange::iterator NewSegment = AfterNewIdx;
1132 *NewSegment = LiveRange::Segment(NewIdxDef, Prev->end, Prev->valno);
1133 Prev->valno->def = NewIdxDef;
1135 *Prev = LiveRange::Segment(Prev->start, NewIdxDef, DefVNI);
1136 DefVNI->def = Prev->start;
1138 // Case 2: NewIdx is in a lifetime hole. Keep AfterNewIdx as is and
1139 // turn Prev into a segment from NewIdx to AfterNewIdx->start.
1140 *Prev = LiveRange::Segment(NewIdxDef, AfterNewIdx->start, DefVNI);
1141 DefVNI->def = NewIdxDef;
1142 assert(DefVNI != AfterNewIdx->valno);
// Remaining case: either coalesce with an existing def at NewIdx or
// materialize a dead def there.
1148 if (AfterNewIdx != E &&
1149 SlotIndex::isSameInstr(AfterNewIdx->start, NewIdxDef)) {
1150 // There is an existing def at NewIdx. The def at OldIdx is coalesced into
1152 assert(AfterNewIdx->valno != OldIdxVNI && "Multiple defs of value?");
1153 LR.removeValNo(OldIdxVNI);
1155 // There was no existing def at NewIdx. We need to create a dead def
1156 // at NewIdx. Shift segments over the old OldIdxOut segment, this frees
1157 // a new segment at the place where we want to construct the dead def.
1158 // |- OldIdxOut -| |- X0 -| ... |- Xn -| |- AfterNewIdx -|
1159 // => |- X0/OldIdxOut -| ... |- Xn -| |- undef/NewS. -| |- AfterNewIdx -|
1160 assert(AfterNewIdx != OldIdxOut && "Inconsistent iterators");
1161 std::copy(std::next(OldIdxOut), AfterNewIdx, OldIdxOut);
1162 // We can reuse OldIdxVNI now.
1163 LiveRange::iterator NewSegment = std::prev(AfterNewIdx);
1164 VNInfo *NewSegmentVNI = OldIdxVNI;
1165 NewSegmentVNI->def = NewIdxDef;
1166 *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
1171 /// Update LR to reflect an instruction has been moved upwards from OldIdx
1172 /// to NewIdx (NewIdx < OldIdx).
// Reg/LaneMask are needed here (unlike handleMoveDown) because shrinking a
// live-in segment requires rescanning for the last remaining use via
// findLastUseBefore.
1173 void handleMoveUp(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
1174 LiveRange::iterator E = LR.end();
1175 // Segment going into OldIdx.
1176 LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());
1178 // No value live before or after OldIdx? Nothing to do.
1179 if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
1182 LiveRange::iterator OldIdxOut;
1183 // Do we have a value live-in to OldIdx?
1184 if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
1185 // If the live-in value isn't killed here, then we have no Def at
1186 // OldIdx, moreover the value must be live at NewIdx so there is nothing
1188 bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
1192 // At this point we have to move OldIdxIn->end back to the nearest
1193 // previous use or (dead-)def but no further than NewIdx.
1194 SlotIndex DefBeforeOldIdx
1195 = std::max(OldIdxIn->start.getDeadSlot(),
1196 NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber()));
1197 OldIdxIn->end = findLastUseBefore(DefBeforeOldIdx, Reg, LaneMask);
1199 // Did we have a Def at OldIdx? If not we are done now.
1200 OldIdxOut = std::next(OldIdxIn);
1201 if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
1204 OldIdxOut = OldIdxIn;
// OldIdx itself is a def: look at the previous segment (if any) to find a
// potential live-in value; E marks "no predecessor".
1205 OldIdxIn = OldIdxOut != LR.begin() ? std::prev(OldIdxOut) : E;
1208 // If we are here then there is a Definition at OldIdx. OldIdxOut points
1209 // to the segment starting there.
1210 assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
1212 VNInfo *OldIdxVNI = OldIdxOut->valno;
1213 assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");
// A "dead" end slot means the def at OldIdx had no consumers.
1214 bool OldIdxDefIsDead = OldIdxOut->end.isDead();
1216 // Is there an existing def at NewIdx?
1217 SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
1218 LiveRange::iterator NewIdxOut = LR.find(NewIdx.getRegSlot());
1219 if (SlotIndex::isSameInstr(NewIdxOut->start, NewIdx)) {
1220 assert(NewIdxOut->valno != OldIdxVNI &&
1221 "Same value defined more than once?");
1222 // If OldIdx was a dead def remove it.
1223 if (!OldIdxDefIsDead) {
1224 // Remove segment starting at NewIdx and move begin of OldIdxOut to
1225 // NewIdx so it can take its place.
1226 OldIdxVNI->def = NewIdxDef;
1227 OldIdxOut->start = NewIdxDef;
1228 LR.removeValNo(NewIdxOut->valno);
1230 // Simply remove the dead def at OldIdx.
1231 LR.removeValNo(OldIdxVNI);
1234 // Previously nothing was live after NewIdx, so all we have to do now is
1235 // move the begin of OldIdxOut to NewIdx.
1236 if (!OldIdxDefIsDead) {
1237 // Do we have any intermediate Defs between OldIdx and NewIdx?
1238 if (OldIdxIn != E &&
1239 SlotIndex::isEarlierInstr(NewIdxDef, OldIdxIn->start)) {
1240 // OldIdx is not a dead def and NewIdx is before predecessor start.
1241 LiveRange::iterator NewIdxIn = NewIdxOut;
1242 assert(NewIdxIn == LR.find(NewIdx.getBaseIndex()));
1243 const SlotIndex SplitPos = NewIdxDef;
1245 // Merge the OldIdxIn and OldIdxOut segments into OldIdxOut.
1246 *OldIdxOut = LiveRange::Segment(OldIdxIn->start, OldIdxOut->end,
1248 // OldIdxIn and OldIdxVNI are now undef and can be overridden.
1249 // We Slide [NewIdxIn, OldIdxIn) down one position.
1250 // |- X0/NewIdxIn -| ... |- Xn-1 -||- Xn/OldIdxIn -||- OldIdxOut -|
1251 // => |- undef/NewIdxIn -| |- X0 -| ... |- Xn-1 -| |- Xn/OldIdxOut -|
1252 std::copy_backward(NewIdxIn, OldIdxIn, OldIdxOut);
1253 // NewIdxIn is now considered undef so we can reuse it for the moved
1255 LiveRange::iterator NewSegment = NewIdxIn;
1256 LiveRange::iterator Next = std::next(NewSegment);
1257 if (SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
1258 // There is no gap between NewSegment and its predecessor.
1259 *NewSegment = LiveRange::Segment(Next->start, SplitPos,
1261 *Next = LiveRange::Segment(SplitPos, Next->end, OldIdxVNI);
1262 Next->valno->def = SplitPos;
1264 // There is a gap between NewSegment and its predecessor.
1265 // Value becomes live in.
1266 *NewSegment = LiveRange::Segment(SplitPos, Next->start, OldIdxVNI);
1267 NewSegment->valno->def = SplitPos;
1270 // Leave the end point of a live def.
1271 OldIdxOut->start = NewIdxDef;
1272 OldIdxVNI->def = NewIdxDef;
// Truncate the live-in segment so it no longer overlaps the moved def.
1273 if (OldIdxIn != E && SlotIndex::isEarlierInstr(NewIdx, OldIdxIn->end))
1274 OldIdxIn->end = NewIdx.getRegSlot();
1277 // OldIdxVNI is a dead def. It may have been moved across other values
1278 // in LR, so move OldIdxOut up to NewIdxOut. Slide [NewIdxOut;OldIdxOut)
1279 // down one position.
1280 // |- X0/NewIdxOut -| ... |- Xn-1 -| |- Xn/OldIdxOut -| |- next - |
1281 // => |- undef/NewIdxOut -| |- X0 -| ... |- Xn-1 -| |- next -|
1282 std::copy_backward(NewIdxOut, OldIdxOut, std::next(OldIdxOut));
1283 // OldIdxVNI can be reused now to build a new dead def segment.
1284 LiveRange::iterator NewSegment = NewIdxOut;
1285 VNInfo *NewSegmentVNI = OldIdxVNI;
1286 *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
1288 NewSegmentVNI->def = NewIdxDef;
// The moved instruction carries a register mask (e.g. a call): rewrite its
// entry in the sorted LIS.RegMaskSlots list from OldIdx to NewIdx, asserting
// that the move did not cross another regmask-carrying instruction (which
// would break the list's sortedness).
1293 void updateRegMaskSlots() {
1294 SmallVectorImpl<SlotIndex>::iterator RI =
1295 std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
1297 assert(RI != LIS.RegMaskSlots.end() && *RI == OldIdx.getRegSlot() &&
1298 "No RegMask at OldIdx.");
1299 *RI = NewIdx.getRegSlot();
1300 assert((RI == LIS.RegMaskSlots.begin() ||
1301 SlotIndex::isEarlierInstr(*std::prev(RI), *RI)) &&
1302 "Cannot move regmask instruction above another call");
1303 assert((std::next(RI) == LIS.RegMaskSlots.end() ||
1304 SlotIndex::isEarlierInstr(*RI, *std::next(RI))) &&
1305 "Cannot move regmask instruction below another call");
1308 // Return the last use of reg between NewIdx and OldIdx.
1309 SlotIndex findLastUseBefore(SlotIndex Before, unsigned Reg,
1310 LaneBitmask LaneMask) {
// Virtual registers have use lists, so scan all non-debug use operands and
// keep the latest one that is still earlier than OldIdx.
1311 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
1312 SlotIndex LastUse = Before;
1313 for (MachineOperand &MO : MRI.use_nodbg_operands(Reg)) {
// Skip operands whose subregister lanes do not overlap LaneMask; a
// full-register use (SubReg == 0) always counts.
1316 unsigned SubReg = MO.getSubReg();
1317 if (SubReg != 0 && LaneMask.any()
1318 && (TRI.getSubRegIndexLaneMask(SubReg) & LaneMask).none())
1321 const MachineInstr &MI = *MO.getParent();
1322 SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
1323 if (InstSlot > LastUse && InstSlot < OldIdx)
1324 LastUse = InstSlot.getRegSlot();
1329 // This is a regunit interval, so scanning the use list could be very
1330 // expensive. Scan upwards from OldIdx instead.
1331 assert(Before < OldIdx && "Expected upwards move");
1332 SlotIndexes *Indexes = LIS.getSlotIndexes();
1333 MachineBasicBlock *MBB = Indexes->getMBBFromIndex(Before);
1335 // OldIdx may not correspond to an instruction any longer, so set MII to
1336 // point to the next instruction after OldIdx, or MBB->end().
1337 MachineBasicBlock::iterator MII = MBB->end();
1338 if (MachineInstr *MI = Indexes->getInstructionFromIndex(
1339 Indexes->getNextNonNullIndex(OldIdx)))
1340 if (MI->getParent() == MBB)
// Walk instructions backwards from MII towards the start of the block.
1343 MachineBasicBlock::iterator Begin = MBB->begin();
1344 while (MII != Begin) {
1345 if ((--MII)->isDebugValue())
1347 SlotIndex Idx = Indexes->getInstructionIndex(*MII);
1349 // Stop searching when Before is reached.
1350 if (!SlotIndex::isEarlierInstr(Before, Idx))
1353 // Check if MII uses Reg.
1354 for (MIBundleOperands MO(*MII); MO.isValid(); ++MO)
1355 if (MO->isReg() && !MO->isUndef() &&
1356 TargetRegisterInfo::isPhysicalRegister(MO->getReg()) &&
1357 TRI.hasRegUnit(MO->getReg(), Reg))
1358 return Idx.getRegSlot();
1360 // Didn't reach Before. It must be the first instruction in the block.
// Reindex MI at its current (new) position in the instruction stream and
// repair every live range it touches via HMEditor. The move must stay inside
// MI's basic block. UpdateFlags is forwarded to HMEditor unchanged — see the
// HMEditor declaration for its semantics.
1365 void LiveIntervals::handleMove(MachineInstr &MI, bool UpdateFlags) {
1366 assert(!MI.isBundled() && "Can't handle bundled instructions yet.");
// Capture the old slot index, then remove and re-insert MI in the slot-index
// maps to obtain its new index.
1367 SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
1368 Indexes->removeMachineInstrFromMaps(MI);
1369 SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(MI);
1370 assert(getMBBStartIdx(MI.getParent()) <= OldIndex &&
1371 OldIndex < getMBBEndIdx(MI.getParent()) &&
1372 "Cannot handle moves across basic block boundaries.");
1374 HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
1375 HME.updateAllRanges(&MI);
// Update live ranges after MI was moved into the bundle headed by
// BundleStart: ranges are rewritten as if MI now lived at BundleStart's slot
// index. Unlike handleMove, MI's own slot-index entry is not re-created here.
1378 void LiveIntervals::handleMoveIntoBundle(MachineInstr &MI,
1379 MachineInstr &BundleStart,
1381 SlotIndex OldIndex = Indexes->getInstructionIndex(MI);
1382 SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart);
1383 HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags);
1384 HME.updateAllRanges(&MI);
/// Repair live range LR for register Reg (restricted to the lanes in LaneMask
/// when LR is a subrange) after instructions in [Begin, End) were added or
/// removed, walking the region bottom-up and re-deriving segment boundaries
/// from the defs and uses encountered. endIdx is the slot just past End.
1387 void LiveIntervals::repairOldRegInRange(const MachineBasicBlock::iterator Begin,
1388 const MachineBasicBlock::iterator End,
1389 const SlotIndex endIdx,
1390 LiveRange &LR, const unsigned Reg,
1391 LaneBitmask LaneMask) {
1392 LiveInterval::iterator LII = LR.find(endIdx);
1393 SlotIndex lastUseIdx;
1394 if (LII == LR.begin()) {
1395 // This happens when the function is called for a subregister that only
1396 // occurs _after_ the range that is to be repaired.
1399 if (LII != LR.end() && LII->start < endIdx)
1400 lastUseIdx = LII->end;
// Walk the instructions backwards from End to Begin.
1404 for (MachineBasicBlock::iterator I = End; I != Begin;) {
1406 MachineInstr &MI = *I;
1407 if (MI.isDebugValue())
1410 SlotIndex instrIdx = getInstructionIndex(MI);
// The current segment's endpoints may refer to instructions that have been
// removed; remember whether each endpoint still maps to a real instruction.
1411 bool isStartValid = getInstructionFromIndex(LII->start);
1412 bool isEndValid = getInstructionFromIndex(LII->end);
1414 // FIXME: This doesn't currently handle early-clobber or multiple removed
1415 // defs inside of the region to repair.
1416 for (MachineInstr::mop_iterator OI = MI.operands_begin(),
1417 OE = MI.operands_end();
1419 const MachineOperand &MO = *OI;
1420 if (!MO.isReg() || MO.getReg() != Reg)
// Skip operands whose lanes do not overlap the (sub)range being repaired.
1423 unsigned SubReg = MO.getSubReg();
1424 LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubReg);
1425 if ((Mask & LaneMask).none())
1429 if (!isStartValid) {
1430 if (LII->end.isDead()) {
// The dead segment's defining instruction is gone: drop the segment and
// resume from the previous one (located via its remembered start).
1431 SlotIndex prevStart;
1432 if (LII != LR.begin())
1433 prevStart = std::prev(LII)->start;
1435 // FIXME: This could be more efficient if there was a
1436 // removeSegment method that returned an iterator.
1437 LR.removeSegment(*LII, true);
1438 if (prevStart.isValid())
1439 LII = LR.find(prevStart);
// The original defining instruction was removed: pull the segment start
// (and its value number's def) down to this def.
1443 LII->start = instrIdx.getRegSlot();
1444 LII->valno->def = instrIdx.getRegSlot();
1445 if (MO.getSubReg() && !MO.isUndef())
1446 lastUseIdx = instrIdx.getRegSlot();
1448 lastUseIdx = SlotIndex();
// Otherwise create a new segment for this def: dead if no use was seen
// below it, else extending to the recorded last use.
1453 if (!lastUseIdx.isValid()) {
1454 VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
1455 LiveRange::Segment S(instrIdx.getRegSlot(),
1456 instrIdx.getDeadSlot(), VNI);
1457 LII = LR.addSegment(S);
1458 } else if (LII->start != instrIdx.getRegSlot()) {
1459 VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
1460 LiveRange::Segment S(instrIdx.getRegSlot(), lastUseIdx, VNI);
1461 LII = LR.addSegment(S);
1464 if (MO.getSubReg() && !MO.isUndef())
1465 lastUseIdx = instrIdx.getRegSlot();
1467 lastUseIdx = SlotIndex();
1468 } else if (MO.isUse()) {
1469 // FIXME: This should probably be handled outside of this branch,
1470 // either as part of the def case (for defs inside of the region) or
1471 // after the loop over the region.
1472 if (!isEndValid && !LII->end.isBlock())
1473 LII->end = instrIdx.getRegSlot();
1474 if (!lastUseIdx.isValid())
1475 lastUseIdx = instrIdx.getRegSlot();
// Repair the intervals of all OrigRegs (virtual registers only; others are
// skipped below) after instructions in [Begin, End) of MBB were added or
// removed. Begin/End are first widened to the nearest instructions that still
// have slot indexes, then the index maps and each interval are repaired.
1482 LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB,
1483 MachineBasicBlock::iterator Begin,
1484 MachineBasicBlock::iterator End,
1485 ArrayRef<unsigned> OrigRegs) {
1486 // Find anchor points, which are at the beginning/end of blocks or at
1487 // instructions that already have indexes.
1488 while (Begin != MBB->begin() && !Indexes->hasIndex(*Begin))
1490 while (End != MBB->end() && !Indexes->hasIndex(*End))
1494 if (End == MBB->end())
1495 endIdx = getMBBEndIdx(MBB).getPrevSlot();
1497 endIdx = getInstructionIndex(*End);
1499 Indexes->repairIndexesInRange(MBB, Begin, End);
// Ensure an interval exists for every virtual register referenced in the
// repaired region, creating and computing missing ones up front.
1501 for (MachineBasicBlock::iterator I = End; I != Begin;) {
1503 MachineInstr &MI = *I;
1504 if (MI.isDebugValue())
1506 for (MachineInstr::const_mop_iterator MOI = MI.operands_begin(),
1507 MOE = MI.operands_end();
1508 MOI != MOE; ++MOI) {
1510 TargetRegisterInfo::isVirtualRegister(MOI->getReg()) &&
1511 !hasInterval(MOI->getReg())) {
1512 createAndComputeVirtRegInterval(MOI->getReg());
// Repair each original register's main range and all of its subranges.
1517 for (unsigned i = 0, e = OrigRegs.size(); i != e; ++i) {
1518 unsigned Reg = OrigRegs[i];
1519 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1522 LiveInterval &LI = getInterval(Reg);
1523 // FIXME: Should we support undefs that gain defs?
1524 if (!LI.hasAtLeastOneValue())
1527 for (LiveInterval::SubRange &S : LI.subranges()) {
1528 repairOldRegInRange(Begin, End, endIdx, S, Reg, S.LaneMask);
1530 repairOldRegInRange(Begin, End, endIdx, LI, Reg);
// Remove any value defined at Pos from every cached regunit live range of the
// physical register Reg. Regunits without a cached range are left untouched.
1534 void LiveIntervals::removePhysRegDefAt(unsigned Reg, SlotIndex Pos) {
1535 for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) {
1536 if (LiveRange *LR = getCachedRegUnit(*Units))
1537 if (VNInfo *VNI = LR->getVNInfoAt(Pos))
1538 LR->removeValNo(VNI);
// Remove the value defined at Pos from LI's main range (when present) and
// from all of its subranges, then drop any subranges left empty.
1542 void LiveIntervals::removeVRegDefAt(LiveInterval &LI, SlotIndex Pos) {
1543 // LI may not have the main range computed yet, but its subranges may
1545 VNInfo *VNI = LI.getVNInfoAt(Pos);
1546 if (VNI != nullptr) {
// The value found at Pos must actually be defined by the instruction at Pos.
1547 assert(VNI->def.getBaseIndex() == Pos.getBaseIndex());
1548 LI.removeValNo(VNI);
1551 // Also remove the value defined in subranges.
1552 for (LiveInterval::SubRange &S : LI.subranges()) {
1553 if (VNInfo *SVNI = S.getVNInfoAt(Pos))
1554 if (SVNI->def.getBaseIndex() == Pos.getBaseIndex())
1555 S.removeValNo(SVNI);
1557 LI.removeEmptySubRanges();
// Split LI into its connected components: classify the interval's value
// numbers, create a fresh virtual register and empty interval for each extra
// component, and let ConEQ distribute LI's contents among them. The new
// intervals are appended to SplitLIs for the caller.
1560 void LiveIntervals::splitSeparateComponents(LiveInterval &LI,
1561 SmallVectorImpl<LiveInterval*> &SplitLIs) {
1562 ConnectedVNInfoEqClasses ConEQ(*this);
1563 unsigned NumComp = ConEQ.Classify(LI);
1566 DEBUG(dbgs() << " Split " << NumComp << " components: " << LI << '\n');
1567 unsigned Reg = LI.reg;
// Every new register shares the original register's class.
1568 const TargetRegisterClass *RegClass = MRI->getRegClass(Reg);
// Component 0 stays in LI itself; only components 1..NumComp-1 need new vregs.
1569 for (unsigned I = 1; I < NumComp; ++I) {
1570 unsigned NewVReg = MRI->createVirtualRegister(RegClass);
1571 LiveInterval &NewLI = createEmptyInterval(NewVReg);
1572 SplitLIs.push_back(&NewLI);
1574 ConEQ.Distribute(LI, SplitLIs.data(), *MRI);
1577 void LiveIntervals::constructMainRangeFromSubranges(LiveInterval &LI) {
1578 assert(LRCalc && "LRCalc not initialized.");
1579 LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
1580 LRCalc->constructMainRangeFromSubranges(LI);