1 //===- LiveInterval.cpp - Live Interval Representation --------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveRange and LiveInterval classes. Given some
// numbering of each of the machine instructions, an interval [i, j) is said to
// be a
12 // live range for register v if there is no instruction with number j' >= j
13 // such that v is live at j' and there is no instruction with number i' < i such
14 // that v is live at i'. In this implementation ranges can have holes,
15 // i.e. a range might look like [1,20), [50,65), [1000,1001). Each
16 // individual segment is represented as an instance of LiveRange::Segment,
17 // and the whole range is represented as an instance of LiveRange.
19 //===----------------------------------------------------------------------===//
21 #include "llvm/CodeGen/LiveInterval.h"
22 #include "LiveRangeUtils.h"
23 #include "RegisterCoalescer.h"
24 #include "llvm/ADT/ArrayRef.h"
25 #include "llvm/ADT/STLExtras.h"
26 #include "llvm/ADT/SmallPtrSet.h"
27 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/ADT/iterator_range.h"
29 #include "llvm/CodeGen/LiveIntervals.h"
30 #include "llvm/CodeGen/MachineBasicBlock.h"
31 #include "llvm/CodeGen/MachineInstr.h"
32 #include "llvm/CodeGen/MachineOperand.h"
33 #include "llvm/CodeGen/MachineRegisterInfo.h"
34 #include "llvm/CodeGen/SlotIndexes.h"
35 #include "llvm/CodeGen/TargetRegisterInfo.h"
36 #include "llvm/MC/LaneBitmask.h"
37 #include "llvm/Support/Compiler.h"
38 #include "llvm/Support/Debug.h"
39 #include "llvm/Support/raw_ostream.h"
50 //===----------------------------------------------------------------------===//
51 // Implementation of various methods necessary for calculation of live ranges.
52 // The implementation of the methods abstracts from the concrete type of the
53 // segment collection.
// Implementation of the class follows the Template Method design pattern. The
// base
56 // class contains generic algorithms that call collection-specific methods,
57 // which are provided in concrete subclasses. In order to avoid virtual calls
58 // these methods are provided by means of C++ template instantiation.
59 // The base class calls the methods of the subclass through method impl(),
60 // which casts 'this' pointer to the type of the subclass.
62 //===----------------------------------------------------------------------===//
64 template <typename ImplT, typename IteratorT, typename CollectionT>
65 class CalcLiveRangeUtilBase {
70 CalcLiveRangeUtilBase(LiveRange *LR) : LR(LR) {}
73 using Segment = LiveRange::Segment;
74 using iterator = IteratorT;
76 /// A counterpart of LiveRange::createDeadDef: Make sure the range has a
77 /// value defined at @p Def.
78 /// If @p ForVNI is null, and there is no value defined at @p Def, a new
79 /// value will be allocated using @p VNInfoAllocator.
80 /// If @p ForVNI is null, the return value is the value defined at @p Def,
81 /// either a pre-existing one, or the one newly created.
82 /// If @p ForVNI is not null, then @p Def should be the location where
83 /// @p ForVNI is defined. If the range does not have a value defined at
84 /// @p Def, the value @p ForVNI will be used instead of allocating a new
85 /// one. If the range already has a value defined at @p Def, it must be
86 /// same as @p ForVNI. In either case, @p ForVNI will be the return value.
87 VNInfo *createDeadDef(SlotIndex Def, VNInfo::Allocator *VNInfoAllocator,
89 assert(!Def.isDead() && "Cannot define a value at the dead slot");
90 assert((!ForVNI || ForVNI->def == Def) &&
91 "If ForVNI is specified, it must match Def");
92 iterator I = impl().find(Def);
93 if (I == segments().end()) {
94 VNInfo *VNI = ForVNI ? ForVNI : LR->getNextValue(Def, *VNInfoAllocator);
95 impl().insertAtEnd(Segment(Def, Def.getDeadSlot(), VNI));
99 Segment *S = segmentAt(I);
100 if (SlotIndex::isSameInstr(Def, S->start)) {
101 assert((!ForVNI || ForVNI == S->valno) && "Value number mismatch");
102 assert(S->valno->def == S->start && "Inconsistent existing value def");
104 // It is possible to have both normal and early-clobber defs of the same
105 // register on an instruction. It doesn't make a lot of sense, but it is
106 // possible to specify in inline assembly.
108 // Just convert everything to early-clobber.
109 Def = std::min(Def, S->start);
111 S->start = S->valno->def = Def;
114 assert(SlotIndex::isEarlierInstr(Def, S->start) && "Already live at def");
115 VNInfo *VNI = ForVNI ? ForVNI : LR->getNextValue(Def, *VNInfoAllocator);
116 segments().insert(I, Segment(Def, Def.getDeadSlot(), VNI));
120 VNInfo *extendInBlock(SlotIndex StartIdx, SlotIndex Use) {
121 if (segments().empty())
124 impl().findInsertPos(Segment(Use.getPrevSlot(), Use, nullptr));
125 if (I == segments().begin())
128 if (I->end <= StartIdx)
131 extendSegmentEndTo(I, Use);
135 std::pair<VNInfo*,bool> extendInBlock(ArrayRef<SlotIndex> Undefs,
136 SlotIndex StartIdx, SlotIndex Use) {
137 if (segments().empty())
138 return std::make_pair(nullptr, false);
139 SlotIndex BeforeUse = Use.getPrevSlot();
140 iterator I = impl().findInsertPos(Segment(BeforeUse, Use, nullptr));
141 if (I == segments().begin())
142 return std::make_pair(nullptr, LR->isUndefIn(Undefs, StartIdx, BeforeUse));
144 if (I->end <= StartIdx)
145 return std::make_pair(nullptr, LR->isUndefIn(Undefs, StartIdx, BeforeUse));
147 if (LR->isUndefIn(Undefs, I->end, BeforeUse))
148 return std::make_pair(nullptr, true);
149 extendSegmentEndTo(I, Use);
151 return std::make_pair(I->valno, false);
154 /// This method is used when we want to extend the segment specified
155 /// by I to end at the specified endpoint. To do this, we should
156 /// merge and eliminate all segments that this will overlap
157 /// with. The iterator is not invalidated.
158 void extendSegmentEndTo(iterator I, SlotIndex NewEnd) {
159 assert(I != segments().end() && "Not a valid segment!");
160 Segment *S = segmentAt(I);
161 VNInfo *ValNo = I->valno;
163 // Search for the first segment that we can't merge with.
164 iterator MergeTo = std::next(I);
165 for (; MergeTo != segments().end() && NewEnd >= MergeTo->end; ++MergeTo)
166 assert(MergeTo->valno == ValNo && "Cannot merge with differing values!");
168 // If NewEnd was in the middle of a segment, make sure to get its endpoint.
169 S->end = std::max(NewEnd, std::prev(MergeTo)->end);
171 // If the newly formed segment now touches the segment after it and if they
172 // have the same value number, merge the two segments into one segment.
173 if (MergeTo != segments().end() && MergeTo->start <= I->end &&
174 MergeTo->valno == ValNo) {
175 S->end = MergeTo->end;
179 // Erase any dead segments.
180 segments().erase(std::next(I), MergeTo);
183 /// This method is used when we want to extend the segment specified
184 /// by I to start at the specified endpoint. To do this, we should
185 /// merge and eliminate all segments that this will overlap with.
186 iterator extendSegmentStartTo(iterator I, SlotIndex NewStart) {
187 assert(I != segments().end() && "Not a valid segment!");
188 Segment *S = segmentAt(I);
189 VNInfo *ValNo = I->valno;
191 // Search for the first segment that we can't merge with.
192 iterator MergeTo = I;
194 if (MergeTo == segments().begin()) {
196 segments().erase(MergeTo, I);
199 assert(MergeTo->valno == ValNo && "Cannot merge with differing values!");
201 } while (NewStart <= MergeTo->start);
203 // If we start in the middle of another segment, just delete a range and
204 // extend that segment.
205 if (MergeTo->end >= NewStart && MergeTo->valno == ValNo) {
206 segmentAt(MergeTo)->end = S->end;
208 // Otherwise, extend the segment right after.
210 Segment *MergeToSeg = segmentAt(MergeTo);
211 MergeToSeg->start = NewStart;
212 MergeToSeg->end = S->end;
215 segments().erase(std::next(MergeTo), std::next(I));
219 iterator addSegment(Segment S) {
220 SlotIndex Start = S.start, End = S.end;
221 iterator I = impl().findInsertPos(S);
223 // If the inserted segment starts in the middle or right at the end of
224 // another segment, just extend that segment to contain the segment of S.
225 if (I != segments().begin()) {
226 iterator B = std::prev(I);
227 if (S.valno == B->valno) {
228 if (B->start <= Start && B->end >= Start) {
229 extendSegmentEndTo(B, End);
233 // Check to make sure that we are not overlapping two live segments with
234 // different valno's.
235 assert(B->end <= Start &&
236 "Cannot overlap two segments with differing ValID's"
237 " (did you def the same reg twice in a MachineInstr?)");
241 // Otherwise, if this segment ends in the middle of, or right next
242 // to, another segment, merge it into that segment.
243 if (I != segments().end()) {
244 if (S.valno == I->valno) {
245 if (I->start <= End) {
246 I = extendSegmentStartTo(I, Start);
248 // If S is a complete superset of a segment, we may need to grow its
251 extendSegmentEndTo(I, End);
255 // Check to make sure that we are not overlapping two live segments with
256 // different valno's.
257 assert(I->start >= End &&
258 "Cannot overlap two segments with differing ValID's");
262 // Otherwise, this is just a new segment that doesn't interact with
265 return segments().insert(I, S);
269 ImplT &impl() { return *static_cast<ImplT *>(this); }
271 CollectionT &segments() { return impl().segmentsColl(); }
273 Segment *segmentAt(iterator I) { return const_cast<Segment *>(&(*I)); }
276 //===----------------------------------------------------------------------===//
277 // Instantiation of the methods for calculation of live ranges
278 // based on a segment vector.
279 //===----------------------------------------------------------------------===//
281 class CalcLiveRangeUtilVector;
282 using CalcLiveRangeUtilVectorBase =
283 CalcLiveRangeUtilBase<CalcLiveRangeUtilVector, LiveRange::iterator,
284 LiveRange::Segments>;
286 class CalcLiveRangeUtilVector : public CalcLiveRangeUtilVectorBase {
288 CalcLiveRangeUtilVector(LiveRange *LR) : CalcLiveRangeUtilVectorBase(LR) {}
291 friend CalcLiveRangeUtilVectorBase;
293 LiveRange::Segments &segmentsColl() { return LR->segments; }
295 void insertAtEnd(const Segment &S) { LR->segments.push_back(S); }
297 iterator find(SlotIndex Pos) { return LR->find(Pos); }
299 iterator findInsertPos(Segment S) {
300 return std::upper_bound(LR->begin(), LR->end(), S.start);
304 //===----------------------------------------------------------------------===//
305 // Instantiation of the methods for calculation of live ranges
306 // based on a segment set.
307 //===----------------------------------------------------------------------===//
309 class CalcLiveRangeUtilSet;
310 using CalcLiveRangeUtilSetBase =
311 CalcLiveRangeUtilBase<CalcLiveRangeUtilSet, LiveRange::SegmentSet::iterator,
312 LiveRange::SegmentSet>;
314 class CalcLiveRangeUtilSet : public CalcLiveRangeUtilSetBase {
316 CalcLiveRangeUtilSet(LiveRange *LR) : CalcLiveRangeUtilSetBase(LR) {}
319 friend CalcLiveRangeUtilSetBase;
321 LiveRange::SegmentSet &segmentsColl() { return *LR->segmentSet; }
323 void insertAtEnd(const Segment &S) {
324 LR->segmentSet->insert(LR->segmentSet->end(), S);
327 iterator find(SlotIndex Pos) {
329 LR->segmentSet->upper_bound(Segment(Pos, Pos.getNextSlot(), nullptr));
330 if (I == LR->segmentSet->begin())
332 iterator PrevI = std::prev(I);
333 if (Pos < (*PrevI).end)
338 iterator findInsertPos(Segment S) {
339 iterator I = LR->segmentSet->upper_bound(S);
340 if (I != LR->segmentSet->end() && !(S.start < *I))
346 } // end anonymous namespace
348 //===----------------------------------------------------------------------===//
350 //===----------------------------------------------------------------------===//
352 LiveRange::iterator LiveRange::find(SlotIndex Pos) {
353 // This algorithm is basically std::upper_bound.
354 // Unfortunately, std::upper_bound cannot be used with mixed types until we
355 // adopt C++0x. Many libraries can do it, but not all.
356 if (empty() || Pos >= endIndex())
358 iterator I = begin();
361 size_t Mid = Len >> 1;
362 if (Pos < I[Mid].end) {
372 VNInfo *LiveRange::createDeadDef(SlotIndex Def, VNInfo::Allocator &VNIAlloc) {
373 // Use the segment set, if it is available.
374 if (segmentSet != nullptr)
375 return CalcLiveRangeUtilSet(this).createDeadDef(Def, &VNIAlloc, nullptr);
376 // Otherwise use the segment vector.
377 return CalcLiveRangeUtilVector(this).createDeadDef(Def, &VNIAlloc, nullptr);
380 VNInfo *LiveRange::createDeadDef(VNInfo *VNI) {
381 // Use the segment set, if it is available.
382 if (segmentSet != nullptr)
383 return CalcLiveRangeUtilSet(this).createDeadDef(VNI->def, nullptr, VNI);
384 // Otherwise use the segment vector.
385 return CalcLiveRangeUtilVector(this).createDeadDef(VNI->def, nullptr, VNI);
388 // overlaps - Return true if the intersection of the two live ranges is
391 // An example for overlaps():
395 // 8: C = A + B ;; last use of A
397 // The live ranges should look like:
403 // A->overlaps(C) should return false since we want to be able to join
406 bool LiveRange::overlapsFrom(const LiveRange& other,
407 const_iterator StartPos) const {
408 assert(!empty() && "empty range");
409 const_iterator i = begin();
410 const_iterator ie = end();
411 const_iterator j = StartPos;
412 const_iterator je = other.end();
414 assert((StartPos->start <= i->start || StartPos == other.begin()) &&
415 StartPos != other.end() && "Bogus start position hint!");
417 if (i->start < j->start) {
418 i = std::upper_bound(i, ie, j->start);
419 if (i != begin()) --i;
420 } else if (j->start < i->start) {
422 if (StartPos != other.end() && StartPos->start <= i->start) {
423 assert(StartPos < other.end() && i < end());
424 j = std::upper_bound(j, je, i->start);
425 if (j != other.begin()) --j;
431 if (j == je) return false;
434 if (i->start > j->start) {
439 if (i->end > j->start)
447 bool LiveRange::overlaps(const LiveRange &Other, const CoalescerPair &CP,
448 const SlotIndexes &Indexes) const {
449 assert(!empty() && "empty range");
453 // Use binary searches to find initial positions.
454 const_iterator I = find(Other.beginIndex());
455 const_iterator IE = end();
458 const_iterator J = Other.find(I->start);
459 const_iterator JE = Other.end();
464 // J has just been advanced to satisfy:
465 assert(J->end >= I->start);
466 // Check for an overlap.
467 if (J->start < I->end) {
468 // I and J are overlapping. Find the later start.
469 SlotIndex Def = std::max(I->start, J->start);
470 // Allow the overlap if Def is a coalescable copy.
472 !CP.isCoalescable(Indexes.getInstructionFromIndex(Def)))
475 // Advance the iterator that ends first to check for more overlaps.
476 if (J->end > I->end) {
480 // Advance J until J->end >= I->start.
484 while (J->end < I->start);
488 /// overlaps - Return true if the live range overlaps an interval specified
490 bool LiveRange::overlaps(SlotIndex Start, SlotIndex End) const {
491 assert(Start < End && "Invalid range");
492 const_iterator I = std::lower_bound(begin(), end(), End);
493 return I != begin() && (--I)->end > Start;
496 bool LiveRange::covers(const LiveRange &Other) const {
498 return Other.empty();
500 const_iterator I = begin();
501 for (const Segment &O : Other.segments) {
502 I = advanceTo(I, O.start);
503 if (I == end() || I->start > O.start)
506 // Check adjacent live segments and see if we can get behind O.end.
507 while (I->end < O.end) {
508 const_iterator Last = I;
509 // Get next segment and abort if it was not adjacent.
511 if (I == end() || Last->end != I->start)
518 /// ValNo is dead, remove it. If it is the largest value number, just nuke it
519 /// (and any other deleted values neighboring it), otherwise mark it as ~1U so
520 /// it can be nuked later.
521 void LiveRange::markValNoForDeletion(VNInfo *ValNo) {
522 if (ValNo->id == getNumValNums()-1) {
525 } while (!valnos.empty() && valnos.back()->isUnused());
531 /// RenumberValues - Renumber all values in order of appearance and delete the
532 /// remaining unused values.
533 void LiveRange::RenumberValues() {
534 SmallPtrSet<VNInfo*, 8> Seen;
536 for (const Segment &S : segments) {
537 VNInfo *VNI = S.valno;
538 if (!Seen.insert(VNI).second)
540 assert(!VNI->isUnused() && "Unused valno used by live segment");
541 VNI->id = (unsigned)valnos.size();
542 valnos.push_back(VNI);
546 void LiveRange::addSegmentToSet(Segment S) {
547 CalcLiveRangeUtilSet(this).addSegment(S);
550 LiveRange::iterator LiveRange::addSegment(Segment S) {
551 // Use the segment set, if it is available.
552 if (segmentSet != nullptr) {
556 // Otherwise use the segment vector.
557 return CalcLiveRangeUtilVector(this).addSegment(S);
560 void LiveRange::append(const Segment S) {
561 // Check that the segment belongs to the back of the list.
562 assert(segments.empty() || segments.back().end <= S.start);
563 segments.push_back(S);
566 std::pair<VNInfo*,bool> LiveRange::extendInBlock(ArrayRef<SlotIndex> Undefs,
567 SlotIndex StartIdx, SlotIndex Kill) {
568 // Use the segment set, if it is available.
569 if (segmentSet != nullptr)
570 return CalcLiveRangeUtilSet(this).extendInBlock(Undefs, StartIdx, Kill);
571 // Otherwise use the segment vector.
572 return CalcLiveRangeUtilVector(this).extendInBlock(Undefs, StartIdx, Kill);
575 VNInfo *LiveRange::extendInBlock(SlotIndex StartIdx, SlotIndex Kill) {
576 // Use the segment set, if it is available.
577 if (segmentSet != nullptr)
578 return CalcLiveRangeUtilSet(this).extendInBlock(StartIdx, Kill);
579 // Otherwise use the segment vector.
580 return CalcLiveRangeUtilVector(this).extendInBlock(StartIdx, Kill);
583 /// Remove the specified segment from this range. Note that the segment must
584 /// be in a single Segment in its entirety.
585 void LiveRange::removeSegment(SlotIndex Start, SlotIndex End,
586 bool RemoveDeadValNo) {
587 // Find the Segment containing this span.
588 iterator I = find(Start);
589 assert(I != end() && "Segment is not in range!");
590 assert(I->containsInterval(Start, End)
591 && "Segment is not entirely in range!");
593 // If the span we are removing is at the start of the Segment, adjust it.
594 VNInfo *ValNo = I->valno;
595 if (I->start == Start) {
597 if (RemoveDeadValNo) {
598 // Check if val# is dead.
600 for (const_iterator II = begin(), EE = end(); II != EE; ++II)
601 if (II != I && II->valno == ValNo) {
606 // Now that ValNo is dead, remove it.
607 markValNoForDeletion(ValNo);
611 segments.erase(I); // Removed the whole Segment.
617 // Otherwise if the span we are removing is at the end of the Segment,
618 // adjust the other way.
624 // Otherwise, we are splitting the Segment into two pieces.
625 SlotIndex OldEnd = I->end;
626 I->end = Start; // Trim the old segment.
628 // Insert the new one.
629 segments.insert(std::next(I), Segment(End, OldEnd, ValNo));
632 /// removeValNo - Remove all the segments defined by the specified value#.
633 /// Also remove the value# from value# list.
634 void LiveRange::removeValNo(VNInfo *ValNo) {
636 segments.erase(remove_if(*this, [ValNo](const Segment &S) {
637 return S.valno == ValNo;
639 // Now that ValNo is dead, remove it.
640 markValNoForDeletion(ValNo);
643 void LiveRange::join(LiveRange &Other,
644 const int *LHSValNoAssignments,
645 const int *RHSValNoAssignments,
646 SmallVectorImpl<VNInfo *> &NewVNInfo) {
649 // Determine if any of our values are mapped. This is uncommon, so we want
650 // to avoid the range scan if not.
651 bool MustMapCurValNos = false;
652 unsigned NumVals = getNumValNums();
653 unsigned NumNewVals = NewVNInfo.size();
654 for (unsigned i = 0; i != NumVals; ++i) {
655 unsigned LHSValID = LHSValNoAssignments[i];
657 (NewVNInfo[LHSValID] && NewVNInfo[LHSValID] != getValNumInfo(i))) {
658 MustMapCurValNos = true;
663 // If we have to apply a mapping to our base range assignment, rewrite it now.
664 if (MustMapCurValNos && !empty()) {
665 // Map the first live range.
667 iterator OutIt = begin();
668 OutIt->valno = NewVNInfo[LHSValNoAssignments[OutIt->valno->id]];
669 for (iterator I = std::next(OutIt), E = end(); I != E; ++I) {
670 VNInfo* nextValNo = NewVNInfo[LHSValNoAssignments[I->valno->id]];
671 assert(nextValNo && "Huh?");
673 // If this live range has the same value # as its immediate predecessor,
674 // and if they are neighbors, remove one Segment. This happens when we
675 // have [0,4:0)[4,7:1) and map 0/1 onto the same value #.
676 if (OutIt->valno == nextValNo && OutIt->end == I->start) {
679 // Didn't merge. Move OutIt to the next segment,
681 OutIt->valno = nextValNo;
683 OutIt->start = I->start;
688 // If we merge some segments, chop off the end.
690 segments.erase(OutIt, end());
693 // Rewrite Other values before changing the VNInfo ids.
694 // This can leave Other in an invalid state because we're not coalescing
695 // touching segments that now have identical values. That's OK since Other is
696 // not supposed to be valid after calling join();
697 for (Segment &S : Other.segments)
698 S.valno = NewVNInfo[RHSValNoAssignments[S.valno->id]];
700 // Update val# info. Renumber them and make sure they all belong to this
701 // LiveRange now. Also remove dead val#'s.
702 unsigned NumValNos = 0;
703 for (unsigned i = 0; i < NumNewVals; ++i) {
704 VNInfo *VNI = NewVNInfo[i];
706 if (NumValNos >= NumVals)
707 valnos.push_back(VNI);
709 valnos[NumValNos] = VNI;
710 VNI->id = NumValNos++; // Renumber val#.
713 if (NumNewVals < NumVals)
714 valnos.resize(NumNewVals); // shrinkify
716 // Okay, now insert the RHS live segments into the LHS.
717 LiveRangeUpdater Updater(this);
718 for (Segment &S : Other.segments)
722 /// Merge all of the segments in RHS into this live range as the specified
723 /// value number. The segments in RHS are allowed to overlap with segments in
724 /// the current range, but only if the overlapping segments have the
725 /// specified value number.
726 void LiveRange::MergeSegmentsInAsValue(const LiveRange &RHS,
728 LiveRangeUpdater Updater(this);
729 for (const Segment &S : RHS.segments)
730 Updater.add(S.start, S.end, LHSValNo);
733 /// MergeValueInAsValue - Merge all of the live segments of a specific val#
734 /// in RHS into this live range as the specified value number.
735 /// The segments in RHS are allowed to overlap with segments in the
736 /// current range, it will replace the value numbers of the overlaped
737 /// segments with the specified value number.
738 void LiveRange::MergeValueInAsValue(const LiveRange &RHS,
739 const VNInfo *RHSValNo,
741 LiveRangeUpdater Updater(this);
742 for (const Segment &S : RHS.segments)
743 if (S.valno == RHSValNo)
744 Updater.add(S.start, S.end, LHSValNo);
747 /// MergeValueNumberInto - This method is called when two value nubmers
748 /// are found to be equivalent. This eliminates V1, replacing all
749 /// segments with the V1 value number with the V2 value number. This can
750 /// cause merging of V1/V2 values numbers and compaction of the value space.
751 VNInfo *LiveRange::MergeValueNumberInto(VNInfo *V1, VNInfo *V2) {
752 assert(V1 != V2 && "Identical value#'s are always equivalent!");
754 // This code actually merges the (numerically) larger value number into the
755 // smaller value number, which is likely to allow us to compactify the value
756 // space. The only thing we have to be careful of is to preserve the
757 // instruction that defines the result value.
759 // Make sure V2 is smaller than V1.
760 if (V1->id < V2->id) {
765 // Merge V1 segments into V2.
766 for (iterator I = begin(); I != end(); ) {
768 if (S->valno != V1) continue; // Not a V1 Segment.
770 // Okay, we found a V1 live range. If it had a previous, touching, V2 live
774 if (Prev->valno == V2 && Prev->end == S->start) {
777 // Erase this live-range.
784 // Okay, now we have a V1 or V2 live range that is maximally merged forward.
785 // Ensure that it is a V2 live-range.
788 // If we can merge it into later V2 segments, do so now. We ignore any
789 // following V1 segments, as they will be merged in subsequent iterations
792 if (I->start == S->end && I->valno == V2) {
800 // Now that V1 is dead, remove it.
801 markValNoForDeletion(V1);
806 void LiveRange::flushSegmentSet() {
807 assert(segmentSet != nullptr && "segment set must have been created");
810 "segment set can be used only initially before switching to the array");
811 segments.append(segmentSet->begin(), segmentSet->end());
812 segmentSet = nullptr;
816 bool LiveRange::isLiveAtIndexes(ArrayRef<SlotIndex> Slots) const {
817 ArrayRef<SlotIndex>::iterator SlotI = Slots.begin();
818 ArrayRef<SlotIndex>::iterator SlotE = Slots.end();
820 // If there are no regmask slots, we have nothing to search.
824 // Start our search at the first segment that ends after the first slot.
825 const_iterator SegmentI = find(*SlotI);
826 const_iterator SegmentE = end();
828 // If there are no segments that end after the first slot, we're done.
829 if (SegmentI == SegmentE)
832 // Look for each slot in the live range.
833 for ( ; SlotI != SlotE; ++SlotI) {
834 // Go to the next segment that ends after the current slot.
835 // The slot may be within a hole in the range.
836 SegmentI = advanceTo(SegmentI, *SlotI);
837 if (SegmentI == SegmentE)
840 // If this segment contains the slot, we're done.
841 if (SegmentI->contains(*SlotI))
843 // Otherwise, look for the next slot.
846 // We didn't find a segment containing any of the slots.
850 void LiveInterval::freeSubRange(SubRange *S) {
852 // Memory was allocated with BumpPtr allocator and is not freed here.
855 void LiveInterval::removeEmptySubRanges() {
856 SubRange **NextPtr = &SubRanges;
857 SubRange *I = *NextPtr;
858 while (I != nullptr) {
864 // Skip empty subranges until we find the first nonempty one.
866 SubRange *Next = I->Next;
869 } while (I != nullptr && I->empty());
874 void LiveInterval::clearSubRanges() {
875 for (SubRange *I = SubRanges, *Next; I != nullptr; I = Next) {
882 void LiveInterval::refineSubRanges(BumpPtrAllocator &Allocator,
883 LaneBitmask LaneMask, std::function<void(LiveInterval::SubRange&)> Apply) {
884 LaneBitmask ToApply = LaneMask;
885 for (SubRange &SR : subranges()) {
886 LaneBitmask SRMask = SR.LaneMask;
887 LaneBitmask Matching = SRMask & LaneMask;
891 SubRange *MatchingRange;
892 if (SRMask == Matching) {
893 // The subrange fits (it does not cover bits outside \p LaneMask).
896 // We have to split the subrange into a matching and non-matching part.
897 // Reduce lanemask of existing lane to non-matching part.
898 SR.LaneMask = SRMask & ~Matching;
899 // Create a new subrange for the matching part
900 MatchingRange = createSubRangeFrom(Allocator, Matching, SR);
902 Apply(*MatchingRange);
903 ToApply &= ~Matching;
905 // Create a new subrange if there are uncovered bits left.
907 SubRange *NewRange = createSubRange(Allocator, ToApply);
912 unsigned LiveInterval::getSize() const {
914 for (const Segment &S : segments)
915 Sum += S.start.distance(S.end);
919 void LiveInterval::computeSubRangeUndefs(SmallVectorImpl<SlotIndex> &Undefs,
920 LaneBitmask LaneMask,
921 const MachineRegisterInfo &MRI,
922 const SlotIndexes &Indexes) const {
923 assert(TargetRegisterInfo::isVirtualRegister(reg));
924 LaneBitmask VRegMask = MRI.getMaxLaneMaskForVReg(reg);
925 assert((VRegMask & LaneMask).any());
926 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
927 for (const MachineOperand &MO : MRI.def_operands(reg)) {
930 unsigned SubReg = MO.getSubReg();
931 assert(SubReg != 0 && "Undef should only be set on subreg defs");
932 LaneBitmask DefMask = TRI.getSubRegIndexLaneMask(SubReg);
933 LaneBitmask UndefMask = VRegMask & ~DefMask;
934 if ((UndefMask & LaneMask).any()) {
935 const MachineInstr &MI = *MO.getParent();
936 bool EarlyClobber = MO.isEarlyClobber();
937 SlotIndex Pos = Indexes.getInstructionIndex(MI).getRegSlot(EarlyClobber);
938 Undefs.push_back(Pos);
943 raw_ostream& llvm::operator<<(raw_ostream& OS, const LiveRange::Segment &S) {
944 return OS << '[' << S.start << ',' << S.end << ':' << S.valno->id << ')';
947 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
948 LLVM_DUMP_METHOD void LiveRange::Segment::dump() const {
949 dbgs() << *this << '\n';
953 void LiveRange::print(raw_ostream &OS) const {
957 for (const Segment &S : segments) {
959 assert(S.valno == getValNumInfo(S.valno->id) && "Bad VNInfo");
963 // Print value number info.
964 if (getNumValNums()) {
967 for (const_vni_iterator i = vni_begin(), e = vni_end(); i != e;
969 const VNInfo *vni = *i;
972 if (vni->isUnused()) {
983 void LiveInterval::SubRange::print(raw_ostream &OS) const {
984 OS << " L" << PrintLaneMask(LaneMask) << ' '
985 << static_cast<const LiveRange&>(*this);
988 void LiveInterval::print(raw_ostream &OS) const {
989 OS << printReg(reg) << ' ';
992 for (const SubRange &SR : subranges())
996 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
997 LLVM_DUMP_METHOD void LiveRange::dump() const {
998 dbgs() << *this << '\n';
1001 LLVM_DUMP_METHOD void LiveInterval::SubRange::dump() const {
1002 dbgs() << *this << '\n';
1005 LLVM_DUMP_METHOD void LiveInterval::dump() const {
1006 dbgs() << *this << '\n';
1011 void LiveRange::verify() const {
1012 for (const_iterator I = begin(), E = end(); I != E; ++I) {
1013 assert(I->start.isValid());
1014 assert(I->end.isValid());
1015 assert(I->start < I->end);
1016 assert(I->valno != nullptr);
1017 assert(I->valno->id < valnos.size());
1018 assert(I->valno == valnos[I->valno->id]);
1019 if (std::next(I) != E) {
1020 assert(I->end <= std::next(I)->start);
1021 if (I->end == std::next(I)->start)
1022 assert(I->valno != std::next(I)->valno);
1027 void LiveInterval::verify(const MachineRegisterInfo *MRI) const {
1030 // Make sure SubRanges are fine and LaneMasks are disjunct.
1032 LaneBitmask MaxMask = MRI != nullptr ? MRI->getMaxLaneMaskForVReg(reg)
1033 : LaneBitmask::getAll();
1034 for (const SubRange &SR : subranges()) {
1035 // Subrange lanemask should be disjunct to any previous subrange masks.
1036 assert((Mask & SR.LaneMask).none());
1037 Mask |= SR.LaneMask;
1039 // subrange mask should not contained in maximum lane mask for the vreg.
1040 assert((Mask & ~MaxMask).none());
1041 // empty subranges must be removed.
1042 assert(!SR.empty());
1045 // Main liverange should cover subrange.
1051 //===----------------------------------------------------------------------===//
1052 // LiveRangeUpdater class
1053 //===----------------------------------------------------------------------===//
1055 // The LiveRangeUpdater class always maintains these invariants:
1057 // - When LastStart is invalid, Spills is empty and the iterators are invalid.
1058 // This is the initial state, and the state created by flush().
1059 // In this state, isDirty() returns false.
1061 // Otherwise, segments are kept in three separate areas:
1063 // 1. [begin; WriteI) at the front of LR.
1064 // 2. [ReadI; end) at the back of LR.
1067 // - LR.begin() <= WriteI <= ReadI <= LR.end().
1068 // - Segments in all three areas are fully ordered and coalesced.
1069 // - Segments in area 1 precede and can't coalesce with segments in area 2.
1070 // - Segments in Spills precede and can't coalesce with segments in area 2.
1071 // - No coalescing is possible between segments in Spills and segments in area
1072 // 1, and there are no overlapping segments.
1074 // The segments in Spills are not ordered with respect to the segments in area
1075 // 1. They need to be merged.
1077 // When they exist, Spills.back().start <= LastStart,
1078 // and WriteI[-1].start <= LastStart.
1080 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debug pretty-printer for the updater's state.
// NOTE(review): this listing elides several source lines (the isDirty()/null
// guard conditions, braces, and stream terminators); the surviving code
// below is kept verbatim — confirm control flow against the full source.
1081 void LiveRangeUpdater::print(raw_ostream &OS) const {
// Fast path: no pending changes, just print the underlying range.
1084 OS << "Clean updater: " << *LR << '\n';
// Updater was never attached to a live range.
1086 OS << "Null updater.\n";
// Dirty state: show the gap between the write/read cursors, the last start
// position seen by add(), then the three segment areas in order.
1089 assert(LR && "Can't have null LR in dirty updater.");
1090 OS << " updater with gap = " << (ReadI - WriteI)
1091 << ", last start = " << LastStart
// Area 1: already-written segments at the front of LR.
1093 for (const auto &S : make_range(LR->begin(), WriteI))
// Spilled segments awaiting a merge back into the gap.
1096 for (unsigned I = 0, E = Spills.size(); I != E; ++I)
1097 OS << ' ' << Spills[I];
// Area 2: not-yet-consumed segments at the back of LR.
1099 for (const auto &S : make_range(ReadI, LR->end()))
// Debugger convenience wrapper around print(); body elided in this listing.
1104 LLVM_DUMP_METHOD void LiveRangeUpdater::dump() const {
1109 // Determine if A and B should be coalesced.
// Precondition: A starts no later than B (callers pass ordered segments).
1110 static inline bool coalescable(const LiveRange::Segment &A,
1111 const LiveRange::Segment &B) {
1112 assert(A.start <= B.start && "Unordered live segments.");
// Exactly adjacent: mergeable only when both carry the same value number.
1113 if (A.end == B.start)
1114 return A.valno == B.valno;
// Disjoint with a hole between them: never coalescable.
// NOTE(review): the `return false;` for this branch is elided in this listing.
1115 if (A.end < B.start)
// Overlapping segments must define the same value; then they coalesce.
// NOTE(review): the final `return true;` and closing brace are elided here.
1117 assert(A.valno == B.valno && "Cannot overlap different values");
// Add Seg to the live range being updated while maintaining the updater's
// three-area invariants (see the block comment above this class). Segments
// are expected in order of increasing start; a backwards start resets state.
// NOTE(review): several source lines (returns, else branches, a mergeSpills()
// call, statements, and closing braces) are elided from this listing; the
// surviving lines are kept verbatim.
1121 void LiveRangeUpdater::add(LiveRange::Segment Seg) {
1122 assert(LR && "Cannot add to a null destination");
1124 // Fall back to the regular add method if the live range
1125 // is using the segment set instead of the segment vector.
1126 if (LR->segmentSet != nullptr) {
1127 LR->addSegmentToSet(Seg);
1131 // Flush the state if Start moves backwards.
1132 if (!LastStart.isValid() || LastStart > Seg.start) {
1135 // This brings us to an uninitialized state. Reinitialize.
1136 assert(Spills.empty() && "Leftover spilled segments");
1137 WriteI = ReadI = LR->begin();
1140 // Remember start for next time.
1141 LastStart = Seg.start;
1143 // Advance ReadI until it ends after Seg.start.
1144 LiveRange::iterator E = LR->end();
1145 if (ReadI != E && ReadI->end <= Seg.start) {
1146 // First try to close the gap between WriteI and ReadI with spills.
1147 if (ReadI != WriteI)
1149 // Then advance ReadI.
1150 if (ReadI == WriteI)
// No gap: both cursors can jump straight to the first segment that may
// overlap or follow Seg.start.
1151 ReadI = WriteI = LR->find(Seg.start);
// Copy forward segments that end before Seg starts, compacting area 1.
1153 while (ReadI != E && ReadI->end <= Seg.start)
1154 *WriteI++ = *ReadI++;
1157 assert(ReadI == E || ReadI->end > Seg.start);
1159 // Check if the ReadI segment begins early.
1160 if (ReadI != E && ReadI->start <= Seg.start) {
1161 assert(ReadI->valno == Seg.valno && "Cannot overlap different values");
1162 // Bail if Seg is completely contained in ReadI.
1163 if (ReadI->end >= Seg.end)
1165 // Coalesce into Seg.
1166 Seg.start = ReadI->start;
1170 // Coalesce as much as possible from ReadI into Seg.
1171 while (ReadI != E && coalescable(Seg, *ReadI)) {
// Absorb each coalescable successor; Seg's end grows monotonically.
1172 Seg.end = std::max(Seg.end, ReadI->end);
1176 // Try coalescing Spills.back() into Seg.
1177 if (!Spills.empty() && coalescable(Spills.back(), Seg)) {
1178 Seg.start = Spills.back().start;
1179 Seg.end = std::max(Spills.back().end, Seg.end);
1183 // Try coalescing Seg into WriteI[-1].
1184 if (WriteI != LR->begin() && coalescable(WriteI[-1], Seg)) {
1185 WriteI[-1].end = std::max(WriteI[-1].end, Seg.end);
1189 // Seg doesn't coalesce with anything, and needs to be inserted somewhere.
1190 if (WriteI != ReadI) {
1195 // Finally, append to LR or Spills.
// Cursors at the end of LR: Seg goes directly onto the segment vector.
1197 LR->segments.push_back(Seg);
1198 WriteI = ReadI = LR->end();
// Otherwise park Seg in Spills; it is folded back in by a later merge.
1200 Spills.push_back(Seg);
1203 // Merge as many spilled segments as possible into the gap between WriteI
1204 // and ReadI. Advance WriteI to reflect the inserted segments.
// NOTE(review): a few lines (the new-WriteI computation, the loop's
// else-copy from Src, and closing braces) are elided from this listing.
1205 void LiveRangeUpdater::mergeSpills() {
1206 // Perform a backwards merge of Spills and [SpillI;WriteI).
1207 size_t GapSize = ReadI - WriteI;
// Only as many spills as actually fit in the gap can be merged here.
1208 size_t NumMoved = std::min(Spills.size(), GapSize);
1209 LiveRange::iterator Src = WriteI;
1210 LiveRange::iterator Dst = Src + NumMoved;
1211 LiveRange::iterator SpillSrc = Spills.end();
1212 LiveRange::iterator B = LR->begin();
1214 // This is the new WriteI position after merging spills.
1217 // Now merge Src and Spills backwards.
1218 while (Src != Dst) {
// Take the later-starting segment from either area 1 or Spills, writing
// from the back of the gap so nothing is overwritten before it is read.
1219 if (Src != B && Src[-1].start > SpillSrc[-1].start)
1222 *--Dst = *--SpillSrc;
// Everything consumed from Spills has been moved; drop it from the vector.
1224 assert(NumMoved == size_t(Spills.end() - SpillSrc));
1225 Spills.erase(SpillSrc, Spills.end());
// Commit all pending changes: resize the cursor gap to hold the spills,
// merge them in, and return the updater to the clean (non-dirty) state.
// NOTE(review): the isDirty() early-out, the final mergeSpills() call, else
// branches, and closing braces are elided from this listing.
1228 void LiveRangeUpdater::flush() {
1231 // Clear the dirty state.
1232 LastStart = SlotIndex();
1234 assert(LR && "Cannot add to a null destination");
1236 // Nothing to merge?
1237 if (Spills.empty()) {
// Just close the gap between the two cursors.
1238 LR->segments.erase(WriteI, ReadI);
1243 // Resize the WriteI - ReadI gap to match Spills.
1244 size_t GapSize = ReadI - WriteI;
1245 if (GapSize < Spills.size()) {
1246 // The gap is too small. Make some room.
// Remember WriteI as an offset: insert() below invalidates iterators.
1247 size_t WritePos = WriteI - LR->begin();
1248 LR->segments.insert(ReadI, Spills.size() - GapSize, LiveRange::Segment());
1249 // This also invalidated ReadI, but it is recomputed below.
1250 WriteI = LR->begin() + WritePos;
1252 // Shrink the gap if necessary.
1253 LR->segments.erase(WriteI + Spills.size(), ReadI);
// Recompute ReadI so the gap exactly matches the number of spills.
1255 ReadI = WriteI + Spills.size();
// Partition the value numbers of LR into connected components: two values
// are joined when one flows into the other — a phi-def joins with each
// predecessor's live-out value, and an instruction-defined value joins with
// the value live immediately before its def (two-address style redef).
// Returns the number of classes; results are queried via getEqClass().
// NOTE(review): `continue` statements, else branches, the used/unused
// bookkeeping assignments, and closing braces are elided from this listing.
1260 unsigned ConnectedVNInfoEqClasses::Classify(const LiveRange &LR) {
1261 // Create initial equivalence classes.
1263 EqClass.grow(LR.getNumValNums());
// Representatives: one used value and one unused value, so all unused
// values can be lumped into a single class at the end.
1265 const VNInfo *used = nullptr, *unused = nullptr;
1267 // Determine connections.
1268 for (const VNInfo *VNI : LR.valnos) {
1269 // Group all unused values into one class.
1270 if (VNI->isUnused()) {
1272 EqClass.join(unused->id, VNI->id);
1277 if (VNI->isPHIDef()) {
1278 const MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
1279 assert(MBB && "Phi-def has no defining MBB");
1280 // Connect to values live out of predecessors.
1281 for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(),
1282 PE = MBB->pred_end(); PI != PE; ++PI)
1283 if (const VNInfo *PVNI = LR.getVNInfoBefore(LIS.getMBBEndIdx(*PI)))
1284 EqClass.join(VNI->id, PVNI->id);
1286 // Normal value defined by an instruction. Check for two-addr redef.
1287 // FIXME: This could be coincidental. Should we really check for a tied
1288 // operand constraint?
1289 // Note that VNI->def may be a use slot for an early clobber def.
1290 if (const VNInfo *UVNI = LR.getVNInfoBefore(VNI->def))
1291 EqClass.join(VNI->id, UVNI->id);
1295 // Lump all the unused values in with the last used value.
1297 EqClass.join(used->id, unused->id);
1300 return EqClass.getNumClasses();
// Distribute the values of LI into the per-component intervals LIV[]
// computed by Classify(): rewrite each register operand to the interval that
// owns the value it reads/defines, split the subregister ranges, and finally
// split the main range via DistributeRange().
// NOTE(review): iterator advancement (`++RI`), the SlotIndex declaration,
// else branches, `continue` statements, and closing braces are elided from
// this listing; the surviving lines are kept verbatim.
1303 void ConnectedVNInfoEqClasses::Distribute(LiveInterval &LI, LiveInterval *LIV[],
1304 MachineRegisterInfo &MRI) {
1305 // Rewrite instructions.
1306 for (MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LI.reg),
1307 RE = MRI.reg_end(); RI != RE;) {
1308 MachineOperand &MO = *RI;
1309 MachineInstr *MI = RI->getParent();
1311 // DBG_VALUE instructions don't have slot indexes, so get the index of the
1312 // instruction before them.
1313 // Normally, DBG_VALUE instructions are removed before this function is
1314 // called, but it is not a requirement.
1316 if (MI->isDebugValue())
1317 Idx = LIS.getSlotIndexes()->getIndexBefore(*MI);
1319 Idx = LIS.getInstructionIndex(*MI);
// Find the value this operand reads (live-in at Idx) or defines at Idx.
1320 LiveQueryResult LRQ = LI.Query(Idx);
1321 const VNInfo *VNI = MO.readsReg() ? LRQ.valueIn() : LRQ.valueDefined();
1322 // In the case of an <undef> use that isn't tied to any def, VNI will be
1323 // NULL. If the use is tied to a def, VNI will be the defined value.
// Class 0 stays on the original register; class N>0 maps to LIV[N-1].
1326 if (unsigned EqClass = getEqClass(VNI))
1327 MO.setReg(LIV[EqClass-1]->reg);
1330 // Distribute subregister liveranges.
1331 if (LI.hasSubRanges()) {
1332 unsigned NumComponents = EqClass.getNumClasses();
1333 SmallVector<unsigned, 8> VNIMapping;
1334 SmallVector<LiveInterval::SubRange*, 8> SubRanges;
1335 BumpPtrAllocator &Allocator = LIS.getVNInfoAllocator();
1336 for (LiveInterval::SubRange &SR : LI.subranges()) {
1337 // Create new subranges in the split intervals and construct a mapping
1338 // for the VNInfos in the subrange.
1339 unsigned NumValNos = SR.valnos.size();
1341 VNIMapping.reserve(NumValNos);
1343 SubRanges.resize(NumComponents-1, nullptr);
1344 for (unsigned I = 0; I < NumValNos; ++I) {
1345 const VNInfo &VNI = *SR.valnos[I];
1346 unsigned ComponentNum;
1347 if (VNI.isUnused()) {
// A subrange def always lies inside a main-range def, which determines
// the component this subrange value belongs to.
1350 const VNInfo *MainRangeVNI = LI.getVNInfoAt(VNI.def);
1351 assert(MainRangeVNI != nullptr
1352 && "SubRange def must have corresponding main range def");
1353 ComponentNum = getEqClass(MainRangeVNI);
// Lazily create the matching subrange in the destination interval.
1354 if (ComponentNum > 0 && SubRanges[ComponentNum-1] == nullptr) {
1355 SubRanges[ComponentNum-1]
1356 = LIV[ComponentNum-1]->createSubRange(Allocator, SR.LaneMask);
1359 VNIMapping.push_back(ComponentNum);
1361 DistributeRange(SR, SubRanges.data(), VNIMapping);
// Splitting may have drained some subranges of LI entirely; drop them.
1363 LI.removeEmptySubRanges();
1366 // Distribute main liverange.
1367 DistributeRange(LI, LIV, EqClass);