1 //===- UnwindInfoSection.cpp ----------------------------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 #include "UnwindInfoSection.h"
10 #include "ConcatOutputSection.h"
12 #include "InputSection.h"
13 #include "OutputSection.h"
14 #include "OutputSegment.h"
15 #include "SymbolTable.h"
17 #include "SyntheticSections.h"
20 #include "lld/Common/ErrorHandler.h"
21 #include "lld/Common/Memory.h"
22 #include "llvm/ADT/DenseMap.h"
23 #include "llvm/ADT/STLExtras.h"
24 #include "llvm/BinaryFormat/MachO.h"
25 #include "llvm/Support/Parallel.h"
30 using namespace llvm::MachO;
31 using namespace llvm::support::endian;
33 using namespace lld::macho;
// Limits imposed by the __TEXT,__unwind_info on-disk format; see
// libunwind's compact_unwind_encoding.h for the corresponding structures.
35 #define COMMON_ENCODINGS_MAX 127
36 #define COMPACT_ENCODINGS_MAX 256
// Second-level pages are fixed at 4 KiB.
38 #define SECOND_LEVEL_PAGE_BYTES 4096
39 #define SECOND_LEVEL_PAGE_WORDS (SECOND_LEVEL_PAGE_BYTES / sizeof(uint32_t))
// Maximum number of entries in one regular-format second-level page.
40 #define REGULAR_SECOND_LEVEL_ENTRIES_MAX \
41 ((SECOND_LEVEL_PAGE_BYTES - \
42 sizeof(unwind_info_regular_second_level_page_header)) / \
43 sizeof(unwind_info_regular_second_level_entry))
// Maximum number of entries in one compressed-format second-level page.
44 #define COMPRESSED_SECOND_LEVEL_ENTRIES_MAX \
45 ((SECOND_LEVEL_PAGE_BYTES - \
46 sizeof(unwind_info_compressed_second_level_page_header)) / \
49 #define COMPRESSED_ENTRY_FUNC_OFFSET_BITS 24
// Mask covering the 24-bit function-offset field of a compressed entry.
50 #define COMPRESSED_ENTRY_FUNC_OFFSET_MASK \
51 UNWIND_INFO_COMPRESSED_ENTRY_FUNC_OFFSET(~0)
53 // Compact Unwind format is a Mach-O evolution of DWARF Unwind that
54 // optimizes space and exception-time lookup. Most DWARF unwind
55 // entries can be replaced with Compact Unwind entries, but the ones
56 // that cannot are retained in DWARF form.
58 // This comment will address macro-level organization of the pre-link
59 // and post-link compact unwind tables. For micro-level organization
60 // pertaining to the bitfield layout of the 32-bit compact unwind
61 // entries, see libunwind/include/mach-o/compact_unwind_encoding.h
63 // Important clarifying factoids:
65 // * __LD,__compact_unwind is the compact unwind format for compiler
66 // output and linker input. It is never a final output. It could be
67 // an intermediate output with the `-r` option which retains relocs.
69 // * __TEXT,__unwind_info is the compact unwind format for final
70 // linker output. It is never an input.
72 // * __TEXT,__eh_frame is the DWARF format for both linker input and output.
74 // * __TEXT,__unwind_info entries are divided into 4 KiB pages (2nd
75 // level) by ascending address, and the pages are referenced by an
76 // index (1st level) in the section header.
78 // * Following the headers in __TEXT,__unwind_info, the bulk of the
79 // section contains a vector of compact unwind entries
80 // `{functionOffset, encoding}` sorted by ascending `functionOffset`.
81 // Adjacent entries with the same encoding can be folded to great
82 //   advantage, achieving a 3-order-of-magnitude reduction in the
//   number of entries.
85 // * The __TEXT,__unwind_info format can accommodate up to 127 unique
86 // encodings for the space-efficient compressed format. In practice,
87 // fewer than a dozen unique encodings are used by C++ programs of
88 // all sizes. Therefore, we don't even bother implementing the regular
89 // non-compressed format. Time will tell if anyone in the field ever
90 // overflows the 127-encodings limit.
92 // Refer to the definition of unwind_info_section_header in
93 // compact_unwind_encoding.h for an overview of the format we are encoding
96 // TODO(gkm): prune __eh_frame entries superseded by __unwind_info, PR50410
97 // TODO(gkm): how do we align the 2nd-level pages?
99 // The offsets of various fields in the on-disk representation of each compact
// unwind entry, selected by the target's pointer width at construction.
101 struct CompactUnwindOffsets {
102 uint32_t functionAddress;
103 uint32_t functionLength;
105 uint32_t personality;
// Picks the field offsets matching the given word size.
// NOTE(review): only the 32-bit case is visible here; presumably a 64-bit
// branch precedes this assert -- confirm against the full source.
108 CompactUnwindOffsets(size_t wordSize) {
112 assert(wordSize == 4);
// Compute each field's byte offset from the on-disk Layout struct below.
118 template <class Ptr> void init() {
119 functionAddress = offsetof(Layout<Ptr>, functionAddress);
120 functionLength = offsetof(Layout<Ptr>, functionLength);
121 encoding = offsetof(Layout<Ptr>, encoding);
122 personality = offsetof(Layout<Ptr>, personality);
123 lsda = offsetof(Layout<Ptr>, lsda);
// Mirrors the on-disk layout of a __LD,__compact_unwind entry for a given
// pointer width; used only via offsetof() above.
126 template <class Ptr> struct Layout {
128 uint32_t functionLength;
129 compact_unwind_encoding_t encoding;
135 // LLD's internal representation of a compact unwind entry.
136 struct CompactUnwindEntry {
// Post-relocation virtual address of the function this entry covers
// (filled in by relocateCompactUnwind()).
137 uint64_t functionAddress;
138 uint32_t functionLength;
139 compact_unwind_encoding_t encoding;
// Maps a compact-unwind encoding to its index in an encodings table.
144 using EncodingMap = DenseMap<compact_unwind_encoding_t, size_t>;
// Bookkeeping for one 4 KiB second-level page of the output __unwind_info.
146 struct SecondLevelPage {
// Encodings private to this page; they occupy the per-page index range
// above the common-encodings table (see finalize()).
151 std::vector<compact_unwind_encoding_t> localEncodings;
152 EncodingMap localEncodingIndexes;
155 // UnwindInfoSectionImpl allows us to avoid cluttering our header file with a
156 // lengthy definition of UnwindInfoSection.
157 class UnwindInfoSectionImpl final : public UnwindInfoSection {
// Field offsets depend on the target word size, fixed at construction.
159 UnwindInfoSectionImpl() : cuOffsets(target->wordSize) {}
160 uint64_t getSize() const override { return unwindInfoSize; }
161 void prepareRelocations() override;
162 void finalize() override;
163 void writeTo(uint8_t *buf) const override;
// Per-input-section pass over __LD,__compact_unwind relocations.
166 void prepareRelocations(ConcatInputSection *);
167 void relocateCompactUnwind(std::vector<CompactUnwindEntry> &);
168 void encodePersonalities();
// Total byte size of the output section, computed by finalize().
170 uint64_t unwindInfoSize = 0;
// Flat copy of the symbols MapVector, for indexed access (see finalize()).
171 std::vector<decltype(symbols)::value_type> symbolsVec;
172 CompactUnwindOffsets cuOffsets;
// (encoding, frequency) pairs, sorted by descending frequency in finalize().
173 std::vector<std::pair<compact_unwind_encoding_t, size_t>> commonEncodings;
174 EncodingMap commonEncodingIndexes;
175 // The entries here will be in the same order as their originating symbols
177 std::vector<CompactUnwindEntry> cuEntries;
178 // Indices into the cuEntries vector.
179 std::vector<size_t> cuIndices;
// Unique personality pointers; encodePersonalities() errors out above 3.
180 std::vector<Symbol *> personalities;
// Dedup table for personality symbols, keyed by (section, offset) location.
181 SmallDenseMap<std::pair<InputSection *, uint64_t /* addend */>, Symbol *>
183 // Indices into cuEntries for CUEs with a non-null LSDA.
184 std::vector<size_t> entriesWithLsda;
185 // Map of cuEntries index to an index within the LSDA array.
186 DenseMap<size_t, uint32_t> lsdaIndex;
187 std::vector<SecondLevelPage> secondLevelPages;
// Byte offset, within the output section, of the second-level pages.
188 uint64_t level2PagesOffset = 0;
// __unwind_info is a synthetic section placed in the __TEXT segment.
191 UnwindInfoSection::UnwindInfoSection()
192 : SyntheticSection(segment_names::text, section_names::unwindInfo) {
196 // Record function symbols that may need entries emitted in __unwind_info, which
197 // stores unwind data for address ranges.
199 // Note that if several adjacent functions have the same unwind encoding, LSDA,
200 // and personality function, they share one unwind entry. For this to work,
201 // functions without unwind info need explicit "no unwind info" unwind entries
202 // -- else the unwinder would think they have the unwind info of the closest
203 // function with unwind info right before in the image. Thus, we add function
204 // symbols for each unique address regardless of whether they have associated
206 void UnwindInfoSection::addSymbol(const Defined *d) {
// Once any symbol is recorded, we can no longer omit the whole section.
208 allEntriesAreOmitted = false;
209 // We don't yet know the final output address of this symbol, but we know that
210 // they are uniquely determined by a combination of the isec and value, so
211 // we use that as the key here.
212 auto p = symbols.insert({{d->isec, d->value}, d});
213 // If we have multiple symbols at the same address, only one of them can have
214 // an associated unwind entry.
215 if (!p.second && d->unwindEntry) {
// The previously-recorded symbol must not already carry an unwind entry.
216 assert(!p.first->second->unwindEntry);
221 void UnwindInfoSectionImpl::prepareRelocations() {
222 // This iteration needs to be deterministic, since prepareRelocations may add
223 // entries to the GOT. Hence the use of a MapVector for
224 // UnwindInfoSection::symbols.
225 for (const Defined *d : make_second_range(symbols))
// Only __LD,__compact_unwind entries need preparation; entries from
// __eh_frame (DWARF) are filtered out by the name check below.
226 if (d->unwindEntry &&
227 d->unwindEntry->getName() == section_names::compactUnwind)
228 prepareRelocations(d->unwindEntry);
231 // Compact unwind relocations have different semantics, so we handle them in a
232 // separate code path from regular relocations. First, we do not wish to add
233 // rebase opcodes for __LD,__compact_unwind, because that section doesn't
234 // actually end up in the final binary. Second, personality pointers always
235 // reside in the GOT and must be treated specially.
236 void UnwindInfoSectionImpl::prepareRelocations(ConcatInputSection *isec) {
237 assert(!isec->shouldOmitFromOutput() &&
238 "__compact_unwind section should not be omitted");
240 // FIXME: Make this skip relocations for CompactUnwindEntries that
241 // point to dead-stripped functions. That might save some amount of
242 // work. But since there are usually just few personality functions
243 // that are referenced from many places, at least some of them likely
244 // live, it wouldn't reduce number of got entries.
245 for (size_t i = 0; i < isec->relocs.size(); ++i) {
246 Reloc &r = isec->relocs[i];
// Compact-unwind relocs are expected to be absolute (UNSIGNED) only.
247 assert(target->hasAttr(r.type, RelocAttrBits::UNSIGNED));
249 // Functions and LSDA entries always reside in the same object file as the
250 // compact unwind entries that reference them, and thus appear as section
251 // relocs. There is no need to prepare them. We only prepare relocs for
252 // personality functions.
253 if (r.offset != cuOffsets.personality)
// Case 1: the personality is referenced via a symbol reloc.
256 if (auto *s = r.referent.dyn_cast<Symbol *>()) {
257 // Personality functions are nearly always system-defined (e.g.,
258 // ___gxx_personality_v0 for C++) and relocated as dylib symbols. When an
259 // application provides its own personality function, it might be
260 // referenced by an extern Defined symbol reloc, or a local section reloc.
261 if (auto *defined = dyn_cast<Defined>(s)) {
262 // XXX(vyng) This is a special case for handling duplicate personality
263 // symbols. Note that LD64's behavior is a bit different and it is
264 // inconsistent with how symbol resolution usually works
266 // So we've decided not to follow it. Instead, simply pick the symbol
267 // with the same name from the symbol table to replace the local one.
269 // (See discussions/alternatives already considered on D107533)
270 if (!defined->isExternal())
271 if (Symbol *sym = symtab->find(defined->getName()))
273 r.referent = s = sym;
275 if (auto *undefined = dyn_cast<Undefined>(s)) {
276 treatUndefinedSymbol(*undefined, isec, r.offset);
277 // treatUndefinedSymbol() can replace s with a DylibSymbol; re-check.
278 if (isa<Undefined>(s))
282 if (auto *defined = dyn_cast<Defined>(s)) {
283 // Check if we have created a synthetic symbol at the same address.
284 Symbol *&personality =
285 personalityTable[{defined->isec, defined->value}];
286 if (personality == nullptr) {
// First sighting of this personality: give it a GOT slot.
287 personality = defined;
288 in.got->addEntry(defined);
289 } else if (personality != defined) {
// Duplicate at the same address: canonicalize on the first one seen.
290 r.referent = personality;
// Non-Defined, non-Undefined referents must be dylib symbols.
294 assert(isa<DylibSymbol>(s));
// Case 2: the personality is referenced via a section reloc.
299 if (auto *referentIsec = r.referent.dyn_cast<InputSection *>()) {
300 assert(!isCoalescedWeak(referentIsec));
301 // Personality functions can be referenced via section relocations
302 // if they live in the same object file. Create placeholder synthetic
303 // symbols for them in the GOT.
304 Symbol *&s = personalityTable[{referentIsec, r.addend}];
306 // This runs after dead stripping, so the noDeadStrip argument does not
// matter.
308 s = make<Defined>("<internal>", /*file=*/nullptr, referentIsec,
309 r.addend, /*size=*/0, /*isWeakDef=*/false,
310 /*isExternal=*/false, /*isPrivateExtern=*/false,
311 /*includeInSymtab=*/true,
312 /*isThumb=*/false, /*isReferencedDynamically=*/false,
313 /*noDeadStrip=*/false);
323 // We need to apply the relocations to the pre-link compact unwind section
324 // before converting it to post-link form. There should only be absolute
325 // relocations here: since we are not emitting the pre-link CU section, there
326 // is no source address to make a relative location meaningful.
327 void UnwindInfoSectionImpl::relocateCompactUnwind(
328 std::vector<CompactUnwindEntry> &cuEntries) {
// Entries are independent of each other, so relocate them in parallel.
329 parallelFor(0, symbolsVec.size(), [&](size_t i) {
330 CompactUnwindEntry &cu = cuEntries[i];
331 const Defined *d = symbolsVec[i].second;
// The function's final output address is known at this point.
332 cu.functionAddress = d->getVA();
336 // If we have DWARF unwind info, create a CU entry that points to it.
337 if (d->unwindEntry->getName() == section_names::ehFrame) {
// Combine the DWARF-mode encoding with the FDE's offset in __eh_frame.
338 cu.encoding = target->modeDwarfEncoding | d->unwindEntry->outSecOff;
339 const FDE &fde = cast<ObjFile>(d->getFile())->fdes[d->unwindEntry];
340 cu.functionLength = fde.funcLength;
341 cu.personality = fde.personality;
346 assert(d->unwindEntry->getName() == section_names::compactUnwind);
// Read the raw on-disk CU fields via the word-size-dependent offsets.
348 auto buf = reinterpret_cast<const uint8_t *>(d->unwindEntry->data.data()) -
351 support::endian::read32le(buf + cuOffsets.functionLength);
352 cu.encoding = support::endian::read32le(buf + cuOffsets.encoding);
// Personality & LSDA arrive as relocations rather than raw bytes.
353 for (const Reloc &r : d->unwindEntry->relocs) {
354 if (r.offset == cuOffsets.personality) {
355 cu.personality = r.referent.get<Symbol *>();
356 } else if (r.offset == cuOffsets.lsda) {
// An LSDA may be referenced via a symbol or directly via its section.
357 if (auto *referentSym = r.referent.dyn_cast<Symbol *>())
358 cu.lsda = cast<Defined>(referentSym)->isec;
360 cu.lsda = r.referent.get<InputSection *>();
366 // There should only be a handful of unique personality pointers, so we can
367 // encode them as 2-bit indices into a small array.
368 void UnwindInfoSectionImpl::encodePersonalities() {
369 for (size_t idx : cuIndices) {
370 CompactUnwindEntry &cu = cuEntries[idx];
// Entries without a personality are left with index 0 ("none").
371 if (cu.personality == nullptr)
373 // Linear search is fast enough for a small array.
374 auto it = find(personalities, cu.personality);
375 uint32_t personalityIndex; // 1-based index
376 if (it != personalities.end()) {
// Already recorded: reuse its 1-based position.
377 personalityIndex = std::distance(personalities.begin(), it) + 1;
// First sighting: append and use the new size as the 1-based index.
379 personalities.push_back(cu.personality);
380 personalityIndex = personalities.size();
// Shift the index into the UNWIND_PERSONALITY_MASK bit positions of
// the encoding.
383 personalityIndex << countTrailingZeros(
384 static_cast<compact_unwind_encoding_t>(UNWIND_PERSONALITY_MASK));
// The 2-bit field can only express indices 1..3 (0 means "none").
386 if (personalities.size() > 3)
387 error("too many personalities (" + Twine(personalities.size()) +
388 ") for compact unwind to encode");
// Returns whether adjacent CU entries carrying this encoding may be merged
// into a single entry (see the folding loop in finalize()).
391 static bool canFoldEncoding(compact_unwind_encoding_t encoding) {
392 // From compact_unwind_encoding.h:
393 // UNWIND_X86_64_MODE_STACK_IND:
394 // A "frameless" (RBP not used as frame pointer) function large constant
395 // stack size. This case is like the previous, except the stack size is too
396 // large to encode in the compact unwind encoding. Instead it requires that
397 // the function contains "subq $nnnnnnnn,RSP" in its prolog. The compact
398 // encoding contains the offset to the nnnnnnnn value in the function in
399 // UNWIND_X86_64_FRAMELESS_STACK_SIZE.
400 // Since this means the unwinder has to look at the `subq` in the function
401 // of the unwind info's unwind address, two functions that have identical
402 // unwind info can't be folded if it's using this encoding since both
403 // entries need unique addresses.
// The x86 and x86_64 mode masks/values coincide, so one comparison below
// covers both CPU types.
404 static_assert(UNWIND_X86_64_MODE_MASK == UNWIND_X86_MODE_MASK, "");
405 static_assert(UNWIND_X86_64_MODE_STACK_IND == UNWIND_X86_MODE_STACK_IND, "");
406 if ((target->cpuType == CPU_TYPE_X86_64 || target->cpuType == CPU_TYPE_X86) &&
407 (encoding & UNWIND_X86_64_MODE_MASK) == UNWIND_X86_64_MODE_STACK_IND) {
408 // FIXME: Consider passing in the two function addresses and getting
409 // their two stack sizes off the `subq` and only returning false if they're
410 // actually different.
416 // Scan the __LD,__compact_unwind entries and compute the space needs of
417 // __TEXT,__unwind_info and __TEXT,__eh_frame.
418 void UnwindInfoSectionImpl::finalize() {
422 // At this point, the address space for __TEXT,__text has been
423 // assigned, so we can relocate the __LD,__compact_unwind entries
424 // into a temporary buffer. Relocation is necessary in order to sort
425 // the CU entries by function address. Sorting is necessary so that
426 // we can fold adjacent CU entries with identical
427 // encoding+personality+lsda. Folding is necessary because it reduces
428 // the number of CU entries by as much as 3 orders of magnitude!
429 cuEntries.resize(symbols.size());
430 // The "map" part of the symbols MapVector was only needed for deduplication
431 // in addSymbol(). Now that we are done adding, move the contents to a plain
432 // std::vector for indexed access.
433 symbolsVec = symbols.takeVector();
434 relocateCompactUnwind(cuEntries);
436 // Rather than sort & fold the 32-byte entries directly, we create a
437 // vector of indices to entries and sort & fold that instead.
438 cuIndices.resize(cuEntries.size());
439 std::iota(cuIndices.begin(), cuIndices.end(), 0);
// Sort indices by the (now final) function address of each entry.
440 llvm::sort(cuIndices, [&](size_t a, size_t b) {
441 return cuEntries[a].functionAddress < cuEntries[b].functionAddress;
444 // Fold adjacent entries with matching encoding+personality+lsda
445 // We use three iterators on the same cuIndices to fold in-situ:
446 // (1) `foldBegin` is the first of a potential sequence of matching entries
447 // (2) `foldEnd` is the first non-matching entry after `foldBegin`.
448 // The semi-open interval [ foldBegin .. foldEnd ) contains a range of
449 // entries that can be folded into a single entry and written to ...
451 auto foldWrite = cuIndices.begin();
452 for (auto foldBegin = cuIndices.begin(); foldBegin < cuIndices.end();) {
453 auto foldEnd = foldBegin;
// Advance foldEnd while entries continue to match the run at foldBegin
// and the encoding itself permits folding.
454 while (++foldEnd < cuIndices.end() &&
455 cuEntries[*foldBegin].encoding == cuEntries[*foldEnd].encoding &&
456 cuEntries[*foldBegin].personality ==
457 cuEntries[*foldEnd].personality &&
458 cuEntries[*foldBegin].lsda == cuEntries[*foldEnd].lsda &&
459 canFoldEncoding(cuEntries[*foldEnd].encoding))
// Keep only the first entry of each folded run.
461 *foldWrite++ = *foldBegin;
464 cuIndices.erase(foldWrite, cuIndices.end());
466 encodePersonalities();
468 // Count frequencies of the folded encodings
469 EncodingMap encodingFrequencies;
470 for (size_t idx : cuIndices)
471 encodingFrequencies[cuEntries[idx].encoding]++;
473 // Make a vector of encodings, sorted by descending frequency
474 for (const auto &frequency : encodingFrequencies)
475 commonEncodings.emplace_back(frequency);
476 llvm::sort(commonEncodings,
477 [](const std::pair<compact_unwind_encoding_t, size_t> &a,
478 const std::pair<compact_unwind_encoding_t, size_t> &b) {
479 if (a.second == b.second)
480 // When frequencies match, secondarily sort on encoding
481 // to maintain parity with validate-unwind-info.py
482 return a.first > b.first;
483 return a.second > b.second;
486 // Truncate the vector to 127 elements.
487 // Common encoding indexes are limited to 0..126, while encoding
488 // indexes 127..255 are local to each second-level page
489 if (commonEncodings.size() > COMMON_ENCODINGS_MAX)
490 commonEncodings.resize(COMMON_ENCODINGS_MAX)
492 // Create a map from encoding to common-encoding-table index
493 for (size_t i = 0; i < commonEncodings.size(); i++)
494 commonEncodingIndexes[commonEncodings[i].first] = i;
496 // Split folded encodings into pages, where each page is limited by ...
497 // (a) 4 KiB capacity
498 // (b) 24-bit difference between first & final function address
499 // (c) 8-bit compact-encoding-table index,
500 // for which 0..126 references the global common-encodings table,
501 // and 127..255 references a local per-second-level-page table.
502 // First we try the compact format and determine how many entries fit.
503 // If more entries fit in the regular format, we use that.
504 for (size_t i = 0; i < cuIndices.size();) {
505 size_t idx = cuIndices[i];
506 secondLevelPages.emplace_back();
507 SecondLevelPage &page = secondLevelPages.back();
// Highest function address reachable from the page's first entry via
// the 24-bit compressed-entry offset field.
509 uint64_t functionAddressMax =
510 cuEntries[idx].functionAddress + COMPRESSED_ENTRY_FUNC_OFFSET_MASK;
511 size_t n = commonEncodings.size();
512 size_t wordsRemaining =
513 SECOND_LEVEL_PAGE_WORDS -
514 sizeof(unwind_info_compressed_second_level_page_header) /
// Greedily add entries while page capacity and offset range allow.
516 while (wordsRemaining >= 1 && i < cuIndices.size()) {
518 const CompactUnwindEntry *cuPtr = &cuEntries[idx];
519 if (cuPtr->functionAddress >= functionAddressMax) {
// Known encoding: the entry costs one word.
521 } else if (commonEncodingIndexes.count(cuPtr->encoding) ||
522 page.localEncodingIndexes.count(cuPtr->encoding)) {
// New local encoding: one word for the entry + one for the encoding.
525 } else if (wordsRemaining >= 2 && n < COMPACT_ENCODINGS_MAX) {
526 page.localEncodings.emplace_back(cuPtr->encoding);
527 page.localEncodingIndexes[cuPtr->encoding] = n++;
534 page.entryCount = i - page.entryIndex;
536 // If this is not the final page, see if it's possible to fit more
537 // entries by using the regular format. This can happen when there
538 // are many unique encodings, and we saturated the local
539 // encoding table early.
540 if (i < cuIndices.size() &&
541 page.entryCount < REGULAR_SECOND_LEVEL_ENTRIES_MAX) {
542 page.kind = UNWIND_SECOND_LEVEL_REGULAR;
543 page.entryCount = std::min(REGULAR_SECOND_LEVEL_ENTRIES_MAX,
544 cuIndices.size() - page.entryIndex);
545 i = page.entryIndex + page.entryCount;
547 page.kind = UNWIND_SECOND_LEVEL_COMPRESSED;
// Assign each LSDA-bearing entry its slot in the LSDA index array.
551 for (size_t idx : cuIndices) {
552 lsdaIndex[idx] = entriesWithLsda.size();
553 if (cuEntries[idx].lsda)
554 entriesWithLsda.push_back(idx);
557 // compute size of __TEXT,__unwind_info section
558 level2PagesOffset = sizeof(unwind_info_section_header) +
559 commonEncodings.size() * sizeof(uint32_t) +
560 personalities.size() * sizeof(uint32_t) +
561 // The extra second-level-page entry is for the sentinel
562 (secondLevelPages.size() + 1) *
563 sizeof(unwind_info_section_header_index_entry) +
564 entriesWithLsda.size() *
565 sizeof(unwind_info_section_header_lsda_index_entry);
567 level2PagesOffset + secondLevelPages.size() * SECOND_LEVEL_PAGE_BYTES;
570 // All inputs are relocated and output addresses are known, so write!
// Emits, in order: section header, common-encodings array, personality
// array, first-level index (+sentinel), LSDA index array, second-level
// pages. Layout must agree with the sizes computed in finalize().
572 void UnwindInfoSectionImpl::writeTo(uint8_t *buf) const {
573 assert(!cuIndices.empty() && "call only if there is unwind info");
// Section header.
576 auto *uip = reinterpret_cast<unwind_info_section_header *>(buf);
578 uip->commonEncodingsArraySectionOffset = sizeof(unwind_info_section_header);
579 uip->commonEncodingsArrayCount = commonEncodings.size();
580 uip->personalityArraySectionOffset =
581 uip->commonEncodingsArraySectionOffset +
582 (uip->commonEncodingsArrayCount * sizeof(uint32_t));
583 uip->personalityArrayCount = personalities.size();
584 uip->indexSectionOffset = uip->personalityArraySectionOffset +
585 (uip->personalityArrayCount * sizeof(uint32_t));
// +1 for the sentinel index entry written after the loop below.
586 uip->indexCount = secondLevelPages.size() + 1;
// Common encodings array, immediately after the header.
589 auto *i32p = reinterpret_cast<uint32_t *>(&uip[1]);
590 for (const auto &encoding : commonEncodings)
591 *i32p++ = encoding.first;
// Personalities, written as image-relative GOT addresses.
594 for (const Symbol *personality : personalities)
595 *i32p++ = personality->getGotVA() - in.header->addr;
// First-level index: one entry per second-level page.
598 uint32_t lsdaOffset =
599 uip->indexSectionOffset +
600 uip->indexCount * sizeof(unwind_info_section_header_index_entry);
601 uint64_t l2PagesOffset = level2PagesOffset;
602 auto *iep = reinterpret_cast<unwind_info_section_header_index_entry *>(i32p);
603 for (const SecondLevelPage &page : secondLevelPages) {
604 size_t idx = cuIndices[page.entryIndex];
605 iep->functionOffset = cuEntries[idx].functionAddress - in.header->addr;
606 iep->secondLevelPagesSectionOffset = l2PagesOffset;
607 iep->lsdaIndexArraySectionOffset =
608 lsdaOffset + lsdaIndex.lookup(idx) *
609 sizeof(unwind_info_section_header_lsda_index_entry);
611 l2PagesOffset += SECOND_LEVEL_PAGE_BYTES;
// Sentinel index entry: marks the end of the covered address range.
614 const CompactUnwindEntry &cuEnd = cuEntries[cuIndices.back()];
615 iep->functionOffset =
616 cuEnd.functionAddress - in.header->addr + cuEnd.functionLength;
617 iep->secondLevelPagesSectionOffset = 0;
618 iep->lsdaIndexArraySectionOffset =
619 lsdaOffset + entriesWithLsda.size() *
620 sizeof(unwind_info_section_header_lsda_index_entry);
// LSDA index array: image-relative (LSDA, function) address pairs.
625 reinterpret_cast<unwind_info_section_header_lsda_index_entry *>(iep);
626 for (size_t idx : entriesWithLsda) {
627 const CompactUnwindEntry &cu = cuEntries[idx];
628 lep->lsdaOffset = cu.lsda->getVA(/*off=*/0) - in.header->addr;
629 lep->functionOffset = cu.functionAddress - in.header->addr;
// Second-level pages, one 4 KiB page at a time.
634 auto *pp = reinterpret_cast<uint32_t *>(lep);
635 for (const SecondLevelPage &page : secondLevelPages) {
636 if (page.kind == UNWIND_SECOND_LEVEL_COMPRESSED) {
// Compressed entries store function offsets relative to the page's
// first function address.
637 uintptr_t functionAddressBase =
638 cuEntries[cuIndices[page.entryIndex]].functionAddress;
640 reinterpret_cast<unwind_info_compressed_second_level_page_header *>(
642 p2p->kind = page.kind;
643 p2p->entryPageOffset =
644 sizeof(unwind_info_compressed_second_level_page_header);
645 p2p->entryCount = page.entryCount;
646 p2p->encodingsPageOffset =
647 p2p->entryPageOffset + p2p->entryCount * sizeof(uint32_t);
648 p2p->encodingsCount = page.localEncodings.size();
649 auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
650 for (size_t i = 0; i < page.entryCount; i++) {
651 const CompactUnwindEntry &cue =
652 cuEntries[cuIndices[page.entryIndex + i]];
// Prefer the common-encodings index; fall back to the page-local one.
653 auto it = commonEncodingIndexes.find(cue.encoding);
654 if (it == commonEncodingIndexes.end())
655 it = page.localEncodingIndexes.find(cue.encoding);
// Pack the 8-bit encoding index above the 24-bit function offset.
656 *ep++ = (it->second << COMPRESSED_ENTRY_FUNC_OFFSET_BITS) |
657 (cue.functionAddress - functionAddressBase);
// Page-local encodings table follows the entries.
659 if (!page.localEncodings.empty())
660 memcpy(ep, page.localEncodings.data(),
661 page.localEncodings.size() * sizeof(uint32_t));
// Regular-format page: full (address, encoding) pairs.
664 reinterpret_cast<unwind_info_regular_second_level_page_header *>(pp);
665 p2p->kind = page.kind;
666 p2p->entryPageOffset =
667 sizeof(unwind_info_regular_second_level_page_header);
668 p2p->entryCount = page.entryCount;
669 auto *ep = reinterpret_cast<uint32_t *>(&p2p[1]);
670 for (size_t i = 0; i < page.entryCount; i++) {
671 const CompactUnwindEntry &cue =
672 cuEntries[cuIndices[page.entryIndex + i]];
673 *ep++ = cue.functionAddress;
674 *ep++ = cue.encoding;
// Advance to the next 4 KiB page boundary.
677 pp += SECOND_LEVEL_PAGE_WORDS;
// Factory function; keeps UnwindInfoSectionImpl private to this file.
681 UnwindInfoSection *macho::makeUnwindInfoSection() {
682 return make<UnwindInfoSectionImpl>();