1 //===- lib/FileFormat/MachO/ArchHandler_arm64.cpp -------------------------===//
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "ArchHandler.h"
12 #include "MachONormalizedFileBinaryUtils.h"
13 #include "llvm/ADT/StringRef.h"
14 #include "llvm/ADT/StringSwitch.h"
15 #include "llvm/ADT/Triple.h"
16 #include "llvm/Support/Endian.h"
17 #include "llvm/Support/ErrorHandling.h"
18 #include "llvm/Support/Format.h"
20 using namespace llvm::MachO;
21 using namespace lld::mach_o::normalized;
26 using llvm::support::ulittle32_t;
27 using llvm::support::ulittle64_t;
29 using llvm::support::little32_t;
30 using llvm::support::little64_t;
// Concrete ArchHandler for arm64 (AArch64) Mach-O: translates between
// normalized relocations and Reference kinds, applies fixups to atom
// content, and supplies stub/GOT metadata to the passes.
// NOTE(review): this view of the file is elided — several member bodies
// are missing lines (guard bodies, case labels, braces); see upstream.
32 class ArchHandler_arm64 : public ArchHandler {
34 ArchHandler_arm64() = default;
35 ~ArchHandler_arm64() override = default;
// Table of printable names for each Arm64Kind (defined below the class).
37 const Registry::KindStrings *kindStrings() override { return _sKindStrings; }
39 Reference::KindArch kindArch() override {
40 return Reference::KindArch::AArch64;
43 /// Used by GOTPass to locate GOT References
44 bool isGOTAccess(const Reference &ref, bool &canBypassGOT) override {
45 if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
47 assert(ref.kindArch() == Reference::KindArch::AArch64);
48 switch (ref.kindValue()) {
54 case unwindCIEToPersonalityFunction:
63 /// Used by GOTPass to update GOT References.
64 void updateReferenceToGOT(const Reference *ref, bool targetNowGOT) override {
65 // If GOT slot was instantiated, transform:
66 // gotPage21/gotOffset12 -> page21/offset12scale8
67 // If GOT slot optimized away, transform:
68 // gotPage21/gotOffset12 -> page21/addOffset12
69 assert(ref->kindNamespace() == Reference::KindNamespace::mach_o);
70 assert(ref->kindArch() == Reference::KindArch::AArch64);
71 switch (ref->kindValue()) {
73 const_cast<Reference *>(ref)->setKindValue(page21);
76 const_cast<Reference *>(ref)->setKindValue(targetNowGOT ?
77 offset12scale8 : addOffset12);
80 const_cast<Reference *>(ref)->setKindValue(delta32);
83 const_cast<Reference *>(ref)->setKindValue(imageOffset);
86 llvm_unreachable("Not a GOT reference");
// Stub (lazy-binding trampoline) layout description for this arch.
90 const StubInfo &stubInfo() override { return _sStubInfo; }
92 bool isCallSite(const Reference &) override;
93 bool isNonCallBranch(const Reference &) override {
97 bool isPointer(const Reference &) override;
98 bool isPairedReloc(const normalized::Relocation &) override;
100 bool needsCompactUnwind() override {
103 Reference::KindValue imageOffsetKind() override {
106 Reference::KindValue imageOffsetKindIndirect() override {
107 return imageOffsetGot;
110 Reference::KindValue unwindRefToPersonalityFunctionKind() override {
111 return unwindCIEToPersonalityFunction;
114 Reference::KindValue unwindRefToCIEKind() override {
118 Reference::KindValue unwindRefToFunctionKind() override {
119 return unwindFDEToFunction;
122 Reference::KindValue unwindRefToEhFrameKind() override {
123 return unwindInfoToEhFrame;
126 Reference::KindValue pointerKind() override {
130 Reference::KindValue lazyImmediateLocationKind() override {
131 return lazyImmediateLocation;
134 uint32_t dwarfCompactUnwindType() override {
// Decode a single (unpaired) relocation into (kind, target, addend).
138 llvm::Error getReferenceInfo(const normalized::Relocation &reloc,
139 const DefinedAtom *inAtom,
140 uint32_t offsetInAtom,
141 uint64_t fixupAddress, bool isBig,
142 FindAtomBySectionAndAddress atomFromAddress,
143 FindAtomBySymbolIndex atomFromSymbolIndex,
144 Reference::KindValue *kind,
145 const lld::Atom **target,
146 Reference::Addend *addend) override;
// Decode a two-relocation pair (ADDEND or SUBTRACTOR followed by its mate).
148 getPairReferenceInfo(const normalized::Relocation &reloc1,
149 const normalized::Relocation &reloc2,
150 const DefinedAtom *inAtom,
151 uint32_t offsetInAtom,
152 uint64_t fixupAddress, bool isBig, bool scatterable,
153 FindAtomBySectionAndAddress atomFromAddress,
154 FindAtomBySymbolIndex atomFromSymbolIndex,
155 Reference::KindValue *kind,
156 const lld::Atom **target,
157 Reference::Addend *addend) override;
159 bool needsLocalSymbolInRelocatableFile(const DefinedAtom *atom) override {
160 return (atom->contentType() == DefinedAtom::typeCString);
// Copy an atom's bytes into the output buffer and apply all fixups
// (relocatable vs. final-image flavors).
163 void generateAtomContent(const DefinedAtom &atom, bool relocatable,
164 FindAddressForAtom findAddress,
165 FindAddressForAtom findSectionAddress,
166 uint64_t imageBaseAddress,
167 llvm::MutableArrayRef<uint8_t> atomContentBuffer) override;
// Emit the normalized relocation records for one Reference (-r mode).
169 void appendSectionRelocations(const DefinedAtom &atom,
170 uint64_t atomSectionOffset,
171 const Reference &ref,
172 FindSymbolIndexForAtom symbolIndexForAtom,
173 FindSectionIndexForAtom sectionIndexForAtom,
174 FindAddressForAtom addressForAtom,
175 normalized::Relocations &relocs) override;
178 static const Registry::KindStrings _sKindStrings[];
179 static const StubInfo _sStubInfo;
// All arm64 Reference kinds this handler understands. The first group
// maps 1:1 onto relocations found in .o files; the second group exists
// only in memory, created by linker passes.
181 enum Arm64Kind : Reference::KindValue {
182 invalid, /// for error condition
184 // Kinds found in mach-o .o files:
185 branch26, /// ex: bl _foo
186 page21, /// ex: adrp x1, _foo@PAGE
187 offset12, /// ex: ldrb w0, [x1, _foo@PAGEOFF]
188 offset12scale2, /// ex: ldrs w0, [x1, _foo@PAGEOFF]
189 offset12scale4, /// ex: ldr w0, [x1, _foo@PAGEOFF]
190 offset12scale8, /// ex: ldr x0, [x1, _foo@PAGEOFF]
191 offset12scale16, /// ex: ldr q0, [x1, _foo@PAGEOFF]
192 gotPage21, /// ex: adrp x1, _foo@GOTPAGE
193 gotOffset12, /// ex: ldr w0, [x1, _foo@GOTPAGEOFF]
194 tlvPage21, /// ex: adrp x1, _foo@TLVPAGE
195 tlvOffset12, /// ex: ldr w0, [x1, _foo@TLVPAGEOFF]
197 pointer64, /// ex: .quad _foo
198 delta64, /// ex: .quad _foo - .
199 delta32, /// ex: .long _foo - .
200 negDelta32, /// ex: .long . - _foo
201 pointer64ToGOT, /// ex: .quad _foo@GOT
202 delta32ToGOT, /// ex: .long _foo@GOT - .
204 // Kinds introduced by Passes:
205 addOffset12, /// Location contains LDR to change into ADD.
206 lazyPointer, /// Location contains a lazy pointer.
207 lazyImmediateLocation, /// Location contains immediate value used in stub.
208 imageOffset, /// Location contains offset of atom in final image
209 imageOffsetGot, /// Location contains offset of GOT entry for atom in
210 /// final image (typically personality function).
211 unwindCIEToPersonalityFunction, /// Nearly delta32ToGOT, but cannot be
212 /// rematerialized in relocatable object
213 /// (yay for implicit contracts!).
214 unwindFDEToFunction, /// Nearly delta64, but cannot be rematerialized in
215 /// relocatable object (yay for implicit contracts!).
216 unwindInfoToEhFrame, /// Fix low 24 bits of compact unwind encoding to
217 /// refer to __eh_frame entry.
// Fixup writers: "Final" patches real addresses into a linked image;
// "Relocatable" leaves symbolic relocations for -r output.
220 void applyFixupFinal(const Reference &ref, uint8_t *location,
221 uint64_t fixupAddress, uint64_t targetAddress,
222 uint64_t inAtomAddress, uint64_t imageBaseAddress,
223 FindAddressForAtom findSectionAddress);
225 void applyFixupRelocatable(const Reference &ref, uint8_t *location,
226 uint64_t fixupAddress, uint64_t targetAddress,
227 uint64_t inAtomAddress, bool targetUnnamed);
229 // Utility functions for inspecting/updating instructions.
230 static uint32_t setDisplacementInBranch26(uint32_t instr, int32_t disp);
231 static uint32_t setDisplacementInADRP(uint32_t instr, int64_t disp);
232 static Arm64Kind offset12KindFromInstruction(uint32_t instr);
233 static uint32_t setImm12(uint32_t instr, uint32_t offset);
// Printable name for every Arm64Kind value; consumed by the Registry for
// diagnostics and YAML round-tripping. Keep in sync with the Arm64Kind enum.
236 const Registry::KindStrings ArchHandler_arm64::_sKindStrings[] = {
237 LLD_KIND_STRING_ENTRY(invalid),
238 LLD_KIND_STRING_ENTRY(branch26),
239 LLD_KIND_STRING_ENTRY(page21),
240 LLD_KIND_STRING_ENTRY(offset12),
241 LLD_KIND_STRING_ENTRY(offset12scale2),
242 LLD_KIND_STRING_ENTRY(offset12scale4),
243 LLD_KIND_STRING_ENTRY(offset12scale8),
244 LLD_KIND_STRING_ENTRY(offset12scale16),
245 LLD_KIND_STRING_ENTRY(gotPage21),
246 LLD_KIND_STRING_ENTRY(gotOffset12),
247 LLD_KIND_STRING_ENTRY(tlvPage21),
248 LLD_KIND_STRING_ENTRY(tlvOffset12),
249 LLD_KIND_STRING_ENTRY(pointer64),
250 LLD_KIND_STRING_ENTRY(delta64),
251 LLD_KIND_STRING_ENTRY(delta32),
252 LLD_KIND_STRING_ENTRY(negDelta32),
253 LLD_KIND_STRING_ENTRY(pointer64ToGOT),
254 LLD_KIND_STRING_ENTRY(delta32ToGOT),
// Pass-introduced kinds:
256 LLD_KIND_STRING_ENTRY(addOffset12),
257 LLD_KIND_STRING_ENTRY(lazyPointer),
258 LLD_KIND_STRING_ENTRY(lazyImmediateLocation),
259 LLD_KIND_STRING_ENTRY(imageOffset),
260 LLD_KIND_STRING_ENTRY(imageOffsetGot),
261 LLD_KIND_STRING_ENTRY(unwindCIEToPersonalityFunction),
262 LLD_KIND_STRING_ENTRY(unwindFDEToFunction),
263 LLD_KIND_STRING_ENTRY(unwindInfoToEhFrame),
// Machine code templates and relocation descriptors for the lazy-binding
// stub machinery (stub, stub helper, stub helper-common). Offsets in the
// descriptor entries are byte offsets into the code arrays below.
268 const ArchHandler::StubInfo ArchHandler_arm64::_sStubInfo = {
271 // Lazy pointer references
272 { Reference::KindArch::AArch64, pointer64, 0, 0 },
273 { Reference::KindArch::AArch64, lazyPointer, 0, 0 },
275 // GOT pointer to dyld_stub_binder
276 { Reference::KindArch::AArch64, pointer64, 0, 0 },
278 // arm64 code alignment 2^1
281 // Stub size and code
283 { 0x10, 0x00, 0x00, 0x90, // ADRP X16, lazy_pointer@page
284 0x10, 0x02, 0x40, 0xF9, // LDR X16, [X16, lazy_pointer@pageoff]
285 0x00, 0x02, 0x1F, 0xD6 }, // BR X16
286 { Reference::KindArch::AArch64, page21, 0, 0 },
287 { true, offset12scale8, 4, 0 },
289 // Stub Helper size and code
291 { 0x50, 0x00, 0x00, 0x18, // LDR W16, L0
292 0x00, 0x00, 0x00, 0x14, // B helperhelper (0x14000000 is an unconditional branch, not an LDR)
293 0x00, 0x00, 0x00, 0x00 }, // L0: .long 0
294 { Reference::KindArch::AArch64, lazyImmediateLocation, 8, 0 },
295 { Reference::KindArch::AArch64, branch26, 4, 0 },
297 // Stub helper image cache content type
298 DefinedAtom::typeGOT,
300 // Stub Helper-Common size and code
302 // Stub helper alignment
304 { 0x11, 0x00, 0x00, 0x90, // ADRP X17, dyld_ImageLoaderCache@page
305 0x31, 0x02, 0x00, 0x91, // ADD X17, X17, dyld_ImageLoaderCache@pageoff
306 0xF0, 0x47, 0xBF, 0xA9, // STP X16/X17, [SP, #-16]!
307 0x10, 0x00, 0x00, 0x90, // ADRP X16, _fast_lazy_bind@page
308 0x10, 0x02, 0x40, 0xF9, // LDR X16, [X16,_fast_lazy_bind@pageoff]
309 0x00, 0x02, 0x1F, 0xD6 }, // BR X16
310 { Reference::KindArch::AArch64, page21, 0, 0 },
311 { true, offset12, 4, 0 },
312 { Reference::KindArch::AArch64, page21, 12, 0 },
313 { true, offset12scale8, 16, 0 }
// A reference is a call site only when it is a mach-o branch26 fixup
// (e.g. "bl _foo"); non-mach-o namespaces are rejected up front.
316 bool ArchHandler_arm64::isCallSite(const Reference &ref) {
317 if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
319 assert(ref.kindArch() == Reference::KindArch::AArch64);
320 return (ref.kindValue() == branch26);
// True only for a plain 64-bit pointer fixup (pointer64, e.g. ".quad _foo").
323 bool ArchHandler_arm64::isPointer(const Reference &ref) {
324 if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
326 assert(ref.kindArch() == Reference::KindArch::AArch64);
327 Reference::KindValue kind = ref.kindValue();
328 return (kind == pointer64);
// ADDEND and SUBTRACTOR relocations are always the first half of a pair;
// the following relocation record supplies the actual target.
331 bool ArchHandler_arm64::isPairedReloc(const Relocation &r) {
332 return ((r.type == ARM64_RELOC_ADDEND) || (r.type == ARM64_RELOC_SUBTRACTOR));
// Patch the imm26 field of a B/BL instruction. The byte displacement must
// fit in a signed 28-bit range (+/-128 MiB); it is stored divided by 4
// since arm64 instructions are 4-byte aligned.
335 uint32_t ArchHandler_arm64::setDisplacementInBranch26(uint32_t instr,
336 int32_t displacement) {
337 assert((displacement <= 134217727) && (displacement > (-134217728)) &&
338 "arm64 branch out of range");
339 return (instr & 0xFC000000) | ((uint32_t)(displacement >> 2) & 0x03FFFFFF);
// Patch the page displacement of an ADRP instruction. ADRP splits its
// 21-bit page immediate across two fields: immlo (bits 30:29) holds the
// low 2 bits and immhi (bits 23:5) the high 19 bits; the shifts below
// place displacement bits 13:12 and 32:14 respectively.
342 uint32_t ArchHandler_arm64::setDisplacementInADRP(uint32_t instruction,
343 int64_t displacement) {
344 assert((displacement <= 0x100000000LL) && (displacement > (-0x100000000LL)) &&
345 "arm64 ADRP out of range");
346 assert(((instruction & 0x9F000000) == 0x90000000) &&
347 "reloc not on ADRP instruction");
348 uint32_t immhi = (displacement >> 9) & (0x00FFFFE0);
349 uint32_t immlo = (displacement << 17) & (0x60000000);
350 return (instruction & 0x9F00001F) | immlo | immhi;
// Classify a load/store instruction by its access size so the correct
// offset12 scale kind is chosen. Bits 31:30 give the size field; the
// 0x04800000 test detects the 128-bit (q-register) vector form.
353 ArchHandler_arm64::Arm64Kind
354 ArchHandler_arm64::offset12KindFromInstruction(uint32_t instruction) {
355 if (instruction & 0x08000000) {
356 switch ((instruction >> 30) & 0x3) {
358 if ((instruction & 0x04800000) == 0x04800000)
359 return offset12scale16;
362 return offset12scale2;
364 return offset12scale4;
366 return offset12scale8;
// Patch the imm12 field (bits 21:10) of a load/store or ADD immediate
// instruction; the offset must already be scaled and fit in 12 bits.
372 uint32_t ArchHandler_arm64::setImm12(uint32_t instruction, uint32_t offset) {
373 assert(((offset & 0xFFFFF000) == 0) && "imm12 offset out of range");
374 uint32_t imm12 = offset << 10;
375 return (instruction & 0xFFC003FF) | imm12;
// Translate one (unpaired) normalized relocation into a Reference:
// sets *kind from the relocation pattern (and, for PAGEOFF12, from the
// instruction bytes themselves), resolves *target via symbol index or
// section+address, and extracts *addend from the fixup content where the
// encoding carries one. Returns a GenericError for unknown patterns.
378 llvm::Error ArchHandler_arm64::getReferenceInfo(
379 const Relocation &reloc, const DefinedAtom *inAtom, uint32_t offsetInAtom,
380 uint64_t fixupAddress, bool isBig,
381 FindAtomBySectionAndAddress atomFromAddress,
382 FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind,
383 const lld::Atom **target, Reference::Addend *addend) {
384 const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
385 switch (relocPattern(reloc)) {
386 case ARM64_RELOC_BRANCH26 | rPcRel | rExtern | rLength4:
389 if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
392 return llvm::Error::success();
393 case ARM64_RELOC_PAGE21 | rPcRel | rExtern | rLength4:
394 // ex: adrp x1, _foo@PAGE
396 if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
399 return llvm::Error::success();
400 case ARM64_RELOC_PAGEOFF12 | rExtern | rLength4:
401 // ex: ldr x0, [x1, _foo@PAGEOFF]
// The scale kind depends on the instruction's access size, so decode it.
402 *kind = offset12KindFromInstruction(*(const little32_t *)fixupContent);
403 if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
406 return llvm::Error::success();
407 case ARM64_RELOC_GOT_LOAD_PAGE21 | rPcRel | rExtern | rLength4:
408 // ex: adrp x1, _foo@GOTPAGE
410 if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
413 return llvm::Error::success();
414 case ARM64_RELOC_GOT_LOAD_PAGEOFF12 | rExtern | rLength4:
415 // ex: ldr x0, [x1, _foo@GOTPAGEOFF]
417 if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
420 return llvm::Error::success();
421 case ARM64_RELOC_TLVP_LOAD_PAGE21 | rPcRel | rExtern | rLength4:
422 // ex: adrp x1, _foo@TLVPAGE
424 if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
427 return llvm::Error::success();
428 case ARM64_RELOC_TLVP_LOAD_PAGEOFF12 | rExtern | rLength4:
429 // ex: ldr x0, [x1, _foo@TLVPAGEOFF]
431 if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
434 return llvm::Error::success();
435 case ARM64_RELOC_UNSIGNED | rExtern | rLength8:
436 // ex: .quad _foo + N
438 if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
// Addend is stored in-place as the 64-bit little-endian content.
440 *addend = *(const little64_t *)fixupContent;
441 return llvm::Error::success();
442 case ARM64_RELOC_UNSIGNED | rLength8:
443 // ex: .quad Lfoo + N
445 return atomFromAddress(reloc.symbol, *(const little64_t *)fixupContent,
447 case ARM64_RELOC_POINTER_TO_GOT | rExtern | rLength8:
448 // ex: .quad _foo@GOT
449 *kind = pointer64ToGOT;
450 if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
453 return llvm::Error::success();
454 case ARM64_RELOC_POINTER_TO_GOT | rPcRel | rExtern | rLength4:
455 // ex: .long _foo@GOT - .
457 // If we are in an .eh_frame section, then the kind of the relocation should
458 // not be delta32ToGOT. It may instead be unwindCIEToPersonalityFunction.
459 if (inAtom->contentType() == DefinedAtom::typeCFI)
460 *kind = unwindCIEToPersonalityFunction;
462 *kind = delta32ToGOT;
464 if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
467 return llvm::Error::success();
469 return llvm::make_error<GenericError>("unsupported arm64 relocation type");
// Translate a relocation pair into one Reference. Two pairings exist:
// ADDEND + (BRANCH26|PAGE21|PAGEOFF12), where reloc1.symbol carries the
// addend value; and SUBTRACTOR + UNSIGNED, which expresses a difference
// ("_foo - .") whose addend is read from the fixup content.
473 llvm::Error ArchHandler_arm64::getPairReferenceInfo(
474 const normalized::Relocation &reloc1, const normalized::Relocation &reloc2,
475 const DefinedAtom *inAtom, uint32_t offsetInAtom, uint64_t fixupAddress,
476 bool swap, bool scatterable, FindAtomBySectionAndAddress atomFromAddress,
477 FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind,
478 const lld::Atom **target, Reference::Addend *addend) {
479 const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
// Both patterns packed into one value so a single switch covers the pair.
480 switch (relocPattern(reloc1) << 16 | relocPattern(reloc2)) {
481 case ((ARM64_RELOC_ADDEND | rLength4) << 16 |
482 ARM64_RELOC_BRANCH26 | rPcRel | rExtern | rLength4):
485 if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
// For ADDEND pairs the "symbol" field of reloc1 holds the addend itself.
487 *addend = reloc1.symbol;
488 return llvm::Error::success();
489 case ((ARM64_RELOC_ADDEND | rLength4) << 16 |
490 ARM64_RELOC_PAGE21 | rPcRel | rExtern | rLength4):
491 // ex: adrp x1, _foo@PAGE
493 if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
495 *addend = reloc1.symbol;
496 return llvm::Error::success();
497 case ((ARM64_RELOC_ADDEND | rLength4) << 16 |
498 ARM64_RELOC_PAGEOFF12 | rExtern | rLength4): {
499 // ex: ldr w0, [x1, _foo@PAGEOFF]
500 uint32_t cont32 = (int32_t)*(const little32_t *)fixupContent;
501 *kind = offset12KindFromInstruction(cont32);
502 if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
504 *addend = reloc1.symbol;
505 return llvm::Error::success();
507 case ((ARM64_RELOC_SUBTRACTOR | rExtern | rLength8) << 16 |
508 ARM64_RELOC_UNSIGNED | rExtern | rLength8):
509 // ex: .quad _foo - .
510 if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
513 // If we are in an .eh_frame section, then the kind of the relocation should
514 // not be delta64. It may instead be unwindFDEToFunction.
515 if (inAtom->contentType() == DefinedAtom::typeCFI)
516 *kind = unwindFDEToFunction;
520 // The offsets of the 2 relocations must match
521 if (reloc1.offset != reloc2.offset)
522 return llvm::make_error<GenericError>(
523 "paired relocs must have the same offset");
524 *addend = (int64_t)*(const little64_t *)fixupContent + offsetInAtom;
525 return llvm::Error::success();
526 case ((ARM64_RELOC_SUBTRACTOR | rExtern | rLength4) << 16 |
527 ARM64_RELOC_UNSIGNED | rExtern | rLength4):
528 // ex: .long _foo - . (4-byte variant of the subtractor pair)
530 if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
532 *addend = (int32_t)*(const little32_t *)fixupContent + offsetInAtom;
533 return llvm::Error::success();
535 return llvm::make_error<GenericError>("unsupported arm64 relocation pair");
// Copy the atom's raw bytes into the output buffer, then walk its
// references and apply each fixup in place — the relocatable flavor when
// emitting a .o (-r), otherwise the final-image flavor with real addresses.
539 void ArchHandler_arm64::generateAtomContent(
540 const DefinedAtom &atom, bool relocatable, FindAddressForAtom findAddress,
541 FindAddressForAtom findSectionAddress, uint64_t imageBaseAddress,
542 llvm::MutableArrayRef<uint8_t> atomContentBuffer) {
544 std::copy(atom.rawContent().begin(), atom.rawContent().end(),
545 atomContentBuffer.begin());
548 if (atom.begin() != atom.end()) {
549 DEBUG_WITH_TYPE("atom-content", llvm::dbgs()
550 << "Applying fixups to atom:\n"
552 << llvm::format(" 0x%09lX", &atom)
554 << atom.file().ordinal()
560 << atom.contentType()
564 for (const Reference *ref : atom) {
565 uint32_t offset = ref->offsetInAtom();
566 const Atom *target = ref->target();
567 bool targetUnnamed = target->name().empty();
// Only defined atoms have addresses; others (e.g. undefined) stay 0 here.
568 uint64_t targetAddress = 0;
569 if (isa<DefinedAtom>(target))
570 targetAddress = findAddress(*target);
571 uint64_t atomAddress = findAddress(atom);
572 uint64_t fixupAddress = atomAddress + offset;
574 applyFixupRelocatable(*ref, &atomContentBuffer[offset], fixupAddress,
575 targetAddress, atomAddress, targetUnnamed);
577 applyFixupFinal(*ref, &atomContentBuffer[offset], fixupAddress,
578 targetAddress, atomAddress, imageBaseAddress,
// Patch real addresses into the instruction/data bytes at `loc` for a
// final (non-relocatable) image. Each Arm64Kind has its own encoding;
// scaled offset12 kinds assert the target's low bits match the scale.
584 void ArchHandler_arm64::applyFixupFinal(const Reference &ref, uint8_t *loc,
585 uint64_t fixupAddress,
586 uint64_t targetAddress,
587 uint64_t inAtomAddress,
588 uint64_t imageBaseAddress,
589 FindAddressForAtom findSectionAddress) {
590 if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
592 assert(ref.kindArch() == Reference::KindArch::AArch64);
// Little-endian views of the fixup location for 32- and 64-bit writes.
593 ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
594 ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
595 int32_t displacement;
596 uint32_t instruction;
599 switch (static_cast<Arm64Kind>(ref.kindValue())) {
601 displacement = (targetAddress - fixupAddress) + ref.addend();
602 *loc32 = setDisplacementInBranch26(*loc32, displacement);
// ADRP operates on 4 KiB pages: difference of the two page bases.
608 ((targetAddress + ref.addend()) & (-4096)) - (fixupAddress & (-4096));
609 *loc32 = setDisplacementInADRP(*loc32, displacement);
614 displacement = (targetAddress + ref.addend()) & 0x00000FFF;
615 *loc32 = setImm12(*loc32, displacement);
618 displacement = (targetAddress + ref.addend()) & 0x00000FFF;
619 assert(((displacement & 0x1) == 0) &&
620 "scaled imm12 not accessing 2-byte aligneds");
621 *loc32 = setImm12(*loc32, displacement >> 1);
624 displacement = (targetAddress + ref.addend()) & 0x00000FFF;
625 assert(((displacement & 0x3) == 0) &&
626 "scaled imm12 not accessing 4-byte aligned");
627 *loc32 = setImm12(*loc32, displacement >> 2);
630 displacement = (targetAddress + ref.addend()) & 0x00000FFF;
631 assert(((displacement & 0x7) == 0) &&
632 "scaled imm12 not accessing 8-byte aligned");
633 *loc32 = setImm12(*loc32, displacement >> 3);
635 case offset12scale16:
636 displacement = (targetAddress + ref.addend()) & 0x00000FFF;
637 assert(((displacement & 0xF) == 0) &&
638 "scaled imm12 not accessing 16-byte aligned");
639 *loc32 = setImm12(*loc32, displacement >> 4);
// addOffset12: rewrite an "LDR x, [x, off]" (0xF94xxxxx) into an
// "ADD x, x, off" (0x91xxxxxx), preserving the register operands.
642 instruction = *loc32;
643 assert(((instruction & 0xFFC00000) == 0xF9400000) &&
644 "GOT reloc is not an LDR instruction");
645 displacement = (targetAddress + ref.addend()) & 0x00000FFF;
646 value32 = 0x91000000 | (instruction & 0x000003FF);
647 instruction = setImm12(value32, displacement);
648 *loc32 = instruction;
652 *loc64 = targetAddress + ref.addend();
655 case unwindFDEToFunction:
656 *loc64 = (targetAddress - fixupAddress) + ref.addend();
660 case unwindCIEToPersonalityFunction:
661 *loc32 = (targetAddress - fixupAddress) + ref.addend();
665 *loc32 = fixupAddress - targetAddress + ref.addend();
669 case lazyImmediateLocation:
670 *loc32 = ref.addend();
673 *loc32 = (targetAddress - imageBaseAddress) + ref.addend();
676 llvm_unreachable("imageOffsetGot should have been changed to imageOffset");
678 case unwindInfoToEhFrame:
// Compact unwind: only the low 24 bits hold the __eh_frame offset.
679 value64 = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
680 assert(value64 < 0xffffffU && "offset in __eh_frame too large");
681 *loc32 = (*loc32 & 0xff000000U) | value64;
684 // Fall into llvm_unreachable().
687 llvm_unreachable("invalid arm64 Reference Kind");
// Write the bytes for -r (relocatable) output: instruction immediates are
// zeroed (the relocation records carry the real info), while data kinds
// store addends or symbolic differences that the next link can re-derive.
690 void ArchHandler_arm64::applyFixupRelocatable(const Reference &ref,
692 uint64_t fixupAddress,
693 uint64_t targetAddress,
694 uint64_t inAtomAddress,
695 bool targetUnnamed) {
696 if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
698 assert(ref.kindArch() == Reference::KindArch::AArch64);
699 ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
700 ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
701 switch (static_cast<Arm64Kind>(ref.kindValue())) {
// Instruction fixups get zero displacement in -r mode.
703 *loc32 = setDisplacementInBranch26(*loc32, 0);
708 *loc32 = setDisplacementInADRP(*loc32, 0);
714 case offset12scale16:
717 *loc32 = setImm12(*loc32, 0);
721 *loc64 = targetAddress + ref.addend();
723 *loc64 = ref.addend();
726 *loc64 = ref.addend() + inAtomAddress - fixupAddress;
728 case unwindFDEToFunction:
729 // We don't emit unwindFDEToFunction in -r mode as they are implicitly
730 // generated from the data in the __eh_frame section. So here we need
731 // to use the targetAddress so that we can generate the full relocation
732 // when we parse again later.
733 *loc64 = targetAddress - fixupAddress;
736 *loc32 = ref.addend() + inAtomAddress - fixupAddress;
739 // We don't emit negDelta32 in -r mode as they are implicitly
740 // generated from the data in the __eh_frame section. So here we need
741 // to use the targetAddress so that we can generate the full relocation
742 // when we parse again later.
743 *loc32 = fixupAddress - targetAddress + ref.addend();
749 *loc32 = inAtomAddress - fixupAddress;
751 case unwindCIEToPersonalityFunction:
752 // We don't emit unwindCIEToPersonalityFunction in -r mode as they are
753 // implicitly generated from the data in the __eh_frame section. So here we
754 // need to use the targetAddress so that we can generate the full relocation
755 // when we parse again later.
756 *loc32 = targetAddress - fixupAddress;
759 llvm_unreachable("lazy reference kind implies GOT pass was run");
761 case lazyImmediateLocation:
762 llvm_unreachable("lazy reference kind implies Stubs pass was run");
765 case unwindInfoToEhFrame:
766 llvm_unreachable("fixup implies __unwind_info");
769 // Fall into llvm_unreachable().
772 llvm_unreachable("unknown arm64 Reference Kind");
// Emit the normalized relocation record(s) for one Reference when writing
// a relocatable object. Kinds with a non-zero addend lead with an
// ARM64_RELOC_ADDEND record; delta kinds emit a SUBTRACTOR/UNSIGNED pair.
775 void ArchHandler_arm64::appendSectionRelocations(
776 const DefinedAtom &atom, uint64_t atomSectionOffset, const Reference &ref,
777 FindSymbolIndexForAtom symbolIndexForAtom,
778 FindSectionIndexForAtom sectionIndexForAtom,
779 FindAddressForAtom addressForAtom, normalized::Relocations &relocs) {
780 if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
782 assert(ref.kindArch() == Reference::KindArch::AArch64);
783 uint32_t sectionOffset = atomSectionOffset + ref.offsetInAtom();
784 switch (static_cast<Arm64Kind>(ref.kindValue())) {
787 appendReloc(relocs, sectionOffset, ref.addend(), 0,
788 ARM64_RELOC_ADDEND | rLength4);
789 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
790 ARM64_RELOC_BRANCH26 | rPcRel | rExtern | rLength4);
792 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
793 ARM64_RELOC_BRANCH26 | rPcRel | rExtern | rLength4);
798 appendReloc(relocs, sectionOffset, ref.addend(), 0,
799 ARM64_RELOC_ADDEND | rLength4);
800 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
801 ARM64_RELOC_PAGE21 | rPcRel | rExtern | rLength4);
803 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
804 ARM64_RELOC_PAGE21 | rPcRel | rExtern | rLength4);
811 case offset12scale16:
813 appendReloc(relocs, sectionOffset, ref.addend(), 0,
814 ARM64_RELOC_ADDEND | rLength4);
815 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
816 ARM64_RELOC_PAGEOFF12 | rExtern | rLength4);
818 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
819 ARM64_RELOC_PAGEOFF12 | rExtern | rLength4);
// GOT/TLV kinds never carry addends — enforced here.
823 assert(ref.addend() == 0);
824 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
825 ARM64_RELOC_GOT_LOAD_PAGE21 | rPcRel | rExtern | rLength4);
828 assert(ref.addend() == 0);
829 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
830 ARM64_RELOC_GOT_LOAD_PAGEOFF12 | rExtern | rLength4);
833 assert(ref.addend() == 0);
834 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
835 ARM64_RELOC_TLVP_LOAD_PAGE21 | rPcRel | rExtern | rLength4);
838 assert(ref.addend() == 0);
839 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
840 ARM64_RELOC_TLVP_LOAD_PAGEOFF12 | rExtern | rLength4);
// Unnamed (local) targets are referenced by section index, not symbol.
843 if (ref.target()->name().empty())
844 appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
845 ARM64_RELOC_UNSIGNED | rLength8);
847 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
848 ARM64_RELOC_UNSIGNED | rExtern | rLength8);
851 appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
852 ARM64_RELOC_SUBTRACTOR | rExtern | rLength8);
853 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
854 ARM64_RELOC_UNSIGNED | rExtern | rLength8);
857 appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
858 ARM64_RELOC_SUBTRACTOR | rExtern | rLength4 );
859 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
860 ARM64_RELOC_UNSIGNED | rExtern | rLength4 );
863 assert(ref.addend() == 0);
864 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
865 ARM64_RELOC_POINTER_TO_GOT | rExtern | rLength8);
868 assert(ref.addend() == 0);
869 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
870 ARM64_RELOC_POINTER_TO_GOT | rPcRel | rExtern | rLength4);
873 llvm_unreachable("lazy reference kind implies GOT pass was run");
875 case lazyImmediateLocation:
876 llvm_unreachable("lazy reference kind implies Stubs pass was run");
879 llvm_unreachable("deltas from mach_header can only be in final images");
880 case unwindCIEToPersonalityFunction:
881 case unwindFDEToFunction:
882 case unwindInfoToEhFrame:
887 // Fall into llvm_unreachable().
890 llvm_unreachable("unknown arm64 Reference Kind");
// Factory: construct the arm64 handler (class has private constructor;
// created only through this entry point).
893 std::unique_ptr<mach_o::ArchHandler> ArchHandler::create_arm64() {
894 return std::unique_ptr<mach_o::ArchHandler>(new ArchHandler_arm64());
897 } // namespace mach_o