1 //===- lib/FileFormat/MachO/ArchHandler_x86_64.cpp ------------------------===//
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "ArchHandler.h"
12 #include "MachONormalizedFileBinaryUtils.h"
13 #include "llvm/ADT/StringRef.h"
14 #include "llvm/ADT/StringSwitch.h"
15 #include "llvm/ADT/Triple.h"
16 #include "llvm/Support/Endian.h"
17 #include "llvm/Support/ErrorHandling.h"
19 using namespace llvm::MachO;
20 using namespace lld::mach_o::normalized;
25 using llvm::support::ulittle32_t;
26 using llvm::support::ulittle64_t;
28 using llvm::support::little32_t;
29 using llvm::support::little64_t;
/// ArchHandler subclass for x86_64 Mach-O.  Classifies normalized relocations
/// from .o files into Reference kinds (see the X86_64Kind enum below), and
/// applies/emits the corresponding fixups when generating output.
/// NOTE(review): this chunk is elided — access specifiers, some member bodies,
/// switch cases and closing braces are not visible here.
class ArchHandler_x86_64 : public ArchHandler {
  ArchHandler_x86_64() = default;
  ~ArchHandler_x86_64() override = default;

  // Table of printable names for each kind value (defined below).
  const Registry::KindStrings *kindStrings() override { return _sKindStrings; }

  Reference::KindArch kindArch() override {
    return Reference::KindArch::x86_64;

  /// Used by GOTPass to locate GOT References
  bool isGOTAccess(const Reference &ref, bool &canBypassGOT) override {
    if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    assert(ref.kindArch() == Reference::KindArch::x86_64);
    switch (ref.kindValue()) {

  /// True only for thread-local-variable accesses (_foo@TLVP loads).
  bool isTLVAccess(const Reference &ref) const override {
    assert(ref.kindNamespace() == Reference::KindNamespace::mach_o);
    assert(ref.kindArch() == Reference::KindArch::x86_64);
    return ref.kindValue() == ripRel32Tlv;

  /// Downgrades a resolved TLV access to a plain rip-relative load.
  void updateReferenceToTLV(const Reference *ref) override {
    assert(ref->kindNamespace() == Reference::KindNamespace::mach_o);
    assert(ref->kindArch() == Reference::KindArch::x86_64);
    assert(ref->kindValue() == ripRel32Tlv);
    // References are handed out const; cast away to retarget the kind.
    const_cast<Reference*>(ref)->setKindValue(ripRel32);

  /// Used by GOTPass to update GOT References
  void updateReferenceToGOT(const Reference *ref, bool targetNowGOT) override {
    assert(ref->kindNamespace() == Reference::KindNamespace::mach_o);
    assert(ref->kindArch() == Reference::KindArch::x86_64);
    switch (ref->kindValue()) {
      assert(targetNowGOT && "target must be GOT");
      // GOT load becomes either a real GOT load or a LEA (GOT bypassed).
      const_cast<Reference *>(ref)
          ->setKindValue(targetNowGOT ? ripRel32 : ripRel32GotLoadNowLea);
      const_cast<Reference *>(ref)->setKindValue(imageOffset);
      llvm_unreachable("unknown GOT reference kind");

  bool needsCompactUnwind() override {

  Reference::KindValue imageOffsetKind() override {

  Reference::KindValue imageOffsetKindIndirect() override {
    return imageOffsetGot;

  Reference::KindValue unwindRefToPersonalityFunctionKind() override {

  Reference::KindValue unwindRefToCIEKind() override {

  Reference::KindValue unwindRefToFunctionKind() override{
    return unwindFDEToFunction;

  Reference::KindValue unwindRefToEhFrameKind() override {
    return unwindInfoToEhFrame;

  Reference::KindValue pointerKind() override {

  uint32_t dwarfCompactUnwindType() override {

  const StubInfo &stubInfo() override { return _sStubInfo; }

  bool isNonCallBranch(const Reference &) override {

  bool isCallSite(const Reference &) override;
  bool isPointer(const Reference &) override;
  bool isPairedReloc(const normalized::Relocation &) override;

  // Decode a single (unpaired) relocation; see definition below.
  llvm::Error getReferenceInfo(const normalized::Relocation &reloc,
                               const DefinedAtom *inAtom,
                               uint32_t offsetInAtom,
                               uint64_t fixupAddress, bool swap,
                               FindAtomBySectionAndAddress atomFromAddress,
                               FindAtomBySymbolIndex atomFromSymbolIndex,
                               Reference::KindValue *kind,
                               const lld::Atom **target,
                               Reference::Addend *addend) override;

  // Decode a SUBTRACTOR/UNSIGNED relocation pair; see definition below.
  getPairReferenceInfo(const normalized::Relocation &reloc1,
                       const normalized::Relocation &reloc2,
                       const DefinedAtom *inAtom,
                       uint32_t offsetInAtom,
                       uint64_t fixupAddress, bool swap, bool scatterable,
                       FindAtomBySectionAndAddress atomFromAddress,
                       FindAtomBySymbolIndex atomFromSymbolIndex,
                       Reference::KindValue *kind,
                       const lld::Atom **target,
                       Reference::Addend *addend) override;

  // C-strings are anonymous atoms; they need a local symbol so section
  // relocations in -r output can point at them.
  bool needsLocalSymbolInRelocatableFile(const DefinedAtom *atom) override {
    return (atom->contentType() == DefinedAtom::typeCString);

  void generateAtomContent(const DefinedAtom &atom, bool relocatable,
                           FindAddressForAtom findAddress,
                           FindAddressForAtom findSectionAddress,
                           llvm::MutableArrayRef<uint8_t> atomContentBuffer) override;

  void appendSectionRelocations(const DefinedAtom &atom,
                                uint64_t atomSectionOffset,
                                const Reference &ref,
                                FindSymbolIndexForAtom symbolIndexForAtom,
                                FindSectionIndexForAtom sectionIndexForAtom,
                                FindAddressForAtom addressForAtom,
                                normalized::Relocations &relocs) override;

  static const Registry::KindStrings _sKindStrings[];
  static const StubInfo _sStubInfo;

  // All x86_64 Reference kinds.  The first group corresponds 1:1 to
  // relocation patterns found in .o files; the second group is synthesized
  // by linker passes (GOT, Stubs, compact-unwind, TLV).
  enum X86_64Kind: Reference::KindValue {
    invalid,               /// for error condition

    // Kinds found in mach-o .o files:
    branch32,              /// ex: call _foo
    ripRel32,              /// ex: movq _foo(%rip), %rax
    ripRel32Minus1,        /// ex: movb $0x12, _foo(%rip)
    ripRel32Minus2,        /// ex: movw $0x1234, _foo(%rip)
    ripRel32Minus4,        /// ex: movl $0x12345678, _foo(%rip)
    ripRel32Anon,          /// ex: movq L1(%rip), %rax
    ripRel32Minus1Anon,    /// ex: movb $0x12, L1(%rip)
    ripRel32Minus2Anon,    /// ex: movw $0x1234, L1(%rip)
    ripRel32Minus4Anon,    /// ex: movw $0x12345678, L1(%rip)
    ripRel32GotLoad,       /// ex: movq  _foo@GOTPCREL(%rip), %rax
    ripRel32Got,           /// ex: pushq _foo@GOTPCREL(%rip)
    ripRel32Tlv,           /// ex: movq  _foo@TLVP(%rip), %rdi
    pointer64,             /// ex: .quad _foo
    pointer64Anon,         /// ex: .quad L1
    delta64,               /// ex: .quad _foo - .
    delta32,               /// ex: .long _foo - .
    delta64Anon,           /// ex: .quad L1 - .
    delta32Anon,           /// ex: .long L1 - .
    negDelta64,            /// ex: .quad . - _foo
    negDelta32,            /// ex: .long . - _foo

    // Kinds introduced by Passes:
    ripRel32GotLoadNowLea, /// Target of GOT load is in linkage unit so
                           ///  "movq  _foo@GOTPCREL(%rip), %rax" can be changed
                           /// to "leaq _foo(%rip), %rax
    lazyPointer,           /// Location contains a lazy pointer.
    lazyImmediateLocation, /// Location contains immediate value used in stub.

    imageOffset,           /// Location contains offset of atom in final image
    imageOffsetGot,        /// Location contains offset of GOT entry for atom in
                           /// final image (typically personality function).
    unwindFDEToFunction,   /// Nearly delta64, but cannot be rematerialized in
                           /// relocatable object (yay for implicit contracts!).
    unwindInfoToEhFrame,   /// Fix low 24 bits of compact unwind encoding to
                           /// refer to __eh_frame entry.
    tlvInitSectionOffset   /// Location contains offset tlv init-value atom
                           /// within the __thread_data section.

  Reference::KindValue kindFromReloc(const normalized::Relocation &reloc);

  // Write the final (linked image) value for a fixup.
  void applyFixupFinal(const Reference &ref, uint8_t *location,
                       uint64_t fixupAddress, uint64_t targetAddress,
                       uint64_t inAtomAddress, uint64_t imageBaseAddress,
                       FindAddressForAtom findSectionAddress);

  // Write the value used in -r (relocatable) output, where a matching
  // relocation record is also emitted.
  void applyFixupRelocatable(const Reference &ref, uint8_t *location,
                             uint64_t fixupAddress,
                             uint64_t targetAddress,
                             uint64_t inAtomAddress);
// Printable name for every X86_64Kind value, used by the Registry for
// diagnostics and YAML round-tripping.  Order need not match the enum;
// lookup is by value.  (Terminating sentinel entry elided from this view.)
const Registry::KindStrings ArchHandler_x86_64::_sKindStrings[] = {
  LLD_KIND_STRING_ENTRY(invalid), LLD_KIND_STRING_ENTRY(branch32),
  LLD_KIND_STRING_ENTRY(ripRel32), LLD_KIND_STRING_ENTRY(ripRel32Minus1),
  LLD_KIND_STRING_ENTRY(ripRel32Minus2), LLD_KIND_STRING_ENTRY(ripRel32Minus4),
  LLD_KIND_STRING_ENTRY(ripRel32Anon),
  LLD_KIND_STRING_ENTRY(ripRel32Minus1Anon),
  LLD_KIND_STRING_ENTRY(ripRel32Minus2Anon),
  LLD_KIND_STRING_ENTRY(ripRel32Minus4Anon),
  LLD_KIND_STRING_ENTRY(ripRel32GotLoad),
  LLD_KIND_STRING_ENTRY(ripRel32GotLoadNowLea),
  LLD_KIND_STRING_ENTRY(ripRel32Got), LLD_KIND_STRING_ENTRY(ripRel32Tlv),
  LLD_KIND_STRING_ENTRY(lazyPointer),
  LLD_KIND_STRING_ENTRY(lazyImmediateLocation),
  LLD_KIND_STRING_ENTRY(pointer64), LLD_KIND_STRING_ENTRY(pointer64Anon),
  LLD_KIND_STRING_ENTRY(delta32), LLD_KIND_STRING_ENTRY(delta64),
  LLD_KIND_STRING_ENTRY(delta32Anon), LLD_KIND_STRING_ENTRY(delta64Anon),
  LLD_KIND_STRING_ENTRY(negDelta64),
  LLD_KIND_STRING_ENTRY(negDelta32),
  LLD_KIND_STRING_ENTRY(imageOffset), LLD_KIND_STRING_ENTRY(imageOffsetGot),
  LLD_KIND_STRING_ENTRY(unwindFDEToFunction),
  LLD_KIND_STRING_ENTRY(unwindInfoToEhFrame),
  LLD_KIND_STRING_ENTRY(tlvInitSectionOffset),
// Describes the code sequences the Stubs pass synthesizes (stub, stub
// helper, common stub helper) and, for each, the byte offset at which a
// Reference patches an operand.  The machine code bytes with 0x00
// placeholders are the operand fields filled in by those References.
// NOTE(review): some size/alignment fields are elided from this view.
const ArchHandler::StubInfo ArchHandler_x86_64::_sStubInfo = {
  // Lazy pointer references
  { Reference::KindArch::x86_64, pointer64, 0, 0 },
  { Reference::KindArch::x86_64, lazyPointer, 0, 0 },

  // GOT pointer to dyld_stub_binder
  { Reference::KindArch::x86_64, pointer64, 0, 0 },

  // x86_64 code alignment 2^1

  // Stub size and code
  { 0xff, 0x25, 0x00, 0x00, 0x00, 0x00 },       // jmp *lazyPointer
  // rip-rel32 operand starts after the 2 opcode bytes.
  { Reference::KindArch::x86_64, ripRel32, 2, 0 },

  // Stub Helper size and code
  { 0x68, 0x00, 0x00, 0x00, 0x00,               // pushq $lazy-info-offset
    0xE9, 0x00, 0x00, 0x00, 0x00 },             // jmp helperhelper
  // imm32 at offset 1 (after push opcode); branch32 at offset 6 (after jmp).
  { Reference::KindArch::x86_64, lazyImmediateLocation, 1, 0 },
  { Reference::KindArch::x86_64, branch32, 6, 0 },

  // Stub helper image cache content type
  DefinedAtom::typeNonLazyPointer,

  // Stub Helper-Common size and code

  // Stub helper alignment
  { 0x4C, 0x8D, 0x1D, 0x00, 0x00, 0x00, 0x00,   // leaq cache(%rip),%r11
    0x41, 0x53,                                 // push %r11
    0xFF, 0x25, 0x00, 0x00, 0x00, 0x00,         // jmp *binder(%rip)
  // leaq operand at offset 3; jmp-indirect operand at offset 11.
  { Reference::KindArch::x86_64, ripRel32, 3, 0 },
  { Reference::KindArch::x86_64, ripRel32, 11, 0 },
// A call site is exactly a branch32 reference (ex: call _foo).
// Non-mach_o references are never call sites (early-return body elided here).
bool ArchHandler_x86_64::isCallSite(const Reference &ref) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  return (ref.kindValue() == branch32);
// True for plain 64-bit data pointers, whether to a named symbol (pointer64)
// or to an anonymous location (pointer64Anon).
bool ArchHandler_x86_64::isPointer(const Reference &ref) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  Reference::KindValue kind = ref.kindValue();
  return (kind == pointer64 || kind == pointer64Anon);
// SUBTRACTOR relocations always come paired with a following UNSIGNED
// relocation; they are decoded together in getPairReferenceInfo().
bool ArchHandler_x86_64::isPairedReloc(const Relocation &reloc) {
  return (reloc.type == X86_64_RELOC_SUBTRACTOR);
// Map one normalized relocation to a Reference kind.  relocPattern() packs
// type | pcrel | extern | length into a single value so each legal
// combination is matched by one case.  Patterns without rExtern target an
// anonymous (section-relative) location and map to the *Anon kinds.
// NOTE(review): return type line, several `return` lines and the default
// case are elided from this chunk.
ArchHandler_x86_64::kindFromReloc(const Relocation &reloc) {
  switch(relocPattern(reloc)) {
  case X86_64_RELOC_BRANCH   | rPcRel | rExtern | rLength4:
  case X86_64_RELOC_SIGNED   | rPcRel | rExtern | rLength4:
  case X86_64_RELOC_SIGNED   | rPcRel |           rLength4:
  case X86_64_RELOC_SIGNED_1 | rPcRel | rExtern | rLength4:
    return ripRel32Minus1;
  case X86_64_RELOC_SIGNED_1 | rPcRel |           rLength4:
    return ripRel32Minus1Anon;
  case X86_64_RELOC_SIGNED_2 | rPcRel | rExtern | rLength4:
    return ripRel32Minus2;
  case X86_64_RELOC_SIGNED_2 | rPcRel |           rLength4:
    return ripRel32Minus2Anon;
  case X86_64_RELOC_SIGNED_4 | rPcRel | rExtern | rLength4:
    return ripRel32Minus4;
  case X86_64_RELOC_SIGNED_4 | rPcRel |           rLength4:
    return ripRel32Minus4Anon;
  case X86_64_RELOC_GOT_LOAD | rPcRel | rExtern | rLength4:
    return ripRel32GotLoad;
  case X86_64_RELOC_GOT      | rPcRel | rExtern | rLength4:
  case X86_64_RELOC_TLV      | rPcRel | rExtern | rLength4:
  case X86_64_RELOC_UNSIGNED          | rExtern | rLength8:
  case X86_64_RELOC_UNSIGNED                    | rLength8:
    return pointer64Anon;
// Decode one (unpaired) relocation into (kind, target, addend).
// For rExtern kinds the target comes from the symbol table; for anonymous
// kinds the target is found by the address the fixup bytes point at.
// NOTE(review): the case labels of the switch are elided from this chunk;
// the bodies below group by the visible kinds.
ArchHandler_x86_64::getReferenceInfo(const Relocation &reloc,
                                     const DefinedAtom *inAtom,
                                     uint32_t offsetInAtom,
                                     uint64_t fixupAddress, bool swap,
                                     FindAtomBySectionAndAddress atomFromAddress,
                                     FindAtomBySymbolIndex atomFromSymbolIndex,
                                     Reference::KindValue *kind,
                                     const lld::Atom **target,
                                     Reference::Addend *addend) {
  *kind = kindFromReloc(reloc);
  if (*kind == invalid)
    return llvm::make_error<GenericError>("unknown type");
  // fixupContent points at the instruction/data bytes being fixed up.
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
  uint64_t targetAddress;
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
    *addend = *(const little32_t *)fixupContent;
    return llvm::Error::success();
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
    // The stored value is biased by the SIGNED_N displacement; undo it.
    *addend = (int32_t)*(const little32_t *)fixupContent + 1;
    return llvm::Error::success();
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
    *addend = (int32_t)*(const little32_t *)fixupContent + 2;
    return llvm::Error::success();
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
    *addend = (int32_t)*(const little32_t *)fixupContent + 4;
    return llvm::Error::success();
    // Anonymous rip-relative: target address is pc-after-instruction plus
    // displacement; +4/+5/+6/+8 accounts for bytes from the fixup to the
    // end of the instruction for each SIGNED_N variant.
    targetAddress = fixupAddress + 4 + *(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case ripRel32Minus1Anon:
    targetAddress = fixupAddress + 5 + *(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case ripRel32Minus2Anon:
    targetAddress = fixupAddress + 6 + *(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case ripRel32Minus4Anon:
    targetAddress = fixupAddress + 8 + *(const little32_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  case ripRel32GotLoad:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
    *addend = *(const little32_t *)fixupContent;
    return llvm::Error::success();
  case tlvInitSectionOffset:
    if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
    // If this is the 3rd pointer of a tlv-thunk (i.e. the pointer to the TLV's
    // initial value) we need to handle it specially.
    if (inAtom->contentType() == DefinedAtom::typeThunkTLV &&
        offsetInAtom == 16) {
      *kind = tlvInitSectionOffset;
      assert(*addend == 0 && "TLV-init has non-zero addend?");
    *addend = *(const little64_t *)fixupContent;
    return llvm::Error::success();
    targetAddress = *(const little64_t *)fixupContent;
    return atomFromAddress(reloc.symbol, targetAddress, target, addend);
  llvm_unreachable("bad reloc kind");
// Decode a SUBTRACTOR + UNSIGNED relocation pair (a pointer difference).
// reloc1 (SUBTRACTOR) names the value being subtracted ("from"); reloc2
// (UNSIGNED) names the value added.  The switch key packs both reloc
// patterns into one 32-bit value (reloc1 pattern in the high 16 bits).
// NOTE(review): some case labels / kind assignments are elided from this
// chunk.
ArchHandler_x86_64::getPairReferenceInfo(const normalized::Relocation &reloc1,
                                         const normalized::Relocation &reloc2,
                                         const DefinedAtom *inAtom,
                                         uint32_t offsetInAtom,
                                         uint64_t fixupAddress, bool swap,
                                         FindAtomBySectionAndAddress atomFromAddress,
                                         FindAtomBySymbolIndex atomFromSymbolIndex,
                                         Reference::KindValue *kind,
                                         const lld::Atom **target,
                                         Reference::Addend *addend) {
  const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
  uint64_t targetAddress;
  // The atom named by the SUBTRACTOR half.
  const lld::Atom *fromTarget;
  if (auto ec = atomFromSymbolIndex(reloc1.symbol, &fromTarget))
  switch(relocPattern(reloc1) << 16 | relocPattern(reloc2)) {
  case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength8) << 16 |
        X86_64_RELOC_UNSIGNED    | rExtern | rLength8): {
    // .quad extern - extern
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
    uint64_t encodedAddend = (int64_t)*(const little64_t *)fixupContent;
    if (inAtom == fromTarget) {
      // Subtracting this atom itself: a forward delta (target - here).
      if (inAtom->contentType() == DefinedAtom::typeCFI)
        *kind = unwindFDEToFunction;
      // Stored value is relative to the atom start; fold in our offset.
      *addend = encodedAddend + offsetInAtom;
    } else if (inAtom == *target) {
      // Adding this atom itself: a negative delta; swap target to "from".
      *addend = encodedAddend - offsetInAtom;
      *target = fromTarget;
      return llvm::make_error<GenericError>("Invalid pointer diff");
    return llvm::Error::success();
  case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength4) << 16 |
        X86_64_RELOC_UNSIGNED    | rExtern | rLength4): {
    // .long extern - extern  (same logic as above, 32-bit encoded value)
    if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
    uint32_t encodedAddend = (int32_t)*(const little32_t *)fixupContent;
    if (inAtom == fromTarget) {
      *addend = encodedAddend + offsetInAtom;
    } else if (inAtom == *target) {
      *addend = encodedAddend - offsetInAtom;
      *target = fromTarget;
      return llvm::make_error<GenericError>("Invalid pointer diff");
    return llvm::Error::success();
  case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength8) << 16 |
        X86_64_RELOC_UNSIGNED              | rLength8):
    // .quad label - . : target is anonymous, located by address.
    if (fromTarget != inAtom)
      return llvm::make_error<GenericError>("pointer diff not in base atom");
    targetAddress = offsetInAtom + (int64_t)*(const little64_t *)fixupContent;
    return atomFromAddress(reloc2.symbol, targetAddress, target, addend);
  case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength4) << 16 |
        X86_64_RELOC_UNSIGNED              | rLength4):
    // .long label - . : 32-bit variant of the above.
    if (fromTarget != inAtom)
      return llvm::make_error<GenericError>("pointer diff not in base atom");
    targetAddress = offsetInAtom + (int32_t)*(const little32_t *)fixupContent;
    return atomFromAddress(reloc2.symbol, targetAddress, target, addend);
  return llvm::make_error<GenericError>("unknown pair");
// Copy an atom's raw bytes into the output buffer, then apply every fixup
// in place.  Chooses the relocatable or final fixup path per reference
// (the branch condition on `relocatable` is elided from this chunk).
void ArchHandler_x86_64::generateAtomContent(
    const DefinedAtom &atom, bool relocatable, FindAddressForAtom findAddress,
    FindAddressForAtom findSectionAddress, uint64_t imageBaseAddress,
    llvm::MutableArrayRef<uint8_t> atomContentBuffer) {
  // Start from the unmodified bytes of the atom.
  std::copy(atom.rawContent().begin(), atom.rawContent().end(),
            atomContentBuffer.begin());
  for (const Reference *ref : atom) {
    uint32_t offset = ref->offsetInAtom();
    const Atom *target = ref->target();
    // Undefined (external) targets have no address yet; leave 0.
    uint64_t targetAddress = 0;
    if (isa<DefinedAtom>(target))
      targetAddress = findAddress(*target);
    uint64_t atomAddress = findAddress(atom);
    uint64_t fixupAddress = atomAddress + offset;
      applyFixupRelocatable(*ref, &atomContentBuffer[offset],
                            fixupAddress, targetAddress,
      applyFixupFinal(*ref, &atomContentBuffer[offset],
                      fixupAddress, targetAddress,
                      atomAddress, imageBaseAddress, findSectionAddress);
// Write the fully-resolved value of one fixup into the final linked image.
// loc32/loc64 are little-endian views of the fixup bytes; rip-relative
// forms are relative to the end of the instruction (+4/+5/+6/+8 bias).
// NOTE(review): many case labels and break statements are elided from this
// chunk; bodies below are grouped under their last visible label.
void ArchHandler_x86_64::applyFixupFinal(
    const Reference &ref, uint8_t *loc, uint64_t fixupAddress,
    uint64_t targetAddress, uint64_t inAtomAddress, uint64_t imageBaseAddress,
    FindAddressForAtom findSectionAddress) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
  switch (static_cast<X86_64Kind>(ref.kindValue())) {
  case ripRel32GotLoad:
    *loc32 = targetAddress - (fixupAddress + 4) + ref.addend();
    *loc64 = targetAddress + ref.addend();
  case tlvInitSectionOffset:
    // Offset of the init-value atom within its section.
    *loc64 = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
  case ripRel32Minus1Anon:
    *loc32 = targetAddress - (fixupAddress + 5) + ref.addend();
  case ripRel32Minus2Anon:
    *loc32 = targetAddress - (fixupAddress + 6) + ref.addend();
  case ripRel32Minus4Anon:
    *loc32 = targetAddress - (fixupAddress + 8) + ref.addend();
    *loc32 = targetAddress - fixupAddress + ref.addend();
  case unwindFDEToFunction:
    *loc64 = targetAddress - fixupAddress + ref.addend();
  case ripRel32GotLoadNowLea:
    // Change MOVQ to LEA
    // Sanity-check the opcode byte really is MOVQ (0x8B) before patching
    // (the byte rewrite itself is on an elided line).
    assert(loc[-2] == 0x8B);
    *loc32 = targetAddress - (fixupAddress + 4) + ref.addend();
    // negDelta forms store (here - target).
    *loc64 = fixupAddress - targetAddress + ref.addend();
    *loc32 = fixupAddress - targetAddress + ref.addend();
  case lazyImmediateLocation:
    *loc32 = ref.addend();
    // Offset of the atom from the image base.
    *loc32 = (targetAddress - imageBaseAddress) + ref.addend();
  case unwindInfoToEhFrame: {
    // Patch only the low 24 bits of the compact-unwind encoding with the
    // __eh_frame section-relative offset; keep the high 8 bits.
    uint64_t val = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
    assert(val < 0xffffffU && "offset in __eh_frame too large");
    *loc32 = (*loc32 & 0xff000000U) | val;
    // Fall into llvm_unreachable().
  llvm_unreachable("invalid x86_64 Reference Kind");
// Write the fixup value for -r (relocatable) output.  For extern kinds only
// the addend is stored (the relocation record carries the symbol); for
// anonymous kinds the section-relative pc-relative value is stored.
// Pass-synthesized kinds must not appear here.
// NOTE(review): many case labels and break statements are elided from this
// chunk; bodies below are grouped under their last visible label.
void ArchHandler_x86_64::applyFixupRelocatable(const Reference &ref,
                                               uint64_t fixupAddress,
                                               uint64_t targetAddress,
                                               uint64_t inAtomAddress) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
  switch (static_cast<X86_64Kind>(ref.kindValue())) {
  case ripRel32GotLoad:
    *loc32 = ref.addend();
    *loc32 = (targetAddress - (fixupAddress + 4)) + ref.addend();
  case tlvInitSectionOffset:
    *loc64 = ref.addend();
    *loc64 = targetAddress + ref.addend();
    // SIGNED_N kinds store the addend biased by -N.
    *loc32 = ref.addend() - 1;
  case ripRel32Minus1Anon:
    *loc32 = (targetAddress - (fixupAddress + 5)) + ref.addend();
    *loc32 = ref.addend() - 2;
  case ripRel32Minus2Anon:
    *loc32 = (targetAddress - (fixupAddress + 6)) + ref.addend();
    *loc32 = ref.addend() - 4;
  case ripRel32Minus4Anon:
    *loc32 = (targetAddress - (fixupAddress + 8)) + ref.addend();
    *loc32 = ref.addend() + inAtomAddress - fixupAddress;
    // The value we write here should be the the delta to the target
    // after taking in to account the difference from the fixup back to the
    // last defined label
    // Lfixup: .quad Ltarget - .
    // Then we want to encode the value (Ltarget + addend) - (LFixup - _base)
    *loc32 = (targetAddress + ref.addend()) - (fixupAddress - inAtomAddress);
    *loc64 = ref.addend() + inAtomAddress - fixupAddress;
    // The value we write here should be the the delta to the target
    // after taking in to account the difference from the fixup back to the
    // last defined label
    // Lfixup: .quad Ltarget - .
    // Then we want to encode the value (Ltarget + addend) - (LFixup - _base)
    *loc64 = (targetAddress + ref.addend()) - (fixupAddress - inAtomAddress);
    // negDelta forms: store (here - atom start) plus addend.
    *loc64 = ref.addend() + fixupAddress - inAtomAddress;
    *loc32 = ref.addend() + fixupAddress - inAtomAddress;
  case ripRel32GotLoadNowLea:
    llvm_unreachable("ripRel32GotLoadNowLea implies GOT pass was run");
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
  case unwindInfoToEhFrame:
    llvm_unreachable("fixup implies __unwind_info");
  case unwindFDEToFunction:
    // Do nothing for now
    // Fall into llvm_unreachable().
  llvm_unreachable("unknown x86_64 Reference Kind");
// Emit the normalized relocation record(s) for one reference when writing a
// relocatable (.o) output file — the inverse of kindFromReloc /
// getPairReferenceInfo.  Delta kinds emit a SUBTRACTOR + UNSIGNED pair.
// NOTE(review): many case labels and break statements are elided from this
// chunk; appendReloc calls below sit under their last visible label.
void ArchHandler_x86_64::appendSectionRelocations(
                                   const DefinedAtom &atom,
                                   uint64_t atomSectionOffset,
                                   const Reference &ref,
                                   FindSymbolIndexForAtom symbolIndexForAtom,
                                   FindSectionIndexForAtom sectionIndexForAtom,
                                   FindAddressForAtom addressForAtom,
                                   normalized::Relocations &relocs) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  uint32_t sectionOffset = atomSectionOffset + ref.offsetInAtom();
  switch (static_cast<X86_64Kind>(ref.kindValue())) {
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_BRANCH | rPcRel | rExtern | rLength4);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED | rPcRel | rExtern | rLength4 );
    // Anonymous targets are addressed by section index, not symbol.
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED | rPcRel | rLength4 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_GOT | rPcRel | rExtern | rLength4 );
  case ripRel32GotLoad:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_GOT_LOAD | rPcRel | rExtern | rLength4 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_TLV | rPcRel | rExtern | rLength4 );
  case tlvInitSectionOffset:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED | rExtern | rLength8);
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED | rLength8);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_1 | rPcRel | rExtern | rLength4 );
  case ripRel32Minus1Anon:
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_1 | rPcRel | rLength4 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_2 | rPcRel | rExtern | rLength4 );
  case ripRel32Minus2Anon:
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_2 | rPcRel | rLength4 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_4 | rPcRel | rExtern | rLength4 );
  case ripRel32Minus4Anon:
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SIGNED_4 | rPcRel | rLength4 );
    // delta kinds: SUBTRACTOR names this atom, UNSIGNED names the target.
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength4 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED  | rExtern | rLength4 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength4 );
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED | rLength4 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength8 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED  | rExtern | rLength8 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength8 );
    appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_UNSIGNED | rLength8 );
  case unwindFDEToFunction:
  case unwindInfoToEhFrame:
    // negDelta kinds: operand order is reversed (target first, atom second).
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength4 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_UNSIGNED   | rExtern | rLength4 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                X86_64_RELOC_SUBTRACTOR | rExtern | rLength8 );
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                X86_64_RELOC_UNSIGNED   | rExtern | rLength8 );
  case ripRel32GotLoadNowLea:
    llvm_unreachable("ripRel32GotLoadNowLea implies GOT pass was run");
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
    llvm_unreachable("__unwind_info references should have been resolved");
    // Fall into llvm_unreachable().
  llvm_unreachable("unknown x86_64 Reference Kind");
// Factory entry point: lld core calls this to instantiate the x86_64
// handler (constructor is non-public, hence no make_unique here).
std::unique_ptr<mach_o::ArchHandler> ArchHandler::create_x86_64() {
  return std::unique_ptr<mach_o::ArchHandler>(new ArchHandler_x86_64());
860 } // namespace mach_o