1 //===- lib/FileFormat/MachO/ArchHandler_x86_64.cpp ------------------------===//
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "ArchHandler.h"
12 #include "MachONormalizedFileBinaryUtils.h"
13 #include "llvm/ADT/StringRef.h"
14 #include "llvm/ADT/StringSwitch.h"
15 #include "llvm/ADT/Triple.h"
16 #include "llvm/Support/Endian.h"
17 #include "llvm/Support/ErrorHandling.h"
19 using namespace llvm::MachO;
20 using namespace lld::mach_o::normalized;
25 using llvm::support::ulittle32_t;
26 using llvm::support::ulittle64_t;
28 using llvm::support::little32_t;
29 using llvm::support::little64_t;
// Arch-specific handler for Mach-O x86_64: classifies relocations into
// Reference kinds, answers GOT/TLV queries for the optimization passes,
// and applies fixups (see out-of-line definitions below).
// NOTE(review): this listing is missing physical lines (embedded original
// line numbers jump); several case labels / returns / braces are elided.
31 class ArchHandler_x86_64 : public ArchHandler {
33 ArchHandler_x86_64() = default;
34 ~ArchHandler_x86_64() override = default;
// Table mapping kind values to human-readable names (defined below).
36 const Registry::KindStrings *kindStrings() override { return _sKindStrings; }
38 Reference::KindArch kindArch() override {
39 return Reference::KindArch::x86_64;
42 /// Used by GOTPass to locate GOT References
43 bool isGOTAccess(const Reference &ref, bool &canBypassGOT) override {
// Only mach_o-namespace kinds are ours; other namespaces are not GOT loads.
44 if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
46 assert(ref.kindArch() == Reference::KindArch::x86_64);
// Dispatch on the x86_64 kind; the case labels are not visible in this
// excerpt — confirm against the full file.
47 switch (ref.kindValue()) {
// True only for the @TLVP-style rip-relative access kind.
62 bool isTLVAccess(const Reference &ref) const override {
63 assert(ref.kindNamespace() == Reference::KindNamespace::mach_o);
64 assert(ref.kindArch() == Reference::KindArch::x86_64);
65 return ref.kindValue() == ripRel32Tlv;
// Rewrite a TLV access into a plain rip-relative load once the TLV pass
// has resolved it.
68 void updateReferenceToTLV(const Reference *ref) override {
69 assert(ref->kindNamespace() == Reference::KindNamespace::mach_o);
70 assert(ref->kindArch() == Reference::KindArch::x86_64);
71 assert(ref->kindValue() == ripRel32Tlv);
// const_cast is the established pattern here for pass-driven kind rewrites.
72 const_cast<Reference*>(ref)->setKindValue(ripRel32);
75 /// Used by GOTPass to update GOT References
76 void updateReferenceToGOT(const Reference *ref, bool targetNowGOT) override {
77 assert(ref->kindNamespace() == Reference::KindNamespace::mach_o);
78 assert(ref->kindArch() == Reference::KindArch::x86_64);
80 switch (ref->kindValue()) {
82 assert(targetNowGOT && "target must be GOT");
// GOT-load either stays a rip-relative load of the GOT slot, or is
// relaxed to a LEA of the target when the target is in the linkage unit.
84 const_cast<Reference *>(ref)
85 ->setKindValue(targetNowGOT ? ripRel32 : ripRel32GotLoadNowLea);
88 const_cast<Reference *>(ref)->setKindValue(imageOffset);
91 llvm_unreachable("unknown GOT reference kind");
// Small accessor overrides telling generic passes which x86_64 kind value
// plays each architecture-neutral role. Several return statements are
// elided in this listing (embedded line numbers jump).
95 bool needsCompactUnwind() override {
99 Reference::KindValue imageOffsetKind() override {
103 Reference::KindValue imageOffsetKindIndirect() override {
104 return imageOffsetGot;
107 Reference::KindValue unwindRefToPersonalityFunctionKind() override {
111 Reference::KindValue unwindRefToCIEKind() override {
115 Reference::KindValue unwindRefToFunctionKind() override{
116 return unwindFDEToFunction;
119 Reference::KindValue lazyImmediateLocationKind() override {
120 return lazyImmediateLocation;
123 Reference::KindValue unwindRefToEhFrameKind() override {
124 return unwindInfoToEhFrame;
127 Reference::KindValue pointerKind() override {
131 uint32_t dwarfCompactUnwindType() override {
// Stub layout description (lazy binding) for this arch; defined below.
135 const StubInfo &stubInfo() override { return _sStubInfo; }
137 bool isNonCallBranch(const Reference &) override {
// Relocation-parsing entry points; definitions appear later in the file.
141 bool isCallSite(const Reference &) override;
142 bool isPointer(const Reference &) override;
143 bool isPairedReloc(const normalized::Relocation &) override;
// Convert one normalized relocation into (kind, target atom, addend).
145 llvm::Error getReferenceInfo(const normalized::Relocation &reloc,
146 const DefinedAtom *inAtom,
147 uint32_t offsetInAtom,
148 uint64_t fixupAddress, bool swap,
149 FindAtomBySectionAndAddress atomFromAddress,
150 FindAtomBySymbolIndex atomFromSymbolIndex,
151 Reference::KindValue *kind,
152 const lld::Atom **target,
153 Reference::Addend *addend) override;
// Same for a SUBTRACTOR/UNSIGNED relocation pair (pointer differences).
155 getPairReferenceInfo(const normalized::Relocation &reloc1,
156 const normalized::Relocation &reloc2,
157 const DefinedAtom *inAtom,
158 uint32_t offsetInAtom,
159 uint64_t fixupAddress, bool swap, bool scatterable,
160 FindAtomBySectionAndAddress atomFromAddress,
161 FindAtomBySymbolIndex atomFromSymbolIndex,
162 Reference::KindValue *kind,
163 const lld::Atom **target,
164 Reference::Addend *addend) override;
// C-strings keep a local symbol so anonymous-content relocs can refer to
// them in relocatable output.
166 bool needsLocalSymbolInRelocatableFile(const DefinedAtom *atom) override {
167 return (atom->contentType() == DefinedAtom::typeCString);
// Copy atom content into the output buffer and apply all fixups
// (relocatable vs. final-image flavor); defined below.
170 void generateAtomContent(const DefinedAtom &atom, bool relocatable,
171 FindAddressForAtom findAddress,
172 FindAddressForAtom findSectionAddress,
174 llvm::MutableArrayRef<uint8_t> atomContentBuffer) override;
// Emit section relocations for one Reference when writing a .o; defined
// below.
176 void appendSectionRelocations(const DefinedAtom &atom,
177 uint64_t atomSectionOffset,
178 const Reference &ref,
179 FindSymbolIndexForAtom symbolIndexForAtom,
180 FindSectionIndexForAtom sectionIndexForAtom,
181 FindAddressForAtom addressForAtom,
182 normalized::Relocations &relocs) override;
// Static tables and the per-arch Reference kind enumeration.
185 static const Registry::KindStrings _sKindStrings[];
186 static const StubInfo _sStubInfo;
// Kind values are split into: kinds read directly from .o relocations,
// and kinds synthesized by linker passes (GOT/Stubs/unwind).
188 enum X86_64Kind: Reference::KindValue {
189 invalid, /// for error condition
191 // Kinds found in mach-o .o files:
192 branch32, /// ex: call _foo
193 ripRel32, /// ex: movq _foo(%rip), %rax
194 ripRel32Minus1, /// ex: movb $0x12, _foo(%rip)
195 ripRel32Minus2, /// ex: movw $0x1234, _foo(%rip)
196 ripRel32Minus4, /// ex: movl $0x12345678, _foo(%rip)
197 ripRel32Anon, /// ex: movq L1(%rip), %rax
198 ripRel32Minus1Anon, /// ex: movb $0x12, L1(%rip)
199 ripRel32Minus2Anon, /// ex: movw $0x1234, L1(%rip)
200 ripRel32Minus4Anon, /// ex: movw $0x12345678, L1(%rip)
201 ripRel32GotLoad, /// ex: movq _foo@GOTPCREL(%rip), %rax
202 ripRel32Got, /// ex: pushq _foo@GOTPCREL(%rip)
203 ripRel32Tlv, /// ex: movq _foo@TLVP(%rip), %rdi
204 pointer64, /// ex: .quad _foo
205 pointer64Anon, /// ex: .quad L1
206 delta64, /// ex: .quad _foo - .
207 delta32, /// ex: .long _foo - .
208 delta64Anon, /// ex: .quad L1 - .
209 delta32Anon, /// ex: .long L1 - .
210 negDelta64, /// ex: .quad . - _foo
211 negDelta32, /// ex: .long . - _foo
213 // Kinds introduced by Passes:
214 ripRel32GotLoadNowLea, /// Target of GOT load is in linkage unit so
215 /// "movq _foo@GOTPCREL(%rip), %rax" can be changed
216 /// to "leaq _foo(%rip), %rax
217 lazyPointer, /// Location contains a lazy pointer.
218 lazyImmediateLocation, /// Location contains immediate value used in stub.
220 imageOffset, /// Location contains offset of atom in final image
221 imageOffsetGot, /// Location contains offset of GOT entry for atom in
222 /// final image (typically personality function).
223 unwindFDEToFunction, /// Nearly delta64, but cannot be rematerialized in
224 /// relocatable object (yay for implicit contracts!).
225 unwindInfoToEhFrame, /// Fix low 24 bits of compact unwind encoding to
226 /// refer to __eh_frame entry.
227 tlvInitSectionOffset /// Location contains offset tlv init-value atom
228 /// within the __thread_data section.
// Private helpers used by the public entry points above.
// kindFromReloc: map a single (unpaired) relocation to a kind value.
231 Reference::KindValue kindFromReloc(const normalized::Relocation &reloc);
// applyFixupFinal: patch bytes for a final linked image (needs image base
// and section addresses for imageOffset/unwind kinds).
233 void applyFixupFinal(const Reference &ref, uint8_t *location,
234 uint64_t fixupAddress, uint64_t targetAddress,
235 uint64_t inAtomAddress, uint64_t imageBaseAddress,
236 FindAddressForAtom findSectionAddress);
// applyFixupRelocatable: patch bytes for -r output, where relocations
// carry most of the target information.
238 void applyFixupRelocatable(const Reference &ref, uint8_t *location,
239 uint64_t fixupAddress,
240 uint64_t targetAddress,
241 uint64_t inAtomAddress);
// Name table for the X86_64Kind values, used for diagnostics/YAML.
// Keep entries in sync with the X86_64Kind enum above.
244 const Registry::KindStrings ArchHandler_x86_64::_sKindStrings[] = {
245 LLD_KIND_STRING_ENTRY(invalid), LLD_KIND_STRING_ENTRY(branch32),
246 LLD_KIND_STRING_ENTRY(ripRel32), LLD_KIND_STRING_ENTRY(ripRel32Minus1),
247 LLD_KIND_STRING_ENTRY(ripRel32Minus2), LLD_KIND_STRING_ENTRY(ripRel32Minus4),
248 LLD_KIND_STRING_ENTRY(ripRel32Anon),
249 LLD_KIND_STRING_ENTRY(ripRel32Minus1Anon),
250 LLD_KIND_STRING_ENTRY(ripRel32Minus2Anon),
251 LLD_KIND_STRING_ENTRY(ripRel32Minus4Anon),
252 LLD_KIND_STRING_ENTRY(ripRel32GotLoad),
253 LLD_KIND_STRING_ENTRY(ripRel32GotLoadNowLea),
254 LLD_KIND_STRING_ENTRY(ripRel32Got), LLD_KIND_STRING_ENTRY(ripRel32Tlv),
255 LLD_KIND_STRING_ENTRY(lazyPointer),
256 LLD_KIND_STRING_ENTRY(lazyImmediateLocation),
257 LLD_KIND_STRING_ENTRY(pointer64), LLD_KIND_STRING_ENTRY(pointer64Anon),
258 LLD_KIND_STRING_ENTRY(delta32), LLD_KIND_STRING_ENTRY(delta64),
259 LLD_KIND_STRING_ENTRY(delta32Anon), LLD_KIND_STRING_ENTRY(delta64Anon),
260 LLD_KIND_STRING_ENTRY(negDelta64),
261 LLD_KIND_STRING_ENTRY(negDelta32),
262 LLD_KIND_STRING_ENTRY(imageOffset), LLD_KIND_STRING_ENTRY(imageOffsetGot),
263 LLD_KIND_STRING_ENTRY(unwindFDEToFunction),
264 LLD_KIND_STRING_ENTRY(unwindInfoToEhFrame),
265 LLD_KIND_STRING_ENTRY(tlvInitSectionOffset),
// Stub (lazy-binding) layout for x86_64: byte templates for the stub,
// stub-helper, and helper-common, plus the References (kind, offset in
// template, addend) that patch their zeroed displacement fields.
// NOTE(review): some fields between entries are elided in this listing.
269 const ArchHandler::StubInfo ArchHandler_x86_64::_sStubInfo = {
272 // Lazy pointer references
273 { Reference::KindArch::x86_64, pointer64, 0, 0 },
274 { Reference::KindArch::x86_64, lazyPointer, 0, 0 },
276 // GOT pointer to dyld_stub_binder
277 { Reference::KindArch::x86_64, pointer64, 0, 0 },
279 // x86_64 code alignment 2^1
282 // Stub size and code
// 6-byte stub: indirect jump through the lazy pointer; ripRel32 fixup at
// offset 2 fills the 32-bit displacement.
284 { 0xff, 0x25, 0x00, 0x00, 0x00, 0x00 }, // jmp *lazyPointer
285 { Reference::KindArch::x86_64, ripRel32, 2, 0 },
288 // Stub Helper size and code
// 10-byte helper: push the lazy-binding-info offset, then jump to the
// common helper; fixups at offsets 1 (immediate) and 6 (branch disp).
290 { 0x68, 0x00, 0x00, 0x00, 0x00, // pushq $lazy-info-offset
291 0xE9, 0x00, 0x00, 0x00, 0x00 }, // jmp helperhelper
292 { Reference::KindArch::x86_64, lazyImmediateLocation, 1, 0 },
293 { Reference::KindArch::x86_64, branch32, 6, 0 },
295 // Stub helper image cache content type
296 DefinedAtom::typeNonLazyPointer,
298 // Stub Helper-Common size and code
300 // Stub helper alignment
// Common tail: load the image cache pointer, push it, and jump through
// the dyld_stub_binder GOT slot; rip-relative fixups at offsets 3 and 11.
302 { 0x4C, 0x8D, 0x1D, 0x00, 0x00, 0x00, 0x00, // leaq cache(%rip),%r11
303 0x41, 0x53, // push %r11
304 0xFF, 0x25, 0x00, 0x00, 0x00, 0x00, // jmp *binder(%rip)
306 { Reference::KindArch::x86_64, ripRel32, 3, 0 },
308 { Reference::KindArch::x86_64, ripRel32, 11, 0 },
// A reference is a call site iff it is a mach_o-namespace branch32.
// (The guard's early return is elided in this listing.)
313 bool ArchHandler_x86_64::isCallSite(const Reference &ref) {
314 if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
316 assert(ref.kindArch() == Reference::KindArch::x86_64);
317 return (ref.kindValue() == branch32);
// A reference is a pointer iff it is a 64-bit pointer kind (named or
// anonymous). (The guard's early return is elided in this listing.)
320 bool ArchHandler_x86_64::isPointer(const Reference &ref) {
321 if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
323 assert(ref.kindArch() == Reference::KindArch::x86_64);
324 Reference::KindValue kind = ref.kindValue();
325 return (kind == pointer64 || kind == pointer64Anon);
// SUBTRACTOR relocations always come paired with a following UNSIGNED
// relocation; everything else is standalone.
328 bool ArchHandler_x86_64::isPairedReloc(const Relocation &reloc) {
329 return (reloc.type == X86_64_RELOC_SUBTRACTOR);
// Map one unpaired relocation record to a Reference kind by matching the
// packed (type | pcRel | extern | length) pattern. Several return
// statements for the early cases are elided in this listing.
333 ArchHandler_x86_64::kindFromReloc(const Relocation &reloc) {
334 switch(relocPattern(reloc)) {
335 case X86_64_RELOC_BRANCH | rPcRel | rExtern | rLength4:
337 case X86_64_RELOC_SIGNED | rPcRel | rExtern | rLength4:
// Non-extern SIGNED targets an anonymous (section-addressed) location.
339 case X86_64_RELOC_SIGNED | rPcRel | rLength4:
341 case X86_64_RELOC_SIGNED_1 | rPcRel | rExtern | rLength4:
342 return ripRel32Minus1;
343 case X86_64_RELOC_SIGNED_1 | rPcRel | rLength4:
344 return ripRel32Minus1Anon;
345 case X86_64_RELOC_SIGNED_2 | rPcRel | rExtern | rLength4:
346 return ripRel32Minus2;
347 case X86_64_RELOC_SIGNED_2 | rPcRel | rLength4:
348 return ripRel32Minus2Anon;
349 case X86_64_RELOC_SIGNED_4 | rPcRel | rExtern | rLength4:
350 return ripRel32Minus4;
351 case X86_64_RELOC_SIGNED_4 | rPcRel | rLength4:
352 return ripRel32Minus4Anon;
353 case X86_64_RELOC_GOT_LOAD | rPcRel | rExtern | rLength4:
354 return ripRel32GotLoad;
355 case X86_64_RELOC_GOT | rPcRel | rExtern | rLength4:
357 case X86_64_RELOC_TLV | rPcRel | rExtern | rLength4:
359 case X86_64_RELOC_UNSIGNED | rExtern | rLength8:
361 case X86_64_RELOC_UNSIGNED | rLength8:
362 return pointer64Anon;
// Decode one relocation into (*kind, *target, *addend). Named (extern)
// relocs resolve the target by symbol index and read the addend from the
// fixup bytes; anonymous relocs compute a target address from the encoded
// displacement and resolve it by section+address. The switch's case
// labels for the earlier kinds are elided in this listing.
369 ArchHandler_x86_64::getReferenceInfo(const Relocation &reloc,
370 const DefinedAtom *inAtom,
371 uint32_t offsetInAtom,
372 uint64_t fixupAddress, bool swap,
373 FindAtomBySectionAndAddress atomFromAddress,
374 FindAtomBySymbolIndex atomFromSymbolIndex,
375 Reference::KindValue *kind,
376 const lld::Atom **target,
377 Reference::Addend *addend) {
378 *kind = kindFromReloc(reloc);
379 if (*kind == invalid)
380 return llvm::make_error<GenericError>("unknown type");
// Fixup bytes live inside the atom's raw content at the given offset.
381 const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
382 uint64_t targetAddress;
386 if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
388 *addend = *(const little32_t *)fixupContent;
389 return llvm::Error::success();
// SIGNED_N variants: the instruction's immediate follows the fixup, so
// the encoded value is biased by N; undo it here (+1/+2/+4).
391 if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
393 *addend = (int32_t)*(const little32_t *)fixupContent + 1;
394 return llvm::Error::success();
396 if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
398 *addend = (int32_t)*(const little32_t *)fixupContent + 2;
399 return llvm::Error::success();
401 if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
403 *addend = (int32_t)*(const little32_t *)fixupContent + 4;
404 return llvm::Error::success();
// Anonymous rip-relative: target = next-instruction address + disp,
// where "next" is fixup + 4 plus the SIGNED_N bias (5/6/8 below).
406 targetAddress = fixupAddress + 4 + *(const little32_t *)fixupContent;
407 return atomFromAddress(reloc.symbol, targetAddress, target, addend);
408 case ripRel32Minus1Anon:
409 targetAddress = fixupAddress + 5 + *(const little32_t *)fixupContent;
410 return atomFromAddress(reloc.symbol, targetAddress, target, addend);
411 case ripRel32Minus2Anon:
412 targetAddress = fixupAddress + 6 + *(const little32_t *)fixupContent;
413 return atomFromAddress(reloc.symbol, targetAddress, target, addend);
414 case ripRel32Minus4Anon:
415 targetAddress = fixupAddress + 8 + *(const little32_t *)fixupContent;
416 return atomFromAddress(reloc.symbol, targetAddress, target, addend);
417 case ripRel32GotLoad:
420 if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
422 *addend = *(const little32_t *)fixupContent;
423 return llvm::Error::success();
424 case tlvInitSectionOffset:
426 if (auto ec = atomFromSymbolIndex(reloc.symbol, target))
428 // If this is the 3rd pointer of a tlv-thunk (i.e. the pointer to the TLV's
429 // initial value) we need to handle it specially.
430 if (inAtom->contentType() == DefinedAtom::typeThunkTLV &&
431 offsetInAtom == 16) {
432 *kind = tlvInitSectionOffset;
433 assert(*addend == 0 && "TLV-init has non-zero addend?");
435 *addend = *(const little64_t *)fixupContent;
436 return llvm::Error::success();
// Anonymous 64-bit pointer: the stored value is the target's address.
438 targetAddress = *(const little64_t *)fixupContent;
439 return atomFromAddress(reloc.symbol, targetAddress, target, addend);
441 llvm_unreachable("bad reloc kind");
// Decode a SUBTRACTOR/UNSIGNED relocation pair (pointer difference
// "A - B"). reloc1 names the subtracted atom; reloc2 names (or
// section-addresses) the added atom. The kind assignments for the
// delta/negDelta branches are elided in this listing.
446 ArchHandler_x86_64::getPairReferenceInfo(const normalized::Relocation &reloc1,
447 const normalized::Relocation &reloc2,
448 const DefinedAtom *inAtom,
449 uint32_t offsetInAtom,
450 uint64_t fixupAddress, bool swap,
452 FindAtomBySectionAndAddress atomFromAddress,
453 FindAtomBySymbolIndex atomFromSymbolIndex,
454 Reference::KindValue *kind,
455 const lld::Atom **target,
456 Reference::Addend *addend) {
457 const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom];
458 uint64_t targetAddress;
459 const lld::Atom *fromTarget;
460 if (auto ec = atomFromSymbolIndex(reloc1.symbol, &fromTarget))
// Combine both reloc patterns into one 32-bit key for the switch.
463 switch(relocPattern(reloc1) << 16 | relocPattern(reloc2)) {
464 case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength8) << 16 |
465 X86_64_RELOC_UNSIGNED | rExtern | rLength8): {
466 if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
468 uint64_t encodedAddend = (int64_t)*(const little64_t *)fixupContent;
// One side of the difference must be this atom: if the subtracted atom
// is us, it's a forward delta (FDE refs to functions are special-cased);
// if the added atom is us, it's a negative delta against fromTarget.
469 if (inAtom == fromTarget) {
470 if (inAtom->contentType() == DefinedAtom::typeCFI)
471 *kind = unwindFDEToFunction;
474 *addend = encodedAddend + offsetInAtom;
475 } else if (inAtom == *target) {
477 *addend = encodedAddend - offsetInAtom;
478 *target = fromTarget;
480 return llvm::make_error<GenericError>("Invalid pointer diff");
481 return llvm::Error::success();
// Same logic for the 32-bit-wide pair.
483 case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength4) << 16 |
484 X86_64_RELOC_UNSIGNED | rExtern | rLength4): {
485 if (auto ec = atomFromSymbolIndex(reloc2.symbol, target))
487 uint32_t encodedAddend = (int32_t)*(const little32_t *)fixupContent;
488 if (inAtom == fromTarget) {
490 *addend = encodedAddend + offsetInAtom;
491 } else if (inAtom == *target) {
493 *addend = encodedAddend - offsetInAtom;
494 *target = fromTarget;
496 return llvm::make_error<GenericError>("Invalid pointer diff");
497 return llvm::Error::success();
// Non-extern UNSIGNED half: added atom is found by section+address
// computed from the encoded displacement.
499 case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength8) << 16 |
500 X86_64_RELOC_UNSIGNED | rLength8):
501 if (fromTarget != inAtom)
502 return llvm::make_error<GenericError>("pointer diff not in base atom");
504 targetAddress = offsetInAtom + (int64_t)*(const little64_t *)fixupContent;
505 return atomFromAddress(reloc2.symbol, targetAddress, target, addend);
506 case ((X86_64_RELOC_SUBTRACTOR | rExtern | rLength4) << 16 |
507 X86_64_RELOC_UNSIGNED | rLength4):
508 if (fromTarget != inAtom)
509 return llvm::make_error<GenericError>("pointer diff not in base atom");
511 targetAddress = offsetInAtom + (int32_t)*(const little32_t *)fixupContent;
512 return atomFromAddress(reloc2.symbol, targetAddress, target, addend);
514 return llvm::make_error<GenericError>("unknown pair");
// Copy the atom's raw bytes into the output buffer, then walk its
// References applying each fixup in place — relocatable flavor when
// emitting a .o, final flavor when emitting an image. (The if/else
// around the two apply calls is partially elided in this listing.)
518 void ArchHandler_x86_64::generateAtomContent(
519 const DefinedAtom &atom, bool relocatable, FindAddressForAtom findAddress,
520 FindAddressForAtom findSectionAddress, uint64_t imageBaseAddress,
521 llvm::MutableArrayRef<uint8_t> atomContentBuffer) {
523 std::copy(atom.rawContent().begin(), atom.rawContent().end(),
524 atomContentBuffer.begin());
526 for (const Reference *ref : atom) {
527 uint32_t offset = ref->offsetInAtom();
528 const Atom *target = ref->target();
// Only defined atoms have a resolvable address; others stay 0 here.
529 uint64_t targetAddress = 0;
530 if (isa<DefinedAtom>(target))
531 targetAddress = findAddress(*target);
532 uint64_t atomAddress = findAddress(atom);
533 uint64_t fixupAddress = atomAddress + offset;
535 applyFixupRelocatable(*ref, &atomContentBuffer[offset],
536 fixupAddress, targetAddress,
539 applyFixupFinal(*ref, &atomContentBuffer[offset],
540 fixupAddress, targetAddress,
541 atomAddress, imageBaseAddress, findSectionAddress);
// Patch fixup bytes for a final linked image: rip-relative displacements
// are computed against the end of the 4-byte field (plus the SIGNED_N
// bias), pointers get absolute addresses, image offsets are relative to
// the image base. Many case labels/breaks are elided in this listing.
546 void ArchHandler_x86_64::applyFixupFinal(
547 const Reference &ref, uint8_t *loc, uint64_t fixupAddress,
548 uint64_t targetAddress, uint64_t inAtomAddress, uint64_t imageBaseAddress,
549 FindAddressForAtom findSectionAddress) {
550 if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
552 assert(ref.kindArch() == Reference::KindArch::x86_64);
// Little-endian views over the fixup location for 32/64-bit writes.
553 ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
554 ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
555 switch (static_cast<X86_64Kind>(ref.kindValue())) {
560 case ripRel32GotLoad:
562 *loc32 = targetAddress - (fixupAddress + 4) + ref.addend();
566 *loc64 = targetAddress + ref.addend();
// Offset of the TLV init value within its section.
568 case tlvInitSectionOffset:
569 *loc64 = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
572 case ripRel32Minus1Anon:
573 *loc32 = targetAddress - (fixupAddress + 5) + ref.addend();
576 case ripRel32Minus2Anon:
577 *loc32 = targetAddress - (fixupAddress + 6) + ref.addend();
580 case ripRel32Minus4Anon:
581 *loc32 = targetAddress - (fixupAddress + 8) + ref.addend();
585 *loc32 = targetAddress - fixupAddress + ref.addend();
589 case unwindFDEToFunction:
590 *loc64 = targetAddress - fixupAddress + ref.addend();
592 case ripRel32GotLoadNowLea:
593 // Change MOVQ to LEA
// Sanity-check the opcode we are rewriting really is a MOV (0x8B).
594 assert(loc[-2] == 0x8B);
596 *loc32 = targetAddress - (fixupAddress + 4) + ref.addend();
// negDelta kinds store "here - target" rather than "target - here".
599 *loc64 = fixupAddress - targetAddress + ref.addend();
602 *loc32 = fixupAddress - targetAddress + ref.addend();
607 case lazyImmediateLocation:
608 *loc32 = ref.addend();
612 *loc32 = (targetAddress - imageBaseAddress) + ref.addend();
614 case unwindInfoToEhFrame: {
// Pack the __eh_frame offset into the low 24 bits of the compact
// unwind encoding, preserving the high byte.
615 uint64_t val = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
616 assert(val < 0xffffffU && "offset in __eh_frame too large");
617 *loc32 = (*loc32 & 0xff000000U) | val;
621 // Fall into llvm_unreachable().
624 llvm_unreachable("invalid x86_64 Reference Kind");
// Patch fixup bytes for -r (relocatable) output: extern relocs store only
// the addend (biased for SIGNED_N), anonymous relocs store real
// displacements, and pass-synthesized kinds must not appear. Many case
// labels/breaks are elided in this listing.
627 void ArchHandler_x86_64::applyFixupRelocatable(const Reference &ref,
629 uint64_t fixupAddress,
630 uint64_t targetAddress,
631 uint64_t inAtomAddress) {
632 if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
634 assert(ref.kindArch() == Reference::KindArch::x86_64);
635 ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
636 ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
637 switch (static_cast<X86_64Kind>(ref.kindValue())) {
641 case ripRel32GotLoad:
// Extern reloc: the linker reading the .o recomputes the target, so
// only the addend is stored in the instruction.
643 *loc32 = ref.addend();
646 *loc32 = (targetAddress - (fixupAddress + 4)) + ref.addend();
648 case tlvInitSectionOffset:
650 *loc64 = ref.addend();
653 *loc64 = targetAddress + ref.addend();
// SIGNED_N extern forms re-apply the -N bias when storing the addend.
656 *loc32 = ref.addend() - 1;
658 case ripRel32Minus1Anon:
659 *loc32 = (targetAddress - (fixupAddress + 5)) + ref.addend();
662 *loc32 = ref.addend() - 2;
664 case ripRel32Minus2Anon:
665 *loc32 = (targetAddress - (fixupAddress + 6)) + ref.addend();
668 *loc32 = ref.addend() - 4;
670 case ripRel32Minus4Anon:
671 *loc32 = (targetAddress - (fixupAddress + 8)) + ref.addend();
674 *loc32 = ref.addend() + inAtomAddress - fixupAddress;
677 // The value we write here should be the the delta to the target
678 // after taking in to account the difference from the fixup back to the
679 // last defined label
682 // Lfixup: .quad Ltarget - .
686 // Then we want to encode the value (Ltarget + addend) - (LFixup - _base)
687 *loc32 = (targetAddress + ref.addend()) - (fixupAddress - inAtomAddress);
690 *loc64 = ref.addend() + inAtomAddress - fixupAddress;
693 // The value we write here should be the the delta to the target
694 // after taking in to account the difference from the fixup back to the
695 // last defined label
698 // Lfixup: .quad Ltarget - .
702 // Then we want to encode the value (Ltarget + addend) - (LFixup - _base)
703 *loc64 = (targetAddress + ref.addend()) - (fixupAddress - inAtomAddress);
706 *loc64 = ref.addend() + fixupAddress - inAtomAddress;
709 *loc32 = ref.addend() + fixupAddress - inAtomAddress;
// Pass-produced kinds are illegal in relocatable output.
711 case ripRel32GotLoadNowLea:
712 llvm_unreachable("ripRel32GotLoadNowLea implies GOT pass was run");
715 case lazyImmediateLocation:
716 llvm_unreachable("lazy reference kind implies Stubs pass was run");
720 case unwindInfoToEhFrame:
721 llvm_unreachable("fixup implies __unwind_info");
723 case unwindFDEToFunction:
724 // Do nothing for now
727 // Fall into llvm_unreachable().
730 llvm_unreachable("unknown x86_64 Reference Kind");
// Emit the normalized relocation record(s) for one Reference when writing
// a relocatable .o: one reloc for simple kinds, a SUBTRACTOR+UNSIGNED
// pair for deltas. Extern relocs use a symbol index; anonymous ones use
// a section index. Many case labels/breaks are elided in this listing.
733 void ArchHandler_x86_64::appendSectionRelocations(
734 const DefinedAtom &atom,
735 uint64_t atomSectionOffset,
736 const Reference &ref,
737 FindSymbolIndexForAtom symbolIndexForAtom,
738 FindSectionIndexForAtom sectionIndexForAtom,
739 FindAddressForAtom addressForAtom,
740 normalized::Relocations &relocs) {
741 if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
743 assert(ref.kindArch() == Reference::KindArch::x86_64);
// Relocations are addressed by offset within the output section.
744 uint32_t sectionOffset = atomSectionOffset + ref.offsetInAtom();
745 switch (static_cast<X86_64Kind>(ref.kindValue())) {
747 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
748 X86_64_RELOC_BRANCH | rPcRel | rExtern | rLength4);
751 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
752 X86_64_RELOC_SIGNED | rPcRel | rExtern | rLength4 );
755 appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
756 X86_64_RELOC_SIGNED | rPcRel | rLength4 );
759 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
760 X86_64_RELOC_GOT | rPcRel | rExtern | rLength4 );
762 case ripRel32GotLoad:
763 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
764 X86_64_RELOC_GOT_LOAD | rPcRel | rExtern | rLength4 );
767 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
768 X86_64_RELOC_TLV | rPcRel | rExtern | rLength4 );
770 case tlvInitSectionOffset:
772 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
773 X86_64_RELOC_UNSIGNED | rExtern | rLength8);
776 appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
777 X86_64_RELOC_UNSIGNED | rLength8);
780 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
781 X86_64_RELOC_SIGNED_1 | rPcRel | rExtern | rLength4 );
783 case ripRel32Minus1Anon:
784 appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
785 X86_64_RELOC_SIGNED_1 | rPcRel | rLength4 );
788 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
789 X86_64_RELOC_SIGNED_2 | rPcRel | rExtern | rLength4 );
791 case ripRel32Minus2Anon:
792 appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
793 X86_64_RELOC_SIGNED_2 | rPcRel | rLength4 );
796 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
797 X86_64_RELOC_SIGNED_4 | rPcRel | rExtern | rLength4 );
799 case ripRel32Minus4Anon:
800 appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
801 X86_64_RELOC_SIGNED_4 | rPcRel | rLength4 );
// delta kinds: SUBTRACTOR names this atom (the base), UNSIGNED names
// the target; 32- and 64-bit widths below.
804 appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
805 X86_64_RELOC_SUBTRACTOR | rExtern | rLength4 );
806 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
807 X86_64_RELOC_UNSIGNED | rExtern | rLength4 );
810 appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
811 X86_64_RELOC_SUBTRACTOR | rExtern | rLength4 );
812 appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
813 X86_64_RELOC_UNSIGNED | rLength4 );
816 appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
817 X86_64_RELOC_SUBTRACTOR | rExtern | rLength8 );
818 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
819 X86_64_RELOC_UNSIGNED | rExtern | rLength8 );
822 appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
823 X86_64_RELOC_SUBTRACTOR | rExtern | rLength8 );
824 appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
825 X86_64_RELOC_UNSIGNED | rLength8 );
827 case unwindFDEToFunction:
828 case unwindInfoToEhFrame:
// negDelta kinds reverse the pair: SUBTRACTOR names the target,
// UNSIGNED names this atom.
831 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
832 X86_64_RELOC_SUBTRACTOR | rExtern | rLength4 );
833 appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
834 X86_64_RELOC_UNSIGNED | rExtern | rLength4 );
837 appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
838 X86_64_RELOC_SUBTRACTOR | rExtern | rLength8 );
839 appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
840 X86_64_RELOC_UNSIGNED | rExtern | rLength8 );
// Pass-produced kinds must never reach relocatable output.
842 case ripRel32GotLoadNowLea:
843 llvm_unreachable("ripRel32GotLoadNowLea implies GOT pass was run");
846 case lazyImmediateLocation:
847 llvm_unreachable("lazy reference kind implies Stubs pass was run");
851 llvm_unreachable("__unwind_info references should have been resolved");
854 // Fall into llvm_unreachable().
857 llvm_unreachable("unknown x86_64 Reference Kind");
// Factory: the only way the rest of lld instantiates this handler.
860 std::unique_ptr<mach_o::ArchHandler> ArchHandler::create_x86_64() {
861 return std::unique_ptr<mach_o::ArchHandler>(new ArchHandler_x86_64());
864 } // namespace mach_o