1 //=== aarch64.h - Generic JITLink aarch64 edge kinds, utilities -*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // Generic utilities for graphs representing aarch64 objects.
11 //===----------------------------------------------------------------------===//
13 #ifndef LLVM_EXECUTIONENGINE_JITLINK_AARCH64_H
14 #define LLVM_EXECUTIONENGINE_JITLINK_AARCH64_H
16 #include "TableManager.h"
17 #include "llvm/ExecutionEngine/JITLink/JITLink.h"
18 #include "llvm/ExecutionEngine/Orc/Shared/MemoryFlags.h"
24 /// Represents aarch64 fixups and other aarch64-specific edge kinds.
25 enum EdgeKind_aarch64 : Edge::Kind {
27 /// A plain 64-bit pointer value relocation.
30 /// Fixup <- Target + Addend : uint64
32 Pointer64 = Edge::FirstRelocation,
34 /// A plain 32-bit pointer value relocation.
37 /// Fixup <- Target + Addend : uint32
40 /// - The target must reside in the low 32-bits of the address space,
41 /// otherwise an out-of-range error will be returned.
47 /// Delta from the fixup to the target.
50 /// Fixup <- Target - Fixup + Addend : int64
56 /// Delta from the fixup to the target.
59 /// Fixup <- Target - Fixup + Addend : int64
62 /// - The result of the fixup expression must fit into an int32, otherwise
63 /// an out-of-range error will be returned.
67 /// A 64-bit negative delta.
69 /// Delta from target back to the fixup.
72 /// Fixup <- Fixup - Target + Addend : int64
76 /// A 32-bit negative delta.
78 /// Delta from the target back to the fixup.
81 /// Fixup <- Fixup - Target + Addend : int32
84 /// - The result of the fixup expression must fit into an int32, otherwise
85 /// an out-of-range error will be returned.
88 /// A 26-bit PC-relative branch.
90 /// Represents a PC-relative call or branch to a target within +/-128Mb. The
91 /// target must be 32-bit aligned.
94 /// Fixup <- (Target - Fixup + Addend) >> 2 : int26
97 /// The '26' in the name refers to the number operand bits and follows the
98 /// naming convention used by the corresponding ELF and MachO relocations.
99 /// Since the low two bits must be zero (because of the 32-bit alignment of
100 /// the target) the operand is effectively a signed 28-bit number.
104 /// - The result of the unshifted part of the fixup expression must be
105 /// 32-bit aligned otherwise an alignment error will be returned.
106 /// - The result of the fixup expression must fit into an int26 otherwise an
107 /// out-of-range error will be returned.
110 /// A 14-bit PC-relative test and branch.
112 /// Represents a PC-relative test and branch to a target within +/-32Kb. The
113 /// target must be 32-bit aligned.
115 /// Fixup expression:
116 /// Fixup <- (Target - Fixup + Addend) >> 2 : int14
119 /// The '14' in the name refers to the number operand bits and follows the
120 /// naming convention used by the corresponding ELF relocation.
121 /// Since the low two bits must be zero (because of the 32-bit alignment of
122 /// the target) the operand is effectively a signed 16-bit number.
126 /// - The result of the unshifted part of the fixup expression must be
127 /// 32-bit aligned otherwise an alignment error will be returned.
128 /// - The result of the fixup expression must fit into an int14 otherwise an
129 /// out-of-range error will be returned.
130 TestAndBranch14PCRel,
132 /// A 19-bit PC-relative conditional branch.
134 /// Represents a PC-relative conditional branch to a target within +/-1Mb. The
135 /// target must be 32-bit aligned.
137 /// Fixup expression:
138 /// Fixup <- (Target - Fixup + Addend) >> 2 : int19
141 /// The '19' in the name refers to the number operand bits and follows the
142 /// naming convention used by the corresponding ELF relocation.
143 /// Since the low two bits must be zero (because of the 32-bit alignment of
144 /// the target) the operand is effectively a signed 21-bit number.
148 /// - The result of the unshifted part of the fixup expression must be
149 /// 32-bit aligned otherwise an alignment error will be returned.
150 /// - The result of the fixup expression must fit into an int19 otherwise an
151 /// out-of-range error will be returned.
154 /// A 16-bit slice of the target address (which slice depends on the
155 /// instruction at the fixup location).
157 /// Used to fix up MOVK/MOVN/MOVZ instructions.
159 /// Fixup expression:
161 /// Fixup <- (Target + Addend) >> Shift : uint16
163 /// where Shift is encoded in the instruction at the fixup location.
167 /// The signed 21-bit delta from the fixup to the target.
169 /// Typically used to load a pointers at a PC-relative offset of +/- 1Mb. The
170 /// target must be 32-bit aligned.
172 /// Fixup expression:
174 /// Fixup <- (Target - Fixup) >> 2 : int19
177 /// - The result of the unshifted part of the fixup expression must be
178 /// 32-bit aligned otherwise an alignment error will be returned.
179 /// - The result of the fixup expression must fit into an an int19 or an
180 /// out-of-range error will be returned.
183 /// The signed 21-bit delta from the fixup to the target.
185 /// Fixup expression:
187 /// Fixup <- Target - Fixup + Addend : int21
193 /// - The result of the fixup expression must fit into an int21 otherwise an
194 /// out-of-range error will be returned.
197 /// The signed 21-bit delta from the fixup page to the page containing the
200 /// Fixup expression:
202 /// Fixup <- (((Target + Addend) & ~0xfff) - (Fixup & ~0xfff)) >> 12 : int21
208 /// - The result of the fixup expression must fit into an int21 otherwise an
209 /// out-of-range error will be returned.
212 /// The 12-bit (potentially shifted) offset of the target within its page.
214 /// Typically used to fix up LDR immediates.
216 /// Fixup expression:
218 /// Fixup <- ((Target + Addend) >> Shift) & 0xfff : uint12
220 /// where Shift is encoded in the size field of the instruction.
223 /// - The result of the unshifted part of the fixup expression must be
224 /// aligned otherwise an alignment error will be returned.
225 /// - The result of the fixup expression must fit into a uint12 otherwise an
226 /// out-of-range error will be returned.
229 /// A GOT entry getter/constructor, transformed to Page21 pointing at the GOT
230 /// entry for the original target.
232 /// Indicates that this edge should be transformed into a Page21 targeting
233 /// the GOT entry for the edge's current target, maintaining the same addend.
234 /// A GOT entry for the target should be created if one does not already
237 /// Edges of this kind are usually handled by a GOT builder pass inserted by
240 /// Fixup expression:
244 /// - *ASSERTION* Failure to handle edges of this kind prior to the fixup
245 /// phase will result in an assert/unreachable during the fixup phase.
247 RequestGOTAndTransformToPage21,
249 /// A GOT entry getter/constructor, transformed to Pageoffset12 pointing at
250 /// the GOT entry for the original target.
252 /// Indicates that this edge should be transformed into a PageOffset12
253 /// targeting the GOT entry for the edge's current target, maintaining the
254 /// same addend. A GOT entry for the target should be created if one does not
257 /// Edges of this kind are usually handled by a GOT builder pass inserted by
260 /// Fixup expression:
264 /// - *ASSERTION* Failure to handle edges of this kind prior to the fixup
265 /// phase will result in an assert/unreachable during the fixup phase.
267 RequestGOTAndTransformToPageOffset12,
269 /// A GOT entry getter/constructor, transformed to Delta32 pointing at the GOT
270 /// entry for the original target.
272 /// Indicates that this edge should be transformed into a Delta32/ targeting
273 /// the GOT entry for the edge's current target, maintaining the same addend.
274 /// A GOT entry for the target should be created if one does not already
277 /// Edges of this kind are usually handled by a GOT builder pass inserted by
280 /// Fixup expression:
284 /// - *ASSERTION* Failure to handle edges of this kind prior to the fixup
285 /// phase will result in an assert/unreachable during the fixup phase.
287 RequestGOTAndTransformToDelta32,
289 /// A TLVP entry getter/constructor, transformed to Page21.
291 /// Indicates that this edge should be transformed into a Page21 targeting the
292 /// TLVP entry for the edge's current target. A TLVP entry for the target
293 /// should be created if one does not already exist.
295 /// Fixup expression:
299 /// - *ASSERTION* Failure to handle edges of this kind prior to the fixup
300 /// phase will result in an assert/unreachable during the fixup phase.
302 RequestTLVPAndTransformToPage21,
304 /// A TLVP entry getter/constructor, transformed to PageOffset12.
306 /// Indicates that this edge should be transformed into a PageOffset12
307 /// targeting the TLVP entry for the edge's current target. A TLVP entry for
308 /// the target should be created if one does not already exist.
310 /// Fixup expression:
314 /// - *ASSERTION* Failure to handle edges of this kind prior to the fixup
315 /// phase will result in an assert/unreachable during the fixup phase.
317 RequestTLVPAndTransformToPageOffset12,
319 /// A TLSDesc entry getter/constructor, transformed to Page21.
321 /// Indicates that this edge should be transformed into a Page21 targeting the
322 /// TLSDesc entry for the edge's current target. A TLSDesc entry for the
323 /// target should be created if one does not already exist.
325 /// Fixup expression:
329 /// - *ASSERTION* Failure to handle edges of this kind prior to the fixup
330 /// phase will result in an assert/unreachable during the fixup phase.
332 RequestTLSDescEntryAndTransformToPage21,
334 /// A TLSDesc entry getter/constructor, transformed to PageOffset12.
336 /// Indicates that this edge should be transformed into a PageOffset12
337 /// targeting the TLSDesc entry for the edge's current target. A TLSDesc entry
338 /// for the target should be created if one does not already exist.
340 /// Fixup expression:
344 /// - *ASSERTION* Failure to handle edges of this kind prior to the fixup
345 /// phase will result in an assert/unreachable during the fixup phase.
347 RequestTLSDescEntryAndTransformToPageOffset12,
/// Returns a string name for the given aarch64 edge. For debugging purposes
/// only. (Defined in the JITLink aarch64 implementation file.)
const char *getEdgeKindName(Edge::Kind K);
// Returns whether the Instr is LD/ST (imm12).
//
// Checks the fixed opcode bits that identify the AArch64 "load/store register
// (unsigned immediate)" encoding group; size/opc/operand bits are ignored.
inline bool isLoadStoreImm12(uint32_t Instr) {
  constexpr uint32_t LoadStoreImm12Mask = 0x3b000000;
  return (Instr & LoadStoreImm12Mask) == 0x39000000;
}
// Returns whether the Instr is a test-and-branch (TBZ/TBNZ, imm14).
inline bool isTestAndBranchImm14(uint32_t Instr) {
  constexpr uint32_t TestAndBranchImm14Mask = 0x7e000000;
  return (Instr & TestAndBranchImm14Mask) == 0x36000000;
}
// Returns whether the Instr is a conditional branch (B.cond, imm19).
inline bool isCondBranchImm19(uint32_t Instr) {
  constexpr uint32_t CondBranchImm19Mask = 0xfe000000;
  return (Instr & CondBranchImm19Mask) == 0x54000000;
}
// Returns whether the Instr is a compare-and-branch (CBZ/CBNZ, imm19).
inline bool isCompAndBranchImm19(uint32_t Instr) {
  constexpr uint32_t CompAndBranchImm19Mask = 0x7e000000;
  return (Instr & CompAndBranchImm19Mask) == 0x34000000;
}
// Returns whether the Instr is an ADR (PC-relative address, imm21).
// Note: ADRP (0x9xxxxxxx) is deliberately excluded by the mask's top bit.
inline bool isADR(uint32_t Instr) {
  constexpr uint32_t ADRMask = 0x9f000000;
  return (Instr & ADRMask) == 0x10000000;
}
380 // Returns the amount the address operand of LD/ST (imm12)
381 // should be shifted right by.
383 // The shift value varies by the data size of LD/ST instruction.
384 // For instance, LDH instructoin needs the address to be shifted
386 inline unsigned getPageOffset12Shift(uint32_t Instr) {
387 constexpr uint32_t Vec128Mask = 0x04800000;
389 if (isLoadStoreImm12(Instr)) {
390 uint32_t ImplicitShift = Instr >> 30;
391 if (ImplicitShift == 0)
392 if ((Instr & Vec128Mask) == Vec128Mask)
395 return ImplicitShift;
// Returns whether the Instr is MOVK/MOVZ (imm16) with a zero immediate field.
// The mask also requires the imm16 operand bits to be zero, so a MOVK/MOVZ
// with a non-zero immediate already encoded will NOT match.
inline bool isMoveWideImm16(uint32_t Instr) {
  constexpr uint32_t MoveWideImm16Mask = 0x5f9fffe0;
  return (Instr & MoveWideImm16Mask) == 0x52800000;
}
407 // Returns the amount the address operand of MOVK/MOVZ (imm16)
408 // should be shifted right by.
410 // The shift value is specfied in the assembly as LSL #<shift>.
411 inline unsigned getMoveWide16Shift(uint32_t Instr) {
412 if (isMoveWideImm16(Instr)) {
413 uint32_t ImplicitShift = (Instr >> 21) & 0b11;
414 return ImplicitShift << 4;
420 /// Apply fixup expression for edge to block content.
421 inline Error applyFixup(LinkGraph &G, Block &B, const Edge &E) {
422 using namespace support;
424 char *BlockWorkingMem = B.getAlreadyMutableContent().data();
425 char *FixupPtr = BlockWorkingMem + E.getOffset();
426 orc::ExecutorAddr FixupAddress = B.getAddress() + E.getOffset();
428 switch (E.getKind()) {
430 uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
431 *(ulittle64_t *)FixupPtr = Value;
435 uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
436 if (Value > std::numeric_limits<uint32_t>::max())
437 return makeTargetOutOfRangeError(G, B, E);
438 *(ulittle32_t *)FixupPtr = Value;
446 if (E.getKind() == Delta32 || E.getKind() == Delta64)
447 Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
449 Value = FixupAddress - E.getTarget().getAddress() + E.getAddend();
451 if (E.getKind() == Delta32 || E.getKind() == NegDelta32) {
452 if (Value < std::numeric_limits<int32_t>::min() ||
453 Value > std::numeric_limits<int32_t>::max())
454 return makeTargetOutOfRangeError(G, B, E);
455 *(little32_t *)FixupPtr = Value;
457 *(little64_t *)FixupPtr = Value;
460 case Branch26PCRel: {
461 assert((FixupAddress.getValue() & 0x3) == 0 &&
462 "Branch-inst is not 32-bit aligned");
464 int64_t Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
466 if (static_cast<uint64_t>(Value) & 0x3)
467 return make_error<JITLinkError>("BranchPCRel26 target is not 32-bit "
470 if (Value < -(1 << 27) || Value > ((1 << 27) - 1))
471 return makeTargetOutOfRangeError(G, B, E);
473 uint32_t RawInstr = *(little32_t *)FixupPtr;
474 assert((RawInstr & 0x7fffffff) == 0x14000000 &&
475 "RawInstr isn't a B or BR immediate instruction");
476 uint32_t Imm = (static_cast<uint32_t>(Value) & ((1 << 28) - 1)) >> 2;
477 uint32_t FixedInstr = RawInstr | Imm;
478 *(little32_t *)FixupPtr = FixedInstr;
482 uint64_t TargetOffset =
483 (E.getTarget().getAddress() + E.getAddend()).getValue();
485 uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
486 assert(isMoveWideImm16(RawInstr) &&
487 "RawInstr isn't a MOVK/MOVZ instruction");
489 unsigned ImmShift = getMoveWide16Shift(RawInstr);
490 uint32_t Imm = (TargetOffset >> ImmShift) & 0xffff;
491 uint32_t FixedInstr = RawInstr | (Imm << 5);
492 *(ulittle32_t *)FixupPtr = FixedInstr;
496 assert((FixupAddress.getValue() & 0x3) == 0 && "LDR is not 32-bit aligned");
497 assert(E.getAddend() == 0 && "LDRLiteral19 with non-zero addend");
498 uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
499 assert(RawInstr == 0x58000010 && "RawInstr isn't a 64-bit LDR literal");
500 int64_t Delta = E.getTarget().getAddress() - FixupAddress;
502 return make_error<JITLinkError>("LDR literal target is not 32-bit "
504 if (Delta < -(1 << 20) || Delta > ((1 << 20) - 1))
505 return makeTargetOutOfRangeError(G, B, E);
507 uint32_t EncodedImm = ((static_cast<uint32_t>(Delta) >> 2) & 0x7ffff) << 5;
508 uint32_t FixedInstr = RawInstr | EncodedImm;
509 *(ulittle32_t *)FixupPtr = FixedInstr;
513 assert((FixupAddress.getValue() & 0x3) == 0 && "ADR is not 32-bit aligned");
514 uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
515 assert(isADR(RawInstr) && "RawInstr is not an ADR");
516 int64_t Delta = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
517 if (!isInt<21>(Delta))
518 return makeTargetOutOfRangeError(G, B, E);
519 auto UDelta = static_cast<uint32_t>(Delta);
520 uint32_t EncodedImmHi = ((UDelta >> 2) & 0x7ffff) << 5;
521 uint32_t EncodedImmLo = (UDelta & 0x3) << 29;
522 uint32_t FixedInstr = RawInstr | EncodedImmHi | EncodedImmLo;
523 *(ulittle32_t *)FixupPtr = FixedInstr;
526 case TestAndBranch14PCRel: {
527 assert((FixupAddress.getValue() & 0x3) == 0 &&
528 "Test and branch is not 32-bit aligned");
529 uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
530 assert(isTestAndBranchImm14(RawInstr) &&
531 "RawInstr is not a test and branch");
532 int64_t Delta = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
534 return make_error<JITLinkError>(
535 "Test and branch literal target is not 32-bit aligned");
536 if (!isInt<16>(Delta))
537 return makeTargetOutOfRangeError(G, B, E);
538 uint32_t EncodedImm = ((static_cast<uint32_t>(Delta) >> 2) & 0x3fff) << 5;
539 uint32_t FixedInstr = RawInstr | EncodedImm;
540 *(ulittle32_t *)FixupPtr = FixedInstr;
543 case CondBranch19PCRel: {
544 assert((FixupAddress.getValue() & 0x3) == 0 &&
545 "Conditional branch is not 32-bit aligned");
546 uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
547 assert((isCondBranchImm19(RawInstr) || isCompAndBranchImm19(RawInstr)) &&
548 "RawInstr is not a conditional branch");
549 int64_t Delta = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
551 return make_error<JITLinkError>(
552 "Conditional branch literal target is not 32-bit "
554 if (!isInt<21>(Delta))
555 return makeTargetOutOfRangeError(G, B, E);
556 uint32_t EncodedImm = ((static_cast<uint32_t>(Delta) >> 2) & 0x7ffff) << 5;
557 uint32_t FixedInstr = RawInstr | EncodedImm;
558 *(ulittle32_t *)FixupPtr = FixedInstr;
562 uint64_t TargetPage =
563 (E.getTarget().getAddress().getValue() + E.getAddend()) &
564 ~static_cast<uint64_t>(4096 - 1);
566 FixupAddress.getValue() & ~static_cast<uint64_t>(4096 - 1);
568 int64_t PageDelta = TargetPage - PCPage;
569 if (!isInt<33>(PageDelta))
570 return makeTargetOutOfRangeError(G, B, E);
572 uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
573 assert((RawInstr & 0xffffffe0) == 0x90000000 &&
574 "RawInstr isn't an ADRP instruction");
575 uint32_t ImmLo = (static_cast<uint64_t>(PageDelta) >> 12) & 0x3;
576 uint32_t ImmHi = (static_cast<uint64_t>(PageDelta) >> 14) & 0x7ffff;
577 uint32_t FixedInstr = RawInstr | (ImmLo << 29) | (ImmHi << 5);
578 *(ulittle32_t *)FixupPtr = FixedInstr;
582 uint64_t TargetOffset =
583 (E.getTarget().getAddress() + E.getAddend()).getValue() & 0xfff;
585 uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
586 unsigned ImmShift = getPageOffset12Shift(RawInstr);
588 if (TargetOffset & ((1 << ImmShift) - 1))
589 return make_error<JITLinkError>("PAGEOFF12 target is not aligned");
591 uint32_t EncodedImm = (TargetOffset >> ImmShift) << 10;
592 uint32_t FixedInstr = RawInstr | EncodedImm;
593 *(ulittle32_t *)FixupPtr = FixedInstr;
597 return make_error<JITLinkError>(
598 "In graph " + G.getName() + ", section " + B.getSection().getName() +
599 " unsupported edge kind " + getEdgeKindName(E.getKind()));
602 return Error::success();
/// aarch64 pointer size.
constexpr uint64_t PointerSize = 8;

/// AArch64 null pointer content.
/// (Storage is defined out-of-line; presumably zero bytes — confirm against
/// the defining .cpp file.)
extern const char NullPointerContent[PointerSize];

/// AArch64 pointer jump stub content.
///
/// Contains the instruction sequence for an indirect jump via an in-memory
/// pointer:
///   ADRP x16, ptr@page21
///   LDR x16, [x16, ptr@pageoff12]
/// followed by a third 4-byte instruction (12 bytes total; presumably
/// "BR x16" — confirm against the defining .cpp file).
extern const char PointerJumpStubContent[12];
620 /// Creates a new pointer block in the given section and returns an
621 /// Anonymous symobl pointing to it.
623 /// If InitialTarget is given then an Pointer64 relocation will be added to the
624 /// block pointing at InitialTarget.
626 /// The pointer block will have the following default values:
627 /// alignment: 64-bit
628 /// alignment-offset: 0
629 /// address: highest allowable (~7U)
630 inline Symbol &createAnonymousPointer(LinkGraph &G, Section &PointerSection,
631 Symbol *InitialTarget = nullptr,
632 uint64_t InitialAddend = 0) {
633 auto &B = G.createContentBlock(PointerSection, NullPointerContent,
634 orc::ExecutorAddr(~uint64_t(7)), 8, 0);
636 B.addEdge(Pointer64, 0, *InitialTarget, InitialAddend);
637 return G.addAnonymousSymbol(B, 0, 8, false, false);
640 /// Create a jump stub block that jumps via the pointer at the given symbol.
642 /// The stub block will have the following default values:
643 /// alignment: 32-bit
644 /// alignment-offset: 0
645 /// address: highest allowable: (~11U)
646 inline Block &createPointerJumpStubBlock(LinkGraph &G, Section &StubSection,
647 Symbol &PointerSymbol) {
648 auto &B = G.createContentBlock(StubSection, PointerJumpStubContent,
649 orc::ExecutorAddr(~uint64_t(11)), 1, 0);
650 B.addEdge(Page21, 0, PointerSymbol, 0);
651 B.addEdge(PageOffset12, 4, PointerSymbol, 0);
655 /// Create a jump stub that jumps via the pointer at the given symbol and
656 /// an anonymous symbol pointing to it. Return the anonymous symbol.
658 /// The stub block will be created by createPointerJumpStubBlock.
659 inline Symbol &createAnonymousPointerJumpStub(LinkGraph &G,
660 Section &StubSection,
661 Symbol &PointerSymbol) {
662 return G.addAnonymousSymbol(
663 createPointerJumpStubBlock(G, StubSection, PointerSymbol), 0,
664 sizeof(PointerJumpStubContent), true, false);
667 /// Global Offset Table Builder.
668 class GOTTableManager : public TableManager<GOTTableManager> {
670 static StringRef getSectionName() { return "$__GOT"; }
672 bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
673 Edge::Kind KindToSet = Edge::Invalid;
674 const char *BlockWorkingMem = B->getContent().data();
675 const char *FixupPtr = BlockWorkingMem + E.getOffset();
677 switch (E.getKind()) {
678 case aarch64::RequestGOTAndTransformToPage21:
679 case aarch64::RequestTLVPAndTransformToPage21: {
680 KindToSet = aarch64::Page21;
683 case aarch64::RequestGOTAndTransformToPageOffset12:
684 case aarch64::RequestTLVPAndTransformToPageOffset12: {
685 KindToSet = aarch64::PageOffset12;
686 uint32_t RawInstr = *(const support::ulittle32_t *)FixupPtr;
688 assert(E.getAddend() == 0 &&
689 "GOTPageOffset12/TLVPageOffset12 with non-zero addend");
690 assert((RawInstr & 0xfffffc00) == 0xf9400000 &&
691 "RawInstr isn't a 64-bit LDR immediate");
694 case aarch64::RequestGOTAndTransformToDelta32: {
695 KindToSet = aarch64::Delta32;
701 assert(KindToSet != Edge::Invalid &&
702 "Fell through switch, but no new kind to set");
703 DEBUG_WITH_TYPE("jitlink", {
704 dbgs() << " Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
705 << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
706 << formatv("{0:x}", E.getOffset()) << ")\n";
708 E.setKind(KindToSet);
709 E.setTarget(getEntryForTarget(G, E.getTarget()));
713 Symbol &createEntry(LinkGraph &G, Symbol &Target) {
714 return createAnonymousPointer(G, getGOTSection(G), &Target);
718 Section &getGOTSection(LinkGraph &G) {
720 GOTSection = &G.createSection(getSectionName(),
721 orc::MemProt::Read | orc::MemProt::Exec);
725 Section *GOTSection = nullptr;
728 /// Procedure Linkage Table Builder.
729 class PLTTableManager : public TableManager<PLTTableManager> {
731 PLTTableManager(GOTTableManager &GOT) : GOT(GOT) {}
733 static StringRef getSectionName() { return "$__STUBS"; }
735 bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
736 if (E.getKind() == aarch64::Branch26PCRel && !E.getTarget().isDefined()) {
737 DEBUG_WITH_TYPE("jitlink", {
738 dbgs() << " Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
739 << B->getFixupAddress(E) << " (" << B->getAddress() << " + "
740 << formatv("{0:x}", E.getOffset()) << ")\n";
742 E.setTarget(getEntryForTarget(G, E.getTarget()));
748 Symbol &createEntry(LinkGraph &G, Symbol &Target) {
749 return createAnonymousPointerJumpStub(G, getStubsSection(G),
750 GOT.getEntryForTarget(G, Target));
754 Section &getStubsSection(LinkGraph &G) {
756 StubsSection = &G.createSection(getSectionName(),
757 orc::MemProt::Read | orc::MemProt::Exec);
758 return *StubsSection;
761 GOTTableManager &GOT;
762 Section *StubsSection = nullptr;
765 } // namespace aarch64
766 } // namespace jitlink
769 #endif // LLVM_EXECUTIONENGINE_JITLINK_AARCH64_H