//===- X86_64.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "InputFiles.h"
#include "OutputSections.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/Object/ELF.h"
#include "llvm/Support/Endian.h"

using namespace llvm;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

class X86_64 : public TargetInfo {
public:
  X86_64();
  int getTlsGdRelaxSkip(RelType type) const override;
  RelExpr getRelExpr(RelType type, const Symbol &s,
                     const uint8_t *loc) const override;
  RelType getDynRel(RelType type) const override;
  void writeGotPltHeader(uint8_t *buf) const override;
  void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;
  void relocate(uint8_t *loc, const Relocation &rel,
                uint64_t val) const override;
  void applyJumpInstrMod(uint8_t *loc, JumpModType type,
                         unsigned size) const override;

  RelExpr adjustRelaxExpr(RelType type, const uint8_t *data,
                          RelExpr expr) const override;
  void relaxGot(uint8_t *loc, const Relocation &rel,
                uint64_t val) const override;
  void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
                      uint64_t val) const override;
  void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
                      uint64_t val) const override;
  void relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
                      uint64_t val) const override;
  void relaxTlsLdToLe(uint8_t *loc, const Relocation &rel,
                      uint64_t val) const override;
  bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
                                        uint8_t stOther) const override;
  bool deleteFallThruJmpInsn(InputSection &is, InputFile *file,
                             InputSection *nextIS) const override;
};

// This is a vector of NOP instructions of sizes from 1 to 9 bytes. The
// appropriately sized instructions are used to fill the gaps between sections
// which are executed during fall through.
static const std::vector<std::vector<uint8_t>> nopInstructions = {
    {0x90},
    {0x66, 0x90},
    {0x0f, 0x1f, 0x00},
    {0x0f, 0x1f, 0x40, 0x00},
    {0x0f, 0x1f, 0x44, 0x00, 0x00},
    {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
    {0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00},
    {0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    {0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}};

X86_64::X86_64() {
  copyRel = R_X86_64_COPY;
  gotRel = R_X86_64_GLOB_DAT;
  noneRel = R_X86_64_NONE;
  pltRel = R_X86_64_JUMP_SLOT;
  relativeRel = R_X86_64_RELATIVE;
  iRelativeRel = R_X86_64_IRELATIVE;
  symbolicRel = R_X86_64_64;
  tlsDescRel = R_X86_64_TLSDESC;
  tlsGotRel = R_X86_64_TPOFF64;
  tlsModuleIndexRel = R_X86_64_DTPMOD64;
  tlsOffsetRel = R_X86_64_DTPOFF64;
  trapInstr = {0xcc, 0xcc, 0xcc, 0xcc}; // 0xcc = INT3
  nopInstrs = nopInstructions;

  // Align to the large page size (known as a superpage or huge page).
  // FreeBSD automatically promotes large, superpage-aligned allocations.
  defaultImageBase = 0x200000;
}
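
// A TLS general-dynamic code sequence is covered by two relocations: the
// R_X86_64_TLSGD (or R_X86_64_GOTPC32_TLSDESC) relocation itself plus the
// relocation on the following call. Relaxing the sequence consumes both, so
// the relocation scan has to skip two entries.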
int X86_64::getTlsGdRelaxSkip(RelType type) const { return 2; }

// Opcodes for the different X86_64 jmp instructions.
enum JmpInsnOpcode : uint32_t {

// Given the first (optional) and second byte of the insn's opcode, this
// returns the corresponding enum value.
static JmpInsnOpcode getJmpInsnType(const uint8_t *first,
                                    const uint8_t *second) {
  if (first == nullptr)
    return J_UNKNOWN;

  if (*first == 0x0f) {

// Return the relocation index for input section IS with a specific Offset.
// Returns the size of the relocation vector if no such relocation is found.
static unsigned getRelocationWithOffset(const InputSection &is,
                                        uint64_t offset) {
  unsigned size = is.relocations.size();
  // Walk backwards; "i + 1 > 0" is an unsigned-safe way of writing "i >= 0".
  for (unsigned i = size - 1; i + 1 > 0; --i) {
    if (is.relocations[i].offset == offset && is.relocations[i].expr != R_NONE)
      return i;
  }
  return size;
}

// Returns true if R corresponds to a relocation used for a jump instruction.
// TODO: Once special relocations for relaxable jump instructions are
// available, this should be modified to use those relocations.
static bool isRelocationForJmpInsn(Relocation &R) {
  return R.type == R_X86_64_PLT32 || R.type == R_X86_64_PC32 ||
         R.type == R_X86_64_PC8;
}

// Return true if Relocation R points to the first instruction in the
// next section.
// TODO: Delete this once psABI reserves a new relocation type for fall thru
// jmps.
static bool isFallThruRelocation(InputSection &is, InputFile *file,
                                 InputSection *nextIS, Relocation &r) {
  if (!isRelocationForJmpInsn(r))
    return false;

  uint64_t addrLoc = is.getOutputSection()->addr + is.outSecOff + r.offset;
  uint64_t targetOffset = InputSectionBase::getRelocTargetVA(
      file, r.type, r.addend, addrLoc, *r.sym, r.expr);

  // If this jmp is a fall thru, the target offset is the beginning of the
  // next section. The +4 compensates for the -4 addend of the PC-relative
  // relocation, which is taken relative to the end of the 4-byte
  // displacement field.
  uint64_t nextSectionOffset =
      nextIS->getOutputSection()->addr + nextIS->outSecOff;
  return (addrLoc + 4 + targetOffset) == nextSectionOffset;
}

// Return the jmp instruction opcode that is the inverse of the given
// opcode. For example, JE inverted is JNE.
static JmpInsnOpcode invertJmpOpcode(const JmpInsnOpcode opcode) {

// Deletes direct jump instructions in input sections that jump to the
// following section, as they are not required. If there are two consecutive
// jump instructions, it checks if they can be flipped and one can be deleted.
// For example:
//   10: jne bar
//   15: jmp foo    # foo is the fall-thru section
// can be converted to:
//   10: je bar     # jne flipped to je and the jmp is deleted.
bool X86_64::deleteFallThruJmpInsn(InputSection &is, InputFile *file,
                                   InputSection *nextIS) const {
  const unsigned sizeOfDirectJmpInsn = 5;

  if (nextIS == nullptr)
    return false;

  if (is.getSize() < sizeOfDirectJmpInsn)
    return false;

  // If this jmp insn can be removed, it is the last insn and the
  // relocation is 4 bytes before the end.
  unsigned rIndex = getRelocationWithOffset(is, is.getSize() - 4);
  if (rIndex == is.relocations.size())
    return false;

  Relocation &r = is.relocations[rIndex];

  // Check if the relocation corresponds to a direct jmp.
  const uint8_t *secContents = is.data().data();
  // If it is not a direct jmp instruction, there is nothing to do here.
  if (*(secContents + r.offset - 1) != 0xe9)
    return false;

  if (isFallThruRelocation(is, file, nextIS, r)) {
    // This is a fall thru and can be deleted.
    r.expr = R_NONE;
    r.offset = 0;
    is.drop_back(sizeOfDirectJmpInsn);
    return true;
  }

  // Now, check if flip and delete is possible.
  const unsigned sizeOfJmpCCInsn = 6;
  // To flip, there must be at least one JmpCC and one direct jmp.
  if (is.getSize() < sizeOfDirectJmpInsn + sizeOfJmpCCInsn)
    return false;

  unsigned rbIndex =
      getRelocationWithOffset(is, (is.getSize() - sizeOfDirectJmpInsn - 4));
  if (rbIndex == is.relocations.size())
    return false;

  Relocation &rB = is.relocations[rbIndex];

  const uint8_t *jmpInsnB = secContents + rB.offset - 1;
  JmpInsnOpcode jmpOpcodeB = getJmpInsnType(jmpInsnB - 1, jmpInsnB);
  if (jmpOpcodeB == J_UNKNOWN)
    return false;

  if (!isFallThruRelocation(is, file, nextIS, rB))
    return false;

  // jmpCC jumps to the fall thru block, the branch can be flipped and the
  // jmp can be deleted.
  JmpInsnOpcode jInvert = invertJmpOpcode(jmpOpcodeB);
  if (jInvert == J_UNKNOWN)
    return false;
  is.jumpInstrMods.push_back({jInvert, (rB.offset - 1), 4});
  // Move R's values to rB except the offset.
  rB = {r.expr, r.type, rB.offset, r.addend, r.sym};
  // Cancel r; rB carries its relocation now.
  r.expr = R_NONE;
  r.offset = 0;
  is.drop_back(sizeOfDirectJmpInsn);
  return true;
}

RelExpr X86_64::getRelExpr(RelType type, const Symbol &s,
                           const uint8_t *loc) const {
  if (type == R_X86_64_GOTTPOFF)
    config->hasStaticTlsModel = true;

  switch (type) {
  case R_X86_64_8:
  case R_X86_64_16:
  case R_X86_64_32:
  case R_X86_64_32S:
  case R_X86_64_64:
    return R_ABS;
  case R_X86_64_DTPOFF32:
  case R_X86_64_DTPOFF64:
    return R_DTPREL;
  case R_X86_64_TPOFF32:
    return R_TPREL;
  case R_X86_64_TLSDESC_CALL:
    return R_TLSDESC_CALL;
  case R_X86_64_TLSLD:
    return R_TLSLD_PC;
  case R_X86_64_TLSGD:
    return R_TLSGD_PC;
  case R_X86_64_SIZE32:
  case R_X86_64_SIZE64:
    return R_SIZE;
  case R_X86_64_PLT32:
    return R_PLT_PC;
  case R_X86_64_PC8:
  case R_X86_64_PC16:
  case R_X86_64_PC32:
  case R_X86_64_PC64:
    return R_PC;
  case R_X86_64_GOT32:
  case R_X86_64_GOT64:
    return R_GOTPLT;
  case R_X86_64_GOTPC32_TLSDESC:
    return R_TLSDESC_PC;
  case R_X86_64_GOTPCREL:
  case R_X86_64_GOTPCRELX:
  case R_X86_64_REX_GOTPCRELX:
  case R_X86_64_GOTTPOFF:
    return R_GOT_PC;
  case R_X86_64_GOTOFF64:
    return R_GOTPLTREL;
  case R_X86_64_GOTPC32:
  case R_X86_64_GOTPC64:
    return R_GOTPLTONLY_PC;
  case R_X86_64_NONE:
    return R_NONE;
  default:
    error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
          ") against symbol " + toString(s));
    return R_NONE;
  }
}

void X86_64::writeGotPltHeader(uint8_t *buf) const {
  // The first entry holds the value of _DYNAMIC. It is not clear why that is
  // required, but it is documented in the psABI and the glibc dynamic linker
  // seems to use it (note that this is relevant for linking ld.so, not any
  // other program).
  write64le(buf, mainPart->dynamic->getVA());
}

void X86_64::writeGotPlt(uint8_t *buf, const Symbol &s) const {
  // See comments in X86::writeGotPlt.
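  // The +6 makes the initial .got.plt value point at the pushq that follows
  // this entry's 6-byte "jmpq *got(%rip)", so the very first call falls
  // through to the lazy-resolution stub.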
  write64le(buf, s.getPltVA() + 6);
}

void X86_64::writePltHeader(uint8_t *buf) const {
  const uint8_t pltData[] = {
      0xff, 0x35, 0, 0, 0, 0, // pushq GOTPLT+8(%rip)
      0xff, 0x25, 0, 0, 0, 0, // jmp *GOTPLT+16(%rip)
      0x0f, 0x1f, 0x40, 0x00, // nop
  };
  memcpy(buf, pltData, sizeof(pltData));
  uint64_t gotPlt = in.gotPlt->getVA();
  uint64_t plt = in.ibtPlt ? in.ibtPlt->getVA() : in.plt->getVA();
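  // RIP-relative displacements are relative to the end of the instruction:
  // the pushq ends at plt+6 and the jmp at plt+12, so the fields below hold
  // (gotPlt + 8) - (plt + 6) and (gotPlt + 16) - (plt + 12) respectively.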
  write32le(buf + 2, gotPlt - plt + 2); // GOTPLT+8
  write32le(buf + 8, gotPlt - plt + 4); // GOTPLT+16
}

void X86_64::writePlt(uint8_t *buf, const Symbol &sym,
                      uint64_t pltEntryAddr) const {
  const uint8_t inst[] = {
      0xff, 0x25, 0, 0, 0, 0, // jmpq *got(%rip)
      0x68, 0, 0, 0, 0,       // pushq <relocation index>
      0xe9, 0, 0, 0, 0,       // jmpq plt[0]
  };
  memcpy(buf, inst, sizeof(inst));
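
  // As in the header, the branch displacements are taken from the end of
  // each instruction: the first jmpq ends 6 bytes into the entry and the
  // final jmpq ends 16 bytes in.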
  write32le(buf + 2, sym.getGotPltVA() - pltEntryAddr - 6);
  write32le(buf + 7, sym.pltIndex);
  write32le(buf + 12, in.plt->getVA() - pltEntryAddr - 16);
}

RelType X86_64::getDynRel(RelType type) const {
  if (type == R_X86_64_64 || type == R_X86_64_PC64 || type == R_X86_64_SIZE32 ||
      type == R_X86_64_SIZE64)
    return type;
  return R_X86_64_NONE;
}

void X86_64::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
                            uint64_t val) const {
  if (rel.type == R_X86_64_TLSGD) {
    // Convert
    //   .byte 0x66
    //   leaq x@tlsgd(%rip), %rdi
    //   .word 0x6666
    //   rex64
    //   call __tls_get_addr@plt
    // to the following two instructions.
    const uint8_t inst[] = {
        0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00,
        0x00, 0x00,                   // mov %fs:0x0,%rax
        0x48, 0x8d, 0x80, 0, 0, 0, 0, // lea x@tpoff,%rax
    };
    memcpy(loc - 4, inst, sizeof(inst));

    // The original code used a PC-relative relocation, so we have to
    // compensate for the -4 it had in the addend.
    write32le(loc + 8, val + 4);
  } else {
    // Convert
    //   lea x@tlsdesc(%rip), %rax
    //   call *(%rax)
    // to the following two instructions.
    assert(rel.type == R_X86_64_GOTPC32_TLSDESC);
    if (memcmp(loc - 3, "\x48\x8d\x05", 3)) {
      error(getErrorLocation(loc - 3) + "R_X86_64_GOTPC32_TLSDESC must be used "
                                        "in leaq x@tlsdesc(%rip), %rax");
      return;
    }
    // movq $x@tpoff, %rax
    loc[-2] = 0xc7;
    loc[-1] = 0xc0;
    write32le(loc, val + 4);
  }
}

void X86_64::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
                            uint64_t val) const {
  if (rel.type == R_X86_64_TLSGD) {
    // Convert
    //   .byte 0x66
    //   leaq x@tlsgd(%rip), %rdi
    //   .word 0x6666
    //   rex64
    //   call __tls_get_addr@plt
    // to the following two instructions.
    const uint8_t inst[] = {
        0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00,
        0x00, 0x00,                   // mov %fs:0x0,%rax
        0x48, 0x03, 0x05, 0, 0, 0, 0, // addq x@gottpoff(%rip),%rax
    };
    memcpy(loc - 4, inst, sizeof(inst));

    // Both code sequences are PC-relative, but since we moved the constant
    // forward by 8 bytes, we have to subtract 8 from the value.
    write32le(loc + 8, val - 8);
  } else {
    // Convert
    //   lea x@tlsdesc(%rip), %rax
    //   call *(%rax)
    // to the following two instructions.
    assert(rel.type == R_X86_64_GOTPC32_TLSDESC);
    if (memcmp(loc - 3, "\x48\x8d\x05", 3)) {
      error(getErrorLocation(loc - 3) + "R_X86_64_GOTPC32_TLSDESC must be used "
                                        "in leaq x@tlsdesc(%rip), %rax");
      return;
    }
    // movq x@gottpoff(%rip), %rax
    loc[-2] = 0x8b;
    write32le(loc, val);
  }
}

// In some conditions, R_X86_64_GOTTPOFF relocation can be optimized to
// R_X86_64_TPOFF32 so that it does not use GOT.
void X86_64::relaxTlsIeToLe(uint8_t *loc, const Relocation &,
                            uint64_t val) const {
  uint8_t *inst = loc - 3;
  uint8_t reg = loc[-1] >> 3;
  uint8_t *regSlot = loc - 1;

  // Note that ADD with RSP or R12 is converted to ADD instead of LEA
  // because LEA with these registers needs 4 bytes to encode and thus
  // wouldn't fit the space.
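  // In the leaq rewrites below, the rebuilt ModR/M byte
  // 0x80 | (reg << 3) | reg selects mod=10 (base register plus 32-bit
  // displacement) with the same register in both the reg and r/m fields,
  // i.e. "leaq foo(%reg),%reg".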
  if (memcmp(inst, "\x48\x03\x25", 3) == 0) {
    // "addq foo@gottpoff(%rip),%rsp" -> "addq $foo,%rsp"
    memcpy(inst, "\x48\x81\xc4", 3);
  } else if (memcmp(inst, "\x4c\x03\x25", 3) == 0) {
    // "addq foo@gottpoff(%rip),%r12" -> "addq $foo,%r12"
    memcpy(inst, "\x49\x81\xc4", 3);
  } else if (memcmp(inst, "\x4c\x03", 2) == 0) {
    // "addq foo@gottpoff(%rip),%r[8-15]" -> "leaq foo(%r[8-15]),%r[8-15]"
    memcpy(inst, "\x4d\x8d", 2);
    *regSlot = 0x80 | (reg << 3) | reg;
  } else if (memcmp(inst, "\x48\x03", 2) == 0) {
    // "addq foo@gottpoff(%rip),%reg" -> "leaq foo(%reg),%reg"
    memcpy(inst, "\x48\x8d", 2);
    *regSlot = 0x80 | (reg << 3) | reg;
  } else if (memcmp(inst, "\x4c\x8b", 2) == 0) {
    // "movq foo@gottpoff(%rip),%r[8-15]" -> "movq $foo,%r[8-15]"
    memcpy(inst, "\x49\xc7", 2);
    *regSlot = 0xc0 | reg;
  } else if (memcmp(inst, "\x48\x8b", 2) == 0) {
    // "movq foo@gottpoff(%rip),%reg" -> "movq $foo,%reg"
    memcpy(inst, "\x48\xc7", 2);
    *regSlot = 0xc0 | reg;
  } else {
    error(getErrorLocation(loc - 3) +
          "R_X86_64_GOTTPOFF must be used in MOVQ or ADDQ instructions only");
  }

  // The original code used a PC-relative relocation.
  // Need to compensate for the -4 it had in the addend.
  write32le(loc, val + 4);
}

void X86_64::relaxTlsLdToLe(uint8_t *loc, const Relocation &rel,
                            uint64_t val) const {
  if (rel.type == R_X86_64_DTPOFF64) {
    write64le(loc, val);
    return;
  }
  if (rel.type == R_X86_64_DTPOFF32) {
    write32le(loc, val);
    return;
  }

  const uint8_t inst[] = {
      0x66, 0x66, // .word 0x6666
      0x66,       // .byte 0x66
      0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0,%rax
  };

  if (loc[4] == 0xe8) {
    // Convert
    //   leaq bar@tlsld(%rip), %rdi  # 48 8d 3d <Loc>
    //   callq __tls_get_addr@PLT    # e8 <disp32>
    //   leaq bar@dtpoff(%rax), %rcx
    // to
    //   .word 0x6666
    //   .byte 0x66
    //   mov %fs:0,%rax
    //   leaq bar@tpoff(%rax), %rcx
    memcpy(loc - 3, inst, sizeof(inst));
    return;
  }

  if (loc[4] == 0xff && loc[5] == 0x15) {
    // Convert
    //   leaq x@tlsld(%rip),%rdi              # 48 8d 3d <Loc>
    //   call *__tls_get_addr@GOTPCREL(%rip)  # ff 15 <disp32>
    // to
    //   .long 0x66666666
    //   movq %fs:0,%rax
    // See "Table 11.9: LD -> LE Code Transition (LP64)" in
    // https://raw.githubusercontent.com/wiki/hjl-tools/x86-psABI/x86-64-psABI-1.0.pdf
    loc[-3] = 0x66;
    memcpy(loc - 2, inst, sizeof(inst));
    return;
  }

  error(getErrorLocation(loc - 3) +
        "expected R_X86_64_PLT32 or R_X86_64_GOTPCRELX after R_X86_64_TLSLD");
}

// A JumpInstrMod at a specific offset indicates that the jump instruction
// opcode at that offset must be modified. This is specifically used to relax
// jump instructions with basic block sections. This function looks at the
// JumpMod and effects the change.
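// For example, flipping a 32-bit "jne" to "je" rewrites the two opcode bytes
// at the given location from 0x0f 0x85 to 0x0f 0x84.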
void X86_64::applyJumpInstrMod(uint8_t *loc, JumpModType type,
                               unsigned size) const {
    llvm_unreachable("Unknown Jump Relocation");

void X86_64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
  switch (rel.type) {
  case R_X86_64_8:
    checkIntUInt(loc, val, 8, rel);
    *loc = val;
    break;
  case R_X86_64_PC8:
    checkInt(loc, val, 8, rel);
    *loc = val;
    break;
  case R_X86_64_16:
    checkIntUInt(loc, val, 16, rel);
    write16le(loc, val);
    break;
  case R_X86_64_PC16:
    checkInt(loc, val, 16, rel);
    write16le(loc, val);
    break;
  case R_X86_64_32:
    checkUInt(loc, val, 32, rel);
    write32le(loc, val);
    break;
  case R_X86_64_32S:
  case R_X86_64_TPOFF32:
  case R_X86_64_GOT32:
  case R_X86_64_GOTPC32:
  case R_X86_64_GOTPC32_TLSDESC:
  case R_X86_64_GOTPCREL:
  case R_X86_64_GOTPCRELX:
  case R_X86_64_REX_GOTPCRELX:
  case R_X86_64_PC32:
  case R_X86_64_GOTTPOFF:
  case R_X86_64_PLT32:
  case R_X86_64_TLSGD:
  case R_X86_64_TLSLD:
  case R_X86_64_DTPOFF32:
  case R_X86_64_SIZE32:
    checkInt(loc, val, 32, rel);
    write32le(loc, val);
    break;
  case R_X86_64_64:
  case R_X86_64_DTPOFF64:
  case R_X86_64_PC64:
  case R_X86_64_SIZE64:
  case R_X86_64_GOT64:
  case R_X86_64_GOTOFF64:
  case R_X86_64_GOTPC64:
    write64le(loc, val);
    break;
  default:
    llvm_unreachable("unknown relocation");
  }
}

RelExpr X86_64::adjustRelaxExpr(RelType type, const uint8_t *data,
                                RelExpr relExpr) const {
  if (type != R_X86_64_GOTPCRELX && type != R_X86_64_REX_GOTPCRELX)
    return relExpr;
  const uint8_t op = data[-2];
  const uint8_t modRm = data[-1];

  // FIXME: When PIC is disabled and foo is defined locally in the
  // lower 32 bit address space, the memory operand in mov can be converted
  // into an immediate operand. Otherwise, mov must be changed to lea. We
  // support only the latter relaxation at the moment.
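  // For example, "movq foo@GOTPCREL(%rip), %rax" (48 8b 05 <disp32>) can be
  // relaxed to "leaq foo(%rip), %rax" (48 8d 05 <disp32>) in relaxGot().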
  if (op == 0x8b)
    return R_RELAX_GOT_PC;

  // Relax call and jmp.
  if (op == 0xff && (modRm == 0x15 || modRm == 0x25))
    return R_RELAX_GOT_PC;

  // Relaxation of test, adc, add, and, cmp, or, sbb, sub, xor.
  // If PIC then no relaxation is available.
  // We also don't relax test/binop instructions without a REX byte, since
  // they are 32-bit operations and not common.
  assert(type == R_X86_64_REX_GOTPCRELX);
  return config->isPic ? relExpr : R_RELAX_GOT_PC_NOPIC;
}

// A subset of relaxations can only be applied for no-PIC. This method
// handles such relaxations. Instruction encoding information was taken from:
// "Intel 64 and IA-32 Architectures Software Developer's Manual V2"
// (http://www.intel.com/content/dam/www/public/us/en/documents/manuals/
//  64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf)
static void relaxGotNoPic(uint8_t *loc, uint64_t val, uint8_t op,
                          uint8_t modRm) {
  const uint8_t rex = loc[-3];
765 // Convert "test %reg, foo@GOTPCREL(%rip)" to "test $foo, %reg".
767 // See "TEST-Logical Compare" (4-428 Vol. 2B),
768 // TEST r/m64, r64 uses "full" ModR / M byte (no opcode extension).
770 // ModR/M byte has form XX YYY ZZZ, where
771 // YYY is MODRM.reg(register 2), ZZZ is MODRM.rm(register 1).
772 // XX has different meanings:
773 // 00: The operand's memory address is in reg1.
774 // 01: The operand's memory address is reg1 + a byte-sized displacement.
775 // 10: The operand's memory address is reg1 + a word-sized displacement.
776 // 11: The operand is reg1 itself.
777 // If an instruction requires only one operand, the unused reg2 field
778 // holds extra opcode bits rather than a register code
779 // 0xC0 == 11 000 000 binary.
780 // 0x38 == 00 111 000 binary.
781 // We transfer reg2 to reg1 here as operand.
782 // See "2.1.3 ModR/M and SIB Bytes" (Vol. 2A 2-3).
783 loc[-1] = 0xc0 | (modRm & 0x38) >> 3; // ModR/M byte.
785 // Change opcode from TEST r/m64, r64 to TEST r/m64, imm32
786 // See "TEST-Logical Compare" (4-428 Vol. 2B).
789 // Move R bit to the B bit in REX byte.
790 // REX byte is encoded as 0100WRXB, where
791 // 0100 is 4bit fixed pattern.
792 // REX.W When 1, a 64-bit operand size is used. Otherwise, when 0, the
793 // default operand size is used (which is 32-bit for most but not all
795 // REX.R This 1-bit value is an extension to the MODRM.reg field.
796 // REX.X This 1-bit value is an extension to the SIB.index field.
797 // REX.B This 1-bit value is an extension to the MODRM.rm field or the
799 // See "2.2.1.2 More on REX Prefix Fields " (2-8 Vol. 2A).
800 loc[-3] = (rex & ~0x4) | (rex & 0x4) >> 2;
  // If we are here then we need to relax the adc, add, and, cmp, or, sbb,
  // sub or xor operations.

  // Convert "binop foo@GOTPCREL(%rip), %reg" to "binop $foo, %reg".
  // Logic is close to the one for the test instruction above, but we also
  // write an opcode extension here, see below for details.
  loc[-1] = 0xc0 | (modRm & 0x38) >> 3 | (op & 0x3c); // ModR/M byte.

  // Primary opcode is 0x81, opcode extension is one of:
  // 000b = ADD, 001b is OR, 010b is ADC, 011b is SBB,
  // 100b is AND, 101b is SUB, 110b is XOR, 111b is CMP.
  // This value was written to MODRM.reg in a line above.
  // See "3.2 INSTRUCTIONS (A-M)" (Vol. 2A 3-15),
  // "INSTRUCTION SET REFERENCE, N-Z" (Vol. 2B 4-1) for
  // descriptions about each operation.
  loc[-2] = 0x81;
  loc[-3] = (rex & ~0x4) | (rex & 0x4) >> 2;
  write32le(loc, val);
}

void X86_64::relaxGot(uint8_t *loc, const Relocation &, uint64_t val) const {
  const uint8_t op = loc[-2];
  const uint8_t modRm = loc[-1];

  // Convert "mov foo@GOTPCREL(%rip),%reg" to "lea foo(%rip),%reg".
  if (op == 0x8b) {
    loc[-2] = 0x8d;
    write32le(loc, val);
    return;
  }

  if (op != 0xff) {
    // We are relaxing a RIP-relative access to an absolute one, so
    // compensate for the old -4 addend.
    assert(!config->isPic);
    relaxGotNoPic(loc, val + 4, op, modRm);
    return;
  }

  // Convert call/jmp instructions.
  if (modRm == 0x15) {
    // ABI says we can convert "call *foo@GOTPCREL(%rip)" to "nop; call foo".
    // Instead we convert to "addr32 call foo" where addr32 is an instruction
    // prefix. That makes the result a single instruction.
    loc[-2] = 0x67; // addr32 prefix
    loc[-1] = 0xe8; // call
    write32le(loc, val);
    return;
  }

  // Convert "jmp *foo@GOTPCREL(%rip)" to "jmp foo; nop".
  // jmp doesn't return, so it is fine to use nop here, it is just a stub.
  assert(modRm == 0x25);
  loc[-2] = 0xe9; // jmp
  loc[3] = 0x90;  // nop
  // The displacement field moved one byte forward but is still taken
  // relative to the end of its instruction, so adjust the value by 1.
  write32le(loc - 1, val + 1);
}

// A split-stack prologue starts by checking the amount of stack remaining
// in one of two ways:
// A) Comparing the stack pointer to a field in the TCB.
// B) Loading a stack pointer offset with an lea into r10 or r11.
bool X86_64::adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
                                              uint8_t stOther) const {
  if (!config->is64) {
    error("Target doesn't support split stacks.");
    return false;
  }

  if (loc + 8 >= end)
    return false;

  // Replace "cmp %fs:0x70,%rsp" and subsequent branch
  // with "stc, nopl 0x0(%rax,%rax,1)".
  if (memcmp(loc, "\x64\x48\x3b\x24\x25", 5) == 0) {
    memcpy(loc, "\xf9\x0f\x1f\x84\x00\x00\x00\x00", 8);
    return true;
  }

884 // Adjust "lea X(%rsp),%rYY" to lea "(X - 0x4000)(%rsp),%rYY" where rYY could
885 // be r10 or r11. The lea instruction feeds a subsequent compare which checks
886 // if there is X available stack space. Making X larger effectively reserves
887 // that much additional space. The stack grows downward so subtract the value.
888 if (memcmp(loc, "\x4c\x8d\x94\x24", 4) == 0 ||
889 memcmp(loc, "\x4c\x8d\x9c\x24", 4) == 0) {
890 // The offset bytes are encoded four bytes after the start of the
892 write32le(loc + 4, read32le(loc + 4) - 0x4000);
// If Intel Indirect Branch Tracking is enabled, we have to emit special PLT
// entries containing endbr64 instructions. A PLT entry will be split into two
// parts, one in .plt.sec (writePlt), and the other in .plt (writeIBTPlt).
class IntelIBT : public X86_64 {
public:
  IntelIBT();
  void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;
  void writeIBTPlt(uint8_t *buf, size_t numEntries) const override;

  static const unsigned IBTPltHeaderSize = 16;
};

IntelIBT::IntelIBT() { pltHeaderSize = 0; }

void IntelIBT::writeGotPlt(uint8_t *buf, const Symbol &s) const {
  uint64_t va =
      in.ibtPlt->getVA() + IBTPltHeaderSize + s.pltIndex * pltEntrySize;
  write64le(buf, va);
}

void IntelIBT::writePlt(uint8_t *buf, const Symbol &sym,
                        uint64_t pltEntryAddr) const {
  const uint8_t Inst[] = {
      0xf3, 0x0f, 0x1e, 0xfa,       // endbr64
      0xff, 0x25, 0, 0, 0, 0,       // jmpq *got(%rip)
      0x66, 0x0f, 0x1f, 0x44, 0, 0, // nop
  };
  memcpy(buf, Inst, sizeof(Inst));
  write32le(buf + 6, sym.getGotPltVA() - pltEntryAddr - 10);
}

void IntelIBT::writeIBTPlt(uint8_t *buf, size_t numEntries) const {
  writePltHeader(buf);
  buf += IBTPltHeaderSize;

  const uint8_t inst[] = {
      0xf3, 0x0f, 0x1e, 0xfa, // endbr64
      0x68, 0, 0, 0, 0,       // pushq <relocation index>
      0xe9, 0, 0, 0, 0,       // jmpq plt[0]
      0xcc, 0xcc,             // int3; padding
  };
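
  // Each entry is sizeof(inst) == 16 bytes and starts IBTPltHeaderSize +
  // 16 * i bytes into the IBT PLT; the jmpq's disp32 field ends 14 bytes
  // into the entry, so the displacement back to plt[0] works out to
  // -(16 + 16 * i + 14). pltHeaderSize is 0 for this target, so the
  // -pltHeaderSize term below contributes nothing.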
  for (size_t i = 0; i < numEntries; ++i) {
    memcpy(buf, inst, sizeof(inst));
    write32le(buf + 5, i);
    write32le(buf + 10, -pltHeaderSize - sizeof(inst) * i - 30);
    buf += sizeof(inst);
  }
}

// These nonstandard PLT entries are to mitigate the Spectre v2 security
// vulnerability. In order to mitigate Spectre v2, we want to avoid indirect
// branch instructions such as `jmp *GOTPLT(%rip)`. So, in the following PLT
// entries, we use a CALL followed by MOV and RET to do the same thing as an
// indirect jump. That instruction sequence is so-called "retpoline".
//
// We have two types of retpoline PLTs as a size optimization. If `-z now`
// is specified, all dynamic symbols are resolved at load-time. Thus, when
// that option is given, we can omit code for symbol lazy resolution.
class Retpoline : public X86_64 {
public:
  Retpoline();
  void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;
};

class RetpolineZNow : public X86_64 {
public:
  RetpolineZNow();
  void writeGotPlt(uint8_t *buf, const Symbol &s) const override {}
  void writePltHeader(uint8_t *buf) const override;
  void writePlt(uint8_t *buf, const Symbol &sym,
                uint64_t pltEntryAddr) const override;
};

Retpoline::Retpoline() {
  pltHeaderSize = 48;
  pltEntrySize = 32;
  ipltEntrySize = 32;
}

void Retpoline::writeGotPlt(uint8_t *buf, const Symbol &s) const {
  write64le(buf, s.getPltVA() + 17);
}

void Retpoline::writePltHeader(uint8_t *buf) const {
  const uint8_t insn[] = {
      0xff, 0x35, 0, 0, 0, 0,                   // 0:  pushq GOTPLT+8(%rip)
      0x4c, 0x8b, 0x1d, 0, 0, 0, 0,             // 6:  mov GOTPLT+16(%rip), %r11
      0xe8, 0x0e, 0x00, 0x00, 0x00,             // d:  callq next
      0xf3, 0x90,                               // 12: loop: pause
      0x0f, 0xae, 0xe8,                         // 14: lfence
      0xeb, 0xf9,                               // 17: jmp loop
      0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, // 19: int3; .align 16
      0x4c, 0x89, 0x1c, 0x24,                   // 20: next: mov %r11, (%rsp)
      0xc3,                                     // 24: ret
      0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, // 25: int3; padding
      0xcc, 0xcc, 0xcc, 0xcc,                   // 2c: int3; padding
  };
  memcpy(buf, insn, sizeof(insn));

  uint64_t gotPlt = in.gotPlt->getVA();
  uint64_t plt = in.plt->getVA();
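  // Displacements are relative to the end of each instruction:
  // (gotPlt + 8) - (plt + 6) for the pushq and (gotPlt + 16) - (plt + 13)
  // for the mov.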
  write32le(buf + 2, gotPlt - plt - 6 + 8);
  write32le(buf + 9, gotPlt - plt - 13 + 16);
}

void Retpoline::writePlt(uint8_t *buf, const Symbol &sym,
                         uint64_t pltEntryAddr) const {
  const uint8_t insn[] = {
      0x4c, 0x8b, 0x1d, 0, 0, 0, 0, // 0:  mov foo@GOTPLT(%rip), %r11
      0xe8, 0, 0, 0, 0,             // 7:  callq plt+0x20
      0xe9, 0, 0, 0, 0,             // c:  jmp plt+0x12
      0x68, 0, 0, 0, 0,             // 11: pushq <relocation index>
      0xe9, 0, 0, 0, 0,             // 16: jmp plt+0
      0xcc, 0xcc, 0xcc, 0xcc, 0xcc, // 1b: int3; padding
  };
  memcpy(buf, insn, sizeof(insn));

  uint64_t off = pltEntryAddr - in.plt->getVA();
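  // Branch targets, again relative to the end of each branch instruction:
  // "next" in the header at plt+0x20 for the callq, "loop" at plt+0x12 for
  // the first jmp, and the header itself at plt+0 for the last jmp.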
  write32le(buf + 3, sym.getGotPltVA() - pltEntryAddr - 7);
  write32le(buf + 8, -off - 12 + 32);
  write32le(buf + 13, -off - 17 + 18);
  write32le(buf + 18, sym.pltIndex);
  write32le(buf + 23, -off - 27);
}

RetpolineZNow::RetpolineZNow() {
  pltHeaderSize = 32;
  pltEntrySize = 16;
  ipltEntrySize = 16;
}

void RetpolineZNow::writePltHeader(uint8_t *buf) const {
  const uint8_t insn[] = {
      0xe8, 0x0b, 0x00, 0x00, 0x00, // 0:  call next
      0xf3, 0x90,                   // 5:  loop: pause
      0x0f, 0xae, 0xe8,             // 7:  lfence
      0xeb, 0xf9,                   // a:  jmp loop
      0xcc, 0xcc, 0xcc, 0xcc,       // c:  int3; .align 16
      0x4c, 0x89, 0x1c, 0x24,       // 10: next: mov %r11, (%rsp)
      0xc3,                         // 14: ret
      0xcc, 0xcc, 0xcc, 0xcc, 0xcc, // 15: int3; padding
      0xcc, 0xcc, 0xcc, 0xcc, 0xcc, // 1a: int3; padding
      0xcc,                         // 1f: int3; padding
  };
  memcpy(buf, insn, sizeof(insn));
}

void RetpolineZNow::writePlt(uint8_t *buf, const Symbol &sym,
                             uint64_t pltEntryAddr) const {
  const uint8_t insn[] = {
      0x4c, 0x8b, 0x1d, 0, 0, 0, 0, // mov foo@GOTPLT(%rip), %r11
      0xe9, 0, 0, 0, 0,             // jmp plt+0
      0xcc, 0xcc, 0xcc, 0xcc,       // int3; padding
  };
  memcpy(buf, insn, sizeof(insn));

  write32le(buf + 3, sym.getGotPltVA() - pltEntryAddr - 7);
  write32le(buf + 8, in.plt->getVA() - pltEntryAddr - 12);
}

static TargetInfo *getTargetInfo() {
  if (config->zRetpolineplt) {
    if (config->zNow) {
      static RetpolineZNow t;
      return &t;
    }
    static Retpoline t;
    return &t;
  }

  if (config->andFeatures & GNU_PROPERTY_X86_FEATURE_1_IBT) {
    static IntelIBT t;
    return &t;
  }

  static X86_64 t;
  return &t;
}

TargetInfo *elf::getX86_64TargetInfo() { return getTargetInfo(); }