1 /* $NetBSD: ppc_reloc.c,v 1.10 2001/09/10 06:09:41 mycroft Exp $ */
4 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
6 * Copyright (C) 1998 Tsubai Masanari
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/param.h>
36 #include <sys/sysctl.h>
43 #include <machine/cpu.h>
44 #include <machine/md_var.h>
49 #if !defined(_CALL_ELF) || _CALL_ELF == 1
58 * Process the R_PPC_COPY relocations
/*
 * do_copy_relocations: for each R_PPC_COPY entry in the main program's
 * RELA table, find the symbol's definition in an object *after* the main
 * program on the global list and memcpy that data into the executable's
 * own copy.
 * NOTE(review): this listing is gappy (embedded line numbers jump), so
 * comments describe only the lines visible here.
 */
61 do_copy_relocations(Obj_Entry *dstobj)
63 const Elf_Rela *relalim;
67 * COPY relocs are invalid outside of the main program
69 assert(dstobj->mainprog);
/* relalim is one past the last RELA entry; iterate the whole table. */
71 relalim = (const Elf_Rela *)((const char *) dstobj->rela +
73 for (rela = dstobj->rela; rela < relalim; rela++) {
75 const Elf_Sym *dstsym;
79 const Elf_Sym *srcsym = NULL;
80 const Obj_Entry *srcobj, *defobj;
/* Only R_PPC_COPY entries are processed by this pass. */
84 if (ELF_R_TYPE(rela->r_info) != R_PPC_COPY) {
88 dstaddr = (void *)(dstobj->relocbase + rela->r_offset);
89 dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
90 name = dstobj->strtab + dstsym->st_name;
91 size = dstsym->st_size;
/* Versioned lookup request for this relocation's symbol. */
92 symlook_init(&req, name);
93 req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
94 req.flags = SYMLOOK_EARLY;
/*
 * Search begins at the object after dstobj, deliberately skipping the
 * main program itself (its own symbol is the COPY destination).
 */
96 for (srcobj = globallist_next(dstobj); srcobj != NULL;
97 srcobj = globallist_next(srcobj)) {
98 res = symlook_obj(&req, srcobj);
100 srcsym = req.sym_out;
101 defobj = req.defobj_out;
/* No defining object found: report the undefined symbol and fail. */
106 if (srcobj == NULL) {
107 _rtld_error("Undefined symbol \"%s\" "
108 " referenced from COPY"
109 " relocation in %s", name, dstobj->path);
/* Copy st_size bytes from the defining object into the executable. */
113 srcaddr = (const void *)(defobj->relocbase+srcsym->st_value);
114 memcpy(dstaddr, srcaddr, size);
115 dbg("copy_reloc: src=%p,dst=%p,size=%zd\n",srcaddr,dstaddr,size);
123 * Perform early relocation of the run-time linker image
/*
 * reloc_non_plt_self: bootstrap-time self-relocation of rtld. Runs before
 * rtld's own relocations are applied, so it only uses values reachable
 * from the DYNAMIC array passed in. NOTE(review): gappy listing — the
 * DT_RELA/DT_RELASZ case labels are among the missing lines.
 */
126 reloc_non_plt_self(Elf_Dyn *dynp, Elf_Addr relocbase)
128 const Elf_Rela *rela = NULL, *relalim;
133 * Extract the rela/relasz values from the dynamic section
135 for (; dynp->d_tag != DT_NULL; dynp++) {
136 switch (dynp->d_tag) {
138 rela = (const Elf_Rela *)(relocbase+dynp->d_un.d_ptr);
141 relasz = dynp->d_un.d_val;
147 * Relocate these values
/* Apply each entry as a simple base+addend store (R_PPC_RELATIVE form). */
149 relalim = (const Elf_Rela *)((const char *)rela + relasz);
150 for (; rela < relalim; rela++) {
151 where = (Elf_Addr *)(relocbase + rela->r_offset);
152 *where = (Elf_Addr)(relocbase + rela->r_addend);
158 * Relocate a non-PLT object with addend.
/*
 * reloc_nonplt_object: apply a single non-PLT RELA entry to `obj`.
 * First resolves the target symbol (for types that reference one), with
 * special handling for STT_GNU_IFUNC; then dispatches on relocation type
 * to perform the store. NOTE(review): gappy listing — several case
 * labels, `break`s and returns are not visible here.
 */
161 reloc_nonplt_object(Obj_Entry *obj_rtld __unused, Obj_Entry *obj,
162 const Elf_Rela *rela, SymCache *cache, int flags, RtldLockState *lockstate)
164 const Elf_Sym *def = NULL;
165 const Obj_Entry *defobj;
166 Elf_Addr *where, symval = 0;
169 * First, resolve symbol for relocations which
/* Symbol-referencing relocation types all share one find_symdef() call. */
172 switch (ELF_R_TYPE(rela->r_info)) {
174 case R_PPC64_UADDR64: /* doubleword64 S + A */
177 case R_PPC64_DTPMOD64:
178 case R_PPC64_TPREL64:
179 case R_PPC64_DTPREL64:
180 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
181 flags, cache, lockstate);
186 * If symbol is IFUNC, only perform relocation
187 * when caller allowed it by passing
188 * SYMLOOK_IFUNC flag. Skip the relocations
191 * Also error out in case IFUNC relocations
192 * are specified for TLS, which cannot be
193 * usefully interpreted.
195 if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
196 switch (ELF_R_TYPE(rela->r_info)) {
197 case R_PPC64_UADDR64:
/* IFUNC seen but not allowed yet: defer, mark object for a later pass. */
200 if ((flags & SYMLOOK_IFUNC) == 0) {
201 dbg("Non-PLT reference to IFUNC found!");
202 obj->non_plt_gnu_ifunc = true;
205 symval = (Elf_Addr)rtld_resolve_ifunc(
/* IFUNC combined with a TLS relocation type is a hard error. */
209 _rtld_error("%s: IFUNC for TLS reloc",
/* Non-IFUNC symbol: plain S (relocbase + st_value) resolution. */
214 if ((flags & SYMLOOK_IFUNC) != 0)
216 symval = (Elf_Addr)defobj->relocbase +
221 if ((flags & SYMLOOK_IFUNC) != 0)
225 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
227 switch (ELF_R_TYPE(rela->r_info)) {
230 case R_PPC64_UADDR64:
233 /* Don't issue write if unnecessary; avoid COW page fault */
234 if (*where != symval + rela->r_addend) {
235 *where = symval + rela->r_addend;
/* DTPMOD64: store the module index for dynamic TLS lookup. */
238 case R_PPC64_DTPMOD64:
239 *where = (Elf_Addr) defobj->tlsindex;
241 case R_PPC64_TPREL64:
243 * We lazily allocate offsets for static TLS as we
244 * see the first relocation that references the
245 * TLS block. This allows us to support (small
246 * amounts of) static TLS in dynamically loaded
247 * modules. If we run out of space, we generate an
250 if (!defobj->tls_done) {
251 if (!allocate_tls_offset(
252 __DECONST(Obj_Entry *, defobj))) {
253 _rtld_error("%s: No space available for static "
254 "Thread Local Storage", obj->path);
/* TP-relative offset, biased by TLS_TP_OFFSET/TLS_TCB_SIZE layout. */
259 *(Elf_Addr **)where = *where * sizeof(Elf_Addr)
260 + (Elf_Addr *)(def->st_value + rela->r_addend
261 + defobj->tlsoffset - TLS_TP_OFFSET - TLS_TCB_SIZE);
263 case R_PPC64_DTPREL64:
264 *where += (Elf_Addr)(def->st_value + rela->r_addend
267 case R_PPC_RELATIVE: /* doubleword64 B + A */
268 symval = (Elf_Addr)(obj->relocbase + rela->r_addend);
270 /* As above, don't issue write unnecessarily */
271 if (*where != symval) {
277 * These are deferred until all other relocations
278 * have been done. All we do here is make sure
279 * that the COPY relocation is not in a shared
280 * library. They are allowed only in executable
283 if (!obj->mainprog) {
284 _rtld_error("%s: Unexpected R_COPY "
285 " relocation in shared library",
290 case R_PPC_IRELATIVE:
292 * These will be handled by reloc_iresolve().
294 obj->irelative = true;
298 * These will be handled by the plt/jmpslot routines
/* Unknown/unsupported relocation type: report and fail. */
303 _rtld_error("%s: Unsupported relocation type %ld"
304 " in non-PLT relocations\n", obj->path,
305 ELF_R_TYPE(rela->r_info));
313 * Process non-PLT relocations
/*
 * reloc_non_plt: drive reloc_nonplt_object() over obj's whole RELA table,
 * then flush the instruction cache for executable segments. A per-symbol
 * SymCache is mmap'ed (not alloca'd) because stack may be scarce when
 * called from a thread. NOTE(review): gappy listing — error paths and
 * some declarations are not visible.
 */
316 reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
317 RtldLockState *lockstate)
319 const Elf_Rela *relalim;
320 const Elf_Rela *rela;
321 const Elf_Phdr *phdr;
323 int bytes = obj->dynsymcount * sizeof(SymCache);
327 * The dynamic loader may be called from a thread, we have
328 * limited amounts of stack available so we cannot use alloca().
/* No cache when relocating rtld itself (obj == obj_rtld). */
330 if (obj != obj_rtld) {
331 cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON,
333 if (cache == MAP_FAILED)
339 * From the SVR4 PPC ABI:
340 * "The PowerPC family uses only the Elf32_Rela relocation
341 * entries with explicit addends."
343 relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
344 for (rela = obj->rela; rela < relalim; rela++) {
345 if (reloc_nonplt_object(obj_rtld, obj, rela, cache, flags,
352 munmap(cache, bytes);
355 * Synchronize icache for executable segments in case we made
/* Walk program headers; sync icache for PT_LOAD segments with PF_X. */
358 for (phdr = obj->phdr;
359 (const char *)phdr < (const char *)obj->phdr + obj->phsize;
361 if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_X) != 0) {
362 __syncicache(obj->relocbase + phdr->p_vaddr,
372 * Initialise a PLT slot to the resolving trampoline
/*
 * reloc_plt_object: point one PLT slot at its glink (lazy-resolution)
 * stub so the first call traps into the binding routine. The stub
 * address formula differs between ELFv1 and ELFv2 (see #if below).
 */
375 reloc_plt_object(Obj_Entry *obj, const Elf_Rela *rela)
377 Elf_Addr *where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
/* reloff = index of this slot within the .rela.plt array. */
380 reloff = rela - obj->pltrela;
382 dbg(" reloc_plt_object: where=%p,reloff=%lx,glink=%#lx", (void *)where,
385 #if !defined(_CALL_ELF) || _CALL_ELF == 1
386 /* Glink code is 3 instructions after the first 32k, 2 before */
387 *where = (Elf_Addr)obj->glink + 32 +
388 8*((reloff < 0x8000) ? reloff : 0x8000) +
389 12*((reloff < 0x8000) ? 0 : (reloff - 0x8000));
391 /* 64-Bit ELF V2 ABI Specification, sec. 4.2.5.3. */
392 *where = (Elf_Addr)obj->glink + 4*reloff + 32;
399 * Process the PLT relocations.
/*
 * reloc_plt: initialize every PLT slot for lazy binding via
 * reloc_plt_object(). On ELFv2, tolerate (and defer) IRELATIVE entries
 * that lld illegally emits into the PLT. NOTE(review): gappy listing —
 * the error-return path of reloc_plt_object() is partly hidden.
 */
402 reloc_plt(Obj_Entry *obj, int flags __unused, RtldLockState *lockstate __unused)
404 const Elf_Rela *relalim;
405 const Elf_Rela *rela;
407 if (obj->pltrelasize != 0) {
408 relalim = (const Elf_Rela *)((const char *)obj->pltrela +
410 for (rela = obj->pltrela; rela < relalim; rela++) {
412 #if defined(_CALL_ELF) && _CALL_ELF == 2
/* Defer illegal IRELATIVE-in-PLT entries to reloc_iresolve(). */
413 if (ELF_R_TYPE(rela->r_info) == R_PPC_IRELATIVE) {
414 dbg("ABI violation - found IRELATIVE in the PLT.");
415 obj->irelative = true;
420 * PowerPC(64) .rela.plt is composed of an array of
421 * R_PPC_JMP_SLOT relocations. Unlike other platforms,
422 * this is the ONLY relocation type that is valid here.
424 assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);
426 if (reloc_plt_object(obj, rela) < 0) {
436 * LD_BIND_NOW was set - force relocation for all jump slots
/*
 * reloc_jmpslots: eagerly bind every PLT jump slot (LD_BIND_NOW path),
 * resolving each symbol with SYMLOOK_IN_PLT and writing the target via
 * reloc_jmpslot(). IFUNC slots are only flagged here, not resolved.
 * NOTE(review): gappy listing — `continue`s/returns between visible
 * lines are hidden.
 */
439 reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
441 const Obj_Entry *defobj;
442 const Elf_Rela *relalim;
443 const Elf_Rela *rela;
448 relalim = (const Elf_Rela *)((const char *)obj->pltrela +
450 for (rela = obj->pltrela; rela < relalim; rela++) {
451 /* This isn't actually a jump slot, ignore it. */
452 if (ELF_R_TYPE(rela->r_info) == R_PPC_IRELATIVE)
454 assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);
455 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
456 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
457 SYMLOOK_IN_PLT | flags, NULL, lockstate);
459 dbg("reloc_jmpslots: sym not found");
463 target = (Elf_Addr)(defobj->relocbase + def->st_value);
465 if (def == &sym_zero) {
466 /* Zero undefined weak symbols */
467 #if !defined(_CALL_ELF) || _CALL_ELF == 1
/* ELFv1: a slot is a whole function descriptor, zero all of it. */
468 bzero(where, sizeof(struct funcdesc));
473 if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
474 /* LD_BIND_NOW, ifunc in shared lib.*/
475 obj->gnu_ifunc = true;
478 reloc_jmpslot(where, target, defobj, obj,
479 (const Elf_Rel *) rela);
/* All slots bound; remember so lazy binding is skipped later. */
483 obj->jmpslots_done = true;
490 * Update the value of a PLT jump slot.
/*
 * reloc_jmpslot: store the resolved function address into a PLT slot.
 * ELFv1 copies a 3-word function descriptor with isync ordering so a
 * racing reader never sees a new entry point with stale toc/env words;
 * ELFv2 is a single pointer store. NOTE(review): gappy listing.
 */
493 reloc_jmpslot(Elf_Addr *wherep, Elf_Addr target, const Obj_Entry *defobj __unused,
494 const Obj_Entry *obj __unused, const Elf_Rel *rel __unused)
498 * At the PLT entry pointed at by `wherep', construct
499 * a direct transfer to the now fully resolved function
503 #if !defined(_CALL_ELF) || _CALL_ELF == 1
504 dbg(" reloc_jmpslot: where=%p, target=%p (%#lx + %#lx)",
505 (void *)wherep, (void *)target, *(Elf_Addr *)target,
506 (Elf_Addr)defobj->relocbase);
512 * For the trampoline, the second two elements of the function
513 * descriptor are unused, so we are fine replacing those at any time
514 * with the real ones with no thread safety implications. However, we
515 * need to make sure the main entry point pointer ([0]) is seen to be
516 * modified *after* the second two elements. This can't be done in
517 * general, since there are no barriers in the reading code, but put in
518 * some isyncs to at least make it a little better.
520 memcpy(wherep, (void *)target, sizeof(struct funcdesc));
/* Re-store words [2],[1] then [0], fencing with isync between stores. */
521 wherep[2] = ((Elf_Addr *)target)[2];
522 wherep[1] = ((Elf_Addr *)target)[1];
523 __asm __volatile ("isync" : : : "memory");
524 wherep[0] = ((Elf_Addr *)target)[0];
525 __asm __volatile ("isync" : : : "memory");
527 if (((struct funcdesc *)(wherep))->addr < (Elf_Addr)defobj->relocbase) {
529 * It is possible (LD_BIND_NOW) that the function
530 * descriptor we are copying has not yet been relocated.
531 * If this happens, fix it. Don't worry about threading in
532 * this case since LD_BIND_NOW makes it irrelevant.
535 ((struct funcdesc *)(wherep))->addr +=
536 (Elf_Addr)defobj->relocbase;
537 ((struct funcdesc *)(wherep))->toc +=
538 (Elf_Addr)defobj->relocbase;
541 dbg(" reloc_jmpslot: where=%p, target=%p", (void *)wherep,
544 assert(target >= (Elf_Addr)defobj->relocbase);
/* ELFv2 path: avoid the write (and a COW fault) if already correct. */
549 if (*wherep != target)
/*
 * reloc_iresolve: run IFUNC resolvers for deferred R_PPC_IRELATIVE
 * relocations (both in RELA proper and, to work around lld, in the PLT).
 * The bind lock is dropped around each resolver call since resolvers run
 * arbitrary user code. NOTE(review): gappy listing — the stores of
 * `target` into `where` fall in hidden lines.
 */
559 reloc_iresolve(Obj_Entry *obj,
560 struct Struct_RtldLockState *lockstate)
563 * Since PLT slots on PowerPC64 are always R_PPC_JMP_SLOT,
564 * R_PPC_IRELATIVE is in RELA.
566 #if !defined(_CALL_ELF) || _CALL_ELF == 1
569 /* XXX not implemented */
572 const Elf_Rela *relalim;
573 const Elf_Rela *rela;
574 Elf_Addr *where, target, *ptr;
579 relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
580 for (rela = obj->rela; rela < relalim; rela++) {
581 if (ELF_R_TYPE(rela->r_info) == R_PPC_IRELATIVE) {
/* ptr = resolver function address; where = slot to receive the result. */
582 ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
583 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
585 lock_release(rtld_bind_lock, lockstate);
586 target = call_ifunc_resolver(ptr);
587 wlock_acquire(rtld_bind_lock, lockstate);
593 * XXX Remove me when lld is fixed!
594 * LLD currently makes illegal relocations in the PLT.
596 relalim = (const Elf_Rela *)((const char *)obj->pltrela + obj->pltrelasize);
597 for (rela = obj->pltrela; rela < relalim; rela++) {
598 if (ELF_R_TYPE(rela->r_info) == R_PPC_IRELATIVE) {
599 ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
600 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
602 lock_release(rtld_bind_lock, lockstate);
603 target = call_ifunc_resolver(ptr);
604 wlock_acquire(rtld_bind_lock, lockstate);
/* All IRELATIVE entries handled; clear the deferred-work flag. */
610 obj->irelative = false;
/*
 * reloc_gnu_ifunc: resolve PLT jump slots whose symbols are
 * STT_GNU_IFUNC by calling their resolvers (bind lock dropped around the
 * call) and binding the slot via reloc_jmpslot(). ELFv1 build is an
 * explicit not-implemented error. NOTE(review): gappy listing.
 */
616 reloc_gnu_ifunc(Obj_Entry *obj __unused, int flags __unused,
617 struct Struct_RtldLockState *lockstate __unused)
619 #if !defined(_CALL_ELF) || _CALL_ELF == 1
620 _rtld_error("reloc_gnu_ifunc(): Not implemented!");
621 /* XXX not implemented */
625 const Elf_Rela *relalim;
626 const Elf_Rela *rela;
627 Elf_Addr *where, target;
629 const Obj_Entry *defobj;
633 relalim = (const Elf_Rela *)((const char *)obj->pltrela + obj->pltrelasize);
634 for (rela = obj->pltrela; rela < relalim; rela++) {
635 if (ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT) {
636 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
637 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
638 SYMLOOK_IN_PLT | flags, NULL, lockstate);
/* Only IFUNC symbols are handled here; others were bound earlier. */
641 if (ELF_ST_TYPE(def->st_info) != STT_GNU_IFUNC)
643 lock_release(rtld_bind_lock, lockstate);
644 target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
645 wlock_acquire(rtld_bind_lock, lockstate);
646 reloc_jmpslot(where, target, defobj, obj,
647 (const Elf_Rel *)rela);
/* Done with this object's IFUNC jump slots. */
650 obj->gnu_ifunc = false;
/*
 * reloc_iresolve_nonplt: no-op stub on this platform — non-PLT
 * IRELATIVE entries are already covered by reloc_iresolve() above
 * (both parameters are __unused).
 */
656 reloc_iresolve_nonplt(Obj_Entry *obj __unused,
657 struct Struct_RtldLockState *lockstate __unused)
/*
 * init_pltgot: install the lazy-binding trampoline into the object's
 * PLTGOT header. ELFv2 stores {_rtld_bind_start, obj}; ELFv1 copies the
 * trampoline's function descriptor and stashes obj in word [2].
 * NOTE(review): gappy listing — behavior when pltgot is NULL is hidden
 * beyond the visible early check.
 */
663 init_pltgot(Obj_Entry *obj)
667 pltcall = obj->pltgot;
669 if (pltcall == NULL) {
673 #if defined(_CALL_ELF) && _CALL_ELF == 2
674 pltcall[0] = (Elf_Addr)&_rtld_bind_start;
675 pltcall[1] = (Elf_Addr)obj;
677 memcpy(pltcall, _rtld_bind_start, sizeof(struct funcdesc));
678 pltcall[2] = (Elf_Addr)obj;
683 * Actual values are 32 bit.
/* AT_HWCAP2 bits cached at startup; declared u_long but holds 32 bits. */
686 u_long cpu_features2;
/*
 * powerpc64_abi_variant_hook: cache AT_HWCAP/AT_HWCAP2 from the auxv
 * into cpu_features/cpu_features2. cpu_features2 defaults to all-ones
 * (-1UL) when AT_HWCAP2 is absent.
 */
689 powerpc64_abi_variant_hook(Elf_Auxinfo** aux_info)
692 * Since aux_info[] is easier to work with than aux, go ahead and
693 * initialize cpu_features / cpu_features2.
696 cpu_features2 = -1UL;
697 if (aux_info[AT_HWCAP] != NULL)
698 cpu_features = (uint32_t)aux_info[AT_HWCAP]->a_un.a_val;
699 if (aux_info[AT_HWCAP2] != NULL)
700 cpu_features2 = (uint32_t)aux_info[AT_HWCAP2]->a_un.a_val;
704 ifunc_init(Elf_Auxinfo aux_info[__min_size(AT_COUNT)] __unused)
/*
 * allocate_initial_tls: size the static TLS block (max offset so far
 * plus RTLD_STATIC_TLS_EXTRA slack for later dlopens), allocate it, and
 * point the thread register r13 at it, biased by TP offset + TCB size.
 */
716 allocate_initial_tls(Obj_Entry *list)
721 * Fix the size of the static TLS block by using the maximum
722 * offset allocated so far and adding a bit for dynamic modules to
726 tls_static_space = tls_last_offset + tls_last_size + RTLD_STATIC_TLS_EXTRA;
728 tp = (Elf_Addr **)((char *)allocate_tls(list, NULL, TLS_TCB_SIZE, 16)
729 + TLS_TP_OFFSET + TLS_TCB_SIZE);
/* r13 is the PowerPC thread pointer register. */
731 __asm __volatile("mr 13,%0" :: "r"(tp));
/*
 * __tls_get_addr: dynamic TLS lookup entry point. Reads the thread
 * pointer from r13, un-biases it back to the TLS base, and defers to
 * tls_get_addr_common() for the (module, offset) lookup; the returned
 * pointer is re-biased by TLS_DTV_OFFSET per the PPC TLS ABI.
 */
735 __tls_get_addr(tls_index* ti)
740 __asm __volatile("mr %0,13" : "=r"(tp));
741 p = tls_get_addr_common((Elf_Addr**)((Elf_Addr)tp - TLS_TP_OFFSET
742 - TLS_TCB_SIZE), ti->ti_module, ti->ti_offset);
744 return (p + TLS_DTV_OFFSET);