1 /* $NetBSD: ppc_reloc.c,v 1.10 2001/09/10 06:09:41 mycroft Exp $ */
4 * SPDX-License-Identifier: BSD-2-Clause-NetBSD
6 * Copyright (C) 1998 Tsubai Masanari
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. The name of the author may not be used to endorse or promote products
18 * derived from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
21 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
22 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
23 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/param.h>
42 #include <machine/cpu.h>
43 #include <machine/md_var.h>
48 #if !defined(_CALL_ELF) || _CALL_ELF == 1
/*
 * Resolve R_PPC_COPY relocations: for each COPY reloc in the main
 * executable, find the symbol's defining object among the objects loaded
 * after it and memcpy the initialized data into the executable's own
 * pre-allocated storage.  (Interior lines are elided in this listing;
 * the visible statements are annotated below.)
 */
57 * Process the R_PPC_COPY relocations
60 do_copy_relocations(Obj_Entry *dstobj)
62 	const Elf_Rela *relalim;
66 * COPY relocs are invalid outside of the main program
/* COPY relocations may appear only in the main program's image. */
68 	assert(dstobj->mainprog);
70 	relalim = (const Elf_Rela *) ((caddr_t) dstobj->rela +
72 	for (rela = dstobj->rela; rela < relalim; rela++) {
74 		const Elf_Sym *dstsym;
78 		const Elf_Sym *srcsym = NULL;
79 		const Obj_Entry *srcobj, *defobj;
/* Skip every relocation type other than R_PPC_COPY. */
83 		if (ELF_R_TYPE(rela->r_info) != R_PPC_COPY) {
/*
 * Destination lives in the executable; the symbol entry supplies
 * the name looked up and the number of bytes to copy.
 */
87 		dstaddr = (void *) (dstobj->relocbase + rela->r_offset);
88 		dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
89 		name = dstobj->strtab + dstsym->st_name;
90 		size = dstsym->st_size;
91 		symlook_init(&req, name);
92 		req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
93 		req.flags = SYMLOOK_EARLY;
/*
 * Walk the global object list, starting after the main program,
 * until an object defines the symbol (loop exit test elided here).
 */
95 		for (srcobj = globallist_next(dstobj); srcobj != NULL;
96 		    srcobj = globallist_next(srcobj)) {
97 			res = symlook_obj(&req, srcobj);
100 			defobj = req.defobj_out;
/* No defining object found: report and (presumably) fail — TODO confirm
 * the elided error path returns -1 as in sibling rtld back-ends. */
105 		if (srcobj == NULL) {
106 			_rtld_error("Undefined symbol \"%s\" "
107 			" referenced from COPY"
108 			" relocation in %s", name, dstobj->path);
/* Copy the defining object's initialized data into the executable. */
112 		srcaddr = (const void *) (defobj->relocbase+srcsym->st_value);
113 		memcpy(dstaddr, srcaddr, size);
114 		dbg("copy_reloc: src=%p,dst=%p,size=%zd\n",srcaddr,dstaddr,size);
/*
 * Self-relocate the run-time linker before any other rtld facility is
 * usable: scan its _DYNAMIC array for DT_RELA/DT_RELASZ, then apply the
 * relocations.  Only base-relative (B + A) fixups are performed here, as
 * no symbol lookup machinery exists yet.
 */
122  * Perform early relocation of the run-time linker image
125 reloc_non_plt_self(Elf_Dyn *dynp, Elf_Addr relocbase)
127 	const Elf_Rela *rela = NULL, *relalim;
132 	 * Extract the rela/relasz values from the dynamic section
/* First pass over _DYNAMIC: remember where the Rela table is and its size. */
134 	for (; dynp->d_tag != DT_NULL; dynp++) {
135 		switch (dynp->d_tag) {
137 			rela = (const Elf_Rela *)(relocbase+dynp->d_un.d_ptr);
140 			relasz = dynp->d_un.d_val;
146 	 * Relocate these values
/* Second pass: every visible fixup stores relocbase + addend. */
148 	relalim = (const Elf_Rela *)((caddr_t)rela + relasz);
149 	for (; rela < relalim; rela++) {
150 		where = (Elf_Addr *)(relocbase + rela->r_offset);
151 		*where = (Elf_Addr)(relocbase + rela->r_addend);
/*
 * Apply one non-PLT Rela relocation to `obj`.  `cache` memoizes symbol
 * lookups, `flags`/`lockstate` are passed through to find_symdef().
 * Returns nonzero on failure (error paths elided in this listing —
 * TODO confirm against the full source).
 */
157  * Relocate a non-PLT object with addend.
160 reloc_nonplt_object(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela,
161     SymCache *cache, int flags, RtldLockState *lockstate)
163 	Elf_Addr *where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
165 	const Obj_Entry *defobj;
168 	switch (ELF_R_TYPE(rela->r_info)) {
173 	case R_PPC64_UADDR64:	/* doubleword64 S + A */
/* Resolve the symbol, then compute S + A relative to its defining object. */
176 		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
177 		    flags, cache, lockstate);
182 		tmp = (Elf_Addr)(defobj->relocbase + def->st_value +
185 		/* Don't issue write if unnecessary; avoid COW page fault */
191 	case R_PPC_RELATIVE:  /* doubleword64 B + A */
192 		tmp = (Elf_Addr)(obj->relocbase + rela->r_addend);
194 		/* As above, don't issue write unnecessarily */
202 		 * These are deferred until all other relocations
203 		 * have been done. All we do here is make sure
204 		 * that the COPY relocation is not in a shared
205 		 * library. They are allowed only in executable
/* COPY in a shared object is a link-editor bug: refuse it here. */
208 		if (!obj->mainprog) {
209 			_rtld_error("%s: Unexpected R_COPY "
210 			" relocation in shared library",
218 		 * These will be handled by the plt/jmpslot routines
/* TLS module-ID relocation: store the defining object's tls index. */
222 	case R_PPC64_DTPMOD64:
223 		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
224 		    flags, cache, lockstate);
229 		*where = (Elf_Addr) defobj->tlsindex;
/* Thread-pointer-relative TLS relocation (initial-exec model). */
233 	case R_PPC64_TPREL64:
234 		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
235 		    flags, cache, lockstate);
241 		 * We lazily allocate offsets for static TLS as we
242 		 * see the first relocation that references the
243 		 * TLS block. This allows us to support (small
244 		 * amounts of) static TLS in dynamically loaded
245 		 * modules. If we run out of space, we generate an
248 		if (!defobj->tls_done) {
249 			if (!allocate_tls_offset((Obj_Entry*) defobj)) {
250 				_rtld_error("%s: No space available for static "
251 				    "Thread Local Storage", obj->path);
/*
 * Final TP-relative value; the existing *where contents are scaled in
 * as well — NOTE(review): the `*where * sizeof(Elf_Addr)` term folds a
 * pre-set word into the address; confirm intent against the full file.
 */
256 		*(Elf_Addr **)where = *where * sizeof(Elf_Addr)
257 		    + (Elf_Addr *)(def->st_value + rela->r_addend
258 		    + defobj->tlsoffset - TLS_TP_OFFSET);
/* DTV-relative TLS offset (general-dynamic model). */
262 	case R_PPC64_DTPREL64:
263 		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
264 		    flags, cache, lockstate);
269 		*where += (Elf_Addr)(def->st_value + rela->r_addend
/* Anything else is unsupported: report the numeric type and fail. */
275 		_rtld_error("%s: Unsupported relocation type %ld"
276 		    " in non-PLT relocations\n", obj->path,
277 		    ELF_R_TYPE(rela->r_info));
/*
 * Apply every non-PLT relocation of `obj` by iterating its Rela table
 * and delegating each entry to reloc_nonplt_object().  A SymCache sized
 * by dynsymcount is mmap'ed (not alloca'ed — stack is scarce when
 * called from a thread) and released afterwards; the icache is then
 * synchronized in case text was patched.
 */
285  * Process non-PLT relocations
288 reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
289     RtldLockState *lockstate)
291 	const Elf_Rela *relalim;
292 	const Elf_Rela *rela;
294 	int bytes = obj->dynsymcount * sizeof(SymCache);
/* IFUNC pass is a no-op on this platform. */
297 	if ((flags & SYMLOOK_IFUNC) != 0)
298 		/* XXX not implemented */
302 	 * The dynamic loader may be called from a thread, we have
303 	 * limited amounts of stack available so we cannot use alloca().
/* rtld itself gets no cache (elided else-branch presumably leaves it NULL). */
305 	if (obj != obj_rtld) {
306 		cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON,
308 		if (cache == MAP_FAILED)
314 	 * From the SVR4 PPC ABI:
315 	 * "The PowerPC family uses only the Elf32_Rela relocation
316 	 * entries with explicit addends."
318 	relalim = (const Elf_Rela *)((caddr_t)obj->rela + obj->relasize);
319 	for (rela = obj->rela; rela < relalim; rela++) {
320 		if (reloc_nonplt_object(obj_rtld, obj, rela, cache, flags,
/* Release the lookup cache and flush the icache over the text segment. */
327 		munmap(cache, bytes);
329 	/* Synchronize icache for text seg in case we made any changes */
330 	__syncicache(obj->mapbase, obj->textsize);
/*
 * Point one PLT slot at the lazy-binding trampoline in the object's
 * .glink section so the first call through it enters the resolver.
 * `reloff` is the slot's index within the PLT Rela table.
 */
337  * Initialise a PLT slot to the resolving trampoline
340 reloc_plt_object(Obj_Entry *obj, const Elf_Rela *rela)
342 	Elf_Addr *where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
345 	reloff = rela - obj->pltrela;
347 	dbg(" reloc_plt_object: where=%p,reloff=%lx,glink=%#lx", (void *)where,
350 #if !defined(_CALL_ELF) || _CALL_ELF == 1
351 	/* Glink code is 3 instructions after the first 32k, 2 before */
/* ELFv1: glink stubs are 8 bytes each below index 0x8000, 12 above. */
352 	*where = (Elf_Addr)obj->glink + 32 +
353 	    8*((reloff < 0x8000) ? reloff : 0x8000) +
354 	    12*((reloff < 0x8000) ? 0 : (reloff - 0x8000));
/* ELFv2: uniform 4-byte glink stubs after a 32-byte header. */
356 	*where = (Elf_Addr)obj->glink + 4*reloff + 32;
/*
 * Set up lazy binding: initialize every PLT slot of `obj` to the glink
 * resolver trampoline via reloc_plt_object().  All PLT relocations are
 * required to be R_PPC_JMP_SLOT.
 */
364  * Process the PLT relocations.
367 reloc_plt(Obj_Entry *obj)
369 	const Elf_Rela *relalim;
370 	const Elf_Rela *rela;
/* Nothing to do when the object has no PLT relocations. */
372 	if (obj->pltrelasize != 0) {
373 		relalim = (const Elf_Rela *)((char *)obj->pltrela +
375 		for (rela = obj->pltrela; rela < relalim; rela++) {
376 			assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);
/* Propagate failure from the per-slot helper (error path elided). */
378 			if (reloc_plt_object(obj, rela) < 0) {
/*
 * Eager (LD_BIND_NOW) binding: resolve every jump slot of `obj` now
 * instead of waiting for the first call.  Undefined weak symbols
 * resolve to sym_zero and get a zeroed slot/descriptor; everything else
 * goes through reloc_jmpslot().
 */
389  * LD_BIND_NOW was set - force relocation for all jump slots
392 reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
394 	const Obj_Entry *defobj;
395 	const Elf_Rela *relalim;
396 	const Elf_Rela *rela;
401 	relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
402 	for (rela = obj->pltrela; rela < relalim; rela++) {
403 		assert(ELF_R_TYPE(rela->r_info) == R_PPC_JMP_SLOT);
404 		where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
405 		def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
406 		    SYMLOOK_IN_PLT | flags, NULL, lockstate);
408 			dbg("reloc_jmpslots: sym not found");
412 		target = (Elf_Addr)(defobj->relocbase + def->st_value);
/* Undefined weak: zero the slot (ELFv1 zeroes the whole descriptor). */
414 		if (def == &sym_zero) {
415 			/* Zero undefined weak symbols */
416 #if !defined(_CALL_ELF) || _CALL_ELF == 1
417 			bzero(where, sizeof(struct funcdesc));
422 		reloc_jmpslot(where, target, defobj, obj,
423 		    (const Elf_Rel *) rela);
/* Remember that eager binding was done so it is not repeated. */
427 	obj->jmpslots_done = true;
/*
 * Bind a single PLT jump slot to its resolved target.  On ELFv1 the
 * slot is a 3-word function descriptor copied from the target, written
 * in an order (toc/env first, entry last, with isyncs) that keeps
 * concurrent callers from seeing a half-updated descriptor; the ELFv2
 * path (elided here) stores the target address directly.
 */
434  * Update the value of a PLT jump slot.
437 reloc_jmpslot(Elf_Addr *wherep, Elf_Addr target, const Obj_Entry *defobj,
438     const Obj_Entry *obj, const Elf_Rel *rel)
442 	 * At the PLT entry pointed at by `wherep', construct
443 	 * a direct transfer to the now fully resolved function
447 #if !defined(_CALL_ELF) || _CALL_ELF == 1
448 	dbg(" reloc_jmpslot: where=%p, target=%p (%#lx + %#lx)",
449 	    (void *)wherep, (void *)target, *(Elf_Addr *)target,
450 	    (Elf_Addr)defobj->relocbase);
456 	 * For the trampoline, the second two elements of the function
457 	 * descriptor are unused, so we are fine replacing those at any time
458 	 * with the real ones with no thread safety implications. However, we
459 	 * need to make sure the main entry point pointer ([0]) is seen to be
460 	 * modified *after* the second two elements. This can't be done in
461 	 * general, since there are no barriers in the reading code, but put in
462 	 * some isyncs to at least make it a little better.
464 	memcpy(wherep, (void *)target, sizeof(struct funcdesc));
465 	wherep[2] = ((Elf_Addr *)target)[2];
466 	wherep[1] = ((Elf_Addr *)target)[1];
467 	__asm __volatile ("isync" : : : "memory");
468 	wherep[0] = ((Elf_Addr *)target)[0];
469 	__asm __volatile ("isync" : : : "memory");
/*
 * An entry address below the defining object's relocbase means the
 * source descriptor itself was not yet relocated (possible with
 * LD_BIND_NOW); rebase both its entry and TOC pointers in place.
 */
471 	if (((struct funcdesc *)(wherep))->addr < (Elf_Addr)defobj->relocbase) {
473 		 * It is possible (LD_BIND_NOW) that the function
474 		 * descriptor we are copying has not yet been relocated.
475 		 * If this happens, fix it. Don't worry about threading in
476 		 * this case since LD_BIND_NOW makes it irrelevant.
479 		((struct funcdesc *)(wherep))->addr +=
480 		    (Elf_Addr)defobj->relocbase;
481 		((struct funcdesc *)(wherep))->toc +=
482 		    (Elf_Addr)defobj->relocbase;
486 	dbg(" reloc_jmpslot: where=%p, target=%p", (void *)wherep,
/* IFUNC (irelative) resolution — intentionally a stub on this platform. */
497 reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
500 	/* XXX not implemented */
/* GNU IFUNC jump-slot resolution — intentionally a stub on this platform. */
505 reloc_gnu_ifunc(Obj_Entry *obj, int flags,
506     struct Struct_RtldLockState *lockstate)
509 	/* XXX not implemented */
/*
 * Install the lazy-binding resolver into the object's PLTGOT header.
 * ELFv2 stores the resolver address and the Obj_Entry pointer in the
 * first two slots; ELFv1 copies the resolver's function descriptor and
 * puts the Obj_Entry pointer in the descriptor's third word.
 */
514 init_pltgot(Obj_Entry *obj)
518 	pltcall = obj->pltgot;
/* No PLTGOT means nothing to initialize (early-return path elided). */
520 	if (pltcall == NULL) {
524 #if defined(_CALL_ELF) && _CALL_ELF == 2
525 	pltcall[0] = (Elf_Addr)&_rtld_bind_start;
526 	pltcall[1] = (Elf_Addr)obj;
528 	memcpy(pltcall, _rtld_bind_start, sizeof(struct funcdesc));
529 	pltcall[2] = (Elf_Addr)obj;
/* Per-platform IFUNC initialization hook; no auxv state is needed here. */
534 ifunc_init(Elf_Auxinfo aux_info[__min_size(AT_COUNT)] __unused)
/*
 * Allocate the initial thread's TLS block and install the thread
 * pointer.  PowerPC uses TLS variant I: tp points TLS_TP_OFFSET +
 * TLS_TCB_SIZE past the allocation and lives in register r13.
 */
546 allocate_initial_tls(Obj_Entry *list)
551 	 * Fix the size of the static TLS block by using the maximum
552 	 * offset allocated so far and adding a bit for dynamic modules to
556 	tls_static_space = tls_last_offset + tls_last_size + RTLD_STATIC_TLS_EXTRA;
558 	tp = (Elf_Addr **) ((char *)allocate_tls(list, NULL, TLS_TCB_SIZE, 16)
559 	    + TLS_TP_OFFSET + TLS_TCB_SIZE);
/* Publish the thread pointer in r13 per the PowerPC TLS ABI. */
561 	__asm __volatile("mr 13,%0" :: "r"(tp));
/*
 * Compiler-emitted TLS accessor: read the thread pointer from r13,
 * recover the TCB/DTV base by undoing the variant-I bias, resolve
 * (module, offset) through tls_get_addr_common(), and return the
 * address biased by TLS_DTV_OFFSET as the PowerPC TLS ABI requires.
 */
565 __tls_get_addr(tls_index* ti)
570 	__asm __volatile("mr %0,13" : "=r"(tp));
571 	p = tls_get_addr_common((Elf_Addr**)((Elf_Addr)tp - TLS_TP_OFFSET
572 	    - TLS_TCB_SIZE), ti->ti_module, ti->ti_offset);
574 	return (p + TLS_DTV_OFFSET);