2 * Copyright 1996, 1997, 1998, 1999 John D. Polstra.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
17 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
18 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
19 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
20 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
21 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
23 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * Dynamic linker for ELF.
31 * John Polstra <jdp@polstra.com>.
34 #include <sys/param.h>
36 #include <machine/sysarch.h>
52 * Process the special R_X86_64_COPY relocations in the main program. These
53 * copy data from a shared object into a region in the main program's BSS
56 * Returns 0 on success, -1 on failure.
/*
 * Process the R_X86_64_COPY relocations of the main executable: for each
 * COPY reloc, look the symbol up in the shared objects that follow the
 * executable on the object list and memcpy the defining object's data
 * into the executable's own storage at r_offset.
 *
 * NOTE(review): this listing is an excerpt — gaps in the embedded line
 * numbers mark elided source lines (declarations of `rela`, `dstaddr`,
 * `name`, `size`, `req`, `res`, `srcaddr`, the search-loop break, error
 * return paths, and closing braces are not visible here).
 */
59 do_copy_relocations(Obj_Entry *dstobj)
61 const Elf_Rela *relalim;
/* COPY relocations are only meaningful in the main program's image. */
64 assert(dstobj->mainprog); /* COPY relocations are invalid elsewhere */
/* One-past-the-end sentinel for the Rela table, computed from relasize. */
66 relalim = (const Elf_Rela *) ((caddr_t) dstobj->rela + dstobj->relasize);
67 for (rela = dstobj->rela; rela < relalim; rela++) {
68 if (ELF_R_TYPE(rela->r_info) == R_X86_64_COPY) {
70 const Elf_Sym *dstsym;
74 const Elf_Sym *srcsym;
75 const Obj_Entry *srcobj, *defobj;
/* Destination is inside dstobj's own (BSS) image. */
79 dstaddr = (void *) (dstobj->relocbase + rela->r_offset);
80 dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
81 name = dstobj->strtab + dstsym->st_name;
82 size = dstsym->st_size;
/* Versioned lookup: seed the request with the symbol's version entry. */
83 symlook_init(&req, name);
84 req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
/* Search every object AFTER the main program for the real definition. */
86 for (srcobj = dstobj->next; srcobj != NULL; srcobj = srcobj->next) {
87 res = symlook_obj(&req, srcobj);
88 defobj = req.defobj_out;
/* NOTE(review): the failure branch below presumably runs when no
 * defining object was found — the enclosing if is elided here. */
96 _rtld_error("Undefined symbol \"%s\" referenced from COPY"
97 " relocation in %s", name, dstobj->path);
/* Copy st_size bytes from the defining object into the executable. */
101 srcaddr = (const void *) (defobj->relocbase + srcsym->st_value);
102 memcpy(dstaddr, srcaddr, size);
109 /* Initialize the special GOT entries. */
/*
 * Set up the reserved PLTGOT slots for lazy binding: slot 1 gets the
 * object's own Obj_Entry pointer, slot 2 the address of the
 * _rtld_bind_start trampoline (the entry the PLT stubs jump through).
 * A NULL pltgot (object without a PLT) is silently skipped.
 */
111 init_pltgot(Obj_Entry *obj)
113 if (obj->pltgot != NULL) {
114 obj->pltgot[1] = (Elf_Addr) obj;
115 obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start;
119 /* Process the non-PLT relocations. */
/*
 * Walk obj's Rela table and apply every non-PLT relocation, dispatching
 * on ELF_R_TYPE.  `where` is the 64-bit patch site; `where32` aliases it
 * for the 32-bit relocation variants.  A per-symbol SymCache is heap
 * allocated (calloc) for every object except rtld itself, since rtld may
 * run on a thread with limited stack and cannot use alloca().
 *
 * NOTE(review): excerpted listing — case labels for the first two
 * relocation types, `break` statements, the cache free/cleanup and the
 * return paths are elided (see gaps in the embedded line numbers).
 */
121 reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, RtldLockState *lockstate)
123 const Elf_Rela *relalim;
124 const Elf_Rela *rela;
129 * The dynamic loader may be called from a thread, we have
130 * limited amounts of stack available so we cannot use alloca().
132 if (obj != obj_rtld) {
133 cache = calloc(obj->nchains, sizeof(SymCache));
/* A NULL cache is tolerated downstream, so the calloc result is unchecked. */
134 /* No need to check for NULL here */
138 relalim = (const Elf_Rela *) ((caddr_t) obj->rela + obj->relasize);
139 for (rela = obj->rela; rela < relalim; rela++) {
140 Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset);
141 Elf32_Addr *where32 = (Elf32_Addr *)where;
143 switch (ELF_R_TYPE(rela->r_info)) {
/* NOTE(review): case label elided — by the 64-bit absolute store below
 * this is presumably R_X86_64_64: S + A. */
151 const Obj_Entry *defobj;
153 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
154 false, cache, lockstate);
158 *where = (Elf_Addr) (defobj->relocbase + def->st_value + rela->r_addend);
/* NOTE(review): case label elided — the 32-bit PC-relative store below
 * (S + A - P) suggests R_X86_64_PC32, matching the comment fragment. */
164 * I don't think the dynamic linker should ever see this
165 * type of relocation. But the binutils-2.6 tools sometimes
170 const Obj_Entry *defobj;
172 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
173 false, cache, lockstate);
177 *where32 = (Elf32_Addr) (unsigned long) (defobj->relocbase +
178 def->st_value + rela->r_addend - (Elf_Addr) where);
181 /* missing: R_X86_64_GOT32 R_X86_64_PLT32 */
/* COPY relocations are handled later by do_copy_relocations(); here we
 * only reject them when they appear outside the main executable. */
185 * These are deferred until all other relocations have
186 * been done. All we do here is make sure that the COPY
187 * relocation is not in a shared library. They are allowed
188 * only in executable files.
190 if (!obj->mainprog) {
191 _rtld_error("%s: Unexpected R_X86_64_COPY relocation"
192 " in shared library", obj->path);
/* GLOB_DAT: store the symbol's absolute address (S), no addend. */
197 case R_X86_64_GLOB_DAT:
200 const Obj_Entry *defobj;
202 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
203 false, cache, lockstate);
207 *where = (Elf_Addr) (defobj->relocbase + def->st_value);
/* TPOFF64: 64-bit offset of the symbol in the static TLS block. */
211 case R_X86_64_TPOFF64:
214 const Obj_Entry *defobj;
216 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
217 false, cache, lockstate);
222 * We lazily allocate offsets for static TLS as we
223 * see the first relocation that references the
224 * TLS block. This allows us to support (small
225 * amounts of) static TLS in dynamically loaded
226 * modules. If we run out of space, we generate an
/* First TLS reference to defobj: reserve its static TLS offset now. */
229 if (!defobj->tls_done) {
230 if (!allocate_tls_offset((Obj_Entry*) defobj)) {
231 _rtld_error("%s: No space available for static "
232 "Thread Local Storage", obj->path);
/* NOTE(review): expression continuation elided — likely `rela->r_addend`. */
237 *where = (Elf_Addr) (def->st_value - defobj->tlsoffset +
/* TPOFF32: same computation as TPOFF64 but stored as 32 bits. */
242 case R_X86_64_TPOFF32:
245 const Obj_Entry *defobj;
247 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
248 false, cache, lockstate);
253 * We lazily allocate offsets for static TLS as we
254 * see the first relocation that references the
255 * TLS block. This allows us to support (small
256 * amounts of) static TLS in dynamically loaded
257 * modules. If we run out of space, we generate an
260 if (!defobj->tls_done) {
261 if (!allocate_tls_offset((Obj_Entry*) defobj)) {
262 _rtld_error("%s: No space available for static "
263 "Thread Local Storage", obj->path);
/* NOTE(review): continuation elided — likely defobj->tlsoffset/addend. */
268 *where32 = (Elf32_Addr) (def->st_value -
/* DTPMOD64: module ID of the object defining the TLS symbol. */
274 case R_X86_64_DTPMOD64:
277 const Obj_Entry *defobj;
279 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
280 false, cache, lockstate);
284 *where += (Elf_Addr) defobj->tlsindex;
/* DTPOFF64: offset of the symbol within its module's TLS block. */
288 case R_X86_64_DTPOFF64:
291 const Obj_Entry *defobj;
293 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
294 false, cache, lockstate);
298 *where += (Elf_Addr) (def->st_value + rela->r_addend);
302 case R_X86_64_DTPOFF32:
305 const Obj_Entry *defobj;
307 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
308 false, cache, lockstate);
312 *where32 += (Elf32_Addr) (def->st_value + rela->r_addend);
/* RELATIVE: load base plus addend — no symbol lookup needed. */
316 case R_X86_64_RELATIVE:
317 *where = (Elf_Addr)(obj->relocbase + rela->r_addend);
320 /* missing: R_X86_64_GOTPCREL, R_X86_64_32, R_X86_64_32S, R_X86_64_16, R_X86_64_PC16, R_X86_64_8, R_X86_64_PC8 */
/* Any other type is a hard error reported through _rtld_error(). */
323 _rtld_error("%s: Unsupported relocation type %u"
324 " in non-PLT relocations\n", obj->path,
325 (unsigned int)ELF_R_TYPE(rela->r_info));
336 /* Process the PLT relocations. */
/*
 * First-pass PLT fixup: every PLT relocation must be a JMP_SLOT.  Each
 * GOT entry initially holds a link-time offset into the PLT; adding the
 * load base (relocbase) turns it into an absolute pointer so the lazy
 * binding stub works at the object's actual mapping address.
 *
 * NOTE(review): excerpted listing — the `where` declaration, loop close
 * and return are elided (see gaps in the embedded line numbers).
 */
338 reloc_plt(Obj_Entry *obj)
340 const Elf_Rela *relalim;
341 const Elf_Rela *rela;
343 relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
344 for (rela = obj->pltrela; rela < relalim; rela++) {
347 assert(ELF_R_TYPE(rela->r_info) == R_X86_64_JMP_SLOT);
349 /* Relocate the GOT slot pointing into the PLT. */
350 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
/* Rebase: offset-into-PLT -> absolute address at the current mapping. */
351 *where += (Elf_Addr)obj->relocbase;
356 /* Relocate the jump slots in an object. */
/*
 * Eagerly resolve every JMP_SLOT (used e.g. for immediate binding):
 * look up each PLT symbol with find_symdef (in_plt = true, no cache),
 * compute its absolute target, and patch the slot via reloc_jmpslot().
 * Idempotent: bails out immediately if jmpslots_done is already set,
 * and sets the flag on completion.
 *
 * NOTE(review): excerpted listing — the early-return statement, the
 * find_symdef() trailing arguments, the NULL-def error path and final
 * return are elided (see gaps in the embedded line numbers).
 */
358 reloc_jmpslots(Obj_Entry *obj, RtldLockState *lockstate)
360 const Elf_Rela *relalim;
361 const Elf_Rela *rela;
363 if (obj->jmpslots_done)
365 relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
366 for (rela = obj->pltrela; rela < relalim; rela++) {
367 Elf_Addr *where, target;
369 const Obj_Entry *defobj;
371 assert(ELF_R_TYPE(rela->r_info) == R_X86_64_JMP_SLOT);
372 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
/* in_plt lookup; NULL cache since jump slots are resolved once each. */
373 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, true, NULL,
/* Absolute target: defining object's base + symbol value + addend. */
377 target = (Elf_Addr)(defobj->relocbase + def->st_value + rela->r_addend);
378 reloc_jmpslot(where, target, defobj, obj, (const Elf_Rel *)rela);
380 obj->jmpslots_done = true;
/*
 * Freeze the static TLS block size (last allocated offset plus
 * RTLD_STATIC_TLS_EXTRA slack for later dlopen'ed modules), allocate
 * the initial thread's TLS via allocate_tls() — reserving 3 Elf_Addr
 * words for the TCB, Elf_Addr-aligned — and install it as the thread's
 * %fs base through the amd64_set_fsbase() sysarch wrapper.
 */
385 allocate_initial_tls(Obj_Entry *objs)
388 * Fix the size of the static TLS block by using the maximum
389 * offset allocated so far and adding a bit for dynamic modules to
392 tls_static_space = tls_last_offset + RTLD_STATIC_TLS_EXTRA;
393 amd64_set_fsbase(allocate_tls(objs, 0,
394 3*sizeof(Elf_Addr), sizeof(Elf_Addr)));
/*
 * Compiler-generated entry point for dynamic TLS access.  Reads the
 * thread pointer from %fs:0 (the TCB self-pointer on amd64), then
 * delegates to tls_get_addr_common() with the DTV slot (&segbase[1])
 * and the module/offset pair from the tls_index descriptor.
 *
 * NOTE(review): excerpted listing — the `segbase` declaration is elided.
 */
397 void *__tls_get_addr(tls_index *ti)
402 __asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
405 return tls_get_addr_common(&segbase[1], ti->ti_module, ti->ti_offset);