/*
 * Copyright 1996, 1997, 1998, 1999 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Dynamic linker for ELF.
 *
 * John Polstra <jdp@polstra.com>.
 */
34 #include <sys/param.h>
36 #include <machine/sysarch.h>
52 * Process the special R_X86_64_COPY relocations in the main program. These
53 * copy data from a shared object into a region in the main program's BSS
56 * Returns 0 on success, -1 on failure.
59 do_copy_relocations(Obj_Entry *dstobj)
61 const Elf_Rela *relalim;
64 assert(dstobj->mainprog); /* COPY relocations are invalid elsewhere */
66 relalim = (const Elf_Rela *) ((caddr_t) dstobj->rela + dstobj->relasize);
67 for (rela = dstobj->rela; rela < relalim; rela++) {
68 if (ELF_R_TYPE(rela->r_info) == R_X86_64_COPY) {
70 const Elf_Sym *dstsym;
75 const Elf_Sym *srcsym;
79 dstaddr = (void *) (dstobj->relocbase + rela->r_offset);
80 dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
81 name = dstobj->strtab + dstsym->st_name;
82 hash = elf_hash(name);
83 size = dstsym->st_size;
84 ve = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
86 for (srcobj = dstobj->next; srcobj != NULL; srcobj = srcobj->next)
87 if ((srcsym = symlook_obj(name, hash, srcobj, ve, 0)) != NULL)
91 _rtld_error("Undefined symbol \"%s\" referenced from COPY"
92 " relocation in %s", name, dstobj->path);
96 srcaddr = (const void *) (srcobj->relocbase + srcsym->st_value);
97 memcpy(dstaddr, srcaddr, size);
104 /* Initialize the special GOT entries. */
106 init_pltgot(Obj_Entry *obj)
108 if (obj->pltgot != NULL) {
109 obj->pltgot[1] = (Elf_Addr) obj;
110 obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start;
114 /* Process the non-PLT relocations. */
116 reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld)
118 const Elf_Rela *relalim;
119 const Elf_Rela *rela;
121 int bytes = obj->nchains * sizeof(SymCache);
125 * The dynamic loader may be called from a thread, we have
126 * limited amounts of stack available so we cannot use alloca().
128 cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON, -1, 0);
129 if (cache == MAP_FAILED)
132 relalim = (const Elf_Rela *) ((caddr_t) obj->rela + obj->relasize);
133 for (rela = obj->rela; rela < relalim; rela++) {
134 Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset);
135 Elf32_Addr *where32 = (Elf32_Addr *)where;
137 switch (ELF_R_TYPE(rela->r_info)) {
145 const Obj_Entry *defobj;
147 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
152 *where = (Elf_Addr) (defobj->relocbase + def->st_value + rela->r_addend);
158 * I don't think the dynamic linker should ever see this
159 * type of relocation. But the binutils-2.6 tools sometimes
164 const Obj_Entry *defobj;
166 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
171 *where32 = (Elf32_Addr) (unsigned long) (defobj->relocbase +
172 def->st_value + rela->r_addend - (Elf_Addr) where);
175 /* missing: R_X86_64_GOT32 R_X86_64_PLT32 */
179 * These are deferred until all other relocations have
180 * been done. All we do here is make sure that the COPY
181 * relocation is not in a shared library. They are allowed
182 * only in executable files.
184 if (!obj->mainprog) {
185 _rtld_error("%s: Unexpected R_X86_64_COPY relocation"
186 " in shared library", obj->path);
191 case R_X86_64_GLOB_DAT:
194 const Obj_Entry *defobj;
196 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
201 *where = (Elf_Addr) (defobj->relocbase + def->st_value);
205 case R_X86_64_TPOFF64:
208 const Obj_Entry *defobj;
210 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
216 * We lazily allocate offsets for static TLS as we
217 * see the first relocation that references the
218 * TLS block. This allows us to support (small
219 * amounts of) static TLS in dynamically loaded
220 * modules. If we run out of space, we generate an
223 if (!defobj->tls_done) {
224 if (!allocate_tls_offset((Obj_Entry*) defobj)) {
225 _rtld_error("%s: No space available for static "
226 "Thread Local Storage", obj->path);
231 *where = (Elf_Addr) (def->st_value - defobj->tlsoffset +
236 case R_X86_64_TPOFF32:
239 const Obj_Entry *defobj;
241 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
247 * We lazily allocate offsets for static TLS as we
248 * see the first relocation that references the
249 * TLS block. This allows us to support (small
250 * amounts of) static TLS in dynamically loaded
251 * modules. If we run out of space, we generate an
254 if (!defobj->tls_done) {
255 if (!allocate_tls_offset((Obj_Entry*) defobj)) {
256 _rtld_error("%s: No space available for static "
257 "Thread Local Storage", obj->path);
262 *where32 = (Elf32_Addr) (def->st_value -
268 case R_X86_64_DTPMOD64:
271 const Obj_Entry *defobj;
273 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
278 *where += (Elf_Addr) defobj->tlsindex;
282 case R_X86_64_DTPOFF64:
285 const Obj_Entry *defobj;
287 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
292 *where += (Elf_Addr) (def->st_value + rela->r_addend);
296 case R_X86_64_DTPOFF32:
299 const Obj_Entry *defobj;
301 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
306 *where32 += (Elf32_Addr) (def->st_value + rela->r_addend);
310 case R_X86_64_RELATIVE:
311 *where = (Elf_Addr)(obj->relocbase + rela->r_addend);
314 /* missing: R_X86_64_GOTPCREL, R_X86_64_32, R_X86_64_32S, R_X86_64_16, R_X86_64_PC16, R_X86_64_8, R_X86_64_PC8 */
317 _rtld_error("%s: Unsupported relocation type %u"
318 " in non-PLT relocations\n", obj->path,
319 (unsigned int)ELF_R_TYPE(rela->r_info));
326 munmap(cache, bytes);
330 /* Process the PLT relocations. */
332 reloc_plt(Obj_Entry *obj)
334 const Elf_Rela *relalim;
335 const Elf_Rela *rela;
337 relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
338 for (rela = obj->pltrela; rela < relalim; rela++) {
341 assert(ELF_R_TYPE(rela->r_info) == R_X86_64_JMP_SLOT);
343 /* Relocate the GOT slot pointing into the PLT. */
344 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
345 *where += (Elf_Addr)obj->relocbase;
350 /* Relocate the jump slots in an object. */
352 reloc_jmpslots(Obj_Entry *obj)
354 const Elf_Rela *relalim;
355 const Elf_Rela *rela;
357 if (obj->jmpslots_done)
359 relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
360 for (rela = obj->pltrela; rela < relalim; rela++) {
361 Elf_Addr *where, target;
363 const Obj_Entry *defobj;
365 assert(ELF_R_TYPE(rela->r_info) == R_X86_64_JMP_SLOT);
366 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
367 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, true, NULL);
370 target = (Elf_Addr)(defobj->relocbase + def->st_value + rela->r_addend);
371 reloc_jmpslot(where, target, defobj, obj, (const Elf_Rel *)rela);
373 obj->jmpslots_done = true;
378 allocate_initial_tls(Obj_Entry *objs)
381 * Fix the size of the static TLS block by using the maximum
382 * offset allocated so far and adding a bit for dynamic modules to
385 tls_static_space = tls_last_offset + RTLD_STATIC_TLS_EXTRA;
386 amd64_set_fsbase(allocate_tls(objs, 0,
387 3*sizeof(Elf_Addr), sizeof(Elf_Addr)));
390 void *__tls_get_addr(tls_index *ti)
395 __asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
398 return tls_get_addr_common(&segbase[1], ti->ti_module, ti->ti_offset);