2 * Copyright (c) 2014-2015 The FreeBSD Foundation
5 * Portions of this software were developed by Andrew Turner
6 * under sponsorship from the FreeBSD Foundation.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/types.h>
39 #include "rtld_printf.h"
42 * It is possible for the compiler to emit relocations for unaligned data.
43 * We handle this situation with these inlines.
/* True when x lies on a pointer-size boundary (low address bits clear). */
45 #define RELOC_ALIGNED_P(x) \
46 (((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)
49 * This is not the correct prototype, but we only need it for
50 * a function pointer to a simple asm function.
/*
 * Assembly entry points whose addresses are installed into TLS descriptor
 * slots by reloc_tlsdesc() below; the C prototypes exist only so we can
 * take their addresses (see the comment above about the prototype).
 */
52 void *_rtld_tlsdesc(void *);
53 void *_rtld_tlsdesc_dynamic(void *);
/*
 * Set up the reserved PLTGOT slots for lazy binding: slot 1 receives the
 * object's Obj_Entry pointer and slot 2 the address of the lazy-binding
 * trampoline _rtld_bind_start.
 * NOTE(review): the return-type line and the closing braces of this
 * function are missing from this excerpt of the file.
 */
58 init_pltgot(Obj_Entry *obj)
61 if (obj->pltgot != NULL) {
62 obj->pltgot[1] = (Elf_Addr) obj;
63 obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start;
/*
 * Satisfy R_AARCH64_COPY relocations in the main program by copying the
 * initialized data of each referenced symbol from the shared object that
 * defines it into the executable's own copy of the symbol.
 * NOTE(review): several lines (local declarations such as dstaddr/srcaddr/
 * name/size/req/res, loop-exit logic, and the function's return-type line
 * and closing braces) are missing from this excerpt.
 */
68 do_copy_relocations(Obj_Entry *dstobj)
70 const Obj_Entry *srcobj, *defobj;
71 const Elf_Rela *relalim;
73 const Elf_Sym *srcsym;
74 const Elf_Sym *dstsym;
/* COPY relocations may legitimately appear only in the main executable. */
83 * COPY relocs are invalid outside of the main program
85 assert(dstobj->mainprog);
87 relalim = (const Elf_Rela *)((char *)dstobj->rela +
89 for (rela = dstobj->rela; rela < relalim; rela++) {
90 if (ELF_R_TYPE(rela->r_info) != R_AARCH64_COPY)
/* Destination is inside the executable's writable data. */
93 dstaddr = (void *)(dstobj->relocbase + rela->r_offset);
94 dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
95 name = dstobj->strtab + dstsym->st_name;
96 size = dstsym->st_size;
98 symlook_init(&req, name);
99 req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
100 req.flags = SYMLOOK_EARLY;
/*
 * Search every object after the executable for a definition of
 * the symbol; the defining object supplies the source bytes.
 */
102 for (srcobj = globallist_next(dstobj); srcobj != NULL;
103 srcobj = globallist_next(srcobj)) {
104 res = symlook_obj(&req, srcobj);
106 srcsym = req.sym_out;
107 defobj = req.defobj_out;
/* No provider found: report and (presumably) fail — error path truncated here. */
111 if (srcobj == NULL) {
113 "Undefined symbol \"%s\" referenced from COPY relocation in %s",
118 srcaddr = (const void *)(defobj->relocbase + srcsym->st_value);
119 memcpy(dstaddr, srcaddr, size);
/* NOTE(review): the line below is a stray member of a struct (tls_data?)
 * whose surrounding declaration is missing from this excerpt. */
128 const Elf_Rela *rela;
/*
 * Allocate the per-relocation bookkeeping record handed to the dynamic
 * TLS descriptor resolver; it remembers the rela so the symbol lookup can
 * be performed lazily on first access.
 * NOTE(review): initialization of the record's other fields (e.g. the
 * object pointer and an index sentinel) and the return statement are
 * missing from this excerpt.
 */
131 static struct tls_data *
132 reloc_tlsdesc_alloc(Obj_Entry *obj, const Elf_Rela *rela)
134 struct tls_data *tlsdesc;
136 tlsdesc = xmalloc(sizeof(struct tls_data));
139 tlsdesc->rela = rela;
/*
 * With the bind lock already held, resolve the symbol behind a lazy TLS
 * descriptor and cache the computed static-TLS offset (object's tlsoffset
 * + symbol value + addend) in tlsdesc->index, which is also returned.
 * NOTE(review): the return-type line, the declarations of def/obj, and
 * parts of the find_symdef() call are missing from this excerpt.
 */
145 * Look up the symbol to find its tls index
148 rtld_tlsdesc_handle_locked(struct tls_data *tlsdesc, int flags,
149 RtldLockState *lockstate)
151 const Elf_Rela *rela;
153 const Obj_Entry *defobj;
156 rela = tlsdesc->rela;
159 def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, flags, NULL,
164 tlsdesc->index = defobj->tlsoffset + def->st_value + rela->r_addend;
166 return (tlsdesc->index);
/*
 * Lock-taking wrapper around rtld_tlsdesc_handle_locked(): returns the
 * cached TLS index when it has already been resolved (index >= 0), and
 * otherwise resolves it under the bind lock.  The index is re-checked
 * after acquiring the lock because another thread may have resolved it
 * in the meantime.
 * NOTE(review): the return-type line and closing brace are missing from
 * this excerpt.
 */
170 rtld_tlsdesc_handle(struct tls_data *tlsdesc, int flags)
172 RtldLockState lockstate;
174 /* We have already found the index, return it */
175 if (tlsdesc->index >= 0)
176 return (tlsdesc->index);
178 wlock_acquire(rtld_bind_lock, &lockstate);
179 /* tlsdesc->index may have been set by another thread */
180 if (tlsdesc->index == -1)
181 rtld_tlsdesc_handle_locked(tlsdesc, flags, &lockstate);
182 lock_release(rtld_bind_lock, &lockstate);
184 return (tlsdesc->index);
/*
 * Fill in a two-word TLS descriptor slot.  A symbol index of 0 means the
 * descriptor refers to the object's own TLS block, so the trivial asm
 * resolver and a precomputed offset suffice; otherwise install the
 * dynamic asm resolver plus a lazily-resolved tls_data record.
 * NOTE(review): the return-type line, the "} else {" between the two
 * branches, and the closing braces are missing from this excerpt.
 */
188 reloc_tlsdesc(Obj_Entry *obj, const Elf_Rela *rela, Elf_Addr *where)
190 if (ELF_R_SYM(rela->r_info) == 0) {
191 where[0] = (Elf_Addr)_rtld_tlsdesc;
192 where[1] = obj->tlsoffset + rela->r_addend;
194 where[0] = (Elf_Addr)_rtld_tlsdesc_dynamic;
195 where[1] = (Elf_Addr)reloc_tlsdesc_alloc(obj, rela);
/*
 * Walk the PLT relocation table: bias each R_AARCH64_JUMP_SLOT entry by
 * the load base (leaving it pointing at the lazy-binding stub) and set up
 * TLS descriptor slots; any other type is reported as an error.
 * NOTE(review): the return-type line, the declaration of "where", the
 * "break;" statements between cases, the "default:" label, and the
 * function's return/closing braces are missing from this excerpt.
 */
200 * Process the PLT relocations.
203 reloc_plt(Obj_Entry *obj)
205 const Elf_Rela *relalim;
206 const Elf_Rela *rela;
208 relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
209 for (rela = obj->pltrela; rela < relalim; rela++) {
212 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
214 switch(ELF_R_TYPE(rela->r_info)) {
215 case R_AARCH64_JUMP_SLOT:
216 *where += (Elf_Addr)obj->relocbase;
218 case R_AARCH64_TLSDESC:
219 reloc_tlsdesc(obj, rela, where);
222 _rtld_error("Unknown relocation type %u in PLT",
223 (unsigned int)ELF_R_TYPE(rela->r_info));
/*
 * Eagerly bind every jump slot (used when LD_BIND_NOW is in effect):
 * resolve each R_AARCH64_JUMP_SLOT to its final symbol address, and
 * force resolution of any not-yet-resolved dynamic TLS descriptors.
 * NOTE(review): the return-type line, the declarations of "where"/"def",
 * error/break control flow between cases, the "default:" label, and the
 * function's return/closing braces are missing from this excerpt.
 */
232 * LD_BIND_NOW was set - force relocation for all jump slots
235 reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
237 const Obj_Entry *defobj;
238 const Elf_Rela *relalim;
239 const Elf_Rela *rela;
241 struct tls_data *tlsdesc;
243 relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize);
244 for (rela = obj->pltrela; rela < relalim; rela++) {
247 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
248 switch(ELF_R_TYPE(rela->r_info)) {
249 case R_AARCH64_JUMP_SLOT:
250 def = find_symdef(ELF_R_SYM(rela->r_info), obj,
251 &defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
253 dbg("reloc_jmpslots: sym not found");
/* Write the final target address into the slot. */
257 *where = (Elf_Addr)(defobj->relocbase + def->st_value);
259 case R_AARCH64_TLSDESC:
/* Symbol index 0 descriptors were fully resolved by reloc_plt(). */
260 if (ELF_R_SYM(rela->r_info) != 0) {
261 tlsdesc = (struct tls_data *)where[1];
262 if (tlsdesc->index == -1)
263 rtld_tlsdesc_handle_locked(tlsdesc,
264 SYMLOOK_IN_PLT | flags, lockstate);
268 _rtld_error("Unknown relocation type %x in jmpslot",
269 (unsigned int)ELF_R_TYPE(rela->r_info));
/*
 * IRELATIVE/ifunc resolution stub — intentionally a no-op on this port.
 * NOTE(review): the return-type line, return statement, and braces are
 * missing from this excerpt.
 */
278 reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
281 /* XXX not implemented */
/*
 * GNU ifunc relocation stub — intentionally a no-op on this port.
 * NOTE(review): the return-type line, return statement, and braces are
 * missing from this excerpt.
 */
286 reloc_gnu_ifunc(Obj_Entry *obj, int flags,
287 struct Struct_RtldLockState *lockstate)
290 /* XXX not implemented */
/*
 * Bind a single jump slot to its resolved target (called from the lazy
 * binder).  Only R_AARCH64_JUMP_SLOT entries are expected here.
 * NOTE(review): the return-type line, the store of "target" into *where
 * guarded by the comparison below, the return statement, and the closing
 * braces are missing from this excerpt.
 */
295 reloc_jmpslot(Elf_Addr *where, Elf_Addr target, const Obj_Entry *defobj,
296 const Obj_Entry *obj, const Elf_Rel *rel)
299 assert(ELF_R_TYPE(rel->r_info) == R_AARCH64_JUMP_SLOT);
301 if (*where != target)
/*
 * ifunc machinery initialization hook; the auxv argument is unused here.
 * NOTE(review): the return-type line and the (presumably empty) body are
 * missing from this excerpt.
 */
308 ifunc_init(Elf_Auxinfo aux_info[__min_size(AT_COUNT)] __unused)
/*
 * Apply all non-PLT relocations for "obj": absolute/GOT-data references,
 * deferred COPY checks, TLS descriptors, static-TLS (TPREL64) offsets and
 * load-base-relative entries.  Unknown types are reported and rejected.
 * NOTE(review): many lines are missing from this excerpt — the return
 * type, the declarations of "cache"/"where"/"def", early-return and
 * "break;" statements, case labels such as R_AARCH64_COPY and "default:",
 * the cleanup/free of the cache, and the final return/closing braces.
 */
313 * Process non-PLT relocations
316 reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
317 RtldLockState *lockstate)
319 const Obj_Entry *defobj;
320 const Elf_Rela *relalim;
321 const Elf_Rela *rela;
325 unsigned long symnum;
327 if ((flags & SYMLOOK_IFUNC) != 0)
328 /* XXX not implemented */
/* Heap-allocate the symbol-lookup cache; see the rationale below. */
332 * The dynamic loader may be called from a thread, we have
333 * limited amounts of stack available so we cannot use alloca().
338 cache = calloc(obj->dynsymcount, sizeof(SymCache));
339 /* No need to check for NULL here */
341 relalim = (const Elf_Rela *)((caddr_t)obj->rela + obj->relasize);
342 for (rela = obj->rela; rela < relalim; rela++) {
343 where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
344 symnum = ELF_R_SYM(rela->r_info);
346 switch (ELF_R_TYPE(rela->r_info)) {
347 case R_AARCH64_ABS64:
348 case R_AARCH64_GLOB_DAT:
349 def = find_symdef(symnum, obj, &defobj, flags, cache,
354 *where = (Elf_Addr)defobj->relocbase + def->st_value +
/* NOTE(review): the R_AARCH64_COPY case label is missing above this comment. */
359 * These are deferred until all other relocations have
360 * been done. All we do here is make sure that the
361 * COPY relocation is not in a shared library. They
362 * are allowed only in executable files.
364 if (!obj->mainprog) {
365 _rtld_error("%s: Unexpected R_AARCH64_COPY "
366 "relocation in shared library", obj->path);
370 case R_AARCH64_TLSDESC:
371 reloc_tlsdesc(obj, rela, where);
373 case R_AARCH64_TLS_TPREL64:
374 def = find_symdef(symnum, obj, &defobj, flags, cache,
380 * We lazily allocate offsets for static TLS as we
381 * see the first relocation that references the
382 * TLS block. This allows us to support (small
383 * amounts of) static TLS in dynamically loaded
384 * modules. If we run out of space, we generate an
387 if (!defobj->tls_done) {
388 if (!allocate_tls_offset((Obj_Entry*) defobj)) {
390 "%s: No space available for static "
391 "Thread Local Storage", obj->path);
/* NOTE(review): the defobj->tlsoffset term of this sum is truncated. */
396 *where = def->st_value + rela->r_addend +
399 case R_AARCH64_RELATIVE:
400 *where = (Elf_Addr)(obj->relocbase + rela->r_addend);
403 rtld_printf("%s: Unhandled relocation %lu\n",
404 obj->path, ELF_R_TYPE(rela->r_info));
/*
 * Size the static TLS block from the offsets allocated so far (plus slack
 * for later-loaded modules), allocate the initial thread's TLS/TCB, and
 * install the thread pointer in tpidr_el0.
 * NOTE(review): the return-type line, the declaration of "tp", part of
 * the sizing comment, and the closing brace are missing from this
 * excerpt.
 */
413 allocate_initial_tls(Obj_Entry *objs)
418 * Fix the size of the static TLS block by using the maximum
419 * offset allocated so far and adding a bit for dynamic modules to
422 tls_static_space = tls_last_offset + tls_last_size +
423 RTLD_STATIC_TLS_EXTRA;
425 tp = (Elf_Addr **) allocate_tls(objs, NULL, TLS_TCB_SIZE, 16);
427 asm volatile("msr tpidr_el0, %0" : : "r"(tp));