2 * Copyright 1996, 1997, 1998, 1999, 2000 John D. Polstra.
3 * Copyright 2003 Alexander Kabaev <kan@FreeBSD.ORG>.
4 * Copyright 2009-2012 Konstantin Belousov <kib@FreeBSD.ORG>.
5 * Copyright 2012 John Marino <draco@marino.st>.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * Dynamic linker for ELF.
34 * John Polstra <jdp@polstra.com>.
38 #error "GCC is needed to compile this file"
41 #include <sys/param.h>
42 #include <sys/mount.h>
45 #include <sys/sysctl.h>
47 #include <sys/utsname.h>
48 #include <sys/ktrace.h>
64 #include "rtld_printf.h"
68 #define PATH_RTLD "/libexec/ld-elf.so.1"
70 #define PATH_RTLD "/libexec/ld-elf32.so.1"
/* Generic function pointer with unspecified arguments; used for init/fini
   routines and the program entry point returned by _rtld(). */
74 typedef void (*func_ptr_type)();
/* Callback invoked by path_enumerate() once per colon-separated path
   element; a non-NULL return value terminates the enumeration. */
75 typedef void * (*path_enum_proc) (const char *path, size_t len, void *arg);
78 * Function declarations.
/* Object digestion: parsing of DYNAMIC sections and program headers. */
80 static const char *basename(const char *);
81 static void die(void) __dead2;
82 static void digest_dynamic1(Obj_Entry *, int, const Elf_Dyn **,
83 const Elf_Dyn **, const Elf_Dyn **);
84 static void digest_dynamic2(Obj_Entry *, const Elf_Dyn *, const Elf_Dyn *,
86 static void digest_dynamic(Obj_Entry *, int);
87 static Obj_Entry *digest_phdr(const Elf_Phdr *, int, caddr_t, const char *);
/* dlopen()/dlclose() machinery and object loading. */
88 static Obj_Entry *dlcheck(void *);
89 static Obj_Entry *dlopen_object(const char *name, int fd, Obj_Entry *refobj,
90 int lo_flags, int mode, RtldLockState *lockstate);
91 static Obj_Entry *do_load_object(int, const char *, char *, struct stat *, int);
92 static int do_search_info(const Obj_Entry *obj, int, struct dl_serinfo *);
93 static bool donelist_check(DoneList *, const Obj_Entry *);
/* dlerror() message save/restore helpers. */
94 static void errmsg_restore(char *);
95 static char *errmsg_save(void);
96 static void *fill_search_info(const char *, size_t, void *);
97 static char *find_library(const char *, const Obj_Entry *);
98 static const char *gethints(bool);
99 static void init_dag(Obj_Entry *);
100 static void init_rtld(caddr_t, Elf_Auxinfo **);
101 static void initlist_add_neededs(Needed_Entry *, Objlist *);
102 static void initlist_add_objects(Obj_Entry *, Obj_Entry **, Objlist *);
103 static void linkmap_add(Obj_Entry *);
104 static void linkmap_delete(Obj_Entry *);
105 static void load_filtees(Obj_Entry *, int flags, RtldLockState *);
106 static void unload_filtees(Obj_Entry *);
107 static int load_needed_objects(Obj_Entry *, int);
108 static int load_preload_objects(void);
109 static Obj_Entry *load_object(const char *, int fd, const Obj_Entry *, int);
110 static void map_stacks_exec(RtldLockState *);
111 static Obj_Entry *obj_from_addr(const void *);
/* Objlist container manipulation (STAILQ-based lists of objects). */
112 static void objlist_call_fini(Objlist *, Obj_Entry *, RtldLockState *);
113 static void objlist_call_init(Objlist *, RtldLockState *);
114 static void objlist_clear(Objlist *);
115 static Objlist_Entry *objlist_find(Objlist *, const Obj_Entry *);
116 static void objlist_init(Objlist *);
117 static void objlist_push_head(Objlist *, Obj_Entry *);
118 static void objlist_push_tail(Objlist *, Obj_Entry *);
119 static void objlist_remove(Objlist *, Obj_Entry *);
120 static void *path_enumerate(const char *, path_enum_proc, void *);
/* Relocation processing and ifunc resolution. */
121 static int relocate_object_dag(Obj_Entry *root, bool bind_now,
122 Obj_Entry *rtldobj, int flags, RtldLockState *lockstate);
123 static int relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
124 int flags, RtldLockState *lockstate);
125 static int relocate_objects(Obj_Entry *, bool, Obj_Entry *, int,
127 static int resolve_objects_ifunc(Obj_Entry *first, bool bind_now,
128 int flags, RtldLockState *lockstate);
129 static int rtld_dirname(const char *, char *);
130 static int rtld_dirname_abs(const char *, char *);
131 static void *rtld_dlopen(const char *name, int fd, int mode);
132 static void rtld_exit(void);
133 static char *search_library_path(const char *, const char *);
134 static const void **get_program_var_addr(const char *, RtldLockState *);
135 static void set_program_var(const char *, const void *);
/* Symbol lookup: default scope, global list, per-object SysV/GNU hash. */
136 static int symlook_default(SymLook *, const Obj_Entry *refobj);
137 static int symlook_global(SymLook *, DoneList *);
138 static void symlook_init_from_req(SymLook *, const SymLook *);
139 static int symlook_list(SymLook *, const Objlist *, DoneList *);
140 static int symlook_needed(SymLook *, const Needed_Entry *, DoneList *);
141 static int symlook_obj1_sysv(SymLook *, const Obj_Entry *);
142 static int symlook_obj1_gnu(SymLook *, const Obj_Entry *);
143 static void trace_loaded_objects(Obj_Entry *);
144 static void unlink_object(Obj_Entry *);
145 static void unload_object(Obj_Entry *);
146 static void unref_dag(Obj_Entry *);
147 static void ref_dag(Obj_Entry *);
/* $ORIGIN / $OSNAME / $OSREL / $PLATFORM keyword substitution. */
148 static int origin_subst_one(char **, const char *, const char *,
149 const char *, char *);
150 static char *origin_subst(const char *, const char *);
151 static void preinit_main(void);
/* Symbol versioning (DT_VERNEED/DT_VERDEF) verification. */
152 static int rtld_verify_versions(const Objlist *);
153 static int rtld_verify_object_versions(Obj_Entry *);
154 static void object_add_name(Obj_Entry *, const char *);
155 static int object_match_name(const Obj_Entry *, const char *);
156 static void ld_utrace_log(int, void *, void *, size_t, int, const char *);
157 static void rtld_fill_dl_phdr_info(const Obj_Entry *obj,
158 struct dl_phdr_info *phdr_info);
159 static uint32_t gnu_hash(const char *);
160 static bool matched_symbol(SymLook *, const Obj_Entry *, Sym_Match_Result *,
161 const unsigned long);
/* Hook for debuggers: called at link-map state transitions. */
163 void r_debug_state(struct r_debug *, struct link_map *) __noinline;
/* File-scope state of the dynamic linker. */
168 static char *error_message; /* Message for dlerror(), or NULL */
169 struct r_debug r_debug; /* for GDB; */
170 static bool libmap_disable; /* Disable libmap */
171 static bool ld_loadfltr; /* Immediate filters processing */
172 static char *libmap_override; /* Maps to use in addition to libmap.conf */
173 static bool trust; /* False for setuid and setgid programs */
174 static bool dangerous_ld_env; /* True if environment variables have been
175 used to affect the libraries loaded */
176 static char *ld_bind_now; /* Environment variable for immediate binding */
177 static char *ld_debug; /* Environment variable for debugging */
178 static char *ld_library_path; /* Environment variable for search path */
179 static char *ld_preload; /* Environment variable for libraries to
181 static char *ld_elf_hints_path; /* Environment variable for alternative hints path */
182 static char *ld_tracing; /* Called from ldd to print libs */
183 static char *ld_utrace; /* Use utrace() to log events. */
184 static Obj_Entry *obj_list; /* Head of linked list of shared objects */
185 static Obj_Entry **obj_tail; /* Link field of last object in list */
186 static Obj_Entry *obj_main; /* The main program shared object */
187 static Obj_Entry obj_rtld; /* The dynamic linker shared object */
188 static unsigned int obj_count; /* Number of objects in obj_list */
189 static unsigned int obj_loads; /* Number of loads of objects (gen count) */
191 static Objlist list_global = /* Objects dlopened with RTLD_GLOBAL */
192 STAILQ_HEAD_INITIALIZER(list_global);
193 static Objlist list_main = /* Objects loaded at program startup */
194 STAILQ_HEAD_INITIALIZER(list_main);
195 static Objlist list_fini = /* Objects needing fini() calls */
196 STAILQ_HEAD_INITIALIZER(list_fini);
198 Elf_Sym sym_zero; /* For resolving undefined weak refs. */
/* Update r_debug state and notify the debugger via r_debug_state(). */
200 #define GDB_STATE(s,m) r_debug.r_state = s; r_debug_state(&r_debug,m);
/* Weak reference to our own _DYNAMIC: non-NULL only when rtld itself was
   dynamically linked. */
202 extern Elf_Dyn _DYNAMIC;
203 #pragma weak _DYNAMIC
204 #ifndef RTLD_IS_DYNAMIC
205 #define RTLD_IS_DYNAMIC() (&_DYNAMIC != NULL)
208 int osreldate, pagesize;
/* SSP canary; filled in from the AT_CANARY aux entry or the kern sysctl. */
210 long __stack_chk_guard[8] = {0, 0, 0, 0, 0, 0, 0, 0};
212 static int stack_prot = PROT_READ | PROT_WRITE | RTLD_DEFAULT_STACK_EXEC;
213 static int max_stack_flags;
216 * Global declarations normally provided by crt1. The dynamic linker is
217 * not built with crt1, so we have to provide them ourselves.
223 * Used to pass argc, argv to init functions.
229 * Globals to control TLS allocation.
231 size_t tls_last_offset; /* Static TLS offset of last module */
232 size_t tls_last_size; /* Static TLS size of last module */
233 size_t tls_static_space; /* Static TLS space allocated */
234 int tls_dtv_generation = 1; /* Used to detect when dtv size changes */
235 int tls_max_index = 1; /* Largest module index allocated */
/* Whether LD_LIBRARY_PATH may be overridden by DT_RPATH/DT_RUNPATH;
   controlled by the LD_LIBRARY_PATH_RPATH environment variable. */
237 bool ld_library_path_rpath = true;
240 * Fill in a DoneList with an allocation large enough to hold all of
241 * the currently-loaded objects. Keep this as a macro since it calls
242 * alloca and we want that to occur within the scope of the caller.
244 #define donelist_init(dlp) \
245 ((dlp)->objs = alloca(obj_count * sizeof (dlp)->objs[0]), \
246 assert((dlp)->objs != NULL), \
247 (dlp)->num_alloc = obj_count, \
/* Event codes recorded via utrace(2) when LD_UTRACE is set; consumed by
   kdump(1) and similar tools to trace dynamic-linker activity. */
250 #define UTRACE_DLOPEN_START 1
251 #define UTRACE_DLOPEN_STOP 2
252 #define UTRACE_DLCLOSE_START 3
253 #define UTRACE_DLCLOSE_STOP 4
254 #define UTRACE_LOAD_OBJECT 5
255 #define UTRACE_UNLOAD_OBJECT 6
256 #define UTRACE_ADD_RUNDEP 7
257 #define UTRACE_PRELOAD_FINISHED 8
258 #define UTRACE_INIT_CALL 9
259 #define UTRACE_FINI_CALL 10
/* Fixed-layout record emitted by ld_utrace_log(); several fields are
   overloaded depending on the event type (see per-field comments). */
262 char sig[4]; /* 'RTLD' */
265 void *mapbase; /* Used for 'parent' and 'init/fini' */
267 int refcnt; /* Used for 'mode' */
268 char name[MAXPATHLEN];
/* Emit a utrace record only when LD_UTRACE is set in the environment;
   keeps the common (untraced) path to a single NULL test. */
271 #define LD_UTRACE(e, h, mb, ms, r, n) do { \
272 if (ld_utrace != NULL) \
273 ld_utrace_log(e, h, mb, ms, r, n); \
/*
 * Build a struct utrace_rtld record for the given event and hand it to
 * utrace(2). "name" may describe the object involved; it is copied
 * (truncated if necessary) into the fixed-size record buffer.
 */
277 ld_utrace_log(int event, void *handle, void *mapbase, size_t mapsize,
278 int refcnt, const char *name)
280 struct utrace_rtld ut;
288 ut.mapbase = mapbase;
289 ut.mapsize = mapsize;
/* Zero first so the record never leaks stack garbage past the name. */
291 bzero(ut.name, sizeof(ut.name));
293 strlcpy(ut.name, name, sizeof(ut.name));
294 utrace(&ut, sizeof(ut));
298 * Main entry point for dynamic linking. The first argument is the
299 * stack pointer. The stack is expected to be laid out as described
300 * in the SVR4 ABI specification, Intel 386 Processor Supplement.
301 * Specifically, the stack pointer points to a word containing
302 * ARGC. Following that in the stack is a null-terminated sequence
303 * of pointers to argument strings. Then comes a null-terminated
304 * sequence of pointers to environment strings. Finally, there is a
305 * sequence of "auxiliary vector" entries.
307 * The second argument points to a place to store the dynamic linker's
308 * exit procedure pointer and the third to a place to store the main
311 * The return value is the main program's entry point.
314 _rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp)
316 Elf_Auxinfo *aux_info[AT_COUNT];
324 Objlist_Entry *entry;
326 Obj_Entry **preload_tail;
328 RtldLockState lockstate;
329 char *library_path_rpath;
334 * On entry, the dynamic linker itself has not been relocated yet.
335 * Be very careful not to reference any global data until after
336 * init_rtld has returned. It is OK to reference file-scope statics
337 * and string constants, and to call static and global functions.
340 /* Find the auxiliary vector on the stack. */
343 sp += argc + 1; /* Skip over arguments and NULL terminator */
345 while (*sp++ != 0) /* Skip over environment, and NULL terminator */
347 aux = (Elf_Auxinfo *) sp;
349 /* Digest the auxiliary vector. */
350 for (i = 0; i < AT_COUNT; i++)
352 for (auxp = aux; auxp->a_type != AT_NULL; auxp++) {
353 if (auxp->a_type < AT_COUNT)
354 aux_info[auxp->a_type] = auxp;
357 /* Initialize and relocate ourselves. */
358 assert(aux_info[AT_BASE] != NULL);
359 init_rtld((caddr_t) aux_info[AT_BASE]->a_un.a_ptr, aux_info);
361 __progname = obj_rtld.path;
362 argv0 = argv[0] != NULL ? argv[0] : "(null)";
/* Prefer the kernel-supplied SSP canary (AT_CANARY); its length is
   clamped to the size of __stack_chk_guard. */
367 if (aux_info[AT_CANARY] != NULL &&
368 aux_info[AT_CANARY]->a_un.a_ptr != NULL) {
369 i = aux_info[AT_CANARYLEN]->a_un.a_val;
370 if (i > sizeof(__stack_chk_guard))
371 i = sizeof(__stack_chk_guard);
372 memcpy(__stack_chk_guard, aux_info[AT_CANARY]->a_un.a_ptr, i);
/* Otherwise ask the kernel via sysctl for a random canary. */
377 len = sizeof(__stack_chk_guard);
378 if (sysctl(mib, 2, __stack_chk_guard, &len, NULL, 0) == -1 ||
379 len != sizeof(__stack_chk_guard)) {
380 /* If sysctl was unsuccessful, use the "terminator canary". */
381 ((unsigned char *)(void *)__stack_chk_guard)[0] = 0;
382 ((unsigned char *)(void *)__stack_chk_guard)[1] = 0;
383 ((unsigned char *)(void *)__stack_chk_guard)[2] = '\n';
384 ((unsigned char *)(void *)__stack_chk_guard)[3] = 255;
/* Only honor dangerous LD_* variables for non-set[ug]id programs. */
388 trust = !issetugid();
390 ld_bind_now = getenv(LD_ "BIND_NOW");
392 * If the process is tainted, then we un-set the dangerous environment
393 * variables. The process will be marked as tainted until setuid(2)
394 * is called. If any child process calls setuid(2) we do not want any
395 * future processes to honor the potentially un-safe variables.
398 if (unsetenv(LD_ "PRELOAD") || unsetenv(LD_ "LIBMAP") ||
399 unsetenv(LD_ "LIBRARY_PATH") || unsetenv(LD_ "LIBMAP_DISABLE") ||
400 unsetenv(LD_ "DEBUG") || unsetenv(LD_ "ELF_HINTS_PATH") ||
401 unsetenv(LD_ "LOADFLTR") || unsetenv(LD_ "LIBRARY_PATH_RPATH")) {
402 _rtld_error("environment corrupt; aborting");
/* Cache the tuning environment variables (already scrubbed above for
   tainted processes). */
406 ld_debug = getenv(LD_ "DEBUG");
407 libmap_disable = getenv(LD_ "LIBMAP_DISABLE") != NULL;
408 libmap_override = getenv(LD_ "LIBMAP");
409 ld_library_path = getenv(LD_ "LIBRARY_PATH");
410 ld_preload = getenv(LD_ "PRELOAD");
411 ld_elf_hints_path = getenv(LD_ "ELF_HINTS_PATH");
412 ld_loadfltr = getenv(LD_ "LOADFLTR") != NULL;
413 library_path_rpath = getenv(LD_ "LIBRARY_PATH_RPATH");
414 if (library_path_rpath != NULL) {
415 if (library_path_rpath[0] == 'y' ||
416 library_path_rpath[0] == 'Y' ||
417 library_path_rpath[0] == '1')
418 ld_library_path_rpath = true;
420 ld_library_path_rpath = false;
422 dangerous_ld_env = libmap_disable || (libmap_override != NULL) ||
423 (ld_library_path != NULL) || (ld_preload != NULL) ||
424 (ld_elf_hints_path != NULL) || ld_loadfltr;
425 ld_tracing = getenv(LD_ "TRACE_LOADED_OBJECTS");
426 ld_utrace = getenv(LD_ "UTRACE");
428 if ((ld_elf_hints_path == NULL) || strlen(ld_elf_hints_path) == 0)
429 ld_elf_hints_path = _PATH_ELF_HINTS;
431 if (ld_debug != NULL && *ld_debug != '\0')
433 dbg("%s is initialized, base address = %p", __progname,
434 (caddr_t) aux_info[AT_BASE]->a_un.a_ptr);
435 dbg("RTLD dynamic = %p", obj_rtld.dynamic);
436 dbg("RTLD pltgot = %p", obj_rtld.pltgot);
438 dbg("initializing thread locks");
442 * Load the main program, or process its program header if it is
445 if (aux_info[AT_EXECFD] != NULL) { /* Load the main program. */
446 int fd = aux_info[AT_EXECFD]->a_un.a_val;
447 dbg("loading main program");
448 obj_main = map_object(fd, argv0, NULL);
450 if (obj_main == NULL)
452 max_stack_flags = obj->stack_flags;
453 } else { /* Main program already loaded. */
454 const Elf_Phdr *phdr;
458 dbg("processing main program's program header");
459 assert(aux_info[AT_PHDR] != NULL);
460 phdr = (const Elf_Phdr *) aux_info[AT_PHDR]->a_un.a_ptr;
461 assert(aux_info[AT_PHNUM] != NULL);
462 phnum = aux_info[AT_PHNUM]->a_un.a_val;
463 assert(aux_info[AT_PHENT] != NULL);
464 assert(aux_info[AT_PHENT]->a_un.a_val == sizeof(Elf_Phdr));
465 assert(aux_info[AT_ENTRY] != NULL);
466 entry = (caddr_t) aux_info[AT_ENTRY]->a_un.a_ptr;
467 if ((obj_main = digest_phdr(phdr, phnum, entry, argv0)) == NULL)
/* Determine the executable's pathname: prefer the kernel-supplied
   AT_EXECPATH, making it absolute via getcwd() when relative, and
   fall back to argv[0] on any failure. */
471 if (aux_info[AT_EXECPATH] != 0) {
473 char buf[MAXPATHLEN];
475 kexecpath = aux_info[AT_EXECPATH]->a_un.a_ptr;
476 dbg("AT_EXECPATH %p %s", kexecpath, kexecpath);
477 if (kexecpath[0] == '/')
478 obj_main->path = kexecpath;
479 else if (getcwd(buf, sizeof(buf)) == NULL ||
480 strlcat(buf, "/", sizeof(buf)) >= sizeof(buf) ||
481 strlcat(buf, kexecpath, sizeof(buf)) >= sizeof(buf))
482 obj_main->path = xstrdup(argv0);
484 obj_main->path = xstrdup(buf);
486 dbg("No AT_EXECPATH");
487 obj_main->path = xstrdup(argv0);
489 dbg("obj_main path %s", obj_main->path);
490 obj_main->mainprog = true;
492 if (aux_info[AT_STACKPROT] != NULL &&
493 aux_info[AT_STACKPROT]->a_un.a_val != 0)
494 stack_prot = aux_info[AT_STACKPROT]->a_un.a_val;
497 * Get the actual dynamic linker pathname from the executable if
498 * possible. (It should always be possible.) That ensures that
499 * gdb will find the right dynamic linker even if a non-standard
502 if (obj_main->interp != NULL &&
503 strcmp(obj_main->interp, obj_rtld.path) != 0) {
505 obj_rtld.path = xstrdup(obj_main->interp);
506 __progname = obj_rtld.path;
509 digest_dynamic(obj_main, 0);
510 dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d",
511 obj_main->path, obj_main->valid_hash_sysv, obj_main->valid_hash_gnu,
512 obj_main->dynsymcount);
514 linkmap_add(obj_main);
515 linkmap_add(&obj_rtld);
517 /* Link the main program into the list of objects. */
518 *obj_tail = obj_main;
519 obj_tail = &obj_main->next;
523 /* Initialize a fake symbol for resolving undefined weak references. */
524 sym_zero.st_info = ELF_ST_INFO(STB_GLOBAL, STT_NOTYPE);
525 sym_zero.st_shndx = SHN_UNDEF;
/* Chosen so relocbase + st_value == 0 for undefined weak symbols. */
526 sym_zero.st_value = -(uintptr_t)obj_main->relocbase;
529 libmap_disable = (bool)lm_init(libmap_override);
531 dbg("loading LD_PRELOAD libraries");
532 if (load_preload_objects() == -1)
534 preload_tail = obj_tail;
536 dbg("loading needed objects");
537 if (load_needed_objects(obj_main, 0) == -1)
540 /* Make a list of all objects loaded at startup. */
541 for (obj = obj_list; obj != NULL; obj = obj->next) {
542 objlist_push_tail(&list_main, obj);
546 dbg("checking for required versions");
547 if (rtld_verify_versions(&list_main) == -1 && !ld_tracing)
550 if (ld_tracing) { /* We're done */
551 trace_loaded_objects(obj_main);
555 if (getenv(LD_ "DUMP_REL_PRE") != NULL) {
556 dump_relocations(obj_main);
561 * Processing tls relocations requires having the tls offsets
562 * initialized. Prepare offsets before starting initial
563 * relocation processing.
565 dbg("initializing initial thread local storage offsets");
566 STAILQ_FOREACH(entry, &list_main, link) {
568 * Allocate all the initial objects out of the static TLS
569 * block even if they didn't ask for it.
571 allocate_tls_offset(entry->obj);
574 if (relocate_objects(obj_main,
575 ld_bind_now != NULL && *ld_bind_now != '\0',
576 &obj_rtld, SYMLOOK_EARLY, NULL) == -1)
579 dbg("doing copy relocations");
580 if (do_copy_relocations(obj_main) == -1)
583 if (getenv(LD_ "DUMP_REL_POST") != NULL) {
584 dump_relocations(obj_main);
589 * Setup TLS for main thread. This must be done after the
590 * relocations are processed, since tls initialization section
591 * might be the subject for relocations.
593 dbg("initializing initial thread local storage")
594 allocate_initial_tls(obj_list);
596 dbg("initializing key program variables");
597 set_program_var("__progname", argv[0] != NULL ? basename(argv[0]) : "");
598 set_program_var("environ", env);
599 set_program_var("__elf_aux_vector", aux);
601 /* Make a list of init functions to call. */
602 objlist_init(&initlist);
603 initlist_add_objects(obj_list, preload_tail, &initlist);
605 r_debug_state(NULL, &obj_main->linkmap); /* say hello to gdb! */
607 map_stacks_exec(NULL);
609 dbg("resolving ifuncs");
610 if (resolve_objects_ifunc(obj_main,
611 ld_bind_now != NULL && *ld_bind_now != '\0', SYMLOOK_EARLY,
615 if (!obj_main->crt_no_init) {
617 * Make sure we don't call the main program's init and fini
618 * functions for binaries linked with old crt1 which calls
621 obj_main->init = obj_main->fini = (Elf_Addr)NULL;
622 obj_main->preinit_array = obj_main->init_array =
623 obj_main->fini_array = (Elf_Addr)NULL;
626 wlock_acquire(rtld_bind_lock, &lockstate);
627 if (obj_main->crt_no_init)
629 objlist_call_init(&initlist, &lockstate);
630 objlist_clear(&initlist);
631 dbg("loading filtees");
/* obj_list->next skips obj_main; only shared objects carry filtees
   processed here. */
632 for (obj = obj_list->next; obj != NULL; obj = obj->next) {
633 if (ld_loadfltr || obj->z_loadfltr)
634 load_filtees(obj, 0, &lockstate);
636 lock_release(rtld_bind_lock, &lockstate);
638 dbg("transferring control to program entry point = %p", obj_main->entry);
640 /* Return the exit procedure and the program entry point. */
641 *exit_proc = rtld_exit;
643 return (func_ptr_type) obj_main->entry;
/*
 * Resolve an STT_GNU_IFUNC symbol: build a callable pointer for the
 * resolver function "def" in "obj", invoke it, and return the address
 * it selects as the actual implementation.
 */
647 rtld_resolve_ifunc(const Obj_Entry *obj, const Elf_Sym *def)
652 ptr = (void *)make_function_pointer(def, obj);
/* The resolver takes no arguments and returns the chosen target. */
653 target = ((Elf_Addr (*)(void))ptr)();
654 return ((void *)target);
/*
 * Lazy-binding entry point, called from the PLT trampoline with the
 * object and the offset of the relocation to resolve. Looks up the
 * symbol, fixes up the jmpslot, and returns the value the trampoline
 * should jump to.
 */
658 _rtld_bind(Obj_Entry *obj, Elf_Size reloff)
662 const Obj_Entry *defobj;
665 RtldLockState lockstate;
667 rlock_acquire(rtld_bind_lock, &lockstate);
/* On signal-interrupted lock recursion, retry with the write lock. */
668 if (sigsetjmp(lockstate.env, 0) != 0)
669 lock_upgrade(rtld_bind_lock, &lockstate);
/* reloff selects the Elf_Rel or Elf_Rela entry depending on the
   object's PLT relocation format. */
671 rel = (const Elf_Rel *) ((caddr_t) obj->pltrel + reloff);
673 rel = (const Elf_Rel *) ((caddr_t) obj->pltrela + reloff);
675 where = (Elf_Addr *) (obj->relocbase + rel->r_offset);
676 def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, true, NULL,
/* Ifunc symbols require running their resolver to get the target. */
680 if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
681 target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
683 target = (Elf_Addr)(defobj->relocbase + def->st_value);
685 dbg("\"%s\" in \"%s\" ==> %p in \"%s\"",
686 defobj->strtab + def->st_name, basename(obj->path),
687 (void *)target, basename(defobj->path));
690 * Write the new contents for the jmpslot. Note that depending on
691 * architecture, the value which we need to return back to the
692 * lazy binding trampoline may or may not be the target
693 * address. The value returned from reloc_jmpslot() is the value
694 * that the trampoline needs.
696 target = reloc_jmpslot(where, target, defobj, obj, rel);
697 lock_release(rtld_bind_lock, &lockstate);
702 * Error reporting function. Use it like printf. It formats the message
703 * into a buffer, and sets things up so that the next call to dlerror()
704 * will return the message.
707 _rtld_error(const char *fmt, ...)
/* Static buffer: the message must outlive this call for dlerror(). */
709 static char buf[512];
713 rtld_vsnprintf(buf, sizeof buf, fmt, ap);
719 * Return a dynamically-allocated copy of the current error message, if any.
724 return error_message == NULL ? NULL : xstrdup(error_message);
728 * Restore the current error message from a copy which was previously saved
729 * by errmsg_save(). The copy is freed.
732 errmsg_restore(char *saved_msg)
734 if (saved_msg == NULL)
735 error_message = NULL;
/* Re-set the message through _rtld_error so dlerror() sees it. */
737 _rtld_error("%s", saved_msg);
/*
 * Return the final component of the pathname "name": the text that
 * follows the last '/', or the whole string when no '/' is present.
 * No allocation is performed; the result points into "name".
 */
static const char *
basename(const char *name)
{
	const char *slash;

	slash = strrchr(name, '/');
	if (slash == NULL)
		return (name);
	return (slash + 1);
}
/* Cached uname(3) results; filled lazily by origin_subst(). */
749 static struct utsname uts;
/*
 * Replace every occurrence of keyword "kw" in "real" with "subst",
 * storing the (possibly newly allocated) result in *res. "may_free",
 * when non-NULL, is a previous intermediate string to release.
 * Returns nonzero on success, zero when the substitution would
 * overflow PATH_MAX.
 */
752 origin_subst_one(char **res, const char *real, const char *kw, const char *subst,
762 subst_len = kw_len = 0;
/* Compute the substitution length only once, on first use. */
766 if (subst_len == 0) {
767 subst_len = strlen(subst);
771 *res = xmalloc(PATH_MAX);
/* Bail out if appending this literal chunk plus the substitution
   would exceed the PATH_MAX output buffer. */
774 if ((res1 - *res) + subst_len + (p1 - p) >= PATH_MAX) {
775 _rtld_error("Substitution of %s in %s cannot be performed",
777 if (may_free != NULL)
/* Copy the literal text before the keyword, then the substitution. */
782 memcpy(res1, p, p1 - p);
784 memcpy(res1, subst, subst_len);
789 if (may_free != NULL)
/* No keyword found: return a plain copy of the input. */
792 *res = xstrdup(real);
796 if (may_free != NULL)
/* Append the tail after the last keyword occurrence. */
798 if (strlcat(res1, p, PATH_MAX - (res1 - *res)) >= PATH_MAX) {
/*
 * Expand all supported dynamic string tokens ($ORIGIN, $OSNAME, $OSREL,
 * $PLATFORM) in "real", chaining origin_subst_one() so each pass frees
 * the previous intermediate result. Returns the final expanded string.
 */
808 origin_subst(const char *real, const char *origin_path)
810 char *res1, *res2, *res3, *res4;
/* Populate the uname cache on first use. */
812 if (uts.sysname[0] == '\0') {
813 if (uname(&uts) != 0) {
814 _rtld_error("utsname failed: %d", errno);
818 if (!origin_subst_one(&res1, real, "$ORIGIN", origin_path, NULL) ||
819 !origin_subst_one(&res2, res1, "$OSNAME", uts.sysname, res1) ||
820 !origin_subst_one(&res3, res2, "$OSREL", uts.release, res2) ||
821 !origin_subst_one(&res4, res3, "$PLATFORM", uts.machine, res3))
/* Fatal-error path: print the pending dlerror() message to stderr. */
829 const char *msg = dlerror();
833 rtld_fdputstr(STDERR_FILENO, msg);
834 rtld_fdputchar(STDERR_FILENO, '\n');
839 * Process a shared object's DYNAMIC section, and save the important
840 * information in its Obj_Entry structure.
843 digest_dynamic1(Obj_Entry *obj, int early, const Elf_Dyn **dyn_rpath,
844 const Elf_Dyn **dyn_soname, const Elf_Dyn **dyn_runpath)
847 Needed_Entry **needed_tail = &obj->needed;
848 Needed_Entry **needed_filtees_tail = &obj->needed_filtees;
849 Needed_Entry **needed_aux_filtees_tail = &obj->needed_aux_filtees;
850 const Elf_Hashelt *hashtab;
851 const Elf32_Word *hashval;
852 Elf32_Word bkt, nmaskwords;
/* Assume REL-format PLT relocations unless DT_PLTREL says otherwise. */
855 int plttype = DT_REL;
861 obj->bind_now = false;
862 for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) {
863 switch (dynp->d_tag) {
/* Plain relocation tables (DT_REL/DT_RELA families). */
866 obj->rel = (const Elf_Rel *) (obj->relocbase + dynp->d_un.d_ptr);
870 obj->relsize = dynp->d_un.d_val;
874 assert(dynp->d_un.d_val == sizeof(Elf_Rel));
878 obj->pltrel = (const Elf_Rel *)
879 (obj->relocbase + dynp->d_un.d_ptr);
883 obj->pltrelsize = dynp->d_un.d_val;
887 obj->rela = (const Elf_Rela *) (obj->relocbase + dynp->d_un.d_ptr);
891 obj->relasize = dynp->d_un.d_val;
895 assert(dynp->d_un.d_val == sizeof(Elf_Rela));
899 plttype = dynp->d_un.d_val;
900 assert(dynp->d_un.d_val == DT_REL || plttype == DT_RELA);
/* Dynamic symbol and string tables. */
904 obj->symtab = (const Elf_Sym *)
905 (obj->relocbase + dynp->d_un.d_ptr);
909 assert(dynp->d_un.d_val == sizeof(Elf_Sym));
913 obj->strtab = (const char *) (obj->relocbase + dynp->d_un.d_ptr);
917 obj->strsize = dynp->d_un.d_val;
/* Symbol versioning tables. */
921 obj->verneed = (const Elf_Verneed *) (obj->relocbase +
926 obj->verneednum = dynp->d_un.d_val;
930 obj->verdef = (const Elf_Verdef *) (obj->relocbase +
935 obj->verdefnum = dynp->d_un.d_val;
939 obj->versyms = (const Elf_Versym *)(obj->relocbase +
/* Classic SysV hash table: [nbuckets, nchains, buckets..., chains...]. */
945 hashtab = (const Elf_Hashelt *)(obj->relocbase +
947 obj->nbuckets = hashtab[0];
948 obj->nchains = hashtab[1];
949 obj->buckets = hashtab + 2;
950 obj->chains = obj->buckets + obj->nbuckets;
951 obj->valid_hash_sysv = obj->nbuckets > 0 && obj->nchains > 0 &&
952 obj->buckets != NULL;
/* GNU hash table: header [nbuckets, symndx, maskwords, shift2]
   followed by the Bloom filter, buckets and chains. */
958 hashtab = (const Elf_Hashelt *)(obj->relocbase +
960 obj->nbuckets_gnu = hashtab[0];
961 obj->symndx_gnu = hashtab[1];
962 nmaskwords = hashtab[2];
963 bloom_size32 = (__ELF_WORD_SIZE / 32) * nmaskwords;
964 /* Number of bitmask words is required to be power of 2 */
965 nmw_power2 = ((nmaskwords & (nmaskwords - 1)) == 0);
966 obj->maskwords_bm_gnu = nmaskwords - 1;
967 obj->shift2_gnu = hashtab[3];
968 obj->bloom_gnu = (Elf_Addr *) (hashtab + 4);
969 obj->buckets_gnu = hashtab + 4 + bloom_size32;
970 obj->chain_zero_gnu = obj->buckets_gnu + obj->nbuckets_gnu -
972 obj->valid_hash_gnu = nmw_power2 && obj->nbuckets_gnu > 0 &&
973 obj->buckets_gnu != NULL;
/* DT_NEEDED: append to the object's dependency list in file order. */
979 Needed_Entry *nep = NEW(Needed_Entry);
980 nep->name = dynp->d_un.d_val;
985 needed_tail = &nep->next;
/* DT_FILTER: standard filtee list. */
991 Needed_Entry *nep = NEW(Needed_Entry);
992 nep->name = dynp->d_un.d_val;
996 *needed_filtees_tail = nep;
997 needed_filtees_tail = &nep->next;
/* DT_AUXILIARY: auxiliary filtee list. */
1003 Needed_Entry *nep = NEW(Needed_Entry);
1004 nep->name = dynp->d_un.d_val;
1008 *needed_aux_filtees_tail = nep;
1009 needed_aux_filtees_tail = &nep->next;
1014 obj->pltgot = (Elf_Addr *) (obj->relocbase + dynp->d_un.d_ptr);
1018 obj->textrel = true;
1022 obj->symbolic = true;
1027 * We have to wait until later to process this, because we
1028 * might not have gotten the address of the string table yet.
1038 *dyn_runpath = dynp;
/* Init/fini routines and their array variants. */
1042 obj->init = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
1045 case DT_PREINIT_ARRAY:
1046 obj->preinit_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
1049 case DT_PREINIT_ARRAYSZ:
1050 obj->preinit_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
1054 obj->init_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
1057 case DT_INIT_ARRAYSZ:
1058 obj->init_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
1062 obj->fini = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
1066 obj->fini_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
1069 case DT_FINI_ARRAYSZ:
1070 obj->fini_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
1074 * Don't process DT_DEBUG on MIPS as the dynamic section
1075 * is mapped read-only. DT_MIPS_RLD_MAP is used instead.
1080 /* XXX - not implemented yet */
1082 dbg("Filling in DT_DEBUG entry");
1083 ((Elf_Dyn*)dynp)->d_un.d_ptr = (Elf_Addr) &r_debug;
/* DT_FLAGS: DF_ORIGIN honored only for trusted processes. */
1088 if ((dynp->d_un.d_val & DF_ORIGIN) && trust)
1089 obj->z_origin = true;
1090 if (dynp->d_un.d_val & DF_SYMBOLIC)
1091 obj->symbolic = true;
1092 if (dynp->d_un.d_val & DF_TEXTREL)
1093 obj->textrel = true;
1094 if (dynp->d_un.d_val & DF_BIND_NOW)
1095 obj->bind_now = true;
1096 /*if (dynp->d_un.d_val & DF_STATIC_TLS)
/* MIPS-specific GOT bookkeeping tags. */
1100 case DT_MIPS_LOCAL_GOTNO:
1101 obj->local_gotno = dynp->d_un.d_val;
1104 case DT_MIPS_SYMTABNO:
1105 obj->symtabno = dynp->d_un.d_val;
1108 case DT_MIPS_GOTSYM:
1109 obj->gotsym = dynp->d_un.d_val;
1112 case DT_MIPS_RLD_MAP:
1115 dbg("Filling in DT_DEBUG entry");
1116 ((Elf_Dyn*)dynp)->d_un.d_ptr = (Elf_Addr) &r_debug;
/* DT_FLAGS_1: extended flags. */
1122 if (dynp->d_un.d_val & DF_1_NOOPEN)
1123 obj->z_noopen = true;
1124 if ((dynp->d_un.d_val & DF_1_ORIGIN) && trust)
1125 obj->z_origin = true;
1126 /*if (dynp->d_un.d_val & DF_1_GLOBAL)
1128 if (dynp->d_un.d_val & DF_1_BIND_NOW)
1129 obj->bind_now = true;
1130 if (dynp->d_un.d_val & DF_1_NODELETE)
1131 obj->z_nodelete = true;
1132 if (dynp->d_un.d_val & DF_1_LOADFLTR)
1133 obj->z_loadfltr = true;
1134 if (dynp->d_un.d_val & DF_1_NODEFLIB)
1135 obj->z_nodeflib = true;
1140 dbg("Ignoring d_tag %ld = %#lx", (long)dynp->d_tag,
1147 obj->traced = false;
/* When the PLT uses RELA format, move the table to the rela fields. */
1149 if (plttype == DT_RELA) {
1150 obj->pltrela = (const Elf_Rela *) obj->pltrel;
1152 obj->pltrelasize = obj->pltrelsize;
1153 obj->pltrelsize = 0;
1156 /* Determine size of dynsym table (equal to nchains of sysv hash) */
1157 if (obj->valid_hash_sysv)
1158 obj->dynsymcount = obj->nchains;
1159 else if (obj->valid_hash_gnu) {
/* With only a GNU hash, count symbols by walking every bucket's
   chain; the low bit of a chain value marks the chain's end. */
1160 obj->dynsymcount = 0;
1161 for (bkt = 0; bkt < obj->nbuckets_gnu; bkt++) {
1162 if (obj->buckets_gnu[bkt] == 0)
1164 hashval = &obj->chain_zero_gnu[obj->buckets_gnu[bkt]];
1167 while ((*hashval++ & 1u) == 0);
1169 obj->dynsymcount += obj->symndx_gnu;
/*
 * Second pass of DYNAMIC processing: now that the string table address
 * is known, resolve the RPATH/RUNPATH/SONAME entries recorded by
 * digest_dynamic1(), applying $ORIGIN-style substitution if enabled.
 */
1174 digest_dynamic2(Obj_Entry *obj, const Elf_Dyn *dyn_rpath,
1175 const Elf_Dyn *dyn_soname, const Elf_Dyn *dyn_runpath)
1178 if (obj->z_origin && obj->origin_path == NULL) {
1179 obj->origin_path = xmalloc(PATH_MAX);
1180 if (rtld_dirname_abs(obj->path, obj->origin_path) == -1)
/* DT_RUNPATH takes precedence over the deprecated DT_RPATH. */
1184 if (dyn_runpath != NULL) {
1185 obj->runpath = (char *)obj->strtab + dyn_runpath->d_un.d_val;
1187 obj->runpath = origin_subst(obj->runpath, obj->origin_path);
1189 else if (dyn_rpath != NULL) {
1190 obj->rpath = (char *)obj->strtab + dyn_rpath->d_un.d_val;
1192 obj->rpath = origin_subst(obj->rpath, obj->origin_path);
1195 if (dyn_soname != NULL)
1196 object_add_name(obj, obj->strtab + dyn_soname->d_un.d_val);
1200 digest_dynamic(Obj_Entry *obj, int early)
1202 const Elf_Dyn *dyn_rpath;
1203 const Elf_Dyn *dyn_soname;
1204 const Elf_Dyn *dyn_runpath;
1206 digest_dynamic1(obj, early, &dyn_rpath, &dyn_soname, &dyn_runpath);
1207 digest_dynamic2(obj, dyn_rpath, dyn_soname, dyn_runpath);
1211 * Process a shared object's program header. This is used only for the
1212 * main program, when the kernel has already loaded the main program
1213 * into memory before calling the dynamic linker. It creates and
1214 * returns an Obj_Entry structure.
1217 digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry, const char *path)
1220 const Elf_Phdr *phlimit = phdr + phnum;
1222 Elf_Addr note_start, note_end;
/* First locate PT_PHDR to derive the load bias (relocbase). */
1226 for (ph = phdr; ph < phlimit; ph++) {
1227 if (ph->p_type != PT_PHDR)
1231 obj->phsize = ph->p_memsz;
1232 obj->relocbase = (caddr_t)phdr - ph->p_vaddr;
/* Default stack permissions unless PT_GNU_STACK overrides them. */
1236 obj->stack_flags = PF_X | PF_R | PF_W;
1238 for (ph = phdr; ph < phlimit; ph++) {
1239 switch (ph->p_type) {
1242 obj->interp = (const char *)(ph->p_vaddr + obj->relocbase);
1246 if (nsegs == 0) { /* First load segment */
1247 obj->vaddrbase = trunc_page(ph->p_vaddr);
1248 obj->mapbase = obj->vaddrbase + obj->relocbase;
1249 obj->textsize = round_page(ph->p_vaddr + ph->p_memsz) -
1251 } else { /* Last load segment */
1252 obj->mapsize = round_page(ph->p_vaddr + ph->p_memsz) -
1259 obj->dynamic = (const Elf_Dyn *)(ph->p_vaddr + obj->relocbase);
/* PT_TLS: record the static TLS template of the main program. */
1264 obj->tlssize = ph->p_memsz;
1265 obj->tlsalign = ph->p_align;
1266 obj->tlsinitsize = ph->p_filesz;
1267 obj->tlsinit = (void*)(ph->p_vaddr + obj->relocbase);
1271 obj->stack_flags = ph->p_flags;
/* PT_GNU_RELRO: region to re-protect read-only after relocation. */
1275 obj->relro_page = obj->relocbase + trunc_page(ph->p_vaddr);
1276 obj->relro_size = round_page(ph->p_memsz);
/* PT_NOTE: scan for FreeBSD ABI / crt_no_init notes. */
1280 note_start = (Elf_Addr)obj->relocbase + ph->p_vaddr;
1281 note_end = note_start + ph->p_filesz;
1282 digest_notes(obj, note_start, note_end);
1287 _rtld_error("%s: too few PT_LOAD segments", path);
1296 digest_notes(Obj_Entry *obj, Elf_Addr note_start, Elf_Addr note_end)
1298 const Elf_Note *note;
1299 const char *note_name;
1302 for (note = (const Elf_Note *)note_start; (Elf_Addr)note < note_end;
1303 note = (const Elf_Note *)((const char *)(note + 1) +
1304 roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
1305 roundup2(note->n_descsz, sizeof(Elf32_Addr)))) {
1306 if (note->n_namesz != sizeof(NOTE_FREEBSD_VENDOR) ||
1307 note->n_descsz != sizeof(int32_t))
1309 if (note->n_type != ABI_NOTETYPE &&
1310 note->n_type != CRT_NOINIT_NOTETYPE)
1312 note_name = (const char *)(note + 1);
1313 if (strncmp(NOTE_FREEBSD_VENDOR, note_name,
1314 sizeof(NOTE_FREEBSD_VENDOR)) != 0)
1316 switch (note->n_type) {
1318 /* FreeBSD osrel note */
1319 p = (uintptr_t)(note + 1);
1320 p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
1321 obj->osrel = *(const int32_t *)(p);
1322 dbg("note osrel %d", obj->osrel);
1324 case CRT_NOINIT_NOTETYPE:
1325 /* FreeBSD 'crt does not call init' note */
1326 obj->crt_no_init = true;
1327 dbg("note crt_no_init");
1334 dlcheck(void *handle)
1338 for (obj = obj_list; obj != NULL; obj = obj->next)
1339 if (obj == (Obj_Entry *) handle)
1342 if (obj == NULL || obj->refcount == 0 || obj->dl_refcount == 0) {
1343 _rtld_error("Invalid shared object handle %p", handle);
1350 * If the given object is already in the donelist, return true. Otherwise
1351 * add the object to the list and return false.
1354 donelist_check(DoneList *dlp, const Obj_Entry *obj)
1358 for (i = 0; i < dlp->num_used; i++)
1359 if (dlp->objs[i] == obj)
1362 * Our donelist allocation should always be sufficient. But if
1363 * our threads locking isn't working properly, more shared objects
1364 * could have been loaded since we allocated the list. That should
1365 * never happen, but we'll handle it properly just in case it does.
1367 if (dlp->num_used < dlp->num_alloc)
1368 dlp->objs[dlp->num_used++] = obj;
1373 * Hash function for symbol table lookup. Don't even think about changing
1374 * this. It is specified by the System V ABI.
1377 elf_hash(const char *name)
1379 const unsigned char *p = (const unsigned char *) name;
1380 unsigned long h = 0;
1383 while (*p != '\0') {
1384 h = (h << 4) + *p++;
1385 if ((g = h & 0xf0000000) != 0)
1393 * The GNU hash function is the Daniel J. Bernstein hash clipped to 32 bits
1394 * unsigned in case it's implemented with a wider type.
1397 gnu_hash(const char *s)
1403 for (c = *s; c != '\0'; c = *++s)
1405 return (h & 0xffffffff);
1409 * Find the library with the given name, and return its full pathname.
1410 * The returned string is dynamically allocated. Generates an error
1411 * message and returns NULL if the library cannot be found.
1413 * If the second argument is non-NULL, then it refers to an already-
1414 * loaded shared object, whose library search path will be searched.
1416 * The search order is:
1417 * DT_RPATH in the referencing file _unless_ DT_RUNPATH is present (1)
1418 * DT_RPATH of the main object if DSO without defined DT_RUNPATH (1)
1420 * DT_RUNPATH in the referencing file
1421 * ldconfig hints (if -z nodefaultlib, filter out default library directories
1423 * /lib:/usr/lib _unless_ the referencing file is linked with -z nodefaultlib
1425 * (1) Handled in digest_dynamic2 - rpath left NULL if runpath defined.
1428 find_library(const char *xname, const Obj_Entry *refobj)
1434 objgiven = refobj != NULL;
1435 if (strchr(xname, '/') != NULL) { /* Hard coded pathname */
1436 if (xname[0] != '/' && !trust) {
1437 _rtld_error("Absolute pathname required for shared object \"%s\"",
1441 if (objgiven && refobj->z_origin)
1442 return origin_subst(xname, refobj->origin_path);
1444 return xstrdup(xname);
1447 if (libmap_disable || !objgiven ||
1448 (name = lm_find(refobj->path, xname)) == NULL)
1449 name = (char *)xname;
1451 dbg(" Searching for \"%s\"", name);
1454 * If refobj->rpath != NULL, then refobj->runpath is NULL. Fall
1455 * back to pre-conforming behaviour if user requested so with
1456 * LD_LIBRARY_PATH_RPATH environment variable and ignore -z
1459 if (objgiven && refobj->rpath != NULL && ld_library_path_rpath) {
1460 if ((pathname = search_library_path(name, ld_library_path)) != NULL ||
1462 (pathname = search_library_path(name, refobj->rpath)) != NULL) ||
1463 (pathname = search_library_path(name, gethints(false))) != NULL ||
1464 (pathname = search_library_path(name, STANDARD_LIBRARY_PATH)) != NULL)
1468 (pathname = search_library_path(name, refobj->rpath)) != NULL) ||
1469 (objgiven && refobj->runpath == NULL && refobj != obj_main &&
1470 (pathname = search_library_path(name, obj_main->rpath)) != NULL) ||
1471 (pathname = search_library_path(name, ld_library_path)) != NULL ||
1473 (pathname = search_library_path(name, refobj->runpath)) != NULL) ||
1474 (pathname = search_library_path(name, gethints(refobj->z_nodeflib)))
1476 (objgiven && !refobj->z_nodeflib &&
1477 (pathname = search_library_path(name, STANDARD_LIBRARY_PATH)) != NULL))
1481 if (objgiven && refobj->path != NULL) {
1482 _rtld_error("Shared object \"%s\" not found, required by \"%s\"",
1483 name, basename(refobj->path));
1485 _rtld_error("Shared object \"%s\" not found", name);
1491 * Given a symbol number in a referencing object, find the corresponding
1492 * definition of the symbol. Returns a pointer to the symbol, or NULL if
1493 * no definition was found. Returns a pointer to the Obj_Entry of the
1494 * defining object via the reference parameter DEFOBJ_OUT.
1497 find_symdef(unsigned long symnum, const Obj_Entry *refobj,
1498 const Obj_Entry **defobj_out, int flags, SymCache *cache,
1499 RtldLockState *lockstate)
1503 const Obj_Entry *defobj;
1509 * If we have already found this symbol, get the information from
1512 if (symnum >= refobj->dynsymcount)
1513 return NULL; /* Bad object */
1514 if (cache != NULL && cache[symnum].sym != NULL) {
1515 *defobj_out = cache[symnum].obj;
1516 return cache[symnum].sym;
1519 ref = refobj->symtab + symnum;
1520 name = refobj->strtab + ref->st_name;
1525 * We don't have to do a full scale lookup if the symbol is local.
1526 * We know it will bind to the instance in this load module; to
1527 * which we already have a pointer (ie ref). By not doing a lookup,
1528 * we not only improve performance, but it also avoids unresolvable
1529 * symbols when local symbols are not in the hash table. This has
1530 * been seen with the ia64 toolchain.
1532 if (ELF_ST_BIND(ref->st_info) != STB_LOCAL) {
1533 if (ELF_ST_TYPE(ref->st_info) == STT_SECTION) {
1534 _rtld_error("%s: Bogus symbol table entry %lu", refobj->path,
1537 symlook_init(&req, name);
1539 req.ventry = fetch_ventry(refobj, symnum);
1540 req.lockstate = lockstate;
1541 res = symlook_default(&req, refobj);
1544 defobj = req.defobj_out;
1552 * If we found no definition and the reference is weak, treat the
1553 * symbol as having the value zero.
1555 if (def == NULL && ELF_ST_BIND(ref->st_info) == STB_WEAK) {
1561 *defobj_out = defobj;
1562 /* Record the information in the cache to avoid subsequent lookups. */
1563 if (cache != NULL) {
1564 cache[symnum].sym = def;
1565 cache[symnum].obj = defobj;
1568 if (refobj != &obj_rtld)
1569 _rtld_error("%s: Undefined symbol \"%s\"", refobj->path, name);
1575 * Return the search path from the ldconfig hints file, reading it if
1576 * necessary. If nostdlib is true, then the default search paths are
1577 * not added to result.
1579 * Returns NULL if there are problems with the hints file,
1580 * or if the search path there is empty.
1583 gethints(bool nostdlib)
1585 static char *hints, *filtered_path;
1586 struct elfhints_hdr hdr;
1587 struct fill_search_info_args sargs, hargs;
1588 struct dl_serinfo smeta, hmeta, *SLPinfo, *hintinfo;
1589 struct dl_serpath *SLPpath, *hintpath;
1591 unsigned int SLPndx, hintndx, fndx, fcount;
1596 /* First call, read the hints file */
1597 if (hints == NULL) {
1598 /* Keep from trying again in case the hints file is bad. */
1601 if ((fd = open(ld_elf_hints_path, O_RDONLY)) == -1)
1603 if (read(fd, &hdr, sizeof hdr) != sizeof hdr ||
1604 hdr.magic != ELFHINTS_MAGIC ||
1609 p = xmalloc(hdr.dirlistlen + 1);
1610 if (lseek(fd, hdr.strtab + hdr.dirlist, SEEK_SET) == -1 ||
1611 read(fd, p, hdr.dirlistlen + 1) !=
1612 (ssize_t)hdr.dirlistlen + 1) {
1622 * If caller agreed to receive list which includes the default
1623 * paths, we are done. Otherwise, if we still did not
1624 * calculated filtered result, do it now.
1627 return (hints[0] != '\0' ? hints : NULL);
1628 if (filtered_path != NULL)
1632 * Obtain the list of all configured search paths, and the
1633 * list of the default paths.
1635 * First estimate the size of the results.
1637 smeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
1639 hmeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
1642 sargs.request = RTLD_DI_SERINFOSIZE;
1643 sargs.serinfo = &smeta;
1644 hargs.request = RTLD_DI_SERINFOSIZE;
1645 hargs.serinfo = &hmeta;
1647 path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &sargs);
1648 path_enumerate(p, fill_search_info, &hargs);
1650 SLPinfo = xmalloc(smeta.dls_size);
1651 hintinfo = xmalloc(hmeta.dls_size);
1654 * Next fetch both sets of paths.
1656 sargs.request = RTLD_DI_SERINFO;
1657 sargs.serinfo = SLPinfo;
1658 sargs.serpath = &SLPinfo->dls_serpath[0];
1659 sargs.strspace = (char *)&SLPinfo->dls_serpath[smeta.dls_cnt];
1661 hargs.request = RTLD_DI_SERINFO;
1662 hargs.serinfo = hintinfo;
1663 hargs.serpath = &hintinfo->dls_serpath[0];
1664 hargs.strspace = (char *)&hintinfo->dls_serpath[hmeta.dls_cnt];
1666 path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &sargs);
1667 path_enumerate(p, fill_search_info, &hargs);
1670 * Now calculate the difference between two sets, by excluding
1671 * standard paths from the full set.
1675 filtered_path = xmalloc(hdr.dirlistlen + 1);
1676 hintpath = &hintinfo->dls_serpath[0];
1677 for (hintndx = 0; hintndx < hmeta.dls_cnt; hintndx++, hintpath++) {
1679 SLPpath = &SLPinfo->dls_serpath[0];
1681 * Check each standard path against current.
1683 for (SLPndx = 0; SLPndx < smeta.dls_cnt; SLPndx++, SLPpath++) {
1684 /* matched, skip the path */
1685 if (!strcmp(hintpath->dls_name, SLPpath->dls_name)) {
1693 * Not matched against any standard path, add the path
1694 * to result. Separate consequtive paths with ':'.
1697 filtered_path[fndx] = ':';
1701 flen = strlen(hintpath->dls_name);
1702 strncpy((filtered_path + fndx), hintpath->dls_name, flen);
1705 filtered_path[fndx] = '\0';
1711 return (filtered_path[0] != '\0' ? filtered_path : NULL);
1715 init_dag(Obj_Entry *root)
1717 const Needed_Entry *needed;
1718 const Objlist_Entry *elm;
1721 if (root->dag_inited)
1723 donelist_init(&donelist);
1725 /* Root object belongs to own DAG. */
1726 objlist_push_tail(&root->dldags, root);
1727 objlist_push_tail(&root->dagmembers, root);
1728 donelist_check(&donelist, root);
1731 * Add dependencies of root object to DAG in breadth order
1732 * by exploiting the fact that each new object get added
1733 * to the tail of the dagmembers list.
1735 STAILQ_FOREACH(elm, &root->dagmembers, link) {
1736 for (needed = elm->obj->needed; needed != NULL; needed = needed->next) {
1737 if (needed->obj == NULL || donelist_check(&donelist, needed->obj))
1739 objlist_push_tail(&needed->obj->dldags, root);
1740 objlist_push_tail(&root->dagmembers, needed->obj);
1743 root->dag_inited = true;
1747 process_nodelete(Obj_Entry *root)
1749 const Objlist_Entry *elm;
1752 * Walk over object DAG and process every dependent object that
1753 * is marked as DF_1_NODELETE. They need to grow their own DAG,
1754 * which then should have its reference upped separately.
1756 STAILQ_FOREACH(elm, &root->dagmembers, link) {
1757 if (elm->obj != NULL && elm->obj->z_nodelete &&
1758 !elm->obj->ref_nodel) {
1759 dbg("obj %s nodelete", elm->obj->path);
1762 elm->obj->ref_nodel = true;
1767 * Initialize the dynamic linker. The argument is the address at which
1768 * the dynamic linker has been mapped into memory. The primary task of
1769 * this function is to relocate the dynamic linker.
1772 init_rtld(caddr_t mapbase, Elf_Auxinfo **aux_info)
1774 Obj_Entry objtmp; /* Temporary rtld object */
1775 const Elf_Dyn *dyn_rpath;
1776 const Elf_Dyn *dyn_soname;
1777 const Elf_Dyn *dyn_runpath;
1780 * Conjure up an Obj_Entry structure for the dynamic linker.
1782 * The "path" member can't be initialized yet because string constants
1783 * cannot yet be accessed. Below we will set it correctly.
1785 memset(&objtmp, 0, sizeof(objtmp));
1788 objtmp.mapbase = mapbase;
1790 objtmp.relocbase = mapbase;
1792 if (RTLD_IS_DYNAMIC()) {
1793 objtmp.dynamic = rtld_dynamic(&objtmp);
1794 digest_dynamic1(&objtmp, 1, &dyn_rpath, &dyn_soname, &dyn_runpath);
1795 assert(objtmp.needed == NULL);
1796 #if !defined(__mips__)
1797 /* MIPS has a bogus DT_TEXTREL. */
1798 assert(!objtmp.textrel);
1802 * Temporarily put the dynamic linker entry into the object list, so
1803 * that symbols can be found.
1806 relocate_objects(&objtmp, true, &objtmp, 0, NULL);
1809 /* Initialize the object list. */
1810 obj_tail = &obj_list;
1812 /* Now that non-local variables can be accesses, copy out obj_rtld. */
1813 memcpy(&obj_rtld, &objtmp, sizeof(obj_rtld));
1815 if (aux_info[AT_PAGESZ] != NULL)
1816 pagesize = aux_info[AT_PAGESZ]->a_un.a_val;
1817 if (aux_info[AT_OSRELDATE] != NULL)
1818 osreldate = aux_info[AT_OSRELDATE]->a_un.a_val;
1820 digest_dynamic2(&obj_rtld, dyn_rpath, dyn_soname, dyn_runpath);
1822 /* Replace the path with a dynamically allocated copy. */
1823 obj_rtld.path = xstrdup(PATH_RTLD);
1825 r_debug.r_brk = r_debug_state;
1826 r_debug.r_state = RT_CONSISTENT;
1830 * Add the init functions from a needed object list (and its recursive
1831 * needed objects) to "list". This is not used directly; it is a helper
1832 * function for initlist_add_objects(). The write lock must be held
1833 * when this function is called.
1836 initlist_add_neededs(Needed_Entry *needed, Objlist *list)
1838 /* Recursively process the successor needed objects. */
1839 if (needed->next != NULL)
1840 initlist_add_neededs(needed->next, list);
1842 /* Process the current needed object. */
1843 if (needed->obj != NULL)
1844 initlist_add_objects(needed->obj, &needed->obj->next, list);
1848 * Scan all of the DAGs rooted in the range of objects from "obj" to
1849 * "tail" and add their init functions to "list". This recurses over
1850 * the DAGs and ensure the proper init ordering such that each object's
1851 * needed libraries are initialized before the object itself. At the
1852 * same time, this function adds the objects to the global finalization
1853 * list "list_fini" in the opposite order. The write lock must be
1854 * held when this function is called.
1857 initlist_add_objects(Obj_Entry *obj, Obj_Entry **tail, Objlist *list)
1860 if (obj->init_scanned || obj->init_done)
1862 obj->init_scanned = true;
1864 /* Recursively process the successor objects. */
1865 if (&obj->next != tail)
1866 initlist_add_objects(obj->next, tail, list);
1868 /* Recursively process the needed objects. */
1869 if (obj->needed != NULL)
1870 initlist_add_neededs(obj->needed, list);
1871 if (obj->needed_filtees != NULL)
1872 initlist_add_neededs(obj->needed_filtees, list);
1873 if (obj->needed_aux_filtees != NULL)
1874 initlist_add_neededs(obj->needed_aux_filtees, list);
1876 /* Add the object to the init list. */
1877 if (obj->preinit_array != (Elf_Addr)NULL || obj->init != (Elf_Addr)NULL ||
1878 obj->init_array != (Elf_Addr)NULL)
1879 objlist_push_tail(list, obj);
1881 /* Add the object to the global fini list in the reverse order. */
1882 if ((obj->fini != (Elf_Addr)NULL || obj->fini_array != (Elf_Addr)NULL)
1883 && !obj->on_fini_list) {
1884 objlist_push_head(&list_fini, obj);
1885 obj->on_fini_list = true;
1890 #define FPTR_TARGET(f) ((Elf_Addr) (f))
1894 free_needed_filtees(Needed_Entry *n)
1896 Needed_Entry *needed, *needed1;
1898 for (needed = n; needed != NULL; needed = needed->next) {
1899 if (needed->obj != NULL) {
1900 dlclose(needed->obj);
1904 for (needed = n; needed != NULL; needed = needed1) {
1905 needed1 = needed->next;
1911 unload_filtees(Obj_Entry *obj)
1914 free_needed_filtees(obj->needed_filtees);
1915 obj->needed_filtees = NULL;
1916 free_needed_filtees(obj->needed_aux_filtees);
1917 obj->needed_aux_filtees = NULL;
1918 obj->filtees_loaded = false;
1922 load_filtee1(Obj_Entry *obj, Needed_Entry *needed, int flags,
1923 RtldLockState *lockstate)
1926 for (; needed != NULL; needed = needed->next) {
1927 needed->obj = dlopen_object(obj->strtab + needed->name, -1, obj,
1928 flags, ((ld_loadfltr || obj->z_loadfltr) ? RTLD_NOW : RTLD_LAZY) |
1929 RTLD_LOCAL, lockstate);
1934 load_filtees(Obj_Entry *obj, int flags, RtldLockState *lockstate)
1937 lock_restart_for_upgrade(lockstate);
1938 if (!obj->filtees_loaded) {
1939 load_filtee1(obj, obj->needed_filtees, flags, lockstate);
1940 load_filtee1(obj, obj->needed_aux_filtees, flags, lockstate);
1941 obj->filtees_loaded = true;
1946 process_needed(Obj_Entry *obj, Needed_Entry *needed, int flags)
1950 for (; needed != NULL; needed = needed->next) {
1951 obj1 = needed->obj = load_object(obj->strtab + needed->name, -1, obj,
1952 flags & ~RTLD_LO_NOLOAD);
1953 if (obj1 == NULL && !ld_tracing && (flags & RTLD_LO_FILTEES) == 0)
1960 * Given a shared object, traverse its list of needed objects, and load
1961 * each of them. Returns 0 on success. Generates an error message and
1962 * returns -1 on failure.
1965 load_needed_objects(Obj_Entry *first, int flags)
1969 for (obj = first; obj != NULL; obj = obj->next) {
1970 if (process_needed(obj, obj->needed, flags) == -1)
1977 load_preload_objects(void)
1979 char *p = ld_preload;
1980 static const char delim[] = " \t:;";
1985 p += strspn(p, delim);
1986 while (*p != '\0') {
1987 size_t len = strcspn(p, delim);
1992 if (load_object(p, -1, NULL, 0) == NULL)
1993 return -1; /* XXX - cleanup */
1996 p += strspn(p, delim);
1998 LD_UTRACE(UTRACE_PRELOAD_FINISHED, NULL, NULL, 0, 0, NULL);
2003 printable_path(const char *path)
2006 return (path == NULL ? "<unknown>" : path);
2010 * Load a shared object into memory, if it is not already loaded. The
2011 * object may be specified by name or by user-supplied file descriptor
2012 * fd_u. In the later case, the fd_u descriptor is not closed, but its
2015 * Returns a pointer to the Obj_Entry for the object. Returns NULL
2019 load_object(const char *name, int fd_u, const Obj_Entry *refobj, int flags)
2027 for (obj = obj_list->next; obj != NULL; obj = obj->next) {
2028 if (object_match_name(obj, name))
2032 path = find_library(name, refobj);
2039 * If we didn't find a match by pathname, or the name is not
2040 * supplied, open the file and check again by device and inode.
2041 * This avoids false mismatches caused by multiple links or ".."
2044 * To avoid a race, we open the file and use fstat() rather than
2049 if ((fd = open(path, O_RDONLY)) == -1) {
2050 _rtld_error("Cannot open \"%s\"", path);
2057 _rtld_error("Cannot dup fd");
2062 if (fstat(fd, &sb) == -1) {
2063 _rtld_error("Cannot fstat \"%s\"", printable_path(path));
2068 for (obj = obj_list->next; obj != NULL; obj = obj->next)
2069 if (obj->ino == sb.st_ino && obj->dev == sb.st_dev)
2071 if (obj != NULL && name != NULL) {
2072 object_add_name(obj, name);
2077 if (flags & RTLD_LO_NOLOAD) {
2083 /* First use of this object, so we must map it in */
2084 obj = do_load_object(fd, name, path, &sb, flags);
2093 do_load_object(int fd, const char *name, char *path, struct stat *sbp,
2100 * but first, make sure that environment variables haven't been
2101 * used to circumvent the noexec flag on a filesystem.
2103 if (dangerous_ld_env) {
2104 if (fstatfs(fd, &fs) != 0) {
2105 _rtld_error("Cannot fstatfs \"%s\"", printable_path(path));
2108 if (fs.f_flags & MNT_NOEXEC) {
2109 _rtld_error("Cannot execute objects on %s\n", fs.f_mntonname);
2113 dbg("loading \"%s\"", printable_path(path));
2114 obj = map_object(fd, printable_path(path), sbp);
2119 * If DT_SONAME is present in the object, digest_dynamic2 already
2120 * added it to the object names.
2123 object_add_name(obj, name);
2125 digest_dynamic(obj, 0);
2126 dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d", obj->path,
2127 obj->valid_hash_sysv, obj->valid_hash_gnu, obj->dynsymcount);
2128 if (obj->z_noopen && (flags & (RTLD_LO_DLOPEN | RTLD_LO_TRACE)) ==
2130 dbg("refusing to load non-loadable \"%s\"", obj->path);
2131 _rtld_error("Cannot dlopen non-loadable %s", obj->path);
2132 munmap(obj->mapbase, obj->mapsize);
2138 obj_tail = &obj->next;
2141 linkmap_add(obj); /* for GDB & dlinfo() */
2142 max_stack_flags |= obj->stack_flags;
2144 dbg(" %p .. %p: %s", obj->mapbase,
2145 obj->mapbase + obj->mapsize - 1, obj->path);
2147 dbg(" WARNING: %s has impure text", obj->path);
2148 LD_UTRACE(UTRACE_LOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0,
2155 obj_from_addr(const void *addr)
2159 for (obj = obj_list; obj != NULL; obj = obj->next) {
2160 if (addr < (void *) obj->mapbase)
2162 if (addr < (void *) (obj->mapbase + obj->mapsize))
2171 Elf_Addr *preinit_addr;
2174 preinit_addr = (Elf_Addr *)obj_main->preinit_array;
2175 if (preinit_addr == NULL)
2178 for (index = 0; index < obj_main->preinit_array_num; index++) {
2179 if (preinit_addr[index] != 0 && preinit_addr[index] != 1) {
2180 dbg("calling preinit function for %s at %p", obj_main->path,
2181 (void *)preinit_addr[index]);
2182 LD_UTRACE(UTRACE_INIT_CALL, obj_main, (void *)preinit_addr[index],
2183 0, 0, obj_main->path);
2184 call_init_pointer(obj_main, preinit_addr[index]);
2190 * Call the finalization functions for each of the objects in "list"
2191 * belonging to the DAG of "root" and referenced once. If NULL "root"
2192 * is specified, every finalization function will be called regardless
2193 * of the reference count and the list elements won't be freed. All of
2194 * the objects are expected to have non-NULL fini functions.
2197 objlist_call_fini(Objlist *list, Obj_Entry *root, RtldLockState *lockstate)
2201 Elf_Addr *fini_addr;
2204 assert(root == NULL || root->refcount == 1);
2207 * Preserve the current error message since a fini function might
2208 * call into the dynamic linker and overwrite it.
2210 saved_msg = errmsg_save();
2212 STAILQ_FOREACH(elm, list, link) {
2213 if (root != NULL && (elm->obj->refcount != 1 ||
2214 objlist_find(&root->dagmembers, elm->obj) == NULL))
2216 /* Remove object from fini list to prevent recursive invocation. */
2217 STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link);
2219 * XXX: If a dlopen() call references an object while the
2220 * fini function is in progress, we might end up trying to
2221 * unload the referenced object in dlclose() or the object
2222 * won't be unloaded although its fini function has been
2225 lock_release(rtld_bind_lock, lockstate);
2228 * It is legal to have both DT_FINI and DT_FINI_ARRAY defined.
2229 * When this happens, DT_FINI_ARRAY is processed first.
2231 fini_addr = (Elf_Addr *)elm->obj->fini_array;
2232 if (fini_addr != NULL && elm->obj->fini_array_num > 0) {
2233 for (index = elm->obj->fini_array_num - 1; index >= 0;
2235 if (fini_addr[index] != 0 && fini_addr[index] != 1) {
2236 dbg("calling fini function for %s at %p",
2237 elm->obj->path, (void *)fini_addr[index]);
2238 LD_UTRACE(UTRACE_FINI_CALL, elm->obj,
2239 (void *)fini_addr[index], 0, 0, elm->obj->path);
2240 call_initfini_pointer(elm->obj, fini_addr[index]);
2244 if (elm->obj->fini != (Elf_Addr)NULL) {
2245 dbg("calling fini function for %s at %p", elm->obj->path,
2246 (void *)elm->obj->fini);
2247 LD_UTRACE(UTRACE_FINI_CALL, elm->obj, (void *)elm->obj->fini,
2248 0, 0, elm->obj->path);
2249 call_initfini_pointer(elm->obj, elm->obj->fini);
2251 wlock_acquire(rtld_bind_lock, lockstate);
2252 /* No need to free anything if process is going down. */
2256 * We must restart the list traversal after every fini call
2257 * because a dlclose() call from the fini function or from
2258 * another thread might have modified the reference counts.
2262 } while (elm != NULL);
2263 errmsg_restore(saved_msg);
2267 * Call the initialization functions for each of the objects in
2268 * "list". All of the objects are expected to have non-NULL init
2272 objlist_call_init(Objlist *list, RtldLockState *lockstate)
2277 Elf_Addr *init_addr;
2281 * Clean init_scanned flag so that objects can be rechecked and
2282 * possibly initialized earlier if any of vectors called below
2283 * cause the change by using dlopen.
2285 for (obj = obj_list; obj != NULL; obj = obj->next)
2286 obj->init_scanned = false;
2289 * Preserve the current error message since an init function might
2290 * call into the dynamic linker and overwrite it.
2292 saved_msg = errmsg_save();
2293 STAILQ_FOREACH(elm, list, link) {
2294 if (elm->obj->init_done) /* Initialized early. */
2297 * Race: other thread might try to use this object before current
2298 * one completes the initilization. Not much can be done here
2299 * without better locking.
2301 elm->obj->init_done = true;
2302 lock_release(rtld_bind_lock, lockstate);
2305 * It is legal to have both DT_INIT and DT_INIT_ARRAY defined.
2306 * When this happens, DT_INIT is processed first.
2308 if (elm->obj->init != (Elf_Addr)NULL) {
2309 dbg("calling init function for %s at %p", elm->obj->path,
2310 (void *)elm->obj->init);
2311 LD_UTRACE(UTRACE_INIT_CALL, elm->obj, (void *)elm->obj->init,
2312 0, 0, elm->obj->path);
2313 call_initfini_pointer(elm->obj, elm->obj->init);
2315 init_addr = (Elf_Addr *)elm->obj->init_array;
2316 if (init_addr != NULL) {
2317 for (index = 0; index < elm->obj->init_array_num; index++) {
2318 if (init_addr[index] != 0 && init_addr[index] != 1) {
2319 dbg("calling init function for %s at %p", elm->obj->path,
2320 (void *)init_addr[index]);
2321 LD_UTRACE(UTRACE_INIT_CALL, elm->obj,
2322 (void *)init_addr[index], 0, 0, elm->obj->path);
2323 call_init_pointer(elm->obj, init_addr[index]);
2327 wlock_acquire(rtld_bind_lock, lockstate);
2329 errmsg_restore(saved_msg);
2333 objlist_clear(Objlist *list)
2337 while (!STAILQ_EMPTY(list)) {
2338 elm = STAILQ_FIRST(list);
2339 STAILQ_REMOVE_HEAD(list, link);
2344 static Objlist_Entry *
2345 objlist_find(Objlist *list, const Obj_Entry *obj)
2349 STAILQ_FOREACH(elm, list, link)
2350 if (elm->obj == obj)
2356 objlist_init(Objlist *list)
2362 objlist_push_head(Objlist *list, Obj_Entry *obj)
2366 elm = NEW(Objlist_Entry);
2368 STAILQ_INSERT_HEAD(list, elm, link);
2372 objlist_push_tail(Objlist *list, Obj_Entry *obj)
2376 elm = NEW(Objlist_Entry);
2378 STAILQ_INSERT_TAIL(list, elm, link);
2382 objlist_remove(Objlist *list, Obj_Entry *obj)
2386 if ((elm = objlist_find(list, obj)) != NULL) {
2387 STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link);
2393 * Relocate dag rooted in the specified object.
2394 * Returns 0 on success, or -1 on failure.
2398 relocate_object_dag(Obj_Entry *root, bool bind_now, Obj_Entry *rtldobj,
2399 int flags, RtldLockState *lockstate)
2405 STAILQ_FOREACH(elm, &root->dagmembers, link) {
2406 error = relocate_object(elm->obj, bind_now, rtldobj, flags,
2415 * Relocate single object.
2416 * Returns 0 on success, or -1 on failure.
2419 relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
2420 int flags, RtldLockState *lockstate)
2425 obj->relocated = true;
2427 dbg("relocating \"%s\"", obj->path);
2429 if (obj->symtab == NULL || obj->strtab == NULL ||
2430 !(obj->valid_hash_sysv || obj->valid_hash_gnu)) {
2431 _rtld_error("%s: Shared object has no run-time symbol table",
2437 /* There are relocations to the write-protected text segment. */
2438 if (mprotect(obj->mapbase, obj->textsize,
2439 PROT_READ|PROT_WRITE|PROT_EXEC) == -1) {
2440 _rtld_error("%s: Cannot write-enable text segment: %s",
2441 obj->path, rtld_strerror(errno));
2446 /* Process the non-PLT relocations. */
2447 if (reloc_non_plt(obj, rtldobj, flags, lockstate))
2450 if (obj->textrel) { /* Re-protected the text segment. */
2451 if (mprotect(obj->mapbase, obj->textsize,
2452 PROT_READ|PROT_EXEC) == -1) {
2453 _rtld_error("%s: Cannot write-protect text segment: %s",
2454 obj->path, rtld_strerror(errno));
2460 /* Set the special PLT or GOT entries. */
2463 /* Process the PLT relocations. */
2464 if (reloc_plt(obj) == -1)
2466 /* Relocate the jump slots if we are doing immediate binding. */
2467 if (obj->bind_now || bind_now)
2468 if (reloc_jmpslots(obj, flags, lockstate) == -1)
2471 if (obj->relro_size > 0) {
2472 if (mprotect(obj->relro_page, obj->relro_size,
2474 _rtld_error("%s: Cannot enforce relro protection: %s",
2475 obj->path, rtld_strerror(errno));
2481 * Set up the magic number and version in the Obj_Entry. These
2482 * were checked in the crt1.o from the original ElfKit, so we
2483 * set them for backward compatibility.
2485 obj->magic = RTLD_MAGIC;
2486 obj->version = RTLD_VERSION;
2492 * Relocate newly-loaded shared objects. The argument is a pointer to
2493 * the Obj_Entry for the first such object. All objects from the first
2494 * to the end of the list of objects are relocated. Returns 0 on success,
2498 relocate_objects(Obj_Entry *first, bool bind_now, Obj_Entry *rtldobj,
2499 int flags, RtldLockState *lockstate)
2504 for (error = 0, obj = first; obj != NULL; obj = obj->next) {
2505 error = relocate_object(obj, bind_now, rtldobj, flags,
2514 * The handling of R_MACHINE_IRELATIVE relocations and jumpslots
2515 * referencing STT_GNU_IFUNC symbols is postponed till the other
2516 * relocations are done. The indirect functions specified as
2517 * ifunc are allowed to call other symbols, so we need to have
2518 * objects relocated before asking for resolution from indirects.
2520 * The R_MACHINE_IRELATIVE slots are resolved in greedy fashion,
2521 * instead of the usual lazy handling of PLT slots. It is
2522 * consistent with how GNU does it.
2525 resolve_object_ifunc(Obj_Entry *obj, bool bind_now, int flags,
2526 RtldLockState *lockstate)
2528 if (obj->irelative && reloc_iresolve(obj, lockstate) == -1)
2530 if ((obj->bind_now || bind_now) && obj->gnu_ifunc &&
2531 reloc_gnu_ifunc(obj, flags, lockstate) == -1)
2537 resolve_objects_ifunc(Obj_Entry *first, bool bind_now, int flags,
2538 RtldLockState *lockstate)
2542 for (obj = first; obj != NULL; obj = obj->next) {
2543 if (resolve_object_ifunc(obj, bind_now, flags, lockstate) == -1)
2550 initlist_objects_ifunc(Objlist *list, bool bind_now, int flags,
2551 RtldLockState *lockstate)
2555 STAILQ_FOREACH(elm, list, link) {
2556 if (resolve_object_ifunc(elm->obj, bind_now, flags,
2564 * Cleanup procedure. It will be called (by the atexit mechanism) just
2565 * before the process exits.
2570 RtldLockState lockstate;
2572 wlock_acquire(rtld_bind_lock, &lockstate);
2574 objlist_call_fini(&list_fini, NULL, &lockstate);
2575 /* No need to remove the items from the list, since we are exiting. */
2576 if (!libmap_disable)
2578 lock_release(rtld_bind_lock, &lockstate);
/*
 * path_enumerate: split a ':' or ';' separated search path and invoke
 * "callback" on each component, consulting libmap translations first.
 * Iteration stops when a callback returns non-NULL (its result is the
 * return value; lines implementing that return are elided here).
 */
2582 path_enumerate(const char *path, path_enum_proc callback, void *arg)
2590 path += strspn(path, ":;");
2591 while (*path != '\0') {
2595 len = strcspn(path, ":;");
/* Prefer the libmap-translated directory when one exists. */
2597 trans = lm_findn(NULL, path, len);
2599 res = callback(trans, strlen(trans), arg);
2602 res = callback(path, len, arg);
2608 path += strspn(path, ":;");
/* Argument bundle passed through path_enumerate() to try_library_path(). */
2614 struct try_library_args {
/*
 * try_library_path: path_enumerate callback. Builds "dir/name" in the
 * shared scratch buffer and, if the file exists, returns a freshly
 * xmalloc'd copy of the full pathname (caller owns it).
 * Relative directories are only honored for trusted processes.
 */
2622 try_library_path(const char *dir, size_t dirlen, void *param)
2624 struct try_library_args *arg;
2627 if (*dir == '/' || trust) {
/* Reject combinations that would overflow the scratch buffer. */
2630 if (dirlen + 1 + arg->namelen + 1 > arg->buflen)
2633 pathname = arg->buffer;
2634 strncpy(pathname, dir, dirlen);
2635 pathname[dirlen] = '/';
2636 strcpy(pathname + dirlen + 1, arg->name);
2638 dbg(" Trying \"%s\"", pathname);
2639 if (access(pathname, F_OK) == 0) { /* We found it */
2640 pathname = xmalloc(dirlen + 1 + arg->namelen + 1);
2641 strcpy(pathname, arg->buffer);
/*
 * search_library_path: look for "name" in each directory of "path" using
 * try_library_path(); returns the found pathname (malloc'd) or NULL.
 * The PATH_MAX scratch buffer is freed after enumeration (line elided).
 */
2649 search_library_path(const char *name, const char *path)
2652 struct try_library_args arg;
2658 arg.namelen = strlen(name);
2659 arg.buffer = xmalloc(PATH_MAX);
2660 arg.buflen = PATH_MAX;
2662 p = path_enumerate(path, try_library_path, &arg);
/*
 * dlclose: drop one dlopen reference on "handle". When the object's
 * total refcount indicates no remaining users, run its fini functions
 * and unload it, notifying the debugger around the unlink.
 */
2670 dlclose(void *handle)
2673 RtldLockState lockstate;
2675 wlock_acquire(rtld_bind_lock, &lockstate);
2676 root = dlcheck(handle);
2678 lock_release(rtld_bind_lock, &lockstate);
2681 LD_UTRACE(UTRACE_DLCLOSE_START, handle, NULL, 0, root->dl_refcount,
2684 /* Unreference the object and its dependencies. */
2685 root->dl_refcount--;
2687 if (root->refcount == 1) {
2689 * The object will be no longer referenced, so we must unload it.
2690 * First, call the fini functions.
2692 objlist_call_fini(&list_fini, root, &lockstate);
2696 /* Finish cleaning up the newly-unreferenced objects. */
2697 GDB_STATE(RT_DELETE,&root->linkmap);
2698 unload_object(root);
2699 GDB_STATE(RT_CONSISTENT,NULL);
2703 LD_UTRACE(UTRACE_DLCLOSE_STOP, handle, NULL, 0, 0, NULL);
2704 lock_release(rtld_bind_lock, &lockstate);
/*
 * NOTE(review): function header elided — presumably dlerror(): return the
 * pending error message and clear it so a second call yields NULL.
 */
2711 char *msg = error_message;
2712 error_message = NULL;
2717 * This function is deprecated and has no effect.
/*
 * dllockinit: legacy user-supplied locking interface. All callbacks are
 * ignored; only the context-destroy bookkeeping is honored so that a
 * previously registered context is torn down on the next call.
 */
2720 dllockinit(void *context,
2721 void *(*lock_create)(void *context),
2722 void (*rlock_acquire)(void *lock),
2723 void (*wlock_acquire)(void *lock),
2724 void (*lock_release)(void *lock),
2725 void (*lock_destroy)(void *lock),
2726 void (*context_destroy)(void *context))
2728 static void *cur_context;
2729 static void (*cur_context_destroy)(void *);
2731 /* Just destroy the context from the previous call, if necessary. */
2732 if (cur_context_destroy != NULL)
2733 cur_context_destroy(cur_context);
2734 cur_context = context;
2735 cur_context_destroy = context_destroy;
/* dlopen: public entry point; thin wrapper over rtld_dlopen() with no fd. */
2739 dlopen(const char *name, int mode)
2742 return (rtld_dlopen(name, -1, mode));
/* fdlopen: like dlopen() but loads the object from an open descriptor. */
2746 fdlopen(int fd, int mode)
2749 return (rtld_dlopen(NULL, fd, mode));
/*
 * rtld_dlopen: common implementation behind dlopen()/fdlopen(). Maps the
 * RTLD_* mode bits onto internal RTLD_LO_* load flags, arranges tracing
 * (LD_TRACE_LOADED_OBJECTS style) when requested, then defers to
 * dlopen_object() with the main object as the referencing object.
 */
2753 rtld_dlopen(const char *name, int fd, int mode)
2755 RtldLockState lockstate;
2758 LD_UTRACE(UTRACE_DLOPEN_START, NULL, NULL, 0, mode, name);
2759 ld_tracing = (mode & RTLD_TRACE) == 0 ? NULL : "1";
2760 if (ld_tracing != NULL) {
/* Tracing needs the program's environ; fetch it under the bind lock. */
2761 rlock_acquire(rtld_bind_lock, &lockstate);
2762 if (sigsetjmp(lockstate.env, 0) != 0)
2763 lock_upgrade(rtld_bind_lock, &lockstate);
2764 environ = (char **)*get_program_var_addr("environ", &lockstate);
2765 lock_release(rtld_bind_lock, &lockstate);
2767 lo_flags = RTLD_LO_DLOPEN;
2768 if (mode & RTLD_NODELETE)
2769 lo_flags |= RTLD_LO_NODELETE;
2770 if (mode & RTLD_NOLOAD)
2771 lo_flags |= RTLD_LO_NOLOAD;
2772 if (ld_tracing != NULL)
2773 lo_flags |= RTLD_LO_TRACE;
2775 return (dlopen_object(name, fd, obj_main, lo_flags,
2776 mode & (RTLD_MODEMASK | RTLD_GLOBAL), NULL));
/*
 * dlopen_cleanup: error path for dlopen_object(); unwinds references on
 * "obj" and unloads it when no references remain (body partly elided).
 */
2780 dlopen_cleanup(Obj_Entry *obj)
2785 if (obj->refcount == 0)
/*
 * dlopen_object: core of dlopen. Loads the named object (or finds an
 * already-loaded one), pulls in its needed objects, verifies versions,
 * relocates the new DAG, resolves ifuncs, and finally runs init
 * functions — unless RTLD_LO_EARLY, when init is deferred to startup.
 * Takes the bind lock itself when the caller did not pass a lockstate.
 */
2790 dlopen_object(const char *name, int fd, Obj_Entry *refobj, int lo_flags,
2791 int mode, RtldLockState *lockstate)
2793 Obj_Entry **old_obj_tail;
2796 RtldLockState mlockstate;
2799 objlist_init(&initlist);
2801 if (lockstate == NULL && !(lo_flags & RTLD_LO_EARLY)) {
2802 wlock_acquire(rtld_bind_lock, &mlockstate);
2803 lockstate = &mlockstate;
2805 GDB_STATE(RT_ADD,NULL);
2807 old_obj_tail = obj_tail;
2809 if (name == NULL && fd == -1) {
2813 obj = load_object(name, fd, refobj, lo_flags);
2818 if (mode & RTLD_GLOBAL && objlist_find(&list_global, obj) == NULL)
2819 objlist_push_tail(&list_global, obj);
2820 if (*old_obj_tail != NULL) { /* We loaded something new. */
2821 assert(*old_obj_tail == obj);
2822 result = load_needed_objects(obj,
2823 lo_flags & (RTLD_LO_DLOPEN | RTLD_LO_EARLY));
2827 result = rtld_verify_versions(&obj->dagmembers);
2828 if (result != -1 && ld_tracing)
2830 if (result == -1 || relocate_object_dag(obj,
2831 (mode & RTLD_MODEMASK) == RTLD_NOW, &obj_rtld,
2832 (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0,
2834 dlopen_cleanup(obj);
2836 } else if (lo_flags & RTLD_LO_EARLY) {
2838 * Do not call the init functions for early loaded
2839 * filtees. The image is still not initialized enough
2842 * Our object is found by the global object list and
2843 * will be ordered among all init calls done right
2844 * before transferring control to main.
2847 /* Make list of init functions to call. */
2848 initlist_add_objects(obj, &obj->next, &initlist)
2851 * Process all no_delete objects here, giving them their own
2852 * DAGs to prevent their dependencies from being unloaded.
2853 * This has to be done after we have loaded all of the
2854 * dependencies, so that we do not miss any.
2857 process_nodelete(obj);
2860 * Bump the reference counts for objects on this DAG. If
2861 * this is the first dlopen() call for the object that was
2862 * already loaded as a dependency, initialize the dag
2868 if ((lo_flags & RTLD_LO_TRACE) != 0)
2871 if (obj != NULL && ((lo_flags & RTLD_LO_NODELETE) != 0 ||
2872 obj->z_nodelete) && !obj->ref_nodel) {
2873 dbg("obj %s nodelete", obj->path);
2875 obj->z_nodelete = obj->ref_nodel = true;
2879 LD_UTRACE(UTRACE_DLOPEN_STOP, obj, NULL, 0, obj ? obj->dl_refcount : 0,
2881 GDB_STATE(RT_CONSISTENT,obj ? &obj->linkmap : NULL);
2883 if (!(lo_flags & RTLD_LO_EARLY)) {
2884 map_stacks_exec(lockstate);
2887 if (initlist_objects_ifunc(&initlist, (mode & RTLD_MODEMASK) == RTLD_NOW,
2888 (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0,
2890 objlist_clear(&initlist);
2891 dlopen_cleanup(obj);
/* Release the lock only if this function acquired it. */
2892 if (lockstate == &mlockstate)
2893 lock_release(rtld_bind_lock, lockstate);
2897 if (!(lo_flags & RTLD_LO_EARLY)) {
2898 /* Call the init functions. */
2899 objlist_call_init(&initlist, lockstate);
2901 objlist_clear(&initlist);
2902 if (lockstate == &mlockstate)
2903 lock_release(rtld_bind_lock, lockstate);
2906 trace_loaded_objects(obj);
2907 if (lockstate == &mlockstate)
2908 lock_release(rtld_bind_lock, lockstate);
/*
 * do_dlsym: shared implementation for dlsym/dlfunc/dlvsym. Resolves
 * "name" (optionally a specific version "ve") according to the handle:
 * NULL = caller's object, RTLD_NEXT/RTLD_SELF = objects after/including
 * the caller, RTLD_DEFAULT = default search order, otherwise the DAG of
 * the dlopen handle. Returns the relocated address, a function pointer,
 * an ifunc resolution, or a TLS address depending on the symbol type.
 */
2913 do_dlsym(void *handle, const char *name, void *retaddr, const Ver_Entry *ve,
2917 const Obj_Entry *obj, *defobj;
2920 RtldLockState lockstate;
2928 symlook_init(&req, name);
2930 req.flags = flags | SYMLOOK_IN_PLT;
2931 req.lockstate = &lockstate;
2933 rlock_acquire(rtld_bind_lock, &lockstate);
2934 if (sigsetjmp(lockstate.env, 0) != 0)
2935 lock_upgrade(rtld_bind_lock, &lockstate);
2936 if (handle == NULL || handle == RTLD_NEXT ||
2937 handle == RTLD_DEFAULT || handle == RTLD_SELF) {
2939 if ((obj = obj_from_addr(retaddr)) == NULL) {
2940 _rtld_error("Cannot determine caller's shared object");
2941 lock_release(rtld_bind_lock, &lockstate);
2944 if (handle == NULL) { /* Just the caller's shared object. */
2945 res = symlook_obj(&req, obj);
2948 defobj = req.defobj_out;
2950 } else if (handle == RTLD_NEXT || /* Objects after caller's */
2951 handle == RTLD_SELF) { /* ... caller included */
2952 if (handle == RTLD_NEXT)
2954 for (; obj != NULL; obj = obj->next) {
2955 res = symlook_obj(&req, obj);
2958 ELF_ST_BIND(req.sym_out->st_info) != STB_WEAK) {
2960 defobj = req.defobj_out;
2961 if (ELF_ST_BIND(def->st_info) != STB_WEAK)
2967 * Search the dynamic linker itself, and possibly resolve the
2968 * symbol from there. This is how the application links to
2969 * dynamic linker services such as dlopen.
2971 if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
2972 res = symlook_obj(&req, &obj_rtld);
2975 defobj = req.defobj_out;
2979 assert(handle == RTLD_DEFAULT);
2980 res = symlook_default(&req, obj);
2982 defobj = req.defobj_out;
2987 if ((obj = dlcheck(handle)) == NULL) {
2988 lock_release(rtld_bind_lock, &lockstate);
2992 donelist_init(&donelist);
2993 if (obj->mainprog) {
2994 /* Handle obtained by dlopen(NULL, ...) implies global scope. */
2995 res = symlook_global(&req, &donelist);
2998 defobj = req.defobj_out;
3001 * Search the dynamic linker itself, and possibly resolve the
3002 * symbol from there. This is how the application links to
3003 * dynamic linker services such as dlopen.
3005 if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
3006 res = symlook_obj(&req, &obj_rtld);
3009 defobj = req.defobj_out;
3014 /* Search the whole DAG rooted at the given object. */
3015 res = symlook_list(&req, &obj->dagmembers, &donelist);
3018 defobj = req.defobj_out;
3024 lock_release(rtld_bind_lock, &lockstate);
3027 * The value required by the caller is derived from the value
3028 * of the symbol. For the ia64 architecture, we need to
3029 * construct a function descriptor which the caller can use to
3030 * call the function with the right 'gp' value. For other
3031 * architectures and for non-functions, the value is simply
3032 * the relocated value of the symbol.
3034 if (ELF_ST_TYPE(def->st_info) == STT_FUNC)
3035 return (make_function_pointer(def, defobj));
3036 else if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
3037 return (rtld_resolve_ifunc(defobj, def));
3038 else if (ELF_ST_TYPE(def->st_info) == STT_TLS) {
3040 return (__tls_get_addr(defobj->tlsindex, def->st_value));
3042 ti.ti_module = defobj->tlsindex;
3043 ti.ti_offset = def->st_value;
3044 return (__tls_get_addr(&ti));
3047 return (defobj->relocbase + def->st_value);
3050 _rtld_error("Undefined symbol \"%s\"", name);
3051 lock_release(rtld_bind_lock, &lockstate);
/* dlsym: public wrapper; passes the caller's return address to do_dlsym(). */
3056 dlsym(void *handle, const char *name)
3058 return do_dlsym(handle, name, __builtin_return_address(0), NULL,
/* dlfunc: like dlsym() but returns via a union to yield a function type. */
3063 dlfunc(void *handle, const char *name)
3070 rv.d = do_dlsym(handle, name, __builtin_return_address(0), NULL,
/*
 * dlvsym: versioned dlsym. Builds a Ver_Entry for "version" (including
 * its ELF hash) and asks do_dlsym() for that specific version.
 */
3076 dlvsym(void *handle, const char *name, const char *version)
3080 ventry.name = version;
3082 ventry.hash = elf_hash(version);
3084 return do_dlsym(handle, name, __builtin_return_address(0), &ventry,
/*
 * _rtld_addr_phdr: fill *phdr_info for the object containing "addr".
 * Sets an rtld error and returns (failure) if no object maps the address.
 */
3089 _rtld_addr_phdr(const void *addr, struct dl_phdr_info *phdr_info)
3091 const Obj_Entry *obj;
3092 RtldLockState lockstate;
3094 rlock_acquire(rtld_bind_lock, &lockstate);
3095 obj = obj_from_addr(addr);
3097 _rtld_error("No shared object contains address");
3098 lock_release(rtld_bind_lock, &lockstate);
3101 rtld_fill_dl_phdr_info(obj, phdr_info);
3102 lock_release(rtld_bind_lock, &lockstate);
/*
 * dladdr: translate an address into Dl_info — the containing object's
 * path and base, plus the nearest dynamic symbol at or below the address.
 * Scans the object's entire dynamic symbol table linearly.
 */
3107 dladdr(const void *addr, Dl_info *info)
3109 const Obj_Entry *obj;
3112 unsigned long symoffset;
3113 RtldLockState lockstate;
3115 rlock_acquire(rtld_bind_lock, &lockstate);
3116 obj = obj_from_addr(addr);
3118 _rtld_error("No shared object contains address");
3119 lock_release(rtld_bind_lock, &lockstate);
3122 info->dli_fname = obj->path;
3123 info->dli_fbase = obj->mapbase;
3124 info->dli_saddr = (void *)0;
3125 info->dli_sname = NULL;
3128 * Walk the symbol list looking for the symbol whose address is
3129 * closest to the address sent in.
3131 for (symoffset = 0; symoffset < obj->dynsymcount; symoffset++) {
3132 def = obj->symtab + symoffset;
3135 * Skip the symbol if st_shndx is either SHN_UNDEF or
3138 if (def->st_shndx == SHN_UNDEF || def->st_shndx == SHN_COMMON)
3142 * If the symbol is greater than the specified address, or if it
3143 * is further away from addr than the current nearest symbol,
3146 symbol_addr = obj->relocbase + def->st_value;
3147 if (symbol_addr > addr || symbol_addr < info->dli_saddr)
3150 /* Update our idea of the nearest symbol. */
3151 info->dli_sname = obj->strtab + def->st_name;
3152 info->dli_saddr = symbol_addr;
/* An exact match cannot be improved upon; stop early. */
3155 if (info->dli_saddr == addr)
3158 lock_release(rtld_bind_lock, &lockstate);
/*
 * dlinfo: answer queries about a loaded object: its link_map, origin
 * directory, or library search-path info (RTLD_DI_SERINFO*).
 * handle NULL/RTLD_SELF means the caller's own object.
 */
3163 dlinfo(void *handle, int request, void *p)
3165 const Obj_Entry *obj;
3166 RtldLockState lockstate;
3169 rlock_acquire(rtld_bind_lock, &lockstate);
3171 if (handle == NULL || handle == RTLD_SELF) {
3174 retaddr = __builtin_return_address(0); /* __GNUC__ only */
3175 if ((obj = obj_from_addr(retaddr)) == NULL)
3176 _rtld_error("Cannot determine caller's shared object");
3178 obj = dlcheck(handle);
3181 lock_release(rtld_bind_lock, &lockstate);
3187 case RTLD_DI_LINKMAP:
3188 *((struct link_map const **)p) = &obj->linkmap;
3190 case RTLD_DI_ORIGIN:
3191 error = rtld_dirname(obj->path, p);
3194 case RTLD_DI_SERINFOSIZE:
3195 case RTLD_DI_SERINFO:
3196 error = do_search_info(obj, request, (struct dl_serinfo *)p);
3200 _rtld_error("Invalid request %d passed to dlinfo()", request);
3204 lock_release(rtld_bind_lock, &lockstate);
/*
 * rtld_fill_dl_phdr_info: populate a dl_phdr_info record from an
 * Obj_Entry: load base, name, program headers, TLS module info, and the
 * running add/sub counters used by dl_iterate_phdr callers for caching.
 */
3210 rtld_fill_dl_phdr_info(const Obj_Entry *obj, struct dl_phdr_info *phdr_info)
3213 phdr_info->dlpi_addr = (Elf_Addr)obj->relocbase;
3214 phdr_info->dlpi_name = STAILQ_FIRST(&obj->names) ?
3215 STAILQ_FIRST(&obj->names)->name : obj->path;
3216 phdr_info->dlpi_phdr = obj->phdr;
3217 phdr_info->dlpi_phnum = obj->phsize / sizeof(obj->phdr[0]);
3218 phdr_info->dlpi_tls_modid = obj->tlsindex;
3219 phdr_info->dlpi_tls_data = obj->tlsinit;
3220 phdr_info->dlpi_adds = obj_loads;
3221 phdr_info->dlpi_subs = obj_loads - obj_count;
/*
 * dl_iterate_phdr: invoke "callback" once per loaded object with its
 * dl_phdr_info. Holds the phdr lock (write) and bind lock (read) so the
 * object list cannot change mid-iteration; a non-zero callback return
 * stops the walk and becomes the return value.
 */
3225 dl_iterate_phdr(__dl_iterate_hdr_callback callback, void *param)
3227 struct dl_phdr_info phdr_info;
3228 const Obj_Entry *obj;
3229 RtldLockState bind_lockstate, phdr_lockstate;
3232 wlock_acquire(rtld_phdr_lock, &phdr_lockstate);
3233 rlock_acquire(rtld_bind_lock, &bind_lockstate);
3237 for (obj = obj_list; obj != NULL; obj = obj->next) {
3238 rtld_fill_dl_phdr_info(obj, &phdr_info);
3239 if ((error = callback(&phdr_info, sizeof phdr_info, param)) != 0)
3243 lock_release(rtld_bind_lock, &bind_lockstate);
3244 lock_release(rtld_phdr_lock, &phdr_lockstate);
/*
 * fill_search_info: path_enumerate callback serving dlinfo's SERINFO
 * requests. In the SERINFOSIZE pass it only accumulates counts/sizes;
 * in the SERINFO pass it writes a dl_serpath entry and copies the
 * directory name into the caller-provided string space.
 */
3250 fill_search_info(const char *dir, size_t dirlen, void *param)
3252 struct fill_search_info_args *arg;
3256 if (arg->request == RTLD_DI_SERINFOSIZE) {
3257 arg->serinfo->dls_cnt ++;
3258 arg->serinfo->dls_size += sizeof(struct dl_serpath) + dirlen + 1;
3260 struct dl_serpath *s_entry;
3262 s_entry = arg->serpath;
3263 s_entry->dls_name = arg->strspace;
3264 s_entry->dls_flags = arg->flags;
/* dirlen is a counted (non-NUL-terminated) span; terminate explicitly. */
3266 strncpy(arg->strspace, dir, dirlen);
3267 arg->strspace[dirlen] = '\0';
3269 arg->strspace += dirlen + 1;
/*
 * do_search_info: implement RTLD_DI_SERINFOSIZE / RTLD_DI_SERINFO.
 * First sizes the result by enumerating all search paths (rpath,
 * LD_LIBRARY_PATH, runpath, hints, default path) into a scratch
 * dl_serinfo; for SERINFO it then re-enumerates, filling the caller's
 * buffer, after checking the caller sized it with a SERINFOSIZE call.
 */
3277 do_search_info(const Obj_Entry *obj, int request, struct dl_serinfo *info)
3279 struct dl_serinfo _info;
3280 struct fill_search_info_args args;
3282 args.request = RTLD_DI_SERINFOSIZE;
3283 args.serinfo = &_info;
3285 _info.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
3288 path_enumerate(obj->rpath, fill_search_info, &args);
3289 path_enumerate(ld_library_path, fill_search_info, &args);
3290 path_enumerate(obj->runpath, fill_search_info, &args);
3291 path_enumerate(gethints(obj->z_nodeflib), fill_search_info, &args);
3292 if (!obj->z_nodeflib)
3293 path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &args);
3296 if (request == RTLD_DI_SERINFOSIZE) {
3297 info->dls_size = _info.dls_size;
3298 info->dls_cnt = _info.dls_cnt;
3302 if (info->dls_cnt != _info.dls_cnt || info->dls_size != _info.dls_size) {
3303 _rtld_error("Uninitialized Dl_serinfo struct passed to dlinfo()");
3307 args.request = RTLD_DI_SERINFO;
3308 args.serinfo = info;
3309 args.serpath = &info->dls_serpath[0];
/* String space sits immediately after the dl_serpath array. */
3310 args.strspace = (char *)&info->dls_serpath[_info.dls_cnt];
3312 args.flags = LA_SER_RUNPATH;
3313 if (path_enumerate(obj->rpath, fill_search_info, &args) != NULL)
3316 args.flags = LA_SER_LIBPATH;
3317 if (path_enumerate(ld_library_path, fill_search_info, &args) != NULL)
3320 args.flags = LA_SER_RUNPATH;
3321 if (path_enumerate(obj->runpath, fill_search_info, &args) != NULL)
3324 args.flags = LA_SER_CONFIG;
3325 if (path_enumerate(gethints(obj->z_nodeflib), fill_search_info, &args)
3329 args.flags = LA_SER_DEFAULT;
3330 if (!obj->z_nodeflib &&
3331 path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &args) != NULL)
/*
 * rtld_dirname: copy the directory component of "path" into "bname"
 * (a PATH_MAX buffer). Mirrors dirname(3) semantics: empty/NULL input
 * yields ".", trailing slashes are stripped, "/" is preserved.
 */
3337 rtld_dirname(const char *path, char *bname)
3341 /* Empty or NULL string gets treated as "." */
3342 if (path == NULL || *path == '\0') {
3348 /* Strip trailing slashes */
3349 endp = path + strlen(path) - 1;
3350 while (endp > path && *endp == '/')
3353 /* Find the start of the dir */
3354 while (endp > path && *endp != '/')
3357 /* Either the dir is "/" or there are no slashes */
3359 bname[0] = *endp == '/' ? '/' : '.';
3365 } while (endp > path && *endp == '/');
3368 if (endp - path + 2 > PATH_MAX)
3370 _rtld_error("Filename is too long: %s", path);
3374 strncpy(bname, path, endp - path + 1);
3375 bname[endp - path + 1] = '\0';
/*
 * rtld_dirname_abs: like rtld_dirname() but makes the result absolute
 * by prefixing the current working directory, with strlcat overflow
 * checks against PATH_MAX.
 */
3380 rtld_dirname_abs(const char *path, char *base)
3382 char base_rel[PATH_MAX];
3384 if (rtld_dirname(path, base) == -1)
3388 if (getcwd(base_rel, sizeof(base_rel)) == NULL ||
3389 strlcat(base_rel, "/", sizeof(base_rel)) >= sizeof(base_rel) ||
3390 strlcat(base_rel, base, sizeof(base_rel)) >= sizeof(base_rel))
3392 strcpy(base, base_rel);
/*
 * linkmap_add: insert the object's link_map into the r_debug list that
 * GDB walks, keeping the dynamic linker's own entry at the very end.
 */
3397 linkmap_add(Obj_Entry *obj)
3399 struct link_map *l = &obj->linkmap;
3400 struct link_map *prev;
3402 obj->linkmap.l_name = obj->path;
3403 obj->linkmap.l_addr = obj->mapbase;
3404 obj->linkmap.l_ld = obj->dynamic;
3406 /* GDB needs load offset on MIPS to use the symbols */
3407 obj->linkmap.l_offs = obj->relocbase;
3410 if (r_debug.r_map == NULL) {
3416 * Scan to the end of the list, but not past the entry for the
3417 * dynamic linker, which we want to keep at the very end.
3419 for (prev = r_debug.r_map;
3420 prev->l_next != NULL && prev->l_next != &obj_rtld.linkmap;
3421 prev = prev->l_next)
3424 /* Link in the new entry. */
3426 l->l_next = prev->l_next;
3427 if (l->l_next != NULL)
3428 l->l_next->l_prev = l;
/*
 * linkmap_delete: unlink the object's link_map from the doubly-linked
 * r_debug list, handling the head-of-list case separately.
 */
3433 linkmap_delete(Obj_Entry *obj)
3435 struct link_map *l = &obj->linkmap;
3437 if (l->l_prev == NULL) {
3438 if ((r_debug.r_map = l->l_next) != NULL)
3439 l->l_next->l_prev = NULL;
3443 if ((l->l_prev->l_next = l->l_next) != NULL)
3444 l->l_next->l_prev = l->l_prev;
3448 * Function for the debugger to set a breakpoint on to gain control.
3450 * The two parameters allow the debugger to easily find and determine
3451 * what the runtime loader is doing and to whom it is doing it.
3453 * When the loadhook trap is hit (r_debug_state, set at program
3454 * initialization), the arguments can be found on the stack:
3456 * +8 struct link_map *m
3457 * +4 struct r_debug *rd
3461 r_debug_state(struct r_debug* rd, struct link_map *m)
3464 * The following is a hack to force the compiler to emit calls to
3465 * this function, even when optimizing. If the function is empty,
3466 * the compiler is not obliged to emit any code for calls to it,
3467 * even when marked __noinline. However, gdb depends on those
3470 __asm __volatile("" : : : "memory");
3474 * Get address of the pointer variable in the main program.
3475 * Prefer non-weak symbol over the weak one.
/*
 * Returns the address of the named variable using a global symbol
 * lookup, going through function-pointer / ifunc machinery for
 * STT_FUNC and STT_GNU_IFUNC symbols respectively.
 */
3477 static const void **
3478 get_program_var_addr(const char *name, RtldLockState *lockstate)
3483 symlook_init(&req, name);
3484 req.lockstate = lockstate;
3485 donelist_init(&donelist);
3486 if (symlook_global(&req, &donelist) != 0)
3488 if (ELF_ST_TYPE(req.sym_out->st_info) == STT_FUNC)
3489 return ((const void **)make_function_pointer(req.sym_out,
3491 else if (ELF_ST_TYPE(req.sym_out->st_info) == STT_GNU_IFUNC)
3492 return ((const void **)rtld_resolve_ifunc(req.defobj_out, req.sym_out));
3494 return ((const void **)(req.defobj_out->relocbase +
3495 req.sym_out->st_value));
3499 * Set a pointer variable in the main program to the given value. This
3500 * is used to set key variables such as "environ" before any of the
3501 * init functions are called.
3504 set_program_var(const char *name, const void *value)
3508 if ((addr = get_program_var_addr(name, NULL)) != NULL) {
3509 dbg("\"%s\": *%p <-- %p", name, addr, value);
3515 * Search the global objects, including dependencies and main object,
3516 * for the given symbol.
/*
 * Strong (non-weak) definitions win over weak ones: each search stage
 * only replaces the current result if no definition was found yet or
 * the current one is weak. Returns 0 on success, ESRCH otherwise.
 */
3519 symlook_global(SymLook *req, DoneList *donelist)
3522 const Objlist_Entry *elm;
3525 symlook_init_from_req(&req1, req);
3527 /* Search all objects loaded at program start up. */
3528 if (req->defobj_out == NULL ||
3529 ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
3530 res = symlook_list(&req1, &list_main, donelist);
3531 if (res == 0 && (req->defobj_out == NULL ||
3532 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
3533 req->sym_out = req1.sym_out;
3534 req->defobj_out = req1.defobj_out;
3535 assert(req->defobj_out != NULL);
3539 /* Search all DAGs whose roots are RTLD_GLOBAL objects. */
3540 STAILQ_FOREACH(elm, &list_global, link) {
3541 if (req->defobj_out != NULL &&
3542 ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
3544 res = symlook_list(&req1, &elm->obj->dagmembers, donelist);
3545 if (res == 0 && (req->defobj_out == NULL ||
3546 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
3547 req->sym_out = req1.sym_out;
3548 req->defobj_out = req1.defobj_out;
3549 assert(req->defobj_out != NULL);
3553 return (req->sym_out != NULL ? 0 : ESRCH);
3557 * Given a symbol name in a referencing object, find the corresponding
3558 * definition of the symbol. Returns a pointer to the symbol, or NULL if
3559 * no definition was found. Returns a pointer to the Obj_Entry of the
3560 * defining object via the reference parameter DEFOBJ_OUT.
/*
 * Search order: the referencing object itself (if -Bsymbolic), then the
 * global scope, then every dlopen DAG containing the referencing object,
 * and finally rtld itself. Weak definitions can be superseded at each
 * later stage; strong ones terminate the search.
 */
3563 symlook_default(SymLook *req, const Obj_Entry *refobj)
3566 const Objlist_Entry *elm;
3570 donelist_init(&donelist);
3571 symlook_init_from_req(&req1, req);
3573 /* Look first in the referencing object if linked symbolically. */
3574 if (refobj->symbolic && !donelist_check(&donelist, refobj)) {
3575 res = symlook_obj(&req1, refobj);
3577 req->sym_out = req1.sym_out;
3578 req->defobj_out = req1.defobj_out;
3579 assert(req->defobj_out != NULL);
3583 symlook_global(req, &donelist);
3585 /* Search all dlopened DAGs containing the referencing object. */
3586 STAILQ_FOREACH(elm, &refobj->dldags, link) {
3587 if (req->sym_out != NULL &&
3588 ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
3590 res = symlook_list(&req1, &elm->obj->dagmembers, &donelist);
3591 if (res == 0 && (req->sym_out == NULL ||
3592 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
3593 req->sym_out = req1.sym_out;
3594 req->defobj_out = req1.defobj_out;
3595 assert(req->defobj_out != NULL);
3600 * Search the dynamic linker itself, and possibly resolve the
3601 * symbol from there. This is how the application links to
3602 * dynamic linker services such as dlopen.
3604 if (req->sym_out == NULL ||
3605 ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
3606 res = symlook_obj(&req1, &obj_rtld);
3608 req->sym_out = req1.sym_out;
3609 req->defobj_out = req1.defobj_out;
3610 assert(req->defobj_out != NULL);
3614 return (req->sym_out != NULL ? 0 : ESRCH);
/*
 * symlook_list: search every object on an Objlist for the requested
 * symbol, skipping objects already recorded in the done-list. A strong
 * (non-weak) match ends the scan; a weak match is kept as fallback.
 */
3618 symlook_list(SymLook *req, const Objlist *objlist, DoneList *dlp)
3621 const Obj_Entry *defobj;
3622 const Objlist_Entry *elm;
3628 STAILQ_FOREACH(elm, objlist, link) {
3629 if (donelist_check(dlp, elm->obj))
3631 symlook_init_from_req(&req1, req);
3632 if ((res = symlook_obj(&req1, elm->obj)) == 0) {
3633 if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
3635 defobj = req1.defobj_out;
3636 if (ELF_ST_BIND(def->st_info) != STB_WEAK)
3643 req->defobj_out = defobj;
3650 * Search the chain of DAGS pointed to by the given Needed_Entry
3651 * for a symbol of the given name. Each DAG is scanned completely
3652 * before advancing to the next one. Returns a pointer to the symbol,
3653 * or NULL if no definition was found.
3656 symlook_needed(SymLook *req, const Needed_Entry *needed, DoneList *dlp)
3659 const Needed_Entry *n;
3660 const Obj_Entry *defobj;
3666 symlook_init_from_req(&req1, req);
3667 for (n = needed; n != NULL; n = n->next) {
3668 if (n->obj == NULL ||
3669 (res = symlook_list(&req1, &n->obj->dagmembers, dlp)) != 0)
/* Same weak/strong preference logic as in symlook_list(). */
3671 if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
3673 defobj = req1.defobj_out;
3674 if (ELF_ST_BIND(def->st_info) != STB_WEAK)
3680 req->defobj_out = defobj;
3687 * Search the symbol table of a single shared object for a symbol of
3688 * the given name and version, if requested. Returns a pointer to the
3689 * symbol, or NULL if no definition was found. If the object is
3690 * filter, return filtered symbol from filtee.
3692 * The symbol's hash value is passed in for efficiency reasons; that
3693 * eliminates many recomputations of the hash value.
3696 symlook_obj(SymLook *req, const Obj_Entry *obj)
3700 int flags, res, mres;
3703 * If there is at least one valid hash at this point, we prefer to
3704 * use the faster GNU version if available.
3706 if (obj->valid_hash_gnu)
3707 mres = symlook_obj1_gnu(req, obj)
3708 else if (obj->valid_hash_sysv)
3709 mres = symlook_obj1_sysv(req, obj);
/* DT_FILTER: redirect a successful lookup to the filtee objects. */
3714 if (obj->needed_filtees != NULL) {
3715 flags = (req->flags & SYMLOOK_EARLY) ? RTLD_LO_EARLY : 0;
3716 load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate);
3717 donelist_init(&donelist);
3718 symlook_init_from_req(&req1, req);
3719 res = symlook_needed(&req1, obj->needed_filtees, &donelist);
3721 req->sym_out = req1.sym_out;
3722 req->defobj_out = req1.defobj_out;
/* DT_AUXILIARY filtees: consulted before falling back to this object. */
3726 if (obj->needed_aux_filtees != NULL) {
3727 flags = (req->flags & SYMLOOK_EARLY) ? RTLD_LO_EARLY : 0;
3728 load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate);
3729 donelist_init(&donelist);
3730 symlook_init_from_req(&req1, req);
3731 res = symlook_needed(&req1, obj->needed_aux_filtees, &donelist);
3733 req->sym_out = req1.sym_out;
3734 req->defobj_out = req1.defobj_out;
3742 /* Symbol match routine common to both hash functions */
/*
 * matched_symbol: decide whether symbol "symnum" of "obj" satisfies the
 * lookup request (name, symbol type/definedness, and version rules).
 * Fills "result" with the match or a versioned-fallback candidate.
 */
3744 matched_symbol(SymLook *req, const Obj_Entry *obj, Sym_Match_Result *result,
3745 const unsigned long symnum)
3748 const Elf_Sym *symp;
3751 symp = obj->symtab + symnum;
3752 strp = obj->strtab + symp->st_name;
3754 switch (ELF_ST_TYPE(symp->st_info)) {
3760 if (symp->st_value == 0)
3764 if (symp->st_shndx != SHN_UNDEF)
3767 else if (((req->flags & SYMLOOK_IN_PLT) == 0) &&
3768 (ELF_ST_TYPE(symp->st_info) == STT_FUNC))
/* Cheap first-character check before the full strcmp. */
3775 if (req->name[0] != strp[0] || strcmp(req->name, strp) != 0)
3778 if (req->ventry == NULL) {
3779 if (obj->versyms != NULL) {
3780 verndx = VER_NDX(obj->versyms[symnum]);
3781 if (verndx > obj->vernum) {
3783 "%s: symbol %s references wrong version %d",
3784 obj->path, obj->strtab + symnum, verndx);
3788 * If we are not called from dlsym (i.e. this
3789 * is a normal relocation from unversioned
3790 * binary), accept the symbol immediately if
3791 * it happens to have first version after this
3792 * shared object became versioned. Otherwise,
3793 * if symbol is versioned and not hidden,
3794 * remember it. If it is the only symbol with
3795 * this name exported by the shared object, it
3796 * will be returned as a match by the calling
3797 * function. If symbol is global (verndx < 2)
3798 * accept it unconditionally.
3800 if ((req->flags & SYMLOOK_DLSYM) == 0 &&
3801 verndx == VER_NDX_GIVEN) {
3802 result->sym_out = symp;
3805 else if (verndx >= VER_NDX_GIVEN) {
3806 if ((obj->versyms[symnum] & VER_NDX_HIDDEN)
3808 if (result->vsymp == NULL)
3809 result->vsymp = symp;
3815 result->sym_out = symp;
3818 if (obj->versyms == NULL) {
3819 if (object_match_name(obj, req->ventry->name)) {
3820 _rtld_error("%s: object %s should provide version %s "
3821 "for symbol %s", obj_rtld.path, obj->path,
3822 req->ventry->name, obj->strtab + symnum);
3826 verndx = VER_NDX(obj->versyms[symnum]);
3827 if (verndx > obj->vernum) {
3828 _rtld_error("%s: symbol %s references wrong version %d",
3829 obj->path, obj->strtab + symnum, verndx);
3832 if (obj->vertab[verndx].hash != req->ventry->hash ||
3833 strcmp(obj->vertab[verndx].name, req->ventry->name)) {
3835 * Version does not match. Look if this is a
3836 * global symbol and if it is not hidden. If
3837 * global symbol (verndx < 2) is available,
3838 * use it. Do not return symbol if we are
3839 * called by dlvsym, because dlvsym looks for
3840 * a specific version and default one is not
3841 * what dlvsym wants.
3843 if ((req->flags & SYMLOOK_DLSYM) ||
3844 (verndx >= VER_NDX_GIVEN) ||
3845 (obj->versyms[symnum] & VER_NDX_HIDDEN))
3849 result->sym_out = symp;
3854 * Search for symbol using SysV hash function.
3855 * obj->buckets is known not to be NULL at this point; the test for this was
3856 * performed with the obj->valid_hash_sysv assignment.
3859 symlook_obj1_sysv(SymLook *req, const Obj_Entry *obj)
3861 unsigned long symnum;
3862 Sym_Match_Result matchres;
3864 matchres.sym_out = NULL;
3865 matchres.vsymp = NULL;
3866 matchres.vcount = 0;
/* Walk the hash chain for this bucket until STN_UNDEF terminates it. */
3868 for (symnum = obj->buckets[req->hash % obj->nbuckets];
3869 symnum != STN_UNDEF; symnum = obj->chains[symnum]) {
3870 if (symnum >= obj->nchains)
3871 return (ESRCH); /* Bad object */
3873 if (matched_symbol(req, obj, &matchres, symnum)) {
3874 req->sym_out = matchres.sym_out;
3875 req->defobj_out = obj;
/* No exact match: fall back to a unique versioned candidate. */
3879 if (matchres.vcount == 1) {
3880 req->sym_out = matchres.vsymp;
3881 req->defobj_out = obj;
3887 /* Search for symbol using GNU hash function */
3889 symlook_obj1_gnu(SymLook *req, const Obj_Entry *obj)
3891 Elf_Addr bloom_word;
3892 const Elf32_Word *hashval;
3894 Sym_Match_Result matchres;
3895 unsigned int h1, h2;
3896 unsigned long symnum;
3898 matchres.sym_out = NULL;
3899 matchres.vsymp = NULL;
3900 matchres.vcount = 0;
3902 /* Pick right bitmask word from Bloom filter array */
3903 bloom_word = obj->bloom_gnu[(req->hash_gnu / __ELF_WORD_SIZE) &
3904 obj->maskwords_bm_gnu];
3906 /* Calculate modulus word size of gnu hash and its derivative */
3907 h1 = req->hash_gnu & (__ELF_WORD_SIZE - 1);
3908 h2 = ((req->hash_gnu >> obj->shift2_gnu) & (__ELF_WORD_SIZE - 1));
3910 /* Filter out the "definitely not in set" queries */
3911 if (((bloom_word >> h1) & (bloom_word >> h2) & 1) == 0)
3914 /* Locate hash chain and corresponding value element*/
3915 bucket = obj->buckets_gnu[req->hash_gnu % obj->nbuckets_gnu];
3918 hashval = &obj->chain_zero_gnu[bucket];
/* Chain entries store hash>>1; low bit set marks end of chain. */
3920 if (((*hashval ^ req->hash_gnu) >> 1) == 0) {
3921 symnum = hashval - obj->chain_zero_gnu;
3922 if (matched_symbol(req, obj, &matchres, symnum)) {
3923 req->sym_out = matchres.sym_out;
3924 req->defobj_out = obj;
3928 } while ((*hashval++ & 1) == 0);
3929 if (matchres.vcount == 1) {
3930 req->sym_out = matchres.vsymp;
3931 req->defobj_out = obj;
/*
 * trace_loaded_objects: implement LD_TRACE_LOADED_OBJECTS (ldd-style
 * output). Formats each needed object using the FMT1/FMT2 environment
 * templates; %o/%p/%x and related escapes are expanded in the elided
 * switch body further down.
 */
3938 trace_loaded_objects(Obj_Entry *obj)
3940 char *fmt1, *fmt2, *fmt, *main_local, *list_containers;
3943 if ((main_local = getenv(LD_ "TRACE_LOADED_OBJECTS_PROGNAME")) == NULL)
3946 if ((fmt1 = getenv(LD_ "TRACE_LOADED_OBJECTS_FMT1")) == NULL)
3947 fmt1 = "\t%o => %p (%x)\n";
3949 if ((fmt2 = getenv(LD_ "TRACE_LOADED_OBJECTS_FMT2")) == NULL)
3950 fmt2 = "\t%o (%x)\n";
3952 list_containers = getenv(LD_ "TRACE_LOADED_OBJECTS_ALL");
3954 for (; obj; obj = obj->next) {
3955 Needed_Entry *needed;
3959 if (list_containers && obj->needed != NULL)
3960 rtld_printf("%s:\n", obj->path);
3961 for (needed = obj->needed; needed; needed = needed->next) {
3962 if (needed->obj != NULL) {
3963 if (needed->obj->traced && !list_containers)
3965 needed->obj->traced = true;
3966 path = needed->obj->path;
3970 name = (char *)obj->strtab + needed->name;
3971 is_lib = strncmp(name, "lib", 3) == 0; /* XXX - bogus */
3973 fmt = is_lib ? fmt1 : fmt2;
3974 while ((c = *fmt++) != '\0') {
4000 rtld_putstr(main_local);
4003 rtld_putstr(obj_main->path);
4010 rtld_printf("%d", sodp->sod_major);
4013 rtld_printf("%d", sodp->sod_minor);
4020 rtld_printf("%p", needed->obj ? needed->obj->mapbase :
4033 * Unload a dlopened object and its dependencies from memory and from
4034 * our data structures. It is assumed that the DAG rooted in the
4035 * object has already been unreferenced, and that the object has a
4036 * reference count of 0.
4039 unload_object(Obj_Entry *root)
4044 assert(root->refcount == 0);
4047 * Pass over the DAG removing unreferenced objects from
4048 * appropriate lists.
4050 unlink_object(root);
4052 /* Unmap all objects that are no longer referenced. */
4053 linkp = &obj_list->next;
4054 while ((obj = *linkp) != NULL) {
4055 if (obj->refcount == 0) {
4056 LD_UTRACE(UTRACE_UNLOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0,
4058 dbg("unloading \"%s\"", obj->path);
/* NOTE(review): filtees are unloaded relative to "root" here, not
 * "obj" — confirm against upstream history whether intentional. */
4059 unload_filtees(root);
4060 munmap(obj->mapbase, obj->mapsize);
4061 linkmap_delete(obj);
/*
 * Recursively detach an unreferenced object (and any DAG members that
 * consequently drop to zero references) from the global and per-object
 * DAG bookkeeping lists.  Called by unload_object() before unmapping.
 */
4072 unlink_object(Obj_Entry *root)
4076 if (root->refcount == 0) {
4077 /* Remove the object from the RTLD_GLOBAL list. */
4078 objlist_remove(&list_global, root);
4080 /* Remove the object from all objects' DAG lists. */
4081 STAILQ_FOREACH(elm, &root->dagmembers, link) {
4082 objlist_remove(&elm->obj->dldags, root);
/* Avoid infinite recursion on the root itself. */
4083 if (elm->obj != root)
4084 unlink_object(elm->obj);
/*
 * Bump the reference count of every object in root's dependency DAG
 * (the DAG membership list must already have been built).
 */
4090 ref_dag(Obj_Entry *root)
4094 assert(root->dag_inited);
4095 STAILQ_FOREACH(elm, &root->dagmembers, link)
4096 elm->obj->refcount++;
/*
 * Inverse of ref_dag(): drop one reference from every object in root's
 * dependency DAG.  Callers later unload objects whose count reaches 0.
 */
4100 unref_dag(Obj_Entry *root)
4104 assert(root->dag_inited);
4105 STAILQ_FOREACH(elm, &root->dagmembers, link)
4106 elm->obj->refcount--;
4110 * Common code for MD __tls_get_addr().
/* Out-of-line slow path; kept non-inline so the fast path stays small. */
4112 static void *tls_get_addr_slow(Elf_Addr **, int, size_t) __noinline;
/*
 * Slow path of __tls_get_addr(): grow the DTV if new modules have been
 * loaded since it was created, and/or lazily allocate this module's TLS
 * block.  Returns the address of the variable at `offset` within the
 * TLS block of module `index`.
 */
4114 tls_get_addr_slow(Elf_Addr **dtvp, int index, size_t offset)
4116 Elf_Addr *newdtv, *dtv;
4117 RtldLockState lockstate;
4121 /* Check dtv generation in case new modules have arrived */
4122 if (dtv[0] != tls_dtv_generation) {
4123 wlock_acquire(rtld_bind_lock, &lockstate);
/* dtv[0] = generation, dtv[1] = module count; slots start at dtv[2]. */
4124 newdtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
4126 if (to_copy > tls_max_index)
4127 to_copy = tls_max_index;
4128 memcpy(&newdtv[2], &dtv[2], to_copy * sizeof(Elf_Addr));
4129 newdtv[0] = tls_dtv_generation;
4130 newdtv[1] = tls_max_index;
4132 lock_release(rtld_bind_lock, &lockstate);
4133 dtv = *dtvp = newdtv;
4136 /* Dynamically allocate module TLS if necessary */
4137 if (dtv[index + 1] == 0) {
4138 /* Signal safe, wlock will block out signals. */
4139 wlock_acquire(rtld_bind_lock, &lockstate);
/* Re-check under the lock: another thread may have allocated it. */
4140 if (!dtv[index + 1])
4141 dtv[index + 1] = (Elf_Addr)allocate_module_tls(index);
4142 lock_release(rtld_bind_lock, &lockstate);
4144 return ((void *)(dtv[index + 1] + offset));
/*
 * Fast path shared by the machine-dependent __tls_get_addr()
 * implementations: if the DTV is current and the module's TLS block is
 * already allocated, return the address directly; otherwise defer to
 * tls_get_addr_slow().
 */
4148 tls_get_addr_common(Elf_Addr **dtvp, int index, size_t offset)
4153 /* Check dtv generation in case new modules have arrived */
4154 if (__predict_true(dtv[0] == tls_dtv_generation &&
4155 dtv[index + 1] != 0))
4156 return ((void *)(dtv[index + 1] + offset));
4157 return (tls_get_addr_slow(dtvp, index, offset));
4160 #if defined(__arm__) || defined(__ia64__) || defined(__powerpc__)
4163 * Allocate Static TLS using the Variant I method.
/*
 * Variant I (TCB at the low end, TLS blocks above it).  Returns a new
 * TCB; if oldtcb is given, the existing static TLS contents and DTV are
 * migrated into the new allocation and DTV pointers into the old block
 * are relocated.
 */
4166 allocate_tls(Obj_Entry *objs, void *oldtcb, size_t tcbsize, size_t tcbalign)
/* Fast path: the old TCB already has the standard size -- reuse it. */
4175 if (oldtcb != NULL && tcbsize == TLS_TCB_SIZE)
4178 assert(tcbsize >= TLS_TCB_SIZE);
4179 tcb = xcalloc(1, tls_static_space - TLS_TCB_SIZE + tcbsize);
4180 tls = (Elf_Addr **)(tcb + tcbsize - TLS_TCB_SIZE);
4182 if (oldtcb != NULL) {
/* Copy the whole static TLS region from the old allocation. */
4183 memcpy(tls, oldtcb, tls_static_space);
4186 /* Adjust the DTV. */
/* dtv[1] holds the slot count; relocate pointers into the old block. */
4188 for (i = 0; i < dtv[1]; i++) {
4189 if (dtv[i+2] >= (Elf_Addr)oldtcb &&
4190 dtv[i+2] < (Elf_Addr)oldtcb + tls_static_space) {
4191 dtv[i+2] = dtv[i+2] - (Elf_Addr)oldtcb + (Elf_Addr)tls;
/* Fresh thread: build a new DTV and initialize each module's block. */
4195 dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
4197 dtv[0] = tls_dtv_generation;
4198 dtv[1] = tls_max_index;
4200 for (obj = objs; obj; obj = obj->next) {
4201 if (obj->tlsoffset > 0) {
4202 addr = (Elf_Addr)tls + obj->tlsoffset;
/* Copy the .tdata image, then zero the .tbss remainder. */
4203 if (obj->tlsinitsize > 0)
4204 memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
4205 if (obj->tlssize > obj->tlsinitsize)
4206 memset((void*) (addr + obj->tlsinitsize), 0,
4207 obj->tlssize - obj->tlsinitsize);
4208 dtv[obj->tlsindex + 1] = addr;
/*
 * Variant I free: release every dynamically allocated module TLS block
 * referenced by the DTV (anything outside the static TLS region), then
 * the static block itself.  NOTE(review): the trailing free of the TCB
 * is outside this sampled excerpt.
 */
4217 free_tls(void *tcb, size_t tcbsize, size_t tcbalign)
4220 Elf_Addr tlsstart, tlsend;
4223 assert(tcbsize >= TLS_TCB_SIZE);
4225 tlsstart = (Elf_Addr)tcb + tcbsize - TLS_TCB_SIZE;
4226 tlsend = tlsstart + tls_static_space;
/* The DTV pointer is stored at the start of the TLS area. */
4228 dtv = *(Elf_Addr **)tlsstart;
4230 for (i = 0; i < dtvsize; i++) {
/* Only blocks outside [tlsstart, tlsend) were heap-allocated. */
4231 if (dtv[i+2] && (dtv[i+2] < tlsstart || dtv[i+2] >= tlsend)) {
4232 free((void*)dtv[i+2]);
4241 #if defined(__i386__) || defined(__amd64__) || defined(__sparc64__) || \
4245 * Allocate Static TLS using the Variant II method.
/*
 * Variant II (TLS blocks below the thread pointer; segbase points at
 * the TCB, which stores self-pointer and DTV in its first two words).
 * Returns the new segment base; migrates an existing thread's static
 * TLS and dynamic DTV entries when oldtls is non-NULL.
 */
4248 allocate_tls(Obj_Entry *objs, void *oldtls, size_t tcbsize, size_t tcbalign)
4253 Elf_Addr *dtv, *olddtv;
4254 Elf_Addr segbase, oldsegbase, addr;
/* Round the static area up so segbase is suitably aligned. */
4257 size = round(tls_static_space, tcbalign);
4259 assert(tcbsize >= 2*sizeof(Elf_Addr));
4260 tls = xcalloc(1, size + tcbsize);
4261 dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
4263 segbase = (Elf_Addr)(tls + size);
/* TCB word 0: self pointer (thread pointer); word 1: the DTV. */
4264 ((Elf_Addr*)segbase)[0] = segbase;
4265 ((Elf_Addr*)segbase)[1] = (Elf_Addr) dtv;
4267 dtv[0] = tls_dtv_generation;
4268 dtv[1] = tls_max_index;
4272 * Copy the static TLS block over whole.
4274 oldsegbase = (Elf_Addr) oldtls;
4275 memcpy((void *)(segbase - tls_static_space),
4276 (const void *)(oldsegbase - tls_static_space),
4280 * If any dynamic TLS blocks have been created tls_get_addr(),
/* Carry over DTV entries that point outside the old static region. */
4283 olddtv = ((Elf_Addr**)oldsegbase)[1];
4284 for (i = 0; i < olddtv[1]; i++) {
4285 if (olddtv[i+2] < oldsegbase - size || olddtv[i+2] > oldsegbase) {
4286 dtv[i+2] = olddtv[i+2];
4292 * We assume that this block was the one we created with
4293 * allocate_initial_tls().
4295 free_tls(oldtls, 2*sizeof(Elf_Addr), sizeof(Elf_Addr));
/* Fresh thread: lay out each module's block below segbase. */
4297 for (obj = objs; obj; obj = obj->next) {
4298 if (obj->tlsoffset) {
/* Variant II offsets grow downward from the segment base. */
4299 addr = segbase - obj->tlsoffset;
4300 memset((void*) (addr + obj->tlsinitsize),
4301 0, obj->tlssize - obj->tlsinitsize);
4303 memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
4304 dtv[obj->tlsindex + 1] = addr;
4309 return (void*) segbase;
/*
 * Variant II free: release dynamically allocated module TLS blocks
 * (DTV entries outside the static region below the segment base), then
 * the static block itself.
 */
4313 free_tls(void *tls, size_t tcbsize, size_t tcbalign)
4318 Elf_Addr tlsstart, tlsend;
4321 * Figure out the size of the initial TLS block so that we can
4322 * find stuff which ___tls_get_addr() allocated dynamically.
4324 size = round(tls_static_space, tcbalign);
/* The DTV lives in the second word of the TCB at `tls` (segbase). */
4326 dtv = ((Elf_Addr**)tls)[1];
4328 tlsend = (Elf_Addr) tls;
4329 tlsstart = tlsend - size;
4330 for (i = 0; i < dtvsize; i++) {
/* Entries outside [tlsstart, tlsend] were allocated on the heap. */
4331 if (dtv[i+2] && (dtv[i+2] < tlsstart || dtv[i+2] > tlsend)) {
4332 free((void*) dtv[i+2]);
4336 free((void*) tlsstart);
4343 * Allocate TLS block for module with given index.
/*
 * Find the object with the given TLS index, allocate a block of its TLS
 * size, and initialize it from the module's .tdata image (zeroing the
 * .tbss remainder).  Fatal via _rtld_error/die paths on failure.
 */
4346 allocate_module_tls(int index)
4351 for (obj = obj_list; obj; obj = obj->next) {
4352 if (obj->tlsindex == index)
/* Reached only if no object carries this TLS index. */
4356 _rtld_error("Can't find module with TLS index %d", index);
4360 p = malloc(obj->tlssize);
4362 _rtld_error("Cannot allocate TLS block for index %d", index);
/* Copy the initialization image, then zero the uninitialized tail. */
4365 memcpy(p, obj->tlsinit, obj->tlsinitsize);
4366 memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize);
/*
 * Reserve a static-TLS offset for an object with a TLS segment.
 * Returns success/failure; on success records the offset in
 * obj->tlsoffset and updates the allocator's high-water mark
 * (tls_last_offset / tls_last_size).
 */
4372 allocate_tls_offset(Obj_Entry *obj)
/* Objects without TLS data need no reservation. */
4379 if (obj->tlssize == 0) {
4380 obj->tls_done = true;
/* Index 1 is the first TLS module; it anchors the layout. */
4384 if (obj->tlsindex == 1)
4385 off = calculate_first_tls_offset(obj->tlssize, obj->tlsalign);
4387 off = calculate_tls_offset(tls_last_offset, tls_last_size,
4388 obj->tlssize, obj->tlsalign);
4391 * If we have already fixed the size of the static TLS block, we
4392 * must stay within that size. When allocating the static TLS, we
4393 * leave a small amount of space spare to be used for dynamically
4394 * loading modules which use static TLS.
4396 if (tls_static_space) {
4397 if (calculate_tls_end(off, obj->tlssize) > tls_static_space)
4401 tls_last_offset = obj->tlsoffset = off;
4402 tls_last_size = obj->tlssize;
4403 obj->tls_done = true;
/*
 * Return an object's static-TLS reservation to the allocator, but only
 * when it was the most recent allocation (LIFO-style reclamation).
 */
4409 free_tls_offset(Obj_Entry *obj)
4413 * If we were the last thing to allocate out of the static TLS
4414 * block, we give our space back to the 'allocator'. This is a
4415 * simplistic workaround to allow libGL.so.1 to be loaded and
4416 * unloaded multiple times.
4418 if (calculate_tls_end(obj->tlsoffset, obj->tlssize)
4419 == calculate_tls_end(tls_last_offset, tls_last_size)) {
4420 tls_last_offset -= obj->tlssize;
/*
 * Public entry point used by libthr to allocate a thread's TLS area.
 * Simply serializes against the dynamic linker with the bind lock and
 * delegates to the machine-variant allocate_tls().
 */
4426 _rtld_allocate_tls(void *oldtls, size_t tcbsize, size_t tcbalign)
4429 RtldLockState lockstate;
4431 wlock_acquire(rtld_bind_lock, &lockstate);
4432 ret = allocate_tls(obj_list, oldtls, tcbsize, tcbalign);
4433 lock_release(rtld_bind_lock, &lockstate);
/*
 * Public entry point used by libthr to release a thread's TLS area;
 * holds the bind lock around the machine-variant free_tls().
 */
4438 _rtld_free_tls(void *tcb, size_t tcbsize, size_t tcbalign)
4440 RtldLockState lockstate;
4442 wlock_acquire(rtld_bind_lock, &lockstate);
4443 free_tls(tcb, tcbsize, tcbalign);
4444 lock_release(rtld_bind_lock, &lockstate);
/*
 * Record an additional name (e.g. a SONAME or symlink alias) for an
 * object, so later lookups can match it.  Allocation failure is
 * tolerated silently -- the name is simply not recorded.
 */
4448 object_add_name(Obj_Entry *obj, const char *name)
/* Name_Entry ends in a flexible char array; len covers the string. */
4454 entry = malloc(sizeof(Name_Entry) + len);
4456 if (entry != NULL) {
4457 strcpy(entry->name, name);
4458 STAILQ_INSERT_TAIL(&obj->names, entry, link);
/*
 * Return whether `name` matches any of the names recorded for the
 * object via object_add_name().
 */
4463 object_match_name(const Obj_Entry *obj, const char *name)
4467 STAILQ_FOREACH(entry, &obj->names, link) {
4468 if (strcmp(name, entry->name) == 0)
/*
 * Find the already-loaded object that satisfies a dependency of `obj`
 * named `name`: first search the main objlist by recorded names, then
 * obj's own DT_NEEDED entries.  May legitimately return NULL when the
 * DT_NEEDED entry exists but its object is not loaded yet; reports an
 * inconsistency error only when no matching DT_NEEDED exists at all.
 */
4475 locate_dependency(const Obj_Entry *obj, const char *name)
4477 const Objlist_Entry *entry;
4478 const Needed_Entry *needed;
4480 STAILQ_FOREACH(entry, &list_main, link) {
4481 if (object_match_name(entry->obj, name))
4485 for (needed = obj->needed; needed != NULL; needed = needed->next) {
/* Match by the DT_NEEDED string, or by the loaded object's aliases. */
4486 if (strcmp(obj->strtab + needed->name, name) == 0 ||
4487 (needed->obj != NULL && object_match_name(needed->obj, name))) {
4489 * If there is DT_NEEDED for the name we are looking for,
4490 * we are all set. Note that object might not be found if
4491 * dependency was not loaded yet, so the function can
4492 * return NULL here. This is expected and handled
4493 * properly by the caller.
4495 return (needed->obj);
4498 _rtld_error("%s: Unexpected inconsistency: dependency %s not found",
/*
 * Verify that `depobj` defines the version named by the Elf_Vernaux
 * record `vna` required by `refobj`.  Walks depobj's Verdef chain
 * comparing hashes, confirming name matches on hash hits.  Weak
 * version references (VER_FLG_WEAK) are allowed to be missing.
 * Returns 0 on success, nonzero (after _rtld_error) on failure.
 */
4504 check_object_provided_version(Obj_Entry *refobj, const Obj_Entry *depobj,
4505 const Elf_Vernaux *vna)
4507 const Elf_Verdef *vd;
4508 const char *vername;
4510 vername = refobj->strtab + vna->vna_name;
4511 vd = depobj->verdef;
/* No version definitions at all in the dependency. */
4513 _rtld_error("%s: version %s required by %s not defined",
4514 depobj->path, vername, refobj->path);
4518 if (vd->vd_version != VER_DEF_CURRENT) {
4519 _rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
4520 depobj->path, vd->vd_version);
/* Hash match is a fast filter; confirm with a string compare. */
4523 if (vna->vna_hash == vd->vd_hash) {
4524 const Elf_Verdaux *aux = (const Elf_Verdaux *)
4525 ((char *)vd + vd->vd_aux);
4526 if (strcmp(vername, depobj->strtab + aux->vda_name) == 0)
4529 if (vd->vd_next == 0)
4531 vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
/* A weak version reference may be unsatisfied without error. */
4533 if (vna->vna_flags & VER_FLG_WEAK)
4535 _rtld_error("%s: version %s required by %s not found",
4536 depobj->path, vername, refobj->path);
/*
 * Validate an object's ELF symbol-versioning data and build its
 * vertab[] (version index -> Ver_Entry) table.  Three passes:
 *   1) scan Verneed and Verdef chains to find the max version index
 *      (with basic record-format sanity checks);
 *   2) populate vertab[] from the object's own Verdef records;
 *   3) for each Verneed, locate the dependency object and check it
 *      actually provides the required version.
 * Idempotent via obj->ver_checked.
 */
4541 rtld_verify_object_versions(Obj_Entry *obj)
4543 const Elf_Verneed *vn;
4544 const Elf_Verdef *vd;
4545 const Elf_Verdaux *vda;
4546 const Elf_Vernaux *vna;
4547 const Obj_Entry *depobj;
4548 int maxvernum, vernum;
4550 if (obj->ver_checked)
4552 obj->ver_checked = true;
4556 * Walk over defined and required version records and figure out
4557 * max index used by any of them. Do very basic sanity checking
/* Pass 1a: required versions (Verneed / Vernaux chains). */
4561 while (vn != NULL) {
4562 if (vn->vn_version != VER_NEED_CURRENT) {
4563 _rtld_error("%s: Unsupported version %d of Elf_Verneed entry",
4564 obj->path, vn->vn_version);
4567 vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux);
4569 vernum = VER_NEED_IDX(vna->vna_other);
4570 if (vernum > maxvernum)
4572 if (vna->vna_next == 0)
4574 vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next);
4576 if (vn->vn_next == 0)
4578 vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next);
/* Pass 1b: defined versions (Verdef chain). */
4582 while (vd != NULL) {
4583 if (vd->vd_version != VER_DEF_CURRENT) {
4584 _rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
4585 obj->path, vd->vd_version);
4588 vernum = VER_DEF_IDX(vd->vd_ndx);
4589 if (vernum > maxvernum)
4591 if (vd->vd_next == 0)
4593 vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
4600 * Store version information in array indexable by version index.
4601 * Verify that object version requirements are satisfied along the
4604 obj->vernum = maxvernum + 1;
4605 obj->vertab = xcalloc(obj->vernum, sizeof(Ver_Entry));
/* Pass 2: record this object's own version definitions. */
4608 while (vd != NULL) {
/* VER_FLG_BASE marks the file's base version; skip it. */
4609 if ((vd->vd_flags & VER_FLG_BASE) == 0) {
4610 vernum = VER_DEF_IDX(vd->vd_ndx);
4611 assert(vernum <= maxvernum);
4612 vda = (const Elf_Verdaux *)((char *)vd + vd->vd_aux);
4613 obj->vertab[vernum].hash = vd->vd_hash;
4614 obj->vertab[vernum].name = obj->strtab + vda->vda_name;
4615 obj->vertab[vernum].file = NULL;
4616 obj->vertab[vernum].flags = 0;
4618 if (vd->vd_next == 0)
4620 vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
/* Pass 3: check required versions against their providing objects. */
4624 while (vn != NULL) {
4625 depobj = locate_dependency(obj, obj->strtab + vn->vn_file);
4628 vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux);
4630 if (check_object_provided_version(obj, depobj, vna))
4632 vernum = VER_NEED_IDX(vna->vna_other);
4633 assert(vernum <= maxvernum);
4634 obj->vertab[vernum].hash = vna->vna_hash;
4635 obj->vertab[vernum].name = obj->strtab + vna->vna_name;
4636 obj->vertab[vernum].file = obj->strtab + vn->vn_file;
/* Hidden versions may only be bound via explicit version refs. */
4637 obj->vertab[vernum].flags = (vna->vna_other & VER_NEED_HIDDEN) ?
4638 VER_INFO_HIDDEN : 0;
4639 if (vna->vna_next == 0)
4641 vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next);
4643 if (vn->vn_next == 0)
4645 vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next);
/*
 * Run rtld_verify_object_versions() over every object in `objlist`,
 * then over rtld itself.  When ld_tracing is active (ldd mode), keep
 * going after failures so all problems get reported.
 */
4651 rtld_verify_versions(const Objlist *objlist)
4653 Objlist_Entry *entry;
4657 STAILQ_FOREACH(entry, objlist, link) {
4659 * Skip dummy objects or objects that have their version requirements
4662 if (entry->obj->strtab == NULL || entry->obj->vertab != NULL)
4664 if (rtld_verify_object_versions(entry->obj) == -1) {
/* In normal (non-tracing) mode the first failure is fatal. */
4666 if (ld_tracing == NULL)
4670 if (rc == 0 || ld_tracing != NULL)
4671 rc = rtld_verify_object_versions(&obj_rtld);
/*
 * Map a symbol's versym entry to the object's Ver_Entry, or NULL when
 * the symbol is unversioned / out of range.  An index beyond
 * obj->vernum indicates corrupt version data and is reported.
 */
4676 fetch_ventry(const Obj_Entry *obj, unsigned long symnum)
/* VER_NDX strips the hidden bit, leaving the bare version index. */
4681 vernum = VER_NDX(obj->versyms[symnum]);
4682 if (vernum >= obj->vernum) {
4683 _rtld_error("%s: symbol %s has wrong verneed value %d",
4684 obj->path, obj->strtab + symnum, vernum);
/* hash == 0 marks an unused vertab slot (no real version). */
4685 } else if (obj->vertab[vernum].hash != 0) {
4686 return &obj->vertab[vernum];
/* Report the current stack protection flags to libc/libthr callers. */
4693 _rtld_get_stack_prot(void)
4696 return (stack_prot);
/*
 * If a loaded object requires an executable stack (PF_X in
 * max_stack_flags) and the stack is not yet executable, ask libthr's
 * __pthread_map_stacks_exec() hook to remap all thread stacks with
 * PROT_EXEC.  No-op when already executable or not required.
 */
4700 map_stacks_exec(RtldLockState *lockstate)
4702 void (*thr_map_stacks_exec)(void);
4704 if ((max_stack_flags & PF_X) == 0 || (stack_prot & PROT_EXEC) != 0)
/* Resolve the hook dynamically; absent when libthr is not loaded. */
4706 thr_map_stacks_exec = (void (*)(void))(uintptr_t)
4707 get_program_var_addr("__pthread_map_stacks_exec", lockstate);
4708 if (thr_map_stacks_exec != NULL) {
4709 stack_prot |= PROT_EXEC;
4710 thr_map_stacks_exec();
/*
 * Initialize a SymLook request for `name`, precomputing both the
 * classic ELF hash and the GNU hash so either lookup scheme can be
 * used without rehashing.
 */
4715 symlook_init(SymLook *dst, const char *name)
4718 bzero(dst, sizeof(*dst));
4720 dst->hash = elf_hash(name);
4721 dst->hash_gnu = gnu_hash(name);
/*
 * Copy the query half of an existing SymLook request (name, hashes,
 * version, flags, lock state) while clearing the result fields, so the
 * same query can be re-run against another object.
 */
4725 symlook_init_from_req(SymLook *dst, const SymLook *src)
4728 dst->name = src->name;
4729 dst->hash = src->hash;
4730 dst->hash_gnu = src->hash_gnu;
4731 dst->ventry = src->ventry;
4732 dst->flags = src->flags;
/* Result slots start empty for the new lookup. */
4733 dst->defobj_out = NULL;
4734 dst->sym_out = NULL;
4735 dst->lockstate = src->lockstate;
4739 * Overrides for libc_pic-provided functions.
/*
 * rtld's own __getosreldate(): query kern.osreldate via sysctl(2)
 * directly, avoiding libc machinery that is unavailable inside rtld.
 */
4743 __getosreldate(void)
4753 oid[1] = KERN_OSRELDATE;
4755 len = sizeof(osrel);
4756 error = sysctl(oid, 2, &osrel, &len, NULL, 0);
/* Cache/return only a sane, fully-read positive value. */
4757 if (error == 0 && osrel > 0 && len == sizeof(osrel))
/* Globals normally supplied by libc; rtld provides its own copies. */
4769 void (*__cleanup)(void);
/* rtld itself never becomes threaded. */
4770 int __isthreaded = 0;
4771 int _thread_autoinit_dummy_decl = 1;
4774 * No unresolved symbols for rtld.
/* Stub so rtld links without libthr; presumably a no-op body follows
 * (not visible in this sampled excerpt). */
4777 __pthread_cxa_finalize(struct dl_phdr_info *a)
/* SSP handler: called on stack-canary mismatch; reports and dies. */
4782 __stack_chk_fail(void)
4785 _rtld_error("stack overflow detected; terminated");
/* Local alias used by -fstack-protector codegen on some targets. */
4788 __weak_reference(__stack_chk_fail, __stack_chk_fail_local);
/* Body of __chk_fail() (FORTIFY-style bounds-check failure handler);
 * its signature line is not visible in this sampled excerpt. */
4794 _rtld_error("buffer overflow detected; terminated");
/*
 * Minimal strerror(3) replacement for rtld: index directly into
 * sys_errlist, with a fixed fallback for out-of-range errno values.
 */
4799 rtld_strerror(int errnum)
4802 if (errnum < 0 || errnum >= sys_nerr)
4803 return ("Unknown error");
4804 return (sys_errlist[errnum]);