2 * Copyright 1996, 1997, 1998, 1999, 2000 John D. Polstra.
3 * Copyright 2003 Alexander Kabaev <kan@FreeBSD.ORG>.
4 * Copyright 2009-2012 Konstantin Belousov <kib@FreeBSD.ORG>.
5 * Copyright 2012 John Marino <draco@marino.st>.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * Dynamic linker for ELF.
34 * John Polstra <jdp@polstra.com>.
37 #include <sys/param.h>
38 #include <sys/mount.h>
41 #include <sys/sysctl.h>
43 #include <sys/utsname.h>
44 #include <sys/ktrace.h>
61 #include "rtld_printf.h"
62 #include "rtld_utrace.h"
66 typedef void (*func_ptr_type)();
67 typedef void * (*path_enum_proc) (const char *path, size_t len, void *arg);
70 * Function declarations.
72 static const char *basename(const char *);
73 static void digest_dynamic1(Obj_Entry *, int, const Elf_Dyn **,
74 const Elf_Dyn **, const Elf_Dyn **);
75 static void digest_dynamic2(Obj_Entry *, const Elf_Dyn *, const Elf_Dyn *,
77 static void digest_dynamic(Obj_Entry *, int);
78 static Obj_Entry *digest_phdr(const Elf_Phdr *, int, caddr_t, const char *);
79 static Obj_Entry *dlcheck(void *);
80 static int dlclose_locked(void *, RtldLockState *);
81 static Obj_Entry *dlopen_object(const char *name, int fd, Obj_Entry *refobj,
82 int lo_flags, int mode, RtldLockState *lockstate);
83 static Obj_Entry *do_load_object(int, const char *, char *, struct stat *, int);
84 static int do_search_info(const Obj_Entry *obj, int, struct dl_serinfo *);
85 static bool donelist_check(DoneList *, const Obj_Entry *);
86 static void errmsg_restore(char *);
87 static char *errmsg_save(void);
88 static void *fill_search_info(const char *, size_t, void *);
89 static char *find_library(const char *, const Obj_Entry *, int *);
90 static const char *gethints(bool);
91 static void hold_object(Obj_Entry *);
92 static void unhold_object(Obj_Entry *);
93 static void init_dag(Obj_Entry *);
94 static void init_marker(Obj_Entry *);
95 static void init_pagesizes(Elf_Auxinfo **aux_info);
96 static void init_rtld(caddr_t, Elf_Auxinfo **);
97 static void initlist_add_neededs(Needed_Entry *, Objlist *);
98 static void initlist_add_objects(Obj_Entry *, Obj_Entry *, Objlist *);
99 static void linkmap_add(Obj_Entry *);
100 static void linkmap_delete(Obj_Entry *);
101 static void load_filtees(Obj_Entry *, int flags, RtldLockState *);
102 static void unload_filtees(Obj_Entry *, RtldLockState *);
103 static int load_needed_objects(Obj_Entry *, int);
104 static int load_preload_objects(void);
105 static Obj_Entry *load_object(const char *, int fd, const Obj_Entry *, int);
106 static void map_stacks_exec(RtldLockState *);
107 static int obj_enforce_relro(Obj_Entry *);
108 static Obj_Entry *obj_from_addr(const void *);
109 static void objlist_call_fini(Objlist *, Obj_Entry *, RtldLockState *);
110 static void objlist_call_init(Objlist *, RtldLockState *);
111 static void objlist_clear(Objlist *);
112 static Objlist_Entry *objlist_find(Objlist *, const Obj_Entry *);
113 static void objlist_init(Objlist *);
114 static void objlist_push_head(Objlist *, Obj_Entry *);
115 static void objlist_push_tail(Objlist *, Obj_Entry *);
116 static void objlist_put_after(Objlist *, Obj_Entry *, Obj_Entry *);
117 static void objlist_remove(Objlist *, Obj_Entry *);
118 static int parse_libdir(const char *);
119 static void *path_enumerate(const char *, path_enum_proc, void *);
120 static void release_object(Obj_Entry *);
121 static int relocate_object_dag(Obj_Entry *root, bool bind_now,
122 Obj_Entry *rtldobj, int flags, RtldLockState *lockstate);
123 static int relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
124 int flags, RtldLockState *lockstate);
125 static int relocate_objects(Obj_Entry *, bool, Obj_Entry *, int,
127 static int resolve_objects_ifunc(Obj_Entry *first, bool bind_now,
128 int flags, RtldLockState *lockstate);
129 static int rtld_dirname(const char *, char *);
130 static int rtld_dirname_abs(const char *, char *);
131 static void *rtld_dlopen(const char *name, int fd, int mode);
132 static void rtld_exit(void);
133 static char *search_library_path(const char *, const char *);
134 static char *search_library_pathfds(const char *, const char *, int *);
135 static const void **get_program_var_addr(const char *, RtldLockState *);
136 static void set_program_var(const char *, const void *);
137 static int symlook_default(SymLook *, const Obj_Entry *refobj);
138 static int symlook_global(SymLook *, DoneList *);
139 static void symlook_init_from_req(SymLook *, const SymLook *);
140 static int symlook_list(SymLook *, const Objlist *, DoneList *);
141 static int symlook_needed(SymLook *, const Needed_Entry *, DoneList *);
142 static int symlook_obj1_sysv(SymLook *, const Obj_Entry *);
143 static int symlook_obj1_gnu(SymLook *, const Obj_Entry *);
144 static void trace_loaded_objects(Obj_Entry *);
145 static void unlink_object(Obj_Entry *);
146 static void unload_object(Obj_Entry *, RtldLockState *lockstate);
147 static void unref_dag(Obj_Entry *);
148 static void ref_dag(Obj_Entry *);
149 static char *origin_subst_one(Obj_Entry *, char *, const char *,
151 static char *origin_subst(Obj_Entry *, char *);
152 static bool obj_resolve_origin(Obj_Entry *obj);
153 static void preinit_main(void);
154 static int rtld_verify_versions(const Objlist *);
155 static int rtld_verify_object_versions(Obj_Entry *);
156 static void object_add_name(Obj_Entry *, const char *);
157 static int object_match_name(const Obj_Entry *, const char *);
158 static void ld_utrace_log(int, void *, void *, size_t, int, const char *);
159 static void rtld_fill_dl_phdr_info(const Obj_Entry *obj,
160 struct dl_phdr_info *phdr_info);
161 static uint32_t gnu_hash(const char *);
162 static bool matched_symbol(SymLook *, const Obj_Entry *, Sym_Match_Result *,
163 const unsigned long);
165 void r_debug_state(struct r_debug *, struct link_map *) __noinline __exported;
166 void _r_debug_postinit(struct link_map *) __noinline __exported;
168 int __sys_openat(int, const char *, int, ...);
173 static char *error_message; /* Message for dlerror(), or NULL */
174 struct r_debug r_debug __exported; /* for GDB; */
175 static bool libmap_disable; /* Disable libmap */
176 static bool ld_loadfltr; /* Immediate filters processing */
177 static char *libmap_override; /* Maps to use in addition to libmap.conf */
178 static bool trust; /* False for setuid and setgid programs */
179 static bool dangerous_ld_env; /* True if environment variables have been
180 used to affect the libraries loaded */
181 bool ld_bind_not; /* Disable PLT update */
182 static char *ld_bind_now; /* Environment variable for immediate binding */
183 static char *ld_debug; /* Environment variable for debugging */
184 static char *ld_library_path; /* Environment variable for search path */
185 static char *ld_library_dirs; /* Environment variable for library descriptors */
186 static char *ld_preload; /* Environment variable for libraries to
188 static char *ld_elf_hints_path; /* Environment variable for alternative hints path */
189 static char *ld_tracing; /* Called from ldd to print libs */
190 static char *ld_utrace; /* Use utrace() to log events. */
191 static struct obj_entry_q obj_list; /* Queue of all loaded objects */
192 static Obj_Entry *obj_main; /* The main program shared object */
193 static Obj_Entry obj_rtld; /* The dynamic linker shared object */
194 static unsigned int obj_count; /* Number of objects in obj_list */
195 static unsigned int obj_loads; /* Number of loads of objects (gen count) */
197 static Objlist list_global = /* Objects dlopened with RTLD_GLOBAL */
198 STAILQ_HEAD_INITIALIZER(list_global);
199 static Objlist list_main = /* Objects loaded at program startup */
200 STAILQ_HEAD_INITIALIZER(list_main);
201 static Objlist list_fini = /* Objects needing fini() calls */
202 STAILQ_HEAD_INITIALIZER(list_fini);
204 Elf_Sym sym_zero; /* For resolving undefined weak refs. */
206 #define GDB_STATE(s,m) r_debug.r_state = s; r_debug_state(&r_debug,m);
208 extern Elf_Dyn _DYNAMIC;
209 #pragma weak _DYNAMIC
211 int dlclose(void *) __exported;
212 char *dlerror(void) __exported;
213 void *dlopen(const char *, int) __exported;
214 void *fdlopen(int, int) __exported;
215 void *dlsym(void *, const char *) __exported;
216 dlfunc_t dlfunc(void *, const char *) __exported;
217 void *dlvsym(void *, const char *, const char *) __exported;
218 int dladdr(const void *, Dl_info *) __exported;
219 void dllockinit(void *, void *(*)(void *), void (*)(void *), void (*)(void *),
220 void (*)(void *), void (*)(void *), void (*)(void *)) __exported;
221 int dlinfo(void *, int , void *) __exported;
222 int dl_iterate_phdr(__dl_iterate_hdr_callback, void *) __exported;
223 int _rtld_addr_phdr(const void *, struct dl_phdr_info *) __exported;
224 int _rtld_get_stack_prot(void) __exported;
225 int _rtld_is_dlopened(void *) __exported;
226 void _rtld_error(const char *, ...) __exported;
228 int npagesizes, osreldate;
231 long __stack_chk_guard[8] = {0, 0, 0, 0, 0, 0, 0, 0};
233 static int stack_prot = PROT_READ | PROT_WRITE | RTLD_DEFAULT_STACK_EXEC;
234 static int max_stack_flags;
237 * Global declarations normally provided by crt1. The dynamic linker is
238 * not built with crt1, so we have to provide them ourselves.
244 * Used to pass argc, argv to init functions.
250 * Globals to control TLS allocation.
252 size_t tls_last_offset; /* Static TLS offset of last module */
253 size_t tls_last_size; /* Static TLS size of last module */
254 size_t tls_static_space; /* Static TLS space allocated */
255 size_t tls_static_max_align;
256 int tls_dtv_generation = 1; /* Used to detect when dtv size changes */
257 int tls_max_index = 1; /* Largest module index allocated */
259 bool ld_library_path_rpath = false;
262 * Globals for path names, and such
264 char *ld_elf_hints_default = _PATH_ELF_HINTS;
265 char *ld_path_libmap_conf = _PATH_LIBMAP_CONF;
266 char *ld_path_rtld = _PATH_RTLD;
267 char *ld_standard_library_path = STANDARD_LIBRARY_PATH;
268 char *ld_env_prefix = LD_;
271 * Fill in a DoneList with an allocation large enough to hold all of
272 * the currently-loaded objects. Keep this as a macro since it calls
273 * alloca and we want that to occur within the scope of the caller.
275 #define donelist_init(dlp) \
276 ((dlp)->objs = alloca(obj_count * sizeof (dlp)->objs[0]), \
277 assert((dlp)->objs != NULL), \
278 (dlp)->num_alloc = obj_count, \
281 #define LD_UTRACE(e, h, mb, ms, r, n) do { \
282 if (ld_utrace != NULL) \
283 ld_utrace_log(e, h, mb, ms, r, n); \
/*
 * NOTE(review): fragmentary listing -- the function's return-type line,
 * braces, and several field assignments (event/handle/refcnt, the name==NULL
 * branch) are elided from this dump.  Emits a utrace(2) record describing an
 * rtld event so tools such as ktrace/kdump can observe linker activity.
 */
287 ld_utrace_log(int event, void *handle, void *mapbase, size_t mapsize,
288 int refcnt, const char *name)
290 struct utrace_rtld ut;
291 static const char rtld_utrace_sig[RTLD_UTRACE_SIG_SZ] = RTLD_UTRACE_SIG;
/* Tag the record so consumers can recognize rtld-originated trace entries. */
293 memcpy(ut.sig, rtld_utrace_sig, sizeof(ut.sig));
296 ut.mapbase = mapbase;
297 ut.mapsize = mapsize;
/* Zero first so the copied name is always NUL-terminated and padded. */
299 bzero(ut.name, sizeof(ut.name));
301 strlcpy(ut.name, name, sizeof(ut.name));
302 utrace(&ut, sizeof(ut));
305 #ifdef RTLD_VARIANT_ENV_NAMES
307 * construct the env variable based on the type of binary that's
/*
 * NOTE(review): fragmentary listing -- the function name line, braces, and
 * return statement are elided.  Builds an environment-variable name by
 * prefixing `var' with ld_env_prefix (used when RTLD_VARIANT_ENV_NAMES is
 * defined).  Uses a static buffer, so the result is overwritten on each
 * call and the helper is not reentrant.
 */
310 static inline const char *
313 static char buffer[128];
315 strlcpy(buffer, ld_env_prefix, sizeof(buffer));
316 strlcat(buffer, var, sizeof(buffer));
324 * Main entry point for dynamic linking. The first argument is the
325 * stack pointer. The stack is expected to be laid out as described
326 * in the SVR4 ABI specification, Intel 386 Processor Supplement.
327 * Specifically, the stack pointer points to a word containing
328 * ARGC. Following that in the stack is a null-terminated sequence
329 * of pointers to argument strings. Then comes a null-terminated
330 * sequence of pointers to environment strings. Finally, there is a
331 * sequence of "auxiliary vector" entries.
333 * The second argument points to a place to store the dynamic linker's
334 * exit procedure pointer and the third to a place to store the main
337 * The return value is the main program's entry point.
/*
 * NOTE(review): fragmentary listing -- many interior lines of _rtld()
 * (braces, several local declarations, error branches such as rtld_die()
 * calls, and some statements) are elided from this dump.  Comments below
 * describe only what the visible lines establish.
 */
340 _rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp)
342 Elf_Auxinfo *aux_info[AT_COUNT];
350 Objlist_Entry *entry;
352 Obj_Entry *preload_tail;
353 Obj_Entry *last_interposer;
355 RtldLockState lockstate;
356 char *library_path_rpath;
361 * On entry, the dynamic linker itself has not been relocated yet.
362 * Be very careful not to reference any global data until after
363 * init_rtld has returned. It is OK to reference file-scope statics
364 * and string constants, and to call static and global functions.
367 /* Find the auxiliary vector on the stack. */
370 sp += argc + 1; /* Skip over arguments and NULL terminator */
372 while (*sp++ != 0) /* Skip over environment, and NULL terminator */
374 aux = (Elf_Auxinfo *) sp;
376 /* Digest the auxiliary vector. */
377 for (i = 0; i < AT_COUNT; i++)
379 for (auxp = aux; auxp->a_type != AT_NULL; auxp++) {
380 if (auxp->a_type < AT_COUNT)
381 aux_info[auxp->a_type] = auxp;
384 /* Initialize and relocate ourselves. */
385 assert(aux_info[AT_BASE] != NULL);
386 init_rtld((caddr_t) aux_info[AT_BASE]->a_un.a_ptr, aux_info);
388 __progname = obj_rtld.path;
389 argv0 = argv[0] != NULL ? argv[0] : "(null)";
/*
 * Seed the stack-protector guard from the kernel-supplied AT_CANARY aux
 * entry when present, clamped to the size of __stack_chk_guard.
 */
394 if (aux_info[AT_CANARY] != NULL &&
395 aux_info[AT_CANARY]->a_un.a_ptr != NULL) {
396 i = aux_info[AT_CANARYLEN]->a_un.a_val;
397 if (i > sizeof(__stack_chk_guard))
398 i = sizeof(__stack_chk_guard);
399 memcpy(__stack_chk_guard, aux_info[AT_CANARY]->a_un.a_ptr, i);
/* Otherwise fall back to a sysctl lookup (elided), then a fixed pattern. */
404 len = sizeof(__stack_chk_guard);
405 if (sysctl(mib, 2, __stack_chk_guard, &len, NULL, 0) == -1 ||
406 len != sizeof(__stack_chk_guard)) {
407 /* If sysctl was unsuccessful, use the "terminator canary". */
408 ((unsigned char *)(void *)__stack_chk_guard)[0] = 0;
409 ((unsigned char *)(void *)__stack_chk_guard)[1] = 0;
410 ((unsigned char *)(void *)__stack_chk_guard)[2] = '\n';
411 ((unsigned char *)(void *)__stack_chk_guard)[3] = 255;
/* Set-ugid processes are untrusted: dangerous LD_* knobs get stripped. */
415 trust = !issetugid();
417 md_abi_variant_hook(aux_info);
419 ld_bind_now = getenv(_LD("BIND_NOW"));
422 * If the process is tainted, then we un-set the dangerous environment
423 * variables. The process will be marked as tainted until setuid(2)
424 * is called. If any child process calls setuid(2) we do not want any
425 * future processes to honor the potentially un-safe variables.
428 if (unsetenv(_LD("PRELOAD")) || unsetenv(_LD("LIBMAP")) ||
429 unsetenv(_LD("LIBRARY_PATH")) || unsetenv(_LD("LIBRARY_PATH_FDS")) ||
430 unsetenv(_LD("LIBMAP_DISABLE")) || unsetenv(_LD("BIND_NOT")) ||
431 unsetenv(_LD("DEBUG")) || unsetenv(_LD("ELF_HINTS_PATH")) ||
432 unsetenv(_LD("LOADFLTR")) || unsetenv(_LD("LIBRARY_PATH_RPATH"))) {
433 _rtld_error("environment corrupt; aborting");
/* Cache the LD_* environment knobs into file-scope globals. */
437 ld_debug = getenv(_LD("DEBUG"));
438 if (ld_bind_now == NULL)
439 ld_bind_not = getenv(_LD("BIND_NOT")) != NULL;
440 libmap_disable = getenv(_LD("LIBMAP_DISABLE")) != NULL;
441 libmap_override = getenv(_LD("LIBMAP"));
442 ld_library_path = getenv(_LD("LIBRARY_PATH"));
443 ld_library_dirs = getenv(_LD("LIBRARY_PATH_FDS"));
444 ld_preload = getenv(_LD("PRELOAD"));
445 ld_elf_hints_path = getenv(_LD("ELF_HINTS_PATH"));
446 ld_loadfltr = getenv(_LD("LOADFLTR")) != NULL;
447 library_path_rpath = getenv(_LD("LIBRARY_PATH_RPATH"));
/* Accept "y", "Y", or "1" as an affirmative LIBRARY_PATH_RPATH value. */
448 if (library_path_rpath != NULL) {
449 if (library_path_rpath[0] == 'y' ||
450 library_path_rpath[0] == 'Y' ||
451 library_path_rpath[0] == '1')
452 ld_library_path_rpath = true;
454 ld_library_path_rpath = false;
456 dangerous_ld_env = libmap_disable || (libmap_override != NULL) ||
457 (ld_library_path != NULL) || (ld_preload != NULL) ||
458 (ld_elf_hints_path != NULL) || ld_loadfltr;
459 ld_tracing = getenv(_LD("TRACE_LOADED_OBJECTS"));
460 ld_utrace = getenv(_LD("UTRACE"));
462 if ((ld_elf_hints_path == NULL) || strlen(ld_elf_hints_path) == 0)
463 ld_elf_hints_path = ld_elf_hints_default;
465 if (ld_debug != NULL && *ld_debug != '\0')
467 dbg("%s is initialized, base address = %p", __progname,
468 (caddr_t) aux_info[AT_BASE]->a_un.a_ptr);
469 dbg("RTLD dynamic = %p", obj_rtld.dynamic);
470 dbg("RTLD pltgot = %p", obj_rtld.pltgot);
472 dbg("initializing thread locks");
476 * Load the main program, or process its program header if it is
479 if (aux_info[AT_EXECFD] != NULL) { /* Load the main program. */
480 int fd = aux_info[AT_EXECFD]->a_un.a_val;
481 dbg("loading main program");
482 obj_main = map_object(fd, argv0, NULL);
484 if (obj_main == NULL)
486 max_stack_flags = obj->stack_flags;
487 } else { /* Main program already loaded. */
488 const Elf_Phdr *phdr;
492 dbg("processing main program's program header");
493 assert(aux_info[AT_PHDR] != NULL);
494 phdr = (const Elf_Phdr *) aux_info[AT_PHDR]->a_un.a_ptr;
495 assert(aux_info[AT_PHNUM] != NULL);
496 phnum = aux_info[AT_PHNUM]->a_un.a_val;
497 assert(aux_info[AT_PHENT] != NULL);
498 assert(aux_info[AT_PHENT]->a_un.a_val == sizeof(Elf_Phdr));
499 assert(aux_info[AT_ENTRY] != NULL);
500 entry = (caddr_t) aux_info[AT_ENTRY]->a_un.a_ptr;
501 if ((obj_main = digest_phdr(phdr, phnum, entry, argv0)) == NULL)
/*
 * Determine the executable's pathname: prefer the kernel-provided
 * AT_EXECPATH, making it absolute via getcwd() when relative; fall
 * back to argv[0] when unavailable or on truncation.
 */
505 if (aux_info[AT_EXECPATH] != NULL) {
507 char buf[MAXPATHLEN];
509 kexecpath = aux_info[AT_EXECPATH]->a_un.a_ptr;
510 dbg("AT_EXECPATH %p %s", kexecpath, kexecpath);
511 if (kexecpath[0] == '/')
512 obj_main->path = kexecpath;
513 else if (getcwd(buf, sizeof(buf)) == NULL ||
514 strlcat(buf, "/", sizeof(buf)) >= sizeof(buf) ||
515 strlcat(buf, kexecpath, sizeof(buf)) >= sizeof(buf))
516 obj_main->path = xstrdup(argv0);
518 obj_main->path = xstrdup(buf);
520 dbg("No AT_EXECPATH");
521 obj_main->path = xstrdup(argv0);
523 dbg("obj_main path %s", obj_main->path);
524 obj_main->mainprog = true;
526 if (aux_info[AT_STACKPROT] != NULL &&
527 aux_info[AT_STACKPROT]->a_un.a_val != 0)
528 stack_prot = aux_info[AT_STACKPROT]->a_un.a_val;
532 * Get the actual dynamic linker pathname from the executable if
533 * possible. (It should always be possible.) That ensures that
534 * gdb will find the right dynamic linker even if a non-standard
537 if (obj_main->interp != NULL &&
538 strcmp(obj_main->interp, obj_rtld.path) != 0) {
540 obj_rtld.path = xstrdup(obj_main->interp);
541 __progname = obj_rtld.path;
545 digest_dynamic(obj_main, 0);
546 dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d",
547 obj_main->path, obj_main->valid_hash_sysv, obj_main->valid_hash_gnu,
548 obj_main->dynsymcount);
550 linkmap_add(obj_main);
551 linkmap_add(&obj_rtld);
553 /* Link the main program into the list of objects. */
554 TAILQ_INSERT_HEAD(&obj_list, obj_main, next);
558 /* Initialize a fake symbol for resolving undefined weak references. */
559 sym_zero.st_info = ELF_ST_INFO(STB_GLOBAL, STT_NOTYPE);
560 sym_zero.st_shndx = SHN_UNDEF;
561 sym_zero.st_value = -(uintptr_t)obj_main->relocbase;
564 libmap_disable = (bool)lm_init(libmap_override);
566 dbg("loading LD_PRELOAD libraries");
567 if (load_preload_objects() == -1)
/* Remember where the preloaded objects end so initlist can start there. */
569 preload_tail = globallist_curr(TAILQ_LAST(&obj_list, obj_entry_q));
571 dbg("loading needed objects");
572 if (load_needed_objects(obj_main, 0) == -1)
575 /* Make a list of all objects loaded at startup. */
576 last_interposer = obj_main;
577 TAILQ_FOREACH(obj, &obj_list, next) {
/* Interposing objects are queued directly after the last interposer. */
580 if (obj->z_interpose && obj != obj_main) {
581 objlist_put_after(&list_main, last_interposer, obj);
582 last_interposer = obj;
584 objlist_push_tail(&list_main, obj);
589 dbg("checking for required versions");
590 if (rtld_verify_versions(&list_main) == -1 && !ld_tracing)
593 if (ld_tracing) { /* We're done */
594 trace_loaded_objects(obj_main);
598 if (getenv(_LD("DUMP_REL_PRE")) != NULL) {
599 dump_relocations(obj_main);
604 * Processing tls relocations requires having the tls offsets
605 * initialized. Prepare offsets before starting initial
606 * relocation processing.
608 dbg("initializing initial thread local storage offsets");
609 STAILQ_FOREACH(entry, &list_main, link) {
611 * Allocate all the initial objects out of the static TLS
612 * block even if they didn't ask for it.
614 allocate_tls_offset(entry->obj);
617 if (relocate_objects(obj_main,
618 ld_bind_now != NULL && *ld_bind_now != '\0',
619 &obj_rtld, SYMLOOK_EARLY, NULL) == -1)
622 dbg("doing copy relocations");
623 if (do_copy_relocations(obj_main) == -1)
626 dbg("enforcing main obj relro");
627 if (obj_enforce_relro(obj_main) == -1)
630 if (getenv(_LD("DUMP_REL_POST")) != NULL) {
631 dump_relocations(obj_main);
636 * Setup TLS for main thread. This must be done after the
637 * relocations are processed, since tls initialization section
638 * might be the subject for relocations.
640 dbg("initializing initial thread local storage");
641 allocate_initial_tls(globallist_curr(TAILQ_FIRST(&obj_list)));
643 dbg("initializing key program variables");
644 set_program_var("__progname", argv[0] != NULL ? basename(argv[0]) : "");
645 set_program_var("environ", env);
646 set_program_var("__elf_aux_vector", aux);
648 /* Make a list of init functions to call. */
649 objlist_init(&initlist);
650 initlist_add_objects(globallist_curr(TAILQ_FIRST(&obj_list)),
651 preload_tail, &initlist);
653 r_debug_state(NULL, &obj_main->linkmap); /* say hello to gdb! */
655 map_stacks_exec(NULL);
658 dbg("resolving ifuncs");
659 if (resolve_objects_ifunc(obj_main,
660 ld_bind_now != NULL && *ld_bind_now != '\0', SYMLOOK_EARLY,
664 if (!obj_main->crt_no_init) {
666 * Make sure we don't call the main program's init and fini
667 * functions for binaries linked with old crt1 which calls
670 obj_main->init = obj_main->fini = (Elf_Addr)NULL;
671 obj_main->preinit_array = obj_main->init_array =
672 obj_main->fini_array = (Elf_Addr)NULL;
/* Run init functions and load filtees while holding the bind lock. */
675 wlock_acquire(rtld_bind_lock, &lockstate);
676 if (obj_main->crt_no_init)
678 objlist_call_init(&initlist, &lockstate);
679 _r_debug_postinit(&obj_main->linkmap);
680 objlist_clear(&initlist);
681 dbg("loading filtees");
682 TAILQ_FOREACH(obj, &obj_list, next) {
685 if (ld_loadfltr || obj->z_loadfltr)
686 load_filtees(obj, 0, &lockstate);
688 lock_release(rtld_bind_lock, &lockstate);
690 dbg("transferring control to program entry point = %p", obj_main->entry);
692 /* Return the exit procedure and the program entry point. */
693 *exit_proc = rtld_exit;
695 return (func_ptr_type) obj_main->entry;
/*
 * NOTE(review): fragmentary listing -- return-type line, braces, and local
 * declarations are elided.  Resolves an STT_GNU_IFUNC symbol: builds a
 * callable pointer for `def' within `obj', invokes that resolver function,
 * and returns the address the resolver selected.
 */
699 rtld_resolve_ifunc(const Obj_Entry *obj, const Elf_Sym *def)
704 ptr = (void *)make_function_pointer(def, obj);
705 target = call_ifunc_resolver(ptr);
706 return ((void *)target);
710 * NB: MIPS uses a private version of this function (_mips_rtld_bind).
711 * Changes to this function should be applied there as well.
/*
 * NOTE(review): fragmentary listing -- braces, several locals, the
 * preprocessor conditional selecting pltrel vs. pltrela, the find_symdef
 * error branch, and the final return are elided from this dump.
 * Lazy-binding entry point: resolves the PLT slot at `reloff' within `obj'.
 */
714 _rtld_bind(Obj_Entry *obj, Elf_Size reloff)
718 const Obj_Entry *defobj;
721 RtldLockState lockstate;
/* Take the bind lock shared; upgrade to exclusive on a lock-fault retry. */
723 rlock_acquire(rtld_bind_lock, &lockstate);
724 if (sigsetjmp(lockstate.env, 0) != 0)
725 lock_upgrade(rtld_bind_lock, &lockstate);
727 rel = (const Elf_Rel *) ((caddr_t) obj->pltrel + reloff);
729 rel = (const Elf_Rel *) ((caddr_t) obj->pltrela + reloff);
731 where = (Elf_Addr *) (obj->relocbase + rel->r_offset);
732 def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, SYMLOOK_IN_PLT,
/* IFUNC symbols are resolved by running their resolver function. */
736 if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
737 target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
739 target = (Elf_Addr)(defobj->relocbase + def->st_value);
741 dbg("\"%s\" in \"%s\" ==> %p in \"%s\"",
742 defobj->strtab + def->st_name, basename(obj->path),
743 (void *)target, basename(defobj->path));
746 * Write the new contents for the jmpslot. Note that depending on
747 * architecture, the value which we need to return back to the
748 * lazy binding trampoline may or may not be the target
749 * address. The value returned from reloc_jmpslot() is the value
750 * that the trampoline needs.
752 target = reloc_jmpslot(where, target, defobj, obj, rel);
753 lock_release(rtld_bind_lock, &lockstate);
758 * Error reporting function. Use it like printf. If formats the message
759 * into a buffer, and sets things up so that the next call to dlerror()
760 * will return the message.
/*
 * NOTE(review): fragmentary listing -- return-type line, braces, va_list
 * handling (va_start/va_end), and the error_message assignment are elided.
 * Formats the message into a static buffer for later retrieval by dlerror()
 * and logs it via LD_UTRACE.  Static buffer => not reentrant.
 */
763 _rtld_error(const char *fmt, ...)
765 static char buf[512];
769 rtld_vsnprintf(buf, sizeof buf, fmt, ap);
772 LD_UTRACE(UTRACE_RTLD_ERROR, NULL, NULL, 0, 0, error_message);
776 * Return a dynamically-allocated copy of the current error message, if any.
/*
 * NOTE(review): body fragment of errmsg_save() -- header and braces elided.
 * Caller owns the returned copy (freed by errmsg_restore).
 */
781 return error_message == NULL ? NULL : xstrdup(error_message);
785 * Restore the current error message from a copy which was previously saved
786 * by errmsg_save(). The copy is freed.
/*
 * NOTE(review): fragmentary listing -- braces, the else keyword, and the
 * free(saved_msg) call are elided.  Restores a message saved by
 * errmsg_save(); a NULL argument clears the pending dlerror() message.
 */
789 errmsg_restore(char *saved_msg)
791 if (saved_msg == NULL)
792 error_message = NULL;
794 _rtld_error("%s", saved_msg);
/*
 * NOTE(review): fragmentary listing -- return-type line and braces elided.
 * Returns the component after the last '/', or the whole string when no
 * slash is present.  Private helper; does not modify its argument (unlike
 * the libgen basename(3)).
 */
800 basename(const char *name)
802 const char *p = strrchr(name, '/');
803 return p != NULL ? p + 1 : name;
806 static struct utsname uts;
/*
 * NOTE(review): fragmentary listing -- return-type line, braces, the
 * strstr()-style keyword search inside the counting loop, pointer advances
 * in the substitution loop, the tail strcpy, free of `real', and the final
 * return are elided from this dump.  Replaces every occurrence of keyword
 * `kw' (e.g. "$ORIGIN") in `real' with `subst'; when `obj' is non-NULL the
 * substitution text is the object's resolved origin path.  `may_free'
 * indicates ownership of `real' transfers to this function.
 */
809 origin_subst_one(Obj_Entry *obj, char *real, const char *kw,
810 const char *subst, bool may_free)
812 char *p, *p1, *res, *resp;
813 int subst_len, kw_len, subst_count, old_len, new_len;
818 * First, count the number of the keyword occurrences, to
819 * preallocate the final string.
821 for (p = real, subst_count = 0;; p = p1 + kw_len, subst_count++) {
828 * If the keyword is not found, just return.
830 * Return non-substituted string if resolution failed. We
831 * cannot do anything more reasonable, the failure mode of the
832 * caller is unresolved library anyway.
834 if (subst_count == 0 || (obj != NULL && !obj_resolve_origin(obj)))
835 return (may_free ? real : xstrdup(real));
837 subst = obj->origin_path;
840 * There is indeed something to substitute. Calculate the
841 * length of the resulting string, and allocate it.
843 subst_len = strlen(subst);
844 old_len = strlen(real);
845 new_len = old_len + (subst_len - kw_len) * subst_count;
846 res = xmalloc(new_len + 1);
849 * Now, execute the substitution loop.
851 for (p = real, resp = res, *resp = '\0';;) {
854 /* Copy the prefix before keyword. */
855 memcpy(resp, p, p1 - p);
857 /* Keyword replacement. */
858 memcpy(resp, subst, subst_len);
866 /* Copy to the end of string and finish. */
/*
 * NOTE(review): fragmentary listing -- return-type line, braces, the
 * rtld_die() branch after the uname failure, and the final return of res4
 * are elided.  Expands $ORIGIN, $OSNAME, $OSREL and $PLATFORM in a path
 * string; substitution is skipped entirely for untrusted (set-ugid)
 * processes.  uname(3) results are cached in the file-scope `uts'.
 */
874 origin_subst(Obj_Entry *obj, char *real)
876 char *res1, *res2, *res3, *res4;
878 if (obj == NULL || !trust)
879 return (xstrdup(real));
/* Lazily populate the utsname cache on first use. */
880 if (uts.sysname[0] == '\0') {
881 if (uname(&uts) != 0) {
882 _rtld_error("utsname failed: %d", errno);
/* Each later stage frees the previous stage's result (may_free == true). */
886 res1 = origin_subst_one(obj, real, "$ORIGIN", NULL, false);
887 res2 = origin_subst_one(NULL, res1, "$OSNAME", uts.sysname, true);
888 res3 = origin_subst_one(NULL, res2, "$OSREL", uts.release, true);
889 res4 = origin_subst_one(NULL, res3, "$PLATFORM", uts.machine, true);
/*
 * NOTE(review): body fragment -- presumably the interior of rtld_die()/die()
 * (header, braces, the msg==NULL fallback, and the _exit call are elided;
 * verify against the full source).  Prints the pending dlerror() message to
 * stderr using rtld's async-signal-safe output helpers.
 */
896 const char *msg = dlerror();
900 rtld_fdputstr(STDERR_FILENO, msg);
901 rtld_fdputchar(STDERR_FILENO, '\n');
906 * Process a shared object's DYNAMIC section, and save the important
907 * information in its Obj_Entry structure.
/*
 * NOTE(review): fragmentary listing -- most `case DT_*:' labels, `break'
 * statements, braces, and several assignments are elided from this dump,
 * so each visible statement's governing case label must be inferred from
 * the full source.  Walks obj->dynamic and records each entry of interest
 * into the Obj_Entry; rpath/soname/runpath entries are only remembered via
 * the out-parameters because the string table address may not be known yet.
 */
910 digest_dynamic1(Obj_Entry *obj, int early, const Elf_Dyn **dyn_rpath,
911 const Elf_Dyn **dyn_soname, const Elf_Dyn **dyn_runpath)
914 Needed_Entry **needed_tail = &obj->needed;
915 Needed_Entry **needed_filtees_tail = &obj->needed_filtees;
916 Needed_Entry **needed_aux_filtees_tail = &obj->needed_aux_filtees;
917 const Elf_Hashelt *hashtab;
918 const Elf32_Word *hashval;
919 Elf32_Word bkt, nmaskwords;
921 int plttype = DT_REL;
927 obj->bind_now = false;
928 for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) {
929 switch (dynp->d_tag) {
932 obj->rel = (const Elf_Rel *) (obj->relocbase + dynp->d_un.d_ptr);
936 obj->relsize = dynp->d_un.d_val;
940 assert(dynp->d_un.d_val == sizeof(Elf_Rel));
944 obj->pltrel = (const Elf_Rel *)
945 (obj->relocbase + dynp->d_un.d_ptr);
949 obj->pltrelsize = dynp->d_un.d_val;
953 obj->rela = (const Elf_Rela *) (obj->relocbase + dynp->d_un.d_ptr);
957 obj->relasize = dynp->d_un.d_val;
961 assert(dynp->d_un.d_val == sizeof(Elf_Rela));
/* DT_PLTREL: remember whether PLT relocations are Rel or Rela. */
965 plttype = dynp->d_un.d_val;
966 assert(dynp->d_un.d_val == DT_REL || plttype == DT_RELA);
970 obj->symtab = (const Elf_Sym *)
971 (obj->relocbase + dynp->d_un.d_ptr);
975 assert(dynp->d_un.d_val == sizeof(Elf_Sym));
979 obj->strtab = (const char *) (obj->relocbase + dynp->d_un.d_ptr);
983 obj->strsize = dynp->d_un.d_val;
987 obj->verneed = (const Elf_Verneed *) (obj->relocbase +
992 obj->verneednum = dynp->d_un.d_val;
996 obj->verdef = (const Elf_Verdef *) (obj->relocbase +
1001 obj->verdefnum = dynp->d_un.d_val;
1005 obj->versyms = (const Elf_Versym *)(obj->relocbase +
/* SysV hash table: word 0 = nbuckets, word 1 = nchains, then the arrays. */
1011 hashtab = (const Elf_Hashelt *)(obj->relocbase +
1013 obj->nbuckets = hashtab[0];
1014 obj->nchains = hashtab[1];
1015 obj->buckets = hashtab + 2;
1016 obj->chains = obj->buckets + obj->nbuckets;
1017 obj->valid_hash_sysv = obj->nbuckets > 0 && obj->nchains > 0 &&
1018 obj->buckets != NULL;
/* GNU hash table: header words, Bloom filter, then buckets and chains. */
1024 hashtab = (const Elf_Hashelt *)(obj->relocbase +
1026 obj->nbuckets_gnu = hashtab[0];
1027 obj->symndx_gnu = hashtab[1];
1028 nmaskwords = hashtab[2];
1029 bloom_size32 = (__ELF_WORD_SIZE / 32) * nmaskwords;
1030 obj->maskwords_bm_gnu = nmaskwords - 1;
1031 obj->shift2_gnu = hashtab[3];
1032 obj->bloom_gnu = (Elf_Addr *) (hashtab + 4);
1033 obj->buckets_gnu = hashtab + 4 + bloom_size32;
1034 obj->chain_zero_gnu = obj->buckets_gnu + obj->nbuckets_gnu -
1036 /* Number of bitmask words is required to be power of 2 */
1037 obj->valid_hash_gnu = powerof2(nmaskwords) &&
1038 obj->nbuckets_gnu > 0 && obj->buckets_gnu != NULL;
/* DT_NEEDED: append to the singly-linked needed list, preserving order. */
1044 Needed_Entry *nep = NEW(Needed_Entry);
1045 nep->name = dynp->d_un.d_val;
1050 needed_tail = &nep->next;
1056 Needed_Entry *nep = NEW(Needed_Entry);
1057 nep->name = dynp->d_un.d_val;
1061 *needed_filtees_tail = nep;
1062 needed_filtees_tail = &nep->next;
1068 Needed_Entry *nep = NEW(Needed_Entry);
1069 nep->name = dynp->d_un.d_val;
1073 *needed_aux_filtees_tail = nep;
1074 needed_aux_filtees_tail = &nep->next;
1079 obj->pltgot = (Elf_Addr *) (obj->relocbase + dynp->d_un.d_ptr);
1083 obj->textrel = true;
1087 obj->symbolic = true;
1092 * We have to wait until later to process this, because we
1093 * might not have gotten the address of the string table yet.
1103 *dyn_runpath = dynp;
1107 obj->init = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
1110 case DT_PREINIT_ARRAY:
1111 obj->preinit_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
1114 case DT_PREINIT_ARRAYSZ:
1115 obj->preinit_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
1119 obj->init_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
1122 case DT_INIT_ARRAYSZ:
1123 obj->init_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
1127 obj->fini = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
1131 obj->fini_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
1134 case DT_FINI_ARRAYSZ:
1135 obj->fini_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
1139 * Don't process DT_DEBUG on MIPS as the dynamic section
1140 * is mapped read-only. DT_MIPS_RLD_MAP is used instead.
1146 dbg("Filling in DT_DEBUG entry");
1147 ((Elf_Dyn*)dynp)->d_un.d_ptr = (Elf_Addr) &r_debug;
/* DT_FLAGS: translate DF_* bits into Obj_Entry booleans. */
1152 if (dynp->d_un.d_val & DF_ORIGIN)
1153 obj->z_origin = true;
1154 if (dynp->d_un.d_val & DF_SYMBOLIC)
1155 obj->symbolic = true;
1156 if (dynp->d_un.d_val & DF_TEXTREL)
1157 obj->textrel = true;
1158 if (dynp->d_un.d_val & DF_BIND_NOW)
1159 obj->bind_now = true;
1160 /*if (dynp->d_un.d_val & DF_STATIC_TLS)
1164 case DT_MIPS_LOCAL_GOTNO:
1165 obj->local_gotno = dynp->d_un.d_val;
1168 case DT_MIPS_SYMTABNO:
1169 obj->symtabno = dynp->d_un.d_val;
1172 case DT_MIPS_GOTSYM:
1173 obj->gotsym = dynp->d_un.d_val;
1176 case DT_MIPS_RLD_MAP:
1177 *((Elf_Addr *)(dynp->d_un.d_ptr)) = (Elf_Addr) &r_debug;
1181 #ifdef __powerpc64__
1182 case DT_PPC64_GLINK:
1183 obj->glink = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
/* DT_FLAGS_1: translate DF_1_* bits into Obj_Entry booleans. */
1188 if (dynp->d_un.d_val & DF_1_NOOPEN)
1189 obj->z_noopen = true;
1190 if (dynp->d_un.d_val & DF_1_ORIGIN)
1191 obj->z_origin = true;
1192 if (dynp->d_un.d_val & DF_1_GLOBAL)
1193 obj->z_global = true;
1194 if (dynp->d_un.d_val & DF_1_BIND_NOW)
1195 obj->bind_now = true;
1196 if (dynp->d_un.d_val & DF_1_NODELETE)
1197 obj->z_nodelete = true;
1198 if (dynp->d_un.d_val & DF_1_LOADFLTR)
1199 obj->z_loadfltr = true;
1200 if (dynp->d_un.d_val & DF_1_INTERPOSE)
1201 obj->z_interpose = true;
1202 if (dynp->d_un.d_val & DF_1_NODEFLIB)
1203 obj->z_nodeflib = true;
1208 dbg("Ignoring d_tag %ld = %#lx", (long)dynp->d_tag,
1215 obj->traced = false;
/* If the PLT uses Rela entries, move the Rel view over to the Rela fields. */
1217 if (plttype == DT_RELA) {
1218 obj->pltrela = (const Elf_Rela *) obj->pltrel;
1220 obj->pltrelasize = obj->pltrelsize;
1221 obj->pltrelsize = 0;
1224 /* Determine size of dynsym table (equal to nchains of sysv hash) */
1225 if (obj->valid_hash_sysv)
1226 obj->dynsymcount = obj->nchains;
1227 else if (obj->valid_hash_gnu) {
/*
 * GNU hash carries no symbol count; walk every bucket's chain to its
 * terminator (low bit set) to count the hashed symbols, then add the
 * unhashed prefix (symndx_gnu).
 */
1228 obj->dynsymcount = 0;
1229 for (bkt = 0; bkt < obj->nbuckets_gnu; bkt++) {
1230 if (obj->buckets_gnu[bkt] == 0)
1232 hashval = &obj->chain_zero_gnu[obj->buckets_gnu[bkt]];
1235 while ((*hashval++ & 1u) == 0);
1237 obj->dynsymcount += obj->symndx_gnu;
/*
 * NOTE(review): this excerpt is non-contiguous (interior lines elided),
 * so the bodies below are fragments of the original functions.
 *
 * Resolve and cache obj->origin_path (the directory part of obj->path)
 * for later $ORIGIN substitution.  Returns true if the path was already
 * resolved, or if rtld_dirname_abs() succeeds.
 */
1242 obj_resolve_origin(Obj_Entry *obj)
1245 if (obj->origin_path != NULL)
1247 obj->origin_path = xmalloc(PATH_MAX);
1248 return (rtld_dirname_abs(obj->path, obj->origin_path) != -1);
/*
 * Second digestion pass over the dynamic section, run once the string
 * table address is known: resolve DT_RUNPATH/DT_RPATH (with $ORIGIN
 * substitution via origin_subst()) and register DT_SONAME as an alias
 * for the object.  DT_RUNPATH takes precedence over DT_RPATH here.
 */
1252 digest_dynamic2(Obj_Entry *obj, const Elf_Dyn *dyn_rpath,
1253 const Elf_Dyn *dyn_soname, const Elf_Dyn *dyn_runpath)
1256 if (obj->z_origin && !obj_resolve_origin(obj))
1259 if (dyn_runpath != NULL) {
1260 obj->runpath = (char *)obj->strtab + dyn_runpath->d_un.d_val;
1261 obj->runpath = origin_subst(obj, obj->runpath);
1262 } else if (dyn_rpath != NULL) {
1263 obj->rpath = (char *)obj->strtab + dyn_rpath->d_un.d_val;
1264 obj->rpath = origin_subst(obj, obj->rpath);
1266 if (dyn_soname != NULL)
1267 object_add_name(obj, obj->strtab + dyn_soname->d_un.d_val);
/*
 * Driver that runs both digestion passes over obj's dynamic section,
 * threading the rpath/soname/runpath entries from pass 1 into pass 2.
 */
1271 digest_dynamic(Obj_Entry *obj, int early)
1273 const Elf_Dyn *dyn_rpath;
1274 const Elf_Dyn *dyn_soname;
1275 const Elf_Dyn *dyn_runpath;
1277 digest_dynamic1(obj, early, &dyn_rpath, &dyn_soname, &dyn_runpath);
1278 digest_dynamic2(obj, dyn_rpath, dyn_soname, dyn_runpath);
1282 * Process a shared object's program header. This is used only for the
1283 * main program, when the kernel has already loaded the main program
1284 * into memory before calling the dynamic linker. It creates and
1285 * returns an Obj_Entry structure.
1288 digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry, const char *path)
1291 const Elf_Phdr *phlimit = phdr + phnum;
1293 Elf_Addr note_start, note_end;
/* First pass: locate PT_PHDR so relocbase can be computed from it. */
1297 for (ph = phdr; ph < phlimit; ph++) {
1298 if (ph->p_type != PT_PHDR)
1302 obj->phsize = ph->p_memsz;
/* relocbase = actual phdr address minus its link-time vaddr. */
1303 obj->relocbase = (caddr_t)phdr - ph->p_vaddr;
/* Default stack permissions; may be overridden by PT_GNU_STACK below. */
1307 obj->stack_flags = PF_X | PF_R | PF_W;
/* Second pass: digest the remaining program header entries. */
1309 for (ph = phdr; ph < phlimit; ph++) {
1310 switch (ph->p_type) {
1313 obj->interp = (const char *)(ph->p_vaddr + obj->relocbase);
1317 if (nsegs == 0) { /* First load segment */
1318 obj->vaddrbase = trunc_page(ph->p_vaddr);
1319 obj->mapbase = obj->vaddrbase + obj->relocbase;
1320 obj->textsize = round_page(ph->p_vaddr + ph->p_memsz) -
1322 } else { /* Last load segment */
1323 obj->mapsize = round_page(ph->p_vaddr + ph->p_memsz) -
1330 obj->dynamic = (const Elf_Dyn *)(ph->p_vaddr + obj->relocbase);
/* PT_TLS: record the thread-local storage template of the object. */
1335 obj->tlssize = ph->p_memsz;
1336 obj->tlsalign = ph->p_align;
1337 obj->tlsinitsize = ph->p_filesz;
1338 obj->tlsinit = (void*)(ph->p_vaddr + obj->relocbase);
1342 obj->stack_flags = ph->p_flags;
/* PT_GNU_RELRO: range to be re-protected read-only after relocation. */
1346 obj->relro_page = obj->relocbase + trunc_page(ph->p_vaddr);
1347 obj->relro_size = round_page(ph->p_memsz);
/* PT_NOTE: scan the note segment for FreeBSD ABI tags. */
1351 note_start = (Elf_Addr)obj->relocbase + ph->p_vaddr;
1352 note_end = note_start + ph->p_filesz;
1353 digest_notes(obj, note_start, note_end);
1358 _rtld_error("%s: too few PT_LOAD segments", path);
/*
 * Walk the ELF notes in [note_start, note_end) and pick up the
 * FreeBSD-vendor notes: NT_FREEBSD_ABI_TAG (records the osrel the
 * object was built for) and NT_FREEBSD_NOINIT_TAG (crt does not call
 * the init functions itself).  All other notes are ignored.
 */
1367 digest_notes(Obj_Entry *obj, Elf_Addr note_start, Elf_Addr note_end)
1369 const Elf_Note *note;
1370 const char *note_name;
/* Note name and desc are each padded to 4-byte (Elf32_Addr) boundaries. */
1373 for (note = (const Elf_Note *)note_start; (Elf_Addr)note < note_end;
1374 note = (const Elf_Note *)((const char *)(note + 1) +
1375 roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
1376 roundup2(note->n_descsz, sizeof(Elf32_Addr)))) {
1377 if (note->n_namesz != sizeof(NOTE_FREEBSD_VENDOR) ||
1378 note->n_descsz != sizeof(int32_t))
1380 if (note->n_type != NT_FREEBSD_ABI_TAG &&
1381 note->n_type != NT_FREEBSD_NOINIT_TAG)
1383 note_name = (const char *)(note + 1);
1384 if (strncmp(NOTE_FREEBSD_VENDOR, note_name,
1385 sizeof(NOTE_FREEBSD_VENDOR)) != 0)
1387 switch (note->n_type) {
1388 case NT_FREEBSD_ABI_TAG:
1389 /* FreeBSD osrel note: desc is a single int32 after the name. */
1390 p = (uintptr_t)(note + 1);
1391 p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
1392 obj->osrel = *(const int32_t *)(p);
1393 dbg("note osrel %d", obj->osrel);
1395 case NT_FREEBSD_NOINIT_TAG:
1396 /* FreeBSD 'crt does not call init' note */
1397 obj->crt_no_init = true;
1398 dbg("note crt_no_init");
/*
 * Validate a dlopen()-style handle: it must match a live entry on the
 * global object list with non-zero refcount and dl_refcount.  Sets an
 * rtld error on failure.
 */
1405 dlcheck(void *handle)
1409 TAILQ_FOREACH(obj, &obj_list, next) {
1410 if (obj == (Obj_Entry *) handle)
1414 if (obj == NULL || obj->refcount == 0 || obj->dl_refcount == 0) {
1415 _rtld_error("Invalid shared object handle %p", handle);
1422 * If the given object is already in the donelist, return true. Otherwise
1423 * add the object to the list and return false.
1426 donelist_check(DoneList *dlp, const Obj_Entry *obj)
1430 for (i = 0; i < dlp->num_used; i++)
1431 if (dlp->objs[i] == obj)
1434 * Our donelist allocation should always be sufficient. But if
1435 * our threads locking isn't working properly, more shared objects
1436 * could have been loaded since we allocated the list. That should
1437 * never happen, but we'll handle it properly just in case it does.
1439 if (dlp->num_used < dlp->num_alloc)
1440 dlp->objs[dlp->num_used++] = obj;
1445 * Hash function for symbol table lookup. Don't even think about changing
1446 * this. It is specified by the System V ABI.
1449 elf_hash(const char *name)
1451 const unsigned char *p = (const unsigned char *) name;
1452 unsigned long h = 0;
1455 while (*p != '\0') {
1456 h = (h << 4) + *p++;
1457 if ((g = h & 0xf0000000) != 0)
1465 * The GNU hash function is the Daniel J. Bernstein hash clipped to 32 bits
1466 * unsigned in case it's implemented with a wider type.
1469 gnu_hash(const char *s)
/* Iterate bytes of s, folding each into h (DJB-style); mask to 32 bits. */
1475 for (c = *s; c != '\0'; c = *++s)
1477 return (h & 0xffffffff);
1482 * Find the library with the given name, and return its full pathname.
1483 * The returned string is dynamically allocated. Generates an error
1484 * message and returns NULL if the library cannot be found.
1486 * If the second argument is non-NULL, then it refers to an already-
1487 * loaded shared object, whose library search path will be searched.
1489 * If a library is successfully located via LD_LIBRARY_PATH_FDS, its
1490 * descriptor (which is close-on-exec) will be passed out via the third
1493 * The search order is:
1494 * DT_RPATH in the referencing file _unless_ DT_RUNPATH is present (1)
1495 * DT_RPATH of the main object if DSO without defined DT_RUNPATH (1)
1497 * DT_RUNPATH in the referencing file
1498 * ldconfig hints (if -z nodefaultlib, filter out default library directories
1500 * /lib:/usr/lib _unless_ the referencing file is linked with -z nodefaultlib
1502 * (1) Handled in digest_dynamic2 - rpath left NULL if runpath defined.
1505 find_library(const char *xname, const Obj_Entry *refobj, int *fdp)
1509 bool nodeflib, objgiven;
1511 objgiven = refobj != NULL;
/* Names containing '/' are treated as literal paths, not searched. */
1512 if (strchr(xname, '/') != NULL) { /* Hard coded pathname */
/* Relative paths are refused for set[ug]id processes (!trust). */
1513 if (xname[0] != '/' && !trust) {
1514 _rtld_error("Absolute pathname required for shared object \"%s\"",
1518 return (origin_subst(__DECONST(Obj_Entry *, refobj),
1519 __DECONST(char *, xname)));
/* Apply libmap substitution unless disabled or no referencing object. */
1522 if (libmap_disable || !objgiven ||
1523 (name = lm_find(refobj->path, xname)) == NULL)
1524 name = (char *)xname;
1526 dbg(" Searching for \"%s\"", name);
1529 * If refobj->rpath != NULL, then refobj->runpath is NULL. Fall
1530 * back to pre-conforming behaviour if user requested so with
1531 * LD_LIBRARY_PATH_RPATH environment variable and ignore -z
/* Legacy ordering: LD_LIBRARY_PATH first, then rpath, fds, hints, std. */
1534 if (objgiven && refobj->rpath != NULL && ld_library_path_rpath) {
1535 if ((pathname = search_library_path(name, ld_library_path)) != NULL ||
1537 (pathname = search_library_path(name, refobj->rpath)) != NULL) ||
1538 (pathname = search_library_pathfds(name, ld_library_dirs, fdp)) != NULL ||
1539 (pathname = search_library_path(name, gethints(false))) != NULL ||
1540 (pathname = search_library_path(name, ld_standard_library_path)) != NULL)
/* Conforming ordering, per the search-order comment above. */
1543 nodeflib = objgiven ? refobj->z_nodeflib : false;
1545 (pathname = search_library_path(name, refobj->rpath)) != NULL) ||
1546 (objgiven && refobj->runpath == NULL && refobj != obj_main &&
1547 (pathname = search_library_path(name, obj_main->rpath)) != NULL) ||
1548 (pathname = search_library_path(name, ld_library_path)) != NULL ||
1550 (pathname = search_library_path(name, refobj->runpath)) != NULL) ||
1551 (pathname = search_library_pathfds(name, ld_library_dirs, fdp)) != NULL ||
1552 (pathname = search_library_path(name, gethints(nodeflib))) != NULL ||
1553 (objgiven && !nodeflib &&
1554 (pathname = search_library_path(name, ld_standard_library_path)) != NULL))
1558 if (objgiven && refobj->path != NULL) {
1559 _rtld_error("Shared object \"%s\" not found, required by \"%s\"",
1560 name, basename(refobj->path));
1562 _rtld_error("Shared object \"%s\" not found", name);
1568 * Given a symbol number in a referencing object, find the corresponding
1569 * definition of the symbol. Returns a pointer to the symbol, or NULL if
1570 * no definition was found. Returns a pointer to the Obj_Entry of the
1571 * defining object via the reference parameter DEFOBJ_OUT.
1574 find_symdef(unsigned long symnum, const Obj_Entry *refobj,
1575 const Obj_Entry **defobj_out, int flags, SymCache *cache,
1576 RtldLockState *lockstate)
1580 const Obj_Entry *defobj;
1586 * If we have already found this symbol, get the information from
/* Reject symbol indices beyond the dynamic symbol table. */
1589 if (symnum >= refobj->dynsymcount)
1590 return NULL; /* Bad object */
/* Per-object cache hit: reuse the previously resolved definition. */
1591 if (cache != NULL && cache[symnum].sym != NULL) {
1592 *defobj_out = cache[symnum].obj;
1593 return cache[symnum].sym;
1596 ref = refobj->symtab + symnum;
1597 name = refobj->strtab + ref->st_name;
1602 * We don't have to do a full scale lookup if the symbol is local.
1603 * We know it will bind to the instance in this load module; to
1604 * which we already have a pointer (ie ref). By not doing a lookup,
1605 * we not only improve performance, but it also avoids unresolvable
1606 * symbols when local symbols are not in the hash table. This has
1607 * been seen with the ia64 toolchain.
1609 if (ELF_ST_BIND(ref->st_info) != STB_LOCAL) {
/* STT_SECTION entries should never be referenced by relocations. */
1610 if (ELF_ST_TYPE(ref->st_info) == STT_SECTION) {
1611 _rtld_error("%s: Bogus symbol table entry %lu", refobj->path,
/* Global/weak symbol: do the full scoped default lookup. */
1614 symlook_init(&req, name);
1616 req.ventry = fetch_ventry(refobj, symnum);
1617 req.lockstate = lockstate;
1618 res = symlook_default(&req, refobj);
1621 defobj = req.defobj_out;
1629 * If we found no definition and the reference is weak, treat the
1630 * symbol as having the value zero.
1632 if (def == NULL && ELF_ST_BIND(ref->st_info) == STB_WEAK) {
1638 *defobj_out = defobj;
1639 /* Record the information in the cache to avoid subsequent lookups. */
1640 if (cache != NULL) {
1641 cache[symnum].sym = def;
1642 cache[symnum].obj = defobj;
/* rtld's own undefined symbols are reported elsewhere; stay quiet. */
1645 if (refobj != &obj_rtld)
1646 _rtld_error("%s: Undefined symbol \"%s\"", refobj->path, name);
1652 * Return the search path from the ldconfig hints file, reading it if
1653 * necessary. If nostdlib is true, then the default search paths are
1654 * not added to result.
1656 * Returns NULL if there are problems with the hints file,
1657 * or if the search path there is empty.
1660 gethints(bool nostdlib)
/* Cached across calls: raw hints dirlist and its filtered variant. */
1662 static char *hints, *filtered_path;
1663 static struct elfhints_hdr hdr;
1664 struct fill_search_info_args sargs, hargs;
1665 struct dl_serinfo smeta, hmeta, *SLPinfo, *hintinfo;
1666 struct dl_serpath *SLPpath, *hintpath;
1668 struct stat hint_stat;
1669 unsigned int SLPndx, hintndx, fndx, fcount;
1675 /* First call, read the hints file */
1676 if (hints == NULL) {
1677 /* Keep from trying again in case the hints file is bad. */
1680 if ((fd = open(ld_elf_hints_path, O_RDONLY | O_CLOEXEC)) == -1)
1684 * Check of hdr.dirlistlen value against type limit
1685 * intends to pacify static analyzers. Further
1686 * paranoia leads to checks that dirlist is fully
1687 * contained in the file range.
1689 if (read(fd, &hdr, sizeof hdr) != sizeof hdr ||
1690 hdr.magic != ELFHINTS_MAGIC ||
1691 hdr.version != 1 || hdr.dirlistlen > UINT_MAX / 2 ||
1692 fstat(fd, &hint_stat) == -1) {
/* Overflow-safe checks that the dirlist lies within the file. */
1699 if (dl + hdr.dirlist < dl)
1702 if (dl + hdr.dirlistlen < dl)
1704 dl += hdr.dirlistlen;
1705 if (dl > hint_stat.st_size)
1707 p = xmalloc(hdr.dirlistlen + 1);
/* Read the NUL-terminated dirlist string from the hints strtab. */
1709 if (lseek(fd, hdr.strtab + hdr.dirlist, SEEK_SET) == -1 ||
1710 read(fd, p, hdr.dirlistlen + 1) !=
1711 (ssize_t)hdr.dirlistlen + 1 || p[hdr.dirlistlen] != '\0') {
1720 * If caller agreed to receive list which includes the default
1721 * paths, we are done. Otherwise, if we still did not
1722 * calculated filtered result, do it now.
1725 return (hints[0] != '\0' ? hints : NULL);
1726 if (filtered_path != NULL)
1730 * Obtain the list of all configured search paths, and the
1731 * list of the default paths.
1733 * First estimate the size of the results.
1735 smeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
1737 hmeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
1740 sargs.request = RTLD_DI_SERINFOSIZE;
1741 sargs.serinfo = &smeta;
1742 hargs.request = RTLD_DI_SERINFOSIZE;
1743 hargs.serinfo = &hmeta;
1745 path_enumerate(ld_standard_library_path, fill_search_info, &sargs);
1746 path_enumerate(hints, fill_search_info, &hargs);
1748 SLPinfo = xmalloc(smeta.dls_size);
1749 hintinfo = xmalloc(hmeta.dls_size);
1752 * Next fetch both sets of paths.
1754 sargs.request = RTLD_DI_SERINFO;
1755 sargs.serinfo = SLPinfo;
1756 sargs.serpath = &SLPinfo->dls_serpath[0];
1757 sargs.strspace = (char *)&SLPinfo->dls_serpath[smeta.dls_cnt];
1759 hargs.request = RTLD_DI_SERINFO;
1760 hargs.serinfo = hintinfo;
1761 hargs.serpath = &hintinfo->dls_serpath[0];
1762 hargs.strspace = (char *)&hintinfo->dls_serpath[hmeta.dls_cnt];
1764 path_enumerate(ld_standard_library_path, fill_search_info, &sargs);
1765 path_enumerate(hints, fill_search_info, &hargs);
1768 * Now calculate the difference between two sets, by excluding
1769 * standard paths from the full set.
/* Result can be no longer than the raw dirlist, so this bound is safe. */
1773 filtered_path = xmalloc(hdr.dirlistlen + 1);
1774 hintpath = &hintinfo->dls_serpath[0];
1775 for (hintndx = 0; hintndx < hmeta.dls_cnt; hintndx++, hintpath++) {
1777 SLPpath = &SLPinfo->dls_serpath[0];
1779 * Check each standard path against current.
1781 for (SLPndx = 0; SLPndx < smeta.dls_cnt; SLPndx++, SLPpath++) {
1782 /* matched, skip the path */
1783 if (!strcmp(hintpath->dls_name, SLPpath->dls_name)) {
1791 * Not matched against any standard path, add the path
1792 * to result. Separate consecutive paths with ':'.
1795 filtered_path[fndx] = ':';
1799 flen = strlen(hintpath->dls_name);
1800 strncpy((filtered_path + fndx), hintpath->dls_name, flen);
1803 filtered_path[fndx] = '\0';
1809 return (filtered_path[0] != '\0' ? filtered_path : NULL);
/*
 * Build the DAG membership lists (dldags/dagmembers) rooted at "root"
 * by a breadth-first walk of the needed-object graph.  Idempotent:
 * returns early if the DAG was already initialized.
 */
1813 init_dag(Obj_Entry *root)
1815 const Needed_Entry *needed;
1816 const Objlist_Entry *elm;
1819 if (root->dag_inited)
1821 donelist_init(&donelist);
1823 /* Root object belongs to own DAG. */
1824 objlist_push_tail(&root->dldags, root);
1825 objlist_push_tail(&root->dagmembers, root);
1826 donelist_check(&donelist, root);
1829 * Add dependencies of root object to DAG in breadth order
1830 * by exploiting the fact that each new object get added
1831 * to the tail of the dagmembers list.
1833 STAILQ_FOREACH(elm, &root->dagmembers, link) {
1834 for (needed = elm->obj->needed; needed != NULL; needed = needed->next) {
1835 if (needed->obj == NULL || donelist_check(&donelist, needed->obj))
1837 objlist_push_tail(&needed->obj->dldags, root);
1838 objlist_push_tail(&root->dagmembers, needed->obj);
1841 root->dag_inited = true;
/*
 * Initialize a zeroed list-marker Obj_Entry, used as a stable cursor
 * in the global object list.
 */
1845 init_marker(Obj_Entry *marker)
1848 bzero(marker, sizeof(*marker));
1849 marker->marker = true;
/*
 * Return the nearest real (non-marker) object at or before "obj" in the
 * global list, skipping marker entries backwards.
 */
1853 globallist_curr(const Obj_Entry *obj)
1860 return (__DECONST(Obj_Entry *, obj));
1861 obj = TAILQ_PREV(obj, obj_entry_q, next);
/*
 * Return the next real (non-marker) object after "obj" in the global
 * list.
 */
1866 globallist_next(const Obj_Entry *obj)
1870 obj = TAILQ_NEXT(obj, next);
1874 return (__DECONST(Obj_Entry *, obj));
1878 /* Prevent the object from being unmapped while the bind lock is dropped. */
1880 hold_object(Obj_Entry *obj)
/* Drop a hold; free the object if it was doomed while held. */
1887 unhold_object(Obj_Entry *obj)
1890 assert(obj->holdcount > 0);
1891 if (--obj->holdcount == 0 && obj->unholdfree)
1892 release_object(obj);
/*
 * Post-load processing of -z nodelete / -z global objects in root's DAG.
 */
1896 process_z(Obj_Entry *root)
1898 const Objlist_Entry *elm;
1902 * Walk over object DAG and process every dependent object
1903 * that is marked as DF_1_NODELETE or DF_1_GLOBAL. They need
1904 * to grow their own DAG.
1906 * For DF_1_GLOBAL, DAG is required for symbol lookups in
1907 * symlook_global() to work.
1909 * For DF_1_NODELETE, the DAG should have its reference upped.
1911 STAILQ_FOREACH(elm, &root->dagmembers, link) {
1915 if (obj->z_nodelete && !obj->ref_nodel) {
1916 dbg("obj %s -z nodelete", obj->path);
1919 obj->ref_nodel = true;
1921 if (obj->z_global && objlist_find(&list_global, obj) == NULL) {
1922 dbg("obj %s -z global", obj->path);
1923 objlist_push_tail(&list_global, obj);
1929 * Initialize the dynamic linker. The argument is the address at which
1930 * the dynamic linker has been mapped into memory. The primary task of
1931 * this function is to relocate the dynamic linker.
1934 init_rtld(caddr_t mapbase, Elf_Auxinfo **aux_info)
1936 Obj_Entry objtmp; /* Temporary rtld object */
1937 const Elf_Ehdr *ehdr;
1938 const Elf_Dyn *dyn_rpath;
1939 const Elf_Dyn *dyn_soname;
1940 const Elf_Dyn *dyn_runpath;
1942 #ifdef RTLD_INIT_PAGESIZES_EARLY
1943 /* The page size is required by the dynamic memory allocator. */
1944 init_pagesizes(aux_info);
1948 * Conjure up an Obj_Entry structure for the dynamic linker.
1950 * The "path" member can't be initialized yet because string constants
1951 * cannot yet be accessed. Below we will set it correctly.
1953 memset(&objtmp, 0, sizeof(objtmp));
1956 objtmp.mapbase = mapbase;
1958 objtmp.relocbase = mapbase;
1961 objtmp.dynamic = rtld_dynamic(&objtmp);
/* Pass 1 only: string table is needed before pass 2 can run. */
1962 digest_dynamic1(&objtmp, 1, &dyn_rpath, &dyn_soname, &dyn_runpath);
1963 assert(objtmp.needed == NULL);
1964 #if !defined(__mips__)
1965 /* MIPS has a bogus DT_TEXTREL. */
1966 assert(!objtmp.textrel);
1969 * Temporarily put the dynamic linker entry into the object list, so
1970 * that symbols can be found.
1972 relocate_objects(&objtmp, true, &objtmp, 0, NULL);
1974 ehdr = (Elf_Ehdr *)mapbase;
1975 objtmp.phdr = (Elf_Phdr *)((char *)mapbase + ehdr->e_phoff);
1976 objtmp.phsize = ehdr->e_phnum * sizeof(objtmp.phdr[0]);
1978 /* Initialize the object list. */
1979 TAILQ_INIT(&obj_list);
1981 /* Now that non-local variables can be accessed, copy out obj_rtld. */
1982 memcpy(&obj_rtld, &objtmp, sizeof(obj_rtld));
1984 #ifndef RTLD_INIT_PAGESIZES_EARLY
1985 /* The page size is required by the dynamic memory allocator. */
1986 init_pagesizes(aux_info);
1989 if (aux_info[AT_OSRELDATE] != NULL)
1990 osreldate = aux_info[AT_OSRELDATE]->a_un.a_val;
/* Pass 2 is safe now that rtld is relocated and globals are usable. */
1992 digest_dynamic2(&obj_rtld, dyn_rpath, dyn_soname, dyn_runpath);
1994 /* Replace the path with a dynamically allocated copy. */
1995 obj_rtld.path = xstrdup(ld_path_rtld);
/* Hook up the debugger rendezvous structure (r_debug). */
1997 r_debug.r_brk = r_debug_state;
1998 r_debug.r_state = RT_CONSISTENT;
2002 * Retrieve the array of supported page sizes. The kernel provides the page
2003 * sizes in increasing order.
2006 init_pagesizes(Elf_Auxinfo **aux_info)
2008 static size_t psa[MAXPAGESIZES];
/* Preferred source: AT_PAGESIZES/AT_PAGESIZESLEN aux vector entries. */
2012 if (aux_info[AT_PAGESIZES] != NULL && aux_info[AT_PAGESIZESLEN] !=
2014 size = aux_info[AT_PAGESIZESLEN]->a_un.a_val;
2015 pagesizes = aux_info[AT_PAGESIZES]->a_un.a_ptr;
/* Otherwise query the hw.pagesizes sysctl. */
2018 if (sysctlnametomib("hw.pagesizes", mib, &len) == 0)
2021 /* As a fallback, retrieve the base page size. */
2022 size = sizeof(psa[0]);
2023 if (aux_info[AT_PAGESZ] != NULL) {
2024 psa[0] = aux_info[AT_PAGESZ]->a_un.a_val;
2028 mib[1] = HW_PAGESIZE;
2032 if (sysctl(mib, len, psa, &size, NULL, 0) == -1) {
2033 _rtld_error("sysctl for hw.pagesize(s) failed");
2039 npagesizes = size / sizeof(pagesizes[0]);
2040 /* Discard any invalid entries at the end of the array. */
2041 while (npagesizes > 0 && pagesizes[npagesizes - 1] == 0)
2046 * Add the init functions from a needed object list (and its recursive
2047 * needed objects) to "list". This is not used directly; it is a helper
2048 * function for initlist_add_objects(). The write lock must be held
2049 * when this function is called.
2052 initlist_add_neededs(Needed_Entry *needed, Objlist *list)
2054 /* Recursively process the successor needed objects. */
2055 if (needed->next != NULL)
2056 initlist_add_neededs(needed->next, list);
2058 /* Process the current needed object. */
2059 if (needed->obj != NULL)
2060 initlist_add_objects(needed->obj, needed->obj, list);
2064 * Scan all of the DAGs rooted in the range of objects from "obj" to
2065 * "tail" and add their init functions to "list". This recurses over
2066 * the DAGs and ensure the proper init ordering such that each object's
2067 * needed libraries are initialized before the object itself. At the
2068 * same time, this function adds the objects to the global finalization
2069 * list "list_fini" in the opposite order. The write lock must be
2070 * held when this function is called.
2073 initlist_add_objects(Obj_Entry *obj, Obj_Entry *tail, Objlist *list)
/* Guard against revisiting objects already scanned or initialized. */
2077 if (obj->init_scanned || obj->init_done)
2079 obj->init_scanned = true;
2081 /* Recursively process the successor objects. */
2082 nobj = globallist_next(obj);
2083 if (nobj != NULL && obj != tail)
2084 initlist_add_objects(nobj, tail, list);
2086 /* Recursively process the needed objects. */
2087 if (obj->needed != NULL)
2088 initlist_add_neededs(obj->needed, list);
2089 if (obj->needed_filtees != NULL)
2090 initlist_add_neededs(obj->needed_filtees, list);
2091 if (obj->needed_aux_filtees != NULL)
2092 initlist_add_neededs(obj->needed_aux_filtees, list);
2094 /* Add the object to the init list. */
2095 if (obj->preinit_array != (Elf_Addr)NULL || obj->init != (Elf_Addr)NULL ||
2096 obj->init_array != (Elf_Addr)NULL)
2097 objlist_push_tail(list, obj);
2099 /* Add the object to the global fini list in the reverse order. */
2100 if ((obj->fini != (Elf_Addr)NULL || obj->fini_array != (Elf_Addr)NULL)
2101 && !obj->on_fini_list) {
2102 objlist_push_head(&list_fini, obj);
2103 obj->on_fini_list = true;
/* Default FPTR_TARGET: a function pointer's target is its address. */
2108 #define FPTR_TARGET(f) ((Elf_Addr) (f))
/*
 * dlclose() every loaded filtee on the list, then free the list nodes.
 * Two passes: closing can recurse into unload_filtees() for the same
 * list, so all dlclose calls happen before any node is freed.
 */
2112 free_needed_filtees(Needed_Entry *n, RtldLockState *lockstate)
2114 Needed_Entry *needed, *needed1;
2116 for (needed = n; needed != NULL; needed = needed->next) {
2117 if (needed->obj != NULL) {
2118 dlclose_locked(needed->obj, lockstate);
2122 for (needed = n; needed != NULL; needed = needed1) {
2123 needed1 = needed->next;
/*
 * Release both the regular and auxiliary filtee lists of obj and mark
 * filtees as no longer loaded.
 */
2129 unload_filtees(Obj_Entry *obj, RtldLockState *lockstate)
2132 free_needed_filtees(obj->needed_filtees, lockstate);
2133 obj->needed_filtees = NULL;
2134 free_needed_filtees(obj->needed_aux_filtees, lockstate);
2135 obj->needed_aux_filtees = NULL;
2136 obj->filtees_loaded = false;
/*
 * dlopen each filtee on the list.  LD_LOADFLTR or -z loadfltr forces
 * RTLD_NOW; otherwise filtees are opened RTLD_LAZY.  Always RTLD_LOCAL.
 */
2140 load_filtee1(Obj_Entry *obj, Needed_Entry *needed, int flags,
2141 RtldLockState *lockstate)
2144 for (; needed != NULL; needed = needed->next) {
2145 needed->obj = dlopen_object(obj->strtab + needed->name, -1, obj,
2146 flags, ((ld_loadfltr || obj->z_loadfltr) ? RTLD_NOW : RTLD_LAZY) |
2147 RTLD_LOCAL, lockstate);
/*
 * Ensure obj's filtees are loaded (both lists), upgrading the lock
 * first.  Idempotent via the filtees_loaded flag.
 */
2152 load_filtees(Obj_Entry *obj, int flags, RtldLockState *lockstate)
2155 lock_restart_for_upgrade(lockstate);
2156 if (!obj->filtees_loaded) {
2157 load_filtee1(obj, obj->needed_filtees, flags, lockstate);
2158 load_filtee1(obj, obj->needed_aux_filtees, flags, lockstate);
2159 obj->filtees_loaded = true;
/*
 * Load every object on a DT_NEEDED list.  A load failure is fatal
 * unless tracing (ldd) or resolving filtees.
 */
2164 process_needed(Obj_Entry *obj, Needed_Entry *needed, int flags)
2168 for (; needed != NULL; needed = needed->next) {
2169 obj1 = needed->obj = load_object(obj->strtab + needed->name, -1, obj,
2170 flags & ~RTLD_LO_NOLOAD);
2171 if (obj1 == NULL && !ld_tracing && (flags & RTLD_LO_FILTEES) == 0)
2178 * Given a shared object, traverse its list of needed objects, and load
2179 * each of them. Returns 0 on success. Generates an error message and
2180 * returns -1 on failure.
2183 load_needed_objects(Obj_Entry *first, int flags)
2187 for (obj = first; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
2190 if (process_needed(obj, obj->needed, flags) == -1)
/*
 * Load the objects named in LD_PRELOAD (ld_preload), separated by
 * spaces, tabs, colons or semicolons.  Each preloaded object is marked
 * z_interpose so its symbols interpose those of later objects.
 */
2197 load_preload_objects(void)
2199 char *p = ld_preload;
2201 static const char delim[] = " \t:;";
2206 p += strspn(p, delim);
2207 while (*p != '\0') {
2208 size_t len = strcspn(p, delim);
2213 obj = load_object(p, -1, NULL, 0);
2215 return -1; /* XXX - cleanup */
2216 obj->z_interpose = true;
2219 p += strspn(p, delim);
2221 LD_UTRACE(UTRACE_PRELOAD_FINISHED, NULL, NULL, 0, 0, NULL);
/* Map NULL paths to a printable placeholder for diagnostics. */
2226 printable_path(const char *path)
2229 return (path == NULL ? "<unknown>" : path);
2233 * Load a shared object into memory, if it is not already loaded. The
2234 * object may be specified by name or by user-supplied file descriptor
2235 * fd_u. In the later case, the fd_u descriptor is not closed, but its
2238 * Returns a pointer to the Obj_Entry for the object. Returns NULL
2242 load_object(const char *name, int fd_u, const Obj_Entry *refobj, int flags)
/* Fast path: an already-loaded object matching by name. */
2251 TAILQ_FOREACH(obj, &obj_list, next) {
2252 if (obj->marker || obj->doomed)
2254 if (object_match_name(obj, name))
2258 path = find_library(name, refobj, &fd);
2266 * search_library_pathfds() opens a fresh file descriptor for the
2267 * library, so there is no need to dup().
2269 } else if (fd_u == -1) {
2271 * If we didn't find a match by pathname, or the name is not
2272 * supplied, open the file and check again by device and inode.
2273 * This avoids false mismatches caused by multiple links or ".."
2276 * To avoid a race, we open the file and use fstat() rather than
2279 if ((fd = open(path, O_RDONLY | O_CLOEXEC | O_VERIFY)) == -1) {
2280 _rtld_error("Cannot open \"%s\"", path);
/* Caller-supplied descriptor: work on a private CLOEXEC dup. */
2285 fd = fcntl(fd_u, F_DUPFD_CLOEXEC, 0);
2287 _rtld_error("Cannot dup fd");
2292 if (fstat(fd, &sb) == -1) {
2293 _rtld_error("Cannot fstat \"%s\"", printable_path(path));
/* Second duplicate check, this time by device and inode. */
2298 TAILQ_FOREACH(obj, &obj_list, next) {
2299 if (obj->marker || obj->doomed)
2301 if (obj->ino == sb.st_ino && obj->dev == sb.st_dev)
2304 if (obj != NULL && name != NULL) {
2305 object_add_name(obj, name);
/* RTLD_NOLOAD: report only, never map a new object. */
2310 if (flags & RTLD_LO_NOLOAD) {
2316 /* First use of this object, so we must map it in */
2317 obj = do_load_object(fd, name, path, &sb, flags);
/*
 * Map a not-yet-loaded object from fd, digest its metadata, and link
 * it onto the global object list.  Takes ownership of "path".
 */
2326 do_load_object(int fd, const char *name, char *path, struct stat *sbp,
2333 * but first, make sure that environment variables haven't been
2334 * used to circumvent the noexec flag on a filesystem.
2336 if (dangerous_ld_env) {
2337 if (fstatfs(fd, &fs) != 0) {
2338 _rtld_error("Cannot fstatfs \"%s\"", printable_path(path));
2341 if (fs.f_flags & MNT_NOEXEC) {
2342 _rtld_error("Cannot execute objects on %s\n", fs.f_mntonname);
2346 dbg("loading \"%s\"", printable_path(path));
2347 obj = map_object(fd, printable_path(path), sbp);
2352 * If DT_SONAME is present in the object, digest_dynamic2 already
2353 * added it to the object names.
2356 object_add_name(obj, name);
2358 digest_dynamic(obj, 0);
2359 dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d", obj->path,
2360 obj->valid_hash_sysv, obj->valid_hash_gnu, obj->dynsymcount);
/* Honor -z noopen unless this is a trace (ldd) or non-dlopen load. */
2361 if (obj->z_noopen && (flags & (RTLD_LO_DLOPEN | RTLD_LO_TRACE)) ==
2363 dbg("refusing to load non-loadable \"%s\"", obj->path);
2364 _rtld_error("Cannot dlopen non-loadable %s", obj->path);
2365 munmap(obj->mapbase, obj->mapsize);
2370 obj->dlopened = (flags & RTLD_LO_DLOPEN) != 0;
2371 TAILQ_INSERT_TAIL(&obj_list, obj, next);
2374 linkmap_add(obj); /* for GDB & dlinfo() */
2375 max_stack_flags |= obj->stack_flags;
2377 dbg(" %p .. %p: %s", obj->mapbase,
2378 obj->mapbase + obj->mapsize - 1, obj->path);
2380 dbg(" WARNING: %s has impure text", obj->path);
2381 LD_UTRACE(UTRACE_LOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0,
/*
 * Find the loaded object whose mapping contains "addr", or NULL if the
 * address falls in no object's [mapbase, mapbase + mapsize) range.
 */
2388 obj_from_addr(const void *addr)
2392 TAILQ_FOREACH(obj, &obj_list, next) {
2395 if (addr < (void *) obj->mapbase)
2397 if (addr < (void *) (obj->mapbase + obj->mapsize))
/*
 * Run the main object's DT_PREINIT_ARRAY functions.  (The enclosing
 * function's opening line is elided from this excerpt — presumably
 * preinit_main; confirm against the full source.)  Entries of 0 and 1
 * are skipped as invalid/sentinel values.
 */
2406 Elf_Addr *preinit_addr;
2409 preinit_addr = (Elf_Addr *)obj_main->preinit_array;
2410 if (preinit_addr == NULL)
2413 for (index = 0; index < obj_main->preinit_array_num; index++) {
2414 if (preinit_addr[index] != 0 && preinit_addr[index] != 1) {
2415 dbg("calling preinit function for %s at %p", obj_main->path,
2416 (void *)preinit_addr[index]);
2417 LD_UTRACE(UTRACE_INIT_CALL, obj_main, (void *)preinit_addr[index],
2418 0, 0, obj_main->path);
2419 call_init_pointer(obj_main, preinit_addr[index]);
2425 * Call the finalization functions for each of the objects in "list"
2426 * belonging to the DAG of "root" and referenced once. If NULL "root"
2427 * is specified, every finalization function will be called regardless
2428 * of the reference count and the list elements won't be freed. All of
2429 * the objects are expected to have non-NULL fini functions.
2432 objlist_call_fini(Objlist *list, Obj_Entry *root, RtldLockState *lockstate)
2436 Elf_Addr *fini_addr;
2439 assert(root == NULL || root->refcount == 1);
2442 root->doomed = true;
2445 * Preserve the current error message since a fini function might
2446 * call into the dynamic linker and overwrite it.
2448 saved_msg = errmsg_save();
2450 STAILQ_FOREACH(elm, list, link) {
/* Only fini objects in root's DAG with exactly one remaining ref. */
2451 if (root != NULL && (elm->obj->refcount != 1 ||
2452 objlist_find(&root->dagmembers, elm->obj) == NULL))
2454 /* Remove object from fini list to prevent recursive invocation. */
2455 STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link);
2456 /* Ensure that new references cannot be acquired. */
2457 elm->obj->doomed = true;
/* Hold the object and drop the bind lock around the user callback. */
2459 hold_object(elm->obj);
2460 lock_release(rtld_bind_lock, lockstate);
2462 * It is legal to have both DT_FINI and DT_FINI_ARRAY defined.
2463 * When this happens, DT_FINI_ARRAY is processed first.
2465 fini_addr = (Elf_Addr *)elm->obj->fini_array;
2466 if (fini_addr != NULL && elm->obj->fini_array_num > 0) {
/* DT_FINI_ARRAY entries run in reverse order; skip 0/1 sentinels. */
2467 for (index = elm->obj->fini_array_num - 1; index >= 0;
2469 if (fini_addr[index] != 0 && fini_addr[index] != 1) {
2470 dbg("calling fini function for %s at %p",
2471 elm->obj->path, (void *)fini_addr[index]);
2472 LD_UTRACE(UTRACE_FINI_CALL, elm->obj,
2473 (void *)fini_addr[index], 0, 0, elm->obj->path);
2474 call_initfini_pointer(elm->obj, fini_addr[index]);
2478 if (elm->obj->fini != (Elf_Addr)NULL) {
2479 dbg("calling fini function for %s at %p", elm->obj->path,
2480 (void *)elm->obj->fini);
2481 LD_UTRACE(UTRACE_FINI_CALL, elm->obj, (void *)elm->obj->fini,
2482 0, 0, elm->obj->path);
2483 call_initfini_pointer(elm->obj, elm->obj->fini);
2485 wlock_acquire(rtld_bind_lock, lockstate);
2486 unhold_object(elm->obj);
2487 /* No need to free anything if process is going down. */
2491 * We must restart the list traversal after every fini call
2492 * because a dlclose() call from the fini function or from
2493 * another thread might have modified the reference counts.
2497 } while (elm != NULL);
2498 errmsg_restore(saved_msg);
/*
 * NOTE(review): this chunk is an elided rendering (original line numbers are
 * embedded and many structural lines are missing); code left byte-identical,
 * comments only.
 *
 * Run the DT_INIT / DT_INIT_ARRAY functions for every object on "list",
 * dropping the bind lock around each user-code call and re-acquiring it
 * afterwards.  Objects already initialized (init_done) are skipped.
 */
2502 * Call the initialization functions for each of the objects in
2503 * "list". All of the objects are expected to have non-NULL init
2507 objlist_call_init(Objlist *list, RtldLockState *lockstate)
2512 Elf_Addr *init_addr;
2516 * Clean init_scanned flag so that objects can be rechecked and
2517 * possibly initialized earlier if any of vectors called below
2518 * cause the change by using dlopen.
2520 TAILQ_FOREACH(obj, &obj_list, next) {
2523 obj->init_scanned = false;
2527 * Preserve the current error message since an init function might
2528 * call into the dynamic linker and overwrite it.
2530 saved_msg = errmsg_save();
2531 STAILQ_FOREACH(elm, list, link) {
2532 if (elm->obj->init_done) /* Initialized early. */
/* Mark done *before* calling user code; see race note below. */
2535 * Race: other thread might try to use this object before current
2536 * one completes the initialization. Not much can be done here
2537 * without better locking.
2539 elm->obj->init_done = true;
/* Pin the object so a dlclose() from an init routine cannot free it. */
2540 hold_object(elm->obj);
2541 lock_release(rtld_bind_lock, lockstate);
2544 * It is legal to have both DT_INIT and DT_INIT_ARRAY defined.
2545 * When this happens, DT_INIT is processed first.
2547 if (elm->obj->init != (Elf_Addr)NULL) {
2548 dbg("calling init function for %s at %p", elm->obj->path,
2549 (void *)elm->obj->init);
2550 LD_UTRACE(UTRACE_INIT_CALL, elm->obj, (void *)elm->obj->init,
2551 0, 0, elm->obj->path);
2552 call_initfini_pointer(elm->obj, elm->obj->init);
2554 init_addr = (Elf_Addr *)elm->obj->init_array;
2555 if (init_addr != NULL) {
2556 for (index = 0; index < elm->obj->init_array_num; index++) {
/* 0 and 1 are conventional "no function" sentinels in init arrays. */
2557 if (init_addr[index] != 0 && init_addr[index] != 1) {
2558 dbg("calling init function for %s at %p", elm->obj->path,
2559 (void *)init_addr[index]);
2560 LD_UTRACE(UTRACE_INIT_CALL, elm->obj,
2561 (void *)init_addr[index], 0, 0, elm->obj->path);
2562 call_init_pointer(elm->obj, init_addr[index]);
2566 wlock_acquire(rtld_bind_lock, lockstate);
2567 unhold_object(elm->obj);
2569 errmsg_restore(saved_msg);
/*
 * Empty "list", unlinking each entry in turn.  (The free of the removed
 * entry is on an elided line — TODO confirm against upstream.)
 */
2573 objlist_clear(Objlist *list)
2577 while (!STAILQ_EMPTY(list)) {
2578 elm = STAILQ_FIRST(list);
2579 STAILQ_REMOVE_HEAD(list, link);
/*
 * Return the list entry whose ->obj matches "obj" (pointer identity),
 * or NULL if none (return statements are on elided lines).
 */
2584 static Objlist_Entry *
2585 objlist_find(Objlist *list, const Obj_Entry *obj)
2589 STAILQ_FOREACH(elm, list, link)
2590 if (elm->obj == obj)
/* Initialize an empty object list (body elided; presumably STAILQ_INIT). */
2596 objlist_init(Objlist *list)
/* Allocate a new entry for "obj" and insert it at the head of "list". */
2602 objlist_push_head(Objlist *list, Obj_Entry *obj)
2606 elm = NEW(Objlist_Entry);
2608 STAILQ_INSERT_HEAD(list, elm, link)
/* Allocate a new entry for "obj" and append it to the tail of "list". */
2612 objlist_push_tail(Objlist *list, Obj_Entry *obj)
2616 elm = NEW(Objlist_Entry);
2618 STAILQ_INSERT_TAIL(list, elm, link)
/*
 * Insert "obj" immediately after the entry for "listobj"; if "listobj"
 * is not on the list, fall back to appending at the tail.
 */
2622 objlist_put_after(Objlist *list, Obj_Entry *listobj, Obj_Entry *obj)
2624 Objlist_Entry *elm, *listelm;
2626 STAILQ_FOREACH(listelm, list, link) {
2627 if (listelm->obj == listobj)
2630 elm = NEW(Objlist_Entry);
2632 if (listelm != NULL)
2633 STAILQ_INSERT_AFTER(list, listelm, elm, link);
2635 STAILQ_INSERT_TAIL(list, elm, link)
/* Unlink (and presumably free — elided) the entry for "obj", if present. */
2639 objlist_remove(Objlist *list, Obj_Entry *obj)
2643 if ((elm = objlist_find(list, obj)) != NULL) {
2644 STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link);
/*
 * Relocate every member of root's DAG by calling relocate_object() on
 * each; error propagation/early exit is on elided lines.
 */
2650 * Relocate dag rooted in the specified object.
2651 * Returns 0 on success, or -1 on failure.
2655 relocate_object_dag(Obj_Entry *root, bool bind_now, Obj_Entry *rtldobj,
2656 int flags, RtldLockState *lockstate)
2662 STAILQ_FOREACH(elm, &root->dagmembers, link) {
2663 error = relocate_object(elm->obj, bind_now, rtldobj, flags,
/*
 * Toggle write permission on the read-only PT_LOAD segments of "obj" for
 * DT_TEXTREL processing: before relocation add PROT_WRITE, afterwards
 * restore the phdr-specified protection.  Returns -1 on mprotect failure
 * (return lines elided).
 */
2672 * Prepare for, or clean after, relocating an object marked with
2673 * DT_TEXTREL or DF_TEXTREL. Before relocating, all read-only
2674 * segments are remapped read-write. After relocations are done, the
2675 * segment's permissions are returned back to the modes specified in
2676 * the phdrs. If any relocation happened, or always for wired
2677 * program, COW is triggered.
2680 reloc_textrel_prot(Obj_Entry *obj, bool before)
2687 for (l = obj->phsize / sizeof(*ph), ph = obj->phdr; l > 0;
/* Only read-only loadable segments need the temporary PROT_WRITE. */
2689 if (ph->p_type != PT_LOAD || (ph->p_flags & PF_W) != 0)
2691 base = obj->relocbase + trunc_page(ph->p_vaddr);
2692 sz = round_page(ph->p_vaddr + ph->p_filesz) -
2693 trunc_page(ph->p_vaddr);
2694 prot = convert_prot(ph->p_flags) | (before ? PROT_WRITE : 0);
2695 if (mprotect(base, sz, prot) == -1) {
2696 _rtld_error("%s: Cannot write-%sable text segment: %s",
2697 obj->path, before ? "en" : "dis",
2698 rtld_strerror(errno));
/*
 * Perform all relocation phases for a single object: non-PLT, PLT,
 * optional eager jump-slot binding, deferred non-PLT IFUNCs, then RELRO
 * enforcement.  Error paths (returns of -1) are on elided lines.
 */
2706 * Relocate single object.
2707 * Returns 0 on success, or -1 on failure.
2710 relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
2711 int flags, RtldLockState *lockstate)
2716 obj->relocated = true;
2718 dbg("relocating \"%s\"", obj->path);
2720 if (obj->symtab == NULL || obj->strtab == NULL ||
2721 !(obj->valid_hash_sysv || obj->valid_hash_gnu)) {
2722 _rtld_error("%s: Shared object has no run-time symbol table",
2727 /* There are relocations to the write-protected text segment. */
2728 if (obj->textrel && reloc_textrel_prot(obj, true) != 0)
2731 /* Process the non-PLT non-IFUNC relocations. */
2732 if (reloc_non_plt(obj, rtldobj, flags, lockstate))
2735 /* Re-protected the text segment. */
2736 if (obj->textrel && reloc_textrel_prot(obj, false) != 0)
2739 /* Set the special PLT or GOT entries. */
2742 /* Process the PLT relocations. */
2743 if (reloc_plt(obj) == -1)
2745 /* Relocate the jump slots if we are doing immediate binding. */
2746 if (obj->bind_now || bind_now)
2747 if (reloc_jmpslots(obj, flags, lockstate) == -1)
2751 * Process the non-PLT IFUNC relocations. The relocations are
2752 * processed in two phases, because IFUNC resolvers may
2753 * reference other symbols, which must be readily processed
2754 * before resolvers are called.
2756 if (obj->non_plt_gnu_ifunc &&
2757 reloc_non_plt(obj, rtldobj, flags | SYMLOOK_IFUNC, lockstate))
2760 if (!obj->mainprog && obj_enforce_relro(obj) == -1)
2764 * Set up the magic number and version in the Obj_Entry. These
2765 * were checked in the crt1.o from the original ElfKit, so we
2766 * set them for backward compatibility.
2768 obj->magic = RTLD_MAGIC;
2769 obj->version = RTLD_VERSION;
/*
 * Relocate every object from "first" to the end of the global object
 * list; accumulates/propagates relocate_object() errors (elided lines).
 */
2775 * Relocate newly-loaded shared objects. The argument is a pointer to
2776 * the Obj_Entry for the first such object. All objects from the first
2777 * to the end of the list of objects are relocated. Returns 0 on success,
2781 relocate_objects(Obj_Entry *first, bool bind_now, Obj_Entry *rtldobj,
2782 int flags, RtldLockState *lockstate)
2787 for (error = 0, obj = first; obj != NULL;
2788 obj = TAILQ_NEXT(obj, next)) {
2791 error = relocate_object(obj, bind_now, rtldobj, flags,
/*
 * Resolve IRELATIVE relocations eagerly and, if binding now, the
 * STT_GNU_IFUNC jump slots for one object.  Returns -1 on failure
 * (success return elided).
 */
2800 * The handling of R_MACHINE_IRELATIVE relocations and jumpslots
2801 * referencing STT_GNU_IFUNC symbols is postponed till the other
2802 * relocations are done. The indirect functions specified as
2803 * ifunc are allowed to call other symbols, so we need to have
2804 * objects relocated before asking for resolution from indirects.
2806 * The R_MACHINE_IRELATIVE slots are resolved in greedy fashion,
2807 * instead of the usual lazy handling of PLT slots. It is
2808 * consistent with how GNU does it.
2811 resolve_object_ifunc(Obj_Entry *obj, bool bind_now, int flags,
2812 RtldLockState *lockstate)
2814 if (obj->irelative && reloc_iresolve(obj, lockstate) == -1)
2816 if ((obj->bind_now || bind_now) && obj->gnu_ifunc &&
2817 reloc_gnu_ifunc(obj, flags, lockstate) == -1)
/* Apply resolve_object_ifunc() to "first" and every object after it. */
2823 resolve_objects_ifunc(Obj_Entry *first, bool bind_now, int flags,
2824 RtldLockState *lockstate)
2828 for (obj = first; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
2831 if (resolve_object_ifunc(obj, bind_now, flags, lockstate) == -1)
/* Apply resolve_object_ifunc() to each object on an init list. */
2838 initlist_objects_ifunc(Objlist *list, bool bind_now, int flags,
2839 RtldLockState *lockstate)
2843 STAILQ_FOREACH(elm, list, link) {
2844 if (resolve_object_ifunc(elm->obj, bind_now, flags,
/*
 * atexit() hook: under the bind lock, run all registered fini functions.
 * (Function header is on an elided line — presumably rtld_exit(void);
 * TODO confirm against upstream.)
 */
2852 * Cleanup procedure. It will be called (by the atexit mechanism) just
2853 * before the process exits.
2858 RtldLockState lockstate;
2860 wlock_acquire(rtld_bind_lock, &lockstate);
/* root == NULL: run finalizers for every object on list_fini. */
2862 objlist_call_fini(&list_fini, NULL, &lockstate);
2863 /* No need to remove the items from the list, since we are exiting. */
2864 if (!libmap_disable)
2866 lock_release(rtld_bind_lock, &lockstate);
/*
 * Split "path" on ':' or ';', translate each component through libmap
 * (lm_findn), and invoke "callback" on the (possibly translated)
 * component.  A non-NULL callback result terminates the walk (the
 * return is on an elided line).
 */
2870 * Iterate over a search path, translate each element, and invoke the
2871 * callback on the result.
2874 path_enumerate(const char *path, path_enum_proc callback, void *arg)
/* Skip leading separators so empty components are ignored. */
2880 path += strspn(path, ":;");
2881 while (*path != '\0') {
2885 len = strcspn(path, ":;");
2886 trans = lm_findn(NULL, path, len);
2888 res = callback(trans, strlen(trans), arg);
2890 res = callback(path, len, arg);
2896 path += strspn(path, ":;");
/*
 * Parameter bundle passed through path_enumerate() to try_library_path()
 * (members — name/namelen/buffer/buflen — are on elided lines).
 */
2902 struct try_library_args {
/*
 * path_enumerate() callback: build "<dir>/<name>" into the shared scratch
 * buffer and, if the file exists, return a freshly malloc'ed copy of the
 * full pathname (NULL-return path elided).  Relative dirs are honoured
 * only for trusted (non-setuid) processes.
 */
2910 try_library_path(const char *dir, size_t dirlen, void *param)
2912 struct try_library_args *arg;
2915 if (*dir == '/' || trust) {
/* dir + '/' + name + NUL must fit in the caller-provided buffer. */
2918 if (dirlen + 1 + arg->namelen + 1 > arg->buflen)
2921 pathname = arg->buffer;
2922 strncpy(pathname, dir, dirlen);
2923 pathname[dirlen] = '/';
2924 strcpy(pathname + dirlen + 1, arg->name);
2926 dbg(" Trying \"%s\"", pathname);
2927 if (access(pathname, F_OK) == 0) { /* We found it */
/* Return a heap copy; the scratch buffer is reused for later dirs. */
2928 pathname = xmalloc(dirlen + 1 + arg->namelen + 1);
2929 strcpy(pathname, arg->buffer);
/*
 * Search each directory in "path" for "name" via try_library_path();
 * returns the malloc'ed pathname found, or NULL (free of the PATH_MAX
 * scratch buffer and the return are on elided lines).
 */
2937 search_library_path(const char *name, const char *path)
2940 struct try_library_args arg;
2946 arg.namelen = strlen(name);
2947 arg.buffer = xmalloc(PATH_MAX);
2948 arg.buflen = PATH_MAX;
2950 p = path_enumerate(path, try_library_path, &arg);
/*
 * Walk the colon-separated FD list in LD_LIBRARY_PATH_FDS and openat()
 * "name" relative to each descriptor.  On success stores the fd via
 * *fdp (elided) and records a "#<fd>/<name>" pseudo-path for messages.
 * Refused for setuid processes and for absolute names.
 */
2959 * Finds the library with the given name using the directory descriptors
2960 * listed in the LD_LIBRARY_PATH_FDS environment variable.
2962 * Returns a freshly-opened close-on-exec file descriptor for the library,
2963 * or -1 if the library cannot be found.
2966 search_library_pathfds(const char *name, const char *path, int *fdp)
2968 char *envcopy, *fdstr, *found, *last_token;
2972 dbg("%s('%s', '%s', fdp)", __func__, name, path);
2974 /* Don't load from user-specified libdirs into setuid binaries. */
2978 /* We can't do anything if LD_LIBRARY_PATH_FDS isn't set. */
2982 /* LD_LIBRARY_PATH_FDS only works with relative paths. */
2983 if (name[0] == '/') {
2984 dbg("Absolute path (%s) passed to %s", name, __func__);
2989 * Use strtok_r() to walk the FD:FD:FD list. This requires a local
2990 * copy of the path, as strtok_r rewrites separator tokens
2994 envcopy = xstrdup(path);
2995 for (fdstr = strtok_r(envcopy, ":", &last_token); fdstr != NULL;
2996 fdstr = strtok_r(NULL, ":", &last_token)) {
2997 dirfd = parse_libdir(fdstr);
3000 fd = __sys_openat(dirfd, name, O_RDONLY | O_CLOEXEC | O_VERIFY);
/* "#%d/%s" is a synthetic path for diagnostics, not a real file. */
3003 len = strlen(fdstr) + strlen(name) + 3;
3004 found = xmalloc(len);
3005 if (rtld_snprintf(found, len, "#%d/%s", dirfd, name) < 0) {
3006 _rtld_error("error generating '%d/%s'",
3010 dbg("open('%s') => %d", found, fd);
/*
 * Public dlclose(3): take the bind lock exclusively and delegate to
 * dlclose_locked() (error return elided).
 */
3021 dlclose(void *handle)
3023 RtldLockState lockstate;
3026 wlock_acquire(rtld_bind_lock, &lockstate);
3027 error = dlclose_locked(handle, &lockstate);
3028 lock_release(rtld_bind_lock, &lockstate);
/*
 * Core of dlclose(): validate the handle, drop one dlopen reference,
 * and when the object becomes otherwise unreferenced run its fini
 * functions and unload it.  Caller holds the bind lock exclusively.
 */
3033 dlclose_locked(void *handle, RtldLockState *lockstate)
3037 root = dlcheck(handle);
3040 LD_UTRACE(UTRACE_DLCLOSE_START, handle, NULL, 0, root->dl_refcount,
3043 /* Unreference the object and its dependencies. */
3044 root->dl_refcount--;
3046 if (root->refcount == 1) {
3048 * The object will be no longer referenced, so we must unload it.
3049 * First, call the fini functions.
3051 objlist_call_fini(&list_fini, root, lockstate);
3055 /* Finish cleaning up the newly-unreferenced objects. */
3056 GDB_STATE(RT_DELETE,&root->linkmap);
3057 unload_object(root, lockstate);
3058 GDB_STATE(RT_CONSISTENT,NULL);
3062 LD_UTRACE(UTRACE_DLCLOSE_STOP, handle, NULL, 0, 0, NULL)
/*
 * Body of dlerror(3) (header on an elided line): return the pending
 * error message and clear it, so each error is reported once.
 */
3069 char *msg = error_message;
3070 error_message = NULL;
/*
 * Deprecated no-op kept for ABI compatibility: all lock-installation
 * callbacks are ignored; only the previous context is torn down and the
 * new context/destructor pair remembered for the next call.
 */
3075 * This function is deprecated and has no effect.
3078 dllockinit(void *context,
3079 void *(*lock_create)(void *context),
3080 void (*rlock_acquire)(void *lock),
3081 void (*wlock_acquire)(void *lock),
3082 void (*lock_release)(void *lock),
3083 void (*lock_destroy)(void *lock),
3084 void (*context_destroy)(void *context))
3086 static void *cur_context;
3087 static void (*cur_context_destroy)(void *);
3089 /* Just destroy the context from the previous call, if necessary. */
3090 if (cur_context_destroy != NULL)
3091 cur_context_destroy(cur_context);
3092 cur_context = context;
3093 cur_context_destroy = context_destroy;
/* Public dlopen(3): path-based open, fd = -1. */
3097 dlopen(const char *name, int mode)
3100 return (rtld_dlopen(name, -1, mode));
/* fdlopen(3): open a shared object from an already-open descriptor. */
3104 fdlopen(int fd, int mode)
3107 return (rtld_dlopen(NULL, fd, mode));
/*
 * Common dlopen()/fdlopen() front end: set up tracing, translate the
 * RTLD_* mode bits into internal RTLD_LO_* flags, then call
 * dlopen_object() with the main program as the referencing object.
 */
3111 rtld_dlopen(const char *name, int fd, int mode)
3113 RtldLockState lockstate;
3116 LD_UTRACE(UTRACE_DLOPEN_START, NULL, NULL, 0, mode, name);
3117 ld_tracing = (mode & RTLD_TRACE) == 0 ? NULL : "1";
3118 if (ld_tracing != NULL) {
3119 rlock_acquire(rtld_bind_lock, &lockstate);
/* sigsetjmp allows upgrading to a write lock if lazy binding recurses. */
3120 if (sigsetjmp(lockstate.env, 0) != 0)
3121 lock_upgrade(rtld_bind_lock, &lockstate);
3122 environ = (char **)*get_program_var_addr("environ", &lockstate);
3123 lock_release(rtld_bind_lock, &lockstate);
3125 lo_flags = RTLD_LO_DLOPEN;
3126 if (mode & RTLD_NODELETE)
3127 lo_flags |= RTLD_LO_NODELETE;
3128 if (mode & RTLD_NOLOAD)
3129 lo_flags |= RTLD_LO_NOLOAD;
3130 if (ld_tracing != NULL)
3131 lo_flags |= RTLD_LO_TRACE;
3133 return (dlopen_object(name, fd, obj_main, lo_flags,
3134 mode & (RTLD_MODEMASK | RTLD_GLOBAL), NULL));
/*
 * Failure path for dlopen_object(): once the object's refcount drops
 * to zero (decrement elided), unload it and its now-unreferenced deps.
 */
3138 dlopen_cleanup(Obj_Entry *obj, RtldLockState *lockstate)
3143 if (obj->refcount == 0)
3144 unload_object(obj, lockstate)
/*
 * Workhorse behind dlopen()/fdlopen()/filtee loading: load the object
 * and its needed objects, relocate the new DAG, resolve IFUNCs, run
 * init functions (unless RTLD_LO_EARLY), and keep gdb's view (GDB_STATE)
 * consistent throughout.  If "lockstate" is NULL a local write lock is
 * taken and released before returning.
 */
3148 dlopen_object(const char *name, int fd, Obj_Entry *refobj, int lo_flags,
3149 int mode, RtldLockState *lockstate)
3151 Obj_Entry *old_obj_tail;
3154 RtldLockState mlockstate;
3157 objlist_init(&initlist);
3159 if (lockstate == NULL && !(lo_flags & RTLD_LO_EARLY)) {
3160 wlock_acquire(rtld_bind_lock, &mlockstate);
3161 lockstate = &mlockstate;
3163 GDB_STATE(RT_ADD,NULL);
/* Remember the current tail so we can tell if load_object added anything. */
3165 old_obj_tail = globallist_curr(TAILQ_LAST(&obj_list, obj_entry_q));
3167 if (name == NULL && fd == -1) {
3171 obj = load_object(name, fd, refobj, lo_flags);
3176 if (mode & RTLD_GLOBAL && objlist_find(&list_global, obj) == NULL)
3177 objlist_push_tail(&list_global, obj);
3178 if (globallist_next(old_obj_tail) != NULL) {
3179 /* We loaded something new. */
3180 assert(globallist_next(old_obj_tail) == obj);
3181 result = load_needed_objects(obj,
3182 lo_flags & (RTLD_LO_DLOPEN | RTLD_LO_EARLY));
3186 result = rtld_verify_versions(&obj->dagmembers);
3187 if (result != -1 && ld_tracing)
3189 if (result == -1 || relocate_object_dag(obj,
3190 (mode & RTLD_MODEMASK) == RTLD_NOW, &obj_rtld,
3191 (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0,
3193 dlopen_cleanup(obj, lockstate);
3195 } else if (lo_flags & RTLD_LO_EARLY) {
3197 * Do not call the init functions for early loaded
3198 * filtees. The image is still not initialized enough
3201 * Our object is found by the global object list and
3202 * will be ordered among all init calls done right
3203 * before transferring control to main.
3206 /* Make list of init functions to call. */
3207 initlist_add_objects(obj, obj, &initlist);
3210 * Process all no_delete or global objects here, given
3211 * them own DAGs to prevent their dependencies from being
3212 * unloaded. This has to be done after we have loaded all
3213 * of the dependencies, so that we do not miss any.
3219 * Bump the reference counts for objects on this DAG. If
3220 * this is the first dlopen() call for the object that was
3221 * already loaded as a dependency, initialize the dag
3227 if ((lo_flags & RTLD_LO_TRACE) != 0)
/* RTLD_NODELETE (or DF_1_NODELETE) pins the DAG permanently. */
3230 if (obj != NULL && ((lo_flags & RTLD_LO_NODELETE) != 0 ||
3231 obj->z_nodelete) && !obj->ref_nodel) {
3232 dbg("obj %s nodelete", obj->path);
3234 obj->z_nodelete = obj->ref_nodel = true;
3238 LD_UTRACE(UTRACE_DLOPEN_STOP, obj, NULL, 0, obj ? obj->dl_refcount : 0,
3240 GDB_STATE(RT_CONSISTENT,obj ? &obj->linkmap : NULL);
3242 if (!(lo_flags & RTLD_LO_EARLY)) {
3243 map_stacks_exec(lockstate);
3246 if (initlist_objects_ifunc(&initlist, (mode & RTLD_MODEMASK) == RTLD_NOW,
3247 (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0,
/* IFUNC resolution failed: roll everything back and bail out. */
3249 objlist_clear(&initlist);
3250 dlopen_cleanup(obj, lockstate);
3251 if (lockstate == &mlockstate)
3252 lock_release(rtld_bind_lock, lockstate);
3256 if (!(lo_flags & RTLD_LO_EARLY)) {
3257 /* Call the init functions. */
3258 objlist_call_init(&initlist, lockstate);
3260 objlist_clear(&initlist);
3261 if (lockstate == &mlockstate)
3262 lock_release(rtld_bind_lock, lockstate);
3265 trace_loaded_objects(obj);
3266 if (lockstate == &mlockstate)
3267 lock_release(rtld_bind_lock, lockstate);
/*
 * Common implementation of dlsym()/dlfunc()/dlvsym(): look "name" up in
 * the scope implied by "handle" (NULL = caller's object, RTLD_NEXT /
 * RTLD_SELF / RTLD_DEFAULT pseudo-handles, or a dlopen DAG) and convert
 * the found symbol to an address the caller can use directly (function
 * pointer, IFUNC resolution, TLS address, or relocated data address).
 */
3272 do_dlsym(void *handle, const char *name, void *retaddr, const Ver_Entry *ve,
3276 const Obj_Entry *obj, *defobj;
3279 RtldLockState lockstate;
3286 symlook_init(&req, name);
3288 req.flags = flags | SYMLOOK_IN_PLT;
3289 req.lockstate = &lockstate;
3291 LD_UTRACE(UTRACE_DLSYM_START, handle, NULL, 0, 0, name);
3292 rlock_acquire(rtld_bind_lock, &lockstate);
3293 if (sigsetjmp(lockstate.env, 0) != 0)
3294 lock_upgrade(rtld_bind_lock, &lockstate);
3295 if (handle == NULL || handle == RTLD_NEXT ||
3296 handle == RTLD_DEFAULT || handle == RTLD_SELF) {
/* Pseudo-handles are interpreted relative to the calling object. */
3298 if ((obj = obj_from_addr(retaddr)) == NULL) {
3299 _rtld_error("Cannot determine caller's shared object");
3300 lock_release(rtld_bind_lock, &lockstate);
3301 LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name);
3304 if (handle == NULL) { /* Just the caller's shared object. */
3305 res = symlook_obj(&req, obj);
3308 defobj = req.defobj_out;
3310 } else if (handle == RTLD_NEXT || /* Objects after caller's */
3311 handle == RTLD_SELF) { /* ... caller included */
3312 if (handle == RTLD_NEXT)
3313 obj = globallist_next(obj);
3314 for (; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
3317 res = symlook_obj(&req, obj);
/* Keep searching past weak definitions for a strong one. */
3320 ELF_ST_BIND(req.sym_out->st_info) != STB_WEAK) {
3322 defobj = req.defobj_out;
3323 if (ELF_ST_BIND(def->st_info) != STB_WEAK)
3329 * Search the dynamic linker itself, and possibly resolve the
3330 * symbol from there. This is how the application links to
3331 * dynamic linker services such as dlopen.
3333 if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
3334 res = symlook_obj(&req, &obj_rtld);
3337 defobj = req.defobj_out;
3341 assert(handle == RTLD_DEFAULT);
3342 res = symlook_default(&req, obj);
3344 defobj = req.defobj_out;
3349 if ((obj = dlcheck(handle)) == NULL) {
3350 lock_release(rtld_bind_lock, &lockstate);
3351 LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name);
3355 donelist_init(&donelist);
3356 if (obj->mainprog) {
3357 /* Handle obtained by dlopen(NULL, ...) implies global scope. */
3358 res = symlook_global(&req, &donelist);
3361 defobj = req.defobj_out;
3364 * Search the dynamic linker itself, and possibly resolve the
3365 * symbol from there. This is how the application links to
3366 * dynamic linker services such as dlopen.
3368 if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
3369 res = symlook_obj(&req, &obj_rtld);
3372 defobj = req.defobj_out;
3377 /* Search the whole DAG rooted at the given object. */
3378 res = symlook_list(&req, &obj->dagmembers, &donelist);
3381 defobj = req.defobj_out;
3387 lock_release(rtld_bind_lock, &lockstate);
3390 * The value required by the caller is derived from the value
3391 * of the symbol. this is simply the relocated value of the
3394 if (ELF_ST_TYPE(def->st_info) == STT_FUNC)
3395 sym = make_function_pointer(def, defobj);
3396 else if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
3397 sym = rtld_resolve_ifunc(defobj, def);
3398 else if (ELF_ST_TYPE(def->st_info) == STT_TLS) {
/* TLS symbols need a per-thread address via __tls_get_addr(). */
3399 ti.ti_module = defobj->tlsindex;
3400 ti.ti_offset = def->st_value;
3401 sym = __tls_get_addr(&ti);
3403 sym = defobj->relocbase + def->st_value;
3404 LD_UTRACE(UTRACE_DLSYM_STOP, handle, sym, 0, 0, name);
3408 _rtld_error("Undefined symbol \"%s\"", name);
3409 lock_release(rtld_bind_lock, &lockstate);
3410 LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name);
/* Public dlsym(3): unversioned lookup attributed to the caller's object. */
3415 dlsym(void *handle, const char *name)
3417 return do_dlsym(handle, name, __builtin_return_address(0), NULL,
/*
 * dlfunc(3): like dlsym() but returns a function pointer via a union
 * (declaration elided) to avoid the object/function pointer cast.
 */
3422 dlfunc(void *handle, const char *name)
3429 rv.d = do_dlsym(handle, name, __builtin_return_address(0), NULL,
/* dlvsym(3): versioned lookup — builds a Ver_Entry for "version". */
3435 dlvsym(void *handle, const char *name, const char *version)
3439 ventry.name = version;
3441 ventry.hash = elf_hash(version);
3443 return do_dlsym(handle, name, __builtin_return_address(0), &ventry,
/*
 * Fill *phdr_info for the object containing "addr"; sets an rtld error
 * and fails (return elided) when no object covers the address.
 */
3448 _rtld_addr_phdr(const void *addr, struct dl_phdr_info *phdr_info)
3450 const Obj_Entry *obj;
3451 RtldLockState lockstate;
3453 rlock_acquire(rtld_bind_lock, &lockstate);
3454 obj = obj_from_addr(addr);
3456 _rtld_error("No shared object contains address");
3457 lock_release(rtld_bind_lock, &lockstate);
3460 rtld_fill_dl_phdr_info(obj, phdr_info);
3461 lock_release(rtld_bind_lock, &lockstate);
/*
 * dladdr(3): identify the object containing "addr" and scan its dynamic
 * symbol table for the nearest symbol at or below the address, filling
 * the Dl_info fields.
 */
3466 dladdr(const void *addr, Dl_info *info)
3468 const Obj_Entry *obj;
3471 unsigned long symoffset;
3472 RtldLockState lockstate;
3474 rlock_acquire(rtld_bind_lock, &lockstate);
3475 obj = obj_from_addr(addr);
3477 _rtld_error("No shared object contains address");
3478 lock_release(rtld_bind_lock, &lockstate);
3481 info->dli_fname = obj->path;
3482 info->dli_fbase = obj->mapbase;
3483 info->dli_saddr = (void *)0;
3484 info->dli_sname = NULL;
3487 * Walk the symbol list looking for the symbol whose address is
3488 * closest to the address sent in.
3490 for (symoffset = 0; symoffset < obj->dynsymcount; symoffset++) {
3491 def = obj->symtab + symoffset;
3494 * For skip the symbol if st_shndx is either SHN_UNDEF or
3497 if (def->st_shndx == SHN_UNDEF || def->st_shndx == SHN_COMMON)
3501 * If the symbol is greater than the specified address, or if it
3502 * is further away from addr than the current nearest symbol,
3505 symbol_addr = obj->relocbase + def->st_value;
3506 if (symbol_addr > addr || symbol_addr < info->dli_saddr)
3509 /* Update our idea of the nearest symbol. */
3510 info->dli_sname = obj->strtab + def->st_name;
3511 info->dli_saddr = symbol_addr;
/* Exact hit — no closer symbol can exist, stop scanning. */
3514 if (info->dli_saddr == addr)
3517 lock_release(rtld_bind_lock, &lockstate);
/*
 * dlinfo(3): dispatch on "request" for the object named by "handle"
 * (NULL / RTLD_SELF meaning the caller's own object).
 */
3522 dlinfo(void *handle, int request, void *p)
3524 const Obj_Entry *obj;
3525 RtldLockState lockstate;
3528 rlock_acquire(rtld_bind_lock, &lockstate);
3530 if (handle == NULL || handle == RTLD_SELF) {
3533 retaddr = __builtin_return_address(0); /* __GNUC__ only */
3534 if ((obj = obj_from_addr(retaddr)) == NULL)
3535 _rtld_error("Cannot determine caller's shared object");
3537 obj = dlcheck(handle);
3540 lock_release(rtld_bind_lock, &lockstate);
3546 case RTLD_DI_LINKMAP:
3547 *((struct link_map const **)p) = &obj->linkmap;
3549 case RTLD_DI_ORIGIN:
3550 error = rtld_dirname(obj->path, p);
3553 case RTLD_DI_SERINFOSIZE:
3554 case RTLD_DI_SERINFO:
3555 error = do_search_info(obj, request, (struct dl_serinfo *)p);
3559 _rtld_error("Invalid request %d passed to dlinfo()", request);
3563 lock_release(rtld_bind_lock, &lockstate);
/* Populate a dl_phdr_info from an Obj_Entry for dl_iterate_phdr(). */
3569 rtld_fill_dl_phdr_info(const Obj_Entry *obj, struct dl_phdr_info *phdr_info)
3572 phdr_info->dlpi_addr = (Elf_Addr)obj->relocbase;
3573 phdr_info->dlpi_name = obj->path;
3574 phdr_info->dlpi_phdr = obj->phdr;
3575 phdr_info->dlpi_phnum = obj->phsize / sizeof(obj->phdr[0]);
3576 phdr_info->dlpi_tls_modid = obj->tlsindex;
3577 phdr_info->dlpi_tls_data = obj->tlsinit;
/* dlpi_adds/dlpi_subs let callers detect load/unload between calls. */
3578 phdr_info->dlpi_adds = obj_loads;
3579 phdr_info->dlpi_subs = obj_loads - obj_count;
/*
 * dl_iterate_phdr(3): walk the global object list, calling "callback"
 * for each object with the bind lock dropped.  A marker entry inserted
 * after the current object keeps the iteration position stable while
 * the callback runs and the list may change.  Finishes with an extra
 * callback for rtld itself.
 */
3583 dl_iterate_phdr(__dl_iterate_hdr_callback callback, void *param)
3585 struct dl_phdr_info phdr_info;
3586 Obj_Entry *obj, marker;
3587 RtldLockState bind_lockstate, phdr_lockstate;
3590 init_marker(&marker);
/* phdr lock serializes concurrent iterations; bind lock guards the list. */
3593 wlock_acquire(rtld_phdr_lock, &phdr_lockstate);
3594 wlock_acquire(rtld_bind_lock, &bind_lockstate);
3595 for (obj = globallist_curr(TAILQ_FIRST(&obj_list)); obj != NULL;) {
3596 TAILQ_INSERT_AFTER(&obj_list, obj, &marker, next);
3597 rtld_fill_dl_phdr_info(obj, &phdr_info);
3599 lock_release(rtld_bind_lock, &bind_lockstate);
3601 error = callback(&phdr_info, sizeof phdr_info, param);
3603 wlock_acquire(rtld_bind_lock, &bind_lockstate);
3605 obj = globallist_next(&marker);
3606 TAILQ_REMOVE(&obj_list, &marker, next);
3608 lock_release(rtld_bind_lock, &bind_lockstate);
3609 lock_release(rtld_phdr_lock, &phdr_lockstate);
3615 rtld_fill_dl_phdr_info(&obj_rtld, &phdr_info);
3616 lock_release(rtld_bind_lock, &bind_lockstate);
3617 error = callback(&phdr_info, sizeof(phdr_info), param);
3619 lock_release(rtld_phdr_lock, &phdr_lockstate);
/*
 * path_enumerate() callback used by do_search_info(): in the
 * RTLD_DI_SERINFOSIZE pass just accumulate counts/sizes; in the
 * RTLD_DI_SERINFO pass write a dl_serpath entry and copy the directory
 * name into the caller-provided string space.
 */
3624 fill_search_info(const char *dir, size_t dirlen, void *param)
3626 struct fill_search_info_args *arg;
3630 if (arg->request == RTLD_DI_SERINFOSIZE) {
3631 arg->serinfo->dls_cnt ++;
3632 arg->serinfo->dls_size += sizeof(struct dl_serpath) + dirlen + 1;
3634 struct dl_serpath *s_entry;
3636 s_entry = arg->serpath;
3637 s_entry->dls_name = arg->strspace;
3638 s_entry->dls_flags = arg->flags;
3640 strncpy(arg->strspace, dir, dirlen);
3641 arg->strspace[dirlen] = '\0';
3643 arg->strspace += dirlen + 1;
/*
 * Implement RTLD_DI_SERINFOSIZE / RTLD_DI_SERINFO for dlinfo(): first a
 * sizing pass over all search locations (rpath, LD_LIBRARY_PATH,
 * runpath, hints, default path), then — for RTLD_DI_SERINFO — a second
 * pass that fills the caller's dl_serinfo, verifying the caller sized
 * it with a prior SERINFOSIZE request.
 */
3651 do_search_info(const Obj_Entry *obj, int request, struct dl_serinfo *info)
3653 struct dl_serinfo _info;
3654 struct fill_search_info_args args;
3656 args.request = RTLD_DI_SERINFOSIZE;
3657 args.serinfo = &_info;
3659 _info.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
/* Sizing pass: same enumeration order as the fill pass below. */
3662 path_enumerate(obj->rpath, fill_search_info, &args);
3663 path_enumerate(ld_library_path, fill_search_info, &args);
3664 path_enumerate(obj->runpath, fill_search_info, &args);
3665 path_enumerate(gethints(obj->z_nodeflib), fill_search_info, &args);
3666 if (!obj->z_nodeflib)
3667 path_enumerate(ld_standard_library_path, fill_search_info, &args);
3670 if (request == RTLD_DI_SERINFOSIZE) {
3671 info->dls_size = _info.dls_size;
3672 info->dls_cnt = _info.dls_cnt;
3676 if (info->dls_cnt != _info.dls_cnt || info->dls_size != _info.dls_size) {
3677 _rtld_error("Uninitialized Dl_serinfo struct passed to dlinfo()");
3681 args.request = RTLD_DI_SERINFO;
3682 args.serinfo = info;
3683 args.serpath = &info->dls_serpath[0];
/* Strings are packed immediately after the dl_serpath array. */
3684 args.strspace = (char *)&info->dls_serpath[_info.dls_cnt];
3686 args.flags = LA_SER_RUNPATH;
3687 if (path_enumerate(obj->rpath, fill_search_info, &args) != NULL)
3690 args.flags = LA_SER_LIBPATH;
3691 if (path_enumerate(ld_library_path, fill_search_info, &args) != NULL)
3694 args.flags = LA_SER_RUNPATH;
3695 if (path_enumerate(obj->runpath, fill_search_info, &args) != NULL)
3698 args.flags = LA_SER_CONFIG;
3699 if (path_enumerate(gethints(obj->z_nodeflib), fill_search_info, &args)
3703 args.flags = LA_SER_DEFAULT;
3704 if (!obj->z_nodeflib &&
3705 path_enumerate(ld_standard_library_path, fill_search_info, &args) != NULL)
/*
 * dirname(3)-style helper: copy the directory part of "path" into
 * "bname" (caller provides at least PATH_MAX bytes).  Empty/NULL input
 * yields "."; fails with an rtld error if the result would not fit.
 */
3711 rtld_dirname(const char *path, char *bname)
3715 /* Empty or NULL string gets treated as "." */
3716 if (path == NULL || *path == '\0') {
3722 /* Strip trailing slashes */
3723 endp = path + strlen(path) - 1;
3724 while (endp > path && *endp == '/')
3727 /* Find the start of the dir */
3728 while (endp > path && *endp != '/')
3731 /* Either the dir is "/" or there are no slashes */
3733 bname[0] = *endp == '/' ? '/' : '.';
3739 } while (endp > path && *endp == '/');
3742 if (endp - path + 2 > PATH_MAX)
3744 _rtld_error("Filename is too long: %s", path);
3748 strncpy(bname, path, endp - path + 1);
3749 bname[endp - path + 1] = '\0';
/*
 * Like rtld_dirname() but canonicalizes "path" with realpath() first,
 * then truncates at the last '/' (truncation line elided).
 */
3754 rtld_dirname_abs(const char *path, char *base)
3758 if (realpath(path, base) == NULL)
3760 dbg("%s -> %s", path, base);
3761 last = strrchr(base, '/');
/*
 * Insert the object's link_map into the debugger-visible r_debug chain,
 * keeping rtld's own entry at the very end of the list.
 */
3770 linkmap_add(Obj_Entry *obj)
3772 struct link_map *l = &obj->linkmap;
3773 struct link_map *prev;
3775 obj->linkmap.l_name = obj->path;
3776 obj->linkmap.l_addr = obj->mapbase;
3777 obj->linkmap.l_ld = obj->dynamic;
3779 /* GDB needs load offset on MIPS to use the symbols */
3780 obj->linkmap.l_offs = obj->relocbase;
/* First entry becomes the head of r_debug.r_map (body elided). */
3783 if (r_debug.r_map == NULL) {
3789 * Scan to the end of the list, but not past the entry for the
3790 * dynamic linker, which we want to keep at the very end.
3792 for (prev = r_debug.r_map;
3793 prev->l_next != NULL && prev->l_next != &obj_rtld.linkmap;
3794 prev = prev->l_next)
3797 /* Link in the new entry. */
3799 l->l_next = prev->l_next;
3800 if (l->l_next != NULL)
3801 l->l_next->l_prev = l;
/*
 * Unlink the object's link_map from the r_debug chain, handling the
 * head-of-list case separately.
 */
3806 linkmap_delete(Obj_Entry *obj)
3808 struct link_map *l = &obj->linkmap;
3810 if (l->l_prev == NULL) {
3811 if ((r_debug.r_map = l->l_next) != NULL)
3812 l->l_next->l_prev = NULL;
3816 if ((l->l_prev->l_next = l->l_next) != NULL)
3817 l->l_next->l_prev = l->l_prev;
/*
 * Debugger hook: deliberately (almost) empty — debuggers set a
 * breakpoint here to observe link-map changes.  The compiler barrier
 * keeps calls from being optimized away.
 */
3821 * Function for the debugger to set a breakpoint on to gain control.
3823 * The two parameters allow the debugger to easily find and determine
3824 * what the runtime loader is doing and to whom it is doing it.
3826 * When the loadhook trap is hit (r_debug_state, set at program
3827 * initialization), the arguments can be found on the stack:
3829 * +8 struct link_map *m
3830 * +4 struct r_debug *rd
3834 r_debug_state(struct r_debug* rd, struct link_map *m)
3837 * The following is a hack to force the compiler to emit calls to
3838 * this function, even when optimizing. If the function is empty,
3839 * the compiler is not obliged to emit any code for calls to it,
3840 * even when marked __noinline. However, gdb depends on those
3843 __compiler_membar();
/* Post-init debugger hook; same barrier trick as r_debug_state(). */
3847 * A function called after init routines have completed. This can be used to
3848 * break before a program's entry routine is called, and can be used when
3849 * main is not available in the symbol table.
3852 _r_debug_postinit(struct link_map *m)
3855 /* See r_debug_state(). */
3856 __compiler_membar();
/*
 * Final teardown of an Obj_Entry: if user code still holds it
 * (holdcount > 0), defer by flagging unholdfree; otherwise unmap the
 * image and remove its link_map entry.
 */
3860 release_object(Obj_Entry *obj)
3863 if (obj->holdcount > 0) {
3864 obj->unholdfree = true;
3867 munmap(obj->mapbase, obj->mapsize);
3868 linkmap_delete(obj)
/*
 * Look up "name" in the global scope and return the address of the
 * variable (or callable pointer for functions/IFUNCs); NULL-on-failure
 * path elided.
 */
3873 * Get address of the pointer variable in the main program.
3874 * Prefer non-weak symbol over the weak one.
3876 static const void **
3877 get_program_var_addr(const char *name, RtldLockState *lockstate)
3882 symlook_init(&req, name);
3883 req.lockstate = lockstate;
3884 donelist_init(&donelist);
3885 if (symlook_global(&req, &donelist) != 0)
3887 if (ELF_ST_TYPE(req.sym_out->st_info) == STT_FUNC)
3888 return ((const void **)make_function_pointer(req.sym_out,
3890 else if (ELF_ST_TYPE(req.sym_out->st_info) == STT_GNU_IFUNC)
3891 return ((const void **)rtld_resolve_ifunc(req.defobj_out, req.sym_out));
3893 return ((const void **)(req.defobj_out->relocbase +
3894 req.sym_out->st_value));
/*
 * Store "value" into the main program's pointer variable "name" (the
 * actual assignment is on an elided line).
 */
3898 * Set a pointer variable in the main program to the given value. This
3899 * is used to set key variables such as "environ" before any of the
3900 * init functions are called.
3903 set_program_var(const char *name, const void *value)
3907 if ((addr = get_program_var_addr(name, NULL)) != NULL) {
3908 dbg("\"%s\": *%p <-- %p", name, addr, value);
/*
 * Global-scope symbol search: first the objects loaded at startup
 * (list_main), then every RTLD_GLOBAL DAG, preferring a strong
 * definition over a weak one.  Returns 0 when a symbol was found,
 * ESRCH otherwise.
 */
3914 * Search the global objects, including dependencies and main object,
3915 * for the given symbol.
3918 symlook_global(SymLook *req, DoneList *donelist)
3921 const Objlist_Entry *elm;
3924 symlook_init_from_req(&req1, req);
3926 /* Search all objects loaded at program start up. */
3927 if (req->defobj_out == NULL ||
3928 ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
3929 res = symlook_list(&req1, &list_main, donelist);
3930 if (res == 0 && (req->defobj_out == NULL ||
3931 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
3932 req->sym_out = req1.sym_out;
3933 req->defobj_out = req1.defobj_out;
3934 assert(req->defobj_out != NULL);
3938 /* Search all DAGs whose roots are RTLD_GLOBAL objects. */
3939 STAILQ_FOREACH(elm, &list_global, link) {
/* A strong definition already found cannot be displaced — stop early. */
3940 if (req->defobj_out != NULL &&
3941 ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
3943 res = symlook_list(&req1, &elm->obj->dagmembers, donelist);
3944 if (res == 0 && (req->defobj_out == NULL ||
3945 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
3946 req->sym_out = req1.sym_out;
3947 req->defobj_out = req1.defobj_out;
3948 assert(req->defobj_out != NULL);
3952 return (req->sym_out != NULL ? 0 : ESRCH);
/*
 * Default symbol resolution order for a reference from "refobj":
 * the referencing object itself (for -Bsymbolic / protected symbols),
 * then the global scope, then every dlopen DAG containing refobj, and
 * finally rtld itself.  Strong definitions win over weak ones.
 */
3956 * Given a symbol name in a referencing object, find the corresponding
3957 * definition of the symbol. Returns a pointer to the symbol, or NULL if
3958 * no definition was found. Returns a pointer to the Obj_Entry of the
3959 * defining object via the reference parameter DEFOBJ_OUT.
3962 symlook_default(SymLook *req, const Obj_Entry *refobj)
3965 const Objlist_Entry *elm;
3969 donelist_init(&donelist);
3970 symlook_init_from_req(&req1, req);
3973 * Look first in the referencing object if linked symbolically,
3974 * and similarly handle protected symbols.
3976 res = symlook_obj(&req1, refobj);
3977 if (res == 0 && (refobj->symbolic ||
3978 ELF_ST_VISIBILITY(req1.sym_out->st_other) == STV_PROTECTED)) {
3979 req->sym_out = req1.sym_out;
3980 req->defobj_out = req1.defobj_out;
3981 assert(req->defobj_out != NULL);
/* Mark refobj done so the global/DAG passes do not rescan it. */
3983 if (refobj->symbolic || req->defobj_out != NULL)
3984 donelist_check(&donelist, refobj);
3986 symlook_global(req, &donelist);
3988 /* Search all dlopened DAGs containing the referencing object. */
3989 STAILQ_FOREACH(elm, &refobj->dldags, link) {
3990 if (req->sym_out != NULL &&
3991 ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
3993 res = symlook_list(&req1, &elm->obj->dagmembers, &donelist);
3994 if (res == 0 && (req->sym_out == NULL ||
3995 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
3996 req->sym_out = req1.sym_out;
3997 req->defobj_out = req1.defobj_out;
3998 assert(req->defobj_out != NULL);
4003 * Search the dynamic linker itself, and possibly resolve the
4004 * symbol from there. This is how the application links to
4005 * dynamic linker services such as dlopen.
4007 if (req->sym_out == NULL ||
4008 ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
4009 res = symlook_obj(&req1, &obj_rtld);
4011 req->sym_out = req1.sym_out;
4012 req->defobj_out = req1.defobj_out;
4013 assert(req->defobj_out != NULL);
4017 return (req->sym_out != NULL ? 0 : ESRCH);
/*
 * Search each object on OBJLIST for the requested symbol, skipping
 * objects already present in the done-list DLP.  The first non-weak
 * definition ends the scan; otherwise the first weak one is kept.
 */
4021 symlook_list(SymLook *req, const Objlist *objlist, DoneList *dlp)
4024 const Obj_Entry *defobj;
4025 const Objlist_Entry *elm;
4031 STAILQ_FOREACH(elm, objlist, link) {
4032 if (donelist_check(dlp, elm->obj))
4034 symlook_init_from_req(&req1, req);
4035 if ((res = symlook_obj(&req1, elm->obj)) == 0) {
4036 if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
4038 defobj = req1.defobj_out;
4039 if (ELF_ST_BIND(def->st_info) != STB_WEAK)
4046 req->defobj_out = defobj;
4053 * Search the chain of DAGs pointed to by the given Needed_Entry
4054 * for a symbol of the given name. Each DAG is scanned completely
4055 * before advancing to the next one. Returns a pointer to the symbol,
4056 * or NULL if no definition was found.
/*
 * Walk the Needed_Entry chain, searching each entry's whole DAG (via
 * symlook_list) before moving to the next.  Like symlook_list, a
 * non-weak match terminates the scan and a weak one is remembered.
 */
4059 symlook_needed(SymLook *req, const Needed_Entry *needed, DoneList *dlp)
4062 const Needed_Entry *n;
4063 const Obj_Entry *defobj;
4069 symlook_init_from_req(&req1, req);
4070 for (n = needed; n != NULL; n = n->next) {
/* Entries with no loaded object, or failed lookups, are skipped. */
4071 if (n->obj == NULL ||
4072 (res = symlook_list(&req1, &n->obj->dagmembers, dlp)) != 0)
4074 if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
4076 defobj = req1.defobj_out;
4077 if (ELF_ST_BIND(def->st_info) != STB_WEAK)
4083 req->defobj_out = defobj;
4090 * Search the symbol table of a single shared object for a symbol of
4091 * the given name and version, if requested. Returns a pointer to the
4092 * symbol, or NULL if no definition was found.  If the object is a
4093 * filter, return the filtered symbol from its filtee.
4095 * The symbol's hash value is passed in for efficiency reasons; that
4096 * eliminates many recomputations of the hash value.
/*
 * Look the symbol up in a single object, preferring the faster GNU
 * hash table when present, falling back to the SysV hash table.
 * When the object is a filter, its (auxiliary) filtees are loaded on
 * demand and searched, and a filtee hit overrides the local result.
 */
4099 symlook_obj(SymLook *req, const Obj_Entry *obj)
4103 int flags, res, mres;
4106 * If there is at least one valid hash at this point, we prefer to
4107 * use the faster GNU version if available.
4109 if (obj->valid_hash_gnu)
4110 mres = symlook_obj1_gnu(req, obj);
4111 else if (obj->valid_hash_sysv)
4112 mres = symlook_obj1_sysv(req, obj);
4117 if (obj->needed_filtees != NULL) {
/* SYMLOOK_EARLY propagates into filtee loading as RTLD_LO_EARLY. */
4118 flags = (req->flags & SYMLOOK_EARLY) ? RTLD_LO_EARLY : 0;
4119 load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate);
4120 donelist_init(&donelist);
4121 symlook_init_from_req(&req1, req);
4122 res = symlook_needed(&req1, obj->needed_filtees, &donelist);
4124 req->sym_out = req1.sym_out;
4125 req->defobj_out = req1.defobj_out;
4129 if (obj->needed_aux_filtees != NULL) {
4130 flags = (req->flags & SYMLOOK_EARLY) ? RTLD_LO_EARLY : 0;
4131 load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate);
4132 donelist_init(&donelist);
4133 symlook_init_from_req(&req1, req);
4134 res = symlook_needed(&req1, obj->needed_aux_filtees, &donelist);
4136 req->sym_out = req1.sym_out;
4137 req->defobj_out = req1.defobj_out;
4145 /* Symbol match routine common to both hash functions */
/*
 * Decide whether symbol-table entry SYMNUM of OBJ satisfies the
 * request, applying name, type, PLT and symbol-versioning rules.
 * A definite match is recorded in result->sym_out; a merely
 * version-compatible candidate is remembered in result->vsymp (with
 * result->vcount counting candidates).  Shared by the SysV and GNU
 * hash lookup routines.
 */
4147 matched_symbol(SymLook *req, const Obj_Entry *obj, Sym_Match_Result *result,
4148 const unsigned long symnum)
4151 const Elf_Sym *symp;
4154 symp = obj->symtab + symnum;
4155 strp = obj->strtab + symp->st_name;
4157 switch (ELF_ST_TYPE(symp->st_info)) {
4163 if (symp->st_value == 0)
4167 if (symp->st_shndx != SHN_UNDEF)
/* Undefined functions still match PLT (lazy-binding) lookups. */
4170 else if (((req->flags & SYMLOOK_IN_PLT) == 0) &&
4171 (ELF_ST_TYPE(symp->st_info) == STT_FUNC))
/* Cheap first-character check before the full strcmp(). */
4178 if (req->name[0] != strp[0] || strcmp(req->name, strp) != 0)
4181 if (req->ventry == NULL) {
4182 if (obj->versyms != NULL) {
4183 verndx = VER_NDX(obj->versyms[symnum]);
4184 if (verndx > obj->vernum) {
4186 "%s: symbol %s references wrong version %d",
4187 obj->path, obj->strtab + symnum, verndx);
4191 * If we are not called from dlsym (i.e. this
4192 * is a normal relocation from unversioned
4193 * binary), accept the symbol immediately if
4194 * it happens to have first version after this
4195 * shared object became versioned. Otherwise,
4196 * if symbol is versioned and not hidden,
4197 * remember it. If it is the only symbol with
4198 * this name exported by the shared object, it
4199 * will be returned as a match by the calling
4200 * function. If symbol is global (verndx < 2)
4201 * accept it unconditionally.
4203 if ((req->flags & SYMLOOK_DLSYM) == 0 &&
4204 verndx == VER_NDX_GIVEN) {
4205 result->sym_out = symp;
4208 else if (verndx >= VER_NDX_GIVEN) {
4209 if ((obj->versyms[symnum] & VER_NDX_HIDDEN)
4211 if (result->vsymp == NULL)
4212 result->vsymp = symp;
4218 result->sym_out = symp;
/* Versioned request (dlvsym or versioned relocation) from here on. */
4221 if (obj->versyms == NULL) {
4222 if (object_match_name(obj, req->ventry->name)) {
4223 _rtld_error("%s: object %s should provide version %s "
4224 "for symbol %s", obj_rtld.path, obj->path,
4225 req->ventry->name, obj->strtab + symnum);
4229 verndx = VER_NDX(obj->versyms[symnum]);
4230 if (verndx > obj->vernum) {
4231 _rtld_error("%s: symbol %s references wrong version %d",
4232 obj->path, obj->strtab + symnum, verndx);
4235 if (obj->vertab[verndx].hash != req->ventry->hash ||
4236 strcmp(obj->vertab[verndx].name, req->ventry->name)) {
4238 * Version does not match. Look if this is a
4239 * global symbol and if it is not hidden. If
4240 * global symbol (verndx < 2) is available,
4241 * use it. Do not return symbol if we are
4242 * called by dlvsym, because dlvsym looks for
4243 * a specific version and default one is not
4244 * what dlvsym wants.
4246 if ((req->flags & SYMLOOK_DLSYM) ||
4247 (verndx >= VER_NDX_GIVEN) ||
4248 (obj->versyms[symnum] & VER_NDX_HIDDEN))
4252 result->sym_out = symp;
4257 * Search for symbol using SysV hash function.
4258 * obj->buckets is known not to be NULL at this point; the test for this was
4259 * performed with the obj->valid_hash_sysv assignment.
/*
 * SysV-hash lookup within a single object: follow the bucket chain for
 * req->hash, returning the first definite match, or the remembered
 * versioned candidate when it is unique (vcount == 1).
 */
4262 symlook_obj1_sysv(SymLook *req, const Obj_Entry *obj)
4264 unsigned long symnum;
4265 Sym_Match_Result matchres;
4267 matchres.sym_out = NULL;
4268 matchres.vsymp = NULL;
4269 matchres.vcount = 0;
4271 for (symnum = obj->buckets[req->hash % obj->nbuckets];
4272 symnum != STN_UNDEF; symnum = obj->chains[symnum]) {
/* A chain index past nchains means a corrupt hash table. */
4273 if (symnum >= obj->nchains)
4274 return (ESRCH); /* Bad object */
4276 if (matched_symbol(req, obj, &matchres, symnum)) {
4277 req->sym_out = matchres.sym_out;
4278 req->defobj_out = obj;
4282 if (matchres.vcount == 1) {
4283 req->sym_out = matchres.vsymp;
4284 req->defobj_out = obj;
4290 /* Search for symbol using GNU hash function */
/*
 * GNU-hash lookup within a single object: consult the Bloom filter
 * first to reject most misses cheaply, then walk the hash chain whose
 * entries compare equal to req->hash_gnu ignoring the low "end of
 * chain" bit.  Falls back to the unique versioned candidate like the
 * SysV variant.
 */
4292 symlook_obj1_gnu(SymLook *req, const Obj_Entry *obj)
4294 Elf_Addr bloom_word;
4295 const Elf32_Word *hashval;
4297 Sym_Match_Result matchres;
4298 unsigned int h1, h2;
4299 unsigned long symnum;
4301 matchres.sym_out = NULL;
4302 matchres.vsymp = NULL;
4303 matchres.vcount = 0;
4305 /* Pick right bitmask word from Bloom filter array */
4306 bloom_word = obj->bloom_gnu[(req->hash_gnu / __ELF_WORD_SIZE) &
4307 obj->maskwords_bm_gnu];
4309 /* Calculate modulus word size of gnu hash and its derivative */
4310 h1 = req->hash_gnu & (__ELF_WORD_SIZE - 1);
4311 h2 = ((req->hash_gnu >> obj->shift2_gnu) & (__ELF_WORD_SIZE - 1));
4313 /* Filter out the "definitely not in set" queries */
4314 if (((bloom_word >> h1) & (bloom_word >> h2) & 1) == 0)
4317 /* Locate hash chain and corresponding value element */
4318 bucket = obj->buckets_gnu[req->hash_gnu % obj->nbuckets_gnu];
4321 hashval = &obj->chain_zero_gnu[bucket];
/* Bit 0 of a chain entry flags the end of the chain, hence ">> 1". */
4323 if (((*hashval ^ req->hash_gnu) >> 1) == 0) {
4324 symnum = hashval - obj->chain_zero_gnu;
4325 if (matched_symbol(req, obj, &matchres, symnum)) {
4326 req->sym_out = matchres.sym_out;
4327 req->defobj_out = obj;
4331 } while ((*hashval++ & 1) == 0);
4332 if (matchres.vcount == 1) {
4333 req->sym_out = matchres.vsymp;
4334 req->defobj_out = obj;
/*
 * Implement ldd(1)-style tracing: honor the LD_TRACE_LOADED_OBJECTS_*
 * environment variables and print, for every needed entry of every
 * object, fmt1 (for "lib*" names) or fmt2, expanding %-escapes such as
 * %o (short name), %p (path/mapbase) and %x (mapbase).
 */
4341 trace_loaded_objects(Obj_Entry *obj)
4343 char *fmt1, *fmt2, *fmt, *main_local, *list_containers;
4346 if ((main_local = getenv(_LD("TRACE_LOADED_OBJECTS_PROGNAME"))) == NULL)
4349 if ((fmt1 = getenv(_LD("TRACE_LOADED_OBJECTS_FMT1"))) == NULL)
4350 fmt1 = "\t%o => %p (%x)\n";
4352 if ((fmt2 = getenv(_LD("TRACE_LOADED_OBJECTS_FMT2"))) == NULL)
4353 fmt2 = "\t%o (%x)\n";
4355 list_containers = getenv(_LD("TRACE_LOADED_OBJECTS_ALL"));
4357 for (; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
4358 Needed_Entry *needed;
4364 if (list_containers && obj->needed != NULL)
4365 rtld_printf("%s:\n", obj->path);
4366 for (needed = obj->needed; needed; needed = needed->next) {
/* Print each resolved object only once unless listing containers. */
4367 if (needed->obj != NULL) {
4368 if (needed->obj->traced && !list_containers)
4370 needed->obj->traced = true;
4371 path = needed->obj->path;
4375 name = (char *)obj->strtab + needed->name;
4376 is_lib = strncmp(name, "lib", 3) == 0; /* XXX - bogus */
4378 fmt = is_lib ? fmt1 : fmt2;
4379 while ((c = *fmt++) != '\0') {
4405 rtld_putstr(main_local);
4408 rtld_putstr(obj_main->path);
4415 rtld_printf("%d", sodp->sod_major);
4418 rtld_printf("%d", sodp->sod_minor);
4425 rtld_printf("%p", needed->obj ? needed->obj->mapbase :
4438 * Unload a dlopened object and its dependencies from memory and from
4439 * our data structures. It is assumed that the DAG rooted in the
4440 * object has already been unreferenced, and that the object has a
4441 * reference count of 0.
/*
 * Unload ROOT's DAG: unlink unreferenced objects from the bookkeeping
 * lists, then unmap and release every object whose refcount dropped
 * to zero.  A marker entry preserves the list position across the
 * lock-dropping unload_filtees() call.
 */
4444 unload_object(Obj_Entry *root, RtldLockState *lockstate)
4446 Obj_Entry marker, *obj, *next;
4448 assert(root->refcount == 0);
4451 * Pass over the DAG removing unreferenced objects from
4452 * appropriate lists.
4454 unlink_object(root);
4456 /* Unmap all objects that are no longer referenced. */
4457 for (obj = TAILQ_FIRST(&obj_list); obj != NULL; obj = next) {
4458 next = TAILQ_NEXT(obj, next);
4459 if (obj->marker || obj->refcount != 0)
4461 LD_UTRACE(UTRACE_UNLOAD_OBJECT, obj, obj->mapbase,
4462 obj->mapsize, 0, obj->path);
4463 dbg("unloading \"%s\"", obj->path);
4465 * Unlink the object now to prevent new references from
4466 * being acquired while the bind lock is dropped in
4467 * recursive dlclose() invocations.
4469 TAILQ_REMOVE(&obj_list, obj, next);
4472 if (obj->filtees_loaded) {
/* The marker keeps "next" valid while filtees are unloaded. */
4474 init_marker(&marker);
4475 TAILQ_INSERT_BEFORE(next, &marker, next);
4476 unload_filtees(obj, lockstate);
4477 next = TAILQ_NEXT(&marker, next);
4478 TAILQ_REMOVE(&obj_list, &marker, next);
4480 unload_filtees(obj, lockstate);
4482 release_object(obj);
/*
 * Recursively remove ROOT (once its refcount reaches zero) from the
 * RTLD_GLOBAL list and from every DAG membership list, descending
 * into each DAG member.
 */
4487 unlink_object(Obj_Entry *root)
4491 if (root->refcount == 0) {
4492 /* Remove the object from the RTLD_GLOBAL list. */
4493 objlist_remove(&list_global, root);
4495 /* Remove the object from all objects' DAG lists. */
4496 STAILQ_FOREACH(elm, &root->dagmembers, link) {
4497 objlist_remove(&elm->obj->dldags, root);
4498 if (elm->obj != root)
4499 unlink_object(elm->obj);
/* Increment the reference count of every member of ROOT's DAG. */
4505 ref_dag(Obj_Entry *root)
4509 assert(root->dag_inited);
4510 STAILQ_FOREACH(elm, &root->dagmembers, link)
4511 elm->obj->refcount++;
/* Decrement the reference count of every member of ROOT's DAG. */
4515 unref_dag(Obj_Entry *root)
4519 assert(root->dag_inited);
4520 STAILQ_FOREACH(elm, &root->dagmembers, link)
4521 elm->obj->refcount--;
4525 * Common code for MD __tls_get_addr().
/*
 * Slow path for __tls_get_addr(): regenerate the thread's DTV when new
 * modules have raised the generation count, and lazily allocate the
 * module's TLS block on first touch.  Kept out of line (__noinline)
 * so the common fast path stays small.
 */
4527 static void *tls_get_addr_slow(Elf_Addr **, int, size_t) __noinline;
4529 tls_get_addr_slow(Elf_Addr **dtvp, int index, size_t offset)
4531 Elf_Addr *newdtv, *dtv;
4532 RtldLockState lockstate;
4536 /* Check dtv generation in case new modules have arrived */
4537 if (dtv[0] != tls_dtv_generation) {
4538 wlock_acquire(rtld_bind_lock, &lockstate);
4539 newdtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
4541 if (to_copy > tls_max_index)
4542 to_copy = tls_max_index;
/* Slots 0/1 hold generation and max index; entries start at [2]. */
4543 memcpy(&newdtv[2], &dtv[2], to_copy * sizeof(Elf_Addr));
4544 newdtv[0] = tls_dtv_generation;
4545 newdtv[1] = tls_max_index;
4547 lock_release(rtld_bind_lock, &lockstate);
4548 dtv = *dtvp = newdtv;
4551 /* Dynamically allocate module TLS if necessary */
4552 if (dtv[index + 1] == 0) {
4553 /* Signal safe, wlock will block out signals. */
4554 wlock_acquire(rtld_bind_lock, &lockstate);
/* Re-check under the lock in case another thread allocated it. */
4555 if (!dtv[index + 1])
4556 dtv[index + 1] = (Elf_Addr)allocate_module_tls(index);
4557 lock_release(rtld_bind_lock, &lockstate);
4559 return ((void *)(dtv[index + 1] + offset));
/*
 * Fast path common to the MD __tls_get_addr() stubs: if the DTV is
 * current and the module's block exists, compute the address inline;
 * otherwise defer to tls_get_addr_slow().
 */
4563 tls_get_addr_common(Elf_Addr **dtvp, int index, size_t offset)
4568 /* Check dtv generation in case new modules have arrived */
4569 if (__predict_true(dtv[0] == tls_dtv_generation &&
4570 dtv[index + 1] != 0))
4571 return ((void *)(dtv[index + 1] + offset));
4572 return (tls_get_addr_slow(dtvp, index, offset));
4575 #if defined(__aarch64__) || defined(__arm__) || defined(__mips__) || \
4576 defined(__powerpc__) || defined(__riscv__)
4579 * Allocate Static TLS using the Variant I method.
/*
 * Variant I static TLS allocation (TCB at the start, TLS blocks after
 * it).  Either reuses/copies an existing TCB (adjusting DTV pointers
 * that referred into the old static block) or builds a fresh DTV and
 * initializes each object's TLS segment from its template.
 */
4582 allocate_tls(Obj_Entry *objs, void *oldtcb, size_t tcbsize, size_t tcbalign)
4591 if (oldtcb != NULL && tcbsize == TLS_TCB_SIZE)
4594 assert(tcbsize >= TLS_TCB_SIZE);
4595 tcb = xcalloc(1, tls_static_space - TLS_TCB_SIZE + tcbsize);
4596 tls = (Elf_Addr **)(tcb + tcbsize - TLS_TCB_SIZE);
4598 if (oldtcb != NULL) {
4599 memcpy(tls, oldtcb, tls_static_space);
4602 /* Adjust the DTV. */
/* Relocate DTV entries that pointed into the old static block. */
4604 for (i = 0; i < dtv[1]; i++) {
4605 if (dtv[i+2] >= (Elf_Addr)oldtcb &&
4606 dtv[i+2] < (Elf_Addr)oldtcb + tls_static_space) {
4607 dtv[i+2] = dtv[i+2] - (Elf_Addr)oldtcb + (Elf_Addr)tls;
4611 dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
4613 dtv[0] = tls_dtv_generation;
4614 dtv[1] = tls_max_index;
4616 for (obj = globallist_curr(objs); obj != NULL;
4617 obj = globallist_next(obj)) {
4618 if (obj->tlsoffset > 0) {
4619 addr = (Elf_Addr)tls + obj->tlsoffset;
/* Copy the initialized image, then zero the tbss remainder. */
4620 if (obj->tlsinitsize > 0)
4621 memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
4622 if (obj->tlssize > obj->tlsinitsize)
4623 memset((void*) (addr + obj->tlsinitsize), 0,
4624 obj->tlssize - obj->tlsinitsize);
4625 dtv[obj->tlsindex + 1] = addr;
/*
 * Variant I static TLS release: free every dynamically allocated DTV
 * entry that lies outside the static block [tlsstart, tlsend).
 */
4634 free_tls(void *tcb, size_t tcbsize, size_t tcbalign)
4637 Elf_Addr tlsstart, tlsend;
4640 assert(tcbsize >= TLS_TCB_SIZE);
4642 tlsstart = (Elf_Addr)tcb + tcbsize - TLS_TCB_SIZE;
4643 tlsend = tlsstart + tls_static_space;
4645 dtv = *(Elf_Addr **)tlsstart;
4647 for (i = 0; i < dtvsize; i++) {
4648 if (dtv[i+2] && (dtv[i+2] < tlsstart || dtv[i+2] >= tlsend)) {
4649 free((void*)dtv[i+2]);
4658 #if defined(__i386__) || defined(__amd64__) || defined(__sparc64__)
4661 * Allocate Static TLS using the Variant II method.
/*
 * Variant II static TLS allocation (TLS blocks grow downward from the
 * thread pointer; TCB above them).  Builds a new segment + DTV, copies
 * an old static block wholesale when one is supplied, carries over
 * dynamically allocated DTV entries, then frees the old block.
 */
4664 allocate_tls(Obj_Entry *objs, void *oldtls, size_t tcbsize, size_t tcbalign)
4667 size_t size, ralign;
4669 Elf_Addr *dtv, *olddtv;
4670 Elf_Addr segbase, oldsegbase, addr;
4674 if (tls_static_max_align > ralign)
4675 ralign = tls_static_max_align;
4676 size = round(tls_static_space, ralign) + round(tcbsize, ralign);
4678 assert(tcbsize >= 2*sizeof(Elf_Addr));
4679 tls = malloc_aligned(size, ralign);
4680 dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
/* Word 0 of the TCB is the self pointer, word 1 the DTV. */
4682 segbase = (Elf_Addr)(tls + round(tls_static_space, ralign));
4683 ((Elf_Addr*)segbase)[0] = segbase;
4684 ((Elf_Addr*)segbase)[1] = (Elf_Addr) dtv;
4686 dtv[0] = tls_dtv_generation;
4687 dtv[1] = tls_max_index;
4691 * Copy the static TLS block over whole.
4693 oldsegbase = (Elf_Addr) oldtls;
4694 memcpy((void *)(segbase - tls_static_space),
4695 (const void *)(oldsegbase - tls_static_space),
4699 * If any dynamic TLS blocks have been created by tls_get_addr(),
4702 olddtv = ((Elf_Addr**)oldsegbase)[1];
4703 for (i = 0; i < olddtv[1]; i++) {
4704 if (olddtv[i+2] < oldsegbase - size || olddtv[i+2] > oldsegbase) {
4705 dtv[i+2] = olddtv[i+2];
4711 * We assume that this block was the one we created with
4712 * allocate_initial_tls().
4714 free_tls(oldtls, 2*sizeof(Elf_Addr), sizeof(Elf_Addr));
4716 for (obj = objs; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
4717 if (obj->marker || obj->tlsoffset == 0)
/* Variant II: offsets are subtracted from the segment base. */
4719 addr = segbase - obj->tlsoffset;
4720 memset((void*) (addr + obj->tlsinitsize),
4721 0, obj->tlssize - obj->tlsinitsize);
4723 memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
4724 dtv[obj->tlsindex + 1] = addr;
4728 return (void*) segbase;
/*
 * Variant II static TLS release: reconstruct the static block bounds,
 * free every dynamically allocated DTV entry outside them, then free
 * the aligned static block itself.
 */
4732 free_tls(void *tls, size_t tcbsize, size_t tcbalign)
4735 size_t size, ralign;
4737 Elf_Addr tlsstart, tlsend;
4740 * Figure out the size of the initial TLS block so that we can
4741 * find stuff which ___tls_get_addr() allocated dynamically.
4744 if (tls_static_max_align > ralign)
4745 ralign = tls_static_max_align;
4746 size = round(tls_static_space, ralign);
4748 dtv = ((Elf_Addr**)tls)[1];
4750 tlsend = (Elf_Addr) tls;
4751 tlsstart = tlsend - size;
4752 for (i = 0; i < dtvsize; i++) {
4753 if (dtv[i + 2] != 0 && (dtv[i + 2] < tlsstart || dtv[i + 2] > tlsend)) {
4754 free_aligned((void *)dtv[i + 2]);
4758 free_aligned((void *)tlsstart);
4765 * Allocate TLS block for module with given index.
/*
 * Allocate and initialize the TLS block for the module with the given
 * TLS index: copy the init image, zero the remainder.  Fails hard if
 * no loaded object carries that index.
 */
4768 allocate_module_tls(int index)
4773 TAILQ_FOREACH(obj, &obj_list, next) {
4776 if (obj->tlsindex == index)
4780 _rtld_error("Can't find module with TLS index %d", index);
4784 p = malloc_aligned(obj->tlssize, obj->tlsalign);
4785 memcpy(p, obj->tlsinit, obj->tlsinitsize);
4786 memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize);
/*
 * Reserve a static TLS offset for OBJ.  Once the static block size is
 * frozen, new objects must fit into the spare space left at startup;
 * before that, only the maximum alignment is tracked.  Sets
 * obj->tlsoffset and obj->tls_done on success.
 */
4792 allocate_tls_offset(Obj_Entry *obj)
4799 if (obj->tlssize == 0) {
4800 obj->tls_done = true;
4804 if (tls_last_offset == 0)
4805 off = calculate_first_tls_offset(obj->tlssize, obj->tlsalign);
4807 off = calculate_tls_offset(tls_last_offset, tls_last_size,
4808 obj->tlssize, obj->tlsalign);
4811 * If we have already fixed the size of the static TLS block, we
4812 * must stay within that size. When allocating the static TLS, we
4813 * leave a small amount of space spare to be used for dynamically
4814 * loading modules which use static TLS.
4816 if (tls_static_space != 0) {
4817 if (calculate_tls_end(off, obj->tlssize) > tls_static_space)
4819 } else if (obj->tlsalign > tls_static_max_align) {
4820 tls_static_max_align = obj->tlsalign;
4823 tls_last_offset = obj->tlsoffset = off;
4824 tls_last_size = obj->tlssize;
4825 obj->tls_done = true;
/*
 * Return OBJ's static TLS space to the bump "allocator" when it was
 * the most recent allocation; otherwise the space is simply leaked.
 */
4831 free_tls_offset(Obj_Entry *obj)
4835 * If we were the last thing to allocate out of the static TLS
4836 * block, we give our space back to the 'allocator'. This is a
4837 * simplistic workaround to allow libGL.so.1 to be loaded and
4838 * unloaded multiple times.
4840 if (calculate_tls_end(obj->tlsoffset, obj->tlssize)
4841 == calculate_tls_end(tls_last_offset, tls_last_size)) {
4842 tls_last_offset -= obj->tlssize;
/* Public entry: allocate_tls() under the bind lock (used by libthr). */
4848 _rtld_allocate_tls(void *oldtls, size_t tcbsize, size_t tcbalign)
4851 RtldLockState lockstate;
4853 wlock_acquire(rtld_bind_lock, &lockstate);
4854 ret = allocate_tls(globallist_curr(TAILQ_FIRST(&obj_list)), oldtls,
4856 lock_release(rtld_bind_lock, &lockstate);
/* Public entry: free_tls() under the bind lock (used by libthr). */
4861 _rtld_free_tls(void *tcb, size_t tcbsize, size_t tcbalign)
4863 RtldLockState lockstate;
4865 wlock_acquire(rtld_bind_lock, &lockstate);
4866 free_tls(tcb, tcbsize, tcbalign);
4867 lock_release(rtld_bind_lock, &lockstate);
/*
 * Append NAME to OBJ's list of known names (DT_SONAME aliases etc.).
 * Allocation failure is tolerated silently: the name is just not
 * recorded.
 */
4871 object_add_name(Obj_Entry *obj, const char *name)
4877 entry = malloc(sizeof(Name_Entry) + len);
4879 if (entry != NULL) {
4880 strcpy(entry->name, name);
4881 STAILQ_INSERT_TAIL(&obj->names, entry, link);
/* Return whether NAME matches any of the names recorded for OBJ. */
4886 object_match_name(const Obj_Entry *obj, const char *name)
4890 STAILQ_FOREACH(entry, &obj->names, link) {
4891 if (strcmp(name, entry->name) == 0)
/*
 * Find the dependency of OBJ named NAME: first among the objects
 * loaded at startup, then among OBJ's DT_NEEDED entries.  May return
 * NULL for a DT_NEEDED entry whose object is not loaded yet (callers
 * handle this); reports an inconsistency error otherwise.
 */
4898 locate_dependency(const Obj_Entry *obj, const char *name)
4900 const Objlist_Entry *entry;
4901 const Needed_Entry *needed;
4903 STAILQ_FOREACH(entry, &list_main, link) {
4904 if (object_match_name(entry->obj, name))
4908 for (needed = obj->needed; needed != NULL; needed = needed->next) {
4909 if (strcmp(obj->strtab + needed->name, name) == 0 ||
4910 (needed->obj != NULL && object_match_name(needed->obj, name))) {
4912 * If there is DT_NEEDED for the name we are looking for,
4913 * we are all set. Note that object might not be found if
4914 * dependency was not loaded yet, so the function can
4915 * return NULL here. This is expected and handled
4916 * properly by the caller.
4918 return (needed->obj);
4921 _rtld_error("%s: Unexpected inconsistency: dependency %s not found",
/*
 * Verify that DEPOBJ defines the version named by VNA (required by
 * REFOBJ).  Walks depobj's Verdef chain comparing hash then name.
 * A missing weak (VER_FLG_WEAK) requirement is not an error.
 */
4927 check_object_provided_version(Obj_Entry *refobj, const Obj_Entry *depobj,
4928 const Elf_Vernaux *vna)
4930 const Elf_Verdef *vd;
4931 const char *vername;
4933 vername = refobj->strtab + vna->vna_name;
4934 vd = depobj->verdef;
4936 _rtld_error("%s: version %s required by %s not defined",
4937 depobj->path, vername, refobj->path);
4941 if (vd->vd_version != VER_DEF_CURRENT) {
4942 _rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
4943 depobj->path, vd->vd_version);
/* Compare the cheap hash first, the name only on a hash match. */
4946 if (vna->vna_hash == vd->vd_hash) {
4947 const Elf_Verdaux *aux = (const Elf_Verdaux *)
4948 ((char *)vd + vd->vd_aux);
4949 if (strcmp(vername, depobj->strtab + aux->vda_name) == 0)
4952 if (vd->vd_next == 0)
4954 vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
4956 if (vna->vna_flags & VER_FLG_WEAK)
4958 _rtld_error("%s: version %s required by %s not found",
4959 depobj->path, vername, refobj->path);
/*
 * Build OBJ's version table (obj->vertab, indexed by version index)
 * from its Verdef/Verneed records and verify that every required
 * version is provided by the corresponding dependency.  Runs at most
 * once per object (obj->ver_checked).
 */
4964 rtld_verify_object_versions(Obj_Entry *obj)
4966 const Elf_Verneed *vn;
4967 const Elf_Verdef *vd;
4968 const Elf_Verdaux *vda;
4969 const Elf_Vernaux *vna;
4970 const Obj_Entry *depobj;
4971 int maxvernum, vernum;
4973 if (obj->ver_checked)
4975 obj->ver_checked = true;
4979 * Walk over defined and required version records and figure out
4980 * max index used by any of them. Do very basic sanity checking
4984 while (vn != NULL) {
4985 if (vn->vn_version != VER_NEED_CURRENT) {
4986 _rtld_error("%s: Unsupported version %d of Elf_Verneed entry",
4987 obj->path, vn->vn_version);
4990 vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux);
4992 vernum = VER_NEED_IDX(vna->vna_other);
4993 if (vernum > maxvernum)
4995 if (vna->vna_next == 0)
4997 vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next);
4999 if (vn->vn_next == 0)
5001 vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next);
5005 while (vd != NULL) {
5006 if (vd->vd_version != VER_DEF_CURRENT) {
5007 _rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
5008 obj->path, vd->vd_version);
5011 vernum = VER_DEF_IDX(vd->vd_ndx);
5012 if (vernum > maxvernum)
5014 if (vd->vd_next == 0)
5016 vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
5023 * Store version information in array indexable by version index.
5024 * Verify that object version requirements are satisfied along the
5027 obj->vernum = maxvernum + 1;
5028 obj->vertab = xcalloc(obj->vernum, sizeof(Ver_Entry));
/* Second pass over Verdef: record locally defined versions. */
5031 while (vd != NULL) {
5032 if ((vd->vd_flags & VER_FLG_BASE) == 0) {
5033 vernum = VER_DEF_IDX(vd->vd_ndx);
5034 assert(vernum <= maxvernum);
5035 vda = (const Elf_Verdaux *)((char *)vd + vd->vd_aux);
5036 obj->vertab[vernum].hash = vd->vd_hash;
5037 obj->vertab[vernum].name = obj->strtab + vda->vda_name;
5038 obj->vertab[vernum].file = NULL;
5039 obj->vertab[vernum].flags = 0;
5041 if (vd->vd_next == 0)
5043 vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
/* Second pass over Verneed: check and record required versions. */
5047 while (vn != NULL) {
5048 depobj = locate_dependency(obj, obj->strtab + vn->vn_file);
5051 vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux);
5053 if (check_object_provided_version(obj, depobj, vna))
5055 vernum = VER_NEED_IDX(vna->vna_other);
5056 assert(vernum <= maxvernum);
5057 obj->vertab[vernum].hash = vna->vna_hash;
5058 obj->vertab[vernum].name = obj->strtab + vna->vna_name;
5059 obj->vertab[vernum].file = obj->strtab + vn->vn_file;
5060 obj->vertab[vernum].flags = (vna->vna_other & VER_NEED_HIDDEN) ?
5061 VER_INFO_HIDDEN : 0;
5062 if (vna->vna_next == 0)
5064 vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next);
5066 if (vn->vn_next == 0)
5068 vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next);
/*
 * Verify version requirements for every object on OBJLIST (and rtld
 * itself).  Under ld tracing, keep going after failures so ldd can
 * report all problems.
 */
5074 rtld_verify_versions(const Objlist *objlist)
5076 Objlist_Entry *entry;
5080 STAILQ_FOREACH(entry, objlist, link) {
5082 * Skip dummy objects or objects that have their version requirements
5085 if (entry->obj->strtab == NULL || entry->obj->vertab != NULL)
5087 if (rtld_verify_object_versions(entry->obj) == -1) {
5089 if (ld_tracing == NULL)
5093 if (rc == 0 || ld_tracing != NULL)
5094 rc = rtld_verify_object_versions(&obj_rtld);
/*
 * Map SYMNUM's versym index to OBJ's Ver_Entry, or NULL when the
 * symbol is unversioned or the index is out of range (error set).
 */
5099 fetch_ventry(const Obj_Entry *obj, unsigned long symnum)
5104 vernum = VER_NDX(obj->versyms[symnum]);
5105 if (vernum >= obj->vernum) {
5106 _rtld_error("%s: symbol %s has wrong verneed value %d",
5107 obj->path, obj->strtab + symnum, vernum);
5108 } else if (obj->vertab[vernum].hash != 0) {
5109 return &obj->vertab[vernum];
/* Report the current stack protection flags to libc/libthr. */
5116 _rtld_get_stack_prot(void)
5119 return (stack_prot);
/*
 * Return 1 if the object containing ARG was brought in via dlopen(),
 * 0 if it was loaded at startup, -1 (with error set) if no object
 * contains the address.  Takes the bind lock for the lookup.
 */
5123 _rtld_is_dlopened(void *arg)
5126 RtldLockState lockstate;
5129 rlock_acquire(rtld_bind_lock, &lockstate);
5132 obj = obj_from_addr(arg);
5134 _rtld_error("No shared object contains address");
5135 lock_release(rtld_bind_lock, &lockstate);
5138 res = obj->dlopened ? 1 : 0;
5139 lock_release(rtld_bind_lock, &lockstate);
/* Apply PT_GNU_RELRO: make the object's relro region read-only. */
5144 obj_enforce_relro(Obj_Entry *obj)
5147 if (obj->relro_size > 0 && mprotect(obj->relro_page, obj->relro_size,
5149 _rtld_error("%s: Cannot enforce relro protection: %s",
5150 obj->path, rtld_strerror(errno));
/*
 * When a newly loaded object requires executable stacks (PF_X in
 * max_stack_flags) and the stacks are not executable yet, ask libthr's
 * __pthread_map_stacks_exec hook to remap every thread stack.
 */
5157 map_stacks_exec(RtldLockState *lockstate)
5159 void (*thr_map_stacks_exec)(void);
5161 if ((max_stack_flags & PF_X) == 0 || (stack_prot & PROT_EXEC) != 0)
5163 thr_map_stacks_exec = (void (*)(void))(uintptr_t)
5164 get_program_var_addr("__pthread_map_stacks_exec", lockstate);
5165 if (thr_map_stacks_exec != NULL) {
5166 stack_prot |= PROT_EXEC;
5167 thr_map_stacks_exec();
/* Initialize a SymLook request for NAME, precomputing both hashes. */
5172 symlook_init(SymLook *dst, const char *name)
5175 bzero(dst, sizeof(*dst));
5177 dst->hash = elf_hash(name);
5178 dst->hash_gnu = gnu_hash(name);
/*
 * Derive a fresh SymLook from SRC: same query (name, hashes, version,
 * flags, lockstate) but cleared outputs, so a sub-search cannot see a
 * previous result.
 */
5182 symlook_init_from_req(SymLook *dst, const SymLook *src)
5185 dst->name = src->name;
5186 dst->hash = src->hash;
5187 dst->hash_gnu = src->hash_gnu;
5188 dst->ventry = src->ventry;
5189 dst->flags = src->flags;
5190 dst->defobj_out = NULL;
5191 dst->sym_out = NULL;
5192 dst->lockstate = src->lockstate;
5197 * Parse a file descriptor number without pulling in more of libc (e.g. atoi).
/*
 * Parse a decimal file-descriptor number from STR without pulling in
 * atoi(); empty or non-numeric input yields an error.
 */
5200 parse_libdir(const char *str)
5202 static const int RADIX = 10; /* XXXJA: possibly support hex? */
5209 for (c = *str; c != '\0'; c = *++str) {
5210 if (c < '0' || c > '9')
5217 /* Make sure we actually parsed something. */
5219 _rtld_error("failed to parse directory FD from '%s'", str);
5226 * Overrides for libc_pic-provided functions.
/*
 * Local replacement for libc's __getosreldate(): query kern.osreldate
 * via sysctl(3), validating the result before use.
 */
5230 __getosreldate(void)
5240 oid[1] = KERN_OSRELDATE;
5242 len = sizeof(osrel);
5243 error = sysctl(oid, 2, &osrel, &len, NULL, 0);
5244 if (error == 0 && osrel > 0 && len == sizeof(osrel))
/*
 * libc compatibility globals: rtld links against libc_pic and supplies
 * its own copies of these hooks (rtld itself never becomes threaded).
 */
5256 void (*__cleanup)(void);
5257 int __isthreaded = 0;
5258 int _thread_autoinit_dummy_decl = 1;
5261 * No unresolved symbols for rtld.
/* Empty stub so rtld carries no unresolved reference to this libthr hook. */
5264 __pthread_cxa_finalize(struct dl_phdr_info *a)
/* Stack-smashing protector failure hook: report and terminate. */
5269 __stack_chk_fail(void)
5272 _rtld_error("stack overflow detected; terminated");
5275 __weak_reference(__stack_chk_fail, __stack_chk_fail_local);
/* Bounds-check (FORTIFY-style) failure hook: report and terminate. */
5281 _rtld_error("buffer overflow detected; terminated");
5286 rtld_strerror(int errnum)
5289 if (errnum < 0 || errnum >= sys_nerr)
5290 return ("Unknown error");
5291 return (sys_errlist[errnum]);