2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright 1996, 1997, 1998, 1999, 2000 John D. Polstra.
5 * Copyright 2003 Alexander Kabaev <kan@FreeBSD.ORG>.
6 * Copyright 2009-2013 Konstantin Belousov <kib@FreeBSD.ORG>.
7 * Copyright 2012 John Marino <draco@marino.st>.
8 * Copyright 2014-2017 The FreeBSD Foundation
11 * Portions of this software were developed by Konstantin Belousov
12 * under sponsorship from the FreeBSD Foundation.
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
17 * 1. Redistributions of source code must retain the above copyright
18 * notice, this list of conditions and the following disclaimer.
19 * 2. Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in the
21 * documentation and/or other materials provided with the distribution.
23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 * Dynamic linker for ELF.
38 * John Polstra <jdp@polstra.com>.
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
44 #include <sys/param.h>
45 #include <sys/mount.h>
48 #include <sys/sysctl.h>
50 #include <sys/utsname.h>
51 #include <sys/ktrace.h>
68 #include "rtld_printf.h"
69 #include "rtld_malloc.h"
70 #include "rtld_utrace.h"
74 typedef void (*func_ptr_type)(void);
75 typedef void * (*path_enum_proc) (const char *path, size_t len, void *arg);
78 /* Variables that cannot be static: */
79 extern struct r_debug r_debug; /* For GDB */
80 extern int _thread_autoinit_dummy_decl;
81 extern char* __progname;
82 extern void (*__cleanup)(void);
86 * Function declarations.
88 static const char *basename(const char *);
89 static void digest_dynamic1(Obj_Entry *, int, const Elf_Dyn **,
90 const Elf_Dyn **, const Elf_Dyn **);
91 static void digest_dynamic2(Obj_Entry *, const Elf_Dyn *, const Elf_Dyn *,
93 static void digest_dynamic(Obj_Entry *, int);
94 static Obj_Entry *digest_phdr(const Elf_Phdr *, int, caddr_t, const char *);
95 static void distribute_static_tls(Objlist *, RtldLockState *);
96 static Obj_Entry *dlcheck(void *);
97 static int dlclose_locked(void *, RtldLockState *);
98 static Obj_Entry *dlopen_object(const char *name, int fd, Obj_Entry *refobj,
99 int lo_flags, int mode, RtldLockState *lockstate);
100 static Obj_Entry *do_load_object(int, const char *, char *, struct stat *, int);
101 static int do_search_info(const Obj_Entry *obj, int, struct dl_serinfo *);
102 static bool donelist_check(DoneList *, const Obj_Entry *);
103 static void errmsg_restore(char *);
104 static char *errmsg_save(void);
105 static void *fill_search_info(const char *, size_t, void *);
106 static char *find_library(const char *, const Obj_Entry *, int *);
107 static const char *gethints(bool);
108 static void hold_object(Obj_Entry *);
109 static void unhold_object(Obj_Entry *);
110 static void init_dag(Obj_Entry *);
111 static void init_marker(Obj_Entry *);
112 static void init_pagesizes(Elf_Auxinfo **aux_info);
113 static void init_rtld(caddr_t, Elf_Auxinfo **);
114 static void initlist_add_neededs(Needed_Entry *, Objlist *);
115 static void initlist_add_objects(Obj_Entry *, Obj_Entry *, Objlist *);
116 static int initlist_objects_ifunc(Objlist *, bool, int, RtldLockState *);
117 static void linkmap_add(Obj_Entry *);
118 static void linkmap_delete(Obj_Entry *);
119 static void load_filtees(Obj_Entry *, int flags, RtldLockState *);
120 static void unload_filtees(Obj_Entry *, RtldLockState *);
121 static int load_needed_objects(Obj_Entry *, int);
122 static int load_preload_objects(void);
123 static Obj_Entry *load_object(const char *, int fd, const Obj_Entry *, int);
124 static void map_stacks_exec(RtldLockState *);
125 static int obj_disable_relro(Obj_Entry *);
126 static int obj_enforce_relro(Obj_Entry *);
127 static Obj_Entry *obj_from_addr(const void *);
128 static void objlist_call_fini(Objlist *, Obj_Entry *, RtldLockState *);
129 static void objlist_call_init(Objlist *, RtldLockState *);
130 static void objlist_clear(Objlist *);
131 static Objlist_Entry *objlist_find(Objlist *, const Obj_Entry *);
132 static void objlist_init(Objlist *);
133 static void objlist_push_head(Objlist *, Obj_Entry *);
134 static void objlist_push_tail(Objlist *, Obj_Entry *);
135 static void objlist_put_after(Objlist *, Obj_Entry *, Obj_Entry *);
136 static void objlist_remove(Objlist *, Obj_Entry *);
137 static int open_binary_fd(const char *argv0, bool search_in_path);
138 static int parse_args(char* argv[], int argc, bool *use_pathp, int *fdp);
139 static int parse_integer(const char *);
140 static void *path_enumerate(const char *, path_enum_proc, const char *, void *);
141 static void print_usage(const char *argv0);
142 static void release_object(Obj_Entry *);
143 static int relocate_object_dag(Obj_Entry *root, bool bind_now,
144 Obj_Entry *rtldobj, int flags, RtldLockState *lockstate);
145 static int relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
146 int flags, RtldLockState *lockstate);
147 static int relocate_objects(Obj_Entry *, bool, Obj_Entry *, int,
149 static int resolve_object_ifunc(Obj_Entry *, bool, int, RtldLockState *);
150 static int rtld_dirname(const char *, char *);
151 static int rtld_dirname_abs(const char *, char *);
152 static void *rtld_dlopen(const char *name, int fd, int mode);
153 static void rtld_exit(void);
154 static void rtld_nop_exit(void);
155 static char *search_library_path(const char *, const char *, const char *,
157 static char *search_library_pathfds(const char *, const char *, int *);
158 static const void **get_program_var_addr(const char *, RtldLockState *);
159 static void set_program_var(const char *, const void *);
160 static int symlook_default(SymLook *, const Obj_Entry *refobj);
161 static int symlook_global(SymLook *, DoneList *);
162 static void symlook_init_from_req(SymLook *, const SymLook *);
163 static int symlook_list(SymLook *, const Objlist *, DoneList *);
164 static int symlook_needed(SymLook *, const Needed_Entry *, DoneList *);
165 static int symlook_obj1_sysv(SymLook *, const Obj_Entry *);
166 static int symlook_obj1_gnu(SymLook *, const Obj_Entry *);
167 static void trace_loaded_objects(Obj_Entry *);
168 static void unlink_object(Obj_Entry *);
169 static void unload_object(Obj_Entry *, RtldLockState *lockstate);
170 static void unref_dag(Obj_Entry *);
171 static void ref_dag(Obj_Entry *);
172 static char *origin_subst_one(Obj_Entry *, char *, const char *,
174 static char *origin_subst(Obj_Entry *, const char *);
175 static bool obj_resolve_origin(Obj_Entry *obj);
176 static void preinit_main(void);
177 static int rtld_verify_versions(const Objlist *);
178 static int rtld_verify_object_versions(Obj_Entry *);
179 static void object_add_name(Obj_Entry *, const char *);
180 static int object_match_name(const Obj_Entry *, const char *);
181 static void ld_utrace_log(int, void *, void *, size_t, int, const char *);
182 static void rtld_fill_dl_phdr_info(const Obj_Entry *obj,
183 struct dl_phdr_info *phdr_info);
184 static uint32_t gnu_hash(const char *);
185 static bool matched_symbol(SymLook *, const Obj_Entry *, Sym_Match_Result *,
186 const unsigned long);
188 void r_debug_state(struct r_debug *, struct link_map *) __noinline __exported;
189 void _r_debug_postinit(struct link_map *) __noinline __exported;
191 int __sys_openat(int, const char *, int, ...);
196 static char *error_message; /* Message for dlerror(), or NULL */
197 struct r_debug r_debug __exported; /* for GDB; */
198 static bool libmap_disable; /* Disable libmap */
199 static bool ld_loadfltr; /* Immediate filters processing */
200 static char *libmap_override; /* Maps to use in addition to libmap.conf */
201 static bool trust; /* False for setuid and setgid programs */
202 static bool dangerous_ld_env; /* True if environment variables have been
203 used to affect the libraries loaded */
204 bool ld_bind_not; /* Disable PLT update */
205 static char *ld_bind_now; /* Environment variable for immediate binding */
206 static char *ld_debug; /* Environment variable for debugging */
207 static char *ld_library_path; /* Environment variable for search path */
208 static char *ld_library_dirs; /* Environment variable for library descriptors */
209 static char *ld_preload; /* Environment variable for libraries to
211 static const char *ld_elf_hints_path; /* Environment variable for alternative hints path */
212 static const char *ld_tracing; /* Called from ldd to print libs */
213 static char *ld_utrace; /* Use utrace() to log events. */
214 static struct obj_entry_q obj_list; /* Queue of all loaded objects */
215 static Obj_Entry *obj_main; /* The main program shared object */
216 static Obj_Entry obj_rtld; /* The dynamic linker shared object */
217 static unsigned int obj_count; /* Number of objects in obj_list */
218 static unsigned int obj_loads; /* Number of loads of objects (gen count) */
220 static Objlist list_global = /* Objects dlopened with RTLD_GLOBAL */
221 STAILQ_HEAD_INITIALIZER(list_global);
222 static Objlist list_main = /* Objects loaded at program startup */
223 STAILQ_HEAD_INITIALIZER(list_main);
224 static Objlist list_fini = /* Objects needing fini() calls */
225 STAILQ_HEAD_INITIALIZER(list_fini);
227 Elf_Sym sym_zero; /* For resolving undefined weak refs. */
229 #define GDB_STATE(s,m) r_debug.r_state = s; r_debug_state(&r_debug,m);
231 extern Elf_Dyn _DYNAMIC;
232 #pragma weak _DYNAMIC
234 int dlclose(void *) __exported;
235 char *dlerror(void) __exported;
236 void *dlopen(const char *, int) __exported;
237 void *fdlopen(int, int) __exported;
238 void *dlsym(void *, const char *) __exported;
239 dlfunc_t dlfunc(void *, const char *) __exported;
240 void *dlvsym(void *, const char *, const char *) __exported;
241 int dladdr(const void *, Dl_info *) __exported;
242 void dllockinit(void *, void *(*)(void *), void (*)(void *), void (*)(void *),
243 void (*)(void *), void (*)(void *), void (*)(void *)) __exported;
244 int dlinfo(void *, int , void *) __exported;
245 int dl_iterate_phdr(__dl_iterate_hdr_callback, void *) __exported;
246 int _rtld_addr_phdr(const void *, struct dl_phdr_info *) __exported;
247 int _rtld_get_stack_prot(void) __exported;
248 int _rtld_is_dlopened(void *) __exported;
249 void _rtld_error(const char *, ...) __exported;
251 /* Only here to fix -Wmissing-prototypes warnings */
252 int __getosreldate(void);
253 void __pthread_cxa_finalize(struct dl_phdr_info *a);
254 func_ptr_type _rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp);
255 Elf_Addr _rtld_bind(Obj_Entry *obj, Elf_Size reloff);
259 static int osreldate;
262 static int stack_prot = PROT_READ | PROT_WRITE | RTLD_DEFAULT_STACK_EXEC;
263 static int max_stack_flags;
266 * Global declarations normally provided by crt1. The dynamic linker is
267 * not built with crt1, so we have to provide them ourselves.
273 * Used to pass argc, argv to init functions.
279 * Globals to control TLS allocation.
281 size_t tls_last_offset; /* Static TLS offset of last module */
282 size_t tls_last_size; /* Static TLS size of last module */
283 size_t tls_static_space; /* Static TLS space allocated */
284 static size_t tls_static_max_align;
285 Elf_Addr tls_dtv_generation = 1; /* Used to detect when dtv size changes */
286 int tls_max_index = 1; /* Largest module index allocated */
288 static bool ld_library_path_rpath = false;
291 * Globals for path names, and such
293 const char *ld_elf_hints_default = _PATH_ELF_HINTS;
294 const char *ld_path_libmap_conf = _PATH_LIBMAP_CONF;
295 const char *ld_path_rtld = _PATH_RTLD;
296 const char *ld_standard_library_path = STANDARD_LIBRARY_PATH;
297 const char *ld_env_prefix = LD_;
299 static void (*rtld_exit_ptr)(void);
302 * Fill in a DoneList with an allocation large enough to hold all of
303 * the currently-loaded objects. Keep this as a macro since it calls
304 * alloca and we want that to occur within the scope of the caller.
306 #define donelist_init(dlp) \
307 ((dlp)->objs = alloca(obj_count * sizeof (dlp)->objs[0]), \
308 assert((dlp)->objs != NULL), \
309 (dlp)->num_alloc = obj_count, \
312 #define LD_UTRACE(e, h, mb, ms, r, n) do { \
313 if (ld_utrace != NULL) \
314 ld_utrace_log(e, h, mb, ms, r, n); \
318 ld_utrace_log(int event, void *handle, void *mapbase, size_t mapsize,
319 int refcnt, const char *name)
321 struct utrace_rtld ut;
322 static const char rtld_utrace_sig[RTLD_UTRACE_SIG_SZ] = RTLD_UTRACE_SIG;
324 memcpy(ut.sig, rtld_utrace_sig, sizeof(ut.sig));
327 ut.mapbase = mapbase;
328 ut.mapsize = mapsize;
330 bzero(ut.name, sizeof(ut.name));
332 strlcpy(ut.name, name, sizeof(ut.name));
333 utrace(&ut, sizeof(ut));
336 #ifdef RTLD_VARIANT_ENV_NAMES
338 * construct the env variable based on the type of binary that's
341 static inline const char *
344 static char buffer[128];
346 strlcpy(buffer, ld_env_prefix, sizeof(buffer));
347 strlcat(buffer, var, sizeof(buffer));
355 * Main entry point for dynamic linking. The first argument is the
356 * stack pointer. The stack is expected to be laid out as described
357 * in the SVR4 ABI specification, Intel 386 Processor Supplement.
358 * Specifically, the stack pointer points to a word containing
359 * ARGC. Following that in the stack is a null-terminated sequence
360 * of pointers to argument strings. Then comes a null-terminated
361 * sequence of pointers to environment strings. Finally, there is a
362 * sequence of "auxiliary vector" entries.
364 * The second argument points to a place to store the dynamic linker's
365 * exit procedure pointer and the third to a place to store the main
368 * The return value is the main program's entry point.
371 _rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp)
373 Elf_Auxinfo *aux, *auxp, *auxpf, *aux_info[AT_COUNT];
374 Objlist_Entry *entry;
375 Obj_Entry *last_interposer, *obj, *preload_tail;
376 const Elf_Phdr *phdr;
378 RtldLockState lockstate;
381 char **argv, **env, **envp, *kexecpath, *library_path_rpath;
384 char buf[MAXPATHLEN];
385 int argc, fd, i, phnum, rtld_argc;
386 bool dir_enable, explicit_fd, search_in_path;
389 * On entry, the dynamic linker itself has not been relocated yet.
390 * Be very careful not to reference any global data until after
391 * init_rtld has returned. It is OK to reference file-scope statics
392 * and string constants, and to call static and global functions.
395 /* Find the auxiliary vector on the stack. */
399 sp += argc + 1; /* Skip over arguments and NULL terminator */
401 while (*sp++ != 0) /* Skip over environment, and NULL terminator */
403 aux = (Elf_Auxinfo *) sp;
405 /* Digest the auxiliary vector. */
406 for (i = 0; i < AT_COUNT; i++)
408 for (auxp = aux; auxp->a_type != AT_NULL; auxp++) {
409 if (auxp->a_type < AT_COUNT)
410 aux_info[auxp->a_type] = auxp;
413 /* Initialize and relocate ourselves. */
414 assert(aux_info[AT_BASE] != NULL);
415 init_rtld((caddr_t) aux_info[AT_BASE]->a_un.a_ptr, aux_info);
417 __progname = obj_rtld.path;
418 argv0 = argv[0] != NULL ? argv[0] : "(null)";
423 trust = !issetugid();
425 md_abi_variant_hook(aux_info);
428 if (aux_info[AT_EXECFD] != NULL) {
429 fd = aux_info[AT_EXECFD]->a_un.a_val;
431 assert(aux_info[AT_PHDR] != NULL);
432 phdr = (const Elf_Phdr *)aux_info[AT_PHDR]->a_un.a_ptr;
433 if (phdr == obj_rtld.phdr) {
435 _rtld_error("Tainted process refusing to run binary %s",
439 dbg("opening main program in direct exec mode");
441 rtld_argc = parse_args(argv, argc, &search_in_path, &fd);
442 argv0 = argv[rtld_argc];
443 explicit_fd = (fd != -1);
445 fd = open_binary_fd(argv0, search_in_path);
446 if (fstat(fd, &st) == -1) {
447 _rtld_error("Failed to fstat FD %d (%s): %s", fd,
448 explicit_fd ? "user-provided descriptor" : argv0,
449 rtld_strerror(errno));
454 * Rough emulation of the permission checks done by
455 * execve(2), only Unix DACs are checked, ACLs are
456 * ignored. Preserve the semantic of disabling owner
457 * to execute if owner x bit is cleared, even if
458 * others x bit is enabled.
459 * mmap(2) does not allow to mmap with PROT_EXEC if
460 * binary' file comes from noexec mount. We cannot
461 * set a text reference on the binary.
464 if (st.st_uid == geteuid()) {
465 if ((st.st_mode & S_IXUSR) != 0)
467 } else if (st.st_gid == getegid()) {
468 if ((st.st_mode & S_IXGRP) != 0)
470 } else if ((st.st_mode & S_IXOTH) != 0) {
474 _rtld_error("No execute permission for binary %s",
480 * For direct exec mode, argv[0] is the interpreter
481 * name, we must remove it and shift arguments left
482 * before invoking binary main. Since stack layout
483 * places environment pointers and aux vectors right
484 * after the terminating NULL, we must shift
485 * environment and aux as well.
487 main_argc = argc - rtld_argc;
488 for (i = 0; i <= main_argc; i++)
489 argv[i] = argv[i + rtld_argc];
491 environ = env = envp = argv + main_argc + 1;
493 *envp = *(envp + rtld_argc);
495 } while (*envp != NULL);
496 aux = auxp = (Elf_Auxinfo *)envp;
497 auxpf = (Elf_Auxinfo *)(envp + rtld_argc);
498 for (;; auxp++, auxpf++) {
500 if (auxp->a_type == AT_NULL)
504 _rtld_error("No binary");
510 ld_bind_now = getenv(_LD("BIND_NOW"));
513 * If the process is tainted, then we un-set the dangerous environment
514 * variables. The process will be marked as tainted until setuid(2)
515 * is called. If any child process calls setuid(2) we do not want any
516 * future processes to honor the potentially un-safe variables.
519 if (unsetenv(_LD("PRELOAD")) || unsetenv(_LD("LIBMAP")) ||
520 unsetenv(_LD("LIBRARY_PATH")) || unsetenv(_LD("LIBRARY_PATH_FDS")) ||
521 unsetenv(_LD("LIBMAP_DISABLE")) || unsetenv(_LD("BIND_NOT")) ||
522 unsetenv(_LD("DEBUG")) || unsetenv(_LD("ELF_HINTS_PATH")) ||
523 unsetenv(_LD("LOADFLTR")) || unsetenv(_LD("LIBRARY_PATH_RPATH"))) {
524 _rtld_error("environment corrupt; aborting");
528 ld_debug = getenv(_LD("DEBUG"));
529 if (ld_bind_now == NULL)
530 ld_bind_not = getenv(_LD("BIND_NOT")) != NULL;
531 libmap_disable = getenv(_LD("LIBMAP_DISABLE")) != NULL;
532 libmap_override = getenv(_LD("LIBMAP"));
533 ld_library_path = getenv(_LD("LIBRARY_PATH"));
534 ld_library_dirs = getenv(_LD("LIBRARY_PATH_FDS"));
535 ld_preload = getenv(_LD("PRELOAD"));
536 ld_elf_hints_path = getenv(_LD("ELF_HINTS_PATH"));
537 ld_loadfltr = getenv(_LD("LOADFLTR")) != NULL;
538 library_path_rpath = getenv(_LD("LIBRARY_PATH_RPATH"));
539 if (library_path_rpath != NULL) {
540 if (library_path_rpath[0] == 'y' ||
541 library_path_rpath[0] == 'Y' ||
542 library_path_rpath[0] == '1')
543 ld_library_path_rpath = true;
545 ld_library_path_rpath = false;
547 dangerous_ld_env = libmap_disable || (libmap_override != NULL) ||
548 (ld_library_path != NULL) || (ld_preload != NULL) ||
549 (ld_elf_hints_path != NULL) || ld_loadfltr;
550 ld_tracing = getenv(_LD("TRACE_LOADED_OBJECTS"));
551 ld_utrace = getenv(_LD("UTRACE"));
553 if ((ld_elf_hints_path == NULL) || strlen(ld_elf_hints_path) == 0)
554 ld_elf_hints_path = ld_elf_hints_default;
556 if (ld_debug != NULL && *ld_debug != '\0')
558 dbg("%s is initialized, base address = %p", __progname,
559 (caddr_t) aux_info[AT_BASE]->a_un.a_ptr);
560 dbg("RTLD dynamic = %p", obj_rtld.dynamic);
561 dbg("RTLD pltgot = %p", obj_rtld.pltgot);
563 dbg("initializing thread locks");
567 * Load the main program, or process its program header if it is
570 if (fd != -1) { /* Load the main program. */
571 dbg("loading main program");
572 obj_main = map_object(fd, argv0, NULL);
574 if (obj_main == NULL)
576 max_stack_flags = obj_main->stack_flags;
577 } else { /* Main program already loaded. */
578 dbg("processing main program's program header");
579 assert(aux_info[AT_PHDR] != NULL);
580 phdr = (const Elf_Phdr *) aux_info[AT_PHDR]->a_un.a_ptr;
581 assert(aux_info[AT_PHNUM] != NULL);
582 phnum = aux_info[AT_PHNUM]->a_un.a_val;
583 assert(aux_info[AT_PHENT] != NULL);
584 assert(aux_info[AT_PHENT]->a_un.a_val == sizeof(Elf_Phdr));
585 assert(aux_info[AT_ENTRY] != NULL);
586 imgentry = (caddr_t) aux_info[AT_ENTRY]->a_un.a_ptr;
587 if ((obj_main = digest_phdr(phdr, phnum, imgentry, argv0)) == NULL)
591 if (aux_info[AT_EXECPATH] != NULL && fd == -1) {
592 kexecpath = aux_info[AT_EXECPATH]->a_un.a_ptr;
593 dbg("AT_EXECPATH %p %s", kexecpath, kexecpath);
594 if (kexecpath[0] == '/')
595 obj_main->path = kexecpath;
596 else if (getcwd(buf, sizeof(buf)) == NULL ||
597 strlcat(buf, "/", sizeof(buf)) >= sizeof(buf) ||
598 strlcat(buf, kexecpath, sizeof(buf)) >= sizeof(buf))
599 obj_main->path = xstrdup(argv0);
601 obj_main->path = xstrdup(buf);
603 dbg("No AT_EXECPATH or direct exec");
604 obj_main->path = xstrdup(argv0);
606 dbg("obj_main path %s", obj_main->path);
607 obj_main->mainprog = true;
609 if (aux_info[AT_STACKPROT] != NULL &&
610 aux_info[AT_STACKPROT]->a_un.a_val != 0)
611 stack_prot = aux_info[AT_STACKPROT]->a_un.a_val;
615 * Get the actual dynamic linker pathname from the executable if
616 * possible. (It should always be possible.) That ensures that
617 * gdb will find the right dynamic linker even if a non-standard
620 if (obj_main->interp != NULL &&
621 strcmp(obj_main->interp, obj_rtld.path) != 0) {
623 obj_rtld.path = xstrdup(obj_main->interp);
624 __progname = obj_rtld.path;
628 digest_dynamic(obj_main, 0);
629 dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d",
630 obj_main->path, obj_main->valid_hash_sysv, obj_main->valid_hash_gnu,
631 obj_main->dynsymcount);
633 linkmap_add(obj_main);
634 linkmap_add(&obj_rtld);
636 /* Link the main program into the list of objects. */
637 TAILQ_INSERT_HEAD(&obj_list, obj_main, next);
641 /* Initialize a fake symbol for resolving undefined weak references. */
642 sym_zero.st_info = ELF_ST_INFO(STB_GLOBAL, STT_NOTYPE);
643 sym_zero.st_shndx = SHN_UNDEF;
644 sym_zero.st_value = -(uintptr_t)obj_main->relocbase;
647 libmap_disable = (bool)lm_init(libmap_override);
649 dbg("loading LD_PRELOAD libraries");
650 if (load_preload_objects() == -1)
652 preload_tail = globallist_curr(TAILQ_LAST(&obj_list, obj_entry_q));
654 dbg("loading needed objects");
655 if (load_needed_objects(obj_main, 0) == -1)
658 /* Make a list of all objects loaded at startup. */
659 last_interposer = obj_main;
660 TAILQ_FOREACH(obj, &obj_list, next) {
663 if (obj->z_interpose && obj != obj_main) {
664 objlist_put_after(&list_main, last_interposer, obj);
665 last_interposer = obj;
667 objlist_push_tail(&list_main, obj);
672 dbg("checking for required versions");
673 if (rtld_verify_versions(&list_main) == -1 && !ld_tracing)
676 if (ld_tracing) { /* We're done */
677 trace_loaded_objects(obj_main);
681 if (getenv(_LD("DUMP_REL_PRE")) != NULL) {
682 dump_relocations(obj_main);
687 * Processing tls relocations requires having the tls offsets
688 * initialized. Prepare offsets before starting initial
689 * relocation processing.
691 dbg("initializing initial thread local storage offsets");
692 STAILQ_FOREACH(entry, &list_main, link) {
694 * Allocate all the initial objects out of the static TLS
695 * block even if they didn't ask for it.
697 allocate_tls_offset(entry->obj);
700 if (relocate_objects(obj_main,
701 ld_bind_now != NULL && *ld_bind_now != '\0',
702 &obj_rtld, SYMLOOK_EARLY, NULL) == -1)
705 dbg("doing copy relocations");
706 if (do_copy_relocations(obj_main) == -1)
709 if (getenv(_LD("DUMP_REL_POST")) != NULL) {
710 dump_relocations(obj_main);
717 * Setup TLS for main thread. This must be done after the
718 * relocations are processed, since tls initialization section
719 * might be the subject for relocations.
721 dbg("initializing initial thread local storage");
722 allocate_initial_tls(globallist_curr(TAILQ_FIRST(&obj_list)));
724 dbg("initializing key program variables");
725 set_program_var("__progname", argv[0] != NULL ? basename(argv[0]) : "");
726 set_program_var("environ", env);
727 set_program_var("__elf_aux_vector", aux);
729 /* Make a list of init functions to call. */
730 objlist_init(&initlist);
731 initlist_add_objects(globallist_curr(TAILQ_FIRST(&obj_list)),
732 preload_tail, &initlist);
734 r_debug_state(NULL, &obj_main->linkmap); /* say hello to gdb! */
736 map_stacks_exec(NULL);
738 if (!obj_main->crt_no_init) {
740 * Make sure we don't call the main program's init and fini
741 * functions for binaries linked with old crt1 which calls
744 obj_main->init = obj_main->fini = (Elf_Addr)NULL;
745 obj_main->preinit_array = obj_main->init_array =
746 obj_main->fini_array = (Elf_Addr)NULL;
750 * Execute MD initializers required before we call the objects'
755 wlock_acquire(rtld_bind_lock, &lockstate);
757 dbg("resolving ifuncs");
758 if (initlist_objects_ifunc(&initlist, ld_bind_now != NULL &&
759 *ld_bind_now != '\0', SYMLOOK_EARLY, &lockstate) == -1)
762 rtld_exit_ptr = rtld_exit;
763 if (obj_main->crt_no_init)
765 objlist_call_init(&initlist, &lockstate);
766 _r_debug_postinit(&obj_main->linkmap);
767 objlist_clear(&initlist);
768 dbg("loading filtees");
769 TAILQ_FOREACH(obj, &obj_list, next) {
772 if (ld_loadfltr || obj->z_loadfltr)
773 load_filtees(obj, 0, &lockstate);
776 dbg("enforcing main obj relro");
777 if (obj_enforce_relro(obj_main) == -1)
780 lock_release(rtld_bind_lock, &lockstate);
782 dbg("transferring control to program entry point = %p", obj_main->entry);
784 /* Return the exit procedure and the program entry point. */
785 *exit_proc = rtld_exit_ptr;
787 return (func_ptr_type) obj_main->entry;
791 rtld_resolve_ifunc(const Obj_Entry *obj, const Elf_Sym *def)
796 ptr = (void *)make_function_pointer(def, obj);
797 target = call_ifunc_resolver(ptr);
798 return ((void *)target);
802 * NB: MIPS uses a private version of this function (_mips_rtld_bind).
803 * Changes to this function should be applied there as well.
806 _rtld_bind(Obj_Entry *obj, Elf_Size reloff)
810 const Obj_Entry *defobj;
813 RtldLockState lockstate;
815 rlock_acquire(rtld_bind_lock, &lockstate);
816 if (sigsetjmp(lockstate.env, 0) != 0)
817 lock_upgrade(rtld_bind_lock, &lockstate);
819 rel = (const Elf_Rel *)((const char *)obj->pltrel + reloff);
821 rel = (const Elf_Rel *)((const char *)obj->pltrela + reloff);
823 where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
824 def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, SYMLOOK_IN_PLT,
828 if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
829 target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
831 target = (Elf_Addr)(defobj->relocbase + def->st_value);
833 dbg("\"%s\" in \"%s\" ==> %p in \"%s\"",
834 defobj->strtab + def->st_name, basename(obj->path),
835 (void *)target, basename(defobj->path));
838 * Write the new contents for the jmpslot. Note that depending on
839 * architecture, the value which we need to return back to the
840 * lazy binding trampoline may or may not be the target
841 * address. The value returned from reloc_jmpslot() is the value
842 * that the trampoline needs.
844 target = reloc_jmpslot(where, target, defobj, obj, rel);
845 lock_release(rtld_bind_lock, &lockstate);
850 * Error reporting function. Use it like printf. If formats the message
851 * into a buffer, and sets things up so that the next call to dlerror()
852 * will return the message.
855 _rtld_error(const char *fmt, ...)
857 static char buf[512];
861 rtld_vsnprintf(buf, sizeof buf, fmt, ap);
864 LD_UTRACE(UTRACE_RTLD_ERROR, NULL, NULL, 0, 0, error_message);
868 * Return a dynamically-allocated copy of the current error message, if any.
873 return error_message == NULL ? NULL : xstrdup(error_message);
877 * Restore the current error message from a copy which was previously saved
878 * by errmsg_save(). The copy is freed.
881 errmsg_restore(char *saved_msg)
883 if (saved_msg == NULL)
884 error_message = NULL;
886 _rtld_error("%s", saved_msg);
892 basename(const char *name)
894 const char *p = strrchr(name, '/');
895 return p != NULL ? p + 1 : name;
898 static struct utsname uts;
901 origin_subst_one(Obj_Entry *obj, char *real, const char *kw,
902 const char *subst, bool may_free)
904 char *p, *p1, *res, *resp;
905 int subst_len, kw_len, subst_count, old_len, new_len;
910 * First, count the number of the keyword occurrences, to
911 * preallocate the final string.
913 for (p = real, subst_count = 0;; p = p1 + kw_len, subst_count++) {
920 * If the keyword is not found, just return.
922 * Return non-substituted string if resolution failed. We
923 * cannot do anything more reasonable, the failure mode of the
924 * caller is unresolved library anyway.
926 if (subst_count == 0 || (obj != NULL && !obj_resolve_origin(obj)))
927 return (may_free ? real : xstrdup(real));
929 subst = obj->origin_path;
932 * There is indeed something to substitute. Calculate the
933 * length of the resulting string, and allocate it.
935 subst_len = strlen(subst);
936 old_len = strlen(real);
937 new_len = old_len + (subst_len - kw_len) * subst_count;
938 res = xmalloc(new_len + 1);
941 * Now, execute the substitution loop.
943 for (p = real, resp = res, *resp = '\0';;) {
946 /* Copy the prefix before keyword. */
947 memcpy(resp, p, p1 - p);
949 /* Keyword replacement. */
950 memcpy(resp, subst, subst_len);
958 /* Copy to the end of string and finish. */
966 origin_subst(Obj_Entry *obj, const char *real)
968 char *res1, *res2, *res3, *res4;
970 if (obj == NULL || !trust)
971 return (xstrdup(real));
972 if (uts.sysname[0] == '\0') {
973 if (uname(&uts) != 0) {
974 _rtld_error("utsname failed: %d", errno);
978 /* __DECONST is safe here since without may_free real is unchanged */
979 res1 = origin_subst_one(obj, __DECONST(char *, real), "$ORIGIN", NULL,
981 res2 = origin_subst_one(NULL, res1, "$OSNAME", uts.sysname, true);
982 res3 = origin_subst_one(NULL, res2, "$OSREL", uts.release, true);
983 res4 = origin_subst_one(NULL, res3, "$PLATFORM", uts.machine, true);
990 const char *msg = dlerror();
994 rtld_fdputstr(STDERR_FILENO, _BASENAME_RTLD ": ");
995 rtld_fdputstr(STDERR_FILENO, msg);
996 rtld_fdputchar(STDERR_FILENO, '\n');
1001 * Process a shared object's DYNAMIC section, and save the important
1002 * information in its Obj_Entry structure.
1005 digest_dynamic1(Obj_Entry *obj, int early, const Elf_Dyn **dyn_rpath,
1006 const Elf_Dyn **dyn_soname, const Elf_Dyn **dyn_runpath)
1008 const Elf_Dyn *dynp;
1009 Needed_Entry **needed_tail = &obj->needed;
1010 Needed_Entry **needed_filtees_tail = &obj->needed_filtees;
1011 Needed_Entry **needed_aux_filtees_tail = &obj->needed_aux_filtees;
1012 const Elf_Hashelt *hashtab;
1013 const Elf32_Word *hashval;
1014 Elf32_Word bkt, nmaskwords;
1016 int plttype = DT_REL;
1020 *dyn_runpath = NULL;
1022 obj->bind_now = false;
1023 for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) {
1024 switch (dynp->d_tag) {
1027 obj->rel = (const Elf_Rel *)(obj->relocbase + dynp->d_un.d_ptr);
1031 obj->relsize = dynp->d_un.d_val;
1035 assert(dynp->d_un.d_val == sizeof(Elf_Rel));
1039 obj->pltrel = (const Elf_Rel *)
1040 (obj->relocbase + dynp->d_un.d_ptr);
1044 obj->pltrelsize = dynp->d_un.d_val;
1048 obj->rela = (const Elf_Rela *)(obj->relocbase + dynp->d_un.d_ptr);
1052 obj->relasize = dynp->d_un.d_val;
1056 assert(dynp->d_un.d_val == sizeof(Elf_Rela));
1060 plttype = dynp->d_un.d_val;
1061 assert(dynp->d_un.d_val == DT_REL || plttype == DT_RELA);
1065 obj->symtab = (const Elf_Sym *)
1066 (obj->relocbase + dynp->d_un.d_ptr);
1070 assert(dynp->d_un.d_val == sizeof(Elf_Sym));
1074 obj->strtab = (const char *)(obj->relocbase + dynp->d_un.d_ptr);
1078 obj->strsize = dynp->d_un.d_val;
1082 obj->verneed = (const Elf_Verneed *)(obj->relocbase +
1087 obj->verneednum = dynp->d_un.d_val;
1091 obj->verdef = (const Elf_Verdef *)(obj->relocbase +
1096 obj->verdefnum = dynp->d_un.d_val;
1100 obj->versyms = (const Elf_Versym *)(obj->relocbase +
1106 hashtab = (const Elf_Hashelt *)(obj->relocbase +
1108 obj->nbuckets = hashtab[0];
1109 obj->nchains = hashtab[1];
1110 obj->buckets = hashtab + 2;
1111 obj->chains = obj->buckets + obj->nbuckets;
1112 obj->valid_hash_sysv = obj->nbuckets > 0 && obj->nchains > 0 &&
1113 obj->buckets != NULL;
1119 hashtab = (const Elf_Hashelt *)(obj->relocbase +
1121 obj->nbuckets_gnu = hashtab[0];
1122 obj->symndx_gnu = hashtab[1];
1123 nmaskwords = hashtab[2];
1124 bloom_size32 = (__ELF_WORD_SIZE / 32) * nmaskwords;
1125 obj->maskwords_bm_gnu = nmaskwords - 1;
1126 obj->shift2_gnu = hashtab[3];
1127 obj->bloom_gnu = (const Elf_Addr *)(hashtab + 4);
1128 obj->buckets_gnu = hashtab + 4 + bloom_size32;
1129 obj->chain_zero_gnu = obj->buckets_gnu + obj->nbuckets_gnu -
1131 /* Number of bitmask words is required to be power of 2 */
1132 obj->valid_hash_gnu = powerof2(nmaskwords) &&
1133 obj->nbuckets_gnu > 0 && obj->buckets_gnu != NULL;
1139 Needed_Entry *nep = NEW(Needed_Entry);
1140 nep->name = dynp->d_un.d_val;
1145 needed_tail = &nep->next;
1151 Needed_Entry *nep = NEW(Needed_Entry);
1152 nep->name = dynp->d_un.d_val;
1156 *needed_filtees_tail = nep;
1157 needed_filtees_tail = &nep->next;
1163 Needed_Entry *nep = NEW(Needed_Entry);
1164 nep->name = dynp->d_un.d_val;
1168 *needed_aux_filtees_tail = nep;
1169 needed_aux_filtees_tail = &nep->next;
1174 obj->pltgot = (Elf_Addr *)(obj->relocbase + dynp->d_un.d_ptr);
1178 obj->textrel = true;
1182 obj->symbolic = true;
1187 * We have to wait until later to process this, because we
1188 * might not have gotten the address of the string table yet.
1198 *dyn_runpath = dynp;
1202 obj->init = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
1205 case DT_PREINIT_ARRAY:
1206 obj->preinit_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
1209 case DT_PREINIT_ARRAYSZ:
1210 obj->preinit_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
1214 obj->init_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
1217 case DT_INIT_ARRAYSZ:
1218 obj->init_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
1222 obj->fini = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
1226 obj->fini_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
1229 case DT_FINI_ARRAYSZ:
1230 obj->fini_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
1234 * Don't process DT_DEBUG on MIPS as the dynamic section
1235 * is mapped read-only. DT_MIPS_RLD_MAP is used instead.
1241 dbg("Filling in DT_DEBUG entry");
1242 (__DECONST(Elf_Dyn *, dynp))->d_un.d_ptr = (Elf_Addr)&r_debug;
1247 if (dynp->d_un.d_val & DF_ORIGIN)
1248 obj->z_origin = true;
1249 if (dynp->d_un.d_val & DF_SYMBOLIC)
1250 obj->symbolic = true;
1251 if (dynp->d_un.d_val & DF_TEXTREL)
1252 obj->textrel = true;
1253 if (dynp->d_un.d_val & DF_BIND_NOW)
1254 obj->bind_now = true;
1255 if (dynp->d_un.d_val & DF_STATIC_TLS)
1256 obj->static_tls = true;
1259 case DT_MIPS_LOCAL_GOTNO:
1260 obj->local_gotno = dynp->d_un.d_val;
1263 case DT_MIPS_SYMTABNO:
1264 obj->symtabno = dynp->d_un.d_val;
1267 case DT_MIPS_GOTSYM:
1268 obj->gotsym = dynp->d_un.d_val;
1271 case DT_MIPS_RLD_MAP:
1272 *((Elf_Addr *)(dynp->d_un.d_ptr)) = (Elf_Addr) &r_debug;
1275 case DT_MIPS_PLTGOT:
1276 obj->mips_pltgot = (Elf_Addr *)(obj->relocbase +
1282 #ifdef __powerpc64__
1283 case DT_PPC64_GLINK:
1284 obj->glink = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
1289 if (dynp->d_un.d_val & DF_1_NOOPEN)
1290 obj->z_noopen = true;
1291 if (dynp->d_un.d_val & DF_1_ORIGIN)
1292 obj->z_origin = true;
1293 if (dynp->d_un.d_val & DF_1_GLOBAL)
1294 obj->z_global = true;
1295 if (dynp->d_un.d_val & DF_1_BIND_NOW)
1296 obj->bind_now = true;
1297 if (dynp->d_un.d_val & DF_1_NODELETE)
1298 obj->z_nodelete = true;
1299 if (dynp->d_un.d_val & DF_1_LOADFLTR)
1300 obj->z_loadfltr = true;
1301 if (dynp->d_un.d_val & DF_1_INTERPOSE)
1302 obj->z_interpose = true;
1303 if (dynp->d_un.d_val & DF_1_NODEFLIB)
1304 obj->z_nodeflib = true;
1309 dbg("Ignoring d_tag %ld = %#lx", (long)dynp->d_tag,
1316 obj->traced = false;
1318 if (plttype == DT_RELA) {
1319 obj->pltrela = (const Elf_Rela *) obj->pltrel;
1321 obj->pltrelasize = obj->pltrelsize;
1322 obj->pltrelsize = 0;
1325 /* Determine size of dynsym table (equal to nchains of sysv hash) */
1326 if (obj->valid_hash_sysv)
1327 obj->dynsymcount = obj->nchains;
1328 else if (obj->valid_hash_gnu) {
1329 obj->dynsymcount = 0;
1330 for (bkt = 0; bkt < obj->nbuckets_gnu; bkt++) {
1331 if (obj->buckets_gnu[bkt] == 0)
1333 hashval = &obj->chain_zero_gnu[obj->buckets_gnu[bkt]];
1336 while ((*hashval++ & 1u) == 0);
1338 obj->dynsymcount += obj->symndx_gnu;
1343 obj_resolve_origin(Obj_Entry *obj)
1346 if (obj->origin_path != NULL)
1348 obj->origin_path = xmalloc(PATH_MAX);
1349 return (rtld_dirname_abs(obj->path, obj->origin_path) != -1);
1353 digest_dynamic2(Obj_Entry *obj, const Elf_Dyn *dyn_rpath,
1354 const Elf_Dyn *dyn_soname, const Elf_Dyn *dyn_runpath)
1357 if (obj->z_origin && !obj_resolve_origin(obj))
1360 if (dyn_runpath != NULL) {
1361 obj->runpath = (const char *)obj->strtab + dyn_runpath->d_un.d_val;
1362 obj->runpath = origin_subst(obj, obj->runpath);
1363 } else if (dyn_rpath != NULL) {
1364 obj->rpath = (const char *)obj->strtab + dyn_rpath->d_un.d_val;
1365 obj->rpath = origin_subst(obj, obj->rpath);
1367 if (dyn_soname != NULL)
1368 object_add_name(obj, obj->strtab + dyn_soname->d_un.d_val);
1372 digest_dynamic(Obj_Entry *obj, int early)
1374 const Elf_Dyn *dyn_rpath;
1375 const Elf_Dyn *dyn_soname;
1376 const Elf_Dyn *dyn_runpath;
1378 digest_dynamic1(obj, early, &dyn_rpath, &dyn_soname, &dyn_runpath);
1379 digest_dynamic2(obj, dyn_rpath, dyn_soname, dyn_runpath);
1383 * Process a shared object's program header. This is used only for the
1384 * main program, when the kernel has already loaded the main program
1385 * into memory before calling the dynamic linker. It creates and
1386 * returns an Obj_Entry structure.
1389 digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry, const char *path)
1392 const Elf_Phdr *phlimit = phdr + phnum;
1394 Elf_Addr note_start, note_end;
1398 for (ph = phdr; ph < phlimit; ph++) {
1399 if (ph->p_type != PT_PHDR)
1403 obj->phsize = ph->p_memsz;
1404 obj->relocbase = __DECONST(char *, phdr) - ph->p_vaddr;
1408 obj->stack_flags = PF_X | PF_R | PF_W;
1410 for (ph = phdr; ph < phlimit; ph++) {
1411 switch (ph->p_type) {
1414 obj->interp = (const char *)(ph->p_vaddr + obj->relocbase);
1418 if (nsegs == 0) { /* First load segment */
1419 obj->vaddrbase = trunc_page(ph->p_vaddr);
1420 obj->mapbase = obj->vaddrbase + obj->relocbase;
1421 obj->textsize = round_page(ph->p_vaddr + ph->p_memsz) -
1423 } else { /* Last load segment */
1424 obj->mapsize = round_page(ph->p_vaddr + ph->p_memsz) -
1431 obj->dynamic = (const Elf_Dyn *)(ph->p_vaddr + obj->relocbase);
1436 obj->tlssize = ph->p_memsz;
1437 obj->tlsalign = ph->p_align;
1438 obj->tlsinitsize = ph->p_filesz;
1439 obj->tlsinit = (void*)(ph->p_vaddr + obj->relocbase);
1443 obj->stack_flags = ph->p_flags;
1447 obj->relro_page = obj->relocbase + trunc_page(ph->p_vaddr);
1448 obj->relro_size = round_page(ph->p_memsz);
1452 note_start = (Elf_Addr)obj->relocbase + ph->p_vaddr;
1453 note_end = note_start + ph->p_filesz;
1454 digest_notes(obj, note_start, note_end);
1459 _rtld_error("%s: too few PT_LOAD segments", path);
1468 digest_notes(Obj_Entry *obj, Elf_Addr note_start, Elf_Addr note_end)
1470 const Elf_Note *note;
1471 const char *note_name;
1474 for (note = (const Elf_Note *)note_start; (Elf_Addr)note < note_end;
1475 note = (const Elf_Note *)((const char *)(note + 1) +
1476 roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
1477 roundup2(note->n_descsz, sizeof(Elf32_Addr)))) {
1478 if (note->n_namesz != sizeof(NOTE_FREEBSD_VENDOR) ||
1479 note->n_descsz != sizeof(int32_t))
1481 if (note->n_type != NT_FREEBSD_ABI_TAG &&
1482 note->n_type != NT_FREEBSD_FEATURE_CTL &&
1483 note->n_type != NT_FREEBSD_NOINIT_TAG)
1485 note_name = (const char *)(note + 1);
1486 if (strncmp(NOTE_FREEBSD_VENDOR, note_name,
1487 sizeof(NOTE_FREEBSD_VENDOR)) != 0)
1489 switch (note->n_type) {
1490 case NT_FREEBSD_ABI_TAG:
1491 /* FreeBSD osrel note */
1492 p = (uintptr_t)(note + 1);
1493 p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
1494 obj->osrel = *(const int32_t *)(p);
1495 dbg("note osrel %d", obj->osrel);
1497 case NT_FREEBSD_FEATURE_CTL:
1498 /* FreeBSD ABI feature control note */
1499 p = (uintptr_t)(note + 1);
1500 p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
1501 obj->fctl0 = *(const uint32_t *)(p);
1502 dbg("note fctl0 %#x", obj->fctl0);
1504 case NT_FREEBSD_NOINIT_TAG:
1505 /* FreeBSD 'crt does not call init' note */
1506 obj->crt_no_init = true;
1507 dbg("note crt_no_init");
1514 dlcheck(void *handle)
1518 TAILQ_FOREACH(obj, &obj_list, next) {
1519 if (obj == (Obj_Entry *) handle)
1523 if (obj == NULL || obj->refcount == 0 || obj->dl_refcount == 0) {
1524 _rtld_error("Invalid shared object handle %p", handle);
1531 * If the given object is already in the donelist, return true. Otherwise
1532 * add the object to the list and return false.
1535 donelist_check(DoneList *dlp, const Obj_Entry *obj)
1539 for (i = 0; i < dlp->num_used; i++)
1540 if (dlp->objs[i] == obj)
1543 * Our donelist allocation should always be sufficient. But if
1544 * our threads locking isn't working properly, more shared objects
1545 * could have been loaded since we allocated the list. That should
1546 * never happen, but we'll handle it properly just in case it does.
1548 if (dlp->num_used < dlp->num_alloc)
1549 dlp->objs[dlp->num_used++] = obj;
1554 * Hash function for symbol table lookup. Don't even think about changing
1555 * this. It is specified by the System V ABI.
1558 elf_hash(const char *name)
1560 const unsigned char *p = (const unsigned char *) name;
1561 unsigned long h = 0;
1564 while (*p != '\0') {
1565 h = (h << 4) + *p++;
1566 if ((g = h & 0xf0000000) != 0)
1574 * The GNU hash function is the Daniel J. Bernstein hash clipped to 32 bits
1575 * unsigned in case it's implemented with a wider type.
1578 gnu_hash(const char *s)
1584 for (c = *s; c != '\0'; c = *++s)
1586 return (h & 0xffffffff);
1591 * Find the library with the given name, and return its full pathname.
1592 * The returned string is dynamically allocated. Generates an error
1593 * message and returns NULL if the library cannot be found.
1595 * If the second argument is non-NULL, then it refers to an already-
1596 * loaded shared object, whose library search path will be searched.
1598 * If a library is successfully located via LD_LIBRARY_PATH_FDS, its
1599 * descriptor (which is close-on-exec) will be passed out via the third
1602 * The search order is:
1603 * DT_RPATH in the referencing file _unless_ DT_RUNPATH is present (1)
1604 * DT_RPATH of the main object if DSO without defined DT_RUNPATH (1)
1606 * DT_RUNPATH in the referencing file
1607 * ldconfig hints (if -z nodefaultlib, filter out default library directories
1609 * /lib:/usr/lib _unless_ the referencing file is linked with -z nodefaultlib
1611 * (1) Handled in digest_dynamic2 - rpath left NULL if runpath defined.
1614 find_library(const char *xname, const Obj_Entry *refobj, int *fdp)
1616 char *pathname, *refobj_path;
1618 bool nodeflib, objgiven;
1620 objgiven = refobj != NULL;
1622 if (libmap_disable || !objgiven ||
1623 (name = lm_find(refobj->path, xname)) == NULL)
1626 if (strchr(name, '/') != NULL) { /* Hard coded pathname */
1627 if (name[0] != '/' && !trust) {
1628 _rtld_error("Absolute pathname required "
1629 "for shared object \"%s\"", name);
1632 return (origin_subst(__DECONST(Obj_Entry *, refobj),
1633 __DECONST(char *, name)));
1636 dbg(" Searching for \"%s\"", name);
1637 refobj_path = objgiven ? refobj->path : NULL;
1640 * If refobj->rpath != NULL, then refobj->runpath is NULL. Fall
1641 * back to pre-conforming behaviour if user requested so with
1642 * LD_LIBRARY_PATH_RPATH environment variable and ignore -z
1645 if (objgiven && refobj->rpath != NULL && ld_library_path_rpath) {
1646 pathname = search_library_path(name, ld_library_path,
1648 if (pathname != NULL)
1650 if (refobj != NULL) {
1651 pathname = search_library_path(name, refobj->rpath,
1653 if (pathname != NULL)
1656 pathname = search_library_pathfds(name, ld_library_dirs, fdp);
1657 if (pathname != NULL)
1659 pathname = search_library_path(name, gethints(false),
1661 if (pathname != NULL)
1663 pathname = search_library_path(name, ld_standard_library_path,
1665 if (pathname != NULL)
1668 nodeflib = objgiven ? refobj->z_nodeflib : false;
1670 pathname = search_library_path(name, refobj->rpath,
1672 if (pathname != NULL)
1675 if (objgiven && refobj->runpath == NULL && refobj != obj_main) {
1676 pathname = search_library_path(name, obj_main->rpath,
1678 if (pathname != NULL)
1681 pathname = search_library_path(name, ld_library_path,
1683 if (pathname != NULL)
1686 pathname = search_library_path(name, refobj->runpath,
1688 if (pathname != NULL)
1691 pathname = search_library_pathfds(name, ld_library_dirs, fdp);
1692 if (pathname != NULL)
1694 pathname = search_library_path(name, gethints(nodeflib),
1696 if (pathname != NULL)
1698 if (objgiven && !nodeflib) {
1699 pathname = search_library_path(name,
1700 ld_standard_library_path, refobj_path, fdp);
1701 if (pathname != NULL)
1706 if (objgiven && refobj->path != NULL) {
1707 _rtld_error("Shared object \"%s\" not found, "
1708 "required by \"%s\"", name, basename(refobj->path));
1710 _rtld_error("Shared object \"%s\" not found", name);
1716 * Given a symbol number in a referencing object, find the corresponding
1717 * definition of the symbol. Returns a pointer to the symbol, or NULL if
1718 * no definition was found. Returns a pointer to the Obj_Entry of the
1719 * defining object via the reference parameter DEFOBJ_OUT.
1722 find_symdef(unsigned long symnum, const Obj_Entry *refobj,
1723 const Obj_Entry **defobj_out, int flags, SymCache *cache,
1724 RtldLockState *lockstate)
1728 const Obj_Entry *defobj;
1729 const Ver_Entry *ve;
1735 * If we have already found this symbol, get the information from
1738 if (symnum >= refobj->dynsymcount)
1739 return NULL; /* Bad object */
1740 if (cache != NULL && cache[symnum].sym != NULL) {
1741 *defobj_out = cache[symnum].obj;
1742 return cache[symnum].sym;
1745 ref = refobj->symtab + symnum;
1746 name = refobj->strtab + ref->st_name;
1752 * We don't have to do a full scale lookup if the symbol is local.
1753 * We know it will bind to the instance in this load module; to
1754 * which we already have a pointer (ie ref). By not doing a lookup,
1755 * we not only improve performance, but it also avoids unresolvable
1756 * symbols when local symbols are not in the hash table. This has
1757 * been seen with the ia64 toolchain.
1759 if (ELF_ST_BIND(ref->st_info) != STB_LOCAL) {
1760 if (ELF_ST_TYPE(ref->st_info) == STT_SECTION) {
1761 _rtld_error("%s: Bogus symbol table entry %lu", refobj->path,
1764 symlook_init(&req, name);
1766 ve = req.ventry = fetch_ventry(refobj, symnum);
1767 req.lockstate = lockstate;
1768 res = symlook_default(&req, refobj);
1771 defobj = req.defobj_out;
1779 * If we found no definition and the reference is weak, treat the
1780 * symbol as having the value zero.
1782 if (def == NULL && ELF_ST_BIND(ref->st_info) == STB_WEAK) {
1788 *defobj_out = defobj;
1789 /* Record the information in the cache to avoid subsequent lookups. */
1790 if (cache != NULL) {
1791 cache[symnum].sym = def;
1792 cache[symnum].obj = defobj;
1795 if (refobj != &obj_rtld)
1796 _rtld_error("%s: Undefined symbol \"%s%s%s\"", refobj->path, name,
1797 ve != NULL ? "@" : "", ve != NULL ? ve->name : "");
1803 * Return the search path from the ldconfig hints file, reading it if
1804 * necessary. If nostdlib is true, then the default search paths are
1805 * not added to result.
1807 * Returns NULL if there are problems with the hints file,
1808 * or if the search path there is empty.
1811 gethints(bool nostdlib)
1813 static char *filtered_path;
1814 static const char *hints;
1815 static struct elfhints_hdr hdr;
1816 struct fill_search_info_args sargs, hargs;
1817 struct dl_serinfo smeta, hmeta, *SLPinfo, *hintinfo;
1818 struct dl_serpath *SLPpath, *hintpath;
1820 struct stat hint_stat;
1821 unsigned int SLPndx, hintndx, fndx, fcount;
1827 /* First call, read the hints file */
1828 if (hints == NULL) {
1829 /* Keep from trying again in case the hints file is bad. */
1832 if ((fd = open(ld_elf_hints_path, O_RDONLY | O_CLOEXEC)) == -1)
1836 * Check of hdr.dirlistlen value against type limit
1837 * intends to pacify static analyzers. Further
1838 * paranoia leads to checks that dirlist is fully
1839 * contained in the file range.
1841 if (read(fd, &hdr, sizeof hdr) != sizeof hdr ||
1842 hdr.magic != ELFHINTS_MAGIC ||
1843 hdr.version != 1 || hdr.dirlistlen > UINT_MAX / 2 ||
1844 fstat(fd, &hint_stat) == -1) {
1851 if (dl + hdr.dirlist < dl)
1854 if (dl + hdr.dirlistlen < dl)
1856 dl += hdr.dirlistlen;
1857 if (dl > hint_stat.st_size)
1859 p = xmalloc(hdr.dirlistlen + 1);
1860 if (pread(fd, p, hdr.dirlistlen + 1,
1861 hdr.strtab + hdr.dirlist) != (ssize_t)hdr.dirlistlen + 1 ||
1862 p[hdr.dirlistlen] != '\0') {
1871 * If caller agreed to receive list which includes the default
1872 * paths, we are done. Otherwise, if we still did not
1873 * calculated filtered result, do it now.
1876 return (hints[0] != '\0' ? hints : NULL);
1877 if (filtered_path != NULL)
1881 * Obtain the list of all configured search paths, and the
1882 * list of the default paths.
1884 * First estimate the size of the results.
1886 smeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
1888 hmeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
1891 sargs.request = RTLD_DI_SERINFOSIZE;
1892 sargs.serinfo = &smeta;
1893 hargs.request = RTLD_DI_SERINFOSIZE;
1894 hargs.serinfo = &hmeta;
1896 path_enumerate(ld_standard_library_path, fill_search_info, NULL,
1898 path_enumerate(hints, fill_search_info, NULL, &hargs);
1900 SLPinfo = xmalloc(smeta.dls_size);
1901 hintinfo = xmalloc(hmeta.dls_size);
1904 * Next fetch both sets of paths.
1906 sargs.request = RTLD_DI_SERINFO;
1907 sargs.serinfo = SLPinfo;
1908 sargs.serpath = &SLPinfo->dls_serpath[0];
1909 sargs.strspace = (char *)&SLPinfo->dls_serpath[smeta.dls_cnt];
1911 hargs.request = RTLD_DI_SERINFO;
1912 hargs.serinfo = hintinfo;
1913 hargs.serpath = &hintinfo->dls_serpath[0];
1914 hargs.strspace = (char *)&hintinfo->dls_serpath[hmeta.dls_cnt];
1916 path_enumerate(ld_standard_library_path, fill_search_info, NULL,
1918 path_enumerate(hints, fill_search_info, NULL, &hargs);
1921 * Now calculate the difference between two sets, by excluding
1922 * standard paths from the full set.
1926 filtered_path = xmalloc(hdr.dirlistlen + 1);
1927 hintpath = &hintinfo->dls_serpath[0];
1928 for (hintndx = 0; hintndx < hmeta.dls_cnt; hintndx++, hintpath++) {
1930 SLPpath = &SLPinfo->dls_serpath[0];
1932 * Check each standard path against current.
1934 for (SLPndx = 0; SLPndx < smeta.dls_cnt; SLPndx++, SLPpath++) {
1935 /* matched, skip the path */
1936 if (!strcmp(hintpath->dls_name, SLPpath->dls_name)) {
1944 * Not matched against any standard path, add the path
1945 * to result. Separate consequtive paths with ':'.
1948 filtered_path[fndx] = ':';
1952 flen = strlen(hintpath->dls_name);
1953 strncpy((filtered_path + fndx), hintpath->dls_name, flen);
1956 filtered_path[fndx] = '\0';
1962 return (filtered_path[0] != '\0' ? filtered_path : NULL);
1966 init_dag(Obj_Entry *root)
1968 const Needed_Entry *needed;
1969 const Objlist_Entry *elm;
1972 if (root->dag_inited)
1974 donelist_init(&donelist);
1976 /* Root object belongs to own DAG. */
1977 objlist_push_tail(&root->dldags, root);
1978 objlist_push_tail(&root->dagmembers, root);
1979 donelist_check(&donelist, root);
1982 * Add dependencies of root object to DAG in breadth order
1983 * by exploiting the fact that each new object get added
1984 * to the tail of the dagmembers list.
1986 STAILQ_FOREACH(elm, &root->dagmembers, link) {
1987 for (needed = elm->obj->needed; needed != NULL; needed = needed->next) {
1988 if (needed->obj == NULL || donelist_check(&donelist, needed->obj))
1990 objlist_push_tail(&needed->obj->dldags, root);
1991 objlist_push_tail(&root->dagmembers, needed->obj);
1994 root->dag_inited = true;
1998 init_marker(Obj_Entry *marker)
2001 bzero(marker, sizeof(*marker));
2002 marker->marker = true;
2006 globallist_curr(const Obj_Entry *obj)
2013 return (__DECONST(Obj_Entry *, obj));
2014 obj = TAILQ_PREV(obj, obj_entry_q, next);
2019 globallist_next(const Obj_Entry *obj)
2023 obj = TAILQ_NEXT(obj, next);
2027 return (__DECONST(Obj_Entry *, obj));
2031 /* Prevent the object from being unmapped while the bind lock is dropped. */
2033 hold_object(Obj_Entry *obj)
2040 unhold_object(Obj_Entry *obj)
2043 assert(obj->holdcount > 0);
2044 if (--obj->holdcount == 0 && obj->unholdfree)
2045 release_object(obj);
2049 process_z(Obj_Entry *root)
2051 const Objlist_Entry *elm;
2055 * Walk over object DAG and process every dependent object
2056 * that is marked as DF_1_NODELETE or DF_1_GLOBAL. They need
2057 * to grow their own DAG.
2059 * For DF_1_GLOBAL, DAG is required for symbol lookups in
2060 * symlook_global() to work.
2062 * For DF_1_NODELETE, the DAG should have its reference upped.
2064 STAILQ_FOREACH(elm, &root->dagmembers, link) {
2068 if (obj->z_nodelete && !obj->ref_nodel) {
2069 dbg("obj %s -z nodelete", obj->path);
2072 obj->ref_nodel = true;
2074 if (obj->z_global && objlist_find(&list_global, obj) == NULL) {
2075 dbg("obj %s -z global", obj->path);
2076 objlist_push_tail(&list_global, obj);
2082 * Initialize the dynamic linker. The argument is the address at which
2083 * the dynamic linker has been mapped into memory. The primary task of
2084 * this function is to relocate the dynamic linker.
2087 init_rtld(caddr_t mapbase, Elf_Auxinfo **aux_info)
2089 Obj_Entry objtmp; /* Temporary rtld object */
2090 const Elf_Ehdr *ehdr;
2091 const Elf_Dyn *dyn_rpath;
2092 const Elf_Dyn *dyn_soname;
2093 const Elf_Dyn *dyn_runpath;
2095 #ifdef RTLD_INIT_PAGESIZES_EARLY
2096 /* The page size is required by the dynamic memory allocator. */
2097 init_pagesizes(aux_info);
2101 * Conjure up an Obj_Entry structure for the dynamic linker.
2103 * The "path" member can't be initialized yet because string constants
2104 * cannot yet be accessed. Below we will set it correctly.
2106 memset(&objtmp, 0, sizeof(objtmp));
2109 objtmp.mapbase = mapbase;
2111 objtmp.relocbase = mapbase;
2114 objtmp.dynamic = rtld_dynamic(&objtmp);
2115 digest_dynamic1(&objtmp, 1, &dyn_rpath, &dyn_soname, &dyn_runpath);
2116 assert(objtmp.needed == NULL);
2117 #if !defined(__mips__)
2118 /* MIPS has a bogus DT_TEXTREL. */
2119 assert(!objtmp.textrel);
2122 * Temporarily put the dynamic linker entry into the object list, so
2123 * that symbols can be found.
2125 relocate_objects(&objtmp, true, &objtmp, 0, NULL);
2127 ehdr = (Elf_Ehdr *)mapbase;
2128 objtmp.phdr = (Elf_Phdr *)((char *)mapbase + ehdr->e_phoff);
2129 objtmp.phsize = ehdr->e_phnum * sizeof(objtmp.phdr[0]);
2131 /* Initialize the object list. */
2132 TAILQ_INIT(&obj_list);
2134 /* Now that non-local variables can be accesses, copy out obj_rtld. */
2135 memcpy(&obj_rtld, &objtmp, sizeof(obj_rtld));
2137 #ifndef RTLD_INIT_PAGESIZES_EARLY
2138 /* The page size is required by the dynamic memory allocator. */
2139 init_pagesizes(aux_info);
2142 if (aux_info[AT_OSRELDATE] != NULL)
2143 osreldate = aux_info[AT_OSRELDATE]->a_un.a_val;
2145 digest_dynamic2(&obj_rtld, dyn_rpath, dyn_soname, dyn_runpath);
2147 /* Replace the path with a dynamically allocated copy. */
2148 obj_rtld.path = xstrdup(ld_path_rtld);
2150 r_debug.r_brk = r_debug_state;
2151 r_debug.r_state = RT_CONSISTENT;
2155 * Retrieve the array of supported page sizes. The kernel provides the page
2156 * sizes in increasing order.
2159 init_pagesizes(Elf_Auxinfo **aux_info)
2161 static size_t psa[MAXPAGESIZES];
2165 if (aux_info[AT_PAGESIZES] != NULL && aux_info[AT_PAGESIZESLEN] !=
2167 size = aux_info[AT_PAGESIZESLEN]->a_un.a_val;
2168 pagesizes = aux_info[AT_PAGESIZES]->a_un.a_ptr;
2171 if (sysctlnametomib("hw.pagesizes", mib, &len) == 0)
2174 /* As a fallback, retrieve the base page size. */
2175 size = sizeof(psa[0]);
2176 if (aux_info[AT_PAGESZ] != NULL) {
2177 psa[0] = aux_info[AT_PAGESZ]->a_un.a_val;
2181 mib[1] = HW_PAGESIZE;
2185 if (sysctl(mib, len, psa, &size, NULL, 0) == -1) {
2186 _rtld_error("sysctl for hw.pagesize(s) failed");
2192 npagesizes = size / sizeof(pagesizes[0]);
2193 /* Discard any invalid entries at the end of the array. */
2194 while (npagesizes > 0 && pagesizes[npagesizes - 1] == 0)
2199 * Add the init functions from a needed object list (and its recursive
2200 * needed objects) to "list". This is not used directly; it is a helper
2201 * function for initlist_add_objects(). The write lock must be held
2202 * when this function is called.
2205 initlist_add_neededs(Needed_Entry *needed, Objlist *list)
2207 /* Recursively process the successor needed objects. */
2208 if (needed->next != NULL)
2209 initlist_add_neededs(needed->next, list);
2211 /* Process the current needed object. */
2212 if (needed->obj != NULL)
2213 initlist_add_objects(needed->obj, needed->obj, list);
2217 * Scan all of the DAGs rooted in the range of objects from "obj" to
2218 * "tail" and add their init functions to "list". This recurses over
2219 * the DAGs and ensure the proper init ordering such that each object's
2220 * needed libraries are initialized before the object itself. At the
2221 * same time, this function adds the objects to the global finalization
2222 * list "list_fini" in the opposite order. The write lock must be
2223 * held when this function is called.
2226 initlist_add_objects(Obj_Entry *obj, Obj_Entry *tail, Objlist *list)
2230 if (obj->init_scanned || obj->init_done)
2232 obj->init_scanned = true;
2234 /* Recursively process the successor objects. */
2235 nobj = globallist_next(obj);
2236 if (nobj != NULL && obj != tail)
2237 initlist_add_objects(nobj, tail, list);
2239 /* Recursively process the needed objects. */
2240 if (obj->needed != NULL)
2241 initlist_add_neededs(obj->needed, list);
2242 if (obj->needed_filtees != NULL)
2243 initlist_add_neededs(obj->needed_filtees, list);
2244 if (obj->needed_aux_filtees != NULL)
2245 initlist_add_neededs(obj->needed_aux_filtees, list);
2247 /* Add the object to the init list. */
2248 objlist_push_tail(list, obj);
2250 /* Add the object to the global fini list in the reverse order. */
2251 if ((obj->fini != (Elf_Addr)NULL || obj->fini_array != (Elf_Addr)NULL)
2252 && !obj->on_fini_list) {
2253 objlist_push_head(&list_fini, obj);
2254 obj->on_fini_list = true;
2259 #define FPTR_TARGET(f) ((Elf_Addr) (f))
2263 free_needed_filtees(Needed_Entry *n, RtldLockState *lockstate)
2265 Needed_Entry *needed, *needed1;
2267 for (needed = n; needed != NULL; needed = needed->next) {
2268 if (needed->obj != NULL) {
2269 dlclose_locked(needed->obj, lockstate);
2273 for (needed = n; needed != NULL; needed = needed1) {
2274 needed1 = needed->next;
2280 unload_filtees(Obj_Entry *obj, RtldLockState *lockstate)
2283 free_needed_filtees(obj->needed_filtees, lockstate);
2284 obj->needed_filtees = NULL;
2285 free_needed_filtees(obj->needed_aux_filtees, lockstate);
2286 obj->needed_aux_filtees = NULL;
2287 obj->filtees_loaded = false;
2291 load_filtee1(Obj_Entry *obj, Needed_Entry *needed, int flags,
2292 RtldLockState *lockstate)
2295 for (; needed != NULL; needed = needed->next) {
2296 needed->obj = dlopen_object(obj->strtab + needed->name, -1, obj,
2297 flags, ((ld_loadfltr || obj->z_loadfltr) ? RTLD_NOW : RTLD_LAZY) |
2298 RTLD_LOCAL, lockstate);
2303 load_filtees(Obj_Entry *obj, int flags, RtldLockState *lockstate)
2306 lock_restart_for_upgrade(lockstate);
2307 if (!obj->filtees_loaded) {
2308 load_filtee1(obj, obj->needed_filtees, flags, lockstate);
2309 load_filtee1(obj, obj->needed_aux_filtees, flags, lockstate);
2310 obj->filtees_loaded = true;
2315 process_needed(Obj_Entry *obj, Needed_Entry *needed, int flags)
2319 for (; needed != NULL; needed = needed->next) {
2320 obj1 = needed->obj = load_object(obj->strtab + needed->name, -1, obj,
2321 flags & ~RTLD_LO_NOLOAD);
2322 if (obj1 == NULL && !ld_tracing && (flags & RTLD_LO_FILTEES) == 0)
2329 * Given a shared object, traverse its list of needed objects, and load
2330 * each of them. Returns 0 on success. Generates an error message and
2331 * returns -1 on failure.
2334 load_needed_objects(Obj_Entry *first, int flags)
2338 for (obj = first; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
2341 if (process_needed(obj, obj->needed, flags) == -1)
2348 load_preload_objects(void)
2350 char *p = ld_preload;
2352 static const char delim[] = " \t:;";
2357 p += strspn(p, delim);
2358 while (*p != '\0') {
2359 size_t len = strcspn(p, delim);
2364 obj = load_object(p, -1, NULL, 0);
2366 return -1; /* XXX - cleanup */
2367 obj->z_interpose = true;
2370 p += strspn(p, delim);
2372 LD_UTRACE(UTRACE_PRELOAD_FINISHED, NULL, NULL, 0, 0, NULL);
2377 printable_path(const char *path)
2380 return (path == NULL ? "<unknown>" : path);
2384 * Load a shared object into memory, if it is not already loaded. The
2385 * object may be specified by name or by user-supplied file descriptor
2386 * fd_u. In the later case, the fd_u descriptor is not closed, but its
2389 * Returns a pointer to the Obj_Entry for the object. Returns NULL
2393 load_object(const char *name, int fd_u, const Obj_Entry *refobj, int flags)
2402 TAILQ_FOREACH(obj, &obj_list, next) {
2403 if (obj->marker || obj->doomed)
2405 if (object_match_name(obj, name))
2409 path = find_library(name, refobj, &fd);
2417 * search_library_pathfds() opens a fresh file descriptor for the
2418 * library, so there is no need to dup().
2420 } else if (fd_u == -1) {
2422 * If we didn't find a match by pathname, or the name is not
2423 * supplied, open the file and check again by device and inode.
2424 * This avoids false mismatches caused by multiple links or ".."
2427 * To avoid a race, we open the file and use fstat() rather than
2430 if ((fd = open(path, O_RDONLY | O_CLOEXEC | O_VERIFY)) == -1) {
2431 _rtld_error("Cannot open \"%s\"", path);
2436 fd = fcntl(fd_u, F_DUPFD_CLOEXEC, 0);
2438 _rtld_error("Cannot dup fd");
2443 if (fstat(fd, &sb) == -1) {
2444 _rtld_error("Cannot fstat \"%s\"", printable_path(path));
2449 TAILQ_FOREACH(obj, &obj_list, next) {
2450 if (obj->marker || obj->doomed)
2452 if (obj->ino == sb.st_ino && obj->dev == sb.st_dev)
2455 if (obj != NULL && name != NULL) {
2456 object_add_name(obj, name);
2461 if (flags & RTLD_LO_NOLOAD) {
2467 /* First use of this object, so we must map it in */
2468 obj = do_load_object(fd, name, path, &sb, flags);
2477 do_load_object(int fd, const char *name, char *path, struct stat *sbp,
2484 * but first, make sure that environment variables haven't been
2485 * used to circumvent the noexec flag on a filesystem.
2487 if (dangerous_ld_env) {
2488 if (fstatfs(fd, &fs) != 0) {
2489 _rtld_error("Cannot fstatfs \"%s\"", printable_path(path));
2492 if (fs.f_flags & MNT_NOEXEC) {
2493 _rtld_error("Cannot execute objects on %s", fs.f_mntonname);
2497 dbg("loading \"%s\"", printable_path(path));
2498 obj = map_object(fd, printable_path(path), sbp);
2503 * If DT_SONAME is present in the object, digest_dynamic2 already
2504 * added it to the object names.
2507 object_add_name(obj, name);
2509 digest_dynamic(obj, 0);
2510 dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d", obj->path,
2511 obj->valid_hash_sysv, obj->valid_hash_gnu, obj->dynsymcount);
2512 if (obj->z_noopen && (flags & (RTLD_LO_DLOPEN | RTLD_LO_TRACE)) ==
2514 dbg("refusing to load non-loadable \"%s\"", obj->path);
2515 _rtld_error("Cannot dlopen non-loadable %s", obj->path);
2516 munmap(obj->mapbase, obj->mapsize);
2521 obj->dlopened = (flags & RTLD_LO_DLOPEN) != 0;
2522 TAILQ_INSERT_TAIL(&obj_list, obj, next);
2525 linkmap_add(obj); /* for GDB & dlinfo() */
2526 max_stack_flags |= obj->stack_flags;
2528 dbg(" %p .. %p: %s", obj->mapbase,
2529 obj->mapbase + obj->mapsize - 1, obj->path);
2531 dbg(" WARNING: %s has impure text", obj->path);
2532 LD_UTRACE(UTRACE_LOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0,
2539 obj_from_addr(const void *addr)
2543 TAILQ_FOREACH(obj, &obj_list, next) {
2546 if (addr < (void *) obj->mapbase)
2548 if (addr < (void *)(obj->mapbase + obj->mapsize))
2557 Elf_Addr *preinit_addr;
2560 preinit_addr = (Elf_Addr *)obj_main->preinit_array;
2561 if (preinit_addr == NULL)
2564 for (index = 0; index < obj_main->preinit_array_num; index++) {
2565 if (preinit_addr[index] != 0 && preinit_addr[index] != 1) {
2566 dbg("calling preinit function for %s at %p", obj_main->path,
2567 (void *)preinit_addr[index]);
2568 LD_UTRACE(UTRACE_INIT_CALL, obj_main, (void *)preinit_addr[index],
2569 0, 0, obj_main->path);
2570 call_init_pointer(obj_main, preinit_addr[index]);
2576 * Call the finalization functions for each of the objects in "list"
2577 * belonging to the DAG of "root" and referenced once. If NULL "root"
2578 * is specified, every finalization function will be called regardless
2579 * of the reference count and the list elements won't be freed. All of
2580 * the objects are expected to have non-NULL fini functions.
2583 objlist_call_fini(Objlist *list, Obj_Entry *root, RtldLockState *lockstate)
2587 Elf_Addr *fini_addr;
2590 assert(root == NULL || root->refcount == 1);
2593 root->doomed = true;
2596 * Preserve the current error message since a fini function might
2597 * call into the dynamic linker and overwrite it.
2599 saved_msg = errmsg_save();
2601 STAILQ_FOREACH(elm, list, link) {
2602 if (root != NULL && (elm->obj->refcount != 1 ||
2603 objlist_find(&root->dagmembers, elm->obj) == NULL))
2605 /* Remove object from fini list to prevent recursive invocation. */
2606 STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link);
2607 /* Ensure that new references cannot be acquired. */
2608 elm->obj->doomed = true;
2610 hold_object(elm->obj);
2611 lock_release(rtld_bind_lock, lockstate);
2613 * It is legal to have both DT_FINI and DT_FINI_ARRAY defined.
2614 * When this happens, DT_FINI_ARRAY is processed first.
2616 fini_addr = (Elf_Addr *)elm->obj->fini_array;
2617 if (fini_addr != NULL && elm->obj->fini_array_num > 0) {
2618 for (index = elm->obj->fini_array_num - 1; index >= 0;
2620 if (fini_addr[index] != 0 && fini_addr[index] != 1) {
2621 dbg("calling fini function for %s at %p",
2622 elm->obj->path, (void *)fini_addr[index]);
2623 LD_UTRACE(UTRACE_FINI_CALL, elm->obj,
2624 (void *)fini_addr[index], 0, 0, elm->obj->path);
2625 call_initfini_pointer(elm->obj, fini_addr[index]);
2629 if (elm->obj->fini != (Elf_Addr)NULL) {
2630 dbg("calling fini function for %s at %p", elm->obj->path,
2631 (void *)elm->obj->fini);
2632 LD_UTRACE(UTRACE_FINI_CALL, elm->obj, (void *)elm->obj->fini,
2633 0, 0, elm->obj->path);
2634 call_initfini_pointer(elm->obj, elm->obj->fini);
2636 wlock_acquire(rtld_bind_lock, lockstate);
2637 unhold_object(elm->obj);
2638 /* No need to free anything if process is going down. */
2642 * We must restart the list traversal after every fini call
2643 * because a dlclose() call from the fini function or from
2644 * another thread might have modified the reference counts.
2648 } while (elm != NULL);
2649 errmsg_restore(saved_msg);
2653 * Call the initialization functions for each of the objects in
2654 * "list". All of the objects are expected to have non-NULL init
2658 objlist_call_init(Objlist *list, RtldLockState *lockstate)
2663 Elf_Addr *init_addr;
2664 void (*reg)(void (*)(void));
2668 * Clean init_scanned flag so that objects can be rechecked and
2669 * possibly initialized earlier if any of vectors called below
2670 * cause the change by using dlopen.
2672 TAILQ_FOREACH(obj, &obj_list, next) {
2675 obj->init_scanned = false;
2679 * Preserve the current error message since an init function might
2680 * call into the dynamic linker and overwrite it.
2682 saved_msg = errmsg_save();
2683 STAILQ_FOREACH(elm, list, link) {
2684 if (elm->obj->init_done) /* Initialized early. */
2687 * Race: other thread might try to use this object before current
2688 * one completes the initialization. Not much can be done here
2689 * without better locking.
2691 elm->obj->init_done = true;
2692 hold_object(elm->obj);
2694 if (elm->obj == obj_main && obj_main->crt_no_init) {
2695 reg = (void (*)(void (*)(void)))get_program_var_addr(
2696 "__libc_atexit", lockstate);
2698 lock_release(rtld_bind_lock, lockstate);
2701 rtld_exit_ptr = rtld_nop_exit;
2705 * It is legal to have both DT_INIT and DT_INIT_ARRAY defined.
2706 * When this happens, DT_INIT is processed first.
2708 if (elm->obj->init != (Elf_Addr)NULL) {
2709 dbg("calling init function for %s at %p", elm->obj->path,
2710 (void *)elm->obj->init);
2711 LD_UTRACE(UTRACE_INIT_CALL, elm->obj, (void *)elm->obj->init,
2712 0, 0, elm->obj->path);
2713 call_initfini_pointer(elm->obj, elm->obj->init);
2715 init_addr = (Elf_Addr *)elm->obj->init_array;
2716 if (init_addr != NULL) {
2717 for (index = 0; index < elm->obj->init_array_num; index++) {
2718 if (init_addr[index] != 0 && init_addr[index] != 1) {
2719 dbg("calling init function for %s at %p", elm->obj->path,
2720 (void *)init_addr[index]);
2721 LD_UTRACE(UTRACE_INIT_CALL, elm->obj,
2722 (void *)init_addr[index], 0, 0, elm->obj->path);
2723 call_init_pointer(elm->obj, init_addr[index]);
2727 wlock_acquire(rtld_bind_lock, lockstate);
2728 unhold_object(elm->obj);
2730 errmsg_restore(saved_msg);
2734 objlist_clear(Objlist *list)
2738 while (!STAILQ_EMPTY(list)) {
2739 elm = STAILQ_FIRST(list);
2740 STAILQ_REMOVE_HEAD(list, link);
2745 static Objlist_Entry *
2746 objlist_find(Objlist *list, const Obj_Entry *obj)
2750 STAILQ_FOREACH(elm, list, link)
2751 if (elm->obj == obj)
2757 objlist_init(Objlist *list)
2763 objlist_push_head(Objlist *list, Obj_Entry *obj)
2767 elm = NEW(Objlist_Entry);
2769 STAILQ_INSERT_HEAD(list, elm, link);
2773 objlist_push_tail(Objlist *list, Obj_Entry *obj)
2777 elm = NEW(Objlist_Entry);
2779 STAILQ_INSERT_TAIL(list, elm, link);
2783 objlist_put_after(Objlist *list, Obj_Entry *listobj, Obj_Entry *obj)
2785 Objlist_Entry *elm, *listelm;
2787 STAILQ_FOREACH(listelm, list, link) {
2788 if (listelm->obj == listobj)
2791 elm = NEW(Objlist_Entry);
2793 if (listelm != NULL)
2794 STAILQ_INSERT_AFTER(list, listelm, elm, link);
2796 STAILQ_INSERT_TAIL(list, elm, link);
2800 objlist_remove(Objlist *list, Obj_Entry *obj)
2804 if ((elm = objlist_find(list, obj)) != NULL) {
2805 STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link);
2811 * Relocate dag rooted in the specified object.
2812 * Returns 0 on success, or -1 on failure.
2816 relocate_object_dag(Obj_Entry *root, bool bind_now, Obj_Entry *rtldobj,
2817 int flags, RtldLockState *lockstate)
2823 STAILQ_FOREACH(elm, &root->dagmembers, link) {
2824 error = relocate_object(elm->obj, bind_now, rtldobj, flags,
2833 * Prepare for, or clean after, relocating an object marked with
2834 * DT_TEXTREL or DF_TEXTREL. Before relocating, all read-only
2835 * segments are remapped read-write. After relocations are done, the
2836 * segment's permissions are returned back to the modes specified in
2837 * the phdrs. If any relocation happened, or always for wired
2838 * program, COW is triggered.
2841 reloc_textrel_prot(Obj_Entry *obj, bool before)
2848 for (l = obj->phsize / sizeof(*ph), ph = obj->phdr; l > 0;
2850 if (ph->p_type != PT_LOAD || (ph->p_flags & PF_W) != 0)
2852 base = obj->relocbase + trunc_page(ph->p_vaddr);
2853 sz = round_page(ph->p_vaddr + ph->p_filesz) -
2854 trunc_page(ph->p_vaddr);
2855 prot = convert_prot(ph->p_flags) | (before ? PROT_WRITE : 0);
2856 if (mprotect(base, sz, prot) == -1) {
2857 _rtld_error("%s: Cannot write-%sable text segment: %s",
2858 obj->path, before ? "en" : "dis",
2859 rtld_strerror(errno));
2867 * Relocate single object.
2868 * Returns 0 on success, or -1 on failure.
2871 relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
2872 int flags, RtldLockState *lockstate)
2877 obj->relocated = true;
2879 dbg("relocating \"%s\"", obj->path);
2881 if (obj->symtab == NULL || obj->strtab == NULL ||
2882 !(obj->valid_hash_sysv || obj->valid_hash_gnu)) {
2883 _rtld_error("%s: Shared object has no run-time symbol table",
2888 /* There are relocations to the write-protected text segment. */
2889 if (obj->textrel && reloc_textrel_prot(obj, true) != 0)
2892 /* Process the non-PLT non-IFUNC relocations. */
2893 if (reloc_non_plt(obj, rtldobj, flags, lockstate))
2896 /* Re-protected the text segment. */
2897 if (obj->textrel && reloc_textrel_prot(obj, false) != 0)
2900 /* Set the special PLT or GOT entries. */
2903 /* Process the PLT relocations. */
2904 if (reloc_plt(obj, flags, lockstate) == -1)
2906 /* Relocate the jump slots if we are doing immediate binding. */
2907 if ((obj->bind_now || bind_now) && reloc_jmpslots(obj, flags,
2911 if (!obj->mainprog && obj_enforce_relro(obj) == -1)
2915 * Set up the magic number and version in the Obj_Entry. These
2916 * were checked in the crt1.o from the original ElfKit, so we
2917 * set them for backward compatibility.
2919 obj->magic = RTLD_MAGIC;
2920 obj->version = RTLD_VERSION;
2926 * Relocate newly-loaded shared objects. The argument is a pointer to
2927 * the Obj_Entry for the first such object. All objects from the first
2928 * to the end of the list of objects are relocated. Returns 0 on success,
2932 relocate_objects(Obj_Entry *first, bool bind_now, Obj_Entry *rtldobj,
2933 int flags, RtldLockState *lockstate)
2938 for (error = 0, obj = first; obj != NULL;
2939 obj = TAILQ_NEXT(obj, next)) {
2942 error = relocate_object(obj, bind_now, rtldobj, flags,
2951 * The handling of R_MACHINE_IRELATIVE relocations and jumpslots
2952 * referencing STT_GNU_IFUNC symbols is postponed till the other
2953 * relocations are done. The indirect functions specified as
2954 * ifunc are allowed to call other symbols, so we need to have
2955 * objects relocated before asking for resolution from indirects.
2957 * The R_MACHINE_IRELATIVE slots are resolved in greedy fashion,
2958 * instead of the usual lazy handling of PLT slots. It is
2959 * consistent with how GNU does it.
2962 resolve_object_ifunc(Obj_Entry *obj, bool bind_now, int flags,
2963 RtldLockState *lockstate)
2966 if (obj->ifuncs_resolved)
2968 obj->ifuncs_resolved = true;
2969 if (!obj->irelative && !((obj->bind_now || bind_now) && obj->gnu_ifunc))
2971 if (obj_disable_relro(obj) == -1 ||
2972 (obj->irelative && reloc_iresolve(obj, lockstate) == -1) ||
2973 ((obj->bind_now || bind_now) && obj->gnu_ifunc &&
2974 reloc_gnu_ifunc(obj, flags, lockstate) == -1) ||
2975 obj_enforce_relro(obj) == -1)
2981 initlist_objects_ifunc(Objlist *list, bool bind_now, int flags,
2982 RtldLockState *lockstate)
2987 STAILQ_FOREACH(elm, list, link) {
2991 if (resolve_object_ifunc(obj, bind_now, flags,
2999 * Cleanup procedure. It will be called (by the atexit mechanism) just
3000 * before the process exits.
3005 RtldLockState lockstate;
3007 wlock_acquire(rtld_bind_lock, &lockstate);
3009 objlist_call_fini(&list_fini, NULL, &lockstate);
3010 /* No need to remove the items from the list, since we are exiting. */
3011 if (!libmap_disable)
3013 lock_release(rtld_bind_lock, &lockstate);
3022 * Iterate over a search path, translate each element, and invoke the
3023 * callback on the result.
3026 path_enumerate(const char *path, path_enum_proc callback,
3027 const char *refobj_path, void *arg)
3033 path += strspn(path, ":;");
3034 while (*path != '\0') {
3038 len = strcspn(path, ":;");
3039 trans = lm_findn(refobj_path, path, len);
3041 res = callback(trans, strlen(trans), arg);
3043 res = callback(path, len, arg);
3049 path += strspn(path, ":;");
3055 struct try_library_args {
3064 try_library_path(const char *dir, size_t dirlen, void *param)
3066 struct try_library_args *arg;
3070 if (*dir == '/' || trust) {
3073 if (dirlen + 1 + arg->namelen + 1 > arg->buflen)
3076 pathname = arg->buffer;
3077 strncpy(pathname, dir, dirlen);
3078 pathname[dirlen] = '/';
3079 strcpy(pathname + dirlen + 1, arg->name);
3081 dbg(" Trying \"%s\"", pathname);
3082 fd = open(pathname, O_RDONLY | O_CLOEXEC | O_VERIFY);
3084 dbg(" Opened \"%s\", fd %d", pathname, fd);
3085 pathname = xmalloc(dirlen + 1 + arg->namelen + 1);
3086 strcpy(pathname, arg->buffer);
3090 dbg(" Failed to open \"%s\": %s",
3091 pathname, rtld_strerror(errno));
3098 search_library_path(const char *name, const char *path,
3099 const char *refobj_path, int *fdp)
3102 struct try_library_args arg;
3108 arg.namelen = strlen(name);
3109 arg.buffer = xmalloc(PATH_MAX);
3110 arg.buflen = PATH_MAX;
3113 p = path_enumerate(path, try_library_path, refobj_path, &arg);
3123 * Finds the library with the given name using the directory descriptors
3124 * listed in the LD_LIBRARY_PATH_FDS environment variable.
3126 * Returns a freshly-opened close-on-exec file descriptor for the library,
3127 * or -1 if the library cannot be found.
3130 search_library_pathfds(const char *name, const char *path, int *fdp)
3132 char *envcopy, *fdstr, *found, *last_token;
3136 dbg("%s('%s', '%s', fdp)", __func__, name, path);
3138 /* Don't load from user-specified libdirs into setuid binaries. */
3142 /* We can't do anything if LD_LIBRARY_PATH_FDS isn't set. */
3146 /* LD_LIBRARY_PATH_FDS only works with relative paths. */
3147 if (name[0] == '/') {
3148 dbg("Absolute path (%s) passed to %s", name, __func__);
3153 * Use strtok_r() to walk the FD:FD:FD list. This requires a local
3154 * copy of the path, as strtok_r rewrites separator tokens
3158 envcopy = xstrdup(path);
3159 for (fdstr = strtok_r(envcopy, ":", &last_token); fdstr != NULL;
3160 fdstr = strtok_r(NULL, ":", &last_token)) {
3161 dirfd = parse_integer(fdstr);
3163 _rtld_error("failed to parse directory FD: '%s'",
3167 fd = __sys_openat(dirfd, name, O_RDONLY | O_CLOEXEC | O_VERIFY);
3170 len = strlen(fdstr) + strlen(name) + 3;
3171 found = xmalloc(len);
3172 if (rtld_snprintf(found, len, "#%d/%s", dirfd, name) < 0) {
3173 _rtld_error("error generating '%d/%s'",
3177 dbg("open('%s') => %d", found, fd);
3188 dlclose(void *handle)
3190 RtldLockState lockstate;
3193 wlock_acquire(rtld_bind_lock, &lockstate);
3194 error = dlclose_locked(handle, &lockstate);
3195 lock_release(rtld_bind_lock, &lockstate);
3200 dlclose_locked(void *handle, RtldLockState *lockstate)
3204 root = dlcheck(handle);
3207 LD_UTRACE(UTRACE_DLCLOSE_START, handle, NULL, 0, root->dl_refcount,
3210 /* Unreference the object and its dependencies. */
3211 root->dl_refcount--;
3213 if (root->refcount == 1) {
3215 * The object will be no longer referenced, so we must unload it.
3216 * First, call the fini functions.
3218 objlist_call_fini(&list_fini, root, lockstate);
3222 /* Finish cleaning up the newly-unreferenced objects. */
3223 GDB_STATE(RT_DELETE,&root->linkmap);
3224 unload_object(root, lockstate);
3225 GDB_STATE(RT_CONSISTENT,NULL);
3229 LD_UTRACE(UTRACE_DLCLOSE_STOP, handle, NULL, 0, 0, NULL);
3236 char *msg = error_message;
3237 error_message = NULL;
3242 * This function is deprecated and has no effect.
3245 dllockinit(void *context,
3246 void *(*_lock_create)(void *context) __unused,
3247 void (*_rlock_acquire)(void *lock) __unused,
3248 void (*_wlock_acquire)(void *lock) __unused,
3249 void (*_lock_release)(void *lock) __unused,
3250 void (*_lock_destroy)(void *lock) __unused,
3251 void (*context_destroy)(void *context))
3253 static void *cur_context;
3254 static void (*cur_context_destroy)(void *);
3256 /* Just destroy the context from the previous call, if necessary. */
3257 if (cur_context_destroy != NULL)
3258 cur_context_destroy(cur_context);
3259 cur_context = context;
3260 cur_context_destroy = context_destroy;
3264 dlopen(const char *name, int mode)
3267 return (rtld_dlopen(name, -1, mode));
3271 fdlopen(int fd, int mode)
3274 return (rtld_dlopen(NULL, fd, mode));
3278 rtld_dlopen(const char *name, int fd, int mode)
3280 RtldLockState lockstate;
3283 LD_UTRACE(UTRACE_DLOPEN_START, NULL, NULL, 0, mode, name);
3284 ld_tracing = (mode & RTLD_TRACE) == 0 ? NULL : "1";
3285 if (ld_tracing != NULL) {
3286 rlock_acquire(rtld_bind_lock, &lockstate);
3287 if (sigsetjmp(lockstate.env, 0) != 0)
3288 lock_upgrade(rtld_bind_lock, &lockstate);
3289 environ = __DECONST(char **, *get_program_var_addr("environ", &lockstate));
3290 lock_release(rtld_bind_lock, &lockstate);
3292 lo_flags = RTLD_LO_DLOPEN;
3293 if (mode & RTLD_NODELETE)
3294 lo_flags |= RTLD_LO_NODELETE;
3295 if (mode & RTLD_NOLOAD)
3296 lo_flags |= RTLD_LO_NOLOAD;
3297 if (ld_tracing != NULL)
3298 lo_flags |= RTLD_LO_TRACE;
3300 return (dlopen_object(name, fd, obj_main, lo_flags,
3301 mode & (RTLD_MODEMASK | RTLD_GLOBAL), NULL));
3305 dlopen_cleanup(Obj_Entry *obj, RtldLockState *lockstate)
3310 if (obj->refcount == 0)
3311 unload_object(obj, lockstate);
3315 dlopen_object(const char *name, int fd, Obj_Entry *refobj, int lo_flags,
3316 int mode, RtldLockState *lockstate)
3318 Obj_Entry *old_obj_tail;
3321 RtldLockState mlockstate;
3324 objlist_init(&initlist);
3326 if (lockstate == NULL && !(lo_flags & RTLD_LO_EARLY)) {
3327 wlock_acquire(rtld_bind_lock, &mlockstate);
3328 lockstate = &mlockstate;
3330 GDB_STATE(RT_ADD,NULL);
3332 old_obj_tail = globallist_curr(TAILQ_LAST(&obj_list, obj_entry_q));
3334 if (name == NULL && fd == -1) {
3338 obj = load_object(name, fd, refobj, lo_flags);
3343 if (mode & RTLD_GLOBAL && objlist_find(&list_global, obj) == NULL)
3344 objlist_push_tail(&list_global, obj);
3345 if (globallist_next(old_obj_tail) != NULL) {
3346 /* We loaded something new. */
3347 assert(globallist_next(old_obj_tail) == obj);
3349 if ((lo_flags & RTLD_LO_EARLY) == 0 && obj->static_tls &&
3350 !allocate_tls_offset(obj)) {
3351 _rtld_error("%s: No space available "
3352 "for static Thread Local Storage", obj->path);
3356 result = load_needed_objects(obj, lo_flags & (RTLD_LO_DLOPEN |
3361 result = rtld_verify_versions(&obj->dagmembers);
3362 if (result != -1 && ld_tracing)
3364 if (result == -1 || relocate_object_dag(obj,
3365 (mode & RTLD_MODEMASK) == RTLD_NOW, &obj_rtld,
3366 (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0,
3368 dlopen_cleanup(obj, lockstate);
3370 } else if (lo_flags & RTLD_LO_EARLY) {
3372 * Do not call the init functions for early loaded
3373 * filtees. The image is still not initialized enough
3376 * Our object is found by the global object list and
3377 * will be ordered among all init calls done right
3378 * before transferring control to main.
3381 /* Make list of init functions to call. */
3382 initlist_add_objects(obj, obj, &initlist);
3385 * Process all no_delete or global objects here, given
3386 * them own DAGs to prevent their dependencies from being
3387 * unloaded. This has to be done after we have loaded all
3388 * of the dependencies, so that we do not miss any.
3394 * Bump the reference counts for objects on this DAG. If
3395 * this is the first dlopen() call for the object that was
3396 * already loaded as a dependency, initialize the dag
3402 if ((lo_flags & RTLD_LO_TRACE) != 0)
3405 if (obj != NULL && ((lo_flags & RTLD_LO_NODELETE) != 0 ||
3406 obj->z_nodelete) && !obj->ref_nodel) {
3407 dbg("obj %s nodelete", obj->path);
3409 obj->z_nodelete = obj->ref_nodel = true;
3413 LD_UTRACE(UTRACE_DLOPEN_STOP, obj, NULL, 0, obj ? obj->dl_refcount : 0,
3415 GDB_STATE(RT_CONSISTENT,obj ? &obj->linkmap : NULL);
3417 if ((lo_flags & RTLD_LO_EARLY) == 0) {
3418 map_stacks_exec(lockstate);
3420 distribute_static_tls(&initlist, lockstate);
3423 if (initlist_objects_ifunc(&initlist, (mode & RTLD_MODEMASK) == RTLD_NOW,
3424 (lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0,
3426 objlist_clear(&initlist);
3427 dlopen_cleanup(obj, lockstate);
3428 if (lockstate == &mlockstate)
3429 lock_release(rtld_bind_lock, lockstate);
3433 if (!(lo_flags & RTLD_LO_EARLY)) {
3434 /* Call the init functions. */
3435 objlist_call_init(&initlist, lockstate);
3437 objlist_clear(&initlist);
3438 if (lockstate == &mlockstate)
3439 lock_release(rtld_bind_lock, lockstate);
3442 trace_loaded_objects(obj);
3443 if (lockstate == &mlockstate)
3444 lock_release(rtld_bind_lock, lockstate);
3449 do_dlsym(void *handle, const char *name, void *retaddr, const Ver_Entry *ve,
3453 const Obj_Entry *obj, *defobj;
3456 RtldLockState lockstate;
3463 symlook_init(&req, name);
3465 req.flags = flags | SYMLOOK_IN_PLT;
3466 req.lockstate = &lockstate;
3468 LD_UTRACE(UTRACE_DLSYM_START, handle, NULL, 0, 0, name);
3469 rlock_acquire(rtld_bind_lock, &lockstate);
3470 if (sigsetjmp(lockstate.env, 0) != 0)
3471 lock_upgrade(rtld_bind_lock, &lockstate);
3472 if (handle == NULL || handle == RTLD_NEXT ||
3473 handle == RTLD_DEFAULT || handle == RTLD_SELF) {
3475 if ((obj = obj_from_addr(retaddr)) == NULL) {
3476 _rtld_error("Cannot determine caller's shared object");
3477 lock_release(rtld_bind_lock, &lockstate);
3478 LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name);
3481 if (handle == NULL) { /* Just the caller's shared object. */
3482 res = symlook_obj(&req, obj);
3485 defobj = req.defobj_out;
3487 } else if (handle == RTLD_NEXT || /* Objects after caller's */
3488 handle == RTLD_SELF) { /* ... caller included */
3489 if (handle == RTLD_NEXT)
3490 obj = globallist_next(obj);
3491 for (; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
3494 res = symlook_obj(&req, obj);
3497 ELF_ST_BIND(req.sym_out->st_info) != STB_WEAK) {
3499 defobj = req.defobj_out;
3500 if (ELF_ST_BIND(def->st_info) != STB_WEAK)
3506 * Search the dynamic linker itself, and possibly resolve the
3507 * symbol from there. This is how the application links to
3508 * dynamic linker services such as dlopen.
3510 if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
3511 res = symlook_obj(&req, &obj_rtld);
3514 defobj = req.defobj_out;
3518 assert(handle == RTLD_DEFAULT);
3519 res = symlook_default(&req, obj);
3521 defobj = req.defobj_out;
3526 if ((obj = dlcheck(handle)) == NULL) {
3527 lock_release(rtld_bind_lock, &lockstate);
3528 LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name);
3532 donelist_init(&donelist);
3533 if (obj->mainprog) {
3534 /* Handle obtained by dlopen(NULL, ...) implies global scope. */
3535 res = symlook_global(&req, &donelist);
3538 defobj = req.defobj_out;
3541 * Search the dynamic linker itself, and possibly resolve the
3542 * symbol from there. This is how the application links to
3543 * dynamic linker services such as dlopen.
3545 if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
3546 res = symlook_obj(&req, &obj_rtld);
3549 defobj = req.defobj_out;
3554 /* Search the whole DAG rooted at the given object. */
3555 res = symlook_list(&req, &obj->dagmembers, &donelist);
3558 defobj = req.defobj_out;
3564 lock_release(rtld_bind_lock, &lockstate);
3567 * The value required by the caller is derived from the value
3568 * of the symbol. this is simply the relocated value of the
3571 if (ELF_ST_TYPE(def->st_info) == STT_FUNC)
3572 sym = make_function_pointer(def, defobj);
3573 else if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
3574 sym = rtld_resolve_ifunc(defobj, def);
3575 else if (ELF_ST_TYPE(def->st_info) == STT_TLS) {
3576 ti.ti_module = defobj->tlsindex;
3577 ti.ti_offset = def->st_value;
3578 sym = __tls_get_addr(&ti);
3580 sym = defobj->relocbase + def->st_value;
3581 LD_UTRACE(UTRACE_DLSYM_STOP, handle, sym, 0, 0, name);
3585 _rtld_error("Undefined symbol \"%s%s%s\"", name, ve != NULL ? "@" : "",
3586 ve != NULL ? ve->name : "");
3587 lock_release(rtld_bind_lock, &lockstate);
3588 LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name);
3593 dlsym(void *handle, const char *name)
3595 return do_dlsym(handle, name, __builtin_return_address(0), NULL,
3600 dlfunc(void *handle, const char *name)
3607 rv.d = do_dlsym(handle, name, __builtin_return_address(0), NULL,
3613 dlvsym(void *handle, const char *name, const char *version)
3617 ventry.name = version;
3619 ventry.hash = elf_hash(version);
3621 return do_dlsym(handle, name, __builtin_return_address(0), &ventry,
3626 _rtld_addr_phdr(const void *addr, struct dl_phdr_info *phdr_info)
3628 const Obj_Entry *obj;
3629 RtldLockState lockstate;
3631 rlock_acquire(rtld_bind_lock, &lockstate);
3632 obj = obj_from_addr(addr);
3634 _rtld_error("No shared object contains address");
3635 lock_release(rtld_bind_lock, &lockstate);
3638 rtld_fill_dl_phdr_info(obj, phdr_info);
3639 lock_release(rtld_bind_lock, &lockstate);
3644 dladdr(const void *addr, Dl_info *info)
3646 const Obj_Entry *obj;
3649 unsigned long symoffset;
3650 RtldLockState lockstate;
3652 rlock_acquire(rtld_bind_lock, &lockstate);
3653 obj = obj_from_addr(addr);
3655 _rtld_error("No shared object contains address");
3656 lock_release(rtld_bind_lock, &lockstate);
3659 info->dli_fname = obj->path;
3660 info->dli_fbase = obj->mapbase;
3661 info->dli_saddr = (void *)0;
3662 info->dli_sname = NULL;
3665 * Walk the symbol list looking for the symbol whose address is
3666 * closest to the address sent in.
3668 for (symoffset = 0; symoffset < obj->dynsymcount; symoffset++) {
3669 def = obj->symtab + symoffset;
3672 * For skip the symbol if st_shndx is either SHN_UNDEF or
3675 if (def->st_shndx == SHN_UNDEF || def->st_shndx == SHN_COMMON)
3679 * If the symbol is greater than the specified address, or if it
3680 * is further away from addr than the current nearest symbol,
3683 symbol_addr = obj->relocbase + def->st_value;
3684 if (symbol_addr > addr || symbol_addr < info->dli_saddr)
3687 /* Update our idea of the nearest symbol. */
3688 info->dli_sname = obj->strtab + def->st_name;
3689 info->dli_saddr = symbol_addr;
3692 if (info->dli_saddr == addr)
3695 lock_release(rtld_bind_lock, &lockstate);
3700 dlinfo(void *handle, int request, void *p)
3702 const Obj_Entry *obj;
3703 RtldLockState lockstate;
3706 rlock_acquire(rtld_bind_lock, &lockstate);
3708 if (handle == NULL || handle == RTLD_SELF) {
3711 retaddr = __builtin_return_address(0); /* __GNUC__ only */
3712 if ((obj = obj_from_addr(retaddr)) == NULL)
3713 _rtld_error("Cannot determine caller's shared object");
3715 obj = dlcheck(handle);
3718 lock_release(rtld_bind_lock, &lockstate);
3724 case RTLD_DI_LINKMAP:
3725 *((struct link_map const **)p) = &obj->linkmap;
3727 case RTLD_DI_ORIGIN:
3728 error = rtld_dirname(obj->path, p);
3731 case RTLD_DI_SERINFOSIZE:
3732 case RTLD_DI_SERINFO:
3733 error = do_search_info(obj, request, (struct dl_serinfo *)p);
3737 _rtld_error("Invalid request %d passed to dlinfo()", request);
3741 lock_release(rtld_bind_lock, &lockstate);
3747 rtld_fill_dl_phdr_info(const Obj_Entry *obj, struct dl_phdr_info *phdr_info)
3750 phdr_info->dlpi_addr = (Elf_Addr)obj->relocbase;
3751 phdr_info->dlpi_name = obj->path;
3752 phdr_info->dlpi_phdr = obj->phdr;
3753 phdr_info->dlpi_phnum = obj->phsize / sizeof(obj->phdr[0]);
3754 phdr_info->dlpi_tls_modid = obj->tlsindex;
3755 phdr_info->dlpi_tls_data = obj->tlsinit;
3756 phdr_info->dlpi_adds = obj_loads;
3757 phdr_info->dlpi_subs = obj_loads - obj_count;
3761 dl_iterate_phdr(__dl_iterate_hdr_callback callback, void *param)
3763 struct dl_phdr_info phdr_info;
3764 Obj_Entry *obj, marker;
3765 RtldLockState bind_lockstate, phdr_lockstate;
3768 init_marker(&marker);
3771 wlock_acquire(rtld_phdr_lock, &phdr_lockstate);
3772 wlock_acquire(rtld_bind_lock, &bind_lockstate);
3773 for (obj = globallist_curr(TAILQ_FIRST(&obj_list)); obj != NULL;) {
3774 TAILQ_INSERT_AFTER(&obj_list, obj, &marker, next);
3775 rtld_fill_dl_phdr_info(obj, &phdr_info);
3777 lock_release(rtld_bind_lock, &bind_lockstate);
3779 error = callback(&phdr_info, sizeof phdr_info, param);
3781 wlock_acquire(rtld_bind_lock, &bind_lockstate);
3783 obj = globallist_next(&marker);
3784 TAILQ_REMOVE(&obj_list, &marker, next);
3786 lock_release(rtld_bind_lock, &bind_lockstate);
3787 lock_release(rtld_phdr_lock, &phdr_lockstate);
3793 rtld_fill_dl_phdr_info(&obj_rtld, &phdr_info);
3794 lock_release(rtld_bind_lock, &bind_lockstate);
3795 error = callback(&phdr_info, sizeof(phdr_info), param);
3797 lock_release(rtld_phdr_lock, &phdr_lockstate);
3802 fill_search_info(const char *dir, size_t dirlen, void *param)
3804 struct fill_search_info_args *arg;
3808 if (arg->request == RTLD_DI_SERINFOSIZE) {
3809 arg->serinfo->dls_cnt ++;
3810 arg->serinfo->dls_size += sizeof(struct dl_serpath) + dirlen + 1;
3812 struct dl_serpath *s_entry;
3814 s_entry = arg->serpath;
3815 s_entry->dls_name = arg->strspace;
3816 s_entry->dls_flags = arg->flags;
3818 strncpy(arg->strspace, dir, dirlen);
3819 arg->strspace[dirlen] = '\0';
3821 arg->strspace += dirlen + 1;
3829 do_search_info(const Obj_Entry *obj, int request, struct dl_serinfo *info)
3831 struct dl_serinfo _info;
3832 struct fill_search_info_args args;
3834 args.request = RTLD_DI_SERINFOSIZE;
3835 args.serinfo = &_info;
3837 _info.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
3840 path_enumerate(obj->rpath, fill_search_info, NULL, &args);
3841 path_enumerate(ld_library_path, fill_search_info, NULL, &args);
3842 path_enumerate(obj->runpath, fill_search_info, NULL, &args);
3843 path_enumerate(gethints(obj->z_nodeflib), fill_search_info, NULL, &args);
3844 if (!obj->z_nodeflib)
3845 path_enumerate(ld_standard_library_path, fill_search_info, NULL, &args);
3848 if (request == RTLD_DI_SERINFOSIZE) {
3849 info->dls_size = _info.dls_size;
3850 info->dls_cnt = _info.dls_cnt;
3854 if (info->dls_cnt != _info.dls_cnt || info->dls_size != _info.dls_size) {
3855 _rtld_error("Uninitialized Dl_serinfo struct passed to dlinfo()");
3859 args.request = RTLD_DI_SERINFO;
3860 args.serinfo = info;
3861 args.serpath = &info->dls_serpath[0];
3862 args.strspace = (char *)&info->dls_serpath[_info.dls_cnt];
3864 args.flags = LA_SER_RUNPATH;
3865 if (path_enumerate(obj->rpath, fill_search_info, NULL, &args) != NULL)
3868 args.flags = LA_SER_LIBPATH;
3869 if (path_enumerate(ld_library_path, fill_search_info, NULL, &args) != NULL)
3872 args.flags = LA_SER_RUNPATH;
3873 if (path_enumerate(obj->runpath, fill_search_info, NULL, &args) != NULL)
3876 args.flags = LA_SER_CONFIG;
3877 if (path_enumerate(gethints(obj->z_nodeflib), fill_search_info, NULL, &args)
3881 args.flags = LA_SER_DEFAULT;
3882 if (!obj->z_nodeflib && path_enumerate(ld_standard_library_path,
3883 fill_search_info, NULL, &args) != NULL)
3889 rtld_dirname(const char *path, char *bname)
3893 /* Empty or NULL string gets treated as "." */
3894 if (path == NULL || *path == '\0') {
3900 /* Strip trailing slashes */
3901 endp = path + strlen(path) - 1;
3902 while (endp > path && *endp == '/')
3905 /* Find the start of the dir */
3906 while (endp > path && *endp != '/')
3909 /* Either the dir is "/" or there are no slashes */
3911 bname[0] = *endp == '/' ? '/' : '.';
3917 } while (endp > path && *endp == '/');
3920 if (endp - path + 2 > PATH_MAX)
3922 _rtld_error("Filename is too long: %s", path);
3926 strncpy(bname, path, endp - path + 1);
3927 bname[endp - path + 1] = '\0';
3932 rtld_dirname_abs(const char *path, char *base)
3936 if (realpath(path, base) == NULL)
3938 dbg("%s -> %s", path, base);
3939 last = strrchr(base, '/');
3948 linkmap_add(Obj_Entry *obj)
3950 struct link_map *l = &obj->linkmap;
3951 struct link_map *prev;
3953 obj->linkmap.l_name = obj->path;
3954 obj->linkmap.l_addr = obj->mapbase;
3955 obj->linkmap.l_ld = obj->dynamic;
3957 /* GDB needs load offset on MIPS to use the symbols */
3958 obj->linkmap.l_offs = obj->relocbase;
3961 if (r_debug.r_map == NULL) {
3967 * Scan to the end of the list, but not past the entry for the
3968 * dynamic linker, which we want to keep at the very end.
3970 for (prev = r_debug.r_map;
3971 prev->l_next != NULL && prev->l_next != &obj_rtld.linkmap;
3972 prev = prev->l_next)
3975 /* Link in the new entry. */
3977 l->l_next = prev->l_next;
3978 if (l->l_next != NULL)
3979 l->l_next->l_prev = l;
3984 linkmap_delete(Obj_Entry *obj)
3986 struct link_map *l = &obj->linkmap;
3988 if (l->l_prev == NULL) {
3989 if ((r_debug.r_map = l->l_next) != NULL)
3990 l->l_next->l_prev = NULL;
3994 if ((l->l_prev->l_next = l->l_next) != NULL)
3995 l->l_next->l_prev = l->l_prev;
3999 * Function for the debugger to set a breakpoint on to gain control.
4001 * The two parameters allow the debugger to easily find and determine
4002 * what the runtime loader is doing and to whom it is doing it.
4004 * When the loadhook trap is hit (r_debug_state, set at program
4005 * initialization), the arguments can be found on the stack:
4007 * +8 struct link_map *m
4008 * +4 struct r_debug *rd
4012 r_debug_state(struct r_debug* rd __unused, struct link_map *m __unused)
4015 * The following is a hack to force the compiler to emit calls to
4016 * this function, even when optimizing. If the function is empty,
4017 * the compiler is not obliged to emit any code for calls to it,
4018 * even when marked __noinline. However, gdb depends on those
4021 __compiler_membar();
4025 * A function called after init routines have completed. This can be used to
4026 * break before a program's entry routine is called, and can be used when
4027 * main is not available in the symbol table.
4030 _r_debug_postinit(struct link_map *m __unused)
4033 /* See r_debug_state(). */
4034 __compiler_membar();
4038 release_object(Obj_Entry *obj)
4041 if (obj->holdcount > 0) {
4042 obj->unholdfree = true;
4045 munmap(obj->mapbase, obj->mapsize);
4046 linkmap_delete(obj);
4051 * Get address of the pointer variable in the main program.
4052 * Prefer non-weak symbol over the weak one.
4054 static const void **
4055 get_program_var_addr(const char *name, RtldLockState *lockstate)
4060 symlook_init(&req, name);
4061 req.lockstate = lockstate;
4062 donelist_init(&donelist);
4063 if (symlook_global(&req, &donelist) != 0)
4065 if (ELF_ST_TYPE(req.sym_out->st_info) == STT_FUNC)
4066 return ((const void **)make_function_pointer(req.sym_out,
4068 else if (ELF_ST_TYPE(req.sym_out->st_info) == STT_GNU_IFUNC)
4069 return ((const void **)rtld_resolve_ifunc(req.defobj_out, req.sym_out));
4071 return ((const void **)(req.defobj_out->relocbase +
4072 req.sym_out->st_value));
4076 * Set a pointer variable in the main program to the given value. This
4077 * is used to set key variables such as "environ" before any of the
4078 * init functions are called.
4081 set_program_var(const char *name, const void *value)
4085 if ((addr = get_program_var_addr(name, NULL)) != NULL) {
4086 dbg("\"%s\": *%p <-- %p", name, addr, value);
4092 * Search the global objects, including dependencies and main object,
4093 * for the given symbol.
4096 symlook_global(SymLook *req, DoneList *donelist)
4099 const Objlist_Entry *elm;
4102 symlook_init_from_req(&req1, req);
4104 /* Search all objects loaded at program start up. */
4105 if (req->defobj_out == NULL ||
4106 ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
4107 res = symlook_list(&req1, &list_main, donelist);
4108 if (res == 0 && (req->defobj_out == NULL ||
4109 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
4110 req->sym_out = req1.sym_out;
4111 req->defobj_out = req1.defobj_out;
4112 assert(req->defobj_out != NULL);
4116 /* Search all DAGs whose roots are RTLD_GLOBAL objects. */
4117 STAILQ_FOREACH(elm, &list_global, link) {
4118 if (req->defobj_out != NULL &&
4119 ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
4121 res = symlook_list(&req1, &elm->obj->dagmembers, donelist);
4122 if (res == 0 && (req->defobj_out == NULL ||
4123 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
4124 req->sym_out = req1.sym_out;
4125 req->defobj_out = req1.defobj_out;
4126 assert(req->defobj_out != NULL);
4130 return (req->sym_out != NULL ? 0 : ESRCH);
4134 * Given a symbol name in a referencing object, find the corresponding
4135 * definition of the symbol. Returns a pointer to the symbol, or NULL if
4136 * no definition was found. Returns a pointer to the Obj_Entry of the
4137 * defining object via the reference parameter DEFOBJ_OUT.
4140 symlook_default(SymLook *req, const Obj_Entry *refobj)
4143 const Objlist_Entry *elm;
4147 donelist_init(&donelist);
4148 symlook_init_from_req(&req1, req);
4151 * Look first in the referencing object if linked symbolically,
4152 * and similarly handle protected symbols.
4154 res = symlook_obj(&req1, refobj);
4155 if (res == 0 && (refobj->symbolic ||
4156 ELF_ST_VISIBILITY(req1.sym_out->st_other) == STV_PROTECTED)) {
4157 req->sym_out = req1.sym_out;
4158 req->defobj_out = req1.defobj_out;
4159 assert(req->defobj_out != NULL);
4161 if (refobj->symbolic || req->defobj_out != NULL)
4162 donelist_check(&donelist, refobj);
4164 symlook_global(req, &donelist);
4166 /* Search all dlopened DAGs containing the referencing object. */
4167 STAILQ_FOREACH(elm, &refobj->dldags, link) {
4168 if (req->sym_out != NULL &&
4169 ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
4171 res = symlook_list(&req1, &elm->obj->dagmembers, &donelist);
4172 if (res == 0 && (req->sym_out == NULL ||
4173 ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
4174 req->sym_out = req1.sym_out;
4175 req->defobj_out = req1.defobj_out;
4176 assert(req->defobj_out != NULL);
4181 * Search the dynamic linker itself, and possibly resolve the
4182 * symbol from there. This is how the application links to
4183 * dynamic linker services such as dlopen.
4185 if (req->sym_out == NULL ||
4186 ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
4187 res = symlook_obj(&req1, &obj_rtld);
4189 req->sym_out = req1.sym_out;
4190 req->defobj_out = req1.defobj_out;
4191 assert(req->defobj_out != NULL);
4195 return (req->sym_out != NULL ? 0 : ESRCH);
4199 symlook_list(SymLook *req, const Objlist *objlist, DoneList *dlp)
4202 const Obj_Entry *defobj;
4203 const Objlist_Entry *elm;
4209 STAILQ_FOREACH(elm, objlist, link) {
4210 if (donelist_check(dlp, elm->obj))
4212 symlook_init_from_req(&req1, req);
4213 if ((res = symlook_obj(&req1, elm->obj)) == 0) {
4214 if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
4216 defobj = req1.defobj_out;
4217 if (ELF_ST_BIND(def->st_info) != STB_WEAK)
4224 req->defobj_out = defobj;
4231 * Search the chain of DAGS cointed to by the given Needed_Entry
4232 * for a symbol of the given name. Each DAG is scanned completely
4233 * before advancing to the next one. Returns a pointer to the symbol,
4234 * or NULL if no definition was found.
4237 symlook_needed(SymLook *req, const Needed_Entry *needed, DoneList *dlp)
4240 const Needed_Entry *n;
4241 const Obj_Entry *defobj;
4247 symlook_init_from_req(&req1, req);
4248 for (n = needed; n != NULL; n = n->next) {
4249 if (n->obj == NULL ||
4250 (res = symlook_list(&req1, &n->obj->dagmembers, dlp)) != 0)
4252 if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
4254 defobj = req1.defobj_out;
4255 if (ELF_ST_BIND(def->st_info) != STB_WEAK)
4261 req->defobj_out = defobj;
4268 * Search the symbol table of a single shared object for a symbol of
4269 * the given name and version, if requested. Returns a pointer to the
4270 * symbol, or NULL if no definition was found. If the object is
4271 * filter, return filtered symbol from filtee.
4273 * The symbol's hash value is passed in for efficiency reasons; that
4274 * eliminates many recomputations of the hash value.
4277 symlook_obj(SymLook *req, const Obj_Entry *obj)
4281 int flags, res, mres;
4284 * If there is at least one valid hash at this point, we prefer to
4285 * use the faster GNU version if available.
4287 if (obj->valid_hash_gnu)
4288 mres = symlook_obj1_gnu(req, obj);
4289 else if (obj->valid_hash_sysv)
4290 mres = symlook_obj1_sysv(req, obj);
4295 if (obj->needed_filtees != NULL) {
4296 flags = (req->flags & SYMLOOK_EARLY) ? RTLD_LO_EARLY : 0;
4297 load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate);
4298 donelist_init(&donelist);
4299 symlook_init_from_req(&req1, req);
4300 res = symlook_needed(&req1, obj->needed_filtees, &donelist);
4302 req->sym_out = req1.sym_out;
4303 req->defobj_out = req1.defobj_out;
4307 if (obj->needed_aux_filtees != NULL) {
4308 flags = (req->flags & SYMLOOK_EARLY) ? RTLD_LO_EARLY : 0;
4309 load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate);
4310 donelist_init(&donelist);
4311 symlook_init_from_req(&req1, req);
4312 res = symlook_needed(&req1, obj->needed_aux_filtees, &donelist);
4314 req->sym_out = req1.sym_out;
4315 req->defobj_out = req1.defobj_out;
4323 /* Symbol match routine common to both hash functions */
4325 matched_symbol(SymLook *req, const Obj_Entry *obj, Sym_Match_Result *result,
4326 const unsigned long symnum)
4329 const Elf_Sym *symp;
4332 symp = obj->symtab + symnum;
4333 strp = obj->strtab + symp->st_name;
4335 switch (ELF_ST_TYPE(symp->st_info)) {
4341 if (symp->st_value == 0)
4345 if (symp->st_shndx != SHN_UNDEF)
4348 else if (((req->flags & SYMLOOK_IN_PLT) == 0) &&
4349 (ELF_ST_TYPE(symp->st_info) == STT_FUNC))
4356 if (req->name[0] != strp[0] || strcmp(req->name, strp) != 0)
4359 if (req->ventry == NULL) {
4360 if (obj->versyms != NULL) {
4361 verndx = VER_NDX(obj->versyms[symnum]);
4362 if (verndx > obj->vernum) {
4364 "%s: symbol %s references wrong version %d",
4365 obj->path, obj->strtab + symnum, verndx);
4369 * If we are not called from dlsym (i.e. this
4370 * is a normal relocation from unversioned
4371 * binary), accept the symbol immediately if
4372 * it happens to have first version after this
4373 * shared object became versioned. Otherwise,
4374 * if symbol is versioned and not hidden,
4375 * remember it. If it is the only symbol with
4376 * this name exported by the shared object, it
4377 * will be returned as a match by the calling
4378 * function. If symbol is global (verndx < 2)
4379 * accept it unconditionally.
4381 if ((req->flags & SYMLOOK_DLSYM) == 0 &&
4382 verndx == VER_NDX_GIVEN) {
4383 result->sym_out = symp;
4386 else if (verndx >= VER_NDX_GIVEN) {
4387 if ((obj->versyms[symnum] & VER_NDX_HIDDEN)
4389 if (result->vsymp == NULL)
4390 result->vsymp = symp;
4396 result->sym_out = symp;
4399 if (obj->versyms == NULL) {
4400 if (object_match_name(obj, req->ventry->name)) {
4401 _rtld_error("%s: object %s should provide version %s "
4402 "for symbol %s", obj_rtld.path, obj->path,
4403 req->ventry->name, obj->strtab + symnum);
4407 verndx = VER_NDX(obj->versyms[symnum]);
4408 if (verndx > obj->vernum) {
4409 _rtld_error("%s: symbol %s references wrong version %d",
4410 obj->path, obj->strtab + symnum, verndx);
4413 if (obj->vertab[verndx].hash != req->ventry->hash ||
4414 strcmp(obj->vertab[verndx].name, req->ventry->name)) {
4416 * Version does not match. Look if this is a
4417 * global symbol and if it is not hidden. If
4418 * global symbol (verndx < 2) is available,
4419 * use it. Do not return symbol if we are
4420 * called by dlvsym, because dlvsym looks for
4421 * a specific version and default one is not
4422 * what dlvsym wants.
4424 if ((req->flags & SYMLOOK_DLSYM) ||
4425 (verndx >= VER_NDX_GIVEN) ||
4426 (obj->versyms[symnum] & VER_NDX_HIDDEN))
4430 result->sym_out = symp;
4435 * Search for symbol using SysV hash function.
4436 * obj->buckets is known not to be NULL at this point; the test for this was
4437 * performed with the obj->valid_hash_sysv assignment.
4440 symlook_obj1_sysv(SymLook *req, const Obj_Entry *obj)
4442 unsigned long symnum;
4443 Sym_Match_Result matchres;
4445 matchres.sym_out = NULL;
4446 matchres.vsymp = NULL;
4447 matchres.vcount = 0;
4449 for (symnum = obj->buckets[req->hash % obj->nbuckets];
4450 symnum != STN_UNDEF; symnum = obj->chains[symnum]) {
4451 if (symnum >= obj->nchains)
4452 return (ESRCH); /* Bad object */
4454 if (matched_symbol(req, obj, &matchres, symnum)) {
4455 req->sym_out = matchres.sym_out;
4456 req->defobj_out = obj;
4460 if (matchres.vcount == 1) {
4461 req->sym_out = matchres.vsymp;
4462 req->defobj_out = obj;
4468 /* Search for symbol using GNU hash function */
4470 symlook_obj1_gnu(SymLook *req, const Obj_Entry *obj)
4472 Elf_Addr bloom_word;
4473 const Elf32_Word *hashval;
4475 Sym_Match_Result matchres;
4476 unsigned int h1, h2;
4477 unsigned long symnum;
4479 matchres.sym_out = NULL;
4480 matchres.vsymp = NULL;
4481 matchres.vcount = 0;
4483 /* Pick right bitmask word from Bloom filter array */
4484 bloom_word = obj->bloom_gnu[(req->hash_gnu / __ELF_WORD_SIZE) &
4485 obj->maskwords_bm_gnu];
4487 /* Calculate modulus word size of gnu hash and its derivative */
4488 h1 = req->hash_gnu & (__ELF_WORD_SIZE - 1);
4489 h2 = ((req->hash_gnu >> obj->shift2_gnu) & (__ELF_WORD_SIZE - 1));
4491 /* Filter out the "definitely not in set" queries */
4492 if (((bloom_word >> h1) & (bloom_word >> h2) & 1) == 0)
4495 /* Locate hash chain and corresponding value element*/
4496 bucket = obj->buckets_gnu[req->hash_gnu % obj->nbuckets_gnu];
4499 hashval = &obj->chain_zero_gnu[bucket];
4501 if (((*hashval ^ req->hash_gnu) >> 1) == 0) {
4502 symnum = hashval - obj->chain_zero_gnu;
4503 if (matched_symbol(req, obj, &matchres, symnum)) {
4504 req->sym_out = matchres.sym_out;
4505 req->defobj_out = obj;
4509 } while ((*hashval++ & 1) == 0);
4510 if (matchres.vcount == 1) {
4511 req->sym_out = matchres.vsymp;
4512 req->defobj_out = obj;
4519 trace_loaded_objects(Obj_Entry *obj)
4521 const char *fmt1, *fmt2, *fmt, *main_local, *list_containers;
4524 if ((main_local = getenv(_LD("TRACE_LOADED_OBJECTS_PROGNAME"))) == NULL)
4527 if ((fmt1 = getenv(_LD("TRACE_LOADED_OBJECTS_FMT1"))) == NULL)
4528 fmt1 = "\t%o => %p (%x)\n";
4530 if ((fmt2 = getenv(_LD("TRACE_LOADED_OBJECTS_FMT2"))) == NULL)
4531 fmt2 = "\t%o (%x)\n";
4533 list_containers = getenv(_LD("TRACE_LOADED_OBJECTS_ALL"));
4535 for (; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
4536 Needed_Entry *needed;
4537 const char *name, *path;
4542 if (list_containers && obj->needed != NULL)
4543 rtld_printf("%s:\n", obj->path);
4544 for (needed = obj->needed; needed; needed = needed->next) {
4545 if (needed->obj != NULL) {
4546 if (needed->obj->traced && !list_containers)
4548 needed->obj->traced = true;
4549 path = needed->obj->path;
4553 name = obj->strtab + needed->name;
4554 is_lib = strncmp(name, "lib", 3) == 0; /* XXX - bogus */
4556 fmt = is_lib ? fmt1 : fmt2;
4557 while ((c = *fmt++) != '\0') {
4583 rtld_putstr(main_local);
4586 rtld_putstr(obj_main->path);
4593 rtld_printf("%d", sodp->sod_major);
4596 rtld_printf("%d", sodp->sod_minor);
4603 rtld_printf("%p", needed->obj ? needed->obj->mapbase :
4616 * Unload a dlopened object and its dependencies from memory and from
4617 * our data structures. It is assumed that the DAG rooted in the
4618 * object has already been unreferenced, and that the object has a
4619 * reference count of 0.
4622 unload_object(Obj_Entry *root, RtldLockState *lockstate)
4624 Obj_Entry marker, *obj, *next;
4626 assert(root->refcount == 0);
4629 * Pass over the DAG removing unreferenced objects from
4630 * appropriate lists.
4632 unlink_object(root);
4634 /* Unmap all objects that are no longer referenced. */
4635 for (obj = TAILQ_FIRST(&obj_list); obj != NULL; obj = next) {
4636 next = TAILQ_NEXT(obj, next);
4637 if (obj->marker || obj->refcount != 0)
4639 LD_UTRACE(UTRACE_UNLOAD_OBJECT, obj, obj->mapbase,
4640 obj->mapsize, 0, obj->path);
4641 dbg("unloading \"%s\"", obj->path);
4643 * Unlink the object now to prevent new references from
4644 * being acquired while the bind lock is dropped in
4645 * recursive dlclose() invocations.
4647 TAILQ_REMOVE(&obj_list, obj, next);
4650 if (obj->filtees_loaded) {
4652 init_marker(&marker);
4653 TAILQ_INSERT_BEFORE(next, &marker, next);
4654 unload_filtees(obj, lockstate);
4655 next = TAILQ_NEXT(&marker, next);
4656 TAILQ_REMOVE(&obj_list, &marker, next);
4658 unload_filtees(obj, lockstate);
4660 release_object(obj);
4665 unlink_object(Obj_Entry *root)
4669 if (root->refcount == 0) {
4670 /* Remove the object from the RTLD_GLOBAL list. */
4671 objlist_remove(&list_global, root);
4673 /* Remove the object from all objects' DAG lists. */
4674 STAILQ_FOREACH(elm, &root->dagmembers, link) {
4675 objlist_remove(&elm->obj->dldags, root);
4676 if (elm->obj != root)
4677 unlink_object(elm->obj);
4683 ref_dag(Obj_Entry *root)
4687 assert(root->dag_inited);
4688 STAILQ_FOREACH(elm, &root->dagmembers, link)
4689 elm->obj->refcount++;
4693 unref_dag(Obj_Entry *root)
4697 assert(root->dag_inited);
4698 STAILQ_FOREACH(elm, &root->dagmembers, link)
4699 elm->obj->refcount--;
4703 * Common code for MD __tls_get_addr().
4705 static void *tls_get_addr_slow(Elf_Addr **, int, size_t) __noinline;
4707 tls_get_addr_slow(Elf_Addr **dtvp, int index, size_t offset)
4709 Elf_Addr *newdtv, *dtv;
4710 RtldLockState lockstate;
4714 /* Check dtv generation in case new modules have arrived */
4715 if (dtv[0] != tls_dtv_generation) {
4716 wlock_acquire(rtld_bind_lock, &lockstate);
4717 newdtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
4719 if (to_copy > tls_max_index)
4720 to_copy = tls_max_index;
4721 memcpy(&newdtv[2], &dtv[2], to_copy * sizeof(Elf_Addr));
4722 newdtv[0] = tls_dtv_generation;
4723 newdtv[1] = tls_max_index;
4725 lock_release(rtld_bind_lock, &lockstate);
4726 dtv = *dtvp = newdtv;
4729 /* Dynamically allocate module TLS if necessary */
4730 if (dtv[index + 1] == 0) {
4731 /* Signal safe, wlock will block out signals. */
4732 wlock_acquire(rtld_bind_lock, &lockstate);
4733 if (!dtv[index + 1])
4734 dtv[index + 1] = (Elf_Addr)allocate_module_tls(index);
4735 lock_release(rtld_bind_lock, &lockstate);
4737 return ((void *)(dtv[index + 1] + offset));
4741 tls_get_addr_common(Elf_Addr **dtvp, int index, size_t offset)
4746 /* Check dtv generation in case new modules have arrived */
4747 if (__predict_true(dtv[0] == tls_dtv_generation &&
4748 dtv[index + 1] != 0))
4749 return ((void *)(dtv[index + 1] + offset));
4750 return (tls_get_addr_slow(dtvp, index, offset));
4753 #if defined(__aarch64__) || defined(__arm__) || defined(__mips__) || \
4754 defined(__powerpc__) || defined(__riscv)
4757 * Return pointer to allocated TLS block
4760 get_tls_block_ptr(void *tcb, size_t tcbsize)
4762 size_t extra_size, post_size, pre_size, tls_block_size;
4763 size_t tls_init_align;
4765 tls_init_align = MAX(obj_main->tlsalign, 1);
4767 /* Compute fragments sizes. */
4768 extra_size = tcbsize - TLS_TCB_SIZE;
4769 post_size = calculate_tls_post_size(tls_init_align);
4770 tls_block_size = tcbsize + post_size;
4771 pre_size = roundup2(tls_block_size, tls_init_align) - tls_block_size;
4773 return ((char *)tcb - pre_size - extra_size);
4777 * Allocate Static TLS using the Variant I method.
4779 * For details on the layout, see lib/libc/gen/tls.c.
4781 * NB: rtld's tls_static_space variable includes TLS_TCB_SIZE and post_size as
4782 * it is based on tls_last_offset, and TLS offsets here are really TCB
4783 * offsets, whereas libc's tls_static_space is just the executable's static
4787 allocate_tls(Obj_Entry *objs, void *oldtcb, size_t tcbsize, size_t tcbalign)
4791 Elf_Addr *dtv, **tcb;
4794 size_t extra_size, maxalign, post_size, pre_size, tls_block_size;
4795 size_t tls_init_align;
4797 if (oldtcb != NULL && tcbsize == TLS_TCB_SIZE)
4800 assert(tcbsize >= TLS_TCB_SIZE);
4801 maxalign = MAX(tcbalign, tls_static_max_align);
4802 tls_init_align = MAX(obj_main->tlsalign, 1);
4804 /* Compute fragmets sizes. */
4805 extra_size = tcbsize - TLS_TCB_SIZE;
4806 post_size = calculate_tls_post_size(tls_init_align);
4807 tls_block_size = tcbsize + post_size;
4808 pre_size = roundup2(tls_block_size, tls_init_align) - tls_block_size;
4809 tls_block_size += pre_size + tls_static_space - TLS_TCB_SIZE - post_size;
4811 /* Allocate whole TLS block */
4812 tls_block = malloc_aligned(tls_block_size, maxalign);
4813 tcb = (Elf_Addr **)(tls_block + pre_size + extra_size);
4815 if (oldtcb != NULL) {
4816 memcpy(tls_block, get_tls_block_ptr(oldtcb, tcbsize),
4818 free_aligned(get_tls_block_ptr(oldtcb, tcbsize));
4820 /* Adjust the DTV. */
4822 for (i = 0; i < dtv[1]; i++) {
4823 if (dtv[i+2] >= (Elf_Addr)oldtcb &&
4824 dtv[i+2] < (Elf_Addr)oldtcb + tls_static_space) {
4825 dtv[i+2] = dtv[i+2] - (Elf_Addr)oldtcb + (Elf_Addr)tcb;
4829 dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
4831 dtv[0] = tls_dtv_generation;
4832 dtv[1] = tls_max_index;
4834 for (obj = globallist_curr(objs); obj != NULL;
4835 obj = globallist_next(obj)) {
4836 if (obj->tlsoffset > 0) {
4837 addr = (Elf_Addr)tcb + obj->tlsoffset;
4838 if (obj->tlsinitsize > 0)
4839 memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
4840 if (obj->tlssize > obj->tlsinitsize)
4841 memset((void*)(addr + obj->tlsinitsize), 0,
4842 obj->tlssize - obj->tlsinitsize);
4843 dtv[obj->tlsindex + 1] = addr;
4852 free_tls(void *tcb, size_t tcbsize, size_t tcbalign __unused)
4855 Elf_Addr tlsstart, tlsend;
4857 size_t dtvsize, i, tls_init_align;
4859 assert(tcbsize >= TLS_TCB_SIZE);
4860 tls_init_align = MAX(obj_main->tlsalign, 1);
4862 /* Compute fragments sizes. */
4863 post_size = calculate_tls_post_size(tls_init_align);
4865 tlsstart = (Elf_Addr)tcb + TLS_TCB_SIZE + post_size;
4866 tlsend = (Elf_Addr)tcb + tls_static_space;
4868 dtv = *(Elf_Addr **)tcb;
4870 for (i = 0; i < dtvsize; i++) {
4871 if (dtv[i+2] && (dtv[i+2] < tlsstart || dtv[i+2] >= tlsend)) {
4872 free((void*)dtv[i+2]);
4876 free_aligned(get_tls_block_ptr(tcb, tcbsize));
4881 #if defined(__i386__) || defined(__amd64__) || defined(__sparc64__)
4884 * Allocate Static TLS using the Variant II method.
4887 allocate_tls(Obj_Entry *objs, void *oldtls, size_t tcbsize, size_t tcbalign)
4890 size_t size, ralign;
4892 Elf_Addr *dtv, *olddtv;
4893 Elf_Addr segbase, oldsegbase, addr;
4897 if (tls_static_max_align > ralign)
4898 ralign = tls_static_max_align;
4899 size = round(tls_static_space, ralign) + round(tcbsize, ralign);
4901 assert(tcbsize >= 2*sizeof(Elf_Addr));
4902 tls = malloc_aligned(size, ralign);
4903 dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
4905 segbase = (Elf_Addr)(tls + round(tls_static_space, ralign));
4906 ((Elf_Addr*)segbase)[0] = segbase;
4907 ((Elf_Addr*)segbase)[1] = (Elf_Addr) dtv;
4909 dtv[0] = tls_dtv_generation;
4910 dtv[1] = tls_max_index;
4914 * Copy the static TLS block over whole.
4916 oldsegbase = (Elf_Addr) oldtls;
4917 memcpy((void *)(segbase - tls_static_space),
4918 (const void *)(oldsegbase - tls_static_space),
4922 * If any dynamic TLS blocks have been created tls_get_addr(),
4925 olddtv = ((Elf_Addr**)oldsegbase)[1];
4926 for (i = 0; i < olddtv[1]; i++) {
4927 if (olddtv[i+2] < oldsegbase - size || olddtv[i+2] > oldsegbase) {
4928 dtv[i+2] = olddtv[i+2];
4934 * We assume that this block was the one we created with
4935 * allocate_initial_tls().
4937 free_tls(oldtls, 2*sizeof(Elf_Addr), sizeof(Elf_Addr));
4939 for (obj = objs; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
4940 if (obj->marker || obj->tlsoffset == 0)
4942 addr = segbase - obj->tlsoffset;
4943 memset((void*)(addr + obj->tlsinitsize),
4944 0, obj->tlssize - obj->tlsinitsize);
4946 memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
4947 obj->static_tls_copied = true;
4949 dtv[obj->tlsindex + 1] = addr;
4953 return (void*) segbase;
4957 free_tls(void *tls, size_t tcbsize __unused, size_t tcbalign)
4960 size_t size, ralign;
4962 Elf_Addr tlsstart, tlsend;
4965 * Figure out the size of the initial TLS block so that we can
4966 * find stuff which ___tls_get_addr() allocated dynamically.
4969 if (tls_static_max_align > ralign)
4970 ralign = tls_static_max_align;
4971 size = round(tls_static_space, ralign);
4973 dtv = ((Elf_Addr**)tls)[1];
4975 tlsend = (Elf_Addr) tls;
4976 tlsstart = tlsend - size;
4977 for (i = 0; i < dtvsize; i++) {
4978 if (dtv[i + 2] != 0 && (dtv[i + 2] < tlsstart || dtv[i + 2] > tlsend)) {
4979 free_aligned((void *)dtv[i + 2]);
4983 free_aligned((void *)tlsstart);
4990 * Allocate TLS block for module with given index.
4993 allocate_module_tls(int index)
4998 TAILQ_FOREACH(obj, &obj_list, next) {
5001 if (obj->tlsindex == index)
5005 _rtld_error("Can't find module with TLS index %d", index);
5009 p = malloc_aligned(obj->tlssize, obj->tlsalign);
5010 memcpy(p, obj->tlsinit, obj->tlsinitsize);
5011 memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize);
5017 allocate_tls_offset(Obj_Entry *obj)
5024 if (obj->tlssize == 0) {
5025 obj->tls_done = true;
5029 if (tls_last_offset == 0)
5030 off = calculate_first_tls_offset(obj->tlssize, obj->tlsalign);
5032 off = calculate_tls_offset(tls_last_offset, tls_last_size,
5033 obj->tlssize, obj->tlsalign);
5036 * If we have already fixed the size of the static TLS block, we
5037 * must stay within that size. When allocating the static TLS, we
5038 * leave a small amount of space spare to be used for dynamically
5039 * loading modules which use static TLS.
5041 if (tls_static_space != 0) {
5042 if (calculate_tls_end(off, obj->tlssize) > tls_static_space)
5044 } else if (obj->tlsalign > tls_static_max_align) {
5045 tls_static_max_align = obj->tlsalign;
5048 tls_last_offset = obj->tlsoffset = off;
5049 tls_last_size = obj->tlssize;
5050 obj->tls_done = true;
5056 free_tls_offset(Obj_Entry *obj)
5060 * If we were the last thing to allocate out of the static TLS
5061 * block, we give our space back to the 'allocator'. This is a
5062 * simplistic workaround to allow libGL.so.1 to be loaded and
5063 * unloaded multiple times.
5065 if (calculate_tls_end(obj->tlsoffset, obj->tlssize)
5066 == calculate_tls_end(tls_last_offset, tls_last_size)) {
5067 tls_last_offset -= obj->tlssize;
5073 _rtld_allocate_tls(void *oldtls, size_t tcbsize, size_t tcbalign)
5076 RtldLockState lockstate;
5078 wlock_acquire(rtld_bind_lock, &lockstate);
5079 ret = allocate_tls(globallist_curr(TAILQ_FIRST(&obj_list)), oldtls,
5081 lock_release(rtld_bind_lock, &lockstate);
5086 _rtld_free_tls(void *tcb, size_t tcbsize, size_t tcbalign)
5088 RtldLockState lockstate;
5090 wlock_acquire(rtld_bind_lock, &lockstate);
5091 free_tls(tcb, tcbsize, tcbalign);
5092 lock_release(rtld_bind_lock, &lockstate);
5096 object_add_name(Obj_Entry *obj, const char *name)
5102 entry = malloc(sizeof(Name_Entry) + len);
5104 if (entry != NULL) {
5105 strcpy(entry->name, name);
5106 STAILQ_INSERT_TAIL(&obj->names, entry, link);
5111 object_match_name(const Obj_Entry *obj, const char *name)
5115 STAILQ_FOREACH(entry, &obj->names, link) {
5116 if (strcmp(name, entry->name) == 0)
5123 locate_dependency(const Obj_Entry *obj, const char *name)
5125 const Objlist_Entry *entry;
5126 const Needed_Entry *needed;
5128 STAILQ_FOREACH(entry, &list_main, link) {
5129 if (object_match_name(entry->obj, name))
5133 for (needed = obj->needed; needed != NULL; needed = needed->next) {
5134 if (strcmp(obj->strtab + needed->name, name) == 0 ||
5135 (needed->obj != NULL && object_match_name(needed->obj, name))) {
5137 * If there is DT_NEEDED for the name we are looking for,
5138 * we are all set. Note that object might not be found if
5139 * dependency was not loaded yet, so the function can
5140 * return NULL here. This is expected and handled
5141 * properly by the caller.
5143 return (needed->obj);
5146 _rtld_error("%s: Unexpected inconsistency: dependency %s not found",
5152 check_object_provided_version(Obj_Entry *refobj, const Obj_Entry *depobj,
5153 const Elf_Vernaux *vna)
5155 const Elf_Verdef *vd;
5156 const char *vername;
5158 vername = refobj->strtab + vna->vna_name;
5159 vd = depobj->verdef;
5161 _rtld_error("%s: version %s required by %s not defined",
5162 depobj->path, vername, refobj->path);
5166 if (vd->vd_version != VER_DEF_CURRENT) {
5167 _rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
5168 depobj->path, vd->vd_version);
5171 if (vna->vna_hash == vd->vd_hash) {
5172 const Elf_Verdaux *aux = (const Elf_Verdaux *)
5173 ((const char *)vd + vd->vd_aux);
5174 if (strcmp(vername, depobj->strtab + aux->vda_name) == 0)
5177 if (vd->vd_next == 0)
5179 vd = (const Elf_Verdef *)((const char *)vd + vd->vd_next);
5181 if (vna->vna_flags & VER_FLG_WEAK)
5183 _rtld_error("%s: version %s required by %s not found",
5184 depobj->path, vername, refobj->path);
5189 rtld_verify_object_versions(Obj_Entry *obj)
5191 const Elf_Verneed *vn;
5192 const Elf_Verdef *vd;
5193 const Elf_Verdaux *vda;
5194 const Elf_Vernaux *vna;
5195 const Obj_Entry *depobj;
5196 int maxvernum, vernum;
5198 if (obj->ver_checked)
5200 obj->ver_checked = true;
5204 * Walk over defined and required version records and figure out
5205 * max index used by any of them. Do very basic sanity checking
5209 while (vn != NULL) {
5210 if (vn->vn_version != VER_NEED_CURRENT) {
5211 _rtld_error("%s: Unsupported version %d of Elf_Verneed entry",
5212 obj->path, vn->vn_version);
5215 vna = (const Elf_Vernaux *)((const char *)vn + vn->vn_aux);
5217 vernum = VER_NEED_IDX(vna->vna_other);
5218 if (vernum > maxvernum)
5220 if (vna->vna_next == 0)
5222 vna = (const Elf_Vernaux *)((const char *)vna + vna->vna_next);
5224 if (vn->vn_next == 0)
5226 vn = (const Elf_Verneed *)((const char *)vn + vn->vn_next);
5230 while (vd != NULL) {
5231 if (vd->vd_version != VER_DEF_CURRENT) {
5232 _rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
5233 obj->path, vd->vd_version);
5236 vernum = VER_DEF_IDX(vd->vd_ndx);
5237 if (vernum > maxvernum)
5239 if (vd->vd_next == 0)
5241 vd = (const Elf_Verdef *)((const char *)vd + vd->vd_next);
5248 * Store version information in array indexable by version index.
5249 * Verify that object version requirements are satisfied along the
5252 obj->vernum = maxvernum + 1;
5253 obj->vertab = xcalloc(obj->vernum, sizeof(Ver_Entry));
5256 while (vd != NULL) {
5257 if ((vd->vd_flags & VER_FLG_BASE) == 0) {
5258 vernum = VER_DEF_IDX(vd->vd_ndx);
5259 assert(vernum <= maxvernum);
5260 vda = (const Elf_Verdaux *)((const char *)vd + vd->vd_aux);
5261 obj->vertab[vernum].hash = vd->vd_hash;
5262 obj->vertab[vernum].name = obj->strtab + vda->vda_name;
5263 obj->vertab[vernum].file = NULL;
5264 obj->vertab[vernum].flags = 0;
5266 if (vd->vd_next == 0)
5268 vd = (const Elf_Verdef *)((const char *)vd + vd->vd_next);
5272 while (vn != NULL) {
5273 depobj = locate_dependency(obj, obj->strtab + vn->vn_file);
5276 vna = (const Elf_Vernaux *)((const char *)vn + vn->vn_aux);
5278 if (check_object_provided_version(obj, depobj, vna))
5280 vernum = VER_NEED_IDX(vna->vna_other);
5281 assert(vernum <= maxvernum);
5282 obj->vertab[vernum].hash = vna->vna_hash;
5283 obj->vertab[vernum].name = obj->strtab + vna->vna_name;
5284 obj->vertab[vernum].file = obj->strtab + vn->vn_file;
5285 obj->vertab[vernum].flags = (vna->vna_other & VER_NEED_HIDDEN) ?
5286 VER_INFO_HIDDEN : 0;
5287 if (vna->vna_next == 0)
5289 vna = (const Elf_Vernaux *)((const char *)vna + vna->vna_next);
5291 if (vn->vn_next == 0)
5293 vn = (const Elf_Verneed *)((const char *)vn + vn->vn_next);
5299 rtld_verify_versions(const Objlist *objlist)
5301 Objlist_Entry *entry;
5305 STAILQ_FOREACH(entry, objlist, link) {
5307 * Skip dummy objects or objects that have their version requirements
5310 if (entry->obj->strtab == NULL || entry->obj->vertab != NULL)
5312 if (rtld_verify_object_versions(entry->obj) == -1) {
5314 if (ld_tracing == NULL)
5318 if (rc == 0 || ld_tracing != NULL)
5319 rc = rtld_verify_object_versions(&obj_rtld);
5324 fetch_ventry(const Obj_Entry *obj, unsigned long symnum)
5329 vernum = VER_NDX(obj->versyms[symnum]);
5330 if (vernum >= obj->vernum) {
5331 _rtld_error("%s: symbol %s has wrong verneed value %d",
5332 obj->path, obj->strtab + symnum, vernum);
5333 } else if (obj->vertab[vernum].hash != 0) {
5334 return &obj->vertab[vernum];
5341 _rtld_get_stack_prot(void)
5344 return (stack_prot);
5348 _rtld_is_dlopened(void *arg)
5351 RtldLockState lockstate;
5354 rlock_acquire(rtld_bind_lock, &lockstate);
5357 obj = obj_from_addr(arg);
5359 _rtld_error("No shared object contains address");
5360 lock_release(rtld_bind_lock, &lockstate);
5363 res = obj->dlopened ? 1 : 0;
5364 lock_release(rtld_bind_lock, &lockstate);
5369 obj_remap_relro(Obj_Entry *obj, int prot)
5372 if (obj->relro_size > 0 && mprotect(obj->relro_page, obj->relro_size,
5374 _rtld_error("%s: Cannot set relro protection to %#x: %s",
5375 obj->path, prot, rtld_strerror(errno));
5382 obj_disable_relro(Obj_Entry *obj)
5385 return (obj_remap_relro(obj, PROT_READ | PROT_WRITE));
5389 obj_enforce_relro(Obj_Entry *obj)
5392 return (obj_remap_relro(obj, PROT_READ));
5396 map_stacks_exec(RtldLockState *lockstate)
5398 void (*thr_map_stacks_exec)(void);
5400 if ((max_stack_flags & PF_X) == 0 || (stack_prot & PROT_EXEC) != 0)
5402 thr_map_stacks_exec = (void (*)(void))(uintptr_t)
5403 get_program_var_addr("__pthread_map_stacks_exec", lockstate);
5404 if (thr_map_stacks_exec != NULL) {
5405 stack_prot |= PROT_EXEC;
5406 thr_map_stacks_exec();
5411 distribute_static_tls(Objlist *list, RtldLockState *lockstate)
5415 void (*distrib)(size_t, void *, size_t, size_t);
5417 distrib = (void (*)(size_t, void *, size_t, size_t))(uintptr_t)
5418 get_program_var_addr("__pthread_distribute_static_tls", lockstate);
5419 if (distrib == NULL)
5421 STAILQ_FOREACH(elm, list, link) {
5423 if (obj->marker || !obj->tls_done || obj->static_tls_copied)
5425 distrib(obj->tlsoffset, obj->tlsinit, obj->tlsinitsize,
5427 obj->static_tls_copied = true;
5432 symlook_init(SymLook *dst, const char *name)
5435 bzero(dst, sizeof(*dst));
5437 dst->hash = elf_hash(name);
5438 dst->hash_gnu = gnu_hash(name);
5442 symlook_init_from_req(SymLook *dst, const SymLook *src)
5445 dst->name = src->name;
5446 dst->hash = src->hash;
5447 dst->hash_gnu = src->hash_gnu;
5448 dst->ventry = src->ventry;
5449 dst->flags = src->flags;
5450 dst->defobj_out = NULL;
5451 dst->sym_out = NULL;
5452 dst->lockstate = src->lockstate;
5456 open_binary_fd(const char *argv0, bool search_in_path)
5458 char *pathenv, *pe, binpath[PATH_MAX];
5461 if (search_in_path && strchr(argv0, '/') == NULL) {
5462 pathenv = getenv("PATH");
5463 if (pathenv == NULL) {
5464 _rtld_error("-p and no PATH environment variable");
5467 pathenv = strdup(pathenv);
5468 if (pathenv == NULL) {
5469 _rtld_error("Cannot allocate memory");
5474 while ((pe = strsep(&pathenv, ":")) != NULL) {
5475 if (strlcpy(binpath, pe, sizeof(binpath)) >=
5478 if (binpath[0] != '\0' &&
5479 strlcat(binpath, "/", sizeof(binpath)) >=
5482 if (strlcat(binpath, argv0, sizeof(binpath)) >=
5485 fd = open(binpath, O_RDONLY | O_CLOEXEC | O_VERIFY);
5486 if (fd != -1 || errno != ENOENT)
5491 fd = open(argv0, O_RDONLY | O_CLOEXEC | O_VERIFY);
5495 _rtld_error("Cannot open %s: %s", argv0, rtld_strerror(errno));
5502 * Parse a set of command-line arguments.
5505 parse_args(char* argv[], int argc, bool *use_pathp, int *fdp)
5508 int fd, i, j, arglen;
5511 dbg("Parsing command-line arguments");
5515 for (i = 1; i < argc; i++ ) {
5517 dbg("argv[%d]: '%s'", i, arg);
5520 * rtld arguments end with an explicit "--" or with the first
5521 * non-prefixed argument.
5523 if (strcmp(arg, "--") == 0) {
5531 * All other arguments are single-character options that can
5532 * be combined, so we need to search through `arg` for them.
5534 arglen = strlen(arg);
5535 for (j = 1; j < arglen; j++) {
5538 print_usage(argv[0]);
5540 } else if (opt == 'f') {
5542 * -f XX can be used to specify a descriptor for the
5543 * binary named at the command line (i.e., the later
5544 * argument will specify the process name but the
5545 * descriptor is what will actually be executed)
5547 if (j != arglen - 1) {
5548 /* -f must be the last option in, e.g., -abcf */
5549 _rtld_error("Invalid options: %s", arg);
5553 fd = parse_integer(argv[i]);
5555 _rtld_error("Invalid file descriptor: '%s'",
5561 } else if (opt == 'p') {
5564 _rtld_error("Invalid argument: '%s'", arg);
5565 print_usage(argv[0]);
5575 * Parse a file descriptor number without pulling in more of libc (e.g. atoi).
5578 parse_integer(const char *str)
5580 static const int RADIX = 10; /* XXXJA: possibly support hex? */
5587 for (c = *str; c != '\0'; c = *++str) {
5588 if (c < '0' || c > '9')
5595 /* Make sure we actually parsed something. */
5602 print_usage(const char *argv0)
5605 rtld_printf("Usage: %s [-h] [-f <FD>] [--] <binary> [<args>]\n"
5608 " -h Display this help message\n"
5609 " -p Search in PATH for named binary\n"
5610 " -f <FD> Execute <FD> instead of searching for <binary>\n"
5611 " -- End of RTLD options\n"
5612 " <binary> Name of process to execute\n"
5613 " <args> Arguments to the executed process\n", argv0);
5617 * Overrides for libc_pic-provided functions.
5621 __getosreldate(void)
5631 oid[1] = KERN_OSRELDATE;
5633 len = sizeof(osrel);
5634 error = sysctl(oid, 2, &osrel, &len, NULL, 0);
5635 if (error == 0 && osrel > 0 && len == sizeof(osrel))
5647 void (*__cleanup)(void);
5648 int __isthreaded = 0;
5649 int _thread_autoinit_dummy_decl = 1;
5652 * No unresolved symbols for rtld.
5655 __pthread_cxa_finalize(struct dl_phdr_info *a __unused)
5660 rtld_strerror(int errnum)
5663 if (errnum < 0 || errnum >= sys_nerr)
5664 return ("Unknown error");
5665 return (sys_errlist[errnum]);
5669 * No ifunc relocations.
5672 memset(void *dest, int c, size_t len)
5676 for (i = 0; i < len; i++)
5677 ((char *)dest)[i] = c;
5682 bzero(void *dest, size_t len)
5686 for (i = 0; i < len; i++)
5687 ((char *)dest)[i] = 0;
5692 malloc(size_t nbytes)
5695 return (__crt_malloc(nbytes));
5699 calloc(size_t num, size_t size)
5702 return (__crt_calloc(num, size));
5713 realloc(void *cp, size_t nbytes)
5716 return (__crt_realloc(cp, nbytes));