2 * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
3 * Copyright (c) 1998 Peter Wemm <peter@freebsd.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
33 #include <sys/linker.h>
34 #include <sys/module.h>
35 #include <sys/stdint.h>
37 #include <machine/elf.h>
42 #include "bootstrap.h"
44 #define COPYOUT(s,d,l) archsw.arch_copyout((vm_offset_t)(s), d, l)
46 #if defined(__i386__) && __ELF_WORD_SIZE == 64
49 #define ELF_TARG_CLASS ELFCLASS64
50 #define ELF_TARG_MACH EM_X86_64
53 typedef struct elf_file {
75 static int __elfN(loadimage)(struct preloaded_file *mp, elf_file_t ef, u_int64_t loadaddr);
76 static int __elfN(lookup_symbol)(struct preloaded_file *mp, elf_file_t ef, const char* name, Elf_Sym* sym);
77 static int __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
78 Elf_Addr p, void *val, size_t len);
79 static int __elfN(parse_modmetadata)(struct preloaded_file *mp, elf_file_t ef,
80 Elf_Addr p_start, Elf_Addr p_end);
81 static symaddr_fn __elfN(symaddr);
82 static char *fake_modname(const char *name);
84 const char *__elfN(kerneltype) = "elf kernel";
85 const char *__elfN(moduletype) = "elf module";
87 u_int64_t __elfN(relocation_offset) = 0;
/*
 * Open (filename), read its first page, and validate that it carries an
 * ELF header matching this loader's target (class, byte order, version,
 * machine).  On success the elf_file state (ef) holds the open fd, the
 * buffered first page and a pointer to the validated Elf_Ehdr.
 * NOTE(review): this view of the file is a partial extraction — lines are
 * missing between the statements shown, so error/return paths are not
 * fully visible here.
 */
90 __elfN(load_elf_header)(char *filename, elf_file_t ef)
97 * Open the image, read and validate the ELF header
99 if (filename == NULL) /* can't handle nameless */
101 if ((ef->fd = open(filename, O_RDONLY)) == -1)
/* Buffer one page: the ELF header (and program headers, checked later by
 * the loadimage path) are expected to live in the first PAGE_SIZE bytes. */
103 ef->firstpage = malloc(PAGE_SIZE);
104 if (ef->firstpage == NULL) {
108 bytes_read = read(ef->fd, ef->firstpage, PAGE_SIZE);
109 ef->firstlen = (size_t)bytes_read;
/* A failed or too-short read cannot even contain an Elf_Ehdr. */
110 if (bytes_read < 0 || ef->firstlen <= sizeof(Elf_Ehdr)) {
111 err = EFTYPE; /* could be EIO, but may be small file */
114 ehdr = ef->ehdr = (Elf_Ehdr *)ef->firstpage;
/* Magic-number check ("\177ELF"). */
117 if (!IS_ELF(*ehdr)) {
/* Reject images built for a different class/endianness/version/machine. */
121 if (ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || /* Layout ? */
122 ehdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
123 ehdr->e_ident[EI_VERSION] != EV_CURRENT || /* Version ? */
124 ehdr->e_version != EV_CURRENT ||
125 ehdr->e_machine != ELF_TARG_MACH) { /* Machine ? */
/* Error path: release the buffered page so the caller sees clean state. */
133 if (ef->firstpage != NULL) {
135 ef->firstpage = NULL;
145 * Attempt to load the file (file) as an ELF module. It will be stored at
146 * (dest), and a pointer to a module structure describing the loaded object
147 * will be saved in (result).
/*
 * Public entry point: load (filename) as a regular (non-multiboot) ELF
 * image destined for (dest), returning the resulting preloaded_file in
 * (*result).  Thin wrapper around loadfile_raw with multiboot == 0.
 */
150 __elfN(loadfile)(char *filename, u_int64_t dest, struct preloaded_file **result)
152 return (__elfN(loadfile_raw)(filename, dest, result, 0));
/*
 * Attempt to load (filename) as an ELF kernel or kld module.  Decides
 * which of the two it is (first object loaded, or ET_EXEC, is treated as
 * the kernel; ET_DYN after a kernel is a module), computes the load
 * destination, performs the load via loadimage(), and registers the
 * resulting preloaded_file in (*result).
 * NOTE(review): partial extraction — declarations, returns and some
 * branch bodies are not visible between the lines shown.
 */
156 __elfN(loadfile_raw)(char *filename, u_int64_t dest,
157 struct preloaded_file **result, int multiboot)
159 struct preloaded_file *fp, *kfp;
165 bzero(&ef, sizeof(struct elf_file));
168 err = __elfN(load_elf_header)(filename, &ef);
175 * Check to see what sort of module we are.
177 kfp = file_findfile(NULL, __elfN(kerneltype));
180 * Kernels can be ET_DYN, so just assume the first loaded object is the
181 * kernel. This assumption will be checked later.
186 if (ef.kernel || ehdr->e_type == ET_EXEC) {
187 /* Looks like a kernel */
/* Only one kernel may be loaded; a second attempt is an error. */
189 printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: kernel already loaded\n");
194 * Calculate destination address based on kernel entrypoint.
196 * For ARM, the destination address is independent of any values in the
197 * elf header (an ARM kernel can be loaded at any 2MB boundary), so we
198 * leave dest set to the value calculated by archsw.arch_loadaddr() and
199 * passed in to this function.
202 if (ehdr->e_type == ET_EXEC)
203 dest = (ehdr->e_entry & ~PAGE_MASK);
/* An entry point of 0 (page-masked) is not a plausible kernel. */
205 if ((ehdr->e_entry & ~PAGE_MASK) == 0) {
206 printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: not a kernel (maybe static binary?)\n");
212 } else if (ehdr->e_type == ET_DYN) {
213 /* Looks like a kld module */
214 if (multiboot != 0) {
215 printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: can't load module as multiboot\n");
/* Modules depend on a kernel already being loaded. */
220 printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: can't load module before kernel\n");
/* The loaded kernel's type must match this loader's kernel type. */
224 if (strcmp(__elfN(kerneltype), kfp->f_type)) {
225 printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: can't load module with kernel type '%s'\n", kfp->f_type);
229 /* Looks OK, go ahead */
/* Give the architecture a chance to adjust/relocate the destination. */
237 if (archsw.arch_loadaddr != NULL)
238 dest = archsw.arch_loadaddr(LOAD_ELF, ehdr, dest);
240 dest = roundup(dest, PAGE_SIZE);
243 * Ok, we think we should handle this.
247 printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadfile: cannot allocate module info\n");
/* Record the kernel name in the loader environment (non-multiboot only). */
251 if (ef.kernel == 1 && multiboot == 0)
252 setenv("kernelname", filename, 1);
253 fp->f_name = strdup(filename);
255 fp->f_type = strdup(ef.kernel ?
256 __elfN(kerneltype) : __elfN(moduletype));
258 fp->f_type = strdup("elf multiboot kernel");
262 printf("%s entry at 0x%jx\n", filename, (uintmax_t)ehdr->e_entry);
264 printf("%s ", filename);
/* Do the actual segment/symbol load; f_size of 0 indicates failure. */
267 fp->f_size = __elfN(loadimage)(fp, &ef, dest);
268 if (fp->f_size == 0 || fp->f_addr == 0)
271 /* save exec header as metadata */
272 file_addmetadata(fp, MODINFOMD_ELFHDR, sizeof(*ehdr), ehdr);
274 /* Load OK, return module pointer */
275 *result = (struct preloaded_file *)fp;
292 * With the file (fd) open on the image, and (ehdr) containing
293 * the Elf header, load the image at (off)
/*
 * Core loader: with the image open in (ef) and its header validated,
 * copy all PT_LOAD segments to their destinations (biased by (off)),
 * zero any bss tails, optionally load symbol/string tables, record
 * section/ctor/symbol metadata on (fp), and — for modules — parse the
 * DT_* dynamic entries needed for symbol lookup and modmetadata parsing.
 * Returns the total loaded size (lastaddr - firstaddr).
 * NOTE(review): partial extraction — declarations, #else/#endif lines,
 * returns and some loop bodies are missing between the lines shown.
 */
296 __elfN(loadimage)(struct preloaded_file *fp, elf_file_t ef, u_int64_t off)
301 Elf_Phdr *phdr, *php;
305 vm_offset_t firstaddr;
306 vm_offset_t lastaddr;
319 Elf_Addr p_start, p_end;
324 firstaddr = lastaddr = 0;
/*
 * Per-architecture adjustment of 'off', turning it into the bias added
 * to each p_vaddr to obtain the physical load address.
 */
326 if (ehdr->e_type == ET_EXEC) {
327 #if defined(__i386__) || defined(__amd64__)
328 #if __ELF_WORD_SIZE == 64
329 off = - (off & 0xffffffffff000000ull);/* x86_64 relocates after locore */
331 off = - (off & 0xff000000u); /* i386 relocates after locore */
333 #elif defined(__powerpc__)
335 * On the purely virtual memory machines like e500, the kernel is
336 * linked against its final VA range, which is most often not
337 * available at the loader stage, but only after kernel initializes
338 * and completes its VM settings. In such cases we cannot use p_vaddr
339 * field directly to load ELF segments, but put them at some
340 * 'load-time' locations.
342 if (off & 0xf0000000u) {
343 off = -(off & 0xf0000000u);
345 * XXX the physical load address should not be hardcoded. Note
346 * that the Book-E kernel assumes that it's loaded at a 16MB
347 * boundary for now...
350 ehdr->e_entry += off;
352 printf("Converted entry 0x%08x\n", ehdr->e_entry);
356 #elif defined(__arm__)
358 * The elf headers in arm kernels specify virtual addresses in all
359 * header fields, even the ones that should be physical addresses.
360 * We assume the entry point is in the first page, and masking the page
361 * offset will leave us with the virtual address the kernel was linked
362 * at. We subtract that from the load offset, making 'off' into the
363 * value which, when added to a virtual address in an elf header,
364 * translates it to a physical address. We do the va->pa conversion on
365 * the entry point address in the header now, so that later we can
366 * launch the kernel by just jumping to that address.
368 off -= ehdr->e_entry & ~PAGE_MASK;
369 ehdr->e_entry += off;
371 printf("ehdr->e_entry 0x%08x, va<->pa off %llx\n", ehdr->e_entry, off);
374 off = 0; /* other archs use direct mapped kernels */
/* Published for later consumers (e.g. metadata fixups elsewhere). */
380 __elfN(relocation_offset) = off;
/* The program headers must fit in the buffered first page; we do not
 * seek back for them. */
382 if ((ehdr->e_phoff + ehdr->e_phnum * sizeof(*phdr)) > ef->firstlen) {
383 printf("elf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: program header not within first page\n");
386 phdr = (Elf_Phdr *)(ef->firstpage + ehdr->e_phoff);
388 for (i = 0; i < ehdr->e_phnum; i++) {
389 /* We want to load PT_LOAD segments only.. */
390 if (phdr[i].p_type != PT_LOAD)
394 printf("Segment: 0x%lx@0x%lx -> 0x%lx-0x%lx",
395 (long)phdr[i].p_filesz, (long)phdr[i].p_offset,
396 (long)(phdr[i].p_vaddr + off),
397 (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz - 1));
/* Read-only segments are reported as text, writable ones as data. */
399 if ((phdr[i].p_flags & PF_W) == 0) {
400 printf("text=0x%lx ", (long)phdr[i].p_filesz);
402 printf("data=0x%lx", (long)phdr[i].p_filesz);
403 if (phdr[i].p_filesz < phdr[i].p_memsz)
404 printf("+0x%lx", (long)(phdr[i].p_memsz -phdr[i].p_filesz));
/* Copy whatever part of the segment is already in the buffered first
 * page, then pread the remainder straight to the destination. */
409 if (ef->firstlen > phdr[i].p_offset) {
410 fpcopy = ef->firstlen - phdr[i].p_offset;
411 archsw.arch_copyin(ef->firstpage + phdr[i].p_offset,
412 phdr[i].p_vaddr + off, fpcopy);
414 if (phdr[i].p_filesz > fpcopy) {
415 if (kern_pread(ef->fd, phdr[i].p_vaddr + off + fpcopy,
416 phdr[i].p_filesz - fpcopy, phdr[i].p_offset + fpcopy) != 0) {
417 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
418 "_loadimage: read failed\n");
422 /* clear space from oversized segments; eg: bss */
423 if (phdr[i].p_filesz < phdr[i].p_memsz) {
425 printf(" (bss: 0x%lx-0x%lx)",
426 (long)(phdr[i].p_vaddr + off + phdr[i].p_filesz),
427 (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz - 1));
430 kern_bzero(phdr[i].p_vaddr + off + phdr[i].p_filesz,
431 phdr[i].p_memsz - phdr[i].p_filesz);
/* Architecture hook invoked once per loaded segment. */
437 if (archsw.arch_loadseg != NULL)
438 archsw.arch_loadseg(ehdr, phdr + i, off);
/* Track the overall [firstaddr, lastaddr) extent of the image. */
440 if (firstaddr == 0 || firstaddr > (phdr[i].p_vaddr + off))
441 firstaddr = phdr[i].p_vaddr + off;
442 if (lastaddr == 0 || lastaddr < (phdr[i].p_vaddr + off + phdr[i].p_memsz))
443 lastaddr = phdr[i].p_vaddr + off + phdr[i].p_memsz;
445 lastaddr = roundup(lastaddr, sizeof(long));
448 * Get the section headers. We need this for finding the .ctors
449 * section as well as for loading any symbols. Both may be hard
450 * to do if reading from a .gz file as it involves seeking. I
451 * think the rule is going to have to be that you must strip a
452 * file to remove symbols before gzipping it.
454 chunk = ehdr->e_shnum * ehdr->e_shentsize;
455 if (chunk == 0 || ehdr->e_shoff == 0)
457 shdr = alloc_pread(ef->fd, ehdr->e_shoff, chunk);
459 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
460 "_loadimage: failed to read section headers");
463 file_addmetadata(fp, MODINFOMD_SHDR, chunk, shdr);
466 * Read the section string table and look for the .ctors section.
467 * We need to tell the kernel where it is so that it can call the
470 chunk = shdr[ehdr->e_shstrndx].sh_size;
472 shstr = alloc_pread(ef->fd, shdr[ehdr->e_shstrndx].sh_offset, chunk);
474 for (i = 0; i < ehdr->e_shnum; i++) {
475 if (strcmp(shstr + shdr[i].sh_name, ".ctors") != 0)
/* Record .ctors address/size so the kernel can run constructors. */
477 ctors = shdr[i].sh_addr;
478 file_addmetadata(fp, MODINFOMD_CTORS_ADDR, sizeof(ctors),
480 size = shdr[i].sh_size;
481 file_addmetadata(fp, MODINFOMD_CTORS_SIZE, sizeof(size),
490 * Now load any symbols.
/* Find a SHT_SYMTAB that is NOT already covered by a PT_LOAD segment
 * (in that case sh_offset is zeroed and the copy below is skipped). */
494 for (i = 0; i < ehdr->e_shnum; i++) {
495 if (shdr[i].sh_type != SHT_SYMTAB)
497 for (j = 0; j < ehdr->e_phnum; j++) {
498 if (phdr[j].p_type != PT_LOAD)
500 if (shdr[i].sh_offset >= phdr[j].p_offset &&
501 (shdr[i].sh_offset + shdr[i].sh_size <=
502 phdr[j].p_offset + phdr[j].p_filesz)) {
503 shdr[i].sh_offset = 0;
508 if (shdr[i].sh_offset == 0 || shdr[i].sh_size == 0)
509 continue; /* already loaded in a PT_LOAD above */
510 /* Save it for loading below */
512 symstrindex = shdr[i].sh_link;
514 if (symtabindex < 0 || symstrindex < 0)
517 /* Ok, committed to a load. */
/* Copy symtab then its strtab after the image, each prefixed with its
 * size; the loop visits symtabindex first, then symstrindex. */
522 for (i = symtabindex; i >= 0; i = symstrindex) {
526 switch(shdr[i].sh_type) {
527 case SHT_SYMTAB: /* Symbol table */
530 case SHT_STRTAB: /* String table */
539 size = shdr[i].sh_size;
540 archsw.arch_copyin(&size, lastaddr, sizeof(size));
541 lastaddr += sizeof(size);
544 printf("\n%s: 0x%jx@0x%jx -> 0x%jx-0x%jx", secname,
545 (uintmax_t)shdr[i].sh_size, (uintmax_t)shdr[i].sh_offset,
546 (uintmax_t)lastaddr, (uintmax_t)(lastaddr + shdr[i].sh_size));
548 if (i == symstrindex)
550 printf("0x%lx+0x%lx", (long)sizeof(size), (long)size);
553 if (lseek(ef->fd, (off_t)shdr[i].sh_offset, SEEK_SET) == -1) {
554 printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: could not seek for symbols - skipped!");
559 result = archsw.arch_readin(ef->fd, lastaddr, shdr[i].sh_size);
560 if (result < 0 || (size_t)result != shdr[i].sh_size) {
561 printf("\nelf" __XSTRING(__ELF_WORD_SIZE) "_loadimage: could not read symbols - skipped! (%ju != %ju)", (uintmax_t)result,
562 (uintmax_t)shdr[i].sh_size);
567 /* Reset offsets relative to ssym */
568 lastaddr += shdr[i].sh_size;
569 lastaddr = roundup(lastaddr, sizeof(size));
570 if (i == symtabindex)
572 else if (i == symstrindex)
/* Publish symbol table start/end so the kernel debugger can find them. */
580 file_addmetadata(fp, MODINFOMD_SSYM, sizeof(ssym), &ssym);
581 file_addmetadata(fp, MODINFOMD_ESYM, sizeof(esym), &esym);
586 ret = lastaddr - firstaddr;
587 fp->f_addr = firstaddr;
/* Locate PT_DYNAMIC; without it we cannot reach symbols or _DYNAMIC. */
590 for (i = 0; i < ehdr->e_phnum; i++) {
591 if (phdr[i].p_type == PT_DYNAMIC) {
594 file_addmetadata(fp, MODINFOMD_DYNAMIC, sizeof(adp), &adp);
599 if (php == NULL) /* this is bad, we cannot get to symbols or _DYNAMIC */
602 ndp = php->p_filesz / sizeof(Elf_Dyn);
605 dp = malloc(php->p_filesz);
/* Read the already-loaded dynamic section back from the target image. */
608 archsw.arch_copyout(php->p_vaddr + off, dp, php->p_filesz);
/* Walk the DT_* entries, caching hash/str/sym/rel tables (biased by
 * off) into the elf_file state. */
611 for (i = 0; i < ndp; i++) {
612 if (dp[i].d_tag == 0)
614 switch (dp[i].d_tag) {
616 ef->hashtab = (Elf_Hashelt*)(uintptr_t)(dp[i].d_un.d_ptr + off);
619 ef->strtab = (char *)(uintptr_t)(dp[i].d_un.d_ptr + off);
622 ef->strsz = dp[i].d_un.d_val;
625 ef->symtab = (Elf_Sym*)(uintptr_t)(dp[i].d_un.d_ptr + off);
628 ef->rel = (Elf_Rel *)(uintptr_t)(dp[i].d_un.d_ptr + off);
631 ef->relsz = dp[i].d_un.d_val;
634 ef->rela = (Elf_Rela *)(uintptr_t)(dp[i].d_un.d_ptr + off);
637 ef->relasz = dp[i].d_un.d_val;
643 if (ef->hashtab == NULL || ef->symtab == NULL ||
644 ef->strtab == NULL || ef->strsz == 0)
/* SysV hash layout: nbuckets, nchains, then the bucket/chain arrays. */
646 COPYOUT(ef->hashtab, &ef->nbuckets, sizeof(ef->nbuckets));
647 COPYOUT(ef->hashtab + 1, &ef->nchains, sizeof(ef->nchains));
648 ef->buckets = ef->hashtab + 2;
649 ef->chains = ef->buckets + ef->nbuckets;
/* Bracket the module's linker-set of metadata and parse it. */
651 if (__elfN(lookup_symbol)(fp, ef, "__start_set_modmetadata_set", &sym) != 0)
653 p_start = sym.st_value + ef->off;
654 if (__elfN(lookup_symbol)(fp, ef, "__stop_set_modmetadata_set", &sym) != 0)
656 p_end = sym.st_value + ef->off;
658 if (__elfN(parse_modmetadata)(fp, ef, p_start, p_end) == 0)
661 if (ef->kernel) /* kernel must not depend on anything */
/* Fallback module-name string used when no sensible name can be derived. */
672 static char invalid_name[] = "bad";
/*
 * Derive a module name from a pathname: strip any leading directory
 * components (last '/') and any trailing extension (last '.'), returning
 * a freshly malloc'd copy.  Caller owns (and frees) the result.
 * NOTE(review): partial extraction — the conditions selecting between
 * the three 'ep' assignments are not visible here.
 */
675 fake_modname(const char *name)
681 sp = strrchr(name, '/');
686 ep = strrchr(name, '.');
/* Degenerate input: fall back to the "bad" placeholder name. */
690 ep = invalid_name + sizeof(invalid_name) - 1;
693 ep = name + strlen(name);
695 fp = malloc(len + 1);
/*
 * Cross-word-size images of struct mod_metadata: when a 32-bit loader
 * handles a 64-bit module (or vice versa) the pointer-sized md_data and
 * md_cval fields have the module's layout, not the loader's, so these
 * fixed-width mirrors are used to copy the record out before converting
 * it to the native struct mod_metadata in parse_modmetadata().
 */
703 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
704 struct mod_metadata64 {
705 int md_version; /* structure version MDTV_* */
706 int md_type; /* type of entry MDT_* */
707 u_int64_t md_data; /* specific data */
708 u_int64_t md_cval; /* common string label */
711 #if defined(__amd64__) && __ELF_WORD_SIZE == 32
712 struct mod_metadata32 {
713 int md_version; /* structure version MDTV_* */
714 int md_type; /* type of entry MDT_* */
715 u_int32_t md_data; /* specific data */
716 u_int32_t md_cval; /* common string label */
/*
 * Load ONLY the module-metadata of an ET_DYN module (fp->f_name) into
 * memory at (dest), without loading the module itself: pull in the
 * set_modmetadata_set linker set plus the .data/.rodata sections it
 * points into, then parse the set.  Used to discover a module's
 * names/versions/dependencies cheaply.
 * NOTE(review): partial extraction — declarations, gotos/returns and
 * cleanup labels are missing between the lines shown.
 */
721 __elfN(load_modmetadata)(struct preloaded_file *fp, u_int64_t dest)
725 Elf_Shdr *sh_meta, *shdr = NULL;
726 Elf_Shdr *sh_data[2];
727 char *shstrtab = NULL;
729 Elf_Addr p_start, p_end;
731 bzero(&ef, sizeof(struct elf_file));
734 err = __elfN(load_elf_header)(fp->f_name, &ef);
/* Kernels / ET_EXEC images have no separate metadata pass; only ET_DYN
 * modules are handled here. */
738 if (ef.kernel == 1 || ef.ehdr->e_type == ET_EXEC) {
740 } else if (ef.ehdr->e_type != ET_DYN) {
/* Read the full section header table and the section name strings. */
745 size = ef.ehdr->e_shnum * ef.ehdr->e_shentsize;
746 shdr = alloc_pread(ef.fd, ef.ehdr->e_shoff, size);
753 shstrtab = alloc_pread(ef.fd, shdr[ef.ehdr->e_shstrndx].sh_offset,
754 shdr[ef.ehdr->e_shstrndx].sh_size);
755 if (shstrtab == NULL) {
756 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
757 "load_modmetadata: unable to load shstrtab\n");
762 /* Find set_modmetadata_set and data sections. */
763 sh_data[0] = sh_data[1] = sh_meta = NULL;
764 for (i = 0, j = 0; i < ef.ehdr->e_shnum; i++) {
765 if (strcmp(&shstrtab[shdr[i].sh_name],
766 "set_modmetadata_set") == 0) {
769 if ((strcmp(&shstrtab[shdr[i].sh_name], ".data") == 0) ||
770 (strcmp(&shstrtab[shdr[i].sh_name], ".rodata") == 0)) {
771 sh_data[j++] = &shdr[i];
774 if (sh_meta == NULL || sh_data[0] == NULL || sh_data[1] == NULL) {
775 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
776 "load_modmetadata: unable to find set_modmetadata_set or data sections\n");
781 /* Load set_modmetadata_set into memory */
782 err = kern_pread(ef.fd, dest, sh_meta->sh_size, sh_meta->sh_offset);
784 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
785 "load_modmetadata: unable to load set_modmetadata_set: %d\n", err);
/* [p_start, p_end) brackets the loaded linker set for parsing below. */
789 p_end = dest + sh_meta->sh_size;
790 dest += sh_meta->sh_size;
792 /* Load data sections into memory. */
793 err = kern_pread(ef.fd, dest, sh_data[0]->sh_size,
794 sh_data[0]->sh_offset);
796 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
797 "load_modmetadata: unable to load data: %d\n", err);
802 * We have to increment the dest, so that the offset is the same into
803 * both the .rodata and .data sections.
/* ef.off translates a link-time address into the scratch copy at dest. */
805 ef.off = -(sh_data[0]->sh_addr - dest);
806 dest += (sh_data[1]->sh_addr - sh_data[0]->sh_addr);
808 err = kern_pread(ef.fd, dest, sh_data[1]->sh_size,
809 sh_data[1]->sh_offset);
811 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
812 "load_modmetadata: unable to load data: %d\n", err);
816 err = __elfN(parse_modmetadata)(fp, &ef, p_start, p_end);
818 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
819 "load_modmetadata: unable to parse metadata: %d\n", err);
/* Cleanup: free the string table and buffered first page if present. */
824 if (shstrtab != NULL)
828 if (ef.firstpage != NULL)
/*
 * Walk the set_modmetadata_set linker set in [p_start, p_end): each slot
 * is a pointer to a struct mod_metadata.  Record module names/versions
 * (file_addmodule) and dependency lists (MODINFOMD_DEPLIST metadata) on
 * (fp).  Pointers are fixed up either via reloc_ptr() or, when no
 * relocation method exists (EOPNOTSUPP), by adding ef->off directly.
 * NOTE(review): partial extraction — the loop header, switch on
 * md.md_type, returns and #endif lines are not visible here.
 */
836 __elfN(parse_modmetadata)(struct preloaded_file *fp, elf_file_t ef,
837 Elf_Addr p_start, Elf_Addr p_end)
839 struct mod_metadata md;
840 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
841 struct mod_metadata64 md64;
842 #elif defined(__amd64__) && __ELF_WORD_SIZE == 32
843 struct mod_metadata32 md32;
845 struct mod_depend *mdepend;
846 struct mod_version mver;
848 int error, modcnt, minfolen;
/* Fetch the next set entry (a pointer) and relocate it. */
854 COPYOUT(p, &v, sizeof(v));
855 error = __elfN(reloc_ptr)(fp, ef, p, &v, sizeof(v));
856 if (error == EOPNOTSUPP)
/* Cross-word-size case: copy out the foreign-layout record, fix up its
 * pointers, then widen/narrow into the native struct mod_metadata. */
860 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
861 COPYOUT(v, &md64, sizeof(md64));
862 error = __elfN(reloc_ptr)(fp, ef, v, &md64, sizeof(md64));
863 if (error == EOPNOTSUPP) {
864 md64.md_cval += ef->off;
865 md64.md_data += ef->off;
866 } else if (error != 0)
868 md.md_version = md64.md_version;
869 md.md_type = md64.md_type;
870 md.md_cval = (const char *)(uintptr_t)md64.md_cval;
871 md.md_data = (void *)(uintptr_t)md64.md_data;
872 #elif defined(__amd64__) && __ELF_WORD_SIZE == 32
873 COPYOUT(v, &md32, sizeof(md32));
874 error = __elfN(reloc_ptr)(fp, ef, v, &md32, sizeof(md32));
875 if (error == EOPNOTSUPP) {
876 md32.md_cval += ef->off;
877 md32.md_data += ef->off;
878 } else if (error != 0)
880 md.md_version = md32.md_version;
881 md.md_type = md32.md_type;
882 md.md_cval = (const char *)(uintptr_t)md32.md_cval;
883 md.md_data = (void *)(uintptr_t)md32.md_data;
/* Native-layout case. */
885 COPYOUT(v, &md, sizeof(md));
886 error = __elfN(reloc_ptr)(fp, ef, v, &md, sizeof(md));
887 if (error == EOPNOTSUPP) {
888 md.md_cval += ef->off;
889 md.md_data += ef->off;
890 } else if (error != 0)
/* Advance to the next pointer-sized slot in the linker set. */
893 p += sizeof(Elf_Addr);
896 if (ef->kernel) /* kernel must not depend on anything */
/* Dependency entry: copy the mod_depend record with its name string
 * appended, and attach it as MODINFOMD_DEPLIST metadata. */
898 s = strdupout((vm_offset_t)md.md_cval);
899 minfolen = sizeof(*mdepend) + strlen(s) + 1;
900 mdepend = malloc(minfolen);
903 COPYOUT((vm_offset_t)md.md_data, mdepend, sizeof(*mdepend));
904 strcpy((char*)(mdepend + 1), s);
906 file_addmetadata(fp, MODINFOMD_DEPLIST, minfolen, mdepend);
/* Version entry: register the provided module name + version. */
910 s = strdupout((vm_offset_t)md.md_cval);
911 COPYOUT((vm_offset_t)md.md_data, &mver, sizeof(mver));
912 file_addmodule(fp, s, mver.mv_version, NULL);
/* No metadata found: fall back to a name derived from the filename. */
919 s = fake_modname(fp->f_name);
920 file_addmodule(fp, s, 1, NULL);
/*
 * Hash a symbol name for the SysV-style ELF hash table built in
 * loadimage() (nbuckets/nchains arrays).  The visible lines match the
 * standard System V ABI elf_hash algorithm (fold the high nibble via g);
 * NOTE(review): the loop body between these lines is not visible in this
 * extraction — confirm against the full file.
 */
927 elf_hash(const char *name)
929 const unsigned char *p = (const unsigned char *) name;
935 if ((g = h & 0xf0000000) != 0)
/* Shared diagnostic for any inconsistency found while walking the hash
 * chains below. */
942 static const char __elfN(bad_symtable)[] = "elf" __XSTRING(__ELF_WORD_SIZE) "_lookup_symbol: corrupt symbol table\n";
/*
 * Look up (name) in the loaded image's SysV ELF hash table (bucket ->
 * chain walk), copying each candidate Elf_Sym out of the target image
 * with COPYOUT and comparing its string-table name.  A match must be
 * defined (st_shndx != SHN_UNDEF) or be a nonzero-valued STT_FUNC.
 * NOTE(review): partial extraction — returns and the success path that
 * stores into (sym) are not visible between the lines shown.
 */
944 __elfN(lookup_symbol)(struct preloaded_file *fp, elf_file_t ef, const char* name,
952 hash = elf_hash(name);
953 COPYOUT(&ef->buckets[hash % ef->nbuckets], &symnum, sizeof(symnum));
955 while (symnum != STN_UNDEF) {
/* A chain index beyond nchains means the table is corrupt. */
956 if (symnum >= ef->nchains) {
957 printf(__elfN(bad_symtable));
961 COPYOUT(ef->symtab + symnum, &sym, sizeof(sym));
/* Every chained symbol must have a name; index 0 is also corrupt. */
962 if (sym.st_name == 0) {
963 printf(__elfN(bad_symtable));
/* Copy the candidate's name out of the target's string table. */
967 strp = strdupout((vm_offset_t)(ef->strtab + sym.st_name));
968 if (strcmp(name, strp) == 0) {
970 if (sym.st_shndx != SHN_UNDEF ||
971 (sym.st_value != 0 &&
972 ELF_ST_TYPE(sym.st_info) == STT_FUNC)) {
/* No match: follow the hash chain to the next candidate. */
979 COPYOUT(&ef->chains[symnum], &symnum, sizeof(symnum));
985 * Apply any intra-module relocations to the value. p is the load address
986 * of the value and val/len is the value to be modified. This does NOT modify
987 * the image in-place, because this is done by kern_linker later on.
989 * Returns EOPNOTSUPP if no relocation method is supplied.
/*
 * Apply any REL/RELA relocations that affect the pointer-sized object at
 * load address (p), writing the adjusted bytes into the caller's buffer
 * (val, len) rather than the image itself.  Scans both ef->rel and
 * ef->rela tables, delegating each entry to the per-arch __elfN(reloc)
 * with ef->off as the displacement.  Returns EOPNOTSUPP when no
 * relocation method applies (see block comment above this function).
 * NOTE(review): partial extraction — declarations, the kernel-path
 * condition preceding the EOPNOTSUPP return, and the error/return
 * handling around the loops are not visible here.
 */
992 __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
993 Elf_Addr p, void *val, size_t len)
1001 * The kernel is already relocated, but we still want to apply
1002 * offset adjustments.
1005 return (EOPNOTSUPP);
/* REL entries (implicit addend). */
1007 for (n = 0; n < ef->relsz / sizeof(r); n++) {
1008 COPYOUT(ef->rel + n, &r, sizeof(r));
1010 error = __elfN(reloc)(ef, __elfN(symaddr), &r, ELF_RELOC_REL,
1011 ef->off, p, val, len);
/* RELA entries (explicit addend). */
1015 for (n = 0; n < ef->relasz / sizeof(a); n++) {
1016 COPYOUT(ef->rela + n, &a, sizeof(a));
1018 error = __elfN(reloc)(ef, __elfN(symaddr), &a, ELF_RELOC_RELA,
1019 ef->off, p, val, len);
1028 __elfN(symaddr)(struct elf_file *ef, Elf_Size symidx)
1031 /* Symbol lookup by index not required here. */