2 * Copyright (c) 1998 Michael Smith <msmith@freebsd.org>
3 * Copyright (c) 1998 Peter Wemm <peter@freebsd.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #include <sys/endian.h>
34 #include <sys/linker.h>
35 #include <sys/module.h>
36 #include <sys/stdint.h>
38 #include <machine/elf.h>
41 #include <sys/link_elf.h>
43 #include "bootstrap.h"
/* Copy (l) bytes from loaded-image address (s) out to loader memory (d). */
45 #define COPYOUT(s,d,l) archsw.arch_copyout((vm_offset_t)(s), d, l)
/* Building a 64-bit ELF loader hosted on i386: target class/machine overrides. */
47 #if defined(__i386__) && __ELF_WORD_SIZE == 64
50 #define ELF_TARG_CLASS ELFCLASS64
51 #define ELF_TARG_MACH EM_X86_64
/*
 * Per-file ELF loading state (fd, first page cache, dynamic-section
 * pointers, hash table, relocation info).
 * NOTE(review): this listing is subsampled — the struct members are elided.
 */
54 typedef struct elf_file {
74 #ifdef LOADER_VERIEXEC_VECTX
/*
 * VECTX_HANDLE selects the handle passed to read/seek helpers: the
 * veriexec context when VECTX verification is built in, the raw fd otherwise.
 */
79 #ifdef LOADER_VERIEXEC_VECTX
80 #define VECTX_HANDLE(ef) (ef)->vctx
82 #define VECTX_HANDLE(ef) (ef)->fd
/* Forward declarations for the static helpers defined below. */
85 static int __elfN(loadimage)(struct preloaded_file *mp, elf_file_t ef,
87 static int __elfN(lookup_symbol)(elf_file_t ef, const char* name,
88 Elf_Sym *sym, unsigned char type);
89 static int __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
90 Elf_Addr p, void *val, size_t len);
91 static int __elfN(parse_modmetadata)(struct preloaded_file *mp, elf_file_t ef,
92 Elf_Addr p_start, Elf_Addr p_end);
93 static symaddr_fn __elfN(symaddr);
94 static char *fake_modname(const char *name);
/* Type strings recorded in preloaded_file.f_type for kernels vs. modules. */
96 const char *__elfN(kerneltype) = "elf kernel";
97 const char *__elfN(moduletype) = "elf module";
/* Load offset applied to the last image loaded; exported for other loader code. */
99 uint64_t __elfN(relocation_offset) = 0;
/*
 * Deliberately never defined: referencing it forces a link-time error if
 * CONVERT_FIELD is expanded on a field of unexpected size.
 */
102 extern void elf_wrong_field_size(void);
/*
 * Byte-swap one header field (b)->f from target endianness (e = "le"/"be")
 * to host order, dispatching on the field's size (16/32/64 bits).
 * NOTE(review): the case labels and closing brace are elided from this
 * subsampled listing.
 */
103 #define CONVERT_FIELD(b, f, e) \
104 switch (sizeof((b)->f)) { \
106 (b)->f = e ## 16toh((b)->f); \
109 (b)->f = e ## 32toh((b)->f); \
112 (b)->f = e ## 64toh((b)->f); \
115 /* Force a link time error. */ \
116 elf_wrong_field_size(); \
/*
 * Expand field list (f) on structure (d), selecting the le/be conversion
 * based on the EI_DATA byte of ELF header (h).
 */
120 #define CONVERT_SWITCH(h, d, f) \
121 switch ((h)->e_ident[EI_DATA]) { \
/*
 * Convert the ELF executable header to host endianness in place.
 * Returns 0 on success; error-return paths are in lines elided from this
 * subsampled listing.
 */
133 static int elf_header_convert(Elf_Ehdr *ehdr)
136 * Fixup ELF header endianness.
138 * The Xhdr structure was loaded using block read call to optimize file
139 * accesses. It might happen, that the endianness of the system memory
140 * is different that endianness of the ELF header. Swap fields here to
141 * guarantee that Xhdr always contain valid data regardless of
/* Every Ehdr field that needs swapping; expanded through CONVERT_SWITCH. */
144 #define HEADER_FIELDS(b, e) \
145 CONVERT_FIELD(b, e_type, e); \
146 CONVERT_FIELD(b, e_machine, e); \
147 CONVERT_FIELD(b, e_version, e); \
148 CONVERT_FIELD(b, e_entry, e); \
149 CONVERT_FIELD(b, e_phoff, e); \
150 CONVERT_FIELD(b, e_shoff, e); \
151 CONVERT_FIELD(b, e_flags, e); \
152 CONVERT_FIELD(b, e_ehsize, e); \
153 CONVERT_FIELD(b, e_phentsize, e); \
154 CONVERT_FIELD(b, e_phnum, e); \
155 CONVERT_FIELD(b, e_shentsize, e); \
156 CONVERT_FIELD(b, e_shnum, e); \
157 CONVERT_FIELD(b, e_shstrndx, e)
/* Swap all listed fields according to ehdr's own EI_DATA byte. */
159 CONVERT_SWITCH(ehdr, ehdr, HEADER_FIELDS);
/*
 * Convert one program header to host endianness in place, using the
 * endianness recorded in ehdr.  Returns 0 on success (elided in this
 * subsampled listing).
 */
166 static int elf_program_header_convert(const Elf_Ehdr *ehdr, Elf_Phdr *phdr)
168 #define PROGRAM_HEADER_FIELDS(b, e) \
169 CONVERT_FIELD(b, p_type, e); \
170 CONVERT_FIELD(b, p_flags, e); \
171 CONVERT_FIELD(b, p_offset, e); \
172 CONVERT_FIELD(b, p_vaddr, e); \
173 CONVERT_FIELD(b, p_paddr, e); \
174 CONVERT_FIELD(b, p_filesz, e); \
175 CONVERT_FIELD(b, p_memsz, e); \
176 CONVERT_FIELD(b, p_align, e)
/* Header (ehdr) supplies EI_DATA; data (phdr) receives the swaps. */
178 CONVERT_SWITCH(ehdr, phdr, PROGRAM_HEADER_FIELDS);
180 #undef PROGRAM_HEADER_FIELDS
/*
 * Convert one section header to host endianness in place, using the
 * endianness recorded in ehdr.  Returns 0 on success (elided in this
 * subsampled listing).
 */
185 static int elf_section_header_convert(const Elf_Ehdr *ehdr, Elf_Shdr *shdr)
187 #define SECTION_HEADER_FIELDS(b, e) \
188 CONVERT_FIELD(b, sh_name, e); \
189 CONVERT_FIELD(b, sh_type, e); \
190 CONVERT_FIELD(b, sh_link, e); \
191 CONVERT_FIELD(b, sh_info, e); \
192 CONVERT_FIELD(b, sh_flags, e); \
193 CONVERT_FIELD(b, sh_addr, e); \
194 CONVERT_FIELD(b, sh_offset, e); \
195 CONVERT_FIELD(b, sh_size, e); \
196 CONVERT_FIELD(b, sh_addralign, e); \
197 CONVERT_FIELD(b, sh_entsize, e)
199 CONVERT_SWITCH(ehdr, shdr, SECTION_HEADER_FIELDS);
201 #undef SECTION_HEADER_FIELDS
/* Conversion machinery no longer needed past this point. */
205 #undef CONVERT_SWITCH
/*
 * No-conversion build (host and target endianness match): the three
 * converters collapse to trivial stubs.  Bodies are elided from this
 * subsampled listing; presumably each just returns 0 — TODO confirm
 * against the full source.
 */
208 static int elf_header_convert(Elf_Ehdr *ehdr)
213 static int elf_program_header_convert(const Elf_Ehdr *ehdr, Elf_Phdr *phdr)
218 static int elf_section_header_convert(const Elf_Ehdr *ehdr, Elf_Shdr *shdr)
/*
 * A kernel that exports the "kernphys" symbol supports being loaded at a
 * physical address other than its link address; report whether the lookup
 * succeeds.
 */
226 is_kernphys_relocatable(elf_file_t ef)
230 return (__elfN(lookup_symbol)(ef, "kernphys", &sym, STT_OBJECT) == 0);
/*
 * Detect whether the kernel's vt(4) driver linker set contains the "vbefb"
 * framebuffer driver, i.e. whether the kernel can take over a graphical
 * (text-in-graphics) console set up by the loader.
 * NOTE(review): subsampled listing — early-return and loop-exit lines are
 * elided.
 */
236 is_tg_kernel_support(struct preloaded_file *fp, elf_file_t ef)
239 Elf_Addr p_start, p_end, v, p;
/* Bracket the vt_drv_set linker set via its __start/__stop symbols. */
243 if (__elfN(lookup_symbol)(ef, "__start_set_vt_drv_set", &sym, STT_NOTYPE) != 0)
245 p_start = sym.st_value + ef->off;
246 if (__elfN(lookup_symbol)(ef, "__stop_set_vt_drv_set", &sym, STT_NOTYPE) != 0)
248 p_end = sym.st_value + ef->off;
251 * Walk through vt_drv_set, each vt driver structure starts with
252 * static 16 chars for driver name. If we have "vbefb", return true.
254 for (p = p_start; p < p_end; p += sizeof(Elf_Addr)) {
/* Each set entry is a pointer to a driver struct; fetch and relocate it. */
255 COPYOUT(p, &v, sizeof(v));
257 error = __elfN(reloc_ptr)(fp, ef, p, &v, sizeof(v));
258 if (error == EOPNOTSUPP)
/* First 16 bytes of the driver struct are the driver name. */
262 COPYOUT(v, &vd_name, sizeof(vd_name));
263 if (strncmp(vd_name, "vbefb", sizeof(vd_name)) == 0)
/*
 * Open (filename), read its first page, and validate that it is an ELF
 * image of the class/endianness/machine this loader targets.  On success
 * ef->fd is open, ef->firstpage holds the first PAGE_SIZE bytes and
 * ef->ehdr points at the (endian-converted) header.  Error paths free the
 * first page and close the fd (partially elided in this subsampled listing).
 */
272 __elfN(load_elf_header)(char *filename, elf_file_t ef)
279 * Open the image, read and validate the ELF header
281 if (filename == NULL) /* can't handle nameless */
283 if ((ef->fd = open(filename, O_RDONLY)) == -1)
285 ef->firstpage = malloc(PAGE_SIZE)
286 if (ef->firstpage == NULL) {
291 #ifdef LOADER_VERIEXEC_VECTX
/* Wrap the fd in a veriexec context so all reads are hashed for verification. */
295 ef->vctx = vectx_open(ef->fd, filename, 0L, NULL, &verror, __func__);
297 printf("Unverified %s: %s\n", filename, ve_error_get());
304 bytes_read = VECTX_READ(VECTX_HANDLE(ef), ef->firstpage, PAGE_SIZE);
305 ef->firstlen = (size_t)bytes_read;
306 if (bytes_read < 0 || ef->firstlen <= sizeof(Elf_Ehdr)) {
307 err = EFTYPE; /* could be EIO, but may be small file */
310 ehdr = ef->ehdr = (Elf_Ehdr *)ef->firstpage;
/* Magic-number check, then class/endianness/version identity bytes. */
313 if (!IS_ELF(*ehdr)) {
318 if (ehdr->e_ident[EI_CLASS] != ELF_TARG_CLASS || /* Layout ? */
319 ehdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
320 ehdr->e_ident[EI_VERSION] != EV_CURRENT) /* Version ? */ {
/* Swap header fields to host order before reading multi-byte fields. */
325 err = elf_header_convert(ehdr);
329 if (ehdr->e_version != EV_CURRENT || ehdr->e_machine != ELF_TARG_MACH) {
/* Non-VECTX veriexec: verify the whole file contents now. */
335 #if defined(LOADER_VERIEXEC) && !defined(LOADER_VERIEXEC_VECTX)
336 if (verify_file(ef->fd, filename, bytes_read, VE_MUST, __func__) < 0) {
/* Common error cleanup: release the cached first page. */
344 if (ef->firstpage != NULL) {
346 ef->firstpage = NULL;
349 #ifdef LOADER_VERIEXEC_VECTX
359 * Attempt to load the file (file) as an ELF module. It will be stored at
360 * (dest), and a pointer to a module structure describing the loaded object
361 * will be saved in (result).
/* Public entry point: plain (non-multiboot) load. */
364 __elfN(loadfile)(char *filename, uint64_t dest, struct preloaded_file **result)
366 return (__elfN(loadfile_raw)(filename, dest, result, 0));
/*
 * Core of loadfile: validate the header, decide whether the image is a
 * kernel (ET_EXEC, or the first object loaded) or a kld module (ET_DYN),
 * allocate the preloaded_file record, load the image, and attach the ELF
 * header as metadata.  (multiboot) selects multiboot kernel handling.
 * NOTE(review): subsampled listing — many error-path and cleanup lines are
 * elided; comments below describe only the visible code.
 */
370 __elfN(loadfile_raw)(char *filename, uint64_t dest,
371 struct preloaded_file **result, int multiboot)
373 struct preloaded_file *fp, *kfp;
379 bzero(&ef, sizeof(struct elf_file));
382 err = __elfN(load_elf_header)(filename, &ef);
389 * Check to see what sort of module we are.
/* Has a kernel already been loaded?  (NULL name matches any file.) */
391 kfp = file_findfile(NULL, __elfN(kerneltype));
394 * Kernels can be ET_DYN, so just assume the first loaded object is the
395 * kernel. This assumption will be checked later.
400 if (ef.kernel || ehdr->e_type == ET_EXEC) {
401 /* Looks like a kernel */
403 printf("elf" __XSTRING(__ELF_WORD_SIZE)
404 "_loadfile: kernel already loaded\n");
409 * Calculate destination address based on kernel entrypoint.
411 * For ARM, the destination address is independent of any values
412 * in the elf header (an ARM kernel can be loaded at any 2MB
413 * boundary), so we leave dest set to the value calculated by
414 * archsw.arch_loadaddr() and passed in to this function.
417 if (ehdr->e_type == ET_EXEC)
418 dest = (ehdr->e_entry & ~PAGE_MASK);
/* An entry point in page zero cannot be a kernel. */
420 if ((ehdr->e_entry & ~PAGE_MASK) == 0) {
421 printf("elf" __XSTRING(__ELF_WORD_SIZE)
422 "_loadfile: not a kernel (maybe static binary?)\n");
428 } else if (ehdr->e_type == ET_DYN) {
429 /* Looks like a kld module */
430 if (multiboot != 0) {
431 printf("elf" __XSTRING(__ELF_WORD_SIZE)
432 "_loadfile: can't load module as multiboot\n");
/* Modules need a kernel to link against, and it must be our kernel type. */
437 printf("elf" __XSTRING(__ELF_WORD_SIZE)
438 "_loadfile: can't load module before kernel\n");
442 if (strcmp(__elfN(kerneltype), kfp->f_type)) {
443 printf("elf" __XSTRING(__ELF_WORD_SIZE)
444 "_loadfile: can't load module with kernel type '%s'\n",
449 /* Looks OK, got ahead */
/* Give the architecture a chance to refine/align the load address. */
457 if (archsw.arch_loadaddr != NULL)
458 dest = archsw.arch_loadaddr(LOAD_ELF, ehdr, dest);
460 dest = roundup(dest, PAGE_SIZE);
463 * Ok, we think we should handle this.
467 printf("elf" __XSTRING(__ELF_WORD_SIZE)
468 "_loadfile: cannot allocate module info\n");
472 if (ef.kernel == 1 && multiboot == 0)
473 setenv("kernelname", filename, 1);
474 fp->f_name = strdup(filename);
476 fp->f_type = strdup(ef.kernel ?
477 __elfN(kerneltype) : __elfN(moduletype));
479 fp->f_type = strdup("elf multiboot kernel");
481 if (module_verbose >= MODULE_VERBOSE_FULL) {
483 printf("%s entry at 0x%jx\n", filename,
484 (uintmax_t)ehdr->e_entry);
485 } else if (module_verbose > MODULE_VERBOSE_SILENT)
486 printf("%s ", filename);
/* Do the actual segment/symbol loading; 0 size or address means failure. */
488 fp->f_size = __elfN(loadimage)(fp, &ef, dest);
489 if (fp->f_size == 0 || fp->f_addr == 0)
492 /* save exec header as metadata */
493 file_addmetadata(fp, MODINFOMD_ELFHDR, sizeof(*ehdr), ehdr);
495 /* Load OK, return module pointer */
496 *result = (struct preloaded_file *)fp;
/* Multiboot kernels are treated as physically relocatable by definition. */
499 fp->f_kernphys_relocatable = multiboot || is_kernphys_relocatable(&ef);
502 fp->f_tg_kernel_support = is_tg_kernel_support(fp, &ef);
514 #ifdef LOADER_VERIEXEC_VECTX
/* Final veriexec verdict over everything read through the vectx handle. */
515 if (!err && ef.vctx) {
518 verror = vectx_close(ef.vctx, VE_MUST, __func__);
531 * With the file (fd) open on the image, and (ehdr) containing
532 * the Elf header, load the image at (off)
/*
 * Workhorse of the loader: copies PT_LOAD segments into place, zeroes bss,
 * loads section headers, .ctors info, symbol/string tables, and parses the
 * _DYNAMIC section to set up the symbol hash table used by lookup_symbol.
 * Returns the total size loaded (lastaddr - firstaddr), 0 on failure.
 * NOTE(review): this listing is subsampled — error paths, some loop bodies
 * and the local declarations are partially elided; comments describe only
 * the visible code.
 */
535 __elfN(loadimage)(struct preloaded_file *fp, elf_file_t ef, uint64_t off)
540 Elf_Phdr *phdr, *php;
544 vm_offset_t firstaddr;
545 vm_offset_t lastaddr;
558 Elf_Addr p_start, p_end;
563 firstaddr = lastaddr = 0;
/*
 * Compute the va->pa adjustment 'off' for ET_EXEC kernels; the rules are
 * per-architecture, as the long comments below explain.
 */
568 if (ehdr->e_type == ET_EXEC) {
570 #if defined(__i386__) || defined(__amd64__)
571 #if __ELF_WORD_SIZE == 64
572 /* x86_64 relocates after locore */
573 off = - (off & 0xffffffffff000000ull);
575 /* i386 relocates after locore */
576 off = - (off & 0xff000000u);
578 #elif defined(__powerpc__)
580 * On the purely virtual memory machines like e500, the kernel
581 * is linked against its final VA range, which is most often
582 * not available at the loader stage, but only after kernel
583 * initializes and completes its VM settings. In such cases we
584 * cannot use p_vaddr field directly to load ELF segments, but
585 * put them at some 'load-time' locations.
587 if (off & 0xf0000000u) {
588 off = -(off & 0xf0000000u);
590 * XXX the physical load address should not be
591 * hardcoded. Note that the Book-E kernel assumes that
592 * it's loaded at a 16MB boundary for now...
596 ehdr->e_entry += off;
597 if (module_verbose >= MODULE_VERBOSE_FULL)
598 printf("Converted entry 0x%jx\n",
599 (uintmax_t)ehdr->e_entry);
601 #elif defined(__arm__) && !defined(EFI)
603 * The elf headers in arm kernels specify virtual addresses in
604 * all header fields, even the ones that should be physical
605 * addresses. We assume the entry point is in the first page,
606 * and masking the page offset will leave us with the virtual
607 * address the kernel was linked at. We subtract that from the
608 * load offset, making 'off' into the value which, when added
609 * to a virtual address in an elf header, translates it to a
610 * physical address. We do the va->pa conversion on the entry
611 * point address in the header now, so that later we can launch
612 * the kernel by just jumping to that address.
614 * When booting from UEFI the copyin and copyout functions
615 * handle adjusting the location relative to the first virtual
616 * address. Because of this there is no need to adjust the
617 * offset or entry point address as these will both be handled
620 off -= ehdr->e_entry & ~PAGE_MASK;
621 ehdr->e_entry += off;
622 if (module_verbose >= MODULE_VERBOSE_FULL)
623 printf("ehdr->e_entry 0x%jx, va<->pa off %llx\n",
624 (uintmax_t)ehdr->e_entry, off);
626 off = 0; /* other archs use direct mapped kernels */
/* Publish the chosen offset for other loader components. */
632 __elfN(relocation_offset) = off;
/* The program header table must fit inside the cached first page. */
634 if ((ehdr->e_phoff + ehdr->e_phnum * sizeof(*phdr)) > ef->firstlen) {
635 printf("elf" __XSTRING(__ELF_WORD_SIZE)
636 "_loadimage: program header not within first page\n");
639 phdr = (Elf_Phdr *)(ef->firstpage + ehdr->e_phoff);
/* Pass 1: copy every PT_LOAD segment to its destination and clear bss. */
641 for (i = 0; i < ehdr->e_phnum; i++) {
642 if (elf_program_header_convert(ehdr, phdr))
645 /* We want to load PT_LOAD segments only.. */
646 if (phdr[i].p_type != PT_LOAD)
649 if (module_verbose >= MODULE_VERBOSE_FULL) {
650 printf("Segment: 0x%lx@0x%lx -> 0x%lx-0x%lx",
651 (long)phdr[i].p_filesz, (long)phdr[i].p_offset,
652 (long)(phdr[i].p_vaddr + off),
653 (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz - 1));
654 } else if (module_verbose > MODULE_VERBOSE_SILENT) {
655 if ((phdr[i].p_flags & PF_W) == 0) {
656 printf("text=0x%lx ", (long)phdr[i].p_filesz);
658 printf("data=0x%lx", (long)phdr[i].p_filesz);
659 if (phdr[i].p_filesz < phdr[i].p_memsz)
660 printf("+0x%lx", (long)(phdr[i].p_memsz -
/* Part of the segment may already be in the cached first page. */
666 if (ef->firstlen > phdr[i].p_offset) {
667 fpcopy = ef->firstlen - phdr[i].p_offset;
668 archsw.arch_copyin(ef->firstpage + phdr[i].p_offset,
669 phdr[i].p_vaddr + off, fpcopy);
/* Read the remainder of the segment directly from the file. */
671 if (phdr[i].p_filesz > fpcopy) {
672 if (kern_pread(VECTX_HANDLE(ef),
673 phdr[i].p_vaddr + off + fpcopy,
674 phdr[i].p_filesz - fpcopy,
675 phdr[i].p_offset + fpcopy) != 0) {
676 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
677 "_loadimage: read failed\n");
681 /* clear space from oversized segments; eg: bss */
682 if (phdr[i].p_filesz < phdr[i].p_memsz) {
683 if (module_verbose >= MODULE_VERBOSE_FULL) {
684 printf(" (bss: 0x%lx-0x%lx)",
685 (long)(phdr[i].p_vaddr + off + phdr[i].p_filesz),
686 (long)(phdr[i].p_vaddr + off + phdr[i].p_memsz -1));
688 kern_bzero(phdr[i].p_vaddr + off + phdr[i].p_filesz,
689 phdr[i].p_memsz - phdr[i].p_filesz);
691 if (module_verbose >= MODULE_VERBOSE_FULL)
694 if (archsw.arch_loadseg != NULL)
695 archsw.arch_loadseg(ehdr, phdr + i, off);
/* Track the overall [firstaddr, lastaddr) extent of what we loaded. */
697 if (firstaddr == 0 || firstaddr > (phdr[i].p_vaddr + off))
698 firstaddr = phdr[i].p_vaddr + off;
699 if (lastaddr == 0 || lastaddr <
700 (phdr[i].p_vaddr + off + phdr[i].p_memsz))
701 lastaddr = phdr[i].p_vaddr + off + phdr[i].p_memsz;
703 lastaddr = roundup(lastaddr, sizeof(long));
706 * Get the section headers. We need this for finding the .ctors
707 * section as well as for loading any symbols. Both may be hard
708 * to do if reading from a .gz file as it involves seeking. I
709 * think the rule is going to have to be that you must strip a
710 * file to remove symbols before gzipping it.
712 chunk = (size_t)ehdr->e_shnum * (size_t)ehdr->e_shentsize;
713 if (chunk == 0 || ehdr->e_shoff == 0)
715 shdr = alloc_pread(VECTX_HANDLE(ef), ehdr->e_shoff, chunk);
717 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
718 "_loadimage: failed to read section headers");
722 for (i = 0; i < ehdr->e_shnum; i++)
723 elf_section_header_convert(ehdr, &shdr[i]);
725 file_addmetadata(fp, MODINFOMD_SHDR, chunk, shdr);
728 * Read the section string table and look for the .ctors section.
729 * We need to tell the kernel where it is so that it can call the
732 chunk = shdr[ehdr->e_shstrndx].sh_size;
734 shstr = alloc_pread(VECTX_HANDLE(ef),
735 shdr[ehdr->e_shstrndx].sh_offset, chunk);
737 for (i = 0; i < ehdr->e_shnum; i++) {
738 if (strcmp(shstr + shdr[i].sh_name,
/* Record .ctors address/size as metadata so the kernel can run them. */
741 ctors = shdr[i].sh_addr;
742 file_addmetadata(fp, MODINFOMD_CTORS_ADDR,
743 sizeof(ctors), &ctors);
744 size = shdr[i].sh_size;
745 file_addmetadata(fp, MODINFOMD_CTORS_SIZE,
746 sizeof(size), &size);
754 * Now load any symbols.
/* Find a SYMTAB not already covered by a PT_LOAD segment. */
758 for (i = 0; i < ehdr->e_shnum; i++) {
759 if (shdr[i].sh_type != SHT_SYMTAB)
761 for (j = 0; j < ehdr->e_phnum; j++) {
762 if (phdr[j].p_type != PT_LOAD)
764 if (shdr[i].sh_offset >= phdr[j].p_offset &&
765 (shdr[i].sh_offset + shdr[i].sh_size <=
766 phdr[j].p_offset + phdr[j].p_filesz)) {
767 shdr[i].sh_offset = 0;
772 if (shdr[i].sh_offset == 0 || shdr[i].sh_size == 0)
773 continue; /* alread loaded in a PT_LOAD above */
774 /* Save it for loading below */
776 symstrindex = shdr[i].sh_link;
778 if (symtabindex < 0 || symstrindex < 0)
781 /* Ok, committed to a load. */
782 if (module_verbose >= MODULE_VERBOSE_FULL)
/* Load symtab then its strtab, each prefixed by its size word. */
785 for (i = symtabindex; i >= 0; i = symstrindex) {
788 switch(shdr[i].sh_type) {
789 case SHT_SYMTAB: /* Symbol table */
792 case SHT_STRTAB: /* String table */
799 size = shdr[i].sh_size;
801 archsw.arch_copyin(&size, lastaddr, sizeof(size));
802 lastaddr += sizeof(size);
804 if (module_verbose >= MODULE_VERBOSE_FULL) {
805 printf("\n%s: 0x%jx@0x%jx -> 0x%jx-0x%jx", secname,
806 (uintmax_t)shdr[i].sh_size, (uintmax_t)shdr[i].sh_offset,
808 (uintmax_t)(lastaddr + shdr[i].sh_size));
809 } else if (module_verbose > MODULE_VERBOSE_SILENT) {
810 if (i == symstrindex)
812 printf("0x%lx+0x%lx", (long)sizeof(size), (long)size);
814 if (VECTX_LSEEK(VECTX_HANDLE(ef), (off_t)shdr[i].sh_offset, SEEK_SET) == -1) {
815 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
816 "_loadimage: could not seek for symbols - skipped!");
821 result = archsw.arch_readin(VECTX_HANDLE(ef), lastaddr, shdr[i].sh_size);
822 if (result < 0 || (size_t)result != shdr[i].sh_size) {
823 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
824 "_loadimage: could not read symbols - skipped! "
825 "(%ju != %ju)", (uintmax_t)result,
826 (uintmax_t)shdr[i].sh_size);
831 /* Reset offsets relative to ssym */
832 lastaddr += shdr[i].sh_size;
833 lastaddr = roundup(lastaddr, sizeof(size));
834 if (i == symtabindex)
836 else if (i == symstrindex)
840 if (module_verbose >= MODULE_VERBOSE_FULL)
/* Publish start/end of the loaded symbol data for the kernel debugger. */
843 file_addmetadata(fp, MODINFOMD_SSYM, sizeof(ssym), &ssym);
844 file_addmetadata(fp, MODINFOMD_ESYM, sizeof(esym), &esym);
847 if (module_verbose > MODULE_VERBOSE_SILENT)
850 ret = lastaddr - firstaddr;
851 fp->f_addr = firstaddr;
/* Pass 2: find PT_DYNAMIC so we can locate symbols and module metadata. */
854 for (i = 0; i < ehdr->e_phnum; i++) {
855 if (phdr[i].p_type == PT_DYNAMIC) {
858 file_addmetadata(fp, MODINFOMD_DYNAMIC, sizeof(adp),
864 if (php == NULL) /* this is bad, we cannot get to symbols or _DYNAMIC */
867 ndp = php->p_filesz / sizeof(Elf_Dyn);
870 dp = malloc(php->p_filesz);
/* Pull the _DYNAMIC array back out of the loaded image to parse it. */
873 archsw.arch_copyout(php->p_vaddr + off, dp, php->p_filesz);
876 for (i = 0; i < ndp; i++) {
877 if (dp[i].d_tag == 0)
879 switch (dp[i].d_tag) {
882 (Elf_Hashelt*)(uintptr_t)(dp[i].d_un.d_ptr + off);
886 (char *)(uintptr_t)(dp[i].d_un.d_ptr + off);
889 ef->strsz = dp[i].d_un.d_val;
893 (Elf_Sym *)(uintptr_t)(dp[i].d_un.d_ptr + off);
897 (Elf_Rel *)(uintptr_t)(dp[i].d_un.d_ptr + off);
900 ef->relsz = dp[i].d_un.d_val;
904 (Elf_Rela *)(uintptr_t)(dp[i].d_un.d_ptr + off);
907 ef->relasz = dp[i].d_un.d_val;
/* Symbol lookup needs hash, symtab and strtab; bail if any is missing. */
913 if (ef->hashtab == NULL || ef->symtab == NULL ||
914 ef->strtab == NULL || ef->strsz == 0)
/* SysV hash layout: nbuckets, nchains, buckets[], chains[]. */
916 COPYOUT(ef->hashtab, &ef->nbuckets, sizeof(ef->nbuckets));
917 COPYOUT(ef->hashtab + 1, &ef->nchains, sizeof(ef->nchains));
918 ef->buckets = ef->hashtab + 2;
919 ef->chains = ef->buckets + ef->nbuckets;
/* Parse the module metadata linker set bounded by its __start/__stop symbols. */
921 if (__elfN(lookup_symbol)(ef, "__start_set_modmetadata_set", &sym,
924 p_start = sym.st_value + ef->off;
925 if (__elfN(lookup_symbol)(ef, "__stop_set_modmetadata_set", &sym,
928 p_end = sym.st_value + ef->off;
930 if (__elfN(parse_modmetadata)(fp, ef, p_start, p_end) == 0)
933 if (ef->kernel) /* kernel must not depend on anything */
/* Fallback module name when a file declares no MDT_MODULE metadata. */
944 static char invalid_name[] = "bad";
/*
 * Derive a module name from a file path: strip the directory prefix and
 * the trailing extension (e.g. "/boot/kernel/foo.ko" -> "foo").  Returns a
 * malloc'd string; caller frees.  NOTE(review): subsampled listing — some
 * lines (null checks, copy, return) are elided.
 */
947 fake_modname(const char *name)
/* Skip past the last '/' to the basename. */
953 sp = strrchr(name, '/');
/* Trim from the last '.' (the extension), if any. */
959 ep = strrchr(sp, '.');
961 ep = sp + strlen(sp);
/* Degenerate basename: fall back to the "bad" placeholder. */
965 ep = invalid_name + sizeof(invalid_name) - 1;
969 fp = malloc(len + 1);
/*
 * Cross-word-size layouts of struct mod_metadata: when the loader's word
 * size differs from the target kernel's, pointers in the metadata records
 * are 64-bit (32-bit loader reading a 64-bit kernel) or 32-bit (64-bit
 * loader reading a 32-bit kernel), so matching fixed-width mirrors are
 * declared here and converted in parse_modmetadata.
 */
977 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
978 struct mod_metadata64 {
979 int md_version; /* structure version MDTV_* */
980 int md_type; /* type of entry MDT_* */
981 uint64_t md_data; /* specific data */
982 uint64_t md_cval; /* common string label */
985 #if defined(__amd64__) && __ELF_WORD_SIZE == 32
986 struct mod_metadata32 {
987 int md_version; /* structure version MDTV_* */
988 int md_type; /* type of entry MDT_* */
989 uint32_t md_data; /* specific data */
990 uint32_t md_cval; /* common string label */
/*
 * Extract only the module metadata from an ELF module without loading the
 * whole image: read section headers, locate set_modmetadata_set plus the
 * .data/.rodata sections it points into, copy those to (dest), and parse
 * the metadata records.  Used to learn module names/dependencies cheaply.
 * NOTE(review): subsampled listing — error paths and some cleanup lines
 * are elided.
 */
995 __elfN(load_modmetadata)(struct preloaded_file *fp, uint64_t dest)
999 Elf_Shdr *sh_meta, *shdr = NULL;
1000 Elf_Shdr *sh_data[2];
1001 char *shstrtab = NULL;
1003 Elf_Addr p_start, p_end;
1005 bzero(&ef, sizeof(struct elf_file));
1008 err = __elfN(load_elf_header)(fp->f_name, &ef);
/* Only ET_DYN modules carry a metadata set worth scanning here. */
1012 if (ef.kernel == 1 || ef.ehdr->e_type == ET_EXEC) {
1014 } else if (ef.ehdr->e_type != ET_DYN) {
/* Read the full section header table. */
1019 size = (size_t)ef.ehdr->e_shnum * (size_t)ef.ehdr->e_shentsize;
1020 shdr = alloc_pread(VECTX_HANDLE(&ef), ef.ehdr->e_shoff, size);
1026 /* Load shstrtab. */
1027 shstrtab = alloc_pread(VECTX_HANDLE(&ef), shdr[ef.ehdr->e_shstrndx].sh_offset,
1028 shdr[ef.ehdr->e_shstrndx].sh_size);
1029 if (shstrtab == NULL) {
1030 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1031 "load_modmetadata: unable to load shstrtab\n");
1036 /* Find set_modmetadata_set and data sections. */
1037 sh_data[0] = sh_data[1] = sh_meta = NULL;
1038 for (i = 0, j = 0; i < ef.ehdr->e_shnum; i++) {
1039 if (strcmp(&shstrtab[shdr[i].sh_name],
1040 "set_modmetadata_set") == 0) {
1043 if ((strcmp(&shstrtab[shdr[i].sh_name], ".data") == 0) ||
1044 (strcmp(&shstrtab[shdr[i].sh_name], ".rodata") == 0)) {
1045 sh_data[j++] = &shdr[i];
1048 if (sh_meta == NULL || sh_data[0] == NULL || sh_data[1] == NULL) {
1049 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1050 "load_modmetadata: unable to find set_modmetadata_set or data sections\n");
1055 /* Load set_modmetadata_set into memory */
1056 err = kern_pread(VECTX_HANDLE(&ef), dest, sh_meta->sh_size, sh_meta->sh_offset);
1058 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1059 "load_modmetadata: unable to load set_modmetadata_set: %d\n", err);
1063 p_end = dest + sh_meta->sh_size;
1064 dest += sh_meta->sh_size;
1066 /* Load data sections into memory. */
1067 err = kern_pread(VECTX_HANDLE(&ef), dest, sh_data[0]->sh_size,
1068 sh_data[0]->sh_offset);
1070 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1071 "load_modmetadata: unable to load data: %d\n", err);
1076 * We have to increment the dest, so that the offset is the same into
1077 * both the .rodata and .data sections.
1079 ef.off = -(sh_data[0]->sh_addr - dest);
1080 dest += (sh_data[1]->sh_addr - sh_data[0]->sh_addr);
1082 err = kern_pread(VECTX_HANDLE(&ef), dest, sh_data[1]->sh_size,
1083 sh_data[1]->sh_offset);
1085 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1086 "load_modmetadata: unable to load data: %d\n", err);
/* Walk the copied metadata records and register them on fp. */
1090 err = __elfN(parse_modmetadata)(fp, &ef, p_start, p_end);
1092 printf("\nelf" __XSTRING(__ELF_WORD_SIZE)
1093 "load_modmetadata: unable to parse metadata: %d\n", err);
/* Common cleanup: free temporaries, release the cached first page. */
1098 if (shstrtab != NULL)
1102 if (ef.firstpage != NULL)
1105 #ifdef LOADER_VERIEXEC_VECTX
1106 if (!err && ef.vctx) {
1109 verror = vectx_close(ef.vctx, VE_MUST, __func__);
/*
 * Walk the set_modmetadata_set linker set between p_start and p_end.
 * Each entry is a pointer to a struct mod_metadata; for MDT_DEPEND records
 * add a dependency to fp's metadata list, for MDT_VERSION records register
 * the module name/version on fp.  Pointers are relocated either via
 * reloc_ptr or, when that returns EOPNOTSUPP, by adding ef->off directly.
 * NOTE(review): subsampled listing — loop header, returns and some case
 * labels are elided.
 */
1122 __elfN(parse_modmetadata)(struct preloaded_file *fp, elf_file_t ef,
1123 Elf_Addr p_start, Elf_Addr p_end)
1125 struct mod_metadata md;
1126 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
1127 struct mod_metadata64 md64;
1128 #elif defined(__amd64__) && __ELF_WORD_SIZE == 32
1129 struct mod_metadata32 md32;
1131 struct mod_depend *mdepend;
1132 struct mod_version mver;
1134 int error, modcnt, minfolen;
/* Fetch the next set entry (a pointer) and relocate it. */
1140 COPYOUT(p, &v, sizeof(v));
1141 error = __elfN(reloc_ptr)(fp, ef, p, &v, sizeof(v));
1142 if (error == EOPNOTSUPP)
1144 else if (error != 0)
1146 #if (defined(__i386__) || defined(__powerpc__)) && __ELF_WORD_SIZE == 64
/* 32-bit loader, 64-bit target: read the 64-bit layout and narrow it. */
1147 COPYOUT(v, &md64, sizeof(md64));
1148 error = __elfN(reloc_ptr)(fp, ef, v, &md64, sizeof(md64));
1149 if (error == EOPNOTSUPP) {
1150 md64.md_cval += ef->off;
1151 md64.md_data += ef->off;
1152 } else if (error != 0)
1154 md.md_version = md64.md_version;
1155 md.md_type = md64.md_type;
1156 md.md_cval = (const char *)(uintptr_t)md64.md_cval;
1157 md.md_data = (void *)(uintptr_t)md64.md_data;
1158 #elif defined(__amd64__) && __ELF_WORD_SIZE == 32
/* 64-bit loader, 32-bit target: read the 32-bit layout and widen it. */
1159 COPYOUT(v, &md32, sizeof(md32));
1160 error = __elfN(reloc_ptr)(fp, ef, v, &md32, sizeof(md32));
1161 if (error == EOPNOTSUPP) {
1162 md32.md_cval += ef->off;
1163 md32.md_data += ef->off;
1164 } else if (error != 0)
1166 md.md_version = md32.md_version;
1167 md.md_type = md32.md_type;
1168 md.md_cval = (const char *)(uintptr_t)md32.md_cval;
1169 md.md_data = (void *)(uintptr_t)md32.md_data;
/* Native word size: the record can be copied out as-is. */
1171 COPYOUT(v, &md, sizeof(md));
1172 error = __elfN(reloc_ptr)(fp, ef, v, &md, sizeof(md));
1173 if (error == EOPNOTSUPP) {
1174 md.md_cval += ef->off;
1175 md.md_data = (void *)((uintptr_t)md.md_data +
1176 (uintptr_t)ef->off);
1177 } else if (error != 0)
1180 p += sizeof(Elf_Addr);
1181 switch(md.md_type) {
1183 if (ef->kernel) /* kernel must not depend on anything */
/* MDT_DEPEND: record "this module needs <s>" as MODINFOMD_DEPLIST. */
1185 s = strdupout((vm_offset_t)md.md_cval);
1186 minfolen = sizeof(*mdepend) + strlen(s) + 1;
1187 mdepend = malloc(minfolen);
1188 if (mdepend == NULL)
1190 COPYOUT((vm_offset_t)md.md_data, mdepend,
1192 strcpy((char*)(mdepend + 1), s);
1194 file_addmetadata(fp, MODINFOMD_DEPLIST, minfolen,
/* MDT_VERSION: register the module name and version on fp. */
1199 s = strdupout((vm_offset_t)md.md_cval);
1200 COPYOUT((vm_offset_t)md.md_data, &mver, sizeof(mver));
1201 file_addmodule(fp, s, mver.mv_version, NULL);
/* No metadata found at all: synthesize a module name from the file name. */
1208 s = fake_modname(fp->f_name);
1209 file_addmodule(fp, s, 1, NULL);
/*
 * Classic System V ABI ELF symbol-name hash (matches the kernel linker's
 * and rtld's hashing, so lookups agree with the .hash section contents).
 */
1215 static unsigned long
1216 elf_hash(const char *name)
1218 const unsigned char *p = (const unsigned char *) name;
1219 unsigned long h = 0;
1222 while (*p != '\0') {
1223 h = (h << 4) + *p++;
1224 if ((g = h & 0xf0000000) != 0)
/* Shared diagnostic for the corrupt-symbol-table bail-outs below. */
1231 static const char __elfN(bad_symtable)[] = "elf" __XSTRING(__ELF_WORD_SIZE)
1232 "_lookup_symbol: corrupt symbol table\n";
/*
 * Look up (name) in the loaded image's SysV hash chain, requiring symbol
 * type (type); on success copy the Elf_Sym out through symp.  Return
 * values for the not-found/corrupt paths are elided from this subsampled
 * listing — presumably 0 on success, non-zero otherwise; TODO confirm.
 */
1234 __elfN(lookup_symbol)(elf_file_t ef, const char* name, Elf_Sym *symp,
/* Guard against a zero bucket count (division below would be UB). */
1242 if (ef->nbuckets == 0) {
1243 printf(__elfN(bad_symtable));
1247 hash = elf_hash(name);
1248 COPYOUT(&ef->buckets[hash % ef->nbuckets], &symnum, sizeof(symnum));
/* Follow the hash chain until STN_UNDEF terminates it. */
1250 while (symnum != STN_UNDEF) {
1251 if (symnum >= ef->nchains) {
1252 printf(__elfN(bad_symtable));
1256 COPYOUT(ef->symtab + symnum, &sym, sizeof(sym));
1257 if (sym.st_name == 0) {
1258 printf(__elfN(bad_symtable));
/* Pull the candidate's name out of the loaded strtab and compare. */
1262 strp = strdupout((vm_offset_t)(ef->strtab + sym.st_name));
1263 if (strcmp(name, strp) == 0) {
/* Name matches; accept only defined, non-zero symbols of the wanted type. */
1265 if (sym.st_shndx != SHN_UNDEF && sym.st_value != 0 &&
1266 ELF_ST_TYPE(sym.st_info) == type) {
/* No match: advance to the next symbol in this hash chain. */
1273 COPYOUT(&ef->chains[symnum], &symnum, sizeof(symnum));
1279 * Apply any intra-module relocations to the value. p is the load address
1280 * of the value and val/len is the value to be modified. This does NOT modify
1281 * the image in-place, because this is done by kern_linker later on.
1283 * Returns EOPNOTSUPP if no relocation method is supplied.
/*
 * NOTE(review): subsampled listing — the early-return for already-relocated
 * kernels and the final return are elided; comments describe visible code.
 */
1286 __elfN(reloc_ptr)(struct preloaded_file *mp, elf_file_t ef,
1287 Elf_Addr p, void *val, size_t len)
1295 * The kernel is already relocated, but we still want to apply
1296 * offset adjustments.
1299 return (EOPNOTSUPP);
/* Apply every REL-style relocation that overlaps [p, p+len). */
1301 for (n = 0; n < ef->relsz / sizeof(r); n++) {
1302 COPYOUT(ef->rel + n, &r, sizeof(r));
1304 error = __elfN(reloc)(ef, __elfN(symaddr), &r, ELF_RELOC_REL,
1305 ef->off, p, val, len);
/* Then every RELA-style relocation. */
1309 for (n = 0; n < ef->relasz / sizeof(a); n++) {
1310 COPYOUT(ef->rela + n, &a, sizeof(a));
1312 error = __elfN(reloc)(ef, __elfN(symaddr), &a, ELF_RELOC_RELA,
1313 ef->off, p, val, len);
1322 __elfN(symaddr)(struct elf_file *ef, Elf_Size symidx)
1325 /* Symbol lookup by index not required here. */