/*-
 * Copyright (c) 2000 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/resourcevar.h>
#include <sys/sf_buf.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>

#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
#include <machine/fpu.h>
#include <compat/ia32/ia32_reg.h>
#endif
#define	OLD_EI_BRAND	8
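/*
 * Offset within e_ident[] at which FreeBSD 3.x-era toolchains wrote a
 * brand string (e.g. "FreeBSD") before the EI_OSABI byte was honored;
 * __elfN(get_brandinfo)() below still compares this region against each
 * brand's compat_3_brand.
 */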
static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
    const char *interp);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry, size_t pagesize);
static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot, size_t pagesize);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
    "");

int __elfN(fallback_brand) = -1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
    &__elfN(fallback_brand));
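/*
 * The fallback can be set from loader.conf(5) or at runtime; for
 * example (illustrative, brand value from <sys/elf_common.h>):
 *
 *	sysctl kern.elf32.fallback_brand=3	# ELFOSABI_LINUX
 *
 * so that unbranded 32-bit binaries are handled as Linux binaries.
 * The default of -1 matches no brand and disables the fallback.
 */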
static int elf_trace = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");

static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
    &elf_legacy_coredump, 0, "");

static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == NULL) {
			elf_brand_list[i] = entry;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == entry) {
			elf_brand_list[i] = NULL;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

int
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
	struct proc *p;
	int rval = FALSE;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		if (p->p_sysent == entry->sysvec) {
			rval = TRUE;
			break;
		}
	}
	sx_sunlock(&allproc_lock);

	return (rval);
}
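/*
 * Sketch of typical usage (not part of this file): an ABI emulation
 * module registers its brand at MOD_LOAD and refuses to unload while
 * processes still run on its sysentvec, along the lines of:
 *
 *	case MOD_LOAD:
 *		if (__elfN(insert_brand_entry)(&my_brand) < 0)
 *			error = EINVAL;
 *		break;
 *	case MOD_UNLOAD:
 *		if (__elfN(brand_inuse)(&my_brand))
 *			error = EBUSY;
 *		else if (__elfN(remove_brand_entry)(&my_brand) < 0)
 *			error = EINVAL;
 *		break;
 *
 * where "my_brand" stands in for the module's Elf_Brandinfo.
 */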
static Elf_Brandinfo *
__elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
{
	Elf_Brandinfo *bi;
	int i;

	/*
	 * We support three types of branding -- (1) the ELF EI_OSABI field
	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
	 * branding w/in the ELF header, and (3) the path in the `interp_path'
	 * field.  We should also look for an ".note.ABI-tag" ELF section now
	 * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
	 */

	/* If the executable has a brand, search for it in the brand list. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && hdr->e_machine == bi->machine &&
		    (hdr->e_ident[EI_OSABI] == bi->brand ||
		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
			return (bi);
	}

	/* Lacking a known brand, search for a recognized interpreter. */
	if (interp != NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			bi = elf_brand_list[i];
			if (bi != NULL && hdr->e_machine == bi->machine &&
			    strcmp(interp, bi->interp_path) == 0)
				return (bi);
		}
	}

	/* Lacking a recognized interpreter, try the default brand. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && hdr->e_machine == bi->machine &&
		    __elfN(fallback_brand) == bi->brand)
			return (bi);
	}
	return (NULL);
}
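/*
 * For example, a Linux binary is normally caught by the first pass via
 * its EI_OSABI byte or compat 3.x string brand; an unbranded binary
 * whose PT_INTERP names a known interpreter (e.g. "/lib/ld-linux.so.2")
 * is caught by the second pass; only when both passes fail is the
 * fallback_brand sysctl consulted.
 */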
static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
	Elf_Brandinfo *bi;
	int i;

	if (!IS_ELF(*hdr) ||
	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
	    hdr->e_version != ELF_TARG_VER)
		return (ENOEXEC);

	/*
	 * Make sure we have at least one brand for this machine.
	 */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && bi->machine == hdr->e_machine)
			break;
	}
	if (i == MAX_BRANDS)
		return (ENOEXEC);

	return (0);
}
static int
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{
	struct sf_buf *sf;
	int error;
	vm_offset_t off;

	/*
	 * Create the page if it doesn't exist yet.  Ignore errors.
	 */
	vm_map_lock(map);
	vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end),
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Find the page from the underlying object.
	 */
	if (object) {
		sf = vm_imgact_map_page(object, offset);
		if (sf == NULL)
			return (KERN_FAILURE);
		off = offset - trunc_page(offset);
		error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
		    end - start);
		vm_imgact_unmap_page(sf);
		if (error) {
			return (KERN_FAILURE);
		}
	}

	return (KERN_SUCCESS);
}
static int
__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow)
{
	struct sf_buf *sf;
	vm_offset_t off;
	vm_size_t sz;
	int error, rv;

	if (start != trunc_page(start)) {
		rv = __elfN(map_partial)(map, object, offset, start,
		    round_page(start), prot);
		if (rv)
			return (rv);
		offset += round_page(start) - start;
		start = round_page(start);
	}
	if (end != round_page(end)) {
		rv = __elfN(map_partial)(map, object, offset +
		    trunc_page(end) - start, trunc_page(end), end, prot);
		if (rv)
			return (rv);
		end = trunc_page(end);
	}

	if (offset & PAGE_MASK) {
		/*
		 * The mapping is not page aligned.  This means we have
		 * to copy the data.  Sigh.
		 */
		rv = vm_map_find(map, NULL, 0, &start, end - start,
		    FALSE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0);
		if (rv)
			return (rv);
		if (object == NULL)
			return (KERN_SUCCESS);
		for (; start < end; start += sz) {
			sf = vm_imgact_map_page(object, offset);
			if (sf == NULL)
				return (KERN_FAILURE);
			off = offset - trunc_page(offset);
			sz = end - start;
			if (sz > PAGE_SIZE - off)
				sz = PAGE_SIZE - off;
			error = copyout((caddr_t)sf_buf_kva(sf) + off,
			    (caddr_t)start, sz);
			vm_imgact_unmap_page(sf);
			if (error) {
				return (KERN_FAILURE);
			}
			offset += sz;
		}
	} else {
		vm_map_lock(map);
		rv = vm_map_insert(map, object, offset, start, end,
		    prot, VM_PROT_ALL, cow);
		vm_map_unlock(map);
		if (rv != KERN_SUCCESS)
			return (rv);
	}
	return (KERN_SUCCESS);
}
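/*
 * Summary of the two paths above: when the file offset and the user
 * address are congruent modulo the page size, the object is inserted
 * into the map directly and pages are faulted in copy-on-write from the
 * vnode; otherwise no page-aligned mapping is possible, so anonymous
 * memory is mapped and the data is copied in through sf_buf kernel
 * mappings.
 */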
static int
__elfN(load_section)(struct vmspace *vmspace,
	vm_object_t object, vm_offset_t offset,
	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
	size_t pagesize)
{
	struct sf_buf *sf;
	size_t map_len;
	vm_offset_t map_addr;
	int error, rv, cow;
	size_t copy_len;
	vm_offset_t file_addr;

	/*
	 * It's necessary to fail if the filsz + offset taken from the
	 * header is greater than the actual file pager object's size.
	 * If we were to allow this, then the vm_map_find() below would
	 * walk right off the end of the file object and into the ether.
	 *
	 * While I'm here, might as well check for something else that
	 * is invalid: filsz cannot be greater than memsz.
	 */
	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
	    filsz > memsz) {
		uprintf("elf_load_section: truncated ELF file\n");
		return (ENOEXEC);
	}

#define	trunc_page_ps(va, ps)	((va) & ~(ps - 1))
#define	round_page_ps(va, ps)	(((va) + (ps - 1)) & ~(ps - 1))

	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
	file_addr = trunc_page_ps(offset, pagesize);

	/*
	 * We have two choices.  We can either clear the data in the last page
	 * of an oversized mapping, or we can start the anon mapping a page
	 * early and copy the initialized data into that first page.  We
	 * choose the second.
	 */
	if (memsz > filsz)
		map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
	else
		map_len = round_page_ps(offset + filsz, pagesize) - file_addr;

	if (map_len != 0) {
		vm_object_reference(object);

		/* cow flags: don't dump readonly sections in core */
		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);

		rv = __elfN(map_insert)(&vmspace->vm_map,
		    object,
		    file_addr,		/* file offset */
		    map_addr,		/* virtual start */
		    map_addr + map_len,	/* virtual end */
		    prot,
		    cow);
		if (rv != KERN_SUCCESS) {
			vm_object_deallocate(object);
			return (EINVAL);
		}

		/* we can stop now if we've covered it all */
		if (memsz == filsz) {
			return (0);
		}
	}

	/*
	 * We have to get the remaining bit of the file into the first part
	 * of the oversized map segment.  This is normally because the .data
	 * segment in the file is extended to provide bss.  It's a neat idea
	 * to try and save a page, but it's a pain in the behind to implement.
	 */
	copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
	map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
	map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
	    map_addr;

	/* This had damn well better be true! */
	if (map_len != 0) {
		rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
		    map_addr + map_len, VM_PROT_ALL, 0);
		if (rv != KERN_SUCCESS) {
			return (EINVAL);
		}
	}

	if (copy_len != 0) {
		vm_offset_t off;

		sf = vm_imgact_map_page(object, offset + filsz);
		if (sf == NULL)
			return (EIO);

		/* send the page fragment to user space */
		off = trunc_page_ps(offset + filsz, pagesize) -
		    trunc_page(offset + filsz);
		error = copyout((caddr_t)sf_buf_kva(sf) + off,
		    (caddr_t)map_addr, copy_len);
		vm_imgact_unmap_page(sf);
		if (error) {
			return (error);
		}
	}

	/*
	 * Set it to the specified protection.
	 * XXX had better undo the damage from pasting over the cracks here!
	 */
	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
	    round_page(map_addr + map_len), prot, FALSE);

	return (0);
}
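/*
 * Worked example with illustrative numbers: pagesize 0x1000,
 * vmaddr 0x10100, offset 0x2100, filsz 0x1300, memsz 0x2000.  The first
 * pass maps the lone full file page (file 0x2000-0x2fff at user address
 * 0x10000-0x10fff); the anonymous mapping then covers 0x11000-0x12fff;
 * finally copy_len = 0x400 bytes (file 0x3000-0x33ff) are copied to
 * 0x11000, and everything from vaddr 0x11400 up reads as zero-filled bss.
 */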
/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out.  On entry, it specifies
 * the address where a shared object should be loaded.  If the file is
 * an executable, this value is ignored.  On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only.  On exit, it specifies
 * the entry point for the loaded file.
 */
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
	u_long *entry, size_t pagesize)
{
	struct {
		struct nameidata nd;
		struct vattr attr;
		struct image_params image_params;
	} *tempdata;
	const Elf_Ehdr *hdr = NULL;
	const Elf_Phdr *phdr = NULL;
	struct nameidata *nd;
	struct vmspace *vmspace = p->p_vmspace;
	struct vattr *attr;
	struct image_params *imgp;
	vm_prot_t prot;
	u_long rbase;
	u_long base_addr = 0;
	int vfslocked, error, i, numsegs;

	if (curthread->td_proc != p)
		panic("elf_load_file - thread");	/* XXXKSE DIAGNOSTIC */

	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
	nd = &tempdata->nd;
	attr = &tempdata->attr;
	imgp = &tempdata->image_params;

	/*
	 * Initialize part of the common data
	 */
	imgp->proc = p;
	imgp->attr = attr;
	imgp->firstpage = NULL;
	imgp->image_header = NULL;
	imgp->object = NULL;
	imgp->execlabel = NULL;

	NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file,
	    curthread);
	vfslocked = 0;
	if ((error = namei(nd)) != 0) {
		nd->ni_vp = NULL;
		goto fail;
	}
	vfslocked = NDHASGIANT(nd);
	NDFREE(nd, NDF_ONLY_PNBUF);
	imgp->vp = nd->ni_vp;

	/*
	 * Check permissions, modes, uid, etc on the file, and "open" it.
	 */
	error = exec_check_permissions(imgp);
	if (error)
		goto fail;

	error = exec_map_first_page(imgp);
	if (error)
		goto fail;

	/*
	 * Also make certain that the interpreter stays the same, so set
	 * its VV_TEXT flag, too.
	 */
	nd->ni_vp->v_vflag |= VV_TEXT;

	imgp->object = nd->ni_vp->v_object;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	if ((error = __elfN(check_header)(hdr)) != 0)
		goto fail;
	if (hdr->e_type == ET_DYN)
		rbase = *addr;
	else if (hdr->e_type == ET_EXEC)
		rbase = 0;
	else {
		error = ENOEXEC;
		goto fail;
	}

	/* Only support headers that fit within first page for now */
	/* (multiplication of two Elf_Half fields will not overflow) */
	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
		error = ENOEXEC;
		goto fail;
	}

	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_LOAD) {	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;

			if ((error = __elfN(load_section)(vmspace,
			    imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    pagesize)) != 0)
				goto fail;
			/*
			 * Establish the base address if this is the
			 * first segment.
			 */
			if (numsegs == 0)
				base_addr = trunc_page(phdr[i].p_vaddr +
				    rbase);
			numsegs++;
		}
	}
	*addr = base_addr;
	*entry = (unsigned long)hdr->e_entry + rbase;

fail:
	if (imgp->firstpage)
		exec_unmap_first_page(imgp);

	if (nd->ni_vp)
		vput(nd->ni_vp);

	VFS_UNLOCK_GIANT(vfslocked);
	free(tempdata, M_TEMP);

	return (error);
}
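/*
 * Within this file, __elfN(load_file)() is used by the image activator
 * below to map the ELF interpreter (the runtime linker), with *addr
 * seeded from the mmap(0, ...)-style placement computed there.
 */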
static int
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	const Elf_Phdr *phdr;
	Elf_Auxargs *elf_auxargs = NULL;
	struct vmspace *vmspace;
	vm_prot_t prot;
	u_long text_size = 0, data_size = 0, total_size = 0;
	u_long text_addr = 0, data_addr = 0;
	u_long seg_size, seg_addr;
	u_long addr, entry = 0, proghdr = 0;
	int error = 0, i;
	const char *interp = NULL;
	Elf_Brandinfo *brand_info;
	char *path;
	struct thread *td = curthread;
	struct sysentvec *sv;

	/*
	 * Do we have a valid ELF header?
	 *
	 * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
	 * if the particular brand doesn't support it.
	 */
	if (__elfN(check_header)(hdr) != 0 ||
	    (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
		return (-1);

	/*
	 * From here on down, we return an errno, not -1, as we've
	 * detected an ELF file.
	 */

	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
		/* Only support headers in first page for now */
		return (ENOEXEC);
	}
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_INTERP:	/* Path to interpreter */
			if (phdr[i].p_filesz > MAXPATHLEN ||
			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE)
				return (ENOEXEC);
			interp = imgp->image_header + phdr[i].p_offset;
			break;
		default:
			break;
		}
	}

	brand_info = __elfN(get_brandinfo)(hdr, interp);
	if (brand_info == NULL) {
		uprintf("ELF binary type \"%u\" not known.\n",
		    hdr->e_ident[EI_OSABI]);
		return (ENOEXEC);
	}
	if (hdr->e_type == ET_DYN &&
	    (brand_info->flags & BI_CAN_EXEC_DYN) == 0)
		return (ENOEXEC);
	sv = brand_info->sysvec;
	if (interp != NULL && brand_info->interp_newpath != NULL)
		interp = brand_info->interp_newpath;

	/*
	 * Avoid a possible deadlock if the current address space is destroyed
	 * and that address space maps the locked vnode.  In the common case,
	 * the locked vnode's v_usecount is decremented but remains greater
	 * than zero.  Consequently, the vnode lock is not needed by vrele().
	 * However, in cases where the vnode lock is external, such as nullfs,
	 * v_usecount may become zero.
	 */
	VOP_UNLOCK(imgp->vp, 0, td);

	exec_new_vmspace(imgp, sv);

	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);

	vmspace = imgp->proc->p_vmspace;

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_LOAD:	/* Loadable segment */
			prot = 0;
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;

#if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
			/*
			 * Some x86 binaries assume read == executable,
			 * notably the M3 runtime and therefore cvsup
			 */
			if (prot & VM_PROT_READ)
				prot |= VM_PROT_EXECUTE;
#endif

			if ((error = __elfN(load_section)(vmspace,
			    imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    sv->sv_pagesize)) != 0)
				return (error);

			/*
			 * If this segment contains the program headers,
			 * remember their virtual address for the AT_PHDR
			 * aux entry.  Static binaries don't usually include
			 * a PT_PHDR entry.
			 */
			if (phdr[i].p_offset == 0 &&
			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
				<= phdr[i].p_filesz)
				proghdr = phdr[i].p_vaddr + hdr->e_phoff;

			seg_addr = trunc_page(phdr[i].p_vaddr);
			seg_size = round_page(phdr[i].p_memsz +
			    phdr[i].p_vaddr - seg_addr);

			/*
			 * Is this .text or .data?  We can't use
			 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
			 * alpha terribly and possibly does other bad
			 * things so we stick to the old way of figuring
			 * it out:  If the segment contains the program
			 * entry point, it's a text segment, otherwise it
			 * is a data segment.
			 *
			 * Note that obreak() assumes that data_addr +
			 * data_size == end of data load area, and the ELF
			 * file format expects segments to be sorted by
			 * address.  If multiple data segments exist, the
			 * last one will be used.
			 */
			if (hdr->e_entry >= phdr[i].p_vaddr &&
			    hdr->e_entry < (phdr[i].p_vaddr +
			    phdr[i].p_memsz)) {
				text_size = seg_size;
				text_addr = seg_addr;
				entry = (u_long)hdr->e_entry;
			} else {
				data_size = seg_size;
				data_addr = seg_addr;
			}
			total_size += seg_size;
			break;
		case PT_PHDR:	/* Program header table info */
			proghdr = phdr[i].p_vaddr;
			break;
		default:
			break;
		}
	}

	if (data_addr == 0 && data_size == 0) {
		data_addr = text_addr;
		data_size = text_size;
	}

	/*
	 * Check limits.  It should be safe to check the
	 * limits after loading the segments since we do
	 * not actually fault in all the segments pages.
	 */
	PROC_LOCK(imgp->proc);
	if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
	    text_size > maxtsiz ||
	    total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
		PROC_UNLOCK(imgp->proc);
		return (ENOMEM);
	}

	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

	/*
	 * We load the dynamic linker where a userland call
	 * to mmap(0, ...) would put it.  The rationale behind this
	 * calculation is that it leaves room for the heap to grow to
	 * its maximum allowed size.
	 */
	addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
	    lim_max(imgp->proc, RLIMIT_DATA));
	PROC_UNLOCK(imgp->proc);

	imgp->entry_addr = entry;

	imgp->proc->p_sysent = sv;
	if (interp != NULL) {
		VOP_UNLOCK(imgp->vp, 0, td);
		if (brand_info->emul_path != NULL &&
		    brand_info->emul_path[0] != '\0') {
			path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
			snprintf(path, MAXPATHLEN, "%s%s",
			    brand_info->emul_path, interp);
			error = __elfN(load_file)(imgp->proc, path, &addr,
			    &imgp->entry_addr, sv->sv_pagesize);
			free(path, M_TEMP);
			if (error == 0)
				interp = NULL;
		}
		if (interp != NULL) {
			error = __elfN(load_file)(imgp->proc, interp, &addr,
			    &imgp->entry_addr, sv->sv_pagesize);
		}
		vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
		if (error != 0) {
			uprintf("ELF interpreter %s not found\n", interp);
			return (error);
		}
	}

	/*
	 * Construct auxargs table (used by the fixup routine)
	 */
	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
	elf_auxargs->execfd = -1;
	elf_auxargs->phdr = proghdr;
	elf_auxargs->phent = hdr->e_phentsize;
	elf_auxargs->phnum = hdr->e_phnum;
	elf_auxargs->pagesz = PAGE_SIZE;
	elf_auxargs->base = addr;
	elf_auxargs->flags = 0;
	elf_auxargs->entry = entry;
	elf_auxargs->trace = elf_trace;

	imgp->auxargs = elf_auxargs;
	imgp->interpreted = 0;

	return (error);
}
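/*
 * Note that the auxargs built above are not written to the user stack
 * here; __elfN(freebsd_fixup)() below does that after the stack is laid
 * out, so the AT_* vector lands just past the environment pointers.
 */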
#define	suword __CONCAT(suword, __ELF_WORD_SIZE)

int
__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
{
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	Elf_Addr *base;
	Elf_Addr *pos;

	base = (Elf_Addr *)*stack_base;
	pos = base + (imgp->args->argc + imgp->args->envc + 2);

	if (args->trace) {
		AUXARGS_ENTRY(pos, AT_DEBUG, 1);
	}
	if (args->execfd != -1) {
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	}
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	AUXARGS_ENTRY(pos, AT_NULL, 0);

	free(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;

	base--;
	suword(base, (long)imgp->args->argc);
	*stack_base = (register_t *)base;
	return (0);
}
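/*
 * Resulting user stack layout, low to high addresses:
 *
 *	argc | argv[0..argc-1] | NULL | envp[0..envc-1] | NULL | AT_* ...
 *
 * "base + argc + envc + 2" above is precisely the slot after the two
 * NULL terminators, which is where the aux vector begins.
 */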
/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
};

static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_writable_segment(struct thread *, segment_callback, void *);
static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
    int, void *, size_t);
static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
static void __elfN(putnote)(void *, size_t *, const char *, int,
    const void *, size_t);

extern int osreldate;
int
__elfN(coredump)(td, vp, limit)
	struct thread *td;
	struct vnode *vp;
	off_t limit;
{
	struct ucred *cred = td->td_ucred;
	int error = 0;
	struct sseg_closure seginfo;
	void *hdr;
	size_t hdrsize;

	/* Size the program segments. */
	seginfo.count = 0;
	seginfo.size = 0;
	each_writable_segment(td, cb_size_segment, &seginfo);

	/*
	 * Calculate the size of the core file header area by making
	 * a dry run of generating it.  Nothing is written, but the
	 * size is calculated.
	 */
	hdrsize = 0;
	__elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);

	if (hdrsize + seginfo.size >= limit)
		return (EFAULT);

	/*
	 * Allocate memory for building the header, fill it up,
	 * and write it out.
	 */
	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
	if (hdr == NULL) {
		return (EINVAL);
	}
	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);

	/* Write the contents of all of the writable segments. */
	if (error == 0) {
		Elf_Phdr *php;
		off_t offset;
		int i;

		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
		offset = hdrsize;
		for (i = 0; i < seginfo.count; i++) {
			error = vn_rdwr_inchunks(UIO_WRITE, vp,
			    (caddr_t)(uintptr_t)php->p_vaddr,
			    php->p_filesz, offset, UIO_USERSPACE,
			    IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
			    curthread); /* XXXKSE */
			if (error != 0)
				break;
			offset += php->p_filesz;
			php++;
		}
	}
	free(hdr, M_TEMP);

	return (error);
}
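/*
 * Layout of the resulting core file: the ELF header, numsegs + 1
 * program headers and the note data are produced by __elfN(puthdr)()
 * into the header area written above; the writable segments follow,
 * starting at the next page boundary (see the round_page() call in
 * __elfN(puthdr)()).
 */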
/*
 * A callback for each_writable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct phdr_closure *phc = (struct phdr_closure *)closure;
	Elf_Phdr *phdr = phc->phdr;

	phc->offset = round_page(phc->offset);

	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->start;
	phdr->p_paddr = 0;
	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
	phdr->p_align = PAGE_SIZE;
	phdr->p_flags = 0;
	if (entry->protection & VM_PROT_READ)
		phdr->p_flags |= PF_R;
	if (entry->protection & VM_PROT_WRITE)
		phdr->p_flags |= PF_W;
	if (entry->protection & VM_PROT_EXECUTE)
		phdr->p_flags |= PF_X;

	phc->offset += phdr->p_filesz;
	phc->phdr++;
}
/*
 * A callback for each_writable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct sseg_closure *ssc = (struct sseg_closure *)closure;

	ssc->count++;
	ssc->size += entry->end - entry->start;
}
/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_writable_segment(td, func, closure)
	struct thread *td;
	segment_callback func;
	void *closure;
{
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;

	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		vm_object_t obj;

		/*
		 * Don't dump inaccessible mappings, deal with legacy
		 * coredump mode.
		 *
		 * Note that read-only segments related to the elf binary
		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
		 * need to arbitrarily ignore such segments.
		 */
		if (elf_legacy_coredump) {
			if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
				continue;
		} else {
			if ((entry->protection & VM_PROT_ALL) == 0)
				continue;
		}

		/*
		 * Don't include a memory segment in the coredump if
		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
		 * madvise(2).  Do not dump submaps (i.e. parts of the
		 * kernel map).
		 */
		if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
			continue;

		if ((obj = entry->object.vm_object) == NULL)
			continue;

		/* Find the deepest backing object. */
		while (obj->backing_object != NULL)
			obj = obj->backing_object;

		/* Ignore memory-mapped devices and such things. */
		if (obj->type != OBJT_DEFAULT &&
		    obj->type != OBJT_SWAP &&
		    obj->type != OBJT_VNODE)
			continue;

		(*func)(entry, closure);
	}
}
/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
__elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
	struct thread *td;
	struct vnode *vp;
	struct ucred *cred;
	int numsegs;
	void *hdr;
	size_t hdrsize;
{
	size_t off;

	/* Fill in the header. */
	bzero(hdr, hdrsize);
	off = 0;
	__elfN(puthdr)(td, hdr, &off, numsegs);

	/* Write it to the core file. */
	return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
	    UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
	    td)); /* XXXKSE */
}
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
typedef struct prstatus32 elf_prstatus_t;
typedef struct prpsinfo32 elf_prpsinfo_t;
typedef struct fpreg32 elf_prfpregset_t;
typedef struct fpreg32 elf_fpregset_t;
typedef struct reg32 elf_gregset_t;
#else
typedef prstatus_t elf_prstatus_t;
typedef prpsinfo_t elf_prpsinfo_t;
typedef prfpregset_t elf_prfpregset_t;
typedef prfpregset_t elf_fpregset_t;
typedef gregset_t elf_gregset_t;
#endif
static void
__elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
{
	struct {
		elf_prstatus_t status;
		elf_prfpregset_t fpregset;
		elf_prpsinfo_t psinfo;
	} *tempdata;
	elf_prstatus_t *status;
	elf_prfpregset_t *fpregset;
	elf_prpsinfo_t *psinfo;
	struct proc *p;
	struct thread *thr;
	size_t ehoff, noteoff, notesz, phoff;

	p = td->td_proc;

	ehoff = *off;
	*off += sizeof(Elf_Ehdr);

	phoff = *off;
	*off += (numsegs + 1) * sizeof(Elf_Phdr);

	noteoff = *off;
	/*
	 * Don't allocate space for the notes if we're just calculating
	 * the size of the header.  We also don't collect the data.
	 */
	if (dst != NULL) {
		tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
		status = &tempdata->status;
		fpregset = &tempdata->fpregset;
		psinfo = &tempdata->psinfo;
	} else {
		tempdata = NULL;
		status = NULL;
		fpregset = NULL;
		psinfo = NULL;
	}

	if (dst != NULL) {
		psinfo->pr_version = PRPSINFO_VERSION;
		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
		/*
		 * XXX - We don't fill in the command line arguments properly
		 * yet.
		 */
		strlcpy(psinfo->pr_psargs, p->p_comm,
		    sizeof(psinfo->pr_psargs));
	}
	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
	    sizeof *psinfo);

	/*
	 * To have the debugger select the right thread (LWP) as the initial
	 * thread, we dump the state of the thread passed to us in td first.
	 * This is the thread that causes the core dump and thus likely to
	 * be the right thread one wants to have selected in the debugger.
	 */
	thr = td;
	while (thr != NULL) {
		if (dst != NULL) {
			status->pr_version = PRSTATUS_VERSION;
			status->pr_statussz = sizeof(elf_prstatus_t);
			status->pr_gregsetsz = sizeof(elf_gregset_t);
			status->pr_fpregsetsz = sizeof(elf_fpregset_t);
			status->pr_osreldate = osreldate;
			status->pr_cursig = p->p_sig;
			status->pr_pid = thr->td_tid;
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
			fill_regs32(thr, &status->pr_reg);
			fill_fpregs32(thr, fpregset);
#else
			fill_regs(thr, &status->pr_reg);
			fill_fpregs(thr, fpregset);
#endif
		}
		__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
		    sizeof *status);
		__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
		    sizeof *fpregset);
		/*
		 * Allow for MD specific notes, as well as any MD
		 * specific preparations for writing MI notes.
		 */
		__elfN(dump_thread)(thr, dst, off);

		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
		    TAILQ_NEXT(thr, td_plist);
		if (thr == td)
			thr = TAILQ_NEXT(thr, td_plist);
	}

	notesz = *off - noteoff;

	if (dst != NULL)
		free(tempdata, M_TEMP);

	/* Align up to a page boundary for the program segments. */
	*off = round_page(*off);

	if (dst != NULL) {
		Elf_Ehdr *ehdr;
		Elf_Phdr *phdr;
		struct phdr_closure phc;

		/*
		 * Fill in the ELF header.
		 */
		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
		ehdr->e_ident[EI_MAG0] = ELFMAG0;
		ehdr->e_ident[EI_MAG1] = ELFMAG1;
		ehdr->e_ident[EI_MAG2] = ELFMAG2;
		ehdr->e_ident[EI_MAG3] = ELFMAG3;
		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
		ehdr->e_ident[EI_DATA] = ELF_DATA;
		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
		ehdr->e_ident[EI_ABIVERSION] = 0;
		ehdr->e_ident[EI_PAD] = 0;
		ehdr->e_type = ET_CORE;
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
		ehdr->e_machine = EM_386;
#else
		ehdr->e_machine = ELF_ARCH;
#endif
		ehdr->e_version = EV_CURRENT;
		ehdr->e_entry = 0;
		ehdr->e_phoff = phoff;
		ehdr->e_flags = 0;
		ehdr->e_ehsize = sizeof(Elf_Ehdr);
		ehdr->e_phentsize = sizeof(Elf_Phdr);
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shentsize = sizeof(Elf_Shdr);
		ehdr->e_shnum = 0;
		ehdr->e_shstrndx = SHN_UNDEF;

		/*
		 * Fill in the program header entries.
		 */
		phdr = (Elf_Phdr *)((char *)dst + phoff);

		/* The note segment. */
		phdr->p_type = PT_NOTE;
		phdr->p_offset = noteoff;
		phdr->p_vaddr = 0;
		phdr->p_paddr = 0;
		phdr->p_filesz = notesz;
		phdr->p_memsz = 0;
		phdr->p_flags = 0;
		phdr->p_align = 0;
		phdr++;

		/* All the writable segments from the program. */
		phc.phdr = phdr;
		phc.offset = *off;
		each_writable_segment(td, cb_put_phdr, &phc);
	}
}
static void
__elfN(putnote)(void *dst, size_t *off, const char *name, int type,
    const void *desc, size_t descsz)
{
	Elf_Note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = descsz;
	note.n_type = type;
	if (dst != NULL)
		bcopy(&note, (char *)dst + *off, sizeof note);
	*off += sizeof note;
	if (dst != NULL)
		bcopy(name, (char *)dst + *off, note.n_namesz);
	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
	if (dst != NULL)
		bcopy(desc, (char *)dst + *off, note.n_descsz);
	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
}
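/*
 * Each note record is therefore an Elf_Note header followed by the name
 * ("FreeBSD\0", padded to an Elf_Size boundary) and the similarly
 * padded descriptor; this is the layout that readelf -n and debuggers
 * expect when walking the PT_NOTE segment of the core file.
 */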
/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw __elfN(execsw) = {
	__CONCAT(exec_, __elfN(imgact)),
	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));