 * Copyright (c) 2000 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_compat.h"
#include <sys/param.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
#include <sys/procfs.h>
#include <sys/resourcevar.h>
#include <sys/sf_buf.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <machine/elf.h>
#include <machine/md_var.h>
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
#include <machine/fpu.h>
#include <compat/ia32/ia32_reg.h>
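/*
 * e_ident[] index of the traditional FreeBSD 3.x string brand, compared
 * against each brand's compat_3_brand in the lookup below.
 */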
#define	OLD_EI_BRAND	8
static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
    const char *interp);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry, size_t pagesize);
static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot, size_t pagesize);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
int __elfN(fallback_brand) = -1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
    &__elfN(fallback_brand));
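/*
 * The fallback brand can be set at boot via the kern.elf32/kern.elf64
 * fallback_brand loader tunable or changed at run time through the
 * read-write sysctl declared above; -1 leaves it unset.
 */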
static int elf_trace = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");
static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
    &elf_legacy_coredump, 0, "");
static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == NULL) {
			elf_brand_list[i] = entry;
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == entry) {
			elf_brand_list[i] = NULL;
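/*
 * Return non-zero if some process is still executing under the given
 * brand's sysentvec; the check walks the allproc list under allproc_lock.
 */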
__elfN(brand_inuse)(Elf_Brandinfo *entry)
	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		if (p->p_sysent == entry->sysvec) {
	sx_sunlock(&allproc_lock);
static Elf_Brandinfo *
__elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
	/*
	 * We support three types of branding -- (1) the ELF EI_OSABI field
	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
	 * branding w/in the ELF header, and (3) path of the `interp_path'
	 * field. We should also look for an ".note.ABI-tag" ELF section now
	 * in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
	 */
	/* If the executable has a brand, search for it in the brand list. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && hdr->e_machine == bi->machine &&
		    (hdr->e_ident[EI_OSABI] == bi->brand ||
		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
	/* Lacking a known brand, search for a recognized interpreter. */
	if (interp != NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			bi = elf_brand_list[i];
			if (bi != NULL && hdr->e_machine == bi->machine &&
			    strcmp(interp, bi->interp_path) == 0)
	/* Lacking a recognized interpreter, try the default brand */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && hdr->e_machine == bi->machine &&
		    __elfN(fallback_brand) == bi->brand)
__elfN(check_header)(const Elf_Ehdr *hdr)
	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
	    hdr->e_version != ELF_TARG_VER)
	/*
	 * Make sure we have at least one brand for this machine.
	 */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && bi->machine == hdr->e_machine)
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot)
	/*
	 * Create the page if it doesn't exist yet. Ignore errors.
	 */
	vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end),
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	/*
	 * Find the page from the underlying object.
	 */
	sf = vm_imgact_map_page(object, offset);
		return (KERN_FAILURE);
	off = offset - trunc_page(offset);
	error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
	vm_imgact_unmap_page(sf);
		return (KERN_FAILURE);
	return (KERN_SUCCESS);
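/*
 * Map the range [start, end) from the object into the target map.  Partial
 * pages at either end, and any range whose file offset is not page aligned,
 * are copied into place with copyout() rather than being mapped directly.
 */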
__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow)
	if (start != trunc_page(start)) {
		rv = __elfN(map_partial)(map, object, offset, start,
		    round_page(start), prot);
		offset += round_page(start) - start;
		start = round_page(start);
	if (end != round_page(end)) {
		rv = __elfN(map_partial)(map, object, offset +
		    trunc_page(end) - start, trunc_page(end), end, prot);
		end = trunc_page(end);
	if (offset & PAGE_MASK) {
		/*
		 * The mapping is not page aligned. This means we have
		 * to copy the data. Sigh.
		 */
		rv = vm_map_find(map, NULL, 0, &start, end - start,
		    FALSE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0);
			return (KERN_SUCCESS);
		for (; start < end; start += sz) {
			sf = vm_imgact_map_page(object, offset);
				return (KERN_FAILURE);
			off = offset - trunc_page(offset);
			if (sz > PAGE_SIZE - off)
				sz = PAGE_SIZE - off;
			error = copyout((caddr_t)sf_buf_kva(sf) + off,
			vm_imgact_unmap_page(sf);
				return (KERN_FAILURE);
		vm_object_reference(object);
		rv = vm_map_insert(map, object, offset, start, end,
		    prot, VM_PROT_ALL, cow);
		if (rv != KERN_SUCCESS)
			vm_object_deallocate(object);
	return (KERN_SUCCESS);
__elfN(load_section)(struct vmspace *vmspace,
	vm_object_t object, vm_offset_t offset,
	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
	vm_offset_t map_addr;
	vm_offset_t file_addr;
	/*
	 * It's necessary to fail if the filsz + offset taken from the
	 * header is greater than the actual file pager object's size.
	 * If we were to allow this, then the vm_map_find() below would
	 * walk right off the end of the file object and into the ether.
	 * While I'm here, might as well check for something else that
	 * is invalid: filsz cannot be greater than memsz.
	 */
	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
		uprintf("elf_load_section: truncated ELF file\n");
#define	trunc_page_ps(va, ps)	((va) & ~(ps - 1))
#define	round_page_ps(va, ps)	(((va) + (ps - 1)) & ~(ps - 1))
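/*
 * The *_ps macros above round to an arbitrary power-of-two page size (the
 * brand's sv_pagesize is passed in as "pagesize"), not just the native
 * PAGE_SIZE.
 */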
	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
	file_addr = trunc_page_ps(offset, pagesize);
	/*
	 * We have two choices. We can either clear the data in the last page
	 * of an oversized mapping, or we can start the anon mapping a page
	 * early and copy the initialized data into that first page. We
	 * choose the second.
	 */
		map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
		map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
		/* cow flags: don't dump readonly sections in core */
		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
		rv = __elfN(map_insert)(&vmspace->vm_map,
		    file_addr,		/* file offset */
		    map_addr,		/* virtual start */
		    map_addr + map_len,	/* virtual end */
		if (rv != KERN_SUCCESS)
	/* we can stop now if we've covered it all */
	if (memsz == filsz) {
	/*
	 * We have to get the remaining bit of the file into the first part
	 * of the oversized map segment. This is normally because the .data
	 * segment in the file is extended to provide bss. It's a neat idea
	 * to try and save a page, but it's a pain in the behind to implement.
	 */
	copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
	map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
	map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
	/* This had damn well better be true! */
	rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
	    map_addr + map_len, VM_PROT_ALL, 0);
	if (rv != KERN_SUCCESS) {
		sf = vm_imgact_map_page(object, offset + filsz);
		/* send the page fragment to user space */
		off = trunc_page_ps(offset + filsz, pagesize) -
		    trunc_page(offset + filsz);
		error = copyout((caddr_t)sf_buf_kva(sf) + off,
		    (caddr_t)map_addr, copy_len);
		vm_imgact_unmap_page(sf);
	/*
	 * set it to the specified protection.
	 * XXX had better undo the damage from pasting over the cracks here!
	 */
	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
	    round_page(map_addr + map_len), prot, FALSE);
/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out. On entry, it specifies
 * the address where a shared object should be loaded. If the file is
 * an executable, this value is ignored. On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only. On exit, it specifies
 * the entry point for the loaded file.
 */
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
	u_long *entry, size_t pagesize)
		struct image_params image_params;
	const Elf_Ehdr *hdr = NULL;
	const Elf_Phdr *phdr = NULL;
	struct nameidata *nd;
	struct vmspace *vmspace = p->p_vmspace;
	struct image_params *imgp;
	u_long base_addr = 0;
	int vfslocked, error, i, numsegs;
	if (curthread->td_proc != p)
		panic("elf_load_file - thread"); /* XXXKSE DIAGNOSTIC */
	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
	attr = &tempdata->attr;
	imgp = &tempdata->image_params;
	/*
	 * Initialize part of the common data
	 */
	imgp->firstpage = NULL;
	imgp->image_header = NULL;
	imgp->execlabel = NULL;
	NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file,
	if ((error = namei(nd)) != 0) {
	vfslocked = NDHASGIANT(nd);
	NDFREE(nd, NDF_ONLY_PNBUF);
	imgp->vp = nd->ni_vp;
	/*
	 * Check permissions, modes, uid, etc on the file, and "open" it.
	 */
	error = exec_check_permissions(imgp);
	error = exec_map_first_page(imgp);
	/*
	 * Also make certain that the interpreter stays the same, so set
	 * its VV_TEXT flag, too.
	 */
	nd->ni_vp->v_vflag |= VV_TEXT;
	imgp->object = nd->ni_vp->v_object;
	hdr = (const Elf_Ehdr *)imgp->image_header;
	if ((error = __elfN(check_header)(hdr)) != 0)
	if (hdr->e_type == ET_DYN)
	else if (hdr->e_type == ET_EXEC)
	/* Only support headers that fit within first page for now */
	/* (multiplication of two Elf_Half fields will not overflow) */
	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_LOAD) { /* Loadable segment */
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;
			if ((error = __elfN(load_section)(vmspace,
			    imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			/*
			 * Establish the base address if this is the
			 * first segment.
			 */
				base_addr = trunc_page(phdr[i].p_vaddr +
	*entry = (unsigned long)hdr->e_entry + rbase;
	exec_unmap_first_page(imgp);
	VFS_UNLOCK_GIANT(vfslocked);
	free(tempdata, M_TEMP);
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	const Elf_Phdr *phdr;
	Elf_Auxargs *elf_auxargs;
	struct vmspace *vmspace;
	u_long text_size = 0, data_size = 0, total_size = 0;
	u_long text_addr = 0, data_addr = 0;
	u_long seg_size, seg_addr;
	u_long addr, entry = 0, proghdr = 0;
	const char *interp = NULL;
	Elf_Brandinfo *brand_info;
	struct thread *td = curthread;
	struct sysentvec *sv;
	/*
	 * Do we have a valid ELF header?
	 *
	 * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
	 * if the particular brand doesn't support it.
	 */
	if (__elfN(check_header)(hdr) != 0 ||
	    (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
	/*
	 * From here on down, we return an errno, not -1, as we've
	 * detected an ELF file.
	 */
	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
		/* Only support headers in first page for now */
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_INTERP) {
			/* Path to interpreter */
			if (phdr[i].p_filesz > MAXPATHLEN ||
			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE)
			interp = imgp->image_header + phdr[i].p_offset;
	brand_info = __elfN(get_brandinfo)(hdr, interp);
	if (brand_info == NULL) {
		uprintf("ELF binary type \"%u\" not known.\n",
		    hdr->e_ident[EI_OSABI]);
	if (hdr->e_type == ET_DYN &&
	    (brand_info->flags & BI_CAN_EXEC_DYN) == 0)
	sv = brand_info->sysvec;
	if (interp != NULL && brand_info->interp_newpath != NULL)
		interp = brand_info->interp_newpath;
	/*
	 * Avoid a possible deadlock if the current address space is destroyed
	 * and that address space maps the locked vnode. In the common case,
	 * the locked vnode's v_usecount is decremented but remains greater
	 * than zero. Consequently, the vnode lock is not needed by vrele().
	 * However, in cases where the vnode lock is external, such as nullfs,
	 * v_usecount may become zero.
	 */
	VOP_UNLOCK(imgp->vp, 0, td);
	exec_new_vmspace(imgp, sv);
	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
	vmspace = imgp->proc->p_vmspace;
	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_LOAD:	/* Loadable segment */
			if (phdr[i].p_flags & PF_X)
				prot |= VM_PROT_EXECUTE;
			if (phdr[i].p_flags & PF_W)
				prot |= VM_PROT_WRITE;
			if (phdr[i].p_flags & PF_R)
				prot |= VM_PROT_READ;
#if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
			/*
			 * Some x86 binaries assume read == executable,
			 * notably the M3 runtime and therefore cvsup
			 */
			if (prot & VM_PROT_READ)
				prot |= VM_PROT_EXECUTE;
			if ((error = __elfN(load_section)(vmspace,
			    imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    sv->sv_pagesize)) != 0)
			/*
			 * If this segment contains the program headers,
			 * remember their virtual address for the AT_PHDR
			 * aux entry. Static binaries don't usually include
			 * a PHDR entry.
			 */
			if (phdr[i].p_offset == 0 &&
			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
				proghdr = phdr[i].p_vaddr + hdr->e_phoff;
			seg_addr = trunc_page(phdr[i].p_vaddr);
			seg_size = round_page(phdr[i].p_memsz +
			    phdr[i].p_vaddr - seg_addr);
			/*
			 * Is this .text or .data? We can't use
			 * VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
			 * alpha terribly and possibly does other bad
			 * things so we stick to the old way of figuring
			 * it out: If the segment contains the program
			 * entry point, it's a text segment, otherwise it
			 * is a data segment.
			 *
			 * Note that obreak() assumes that data_addr +
			 * data_size == end of data load area, and the ELF
			 * file format expects segments to be sorted by
			 * address. If multiple data segments exist, the
			 * last one will be used.
			 */
			if (hdr->e_entry >= phdr[i].p_vaddr &&
			    hdr->e_entry < (phdr[i].p_vaddr +
				text_size = seg_size;
				text_addr = seg_addr;
				entry = (u_long)hdr->e_entry;
				data_size = seg_size;
				data_addr = seg_addr;
			total_size += seg_size;
		case PT_PHDR: /* Program header table info */
			proghdr = phdr[i].p_vaddr;
	if (data_addr == 0 && data_size == 0) {
		data_addr = text_addr;
		data_size = text_size;
	/*
	 * Check limits. It should be safe to check the
	 * limits after loading the segments since we do
	 * not actually fault in all the segments' pages.
	 */
	PROC_LOCK(imgp->proc);
	if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
	    text_size > maxtsiz ||
	    total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
		PROC_UNLOCK(imgp->proc);
	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
	/*
	 * We load the dynamic linker where a userland call
	 * to mmap(0, ...) would put it. The rationale behind this
	 * calculation is that it leaves room for the heap to grow to
	 * its maximum allowed size.
	 */
	addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
	    lim_max(imgp->proc, RLIMIT_DATA));
	PROC_UNLOCK(imgp->proc);
	imgp->entry_addr = entry;
	imgp->proc->p_sysent = sv;
	if (interp != NULL) {
		VOP_UNLOCK(imgp->vp, 0, td);
		if (brand_info->emul_path != NULL &&
		    brand_info->emul_path[0] != '\0') {
			path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
			snprintf(path, MAXPATHLEN, "%s%s",
			    brand_info->emul_path, interp);
			error = __elfN(load_file)(imgp->proc, path, &addr,
			    &imgp->entry_addr, sv->sv_pagesize);
		if (interp != NULL) {
			error = __elfN(load_file)(imgp->proc, interp, &addr,
			    &imgp->entry_addr, sv->sv_pagesize);
		vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
			uprintf("ELF interpreter %s not found\n", interp);
	/*
	 * Construct auxargs table (used by the fixup routine)
	 */
	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
	elf_auxargs->execfd = -1;
	elf_auxargs->phdr = proghdr;
	elf_auxargs->phent = hdr->e_phentsize;
	elf_auxargs->phnum = hdr->e_phnum;
	elf_auxargs->pagesz = PAGE_SIZE;
	elf_auxargs->base = addr;
	elf_auxargs->flags = 0;
	elf_auxargs->entry = entry;
	elf_auxargs->trace = elf_trace;
	imgp->auxargs = elf_auxargs;
	imgp->interpreted = 0;
#define	suword __CONCAT(suword, __ELF_WORD_SIZE)
__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	base = (Elf_Addr *)*stack_base;
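	/*
	 * The auxiliary vector is placed just past the argv and envp pointer
	 * arrays: argc + envc pointers plus their two terminating NULL
	 * entries.
	 */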
	pos = base + (imgp->args->argc + imgp->args->envc + 2);
		AUXARGS_ENTRY(pos, AT_DEBUG, 1);
	if (args->execfd != -1) {
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	AUXARGS_ENTRY(pos, AT_NULL, 0);
	free(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;
	suword(base, (long)imgp->args->argc);
	*stack_base = (register_t *)base;
/*
 * Code for generating ELF core dumps.
 */
typedef void (*segment_callback)(vm_map_entry_t, void *);
/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_writable_segment(struct thread *, segment_callback, void *);
static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
    int, void *, size_t);
static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
static void __elfN(putnote)(void *, size_t *, const char *, int,
    const void *, size_t);
extern int osreldate;
__elfN(coredump)(td, vp, limit)
	struct ucred *cred = td->td_ucred;
	struct sseg_closure seginfo;
	/* Size the program segments. */
	each_writable_segment(td, cb_size_segment, &seginfo);
	/*
	 * Calculate the size of the core file header area by making
	 * a dry run of generating it. Nothing is written, but the
	 * size is calculated.
	 */
	__elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);
	if (hdrsize + seginfo.size >= limit)
	/*
	 * Allocate memory for building the header, fill it up,
	 * and write it out.
	 */
	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
	/* Write the contents of all of the writable segments. */
		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
		for (i = 0; i < seginfo.count; i++) {
			error = vn_rdwr_inchunks(UIO_WRITE, vp,
			    (caddr_t)(uintptr_t)php->p_vaddr,
			    php->p_filesz, offset, UIO_USERSPACE,
			    IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
			    curthread); /* XXXKSE */
			offset += php->p_filesz;
/*
 * A callback for each_writable_segment() to write out the segment's
 * program header entry.
 */
cb_put_phdr(entry, closure)
	vm_map_entry_t entry;
	struct phdr_closure *phc = (struct phdr_closure *)closure;
	Elf_Phdr *phdr = phc->phdr;
	phc->offset = round_page(phc->offset);
	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->start;
	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
	phdr->p_align = PAGE_SIZE;
	if (entry->protection & VM_PROT_READ)
		phdr->p_flags |= PF_R;
	if (entry->protection & VM_PROT_WRITE)
		phdr->p_flags |= PF_W;
	if (entry->protection & VM_PROT_EXECUTE)
		phdr->p_flags |= PF_X;
	phc->offset += phdr->p_filesz;
/*
 * A callback for each_writable_segment() to gather information about
 * the number of segments and their total size.
 */
cb_size_segment(entry, closure)
	vm_map_entry_t entry;
	struct sseg_closure *ssc = (struct sseg_closure *)closure;
	ssc->size += entry->end - entry->start;
/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
each_writable_segment(td, func, closure)
	segment_callback func;
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		/*
		 * Don't dump inaccessible mappings, deal with legacy
		 * coredump mode.
		 *
		 * Note that read-only segments related to the elf binary
		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
		 * need to arbitrarily ignore such segments.
		 */
		if (elf_legacy_coredump) {
			if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
			if ((entry->protection & VM_PROT_ALL) == 0)
		/*
		 * Don't include memory segments in the coredump if
		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
		 * madvise(2). Do not dump submaps (i.e. parts of the
		 * kernel map).
		 */
		if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
		if ((obj = entry->object.vm_object) == NULL)
		/* Find the deepest backing object. */
		while (obj->backing_object != NULL)
			obj = obj->backing_object;
		/* Ignore memory-mapped devices and such things. */
		if (obj->type != OBJT_DEFAULT &&
		    obj->type != OBJT_SWAP &&
		    obj->type != OBJT_VNODE)
		(*func)(entry, closure);
/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
__elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
	/* Fill in the header. */
	bzero(hdr, hdrsize);
	__elfN(puthdr)(td, hdr, &off, numsegs);
	/* Write it to the core file. */
	return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
	    UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
typedef struct prstatus32 elf_prstatus_t;
typedef struct prpsinfo32 elf_prpsinfo_t;
typedef struct fpreg32 elf_prfpregset_t;
typedef struct fpreg32 elf_fpregset_t;
typedef struct reg32 elf_gregset_t;
typedef prstatus_t elf_prstatus_t;
typedef prpsinfo_t elf_prpsinfo_t;
typedef prfpregset_t elf_prfpregset_t;
typedef prfpregset_t elf_fpregset_t;
typedef gregset_t elf_gregset_t;
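/*
 * The typedefs above select the ia32 compat register and psinfo layouts for
 * the core notes when this file is built as the 32-bit image activator on a
 * 64-bit kernel, and the native layouts otherwise.
 */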
__elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
		elf_prstatus_t status;
		elf_prfpregset_t fpregset;
		elf_prpsinfo_t psinfo;
	elf_prstatus_t *status;
	elf_prfpregset_t *fpregset;
	elf_prpsinfo_t *psinfo;
	size_t ehoff, noteoff, notesz, phoff;
	*off += sizeof(Elf_Ehdr);
	*off += (numsegs + 1) * sizeof(Elf_Phdr);
	/*
	 * Don't allocate space for the notes if we're just calculating
	 * the size of the header. We also don't collect the data.
	 */
		tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
		status = &tempdata->status;
		fpregset = &tempdata->fpregset;
		psinfo = &tempdata->psinfo;
		psinfo->pr_version = PRPSINFO_VERSION;
		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
		/*
		 * XXX - We don't fill in the command line arguments properly
		 */
		strlcpy(psinfo->pr_psargs, p->p_comm,
		    sizeof(psinfo->pr_psargs));
	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
	/*
	 * To have the debugger select the right thread (LWP) as the initial
	 * thread, we dump the state of the thread passed to us in td first.
	 * This is the thread that causes the core dump and is thus likely to
	 * be the right thread one wants to have selected in the debugger.
	 */
	while (thr != NULL) {
		status->pr_version = PRSTATUS_VERSION;
		status->pr_statussz = sizeof(elf_prstatus_t);
		status->pr_gregsetsz = sizeof(elf_gregset_t);
		status->pr_fpregsetsz = sizeof(elf_fpregset_t);
		status->pr_osreldate = osreldate;
		status->pr_cursig = p->p_sig;
		status->pr_pid = thr->td_tid;
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
		fill_regs32(thr, &status->pr_reg);
		fill_fpregs32(thr, fpregset);
		fill_regs(thr, &status->pr_reg);
		fill_fpregs(thr, fpregset);
		__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
		__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
		/*
		 * Allow for MD specific notes, as well as any MD
		 * specific preparations for writing MI notes.
		 */
		__elfN(dump_thread)(thr, dst, off);
		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
		    TAILQ_NEXT(thr, td_plist);
			thr = TAILQ_NEXT(thr, td_plist);
	notesz = *off - noteoff;
		free(tempdata, M_TEMP);
	/* Align up to a page boundary for the program segments. */
	*off = round_page(*off);
		struct phdr_closure phc;
		/*
		 * Fill in the ELF header.
		 */
		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
		ehdr->e_ident[EI_MAG0] = ELFMAG0;
		ehdr->e_ident[EI_MAG1] = ELFMAG1;
		ehdr->e_ident[EI_MAG2] = ELFMAG2;
		ehdr->e_ident[EI_MAG3] = ELFMAG3;
		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
		ehdr->e_ident[EI_DATA] = ELF_DATA;
		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
		ehdr->e_ident[EI_ABIVERSION] = 0;
		ehdr->e_ident[EI_PAD] = 0;
		ehdr->e_type = ET_CORE;
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32
		ehdr->e_machine = EM_386;
		ehdr->e_machine = ELF_ARCH;
		ehdr->e_version = EV_CURRENT;
		ehdr->e_phoff = phoff;
		ehdr->e_ehsize = sizeof(Elf_Ehdr);
		ehdr->e_phentsize = sizeof(Elf_Phdr);
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shentsize = sizeof(Elf_Shdr);
		ehdr->e_shstrndx = SHN_UNDEF;
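		/*
		 * Core dumps provide program headers only; no section header
		 * table is written, so e_shstrndx stays SHN_UNDEF.
		 */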
		/*
		 * Fill in the program header entries.
		 */
		phdr = (Elf_Phdr *)((char *)dst + phoff);
		/* The note segment. */
		phdr->p_type = PT_NOTE;
		phdr->p_offset = noteoff;
		phdr->p_filesz = notesz;
		/* All the writable segments from the program. */
		each_writable_segment(td, cb_put_phdr, &phc);
__elfN(putnote)(void *dst, size_t *off, const char *name, int type,
    const void *desc, size_t descsz)
	note.n_namesz = strlen(name) + 1;
	note.n_descsz = descsz;
		bcopy(&note, (char *)dst + *off, sizeof note);
	*off += sizeof note;
		bcopy(name, (char *)dst + *off, note.n_namesz);
	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
		bcopy(desc, (char *)dst + *off, note.n_descsz);
	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw __elfN(execsw) = {
	__CONCAT(exec_, __elfN(imgact)),
	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
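/*
 * EXEC_SET places this execsw entry in the linker set that kern_execve.c
 * walks when choosing an image activator for a new executable.
 */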