/*-
 * Copyright (c) 2000 David O'Brien
 * Copyright (c) 1995-1996 Søren Schmidt
 * Copyright (c) 1996 Peter Wemm
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_compat.h"
#include <sys/param.h>
#include <sys/capability.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sf_buf.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <sys/eventhandler.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>

#include <machine/elf.h>
#include <machine/md_var.h>
#define	OLD_EI_BRAND	8
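
/*
 * Historical context (added summary): before the EI_OSABI byte was
 * standardized, FreeBSD 3.x wrote a brand string directly into e_ident
 * starting at byte 8.  get_brandinfo() below still compares
 * &hdr->e_ident[OLD_EI_BRAND] against each brand's compat_3_brand so
 * those old binaries keep working.
 */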

static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
    const char *interp, int32_t *osrel);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry, size_t pagesize);
static int __elfN(load_section)(struct vmspace *vmspace, vm_object_t object,
    vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
    vm_prot_t prot, size_t pagesize);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
static boolean_t __elfN(freebsd_trans_osrel)(const Elf_Note *note,
    int32_t *osrel);
static boolean_t kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel);
static boolean_t __elfN(check_note)(struct image_params *imgp,
    Elf_Brandnote *checknote, int32_t *osrel);
static vm_prot_t __elfN(trans_prot)(Elf_Word);
static Elf_Word __elfN(untrans_prot)(vm_prot_t);

SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
    "");

#ifdef COMPRESS_USER_CORES
static int compress_core(gzFile, char *, char *, unsigned int,
    struct thread *td);
#define CORE_BUF_SIZE	(16 * 1024)
#endif

int __elfN(fallback_brand) = -1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
    &__elfN(fallback_brand));

static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
    &elf_legacy_coredump, 0, "");

static int __elfN(nxstack) = 0;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
    nxstack, CTLFLAG_RW, &__elfN(nxstack), 0,
    __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack");

#if __ELF_WORD_SIZE == 32
#if defined(__amd64__) || defined(__ia64__)
int i386_read_exec = 0;
SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,
    "enable execution from readable segments");
#endif
#endif

static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];

#define	trunc_page_ps(va, ps)	((va) & ~(ps - 1))
#define	round_page_ps(va, ps)	(((va) + (ps - 1)) & ~(ps - 1))
#define	aligned(a, t)	(trunc_page_ps((u_long)(a), sizeof(t)) == (u_long)(a))
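
/*
 * Worked example (illustrative only): with ps = 0x1000 (4K pages),
 * trunc_page_ps(0x12345, 0x1000) == 0x12000 and
 * round_page_ps(0x12345, 0x1000) == 0x13000; aligned(p, Elf_Addr) is
 * true exactly when p is a multiple of sizeof(Elf_Addr).
 */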

static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";

Elf_Brandnote __elfN(freebsd_brandnote) = {
	.hdr.n_namesz	= sizeof(FREEBSD_ABI_VENDOR),
	.hdr.n_descsz	= sizeof(int32_t),
	.vendor		= FREEBSD_ABI_VENDOR,
	.flags		= BN_TRANSLATE_OSREL,
	.trans_osrel	= __elfN(freebsd_trans_osrel)
};

static boolean_t
__elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel)
{
	uintptr_t p;

	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
	*osrel = *(const int32_t *)(p);

	return (TRUE);
}
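
/*
 * Note layout assumed above (sketch): the Elf_Note header is followed
 * by the vendor name ("FreeBSD\0", 8 bytes, padded to a 4-byte
 * boundary) and then a 4-byte descriptor holding __FreeBSD_version,
 * which is returned through *osrel.
 */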

static const char GNU_ABI_VENDOR[] = "GNU";
static int GNU_KFREEBSD_ABI_DESC = 3;

Elf_Brandnote __elfN(kfreebsd_brandnote) = {
	.hdr.n_namesz	= sizeof(GNU_ABI_VENDOR),
	.hdr.n_descsz	= 16,	/* XXX at least 16 */
	.vendor		= GNU_ABI_VENDOR,
	.flags		= BN_TRANSLATE_OSREL,
	.trans_osrel	= kfreebsd_trans_osrel
};

static boolean_t
kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel)
{
	const Elf32_Word *desc;
	uintptr_t p;

	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, sizeof(Elf32_Addr));

	desc = (const Elf32_Word *)p;
	if (desc[0] != GNU_KFREEBSD_ABI_DESC)
		return (FALSE);

	/*
	 * Debian GNU/kFreeBSD embeds the earliest compatible kernel version
	 * (__FreeBSD_version: <major><two digit minor>Rxx) in the LSB way.
	 */
	*osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3];

	return (TRUE);
}
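
/*
 * Worked example (illustrative): a descriptor of { 3, 8, 2, 0 }
 * matches GNU_KFREEBSD_ABI_DESC (desc[0] == 3) and yields
 * *osrel = 8 * 100000 + 2 * 1000 + 0 = 802000, i.e. an 8.2 kernel.
 */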

int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == NULL) {
			elf_brand_list[i] = entry;
			break;
		}
	}
	if (i == MAX_BRANDS) {
		printf("WARNING: %s: could not insert brandinfo entry: %p\n",
		    __func__, entry);
		return (-1);
	}
	return (0);
}

int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
	int i;

	for (i = 0; i < MAX_BRANDS; i++) {
		if (elf_brand_list[i] == entry) {
			elf_brand_list[i] = NULL;
			break;
		}
	}
	if (i == MAX_BRANDS)
		return (-1);
	return (0);
}

int
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
	struct proc *p;
	int rval = FALSE;

	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		if (p->p_sysent == entry->sysvec) {
			rval = TRUE;
			break;
		}
	}
	sx_sunlock(&allproc_lock);

	return (rval);
}

static Elf_Brandinfo *
__elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
    int32_t *osrel)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	Elf_Brandinfo *bi;
	boolean_t ret;
	int i;

	/*
	 * We support four types of branding -- (1) the ELF EI_OSABI field
	 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
	 * branding within the ELF header, (3) path of the `interp_path'
	 * field, and (4) the ".note.ABI-tag" ELF section.
	 */

	/* Look for a ".note.ABI-tag" ELF section. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL)
			continue;
		if (hdr->e_machine == bi->machine && (bi->flags &
		    (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
			ret = __elfN(check_note)(imgp, bi->brand_note, osrel);
			if (ret)
				return (bi);
		}
	}

	/* If the executable has a brand, search for it in the brand list. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
			continue;
		if (hdr->e_machine == bi->machine &&
		    (hdr->e_ident[EI_OSABI] == bi->brand ||
		    strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
		    bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
			return (bi);
	}

	/* Lacking a known brand, search for a recognized interpreter. */
	if (interp != NULL) {
		for (i = 0; i < MAX_BRANDS; i++) {
			bi = elf_brand_list[i];
			if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
				continue;
			if (hdr->e_machine == bi->machine &&
			    strcmp(interp, bi->interp_path) == 0)
				return (bi);
		}
	}

	/* Lacking a recognized interpreter, try the default brand. */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
			continue;
		if (hdr->e_machine == bi->machine &&
		    __elfN(fallback_brand) == bi->brand)
			return (bi);
	}
	return (NULL);
}

static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
	Elf_Brandinfo *bi;
	int i;

	if (!IS_ELF(*hdr) ||
	    hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
	    hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
	    hdr->e_ident[EI_VERSION] != EV_CURRENT ||
	    hdr->e_phentsize != sizeof(Elf_Phdr) ||
	    hdr->e_version != ELF_TARG_VER)
		return (ENOEXEC);

	/*
	 * Make sure we have at least one brand for this machine.
	 */
	for (i = 0; i < MAX_BRANDS; i++) {
		bi = elf_brand_list[i];
		if (bi != NULL && bi->machine == hdr->e_machine)
			break;
	}
	if (i == MAX_BRANDS)
		return (ENOEXEC);

	return (0);
}

static int
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot)
{
	struct sf_buf *sf;
	vm_offset_t off;
	int error;

	/*
	 * Create the page if it doesn't exist yet.  Ignore errors.
	 */
	vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end),
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	/*
	 * Find the page from the underlying object.
	 */
	sf = vm_imgact_map_page(object, offset);
	if (sf == NULL)
		return (KERN_FAILURE);
	off = offset - trunc_page(offset);
	error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
	    end - start);
	vm_imgact_unmap_page(sf);
	if (error)
		return (KERN_FAILURE);

	return (KERN_SUCCESS);
}

static int
__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
    vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow)
{
	struct sf_buf *sf;
	vm_offset_t off;
	vm_size_t sz;
	int error, rv;

	if (start != trunc_page(start)) {
		rv = __elfN(map_partial)(map, object, offset, start,
		    round_page(start), prot);
		if (rv)
			return (rv);
		offset += round_page(start) - start;
		start = round_page(start);
	}
	if (end != round_page(end)) {
		rv = __elfN(map_partial)(map, object, offset +
		    trunc_page(end) - start, trunc_page(end), end, prot);
		if (rv)
			return (rv);
		end = trunc_page(end);
	}
	if (end > start) {
		if (offset & PAGE_MASK) {
			/*
			 * The mapping is not page aligned.  This means we have
			 * to copy the data.  Sigh.
			 */
			rv = vm_map_find(map, NULL, 0, &start, end - start,
			    FALSE, prot | VM_PROT_WRITE, VM_PROT_ALL, 0);
			if (rv)
				return (rv);
			if (object == NULL)
				return (KERN_SUCCESS);
			for (; start < end; start += sz) {
				sf = vm_imgact_map_page(object, offset);
				if (sf == NULL)
					return (KERN_FAILURE);
				off = offset - trunc_page(offset);
				sz = end - start;
				if (sz > PAGE_SIZE - off)
					sz = PAGE_SIZE - off;
				error = copyout((caddr_t)sf_buf_kva(sf) + off,
				    (caddr_t)start, sz);
				vm_imgact_unmap_page(sf);
				if (error)
					return (KERN_FAILURE);
				offset += sz;
			}
			rv = KERN_SUCCESS;
		} else {
			vm_object_reference(object);
			vm_map_lock(map);
			rv = vm_map_insert(map, object, offset, start, end,
			    prot, VM_PROT_ALL, cow);
			vm_map_unlock(map);
			if (rv != KERN_SUCCESS)
				vm_object_deallocate(object);
		}
		return (rv);
	} else {
		return (KERN_SUCCESS);
	}
}

static int
__elfN(load_section)(struct vmspace *vmspace,
	vm_object_t object, vm_offset_t offset,
	caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
	size_t pagesize)
{
	struct sf_buf *sf;
	size_t map_len;
	vm_offset_t map_addr;
	int error, rv, cow;
	size_t copy_len;
	vm_offset_t file_addr;

	/*
	 * It's necessary to fail if the filsz + offset taken from the
	 * header is greater than the actual file pager object's size.
	 * If we were to allow this, then the vm_map_find() below would
	 * walk right off the end of the file object and into the ether.
	 *
	 * While I'm here, might as well check for something else that
	 * is invalid: filsz cannot be greater than memsz.
	 */
	if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
	    filsz > memsz) {
		uprintf("elf_load_section: truncated ELF file\n");
		return (ENOEXEC);
	}

	map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
	file_addr = trunc_page_ps(offset, pagesize);

	/*
	 * We have two choices.  We can either clear the data in the last page
	 * of an oversized mapping, or we can start the anon mapping a page
	 * early and copy the initialized data into that first page.  We
	 * choose the second.
	 */
	if (memsz > filsz)
		map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
	else
		map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
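
	/*
	 * Worked example (illustrative numbers): with pagesize = 0x1000,
	 * offset = 0x2234 and filsz = 0x1a00, file_addr is 0x2000 and
	 * offset + filsz is 0x3c34.  If memsz > filsz, the file-backed
	 * mapping stops at the last full page (map_len = 0x1000) and the
	 * tail is copied below; otherwise it is rounded up
	 * (map_len = 0x2000).
	 */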

	if (map_len != 0) {
		/* cow flags: don't dump readonly sections in core */
		cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
		    (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);

		rv = __elfN(map_insert)(&vmspace->vm_map,
		    object,		/* object */
		    file_addr,		/* file offset */
		    map_addr,		/* virtual start */
		    map_addr + map_len,	/* virtual end */
		    prot,		/* protection */
		    cow);		/* cow */
		if (rv != KERN_SUCCESS)
			return (EINVAL);

		/* we can stop now if we've covered it all */
		if (memsz == filsz) {
			return (0);
		}
	}

	/*
	 * We have to get the remaining bit of the file into the first part
	 * of the oversized map segment.  This is normally because the .data
	 * segment in the file is extended to provide bss.  It's a neat idea
	 * to try and save a page, but it's a pain in the behind to implement.
	 */
	copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
	map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
	map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
	    map_addr;
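
	/*
	 * Continuing the example above (illustrative): copy_len =
	 * 0x3c34 - 0x3000 = 0xc34 bytes of initialized data must land in
	 * the first page of the anonymous (bss) mapping created below.
	 */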

	/* This had damn well better be true! */
	if (map_len != 0) {
		rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
		    map_addr + map_len, VM_PROT_ALL, 0);
		if (rv != KERN_SUCCESS) {
			return (EINVAL);
		}
	}

	if (copy_len != 0) {
		vm_offset_t off;

		sf = vm_imgact_map_page(object, offset + filsz);
		if (sf == NULL)
			return (EIO);

		/* send the page fragment to user space */
		off = trunc_page_ps(offset + filsz, pagesize) -
		    trunc_page(offset + filsz);
		error = copyout((caddr_t)sf_buf_kva(sf) + off,
		    (caddr_t)map_addr, copy_len);
		vm_imgact_unmap_page(sf);
		if (error) {
			return (error);
		}
	}

	/*
	 * Set it to the specified protection.
	 * XXX had better undo the damage from pasting over the cracks here!
	 */
	vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
	    round_page(map_addr + map_len), prot, FALSE);

	return (0);
}

/*
 * Load the file "file" into memory.  It may be either a shared object
 * or an executable.
 *
 * The "addr" reference parameter is in/out.  On entry, it specifies
 * the address where a shared object should be loaded.  If the file is
 * an executable, this value is ignored.  On exit, "addr" specifies
 * where the file was actually loaded.
 *
 * The "entry" reference parameter is out only.  On exit, it specifies
 * the entry point for the loaded file.
 */
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
    u_long *entry, size_t pagesize)
{
	struct {
		struct nameidata nd;
		struct vattr attr;
		struct image_params image_params;
	} *tempdata;
	const Elf_Ehdr *hdr = NULL;
	const Elf_Phdr *phdr = NULL;
	struct nameidata *nd;
	struct vmspace *vmspace = p->p_vmspace;
	struct vattr *attr;
	struct image_params *imgp;
	vm_prot_t prot;
	u_long rbase;
	u_long base_addr = 0;
	int vfslocked, error, i, numsegs;

#ifdef CAPABILITY_MODE
	/*
	 * XXXJA: This check can go away once we are sufficiently confident
	 * that the checks in namei() are correct.
	 */
	if (IN_CAPABILITY_MODE(curthread))
		return (ECAPMODE);
#endif

	tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
	nd = &tempdata->nd;
	attr = &tempdata->attr;
	imgp = &tempdata->image_params;

	/*
	 * Initialize part of the common data
	 */
	imgp->proc = p;
	imgp->attr = attr;
	imgp->firstpage = NULL;
	imgp->image_header = NULL;
	imgp->object = NULL;
	imgp->execlabel = NULL;

	NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file,
	    curthread);
	vfslocked = 0;
	if ((error = namei(nd)) != 0) {
		nd->ni_vp = NULL;
		goto fail;
	}
	vfslocked = NDHASGIANT(nd);
	NDFREE(nd, NDF_ONLY_PNBUF);
	imgp->vp = nd->ni_vp;

	/*
	 * Check permissions, modes, uid, etc on the file, and "open" it.
	 */
	error = exec_check_permissions(imgp);
	if (error)
		goto fail;

	error = exec_map_first_page(imgp);
	if (error)
		goto fail;

	/*
	 * Also make certain that the interpreter stays the same, so set
	 * its VV_TEXT flag, too.
	 */
	nd->ni_vp->v_vflag |= VV_TEXT;

	imgp->object = nd->ni_vp->v_object;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	if ((error = __elfN(check_header)(hdr)) != 0)
		goto fail;
	if (hdr->e_type == ET_DYN)
		rbase = *addr;
	else if (hdr->e_type == ET_EXEC)
		rbase = 0;
	else {
		error = ENOEXEC;
		goto fail;
	}

	/* Only support headers that fit within first page for now      */
	/*    (multiplication of two Elf_Half fields will not overflow) */
	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE - hdr->e_phoff) {
		error = ENOEXEC;
		goto fail;
	}

	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	if (!aligned(phdr, Elf_Addr)) {
		error = ENOEXEC;
		goto fail;
	}

	for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) {
			/* Loadable segment */
			prot = __elfN(trans_prot)(phdr[i].p_flags);
			if ((error = __elfN(load_section)(vmspace,
			    imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    pagesize)) != 0)
				goto fail;
			/*
			 * Establish the base address if this is the
			 * first segment.
			 */
			if (numsegs == 0)
				base_addr = trunc_page(phdr[i].p_vaddr +
				    rbase);
			numsegs++;
		}
	}
	*addr = base_addr;
	*entry = (unsigned long)hdr->e_entry + rbase;

fail:
	if (imgp->firstpage)
		exec_unmap_first_page(imgp);

	if (nd->ni_vp)
		vput(nd->ni_vp);

	VFS_UNLOCK_GIANT(vfslocked);
	free(tempdata, M_TEMP);

	return (error);
}

static int
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
{
	const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
	const Elf_Phdr *phdr;
	Elf_Auxargs *elf_auxargs;
	struct vmspace *vmspace;
	vm_prot_t prot;
	u_long text_size = 0, data_size = 0, total_size = 0;
	u_long text_addr = 0, data_addr = 0;
	u_long seg_size, seg_addr;
	u_long addr, baddr, et_dyn_addr, entry = 0, proghdr = 0;
	int32_t osrel = 0;
	int error = 0, i, n;
	const char *interp = NULL, *newinterp = NULL;
	Elf_Brandinfo *brand_info;
	char *path;
	struct sysentvec *sv;

	/*
	 * Do we have a valid ELF header?
	 *
	 * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
	 * if the particular brand doesn't support it.
	 */
	if (__elfN(check_header)(hdr) != 0 ||
	    (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
		return (-1);

	/*
	 * From here on down, we return an errno, not -1, as we've
	 * detected an ELF file.
	 */

	if ((hdr->e_phoff > PAGE_SIZE) ||
	    (hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
		/* Only support headers in first page for now */
		return (ENOEXEC);
	}
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	if (!aligned(phdr, Elf_Addr))
		return (ENOEXEC);
	n = 0;
	baddr = 0;
	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_LOAD:
			if (n == 0)
				baddr = phdr[i].p_vaddr;
			n++;
			break;
		case PT_INTERP:
			/* Path to interpreter */
			if (phdr[i].p_filesz > MAXPATHLEN ||
			    phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE)
				return (ENOEXEC);
			interp = imgp->image_header + phdr[i].p_offset;
			break;
		case PT_GNU_STACK:
			if (__elfN(nxstack))
				imgp->stack_prot =
				    __elfN(trans_prot)(phdr[i].p_flags);
			break;
		}
	}

	brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel);
	if (brand_info == NULL) {
		uprintf("ELF binary type \"%u\" not known.\n",
		    hdr->e_ident[EI_OSABI]);
		return (ENOEXEC);
	}
	if (hdr->e_type == ET_DYN) {
		if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0)
			return (ENOEXEC);
		/*
		 * Honour the base load address from the dso if it is
		 * non-zero for some reason.
		 */
		if (baddr == 0)
			et_dyn_addr = ET_DYN_LOAD_ADDR;
		else
			et_dyn_addr = 0;
	} else
		et_dyn_addr = 0;
	sv = brand_info->sysvec;
	if (interp != NULL && brand_info->interp_newpath != NULL)
		newinterp = brand_info->interp_newpath;

	/*
	 * Avoid a possible deadlock if the current address space is destroyed
	 * and that address space maps the locked vnode.  In the common case,
	 * the locked vnode's v_usecount is decremented but remains greater
	 * than zero.  Consequently, the vnode lock is not needed by vrele().
	 * However, in cases where the vnode lock is external, such as nullfs,
	 * v_usecount may become zero.
	 */
	VOP_UNLOCK(imgp->vp, 0);

	error = exec_new_vmspace(imgp, sv);
	imgp->proc->p_sysent = sv;

	vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
	if (error)
		return (error);

	vmspace = imgp->proc->p_vmspace;

	for (i = 0; i < hdr->e_phnum; i++) {
		switch (phdr[i].p_type) {
		case PT_LOAD:	/* Loadable segment */
			if (phdr[i].p_memsz == 0)
				break;
			prot = __elfN(trans_prot)(phdr[i].p_flags);

#if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
			/*
			 * Some x86 binaries assume read == executable,
			 * notably the M3 runtime and therefore cvsup
			 */
			if (prot & VM_PROT_READ)
				prot |= VM_PROT_EXECUTE;
#endif

			if ((error = __elfN(load_section)(vmspace,
			    imgp->object, phdr[i].p_offset,
			    (caddr_t)(uintptr_t)phdr[i].p_vaddr + et_dyn_addr,
			    phdr[i].p_memsz, phdr[i].p_filesz, prot,
			    sv->sv_pagesize)) != 0)
				return (error);

			/*
			 * If this segment contains the program headers,
			 * remember their virtual address for the AT_PHDR
			 * aux entry.  Static binaries don't usually include
			 * a program header table.
			 */
			if (phdr[i].p_offset == 0 &&
			    hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
				<= phdr[i].p_filesz)
				proghdr = phdr[i].p_vaddr + hdr->e_phoff +
				    et_dyn_addr;

			seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr);
			seg_size = round_page(phdr[i].p_memsz +
			    phdr[i].p_vaddr + et_dyn_addr - seg_addr);

			/*
			 * Make the largest executable segment the official
			 * text segment and all others data.
			 *
			 * Note that obreak() assumes that data_addr +
			 * data_size == end of data load area, and the ELF
			 * file format expects segments to be sorted by
			 * address.  If multiple data segments exist, the
			 * last one will be used.
			 */

			if (phdr[i].p_flags & PF_X && text_size < seg_size) {
				text_size = seg_size;
				text_addr = seg_addr;
			} else {
				data_size = seg_size;
				data_addr = seg_addr;
			}
			total_size += seg_size;
			break;
		case PT_PHDR:	/* Program header table info */
			proghdr = phdr[i].p_vaddr + et_dyn_addr;
			break;
		default:
			break;
		}
	}

	if (data_addr == 0 && data_size == 0) {
		data_addr = text_addr;
		data_size = text_size;
	}

	entry = (u_long)hdr->e_entry + et_dyn_addr;

	/*
	 * Check limits.  It should be safe to check the
	 * limits after loading the segments since we do
	 * not actually fault in all the segments pages.
	 */
	PROC_LOCK(imgp->proc);
	if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
	    text_size > maxtsiz ||
	    total_size > lim_cur(imgp->proc, RLIMIT_VMEM) ||
	    racct_set(imgp->proc, RACCT_DATA, data_size) != 0 ||
	    racct_set(imgp->proc, RACCT_VMEM, total_size) != 0) {
		PROC_UNLOCK(imgp->proc);
		return (ENOMEM);
	}

	vmspace->vm_tsize = text_size >> PAGE_SHIFT;
	vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
	vmspace->vm_dsize = data_size >> PAGE_SHIFT;
	vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;

	/*
	 * We load the dynamic linker where a userland call
	 * to mmap(0, ...) would put it.  The rationale behind this
	 * calculation is that it leaves room for the heap to grow to
	 * its maximum allowed size.
	 */
	addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
	    lim_max(imgp->proc, RLIMIT_DATA));
	PROC_UNLOCK(imgp->proc);
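
	/*
	 * Illustrative numbers (not from the source): on i386 with
	 * vm_daddr at 0x08100000 and a 512MB RLIMIT_DATA hard limit, the
	 * interpreter base computed above is 0x28100000, just past the
	 * largest address the heap could ever reach.
	 */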

	imgp->entry_addr = entry;

	if (interp != NULL) {
		int have_interp = FALSE;
		VOP_UNLOCK(imgp->vp, 0);
		if (brand_info->emul_path != NULL &&
		    brand_info->emul_path[0] != '\0') {
			path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
			snprintf(path, MAXPATHLEN, "%s%s",
			    brand_info->emul_path, interp);
			error = __elfN(load_file)(imgp->proc, path, &addr,
			    &imgp->entry_addr, sv->sv_pagesize);
			free(path, M_TEMP);
			if (error == 0)
				have_interp = TRUE;
		}
		if (!have_interp && newinterp != NULL) {
			error = __elfN(load_file)(imgp->proc, newinterp, &addr,
			    &imgp->entry_addr, sv->sv_pagesize);
			if (error == 0)
				have_interp = TRUE;
		}
		if (!have_interp) {
			error = __elfN(load_file)(imgp->proc, interp, &addr,
			    &imgp->entry_addr, sv->sv_pagesize);
		}
		vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
		if (error != 0) {
			uprintf("ELF interpreter %s not found\n", interp);
			return (error);
		}
	} else
		addr = et_dyn_addr;

	/*
	 * Construct auxargs table (used by the fixup routine)
	 */
	elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
	elf_auxargs->execfd = -1;
	elf_auxargs->phdr = proghdr;
	elf_auxargs->phent = hdr->e_phentsize;
	elf_auxargs->phnum = hdr->e_phnum;
	elf_auxargs->pagesz = PAGE_SIZE;
	elf_auxargs->base = addr;
	elf_auxargs->flags = 0;
	elf_auxargs->entry = entry;

	imgp->auxargs = elf_auxargs;
	imgp->interpreted = 0;
	imgp->reloc_base = addr;
	imgp->proc->p_osrel = osrel;

	return (error);
}

#define	suword __CONCAT(suword, __ELF_WORD_SIZE)

int
__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
{
	Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
	Elf_Addr *base;
	Elf_Addr *pos;

	base = (Elf_Addr *)*stack_base;
	pos = base + (imgp->args->argc + imgp->args->envc + 2);
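
	/*
	 * Stack layout assumed by the arithmetic above (sketch): base
	 * points at argv[0]; skipping argc argv slots, the NULL argv
	 * terminator, envc envp slots, and the NULL envp terminator --
	 * argc + envc + 2 entries in all -- leaves pos at the start of
	 * the aux vector filled in by the AUXARGS_ENTRY() calls below.
	 */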

	if (args->execfd != -1)
		AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
	AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
	AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
	AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
	AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
	AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
	AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
	AUXARGS_ENTRY(pos, AT_BASE, args->base);
	if (imgp->execpathp != 0)
		AUXARGS_ENTRY(pos, AT_EXECPATH, imgp->execpathp);
	AUXARGS_ENTRY(pos, AT_OSRELDATE, osreldate);
	if (imgp->canary != 0) {
		AUXARGS_ENTRY(pos, AT_CANARY, imgp->canary);
		AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen);
	}
	AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus);
	if (imgp->pagesizes != 0) {
		AUXARGS_ENTRY(pos, AT_PAGESIZES, imgp->pagesizes);
		AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen);
	}
	AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj
	    != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
	    imgp->sysent->sv_stackprot);
	AUXARGS_ENTRY(pos, AT_NULL, 0);

	free(imgp->auxargs, M_TEMP);
	imgp->auxargs = NULL;

	base--;
	suword(base, (long)imgp->args->argc);
	*stack_base = (register_t *)base;

	return (0);
}

/*
 * Code for generating ELF core dumps.
 */

typedef void (*segment_callback)(vm_map_entry_t, void *);

/* Closure for cb_put_phdr(). */
struct phdr_closure {
	Elf_Phdr *phdr;		/* Program header to fill in */
	Elf_Off offset;		/* Offset of segment in core file */
};

/* Closure for cb_size_segment(). */
struct sseg_closure {
	int count;		/* Count of writable segments. */
	size_t size;		/* Total size of all writable segments. */
};

static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_writable_segment(struct thread *, segment_callback, void *);
static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
    int, void *, size_t, gzFile);
static void __elfN(puthdr)(struct thread *, void *, size_t *, int);
static void __elfN(putnote)(void *, size_t *, const char *, int,
    const void *, size_t);

#ifdef COMPRESS_USER_CORES
extern int compress_user_cores;
extern int compress_user_cores_gzlevel;
#endif

static int
core_output(struct vnode *vp, void *base, size_t len, off_t offset,
    struct ucred *active_cred, struct ucred *file_cred,
    struct thread *td, char *core_buf, gzFile gzfile) {

	int error;
	if (gzfile) {
#ifdef COMPRESS_USER_CORES
		error = compress_core(gzfile, base, core_buf, len, td);
#else
		panic("shouldn't be here");
#endif
	} else {
		error = vn_rdwr_inchunks(UIO_WRITE, vp, base, len, offset,
		    UIO_USERSPACE, IO_UNIT | IO_DIRECT, active_cred, file_cred,
		    NULL, td);
	}
	return (error);
}

int
__elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags)
{
	struct ucred *cred = td->td_ucred;
	int error = 0;
	struct sseg_closure seginfo;
	void *hdr;
	size_t hdrsize;

	gzFile gzfile = Z_NULL;
	char *core_buf = NULL;
#ifdef COMPRESS_USER_CORES
	char gzopen_flags[8];
	char *p;
	int doing_compress = flags & IMGACT_CORE_COMPRESS;
#else
	int doing_compress = 0;
#endif

	hdr = NULL;

#ifdef COMPRESS_USER_CORES
	if (doing_compress) {
		p = gzopen_flags;
		*p++ = 'w';
		if (compress_user_cores_gzlevel >= 0 &&
		    compress_user_cores_gzlevel <= 9)
			*p++ = '0' + compress_user_cores_gzlevel;
		*p = 0;
		gzfile = gz_open("", gzopen_flags, vp);
		if (gzfile == Z_NULL) {
			error = EFAULT;
			goto done;
		}
		core_buf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO);
		if (!core_buf) {
			error = ENOMEM;
			goto done;
		}
	}
#endif

	/* Size the program segments. */
	seginfo.count = 0;
	seginfo.size = 0;
	each_writable_segment(td, cb_size_segment, &seginfo);

	/*
	 * Calculate the size of the core file header area by making
	 * a dry run of generating it.  Nothing is written, but the
	 * size is calculated.
	 */
	hdrsize = 0;
	__elfN(puthdr)(td, (void *)NULL, &hdrsize, seginfo.count);

	PROC_LOCK(td->td_proc);
	error = racct_add(td->td_proc, RACCT_CORE, hdrsize + seginfo.size);
	PROC_UNLOCK(td->td_proc);
	if (error != 0) {
		error = EFAULT;
		goto done;
	}

	if (hdrsize + seginfo.size >= limit) {
		error = EFAULT;
		goto done;
	}

	/*
	 * Allocate memory for building the header, fill it up,
	 * and write it out.
	 */
	hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
	if (hdr == NULL) {
		error = EINVAL;
		goto done;
	}
	error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize,
	    gzfile);

	/* Write the contents of all of the writable segments. */
	if (error == 0) {
		Elf_Phdr *php;
		off_t offset;
		int i;

		php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
		offset = round_page(hdrsize);
		for (i = 0; i < seginfo.count; i++) {
			error = core_output(vp,
			    (caddr_t)(uintptr_t)php->p_vaddr, php->p_filesz,
			    offset, cred, NOCRED, curthread, core_buf, gzfile);
			if (error != 0)
				break;
			offset += php->p_filesz;
			php++;
		}
	}
	if (error) {
		log(LOG_WARNING,
		    "Failed to write core file for process %s (error %d)\n",
		    curproc->p_comm, error);
	}

done:
#ifdef COMPRESS_USER_CORES
	if (core_buf)
		free(core_buf, M_TEMP);
	if (gzfile)
		gzclose(gzfile);
#endif

	free(hdr, M_TEMP);

	return (error);
}

/*
 * A callback for each_writable_segment() to write out the segment's
 * program header entry.
 */
static void
cb_put_phdr(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct phdr_closure *phc = (struct phdr_closure *)closure;
	Elf_Phdr *phdr = phc->phdr;

	phc->offset = round_page(phc->offset);

	phdr->p_type = PT_LOAD;
	phdr->p_offset = phc->offset;
	phdr->p_vaddr = entry->start;
	phdr->p_paddr = 0;
	phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
	phdr->p_align = PAGE_SIZE;
	phdr->p_flags = __elfN(untrans_prot)(entry->protection);

	phc->offset += phdr->p_filesz;
	phc->phdr++;
}

/*
 * A callback for each_writable_segment() to gather information about
 * the number of segments and their total size.
 */
static void
cb_size_segment(entry, closure)
	vm_map_entry_t entry;
	void *closure;
{
	struct sseg_closure *ssc = (struct sseg_closure *)closure;

	ssc->count++;
	ssc->size += entry->end - entry->start;
}

/*
 * For each writable segment in the process's memory map, call the given
 * function with a pointer to the map entry and some arbitrary
 * caller-supplied data.
 */
static void
each_writable_segment(td, func, closure)
	struct thread *td;
	segment_callback func;
	void *closure;
{
	struct proc *p = td->td_proc;
	vm_map_t map = &p->p_vmspace->vm_map;
	vm_map_entry_t entry;
	vm_object_t backing_object, object;
	boolean_t ignore_entry;

	vm_map_lock_read(map);
	for (entry = map->header.next; entry != &map->header;
	    entry = entry->next) {
		/*
		 * Don't dump inaccessible mappings, deal with legacy
		 * coredump mode.
		 *
		 * Note that read-only segments related to the elf binary
		 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
		 * need to arbitrarily ignore such segments.
		 */
		if (elf_legacy_coredump) {
			if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
				continue;
		} else {
			if ((entry->protection & VM_PROT_ALL) == 0)
				continue;
		}

		/*
		 * Don't include memory segments in the core dump if
		 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
		 * madvise(2).  Do not dump submaps (i.e. parts of the
		 * kernel map).
		 */
		if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
			continue;

		if ((object = entry->object.vm_object) == NULL)
			continue;

		/* Ignore memory-mapped devices and such things. */
		VM_OBJECT_LOCK(object);
		while ((backing_object = object->backing_object) != NULL) {
			VM_OBJECT_LOCK(backing_object);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		ignore_entry = object->type != OBJT_DEFAULT &&
		    object->type != OBJT_SWAP && object->type != OBJT_VNODE;
		VM_OBJECT_UNLOCK(object);
		if (ignore_entry)
			continue;

		(*func)(entry, closure);
	}
	vm_map_unlock_read(map);
}

/*
 * Write the core file header to the file, including padding up to
 * the page boundary.
 */
static int
__elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize, gzfile)
	struct thread *td;
	struct vnode *vp;
	struct ucred *cred;
	int numsegs;
	void *hdr;
	size_t hdrsize;
	gzFile gzfile;
{
	size_t off;

	/* Fill in the header. */
	bzero(hdr, hdrsize);
	off = 0;
	__elfN(puthdr)(td, hdr, &off, numsegs);

	if (!gzfile) {
		/* Write it to the core file. */
		return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
		    UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
		    td));
	} else {
#ifdef COMPRESS_USER_CORES
		if (gzwrite(gzfile, hdr, hdrsize) != hdrsize) {
			log(LOG_WARNING,
			    "Failed to compress core file header for process"
			    " %s.\n", curproc->p_comm);
			return (EFAULT);
		}
		return (0);
#else
		panic("shouldn't be here");
#endif
	}
}

#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
#include <compat/freebsd32/freebsd32.h>

typedef struct prstatus32 elf_prstatus_t;
typedef struct prpsinfo32 elf_prpsinfo_t;
typedef struct fpreg32 elf_prfpregset_t;
typedef struct fpreg32 elf_fpregset_t;
typedef struct reg32 elf_gregset_t;
typedef struct thrmisc32 elf_thrmisc_t;
#else
typedef prstatus_t elf_prstatus_t;
typedef prpsinfo_t elf_prpsinfo_t;
typedef prfpregset_t elf_prfpregset_t;
typedef prfpregset_t elf_fpregset_t;
typedef gregset_t elf_gregset_t;
typedef thrmisc_t elf_thrmisc_t;
#endif

static void
__elfN(puthdr)(struct thread *td, void *dst, size_t *off, int numsegs)
{
	struct {
		elf_prstatus_t status;
		elf_prfpregset_t fpregset;
		elf_prpsinfo_t psinfo;
		elf_thrmisc_t thrmisc;
	} *tempdata;
	elf_prstatus_t *status;
	elf_prfpregset_t *fpregset;
	elf_prpsinfo_t *psinfo;
	elf_thrmisc_t *thrmisc;
	struct proc *p;
	struct thread *thr;
	size_t ehoff, noteoff, notesz, phoff;

	p = td->td_proc;

	ehoff = *off;
	*off += sizeof(Elf_Ehdr);

	phoff = *off;
	*off += (numsegs + 1) * sizeof(Elf_Phdr);
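
	/*
	 * Resulting core file layout (summary of the offsets computed
	 * here): the Elf_Ehdr at ehoff, numsegs + 1 program headers at
	 * phoff (one PT_NOTE plus one PT_LOAD per writable segment), the
	 * notes at noteoff, and finally the segment contents, starting
	 * page-aligned at the *off this function leaves behind.
	 */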
	noteoff = *off;
	/*
	 * Don't allocate space for the notes if we're just calculating
	 * the size of the header.  We also don't collect the data.
	 */
	if (dst != NULL) {
		tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO|M_WAITOK);
		status = &tempdata->status;
		fpregset = &tempdata->fpregset;
		psinfo = &tempdata->psinfo;
		thrmisc = &tempdata->thrmisc;
	} else {
		tempdata = NULL;
		status = NULL;
		fpregset = NULL;
		psinfo = NULL;
		thrmisc = NULL;
	}

	if (dst != NULL) {
		psinfo->pr_version = PRPSINFO_VERSION;
		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
		/*
		 * XXX - We don't fill in the command line arguments properly
		 * yet.
		 */
		strlcpy(psinfo->pr_psargs, p->p_comm,
		    sizeof(psinfo->pr_psargs));
	}
	__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
	    sizeof *psinfo);

	/*
	 * To have the debugger select the right thread (LWP) as the initial
	 * thread, we dump the state of the thread passed to us in td first.
	 * This is the thread that causes the core dump and thus likely to
	 * be the right thread one wants to have selected in the debugger.
	 */
	thr = td;
	while (thr != NULL) {
		if (dst != NULL) {
			status->pr_version = PRSTATUS_VERSION;
			status->pr_statussz = sizeof(elf_prstatus_t);
			status->pr_gregsetsz = sizeof(elf_gregset_t);
			status->pr_fpregsetsz = sizeof(elf_fpregset_t);
			status->pr_osreldate = osreldate;
			status->pr_cursig = p->p_sig;
			status->pr_pid = thr->td_tid;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
			fill_regs32(thr, &status->pr_reg);
			fill_fpregs32(thr, fpregset);
#else
			fill_regs(thr, &status->pr_reg);
			fill_fpregs(thr, fpregset);
#endif
			memset(&thrmisc->_pad, 0, sizeof(thrmisc->_pad));
			strcpy(thrmisc->pr_tname, thr->td_name);
		}
		__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
		    sizeof *status);
		__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
		    sizeof *fpregset);
		__elfN(putnote)(dst, off, "FreeBSD", NT_THRMISC, thrmisc,
		    sizeof *thrmisc);
		/*
		 * Allow for MD specific notes, as well as any MD
		 * specific preparations for writing MI notes.
		 */
		__elfN(dump_thread)(thr, dst, off);

		thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
		    TAILQ_NEXT(thr, td_plist);
		if (thr == td)
			thr = TAILQ_NEXT(thr, td_plist);
	}

	notesz = *off - noteoff;

	if (dst != NULL)
		free(tempdata, M_TEMP);

	/* Align up to a page boundary for the program segments. */
	*off = round_page(*off);

	if (dst != NULL) {
		Elf_Ehdr *ehdr;
		Elf_Phdr *phdr;
		struct phdr_closure phc;

		/*
		 * Fill in the ELF header.
		 */
		ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
		ehdr->e_ident[EI_MAG0] = ELFMAG0;
		ehdr->e_ident[EI_MAG1] = ELFMAG1;
		ehdr->e_ident[EI_MAG2] = ELFMAG2;
		ehdr->e_ident[EI_MAG3] = ELFMAG3;
		ehdr->e_ident[EI_CLASS] = ELF_CLASS;
		ehdr->e_ident[EI_DATA] = ELF_DATA;
		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
		ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
		ehdr->e_ident[EI_ABIVERSION] = 0;
		ehdr->e_ident[EI_PAD] = 0;
		ehdr->e_type = ET_CORE;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
		ehdr->e_machine = ELF_ARCH32;
#else
		ehdr->e_machine = ELF_ARCH;
#endif
		ehdr->e_version = EV_CURRENT;
		ehdr->e_entry = 0;
		ehdr->e_phoff = phoff;
		ehdr->e_flags = 0;
		ehdr->e_ehsize = sizeof(Elf_Ehdr);
		ehdr->e_phentsize = sizeof(Elf_Phdr);
		ehdr->e_phnum = numsegs + 1;
		ehdr->e_shentsize = sizeof(Elf_Shdr);
		ehdr->e_shnum = 0;
		ehdr->e_shstrndx = SHN_UNDEF;

		/*
		 * Fill in the program header entries.
		 */
		phdr = (Elf_Phdr *)((char *)dst + phoff);

		/* The note segment. */
		phdr->p_type = PT_NOTE;
		phdr->p_offset = noteoff;
		phdr->p_vaddr = 0;
		phdr->p_paddr = 0;
		phdr->p_filesz = notesz;
		phdr->p_memsz = 0;
		phdr->p_flags = 0;
		phdr->p_align = 0;
		phdr++;

		/* All the writable segments from the program. */
		phc.phdr = phdr;
		phc.offset = *off;
		each_writable_segment(td, cb_put_phdr, &phc);
	}
}

static void
__elfN(putnote)(void *dst, size_t *off, const char *name, int type,
    const void *desc, size_t descsz)
{
	Elf_Note note;

	note.n_namesz = strlen(name) + 1;
	note.n_descsz = descsz;
	note.n_type = type;
	if (dst != NULL)
		bcopy(&note, (char *)dst + *off, sizeof note);
	*off += sizeof note;
	if (dst != NULL)
		bcopy(name, (char *)dst + *off, note.n_namesz);
	*off += roundup2(note.n_namesz, sizeof(Elf_Size));
	if (dst != NULL)
		bcopy(desc, (char *)dst + *off, note.n_descsz);
	*off += roundup2(note.n_descsz, sizeof(Elf_Size));
}
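
/*
 * Example record (illustrative): for name "FreeBSD" (n_namesz = 8) and
 * an int32_t descriptor (n_descsz = 4), the layout is a 12-byte
 * Elf_Note header, 8 bytes of name, then the descriptor, each variable
 * part rounded up to a multiple of sizeof(Elf_Size).
 */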

static boolean_t
__elfN(parse_notes)(struct image_params *imgp, Elf_Brandnote *checknote,
    int32_t *osrel, const Elf_Phdr *pnote)
{
	const Elf_Note *note, *note0, *note_end;
	const char *note_name;
	int i;

	if (pnote == NULL || pnote->p_offset >= PAGE_SIZE ||
	    pnote->p_offset + pnote->p_filesz >= PAGE_SIZE)
		return (FALSE);

	note = note0 = (const Elf_Note *)(imgp->image_header + pnote->p_offset);
	note_end = (const Elf_Note *)(imgp->image_header +
	    pnote->p_offset + pnote->p_filesz);
	for (i = 0; i < 100 && note >= note0 && note < note_end; i++) {
		if (!aligned(note, Elf32_Addr))
			return (FALSE);
		if (note->n_namesz != checknote->hdr.n_namesz ||
		    note->n_descsz != checknote->hdr.n_descsz ||
		    note->n_type != checknote->hdr.n_type)
			goto nextnote;
		note_name = (const char *)(note + 1);
		if (strncmp(checknote->vendor, note_name,
		    checknote->hdr.n_namesz) != 0)
			goto nextnote;

		/*
		 * Fetch the osreldate for the binary
		 * from the ELF OSABI-note if necessary.
		 */
		if ((checknote->flags & BN_TRANSLATE_OSREL) != 0 &&
		    checknote->trans_osrel != NULL)
			return (checknote->trans_osrel(note, osrel));
		return (TRUE);

nextnote:
		note = (const Elf_Note *)((const char *)(note + 1) +
		    roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
		    roundup2(note->n_descsz, sizeof(Elf32_Addr)));
	}

	return (FALSE);
}
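
/*
 * Illustrative arithmetic for the nextnote step above: a note with
 * n_namesz = 8 and n_descsz = 4 advances 12 (header) + 8 (name) +
 * 4 (descriptor) = 24 bytes to the next record, each variable part
 * first rounded up to a 4-byte boundary.
 */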

/*
 * Try to find the appropriate ABI-note section for checknote,
 * fetch the osreldate for the binary from the ELF OSABI-note.  Only the
 * first page of the image is searched, the same as for headers.
 */
static boolean_t
__elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote,
    int32_t *osrel)
{
	const Elf_Phdr *phdr;
	const Elf_Ehdr *hdr;
	int i;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);

	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_NOTE &&
		    __elfN(parse_notes)(imgp, checknote, osrel, &phdr[i]))
			return (TRUE);
	}
	return (FALSE);
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw __elfN(execsw) = {
	__CONCAT(exec_, __elfN(imgact)),
	__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));

#ifdef COMPRESS_USER_CORES
/*
 * Compress and write out a core segment for a user process.
 *
 * 'inbuf' is the starting address of a VM segment in the process' address
 * space that is to be compressed and written out to the core file.  'dest_buf'
 * is a buffer in the kernel's address space.  The segment is copied from
 * 'inbuf' to 'dest_buf' first before being processed by the compression
 * routine gzwrite().  This copying is necessary because the content of the VM
 * segment may change between the compression pass and the crc-computation pass
 * in gzwrite().  This is because realtime threads may preempt the UNIX kernel.
 */
static int
compress_core (gzFile file, char *inbuf, char *dest_buf, unsigned int len,
    struct thread *td)
{
	int len_compressed;
	int error = 0;
	unsigned int chunk_len;

	while (len) {
		chunk_len = (len > CORE_BUF_SIZE) ? CORE_BUF_SIZE : len;
		copyin(inbuf, dest_buf, chunk_len);
		len_compressed = gzwrite(file, dest_buf, chunk_len);

		EVENTHANDLER_INVOKE(app_coredump_progress, td, len_compressed);

		if ((unsigned int)len_compressed != chunk_len) {
			log(LOG_WARNING,
			    "compress_core: length mismatch (0x%x returned, "
			    "0x%x expected)\n", len_compressed, chunk_len);
			EVENTHANDLER_INVOKE(app_coredump_error, td,
			    "compress_core: length mismatch %x -> %x",
			    chunk_len, len_compressed);
			error = EFAULT;
			break;
		}
		inbuf += chunk_len;
		len -= chunk_len;
	}

	return (error);
}
#endif /* COMPRESS_USER_CORES */

static vm_prot_t
__elfN(trans_prot)(Elf_Word flags)
{
	vm_prot_t prot;

	prot = 0;
	if (flags & PF_X)
		prot |= VM_PROT_EXECUTE;
	if (flags & PF_W)
		prot |= VM_PROT_WRITE;
	if (flags & PF_R)
		prot |= VM_PROT_READ;
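
	/*
	 * Summary: PF_X/PF_W/PF_R map one-for-one onto
	 * VM_PROT_EXECUTE/VM_PROT_WRITE/VM_PROT_READ.  The i386 compat
	 * case below additionally grants execute on readable segments
	 * when the kern.elf32.read_exec knob is enabled.
	 */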
#if __ELF_WORD_SIZE == 32
#if defined(__amd64__) || defined(__ia64__)
	if (i386_read_exec && (flags & PF_R))
		prot |= VM_PROT_EXECUTE;
#endif
#endif
	return (prot);
}

static Elf_Word
__elfN(untrans_prot)(vm_prot_t prot)
{
	Elf_Word flags;

	flags = 0;
	if (prot & VM_PROT_EXECUTE)
		flags |= PF_X;
	if (prot & VM_PROT_READ)
		flags |= PF_R;
	if (prot & VM_PROT_WRITE)
		flags |= PF_W;
	return (flags);
}