2 * Copyright (c) 2000 David O'Brien
3 * Copyright (c) 1995-1996 Søren Schmidt
4 * Copyright (c) 1996 Peter Wemm
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer
12 * in this position and unchanged.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The name of the author may not be used to endorse or promote products
17 * derived from this software without specific prior written permission
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include "opt_capsicum.h"
35 #include "opt_compat.h"
38 #include <sys/param.h>
39 #include <sys/capsicum.h>
41 #include <sys/fcntl.h>
42 #include <sys/imgact.h>
43 #include <sys/imgact_elf.h>
45 #include <sys/kernel.h>
47 #include <sys/malloc.h>
48 #include <sys/mount.h>
50 #include <sys/namei.h>
51 #include <sys/pioctl.h>
53 #include <sys/procfs.h>
54 #include <sys/racct.h>
55 #include <sys/resourcevar.h>
56 #include <sys/rwlock.h>
58 #include <sys/sf_buf.h>
60 #include <sys/systm.h>
61 #include <sys/signalvar.h>
64 #include <sys/syscall.h>
65 #include <sys/sysctl.h>
66 #include <sys/sysent.h>
67 #include <sys/vnode.h>
68 #include <sys/syslog.h>
69 #include <sys/eventhandler.h>
75 #include <vm/vm_kern.h>
76 #include <vm/vm_param.h>
78 #include <vm/vm_map.h>
79 #include <vm/vm_object.h>
80 #include <vm/vm_extern.h>
82 #include <machine/elf.h>
83 #include <machine/md_var.h>
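/*
 * ELF_NOTE_ROUNDSIZE is the alignment applied to note name and
 * descriptor fields, and OLD_EI_BRAND is the e_ident[] index that holds
 * the old FreeBSD 3.x-style string brand compared against
 * compat_3_brand below.
 */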
85 #define ELF_NOTE_ROUNDSIZE 4
86 #define OLD_EI_BRAND 8
88 static int __elfN(check_header)(const Elf_Ehdr *hdr);
89 static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
90 const char *interp, int interp_name_len, int32_t *osrel);
91 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
92 u_long *entry, size_t pagesize);
93 static int __elfN(load_section)(struct image_params *imgp, vm_offset_t offset,
94 caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
96 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
97 static boolean_t __elfN(freebsd_trans_osrel)(const Elf_Note *note,
99 static boolean_t kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel);
100 static boolean_t __elfN(check_note)(struct image_params *imgp,
101 Elf_Brandnote *checknote, int32_t *osrel);
102 static vm_prot_t __elfN(trans_prot)(Elf_Word);
103 static Elf_Word __elfN(untrans_prot)(vm_prot_t);
105 SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
108 #ifdef COMPRESS_USER_CORES
109 static int compress_core(gzFile, char *, char *, unsigned int,
112 #define CORE_BUF_SIZE (16 * 1024)
114 int __elfN(fallback_brand) = -1;
115 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
116 fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
117 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
118 TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
119 &__elfN(fallback_brand));
121 static int elf_legacy_coredump = 0;
122 SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
123 &elf_legacy_coredump, 0, "");
125 int __elfN(nxstack) =
126 #if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */
131 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
132 nxstack, CTLFLAG_RW, &__elfN(nxstack), 0,
133 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack");
135 #if __ELF_WORD_SIZE == 32
136 #if defined(__amd64__) || defined(__ia64__)
137 int i386_read_exec = 0;
138 SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,
139 "enable execution from readable segments");
143 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
145 #define trunc_page_ps(va, ps) ((va) & ~(ps - 1))
146 #define round_page_ps(va, ps) (((va) + (ps - 1)) & ~(ps - 1))
147 #define aligned(a, t) (trunc_page_ps((u_long)(a), sizeof(t)) == (u_long)(a))
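/*
 * For example, with a 4 KB page size trunc_page_ps(0x1234, 4096) is
 * 0x1000 and round_page_ps(0x1234, 4096) is 0x2000, while aligned()
 * checks that an address is a multiple of the given type's size.
 */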
149 static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";
151 Elf_Brandnote __elfN(freebsd_brandnote) = {
152 .hdr.n_namesz = sizeof(FREEBSD_ABI_VENDOR),
153 .hdr.n_descsz = sizeof(int32_t),
155 .vendor = FREEBSD_ABI_VENDOR,
156 .flags = BN_TRANSLATE_OSREL,
157 .trans_osrel = __elfN(freebsd_trans_osrel)
161 __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel)
165 p = (uintptr_t)(note + 1);
166 p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
167 *osrel = *(const int32_t *)(p);
172 static const char GNU_ABI_VENDOR[] = "GNU";
173 static int GNU_KFREEBSD_ABI_DESC = 3;
175 Elf_Brandnote __elfN(kfreebsd_brandnote) = {
176 .hdr.n_namesz = sizeof(GNU_ABI_VENDOR),
177 .hdr.n_descsz = 16, /* XXX at least 16 */
179 .vendor = GNU_ABI_VENDOR,
180 .flags = BN_TRANSLATE_OSREL,
181 .trans_osrel = kfreebsd_trans_osrel
185 kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel)
187 const Elf32_Word *desc;
190 p = (uintptr_t)(note + 1);
191 p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
193 desc = (const Elf32_Word *)p;
194 if (desc[0] != GNU_KFREEBSD_ABI_DESC)
198 * Debian GNU/kFreeBSD embeds the earliest compatible kernel version
199 * (__FreeBSD_version: <major><two digit minor>Rxx) in the LSB way.
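 * For example, a descriptor of { 3, 9, 1, 0 } (kFreeBSD 9.1.0) yields
 * an osrel of 901000.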
201 *osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3];
207 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
211 for (i = 0; i < MAX_BRANDS; i++) {
212 if (elf_brand_list[i] == NULL) {
213 elf_brand_list[i] = entry;
217 if (i == MAX_BRANDS) {
218 printf("WARNING: %s: could not insert brandinfo entry: %p\n",
226 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
230 for (i = 0; i < MAX_BRANDS; i++) {
231 if (elf_brand_list[i] == entry) {
232 elf_brand_list[i] = NULL;
242 __elfN(brand_inuse)(Elf_Brandinfo *entry)
247 sx_slock(&allproc_lock);
248 FOREACH_PROC_IN_SYSTEM(p) {
249 if (p->p_sysent == entry->sysvec) {
254 sx_sunlock(&allproc_lock);
259 static Elf_Brandinfo *
260 __elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
261 int interp_name_len, int32_t *osrel)
263 const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
264 Elf_Brandinfo *bi, *bi_m;
269 * We support four types of branding -- (1) the ELF EI_OSABI field
270 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
271 * branding w/in the ELF header, (3) path of the `interp_path'
272 * field, and (4) the ".note.ABI-tag" ELF section.
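 * The lookup below tries these in a fixed order: the ABI note first,
 * then the EI_OSABI or old string brand, then the interpreter path, and
 * finally the kern.elf<N>.fallback_brand sysctl as a last resort.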
275 /* Look for an ".note.ABI-tag" ELF section */
277 for (i = 0; i < MAX_BRANDS; i++) {
278 bi = elf_brand_list[i];
281 if (hdr->e_machine == bi->machine && (bi->flags &
282 (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
283 ret = __elfN(check_note)(imgp, bi->brand_note, osrel);
285 * If the note checker claimed the binary, but the
286 * interpreter path in the image does not
287 * match the default one for the brand, try to
288 * search for other brands with the same
289 * interpreter. Either there is a better brand
290 * with the right interpreter, or, failing
291 * this, we return the first brand which accepted
292 * our note and, optionally, header.
294 if (ret && bi_m == NULL && (strlen(bi->interp_path) +
295 1 != interp_name_len || strncmp(interp,
296 bi->interp_path, interp_name_len) != 0)) {
307 /* If the executable has a brand, search for it in the brand list. */
308 for (i = 0; i < MAX_BRANDS; i++) {
309 bi = elf_brand_list[i];
310 if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
312 if (hdr->e_machine == bi->machine &&
313 (hdr->e_ident[EI_OSABI] == bi->brand ||
314 strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
315 bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
319 /* Lacking a known brand, search for a recognized interpreter. */
320 if (interp != NULL) {
321 for (i = 0; i < MAX_BRANDS; i++) {
322 bi = elf_brand_list[i];
323 if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
325 if (hdr->e_machine == bi->machine &&
326 /* ELF image p_filesz includes terminating zero */
327 strlen(bi->interp_path) + 1 == interp_name_len &&
328 strncmp(interp, bi->interp_path, interp_name_len)
334 /* Lacking a recognized interpreter, try the default brand */
335 for (i = 0; i < MAX_BRANDS; i++) {
336 bi = elf_brand_list[i];
337 if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY)
339 if (hdr->e_machine == bi->machine &&
340 __elfN(fallback_brand) == bi->brand)
347 __elfN(check_header)(const Elf_Ehdr *hdr)
353 hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
354 hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
355 hdr->e_ident[EI_VERSION] != EV_CURRENT ||
356 hdr->e_phentsize != sizeof(Elf_Phdr) ||
357 hdr->e_version != ELF_TARG_VER)
361 * Make sure we have at least one brand for this machine.
364 for (i = 0; i < MAX_BRANDS; i++) {
365 bi = elf_brand_list[i];
366 if (bi != NULL && bi->machine == hdr->e_machine)
376 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
377 vm_offset_t start, vm_offset_t end, vm_prot_t prot)
384 * Create the page if it doesn't exist yet. Ignore errors.
387 vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end),
388 VM_PROT_ALL, VM_PROT_ALL, 0);
392 * Find the page from the underlying object.
395 sf = vm_imgact_map_page(object, offset);
397 return (KERN_FAILURE);
398 off = offset - trunc_page(offset);
399 error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
401 vm_imgact_unmap_page(sf);
403 return (KERN_FAILURE);
406 return (KERN_SUCCESS);
410 __elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
411 vm_offset_t start, vm_offset_t end, vm_prot_t prot, int cow)
418 if (start != trunc_page(start)) {
419 rv = __elfN(map_partial)(map, object, offset, start,
420 round_page(start), prot);
423 offset += round_page(start) - start;
424 start = round_page(start);
426 if (end != round_page(end)) {
427 rv = __elfN(map_partial)(map, object, offset +
428 trunc_page(end) - start, trunc_page(end), end, prot);
431 end = trunc_page(end);
434 if (offset & PAGE_MASK) {
436 * The mapping is not page aligned. This means we have
437 * to copy the data. Sigh.
439 rv = vm_map_find(map, NULL, 0, &start, end - start, 0,
440 VMFS_NO_SPACE, prot | VM_PROT_WRITE, VM_PROT_ALL,
442 if (rv != KERN_SUCCESS)
445 return (KERN_SUCCESS);
446 for (; start < end; start += sz) {
447 sf = vm_imgact_map_page(object, offset);
449 return (KERN_FAILURE);
450 off = offset - trunc_page(offset);
452 if (sz > PAGE_SIZE - off)
453 sz = PAGE_SIZE - off;
454 error = copyout((caddr_t)sf_buf_kva(sf) + off,
456 vm_imgact_unmap_page(sf);
458 return (KERN_FAILURE);
463 vm_object_reference(object);
465 rv = vm_map_insert(map, object, offset, start, end,
466 prot, VM_PROT_ALL, cow);
468 if (rv != KERN_SUCCESS)
469 vm_object_deallocate(object);
473 return (KERN_SUCCESS);
478 __elfN(load_section)(struct image_params *imgp, vm_offset_t offset,
479 caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
486 vm_offset_t map_addr;
489 vm_offset_t file_addr;
492 * It's necessary to fail if the filsz + offset taken from the
493 * header is greater than the actual file pager object's size.
494 * If we were to allow this, then the vm_map_find() below would
495 * walk right off the end of the file object and into the ether.
497 * While I'm here, might as well check for something else that
498 * is invalid: filsz cannot be greater than memsz.
500 if ((off_t)filsz + offset > imgp->attr->va_size || filsz > memsz) {
501 uprintf("elf_load_section: truncated ELF file\n");
505 object = imgp->object;
506 map = &imgp->proc->p_vmspace->vm_map;
507 map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
508 file_addr = trunc_page_ps(offset, pagesize);
511 * We have two choices. We can either clear the data in the last page
512 * of an oversized mapping, or we can start the anon mapping a page
513 * early and copy the initialized data into that first page. We
514 * choose the second.
517 map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
519 map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
522 /* cow flags: don't dump readonly sections in core */
523 cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
524 (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
526 rv = __elfN(map_insert)(map,
528 file_addr, /* file offset */
529 map_addr, /* virtual start */
530 map_addr + map_len,/* virtual end */
533 if (rv != KERN_SUCCESS)
536 /* we can stop now if we've covered it all */
537 if (memsz == filsz) {
544 * We have to get the remaining bit of the file into the first part
545 * of the oversized map segment. This is normally because the .data
546 * segment in the file is extended to provide bss. It's a neat idea
547 * to try and save a page, but it's a pain in the behind to implement.
549 copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
550 map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
551 map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
554 /* This had damn well better be true! */
556 rv = __elfN(map_insert)(map, NULL, 0, map_addr, map_addr +
557 map_len, VM_PROT_ALL, 0);
558 if (rv != KERN_SUCCESS) {
566 sf = vm_imgact_map_page(object, offset + filsz);
570 /* send the page fragment to user space */
571 off = trunc_page_ps(offset + filsz, pagesize) -
572 trunc_page(offset + filsz);
573 error = copyout((caddr_t)sf_buf_kva(sf) + off,
574 (caddr_t)map_addr, copy_len);
575 vm_imgact_unmap_page(sf);
582 * set it to the specified protection.
583 * XXX had better undo the damage from pasting over the cracks here!
585 vm_map_protect(map, trunc_page(map_addr), round_page(map_addr +
586 map_len), prot, FALSE);
592 * Load the file "file" into memory. It may be either a shared object
595 * The "addr" reference parameter is in/out. On entry, it specifies
596 * the address where a shared object should be loaded. If the file is
597 * an executable, this value is ignored. On exit, "addr" specifies
598 * where the file was actually loaded.
600 * The "entry" reference parameter is out only. On exit, it specifies
601 * the entry point for the loaded file.
604 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
605 u_long *entry, size_t pagesize)
610 struct image_params image_params;
612 const Elf_Ehdr *hdr = NULL;
613 const Elf_Phdr *phdr = NULL;
614 struct nameidata *nd;
616 struct image_params *imgp;
619 u_long base_addr = 0;
620 int error, i, numsegs;
622 #ifdef CAPABILITY_MODE
624 * XXXJA: This check can go away once we are sufficiently confident
625 * that the checks in namei() are correct.
627 if (IN_CAPABILITY_MODE(curthread))
631 tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
633 attr = &tempdata->attr;
634 imgp = &tempdata->image_params;
637 * Initialize part of the common data
641 imgp->firstpage = NULL;
642 imgp->image_header = NULL;
644 imgp->execlabel = NULL;
646 NDINIT(nd, LOOKUP, LOCKLEAF | FOLLOW, UIO_SYSSPACE, file, curthread);
647 if ((error = namei(nd)) != 0) {
651 NDFREE(nd, NDF_ONLY_PNBUF);
652 imgp->vp = nd->ni_vp;
655 * Check permissions, modes, uid, etc on the file, and "open" it.
657 error = exec_check_permissions(imgp);
661 error = exec_map_first_page(imgp);
666 * Also make certain that the interpreter stays the same, so set
667 * its VV_TEXT flag, too.
669 VOP_SET_TEXT(nd->ni_vp);
671 imgp->object = nd->ni_vp->v_object;
673 hdr = (const Elf_Ehdr *)imgp->image_header;
674 if ((error = __elfN(check_header)(hdr)) != 0)
676 if (hdr->e_type == ET_DYN)
678 else if (hdr->e_type == ET_EXEC)
685 /* Only support headers that fit within first page for now */
686 if ((hdr->e_phoff > PAGE_SIZE) ||
687 (u_int)hdr->e_phentsize * hdr->e_phnum > PAGE_SIZE - hdr->e_phoff) {
692 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
693 if (!aligned(phdr, Elf_Addr)) {
698 for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
699 if (phdr[i].p_type == PT_LOAD && phdr[i].p_memsz != 0) {
700 /* Loadable segment */
701 prot = __elfN(trans_prot)(phdr[i].p_flags);
702 error = __elfN(load_section)(imgp, phdr[i].p_offset,
703 (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
704 phdr[i].p_memsz, phdr[i].p_filesz, prot, pagesize);
708 * Establish the base address if this is the
712 base_addr = trunc_page(phdr[i].p_vaddr +
718 *entry = (unsigned long)hdr->e_entry + rbase;
722 exec_unmap_first_page(imgp);
727 free(tempdata, M_TEMP);
733 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
737 const Elf_Phdr *phdr;
738 Elf_Auxargs *elf_auxargs;
739 struct vmspace *vmspace;
740 const char *err_str, *newinterp;
741 char *interp, *interp_buf, *path;
742 Elf_Brandinfo *brand_info;
743 struct sysentvec *sv;
745 u_long text_size, data_size, total_size, text_addr, data_addr;
746 u_long seg_size, seg_addr, addr, baddr, et_dyn_addr, entry, proghdr;
748 int error, i, n, interp_name_len, have_interp;
750 hdr = (const Elf_Ehdr *)imgp->image_header;
753 * Do we have a valid ELF header?
755 * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
756 * if the particular brand doesn't support it.
758 if (__elfN(check_header)(hdr) != 0 ||
759 (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
763 * From here on down, we return an errno, not -1, as we've
764 * detected an ELF file.
767 if ((hdr->e_phoff > PAGE_SIZE) ||
768 (u_int)hdr->e_phentsize * hdr->e_phnum > PAGE_SIZE - hdr->e_phoff) {
769 /* Only support headers in first page for now */
770 uprintf("Program headers not in the first page\n");
773 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
774 if (!aligned(phdr, Elf_Addr)) {
775 uprintf("Unaligned program headers\n");
782 text_size = data_size = total_size = text_addr = data_addr = 0;
785 err_str = newinterp = NULL;
786 interp = interp_buf = NULL;
789 for (i = 0; i < hdr->e_phnum; i++) {
790 switch (phdr[i].p_type) {
793 baddr = phdr[i].p_vaddr;
797 /* Path to interpreter */
798 if (phdr[i].p_filesz < 2 ||
799 phdr[i].p_filesz > MAXPATHLEN) {
800 uprintf("Invalid PT_INTERP\n");
804 if (interp != NULL) {
805 uprintf("Multiple PT_INTERP headers\n");
809 interp_name_len = phdr[i].p_filesz;
810 if (phdr[i].p_offset > PAGE_SIZE ||
811 interp_name_len > PAGE_SIZE - phdr[i].p_offset) {
812 VOP_UNLOCK(imgp->vp, 0);
813 interp_buf = malloc(interp_name_len + 1, M_TEMP,
815 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
816 error = vn_rdwr(UIO_READ, imgp->vp, interp_buf,
817 interp_name_len, phdr[i].p_offset,
818 UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred,
821 uprintf("i/o error PT_INTERP\n");
824 interp_buf[interp_name_len] = '\0';
827 interp = __DECONST(char *, imgp->image_header) +
829 if (interp[interp_name_len - 1] != '\0') {
830 uprintf("Invalid PT_INTERP\n");
839 __elfN(trans_prot)(phdr[i].p_flags);
840 imgp->stack_sz = phdr[i].p_memsz;
845 brand_info = __elfN(get_brandinfo)(imgp, interp, interp_name_len,
847 if (brand_info == NULL) {
848 uprintf("ELF binary type \"%u\" not known.\n",
849 hdr->e_ident[EI_OSABI]);
853 if (hdr->e_type == ET_DYN) {
854 if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) {
855 uprintf("Cannot execute shared object\n");
860 * Honour the base load address from the dso if it is
861 * non-zero for some reason.
864 et_dyn_addr = ET_DYN_LOAD_ADDR;
869 sv = brand_info->sysvec;
870 if (interp != NULL && brand_info->interp_newpath != NULL)
871 newinterp = brand_info->interp_newpath;
874 * Avoid a possible deadlock if the current address space is destroyed
875 * and that address space maps the locked vnode. In the common case,
876 * the locked vnode's v_usecount is decremented but remains greater
877 * than zero. Consequently, the vnode lock is not needed by vrele().
878 * However, in cases where the vnode lock is external, such as nullfs,
879 * v_usecount may become zero.
881 * The VV_TEXT flag prevents modifications to the executable while
882 * the vnode is unlocked.
884 VOP_UNLOCK(imgp->vp, 0);
886 error = exec_new_vmspace(imgp, sv);
887 imgp->proc->p_sysent = sv;
889 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
893 for (i = 0; i < hdr->e_phnum; i++) {
894 switch (phdr[i].p_type) {
895 case PT_LOAD: /* Loadable segment */
896 if (phdr[i].p_memsz == 0)
898 prot = __elfN(trans_prot)(phdr[i].p_flags);
899 error = __elfN(load_section)(imgp, phdr[i].p_offset,
900 (caddr_t)(uintptr_t)phdr[i].p_vaddr + et_dyn_addr,
901 phdr[i].p_memsz, phdr[i].p_filesz, prot,
907 * If this segment contains the program headers,
908 * remember their virtual address for the AT_PHDR
909 * aux entry. Static binaries don't usually include
912 if (phdr[i].p_offset == 0 &&
913 hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
915 proghdr = phdr[i].p_vaddr + hdr->e_phoff +
918 seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr);
919 seg_size = round_page(phdr[i].p_memsz +
920 phdr[i].p_vaddr + et_dyn_addr - seg_addr);
923 * Make the largest executable segment the official
924 * text segment and all others data.
926 * Note that obreak() assumes that data_addr +
927 * data_size == end of data load area, and the ELF
928 * file format expects segments to be sorted by
929 * address. If multiple data segments exist, the
930 * last one will be used.
933 if (phdr[i].p_flags & PF_X && text_size < seg_size) {
934 text_size = seg_size;
935 text_addr = seg_addr;
937 data_size = seg_size;
938 data_addr = seg_addr;
940 total_size += seg_size;
942 case PT_PHDR: /* Program header table info */
943 proghdr = phdr[i].p_vaddr + et_dyn_addr;
950 if (data_addr == 0 && data_size == 0) {
951 data_addr = text_addr;
952 data_size = text_size;
955 entry = (u_long)hdr->e_entry + et_dyn_addr;
958 * Check limits. It should be safe to check the
959 * limits after loading the segments since we do
960 * not actually fault in all the segments' pages.
962 PROC_LOCK(imgp->proc);
963 if (data_size > lim_cur(imgp->proc, RLIMIT_DATA))
964 err_str = "Data segment size exceeds process limit";
965 else if (text_size > maxtsiz)
966 err_str = "Text segment size exceeds system limit";
967 else if (total_size > lim_cur(imgp->proc, RLIMIT_VMEM))
968 err_str = "Total segment size exceeds process limit";
969 else if (racct_set(imgp->proc, RACCT_DATA, data_size) != 0)
970 err_str = "Data segment size exceeds resource limit";
971 else if (racct_set(imgp->proc, RACCT_VMEM, total_size) != 0)
972 err_str = "Total segment size exceeds resource limit";
973 if (err_str != NULL) {
974 PROC_UNLOCK(imgp->proc);
975 uprintf("%s\n", err_str);
980 vmspace = imgp->proc->p_vmspace;
981 vmspace->vm_tsize = text_size >> PAGE_SHIFT;
982 vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
983 vmspace->vm_dsize = data_size >> PAGE_SHIFT;
984 vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
987 * We load the dynamic linker where a userland call
988 * to mmap(0, ...) would put it. The rationale behind this
989 * calculation is that it leaves room for the heap to grow to
990 * its maximum allowed size.
992 addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(imgp->proc,
994 PROC_UNLOCK(imgp->proc);
996 imgp->entry_addr = entry;
998 if (interp != NULL) {
1000 VOP_UNLOCK(imgp->vp, 0);
1001 if (brand_info->emul_path != NULL &&
1002 brand_info->emul_path[0] != '\0') {
1003 path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
1004 snprintf(path, MAXPATHLEN, "%s%s",
1005 brand_info->emul_path, interp);
1006 error = __elfN(load_file)(imgp->proc, path, &addr,
1007 &imgp->entry_addr, sv->sv_pagesize);
1012 if (!have_interp && newinterp != NULL &&
1013 (brand_info->interp_path == NULL ||
1014 strcmp(interp, brand_info->interp_path) == 0)) {
1015 error = __elfN(load_file)(imgp->proc, newinterp, &addr,
1016 &imgp->entry_addr, sv->sv_pagesize);
1021 error = __elfN(load_file)(imgp->proc, interp, &addr,
1022 &imgp->entry_addr, sv->sv_pagesize);
1024 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
1026 uprintf("ELF interpreter %s not found, error %d\n",
1034 * Construct auxargs table (used by the fixup routine)
1036 elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
1037 elf_auxargs->execfd = -1;
1038 elf_auxargs->phdr = proghdr;
1039 elf_auxargs->phent = hdr->e_phentsize;
1040 elf_auxargs->phnum = hdr->e_phnum;
1041 elf_auxargs->pagesz = PAGE_SIZE;
1042 elf_auxargs->base = addr;
1043 elf_auxargs->flags = 0;
1044 elf_auxargs->entry = entry;
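/*
 * These values are later exposed to the runtime linker as the AT_PHDR,
 * AT_PHENT, AT_PHNUM, AT_PAGESZ, AT_BASE and AT_ENTRY aux vector
 * entries by __elfN(freebsd_fixup)() below.
 */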
1046 imgp->auxargs = elf_auxargs;
1047 imgp->interpreted = 0;
1048 imgp->reloc_base = addr;
1049 imgp->proc->p_osrel = osrel;
1050 imgp->proc->p_elf_machine = hdr->e_machine;
1051 imgp->proc->p_elf_flags = hdr->e_flags;
1054 free(interp_buf, M_TEMP);
1058 #define suword __CONCAT(suword, __ELF_WORD_SIZE)
1061 __elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
1063 Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
1067 base = (Elf_Addr *)*stack_base;
1068 pos = base + (imgp->args->argc + imgp->args->envc + 2);
1070 if (args->execfd != -1)
1071 AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
1072 AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
1073 AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
1074 AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
1075 AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
1076 AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
1077 AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
1078 AUXARGS_ENTRY(pos, AT_BASE, args->base);
1079 if (imgp->execpathp != 0)
1080 AUXARGS_ENTRY(pos, AT_EXECPATH, imgp->execpathp);
1081 AUXARGS_ENTRY(pos, AT_OSRELDATE,
1082 imgp->proc->p_ucred->cr_prison->pr_osreldate);
1083 if (imgp->canary != 0) {
1084 AUXARGS_ENTRY(pos, AT_CANARY, imgp->canary);
1085 AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen);
1087 AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus);
1088 if (imgp->pagesizes != 0) {
1089 AUXARGS_ENTRY(pos, AT_PAGESIZES, imgp->pagesizes);
1090 AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen);
1092 if (imgp->sysent->sv_timekeep_base != 0) {
1093 AUXARGS_ENTRY(pos, AT_TIMEKEEP,
1094 imgp->sysent->sv_timekeep_base);
1096 AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj
1097 != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
1098 imgp->sysent->sv_stackprot);
1099 AUXARGS_ENTRY(pos, AT_NULL, 0);
1101 free(imgp->auxargs, M_TEMP);
1102 imgp->auxargs = NULL;
1105 suword(base, (long)imgp->args->argc);
1106 *stack_base = (register_t *)base;
1111 * Code for generating ELF core dumps.
1114 typedef void (*segment_callback)(vm_map_entry_t, void *);
1116 /* Closure for cb_put_phdr(). */
1117 struct phdr_closure {
1118 Elf_Phdr *phdr; /* Program header to fill in */
1119 Elf_Off offset; /* Offset of segment in core file */
1122 /* Closure for cb_size_segment(). */
1123 struct sseg_closure {
1124 int count; /* Count of writable segments. */
1125 size_t size; /* Total size of all writable segments. */
1128 typedef void (*outfunc_t)(void *, struct sbuf *, size_t *);
1131 int type; /* Note type. */
1132 outfunc_t outfunc; /* Output function. */
1133 void *outarg; /* Argument for the output function. */
1134 size_t outsize; /* Output size. */
1135 TAILQ_ENTRY(note_info) link; /* Link to the next note info. */
1138 TAILQ_HEAD(note_info_list, note_info);
1140 static void cb_put_phdr(vm_map_entry_t, void *);
1141 static void cb_size_segment(vm_map_entry_t, void *);
1142 static void each_writable_segment(struct thread *, segment_callback, void *);
1143 static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
1144 int, void *, size_t, struct note_info_list *, size_t, gzFile);
1145 static void __elfN(prepare_notes)(struct thread *, struct note_info_list *,
1147 static void __elfN(puthdr)(struct thread *, void *, size_t, int, size_t);
1148 static void __elfN(putnote)(struct note_info *, struct sbuf *);
1149 static size_t register_note(struct note_info_list *, int, outfunc_t, void *);
1150 static int sbuf_drain_core_output(void *, const char *, int);
1151 static int sbuf_drain_count(void *arg, const char *data, int len);
1153 static void __elfN(note_fpregset)(void *, struct sbuf *, size_t *);
1154 static void __elfN(note_prpsinfo)(void *, struct sbuf *, size_t *);
1155 static void __elfN(note_prstatus)(void *, struct sbuf *, size_t *);
1156 static void __elfN(note_threadmd)(void *, struct sbuf *, size_t *);
1157 static void __elfN(note_thrmisc)(void *, struct sbuf *, size_t *);
1158 static void __elfN(note_procstat_auxv)(void *, struct sbuf *, size_t *);
1159 static void __elfN(note_procstat_proc)(void *, struct sbuf *, size_t *);
1160 static void __elfN(note_procstat_psstrings)(void *, struct sbuf *, size_t *);
1161 static void note_procstat_files(void *, struct sbuf *, size_t *);
1162 static void note_procstat_groups(void *, struct sbuf *, size_t *);
1163 static void note_procstat_osrel(void *, struct sbuf *, size_t *);
1164 static void note_procstat_rlimit(void *, struct sbuf *, size_t *);
1165 static void note_procstat_umask(void *, struct sbuf *, size_t *);
1166 static void note_procstat_vmmap(void *, struct sbuf *, size_t *);
1168 #ifdef COMPRESS_USER_CORES
1169 extern int compress_user_cores;
1170 extern int compress_user_cores_gzlevel;
1174 core_output(struct vnode *vp, void *base, size_t len, off_t offset,
1175 struct ucred *active_cred, struct ucred *file_cred,
1176 struct thread *td, char *core_buf, gzFile gzfile) {
1180 #ifdef COMPRESS_USER_CORES
1181 error = compress_core(gzfile, base, core_buf, len, td);
1183 panic("shouldn't be here");
1187 * EFAULT is a non-fatal error that we can get, for example,
1188 * if the segment is backed by a file but extends beyond its
1191 error = vn_rdwr_inchunks(UIO_WRITE, vp, base, len, offset,
1192 UIO_USERSPACE, IO_UNIT | IO_DIRECT, active_cred, file_cred,
1194 if (error == EFAULT) {
1195 log(LOG_WARNING, "Failed to fully fault in a core file "
1196 "segment at VA %p with size 0x%zx to be written at "
1197 "offset 0x%jx for process %s\n", base, len, offset,
1201 * Write a "real" zero byte at the end of the target
1202 * region in the case this is the last segment.
1203 * The intermediate space will be implicitly
1206 error = vn_rdwr_inchunks(UIO_WRITE, vp,
1207 __DECONST(void *, zero_region), 1, offset + len - 1,
1208 UIO_SYSSPACE, IO_UNIT | IO_DIRECT, active_cred,
1209 file_cred, NULL, td);
1215 /* Coredump output parameters for sbuf drain routine. */
1216 struct sbuf_drain_core_params {
1218 struct ucred *active_cred;
1219 struct ucred *file_cred;
1222 #ifdef COMPRESS_USER_CORES
1228 * Drain into a core file.
1231 sbuf_drain_core_output(void *arg, const char *data, int len)
1233 struct sbuf_drain_core_params *p;
1236 p = (struct sbuf_drain_core_params *)arg;
1239 * Some kern_proc out routines that print to this sbuf may
1240 * call us with the process lock held. Draining with the
1241 * non-sleepable lock held is unsafe. The lock is needed for
1242 * those routines when dumping a live process. In our case we
1243 * can safely release the lock before draining and acquire
1246 locked = PROC_LOCKED(p->td->td_proc);
1248 PROC_UNLOCK(p->td->td_proc);
1249 #ifdef COMPRESS_USER_CORES
1250 if (p->gzfile != Z_NULL)
1251 error = compress_core(p->gzfile, NULL, __DECONST(char *, data),
1255 error = vn_rdwr_inchunks(UIO_WRITE, p->vp,
1256 __DECONST(void *, data), len, p->offset, UIO_SYSSPACE,
1257 IO_UNIT | IO_DIRECT, p->active_cred, p->file_cred, NULL,
1260 PROC_LOCK(p->td->td_proc);
1268 * Drain into a counter.
1271 sbuf_drain_count(void *arg, const char *data __unused, int len)
1275 sizep = (size_t *)arg;
1281 __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags)
1283 struct ucred *cred = td->td_ucred;
1285 struct sseg_closure seginfo;
1286 struct note_info_list notelst;
1287 struct note_info *ninfo;
1289 size_t hdrsize, notesz, coresize;
1291 gzFile gzfile = Z_NULL;
1292 char *core_buf = NULL;
1293 #ifdef COMPRESS_USER_CORES
1294 char gzopen_flags[8];
1296 int doing_compress = flags & IMGACT_CORE_COMPRESS;
1300 TAILQ_INIT(&notelst);
1302 #ifdef COMPRESS_USER_CORES
1303 if (doing_compress) {
1306 if (compress_user_cores_gzlevel >= 0 &&
1307 compress_user_cores_gzlevel <= 9)
1308 *p++ = '0' + compress_user_cores_gzlevel;
1310 gzfile = gz_open("", gzopen_flags, vp);
1311 if (gzfile == Z_NULL) {
1315 core_buf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO);
1323 /* Size the program segments. */
1326 each_writable_segment(td, cb_size_segment, &seginfo);
1329 * Collect info about the core file header area.
1331 hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count);
1332 __elfN(prepare_notes)(td, &notelst, &notesz);
1333 coresize = round_page(hdrsize + notesz) + seginfo.size;
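/*
 * The resulting core file layout is: the ELF header, one PT_NOTE
 * program header plus one PT_LOAD header per writable segment, the
 * notes themselves, padding up to a page boundary, and then the raw
 * contents of each writable segment.
 */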
1337 PROC_LOCK(td->td_proc);
1338 error = racct_add(td->td_proc, RACCT_CORE, coresize);
1339 PROC_UNLOCK(td->td_proc);
1346 if (coresize >= limit) {
1352 * Allocate memory for building the header, fill it up,
1353 * and write it out following the notes.
1355 hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
1356 error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize,
1357 &notelst, notesz, gzfile);
1359 /* Write the contents of all of the writable segments. */
1365 php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
1366 offset = round_page(hdrsize + notesz);
1367 for (i = 0; i < seginfo.count; i++) {
1368 error = core_output(vp, (caddr_t)(uintptr_t)php->p_vaddr,
1369 php->p_filesz, offset, cred, NOCRED, curthread, core_buf, gzfile);
1372 offset += php->p_filesz;
1378 "Failed to write core file for process %s (error %d)\n",
1379 curproc->p_comm, error);
1383 #ifdef COMPRESS_USER_CORES
1385 free(core_buf, M_TEMP);
1389 while ((ninfo = TAILQ_FIRST(&notelst)) != NULL) {
1390 TAILQ_REMOVE(&notelst, ninfo, link);
1391 free(ninfo, M_TEMP);
1400 * A callback for each_writable_segment() to write out the segment's
1401 * program header entry.
1404 cb_put_phdr(vm_map_entry_t entry, void *closure)
1408 struct phdr_closure *phc = (struct phdr_closure *)closure;
1409 Elf_Phdr *phdr = phc->phdr;
1411 phc->offset = round_page(phc->offset);
1413 phdr->p_type = PT_LOAD;
1414 phdr->p_offset = phc->offset;
1415 phdr->p_vaddr = entry->start;
1417 phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
1418 phdr->p_align = PAGE_SIZE;
1419 phdr->p_flags = __elfN(untrans_prot)(entry->protection);
1421 phc->offset += phdr->p_filesz;
1426 * A callback for each_writable_segment() to gather information about
1427 * the number of segments and their total size.
1430 cb_size_segment(vm_map_entry_t entry, void *closure)
1434 struct sseg_closure *ssc = (struct sseg_closure *)closure;
1437 ssc->size += entry->end - entry->start;
1441 * For each writable segment in the process's memory map, call the given
1442 * function with a pointer to the map entry and some arbitrary
1443 * caller-supplied data.
1446 each_writable_segment(struct thread *td, segment_callback func, void *closure)
1451 struct proc *p = td->td_proc;
1452 vm_map_t map = &p->p_vmspace->vm_map;
1453 vm_map_entry_t entry;
1454 vm_object_t backing_object, object;
1455 boolean_t ignore_entry;
1457 vm_map_lock_read(map);
1458 for (entry = map->header.next; entry != &map->header;
1459 entry = entry->next) {
1461 * Don't dump inaccessible mappings, deal with legacy
1464 * Note that read-only segments related to the elf binary
1465 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
1466 * need to arbitrarily ignore such segments.
1468 if (elf_legacy_coredump) {
1469 if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
1472 if ((entry->protection & VM_PROT_ALL) == 0)
1477 * Don't include a memory segment in the coredump if
1478 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1479 * madvise(2). Do not dump submaps (i.e. parts of the
1482 if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
1485 if ((object = entry->object.vm_object) == NULL)
1488 /* Ignore memory-mapped devices and such things. */
1489 VM_OBJECT_RLOCK(object);
1490 while ((backing_object = object->backing_object) != NULL) {
1491 VM_OBJECT_RLOCK(backing_object);
1492 VM_OBJECT_RUNLOCK(object);
1493 object = backing_object;
1495 ignore_entry = object->type != OBJT_DEFAULT &&
1496 object->type != OBJT_SWAP && object->type != OBJT_VNODE &&
1497 object->type != OBJT_PHYS;
1498 VM_OBJECT_RUNLOCK(object);
1502 (*func)(entry, closure);
1504 vm_map_unlock_read(map);
1508 * Write the core file header to the file, including padding up to
1509 * the page boundary.
1512 __elfN(corehdr)(struct thread *td, struct vnode *vp, struct ucred *cred,
1513 int numsegs, void *hdr, size_t hdrsize, struct note_info_list *notelst,
1514 size_t notesz, gzFile gzfile)
1516 struct sbuf_drain_core_params params;
1517 struct note_info *ninfo;
1521 /* Fill in the header. */
1522 bzero(hdr, hdrsize);
1523 __elfN(puthdr)(td, hdr, hdrsize, numsegs, notesz);
1526 params.active_cred = cred;
1527 params.file_cred = NOCRED;
1530 #ifdef COMPRESS_USER_CORES
1531 params.gzfile = gzfile;
1533 sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN);
1534 sbuf_set_drain(sb, sbuf_drain_core_output, &params);
1535 sbuf_start_section(sb, NULL);
1536 sbuf_bcat(sb, hdr, hdrsize);
1537 TAILQ_FOREACH(ninfo, notelst, link)
1538 __elfN(putnote)(ninfo, sb);
1539 /* Align up to a page boundary for the program segments. */
1540 sbuf_end_section(sb, -1, PAGE_SIZE, 0);
1541 error = sbuf_finish(sb);
1548 __elfN(prepare_notes)(struct thread *td, struct note_info_list *list,
1558 size += register_note(list, NT_PRPSINFO, __elfN(note_prpsinfo), p);
1561 * To have the debugger select the right thread (LWP) as the initial
1562 * thread, we dump the state of the thread passed to us in td first.
1563 * This is the thread that causes the core dump and is thus likely to
1564 * be the right thread one wants to have selected in the debugger.
1567 while (thr != NULL) {
1568 size += register_note(list, NT_PRSTATUS,
1569 __elfN(note_prstatus), thr);
1570 size += register_note(list, NT_FPREGSET,
1571 __elfN(note_fpregset), thr);
1572 size += register_note(list, NT_THRMISC,
1573 __elfN(note_thrmisc), thr);
1574 size += register_note(list, -1,
1575 __elfN(note_threadmd), thr);
1577 thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
1578 TAILQ_NEXT(thr, td_plist);
1580 thr = TAILQ_NEXT(thr, td_plist);
1583 size += register_note(list, NT_PROCSTAT_PROC,
1584 __elfN(note_procstat_proc), p);
1585 size += register_note(list, NT_PROCSTAT_FILES,
1586 note_procstat_files, p);
1587 size += register_note(list, NT_PROCSTAT_VMMAP,
1588 note_procstat_vmmap, p);
1589 size += register_note(list, NT_PROCSTAT_GROUPS,
1590 note_procstat_groups, p);
1591 size += register_note(list, NT_PROCSTAT_UMASK,
1592 note_procstat_umask, p);
1593 size += register_note(list, NT_PROCSTAT_RLIMIT,
1594 note_procstat_rlimit, p);
1595 size += register_note(list, NT_PROCSTAT_OSREL,
1596 note_procstat_osrel, p);
1597 size += register_note(list, NT_PROCSTAT_PSSTRINGS,
1598 __elfN(note_procstat_psstrings), p);
1599 size += register_note(list, NT_PROCSTAT_AUXV,
1600 __elfN(note_procstat_auxv), p);
1606 __elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs,
1611 struct phdr_closure phc;
1613 ehdr = (Elf_Ehdr *)hdr;
1614 phdr = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr));
1616 ehdr->e_ident[EI_MAG0] = ELFMAG0;
1617 ehdr->e_ident[EI_MAG1] = ELFMAG1;
1618 ehdr->e_ident[EI_MAG2] = ELFMAG2;
1619 ehdr->e_ident[EI_MAG3] = ELFMAG3;
1620 ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1621 ehdr->e_ident[EI_DATA] = ELF_DATA;
1622 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1623 ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1624 ehdr->e_ident[EI_ABIVERSION] = 0;
1625 ehdr->e_ident[EI_PAD] = 0;
1626 ehdr->e_type = ET_CORE;
1627 ehdr->e_machine = td->td_proc->p_elf_machine;
1628 ehdr->e_version = EV_CURRENT;
1630 ehdr->e_phoff = sizeof(Elf_Ehdr);
1631 ehdr->e_flags = td->td_proc->p_elf_flags;
1632 ehdr->e_ehsize = sizeof(Elf_Ehdr);
1633 ehdr->e_phentsize = sizeof(Elf_Phdr);
1634 ehdr->e_phnum = numsegs + 1;
1635 ehdr->e_shentsize = sizeof(Elf_Shdr);
1637 ehdr->e_shstrndx = SHN_UNDEF;
1640 * Fill in the program header entries.
1643 /* The note segment. */
1644 phdr->p_type = PT_NOTE;
1645 phdr->p_offset = hdrsize;
1648 phdr->p_filesz = notesz;
1650 phdr->p_flags = PF_R;
1651 phdr->p_align = ELF_NOTE_ROUNDSIZE;
1654 /* All the writable segments from the program. */
1656 phc.offset = round_page(hdrsize + notesz);
1657 each_writable_segment(td, cb_put_phdr, &phc);
1661 register_note(struct note_info_list *list, int type, outfunc_t out, void *arg)
1663 struct note_info *ninfo;
1664 size_t size, notesize;
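/*
 * Each note emitter is invoked twice: first with a NULL sbuf, as here,
 * so that it only reports its payload size, and later from
 * __elfN(putnote)() with a real sbuf to emit the data itself.
 */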
1667 out(arg, NULL, &size);
1668 ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK);
1670 ninfo->outfunc = out;
1671 ninfo->outarg = arg;
1672 ninfo->outsize = size;
1673 TAILQ_INSERT_TAIL(list, ninfo, link);
1678 notesize = sizeof(Elf_Note) + /* note header */
1679 roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) +
1681 roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */
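/*
 * For example, with the 8-byte "FreeBSD\0" vendor string and a 4-byte
 * descriptor this comes to 12 (note header) + 8 + 4 = 24 bytes once
 * both variable fields are padded to ELF_NOTE_ROUNDSIZE.
 */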
1687 append_note_data(const void *src, void *dst, size_t len)
1691 padded_len = roundup2(len, ELF_NOTE_ROUNDSIZE);
1693 bcopy(src, dst, len);
1694 bzero((char *)dst + len, padded_len - len);
1696 return (padded_len);
1700 __elfN(populate_note)(int type, void *src, void *dst, size_t size, void **descp)
1708 note = (Elf_Note *)buf;
1709 note->n_namesz = sizeof(FREEBSD_ABI_VENDOR);
1710 note->n_descsz = size;
1711 note->n_type = type;
1712 buf += sizeof(*note);
1713 buf += append_note_data(FREEBSD_ABI_VENDOR, buf,
1714 sizeof(FREEBSD_ABI_VENDOR));
1715 append_note_data(src, buf, size);
1720 notesize = sizeof(Elf_Note) + /* note header */
1721 roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) +
1723 roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */
1729 __elfN(putnote)(struct note_info *ninfo, struct sbuf *sb)
1732 ssize_t old_len, sect_len;
1733 size_t new_len, descsz, i;
1735 if (ninfo->type == -1) {
1736 ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
1740 note.n_namesz = sizeof(FREEBSD_ABI_VENDOR);
1741 note.n_descsz = ninfo->outsize;
1742 note.n_type = ninfo->type;
1744 sbuf_bcat(sb, &note, sizeof(note));
1745 sbuf_start_section(sb, &old_len);
1746 sbuf_bcat(sb, FREEBSD_ABI_VENDOR, sizeof(FREEBSD_ABI_VENDOR));
1747 sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
1748 if (note.n_descsz == 0)
1750 sbuf_start_section(sb, &old_len);
1751 ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
1752 sect_len = sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
1756 new_len = (size_t)sect_len;
1757 descsz = roundup(note.n_descsz, ELF_NOTE_ROUNDSIZE);
1758 if (new_len < descsz) {
1760 * It is expected that individual note emitters will correctly
1761 * predict their expected output size and fill up to that size
1762 * themselves, padding in a format-specific way if needed.
1763 * However, in case they don't, just do it here with zeros.
1765 for (i = 0; i < descsz - new_len; i++)
1767 } else if (new_len > descsz) {
1769 * We can't always truncate sb -- we may have drained some
1772 KASSERT(new_len == descsz, ("%s: Note type %u changed as we "
1773 "read it (%zu > %zu). Since it is longer than "
1774 "expected, this coredump's notes are corrupt. THIS "
1775 "IS A BUG in the note_procstat routine for type %u.\n",
1776 __func__, (unsigned)note.n_type, new_len, descsz,
1777 (unsigned)note.n_type));
1782 * Miscellaneous note out functions.
1785 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
1786 #include <compat/freebsd32/freebsd32.h>
1788 typedef struct prstatus32 elf_prstatus_t;
1789 typedef struct prpsinfo32 elf_prpsinfo_t;
1790 typedef struct fpreg32 elf_prfpregset_t;
1791 typedef struct fpreg32 elf_fpregset_t;
1792 typedef struct reg32 elf_gregset_t;
1793 typedef struct thrmisc32 elf_thrmisc_t;
1794 #define ELF_KERN_PROC_MASK KERN_PROC_MASK32
1795 typedef struct kinfo_proc32 elf_kinfo_proc_t;
1796 typedef uint32_t elf_ps_strings_t;
1798 typedef prstatus_t elf_prstatus_t;
1799 typedef prpsinfo_t elf_prpsinfo_t;
1800 typedef prfpregset_t elf_prfpregset_t;
1801 typedef prfpregset_t elf_fpregset_t;
1802 typedef gregset_t elf_gregset_t;
1803 typedef thrmisc_t elf_thrmisc_t;
1804 #define ELF_KERN_PROC_MASK 0
1805 typedef struct kinfo_proc elf_kinfo_proc_t;
1806 typedef vm_offset_t elf_ps_strings_t;
1810 __elfN(note_prpsinfo)(void *arg, struct sbuf *sb, size_t *sizep)
1816 elf_prpsinfo_t *psinfo;
1819 p = (struct proc *)arg;
1821 KASSERT(*sizep == sizeof(*psinfo), ("invalid size"));
1822 psinfo = malloc(sizeof(*psinfo), M_TEMP, M_ZERO | M_WAITOK);
1823 psinfo->pr_version = PRPSINFO_VERSION;
1824 psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
1825 strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
1827 if (p->p_args != NULL) {
1828 len = sizeof(psinfo->pr_psargs) - 1;
1829 if (len > p->p_args->ar_length)
1830 len = p->p_args->ar_length;
1831 memcpy(psinfo->pr_psargs, p->p_args->ar_args, len);
1837 sbuf_new(&sbarg, psinfo->pr_psargs,
1838 sizeof(psinfo->pr_psargs), SBUF_FIXEDLEN);
1839 error = proc_getargv(curthread, p, &sbarg);
1841 if (sbuf_finish(&sbarg) == 0)
1842 len = sbuf_len(&sbarg) - 1;
1844 len = sizeof(psinfo->pr_psargs) - 1;
1845 sbuf_delete(&sbarg);
1847 if (error || len == 0)
1848 strlcpy(psinfo->pr_psargs, p->p_comm,
1849 sizeof(psinfo->pr_psargs));
1851 KASSERT(len < sizeof(psinfo->pr_psargs),
1852 ("len is too long: %zu vs %zu", len,
1853 sizeof(psinfo->pr_psargs)));
1854 cp = psinfo->pr_psargs;
1857 cp = memchr(cp, '\0', end - cp);
1863 psinfo->pr_pid = p->p_pid;
1864 sbuf_bcat(sb, psinfo, sizeof(*psinfo));
1865 free(psinfo, M_TEMP);
1867 *sizep = sizeof(*psinfo);
1871 __elfN(note_prstatus)(void *arg, struct sbuf *sb, size_t *sizep)
1874 elf_prstatus_t *status;
1876 td = (struct thread *)arg;
1878 KASSERT(*sizep == sizeof(*status), ("invalid size"));
1879 status = malloc(sizeof(*status), M_TEMP, M_ZERO | M_WAITOK);
1880 status->pr_version = PRSTATUS_VERSION;
1881 status->pr_statussz = sizeof(elf_prstatus_t);
1882 status->pr_gregsetsz = sizeof(elf_gregset_t);
1883 status->pr_fpregsetsz = sizeof(elf_fpregset_t);
1884 status->pr_osreldate = osreldate;
1885 status->pr_cursig = td->td_proc->p_sig;
1886 status->pr_pid = td->td_tid;
1887 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
1888 fill_regs32(td, &status->pr_reg);
1890 fill_regs(td, &status->pr_reg);
1892 sbuf_bcat(sb, status, sizeof(*status));
1893 free(status, M_TEMP);
1895 *sizep = sizeof(*status);
1899 __elfN(note_fpregset)(void *arg, struct sbuf *sb, size_t *sizep)
1902 elf_prfpregset_t *fpregset;
1904 td = (struct thread *)arg;
1906 KASSERT(*sizep == sizeof(*fpregset), ("invalid size"));
1907 fpregset = malloc(sizeof(*fpregset), M_TEMP, M_ZERO | M_WAITOK);
1908 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
1909 fill_fpregs32(td, fpregset);
1911 fill_fpregs(td, fpregset);
1913 sbuf_bcat(sb, fpregset, sizeof(*fpregset));
1914 free(fpregset, M_TEMP);
1916 *sizep = sizeof(*fpregset);
1920 __elfN(note_thrmisc)(void *arg, struct sbuf *sb, size_t *sizep)
1923 elf_thrmisc_t thrmisc;
1925 td = (struct thread *)arg;
1927 KASSERT(*sizep == sizeof(thrmisc), ("invalid size"));
1928 bzero(&thrmisc._pad, sizeof(thrmisc._pad));
1929 strcpy(thrmisc.pr_tname, td->td_name);
1930 sbuf_bcat(sb, &thrmisc, sizeof(thrmisc));
1932 *sizep = sizeof(thrmisc);
1936 * Allow for MD specific notes, as well as any MD
1937 * specific preparations for writing MI notes.
1940 __elfN(note_threadmd)(void *arg, struct sbuf *sb, size_t *sizep)
1946 td = (struct thread *)arg;
1948 if (size != 0 && sb != NULL)
1949 buf = malloc(size, M_TEMP, M_ZERO | M_WAITOK);
1953 __elfN(dump_thread)(td, buf, &size);
1954 KASSERT(sb == NULL || *sizep == size, ("invalid size"));
1955 if (size != 0 && sb != NULL)
1956 sbuf_bcat(sb, buf, size);
1961 #ifdef KINFO_PROC_SIZE
1962 CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
1966 __elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep)
1972 p = (struct proc *)arg;
1973 size = sizeof(structsize) + p->p_numthreads *
1974 sizeof(elf_kinfo_proc_t);
1977 KASSERT(*sizep == size, ("invalid size"));
1978 structsize = sizeof(elf_kinfo_proc_t);
1979 sbuf_bcat(sb, &structsize, sizeof(structsize));
1981 kern_proc_out(p, sb, ELF_KERN_PROC_MASK);
1986 #ifdef KINFO_FILE_SIZE
1987 CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
1991 note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep)
1994 size_t size, sect_sz, i;
1995 ssize_t start_len, sect_len;
1996 int structsize, filedesc_flags;
1998 if (coredump_pack_fileinfo)
1999 filedesc_flags = KERN_FILEDESC_PACK_KINFO;
2003 p = (struct proc *)arg;
2004 structsize = sizeof(struct kinfo_file);
2007 sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
2008 sbuf_set_drain(sb, sbuf_drain_count, &size);
2009 sbuf_bcat(sb, &structsize, sizeof(structsize));
2011 kern_proc_filedesc_out(p, sb, -1, filedesc_flags);
2016 sbuf_start_section(sb, &start_len);
2018 sbuf_bcat(sb, &structsize, sizeof(structsize));
2020 kern_proc_filedesc_out(p, sb, *sizep - sizeof(structsize),
2023 sect_len = sbuf_end_section(sb, start_len, 0, 0);
2028 KASSERT(sect_sz <= *sizep,
2029 ("kern_proc_filedesc_out did not respect maxlen; "
2030 "requested %zu, got %zu", *sizep - sizeof(structsize),
2031 sect_sz - sizeof(structsize)));
2033 for (i = 0; i < *sizep - sect_sz && sb->s_error == 0; i++)
2038 #ifdef KINFO_VMENTRY_SIZE
2039 CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
2043 note_procstat_vmmap(void *arg, struct sbuf *sb, size_t *sizep)
2047 int structsize, vmmap_flags;
2049 if (coredump_pack_vmmapinfo)
2050 vmmap_flags = KERN_VMMAP_PACK_KINFO;
2054 p = (struct proc *)arg;
2055 structsize = sizeof(struct kinfo_vmentry);
2058 sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
2059 sbuf_set_drain(sb, sbuf_drain_count, &size);
2060 sbuf_bcat(sb, &structsize, sizeof(structsize));
2062 kern_proc_vmmap_out(p, sb, -1, vmmap_flags);
2067 sbuf_bcat(sb, &structsize, sizeof(structsize));
2069 kern_proc_vmmap_out(p, sb, *sizep - sizeof(structsize),
2075 note_procstat_groups(void *arg, struct sbuf *sb, size_t *sizep)
2081 p = (struct proc *)arg;
2082 size = sizeof(structsize) + p->p_ucred->cr_ngroups * sizeof(gid_t);
2084 KASSERT(*sizep == size, ("invalid size"));
2085 structsize = sizeof(gid_t);
2086 sbuf_bcat(sb, &structsize, sizeof(structsize));
2087 sbuf_bcat(sb, p->p_ucred->cr_groups, p->p_ucred->cr_ngroups *
2094 note_procstat_umask(void *arg, struct sbuf *sb, size_t *sizep)
2100 p = (struct proc *)arg;
2101 size = sizeof(structsize) + sizeof(p->p_fd->fd_cmask);
2103 KASSERT(*sizep == size, ("invalid size"));
2104 structsize = sizeof(p->p_fd->fd_cmask);
2105 sbuf_bcat(sb, &structsize, sizeof(structsize));
2106 sbuf_bcat(sb, &p->p_fd->fd_cmask, sizeof(p->p_fd->fd_cmask));
2112 note_procstat_rlimit(void *arg, struct sbuf *sb, size_t *sizep)
2115 struct rlimit rlim[RLIM_NLIMITS];
2119 p = (struct proc *)arg;
2120 size = sizeof(structsize) + sizeof(rlim);
2122 KASSERT(*sizep == size, ("invalid size"));
2123 structsize = sizeof(rlim);
2124 sbuf_bcat(sb, &structsize, sizeof(structsize));
2126 for (i = 0; i < RLIM_NLIMITS; i++)
2127 lim_rlimit(p, i, &rlim[i]);
2129 sbuf_bcat(sb, rlim, sizeof(rlim));
2135 note_procstat_osrel(void *arg, struct sbuf *sb, size_t *sizep)
2141 p = (struct proc *)arg;
2142 size = sizeof(structsize) + sizeof(p->p_osrel);
2144 KASSERT(*sizep == size, ("invalid size"));
2145 structsize = sizeof(p->p_osrel);
2146 sbuf_bcat(sb, &structsize, sizeof(structsize));
2147 sbuf_bcat(sb, &p->p_osrel, sizeof(p->p_osrel));
2153 __elfN(note_procstat_psstrings)(void *arg, struct sbuf *sb, size_t *sizep)
2156 elf_ps_strings_t ps_strings;
2160 p = (struct proc *)arg;
2161 size = sizeof(structsize) + sizeof(ps_strings);
2163 KASSERT(*sizep == size, ("invalid size"));
2164 structsize = sizeof(ps_strings);
2165 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
2166 ps_strings = PTROUT(p->p_sysent->sv_psstrings);
2168 ps_strings = p->p_sysent->sv_psstrings;
2170 sbuf_bcat(sb, &structsize, sizeof(structsize));
2171 sbuf_bcat(sb, &ps_strings, sizeof(ps_strings));
2177 __elfN(note_procstat_auxv)(void *arg, struct sbuf *sb, size_t *sizep)
2183 p = (struct proc *)arg;
2186 sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
2187 sbuf_set_drain(sb, sbuf_drain_count, &size);
2188 sbuf_bcat(sb, &structsize, sizeof(structsize));
2190 proc_getauxv(curthread, p, sb);
2196 structsize = sizeof(Elf_Auxinfo);
2197 sbuf_bcat(sb, &structsize, sizeof(structsize));
2199 proc_getauxv(curthread, p, sb);
2205 __elfN(parse_notes)(struct image_params *imgp, Elf_Brandnote *checknote,
2206 int32_t *osrel, const Elf_Phdr *pnote)
2208 const Elf_Note *note, *note0, *note_end;
2209 const char *note_name;
2214 /* We need some limit, might as well use PAGE_SIZE. */
2215 if (pnote == NULL || pnote->p_filesz > PAGE_SIZE)
2217 ASSERT_VOP_LOCKED(imgp->vp, "parse_notes");
2218 if (pnote->p_offset > PAGE_SIZE ||
2219 pnote->p_filesz > PAGE_SIZE - pnote->p_offset) {
2220 VOP_UNLOCK(imgp->vp, 0);
2221 buf = malloc(pnote->p_filesz, M_TEMP, M_WAITOK);
2222 vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY);
2223 error = vn_rdwr(UIO_READ, imgp->vp, buf, pnote->p_filesz,
2224 pnote->p_offset, UIO_SYSSPACE, IO_NODELOCKED,
2225 curthread->td_ucred, NOCRED, NULL, curthread);
2227 uprintf("i/o error PT_NOTE\n");
2231 note = note0 = (const Elf_Note *)buf;
2232 note_end = (const Elf_Note *)(buf + pnote->p_filesz);
2234 note = note0 = (const Elf_Note *)(imgp->image_header +
2236 note_end = (const Elf_Note *)(imgp->image_header +
2237 pnote->p_offset + pnote->p_filesz);
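/*
 * Walk the note records: each entry is a note header followed by its
 * name and descriptor, both padded to ELF_NOTE_ROUNDSIZE, and the loop
 * is capped at 100 entries as a sanity limit.
 */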
2240 for (i = 0; i < 100 && note >= note0 && note < note_end; i++) {
2241 if (!aligned(note, Elf32_Addr) || (const char *)note_end -
2242 (const char *)note < sizeof(Elf_Note)) {
2246 if (note->n_namesz != checknote->hdr.n_namesz ||
2247 note->n_descsz != checknote->hdr.n_descsz ||
2248 note->n_type != checknote->hdr.n_type)
2250 note_name = (const char *)(note + 1);
2251 if (note_name + checknote->hdr.n_namesz >=
2252 (const char *)note_end || strncmp(checknote->vendor,
2253 note_name, checknote->hdr.n_namesz) != 0)
2257 * Fetch the osreldate for the binary
2258 * from the ELF OSABI-note if necessary.
2260 if ((checknote->flags & BN_TRANSLATE_OSREL) != 0 &&
2261 checknote->trans_osrel != NULL) {
2262 res = checknote->trans_osrel(note, osrel);
2268 note = (const Elf_Note *)((const char *)(note + 1) +
2269 roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) +
2270 roundup2(note->n_descsz, ELF_NOTE_ROUNDSIZE));
2279 * Try to find the appropriate ABI-note section for checknote, and
2280 * fetch the osreldate for the binary from the ELF OSABI-note. Only the
2281 * first page of the image is searched, the same as for headers.
2284 __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *checknote,
2287 const Elf_Phdr *phdr;
2288 const Elf_Ehdr *hdr;
2291 hdr = (const Elf_Ehdr *)imgp->image_header;
2292 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
2294 for (i = 0; i < hdr->e_phnum; i++) {
2295 if (phdr[i].p_type == PT_NOTE &&
2296 __elfN(parse_notes)(imgp, checknote, osrel, &phdr[i]))
2304 * Tell kern_execve.c about it, with a little help from the linker.
2306 static struct execsw __elfN(execsw) = {
2307 __CONCAT(exec_, __elfN(imgact)),
2308 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
2310 EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
2312 #ifdef COMPRESS_USER_CORES
2314 * Compress and write out a core segment for a user process.
2316 * 'inbuf' is the starting address of a VM segment in the process' address
2317 * space that is to be compressed and written out to the core file. 'dest_buf'
2318 * is a buffer in the kernel's address space. The segment is copied from
2319 * 'inbuf' to 'dest_buf' first before being processed by the compression
2320 * routine gzwrite(). This copying is necessary because the content of the VM
2321 * segment may change between the compression pass and the crc-computation pass
2322 * in gzwrite(). This is because realtime threads may preempt the UNIX kernel.
2324 * If inbuf is NULL it is assumed that data is already copied to 'dest_buf'.
2327 compress_core (gzFile file, char *inbuf, char *dest_buf, unsigned int len,
2332 unsigned int chunk_len;
2335 if (inbuf != NULL) {
2336 chunk_len = (len > CORE_BUF_SIZE) ? CORE_BUF_SIZE : len;
2339 * We can get an EFAULT error here. In that case zero out
2340 * the current chunk of the segment.
2342 error = copyin(inbuf, dest_buf, chunk_len);
2344 bzero(dest_buf, chunk_len);
2351 len_compressed = gzwrite(file, dest_buf, chunk_len);
2353 EVENTHANDLER_INVOKE(app_coredump_progress, td, len_compressed);
2355 if ((unsigned int)len_compressed != chunk_len) {
2357 "compress_core: length mismatch (0x%x returned, "
2358 "0x%x expected)\n", len_compressed, chunk_len);
2359 EVENTHANDLER_INVOKE(app_coredump_error, td,
2360 "compress_core: length mismatch %x -> %x",
2361 chunk_len, len_compressed);
2371 #endif /* COMPRESS_USER_CORES */
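/*
 * Translate ELF segment permission flags (PF_R/PF_W/PF_X) into VM
 * protection bits and back; used when mapping PT_LOAD segments and when
 * emitting core file program headers.
 */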
2374 __elfN(trans_prot)(Elf_Word flags)
2380 prot |= VM_PROT_EXECUTE;
2382 prot |= VM_PROT_WRITE;
2384 prot |= VM_PROT_READ;
2385 #if __ELF_WORD_SIZE == 32
2386 #if defined(__amd64__) || defined(__ia64__)
2387 if (i386_read_exec && (flags & PF_R))
2388 prot |= VM_PROT_EXECUTE;
2395 __elfN(untrans_prot)(vm_prot_t prot)
2400 if (prot & VM_PROT_EXECUTE)
2402 if (prot & VM_PROT_READ)
2404 if (prot & VM_PROT_WRITE)