2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2017 Dell EMC
5 * Copyright (c) 2000-2001, 2003 David O'Brien
6 * Copyright (c) 1995-1996 Søren Schmidt
7 * Copyright (c) 1996 Peter Wemm
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer
15 * in this position and unchanged.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
37 #include "opt_capsicum.h"
39 #include <sys/param.h>
40 #include <sys/capsicum.h>
41 #include <sys/compressor.h>
43 #include <sys/fcntl.h>
44 #include <sys/imgact.h>
45 #include <sys/imgact_elf.h>
47 #include <sys/kernel.h>
49 #include <sys/malloc.h>
50 #include <sys/mount.h>
52 #include <sys/namei.h>
54 #include <sys/procfs.h>
55 #include <sys/ptrace.h>
56 #include <sys/racct.h>
57 #include <sys/resourcevar.h>
58 #include <sys/rwlock.h>
60 #include <sys/sf_buf.h>
62 #include <sys/systm.h>
63 #include <sys/signalvar.h>
66 #include <sys/syscall.h>
67 #include <sys/sysctl.h>
68 #include <sys/sysent.h>
69 #include <sys/vnode.h>
70 #include <sys/syslog.h>
71 #include <sys/eventhandler.h>
75 #include <vm/vm_kern.h>
76 #include <vm/vm_param.h>
78 #include <vm/vm_map.h>
79 #include <vm/vm_object.h>
80 #include <vm/vm_extern.h>
82 #include <machine/elf.h>
83 #include <machine/md_var.h>
85 #define ELF_NOTE_ROUNDSIZE 4
86 #define OLD_EI_BRAND 8
88 static int __elfN(check_header)(const Elf_Ehdr *hdr);
89 static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
90 const char *interp, int32_t *osrel, uint32_t *fctl0);
91 static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
93 static int __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset,
94 caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot);
95 static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
96 static bool __elfN(freebsd_trans_osrel)(const Elf_Note *note,
98 static bool kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel);
99 static boolean_t __elfN(check_note)(struct image_params *imgp,
100 Elf_Brandnote *checknote, int32_t *osrel, boolean_t *has_fctl0,
102 static vm_prot_t __elfN(trans_prot)(Elf_Word);
103 static Elf_Word __elfN(untrans_prot)(vm_prot_t);
105 SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE),
106 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
109 #define CORE_BUF_SIZE (16 * 1024)
111 int __elfN(fallback_brand) = -1;
112 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
113 fallback_brand, CTLFLAG_RWTUN, &__elfN(fallback_brand), 0,
114 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
116 static int elf_legacy_coredump = 0;
117 SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
118 &elf_legacy_coredump, 0,
119 "include all and only RW pages in core dumps");
121 int __elfN(nxstack) =
122 #if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */ || \
123 (defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__) || \
129 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
130 nxstack, CTLFLAG_RW, &__elfN(nxstack), 0,
131 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack");
133 #if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
134 int i386_read_exec = 0;
135 SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,
136 "enable execution from readable segments");
139 static u_long __elfN(pie_base) = ET_DYN_LOAD_ADDR;
141 sysctl_pie_base(SYSCTL_HANDLER_ARGS)
146 val = __elfN(pie_base);
147 error = sysctl_handle_long(oidp, &val, 0, req);
148 if (error != 0 || req->newptr == NULL)
150 if ((val & PAGE_MASK) != 0)
152 __elfN(pie_base) = val;
155 SYSCTL_PROC(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, pie_base,
156 CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
157 sysctl_pie_base, "LU",
158 "PIE load base without randomization");
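/*
 * Illustrative userland sketch (not part of this file): reading the
 * PIE load base through the sysctl declared above. The OID name
 * depends on __ELF_WORD_SIZE; "kern.elf64.pie_base" is assumed here
 * for a 64-bit kernel.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		unsigned long base;
 *		size_t len = sizeof(base);
 *
 *		if (sysctlbyname("kern.elf64.pie_base", &base, &len,
 *		    NULL, 0) == -1)
 *			return (1);
 *		printf("PIE base: %#lx\n", base);
 *		return (0);
 *	}
 */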
160 SYSCTL_NODE(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, aslr,
161 CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
163 #define ASLR_NODE_OID __CONCAT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), _aslr)
165 static int __elfN(aslr_enabled) = 0;
166 SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, enable, CTLFLAG_RWTUN,
167 &__elfN(aslr_enabled), 0,
168 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
169 ": enable address map randomization");
171 static int __elfN(pie_aslr_enabled) = 0;
172 SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, pie_enable, CTLFLAG_RWTUN,
173 &__elfN(pie_aslr_enabled), 0,
174 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
175 ": enable address map randomization for PIE binaries");
177 static int __elfN(aslr_honor_sbrk) = 1;
178 SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, honor_sbrk, CTLFLAG_RW,
179 &__elfN(aslr_honor_sbrk), 0,
180 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used");
182 static int __elfN(aslr_stack_gap) = 3;
183 SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, stack_gap, CTLFLAG_RW,
184 &__elfN(aslr_stack_gap), 0,
185 __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
186 ": maximum percentage of main stack to waste on a random gap");
188 static int __elfN(sigfastblock) = 1;
189 SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, sigfastblock,
190 CTLFLAG_RWTUN, &__elfN(sigfastblock), 0,
191 "enable sigfastblock for new processes");
193 static bool __elfN(allow_wx) = true;
194 SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx,
195 CTLFLAG_RWTUN, &__elfN(allow_wx), 0,
196 "Allow pages to be mapped simultaneously writable and executable");
198 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
200 #define aligned(a, t) (rounddown2((u_long)(a), sizeof(t)) == (u_long)(a))
202 static const char FREEBSD_ABI_VENDOR[] = "FreeBSD";
204 Elf_Brandnote __elfN(freebsd_brandnote) = {
205 .hdr.n_namesz = sizeof(FREEBSD_ABI_VENDOR),
206 .hdr.n_descsz = sizeof(int32_t),
207 .hdr.n_type = NT_FREEBSD_ABI_TAG,
208 .vendor = FREEBSD_ABI_VENDOR,
209 .flags = BN_TRANSLATE_OSREL,
210 .trans_osrel = __elfN(freebsd_trans_osrel)
214 __elfN(freebsd_trans_osrel)(const Elf_Note *note, int32_t *osrel)
218 p = (uintptr_t)(note + 1);
219 p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
220 *osrel = *(const int32_t *)(p);
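/*
 * Layout sketch of the note parsed above, assuming the 8-byte
 * "FreeBSD\0" vendor name (n_namesz == 8, already a multiple of
 * ELF_NOTE_ROUNDSIZE):
 *
 *	offset  0: Elf_Note header (n_namesz, n_descsz, n_type)
 *	offset 12: "FreeBSD\0"		(note + 1) points here
 *	offset 20: int32_t osrel	p after the roundup2() above
 *
 * A 7-byte name would pad to 8 via roundup2(7, 4), keeping the
 * descriptor 4-byte aligned.
 */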
225 static const char GNU_ABI_VENDOR[] = "GNU";
226 static int GNU_KFREEBSD_ABI_DESC = 3;
228 Elf_Brandnote __elfN(kfreebsd_brandnote) = {
229 .hdr.n_namesz = sizeof(GNU_ABI_VENDOR),
230 .hdr.n_descsz = 16, /* XXX at least 16 */
232 .vendor = GNU_ABI_VENDOR,
233 .flags = BN_TRANSLATE_OSREL,
234 .trans_osrel = kfreebsd_trans_osrel
238 kfreebsd_trans_osrel(const Elf_Note *note, int32_t *osrel)
240 const Elf32_Word *desc;
243 p = (uintptr_t)(note + 1);
244 p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
246 desc = (const Elf32_Word *)p;
247 if (desc[0] != GNU_KFREEBSD_ABI_DESC)
251 * Debian GNU/kFreeBSD embeds the earliest compatible kernel version
252 * (__FreeBSD_version: <major><two digit minor>Rxx) in the LSB way.
254 *osrel = desc[1] * 100000 + desc[2] * 1000 + desc[3];
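/*
 * Worked example of the encoding above: for a kernel version of
 * 9.1.0 the note carries desc[1..3] = {9, 1, 0}, giving
 *	*osrel = 9 * 100000 + 1 * 1000 + 0 = 901000.
 */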
260 __elfN(insert_brand_entry)(Elf_Brandinfo *entry)
264 for (i = 0; i < MAX_BRANDS; i++) {
265 if (elf_brand_list[i] == NULL) {
266 elf_brand_list[i] = entry;
270 if (i == MAX_BRANDS) {
271 printf("WARNING: %s: could not insert brandinfo entry: %p\n",
279 __elfN(remove_brand_entry)(Elf_Brandinfo *entry)
283 for (i = 0; i < MAX_BRANDS; i++) {
284 if (elf_brand_list[i] == entry) {
285 elf_brand_list[i] = NULL;
295 __elfN(brand_inuse)(Elf_Brandinfo *entry)
300 sx_slock(&allproc_lock);
301 FOREACH_PROC_IN_SYSTEM(p) {
302 if (p->p_sysent == entry->sysvec) {
307 sx_sunlock(&allproc_lock);
312 static Elf_Brandinfo *
313 __elfN(get_brandinfo)(struct image_params *imgp, const char *interp,
314 int32_t *osrel, uint32_t *fctl0)
316 const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
317 Elf_Brandinfo *bi, *bi_m;
318 boolean_t ret, has_fctl0;
319 int i, interp_name_len;
321 interp_name_len = interp != NULL ? strlen(interp) + 1 : 0;
324 * We support four types of branding -- (1) the ELF EI_OSABI field
325 * that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
326 * branding within the ELF header, (3) the path of the `interp_path'
327 * field, and (4) the ".note.ABI-tag" ELF section.
330 /* Look for an ".note.ABI-tag" ELF section */
332 for (i = 0; i < MAX_BRANDS; i++) {
333 bi = elf_brand_list[i];
336 if (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0)
338 if (hdr->e_machine == bi->machine && (bi->flags &
339 (BI_BRAND_NOTE|BI_BRAND_NOTE_MANDATORY)) != 0) {
343 ret = __elfN(check_note)(imgp, bi->brand_note, osrel,
345 /* Give brand a chance to veto check_note's guess */
346 if (ret && bi->header_supported) {
347 ret = bi->header_supported(imgp, osrel,
348 has_fctl0 ? fctl0 : NULL);
351 * If the note checker claimed the binary, but the
352 * interpreter path in the image does not match the
353 * default one for the brand, try to search for other
354 * brands with the same interpreter. Either there is a
355 * better brand with the right interpreter, or, failing
356 * that, we return the first brand which accepted
357 * our note and, optionally, header.
360 if (ret && bi_m == NULL && interp != NULL &&
361 (bi->interp_path == NULL ||
362 (strlen(bi->interp_path) + 1 != interp_name_len ||
363 strncmp(interp, bi->interp_path, interp_name_len)
375 /* If the executable has a brand, search for it in the brand list. */
376 for (i = 0; i < MAX_BRANDS; i++) {
377 bi = elf_brand_list[i];
378 if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 ||
379 (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0))
381 if (hdr->e_machine == bi->machine &&
382 (hdr->e_ident[EI_OSABI] == bi->brand ||
383 (bi->compat_3_brand != NULL &&
384 strcmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
385 bi->compat_3_brand) == 0))) {
386 /* Looks good, but give brand a chance to veto */
387 if (bi->header_supported == NULL ||
388 bi->header_supported(imgp, NULL, NULL)) {
390 * Again, prefer strictly matching
391 * interpreter path.
393 if (interp_name_len == 0 &&
394 bi->interp_path == NULL)
396 if (bi->interp_path != NULL &&
397 strlen(bi->interp_path) + 1 ==
398 interp_name_len && strncmp(interp,
399 bi->interp_path, interp_name_len) == 0)
409 /* No known brand, see if the header is recognized by any brand */
410 for (i = 0; i < MAX_BRANDS; i++) {
411 bi = elf_brand_list[i];
412 if (bi == NULL || bi->flags & BI_BRAND_NOTE_MANDATORY ||
413 bi->header_supported == NULL)
415 if (hdr->e_machine == bi->machine) {
416 ret = bi->header_supported(imgp, NULL, NULL);
422 /* Lacking a known brand, search for a recognized interpreter. */
423 if (interp != NULL) {
424 for (i = 0; i < MAX_BRANDS; i++) {
425 bi = elf_brand_list[i];
426 if (bi == NULL || (bi->flags &
427 (BI_BRAND_NOTE_MANDATORY | BI_BRAND_ONLY_STATIC))
430 if (hdr->e_machine == bi->machine &&
431 bi->interp_path != NULL &&
432 /* ELF image p_filesz includes terminating zero */
433 strlen(bi->interp_path) + 1 == interp_name_len &&
434 strncmp(interp, bi->interp_path, interp_name_len)
435 == 0 && (bi->header_supported == NULL ||
436 bi->header_supported(imgp, NULL, NULL)))
441 /* Lacking a recognized interpreter, try the default brand */
442 for (i = 0; i < MAX_BRANDS; i++) {
443 bi = elf_brand_list[i];
444 if (bi == NULL || (bi->flags & BI_BRAND_NOTE_MANDATORY) != 0 ||
445 (interp != NULL && (bi->flags & BI_BRAND_ONLY_STATIC) != 0))
447 if (hdr->e_machine == bi->machine &&
448 __elfN(fallback_brand) == bi->brand &&
449 (bi->header_supported == NULL ||
450 bi->header_supported(imgp, NULL, NULL)))
457 __elfN(phdr_in_zero_page)(const Elf_Ehdr *hdr)
459 return (hdr->e_phoff <= PAGE_SIZE &&
460 (u_int)hdr->e_phentsize * hdr->e_phnum <= PAGE_SIZE - hdr->e_phoff);
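/*
 * Note on the check above: e_phentsize and e_phnum are both 16-bit
 * fields, so their product always fits in the (u_int) cast, and the
 * right-hand subtraction cannot underflow because e_phoff has already
 * been bounded by PAGE_SIZE.
 */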
464 __elfN(check_header)(const Elf_Ehdr *hdr)
470 hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
471 hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
472 hdr->e_ident[EI_VERSION] != EV_CURRENT ||
473 hdr->e_phentsize != sizeof(Elf_Phdr) ||
474 hdr->e_version != ELF_TARG_VER)
478 * Make sure we have at least one brand for this machine.
481 for (i = 0; i < MAX_BRANDS; i++) {
482 bi = elf_brand_list[i];
483 if (bi != NULL && bi->machine == hdr->e_machine)
493 __elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
494 vm_offset_t start, vm_offset_t end, vm_prot_t prot)
501 * Create the page if it doesn't exist yet. Ignore errors.
503 vm_map_fixed(map, NULL, 0, trunc_page(start), round_page(end) -
504 trunc_page(start), VM_PROT_ALL, VM_PROT_ALL, MAP_CHECK_EXCL);
507 * Find the page from the underlying object.
509 if (object != NULL) {
510 sf = vm_imgact_map_page(object, offset);
512 return (KERN_FAILURE);
513 off = offset - trunc_page(offset);
514 error = copyout((caddr_t)sf_buf_kva(sf) + off, (caddr_t)start,
516 vm_imgact_unmap_page(sf);
518 return (KERN_FAILURE);
521 return (KERN_SUCCESS);
525 __elfN(map_insert)(struct image_params *imgp, vm_map_t map, vm_object_t object,
526 vm_ooffset_t offset, vm_offset_t start, vm_offset_t end, vm_prot_t prot,
532 int error, locked, rv;
534 if (start != trunc_page(start)) {
535 rv = __elfN(map_partial)(map, object, offset, start,
536 round_page(start), prot);
537 if (rv != KERN_SUCCESS)
539 offset += round_page(start) - start;
540 start = round_page(start);
542 if (end != round_page(end)) {
543 rv = __elfN(map_partial)(map, object, offset +
544 trunc_page(end) - start, trunc_page(end), end, prot);
545 if (rv != KERN_SUCCESS)
547 end = trunc_page(end);
550 return (KERN_SUCCESS);
551 if ((offset & PAGE_MASK) != 0) {
553 * The mapping is not page aligned. This means that we have
554 * to copy the data.
556 rv = vm_map_fixed(map, NULL, 0, start, end - start,
557 prot | VM_PROT_WRITE, VM_PROT_ALL, MAP_CHECK_EXCL);
558 if (rv != KERN_SUCCESS)
561 return (KERN_SUCCESS);
562 for (; start < end; start += sz) {
563 sf = vm_imgact_map_page(object, offset);
565 return (KERN_FAILURE);
566 off = offset - trunc_page(offset);
568 if (sz > PAGE_SIZE - off)
569 sz = PAGE_SIZE - off;
570 error = copyout((caddr_t)sf_buf_kva(sf) + off,
572 vm_imgact_unmap_page(sf);
574 return (KERN_FAILURE);
578 vm_object_reference(object);
579 rv = vm_map_fixed(map, object, offset, start, end - start,
580 prot, VM_PROT_ALL, cow | MAP_CHECK_EXCL |
581 (object != NULL ? MAP_VN_EXEC : 0));
582 if (rv != KERN_SUCCESS) {
583 locked = VOP_ISLOCKED(imgp->vp);
584 VOP_UNLOCK(imgp->vp);
585 vm_object_deallocate(object);
586 vn_lock(imgp->vp, locked | LK_RETRY);
588 } else if (object != NULL) {
589 MPASS(imgp->vp->v_object == object);
590 VOP_SET_TEXT_CHECKED(imgp->vp);
593 return (KERN_SUCCESS);
597 __elfN(load_section)(struct image_params *imgp, vm_ooffset_t offset,
598 caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot)
604 vm_offset_t map_addr;
607 vm_ooffset_t file_addr;
610 * It's necessary to fail if the filsz + offset taken from the
611 * header is greater than the actual file pager object's size.
612 * If we were to allow this, then the vm_map_find() below would
613 * walk right off the end of the file object and into the ether.
615 * While I'm here, might as well check for something else that
616 * is invalid: filsz cannot be greater than memsz.
618 if ((filsz != 0 && (off_t)filsz + offset > imgp->attr->va_size) ||
620 uprintf("elf_load_section: truncated ELF file\n");
624 object = imgp->object;
625 map = &imgp->proc->p_vmspace->vm_map;
626 map_addr = trunc_page((vm_offset_t)vmaddr);
627 file_addr = trunc_page(offset);
630 * We have two choices. We can either clear the data in the last page
631 * of an oversized mapping, or we can start the anon mapping a page
632 * early and copy the initialized data into that first page. We
633 * choose the second.
637 else if (memsz > filsz)
638 map_len = trunc_page(offset + filsz) - file_addr;
640 map_len = round_page(offset + filsz) - file_addr;
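/*
 * Worked example for the bss case above, assuming 4 KiB pages:
 * with offset = 0x10200, filsz = 0x1c00 and memsz > filsz, the file
 * data ends at offset + filsz = 0x11e00, so
 *	file_addr = trunc_page(0x10200) = 0x10000
 *	map_len   = trunc_page(0x11e00) - 0x10000 = 0x1000
 * and only the fully file-backed page is mapped from the object; the
 * partial last page is handled by the copy path further down.
 */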
643 /* cow flags: don't dump readonly sections in core */
644 cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
645 (prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
647 rv = __elfN(map_insert)(imgp, map, object, file_addr,
648 map_addr, map_addr + map_len, prot, cow);
649 if (rv != KERN_SUCCESS)
652 /* we can stop now if we've covered it all */
658 * We have to get the remaining bit of the file into the first part
659 * of the oversized map segment. This is normally because the .data
660 * segment in the file is extended to provide bss. It's a neat idea
661 * to try and save a page, but it's a pain in the behind to implement.
663 copy_len = filsz == 0 ? 0 : (offset + filsz) - trunc_page(offset +
665 map_addr = trunc_page((vm_offset_t)vmaddr + filsz);
666 map_len = round_page((vm_offset_t)vmaddr + memsz) - map_addr;
668 /* This had damn well better be true! */
670 rv = __elfN(map_insert)(imgp, map, NULL, 0, map_addr,
671 map_addr + map_len, prot, 0);
672 if (rv != KERN_SUCCESS)
677 sf = vm_imgact_map_page(object, offset + filsz);
681 /* send the page fragment to user space */
682 error = copyout((caddr_t)sf_buf_kva(sf), (caddr_t)map_addr,
684 vm_imgact_unmap_page(sf);
690 * Remove write access to the page if it was only granted by map_insert
693 if ((prot & VM_PROT_WRITE) == 0)
694 vm_map_protect(map, trunc_page(map_addr), round_page(map_addr +
695 map_len), prot, 0, VM_MAP_PROTECT_SET_PROT);
701 __elfN(load_sections)(struct image_params *imgp, const Elf_Ehdr *hdr,
702 const Elf_Phdr *phdr, u_long rbase, u_long *base_addrp)
709 ASSERT_VOP_LOCKED(imgp->vp, __func__);
714 for (i = 0; i < hdr->e_phnum; i++) {
715 if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
718 /* Loadable segment */
719 prot = __elfN(trans_prot)(phdr[i].p_flags);
720 error = __elfN(load_section)(imgp, phdr[i].p_offset,
721 (caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
722 phdr[i].p_memsz, phdr[i].p_filesz, prot);
727 * Establish the base address if this is the first segment.
730 base_addr = trunc_page(phdr[i].p_vaddr + rbase);
735 if (base_addrp != NULL)
736 *base_addrp = base_addr;
742 * Load the file "file" into memory. It may be either a shared object
743 * or an executable.
745 * The "addr" reference parameter is in/out. On entry, it specifies
746 * the address where a shared object should be loaded. If the file is
747 * an executable, this value is ignored. On exit, "addr" specifies
748 * where the file was actually loaded.
750 * The "entry" reference parameter is out only. On exit, it specifies
751 * the entry point for the loaded file.
754 __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
760 struct image_params image_params;
762 const Elf_Ehdr *hdr = NULL;
763 const Elf_Phdr *phdr = NULL;
764 struct nameidata *nd;
766 struct image_params *imgp;
768 u_long base_addr = 0;
771 #ifdef CAPABILITY_MODE
773 * XXXJA: This check can go away once we are sufficiently confident
774 * that the checks in namei() are correct.
776 if (IN_CAPABILITY_MODE(curthread))
780 tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK | M_ZERO);
782 attr = &tempdata->attr;
783 imgp = &tempdata->image_params;
786 * Initialize part of the common data
791 NDINIT(nd, LOOKUP, ISOPEN | FOLLOW | LOCKSHARED | LOCKLEAF,
792 UIO_SYSSPACE, file, curthread);
793 if ((error = namei(nd)) != 0) {
797 NDFREE(nd, NDF_ONLY_PNBUF);
798 imgp->vp = nd->ni_vp;
801 * Check permissions, modes, uid, etc on the file, and "open" it.
803 error = exec_check_permissions(imgp);
807 error = exec_map_first_page(imgp);
811 imgp->object = nd->ni_vp->v_object;
813 hdr = (const Elf_Ehdr *)imgp->image_header;
814 if ((error = __elfN(check_header)(hdr)) != 0)
816 if (hdr->e_type == ET_DYN)
818 else if (hdr->e_type == ET_EXEC)
825 /* Only support headers that fit within first page for now */
826 if (!__elfN(phdr_in_zero_page)(hdr)) {
831 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
832 if (!aligned(phdr, Elf_Addr)) {
837 error = __elfN(load_sections)(imgp, hdr, phdr, rbase, &base_addr);
842 *entry = (unsigned long)hdr->e_entry + rbase;
846 exec_unmap_first_page(imgp);
850 VOP_UNSET_TEXT_CHECKED(nd->ni_vp);
853 free(tempdata, M_TEMP);
859 __CONCAT(rnd_, __elfN(base))(vm_map_t map __unused, u_long minv, u_long maxv,
864 MPASS(vm_map_min(map) <= minv);
865 MPASS(maxv <= vm_map_max(map));
867 MPASS(minv + align < maxv);
868 arc4rand(&rbase, sizeof(rbase), 0);
869 res = roundup(minv, (u_long)align) + rbase % (maxv - minv);
870 res &= ~((u_long)align - 1);
874 ("res %#lx < minv %#lx, maxv %#lx rbase %#lx",
875 res, minv, maxv, rbase));
877 ("res %#lx > maxv %#lx, minv %#lx rbase %#lx",
878 res, maxv, minv, rbase));
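/*
 * Worked example of the randomization above: with minv = 0x10000,
 * maxv = 0x50000, align = 0x1000 and a random rbase of 0x12345,
 *	res = roundup(0x10000, 0x1000) + 0x12345 % 0x40000 = 0x22345
 * which the mask ~(align - 1) rounds down to 0x22000; the assertions
 * then confirm the result stays within [minv, maxv).
 */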
883 __elfN(enforce_limits)(struct image_params *imgp, const Elf_Ehdr *hdr,
884 const Elf_Phdr *phdr, u_long et_dyn_addr)
886 struct vmspace *vmspace;
888 u_long text_size, data_size, total_size, text_addr, data_addr;
889 u_long seg_size, seg_addr;
893 text_size = data_size = total_size = text_addr = data_addr = 0;
895 for (i = 0; i < hdr->e_phnum; i++) {
896 if (phdr[i].p_type != PT_LOAD || phdr[i].p_memsz == 0)
899 seg_addr = trunc_page(phdr[i].p_vaddr + et_dyn_addr);
900 seg_size = round_page(phdr[i].p_memsz +
901 phdr[i].p_vaddr + et_dyn_addr - seg_addr);
904 * Make the largest executable segment the official
905 * text segment and all others data.
907 * Note that obreak() assumes that data_addr + data_size == end
908 * of data load area, and the ELF file format expects segments
909 * to be sorted by address. If multiple data segments exist,
910 * the last one will be used.
913 if ((phdr[i].p_flags & PF_X) != 0 && text_size < seg_size) {
914 text_size = seg_size;
915 text_addr = seg_addr;
917 data_size = seg_size;
918 data_addr = seg_addr;
920 total_size += seg_size;
923 if (data_addr == 0 && data_size == 0) {
924 data_addr = text_addr;
925 data_size = text_size;
929 * Check limits. It should be safe to check the
930 * limits after loading the segments since we do
931 * not actually fault in all the segments' pages.
933 PROC_LOCK(imgp->proc);
934 if (data_size > lim_cur_proc(imgp->proc, RLIMIT_DATA))
935 err_str = "Data segment size exceeds process limit";
936 else if (text_size > maxtsiz)
937 err_str = "Text segment size exceeds system limit";
938 else if (total_size > lim_cur_proc(imgp->proc, RLIMIT_VMEM))
939 err_str = "Total segment size exceeds process limit";
940 else if (racct_set(imgp->proc, RACCT_DATA, data_size) != 0)
941 err_str = "Data segment size exceeds resource limit";
942 else if (racct_set(imgp->proc, RACCT_VMEM, total_size) != 0)
943 err_str = "Total segment size exceeds resource limit";
944 PROC_UNLOCK(imgp->proc);
945 if (err_str != NULL) {
946 uprintf("%s\n", err_str);
950 vmspace = imgp->proc->p_vmspace;
951 vmspace->vm_tsize = text_size >> PAGE_SHIFT;
952 vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
953 vmspace->vm_dsize = data_size >> PAGE_SHIFT;
954 vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
960 __elfN(get_interp)(struct image_params *imgp, const Elf_Phdr *phdr,
961 char **interpp, bool *free_interpp)
965 int error, interp_name_len;
967 KASSERT(phdr->p_type == PT_INTERP,
968 ("%s: p_type %u != PT_INTERP", __func__, phdr->p_type));
969 ASSERT_VOP_LOCKED(imgp->vp, __func__);
973 /* Path to interpreter */
974 if (phdr->p_filesz < 2 || phdr->p_filesz > MAXPATHLEN) {
975 uprintf("Invalid PT_INTERP\n");
979 interp_name_len = phdr->p_filesz;
980 if (phdr->p_offset > PAGE_SIZE ||
981 interp_name_len > PAGE_SIZE - phdr->p_offset) {
983 * The vnode lock might be needed by the pagedaemon to
984 * clean pages owned by the vnode. Do not allow sleep
985 * waiting for memory with the vnode locked, instead
986 * try non-sleepable allocation first, and if it
987 * fails, go to the slow path where we drop the lock
988 * and do M_WAITOK. A text reference prevents
989 * modifications to the vnode content.
991 interp = malloc(interp_name_len + 1, M_TEMP, M_NOWAIT);
992 if (interp == NULL) {
993 VOP_UNLOCK(imgp->vp);
994 interp = malloc(interp_name_len + 1, M_TEMP, M_WAITOK);
995 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
998 error = vn_rdwr(UIO_READ, imgp->vp, interp,
999 interp_name_len, phdr->p_offset,
1000 UIO_SYSSPACE, IO_NODELOCKED, td->td_ucred,
1003 free(interp, M_TEMP);
1004 uprintf("i/o error PT_INTERP %d\n", error);
1007 interp[interp_name_len] = '\0';
1010 *free_interpp = true;
1014 interp = __DECONST(char *, imgp->image_header) + phdr->p_offset;
1015 if (interp[interp_name_len - 1] != '\0') {
1016 uprintf("Invalid PT_INTERP\n");
1021 *free_interpp = false;
1026 __elfN(load_interp)(struct image_params *imgp, const Elf_Brandinfo *brand_info,
1027 const char *interp, u_long *addr, u_long *entry)
1032 if (brand_info->emul_path != NULL &&
1033 brand_info->emul_path[0] != '\0') {
1034 path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
1035 snprintf(path, MAXPATHLEN, "%s%s",
1036 brand_info->emul_path, interp);
1037 error = __elfN(load_file)(imgp->proc, path, addr, entry);
1043 if (brand_info->interp_newpath != NULL &&
1044 (brand_info->interp_path == NULL ||
1045 strcmp(interp, brand_info->interp_path) == 0)) {
1046 error = __elfN(load_file)(imgp->proc,
1047 brand_info->interp_newpath, addr, entry);
1052 error = __elfN(load_file)(imgp->proc, interp, addr, entry);
1056 uprintf("ELF interpreter %s not found, error %d\n", interp, error);
1061 * Impossible et_dyn_addr initial value indicating that the real base
1062 * must be calculated later with some randomization applied.
1064 #define ET_DYN_ADDR_RAND 1
1067 __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
1070 const Elf_Ehdr *hdr;
1071 const Elf_Phdr *phdr;
1072 Elf_Auxargs *elf_auxargs;
1073 struct vmspace *vmspace;
1076 Elf_Brandinfo *brand_info;
1077 struct sysentvec *sv;
1078 u_long addr, baddr, et_dyn_addr, entry, proghdr;
1079 u_long maxalign, mapsz, maxv, maxv1;
1085 hdr = (const Elf_Ehdr *)imgp->image_header;
1088 * Do we have a valid ELF header?
1090 * Only allow ET_EXEC & ET_DYN here, reject ET_DYN later
1091 * if a particular brand doesn't support it.
1093 if (__elfN(check_header)(hdr) != 0 ||
1094 (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN))
1098 * From here on down, we return an errno, not -1, as we've
1099 * detected an ELF file.
1102 if (!__elfN(phdr_in_zero_page)(hdr)) {
1103 uprintf("Program headers not in the first page\n");
1106 phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
1107 if (!aligned(phdr, Elf_Addr)) {
1108 uprintf("Unaligned program headers\n");
1116 entry = proghdr = 0;
1118 free_interp = false;
1120 maxalign = PAGE_SIZE;
1123 for (i = 0; i < hdr->e_phnum; i++) {
1124 switch (phdr[i].p_type) {
1127 baddr = phdr[i].p_vaddr;
1128 if (phdr[i].p_align > maxalign)
1129 maxalign = phdr[i].p_align;
1130 mapsz += phdr[i].p_memsz;
1134 * If this segment contains the program headers,
1135 * remember their virtual address for the AT_PHDR
1136 * aux entry. Static binaries don't usually include
1137 * a PT_PHDR entry.
1139 if (phdr[i].p_offset == 0 &&
1140 hdr->e_phoff + hdr->e_phnum * hdr->e_phentsize
1141 <= phdr[i].p_filesz)
1142 proghdr = phdr[i].p_vaddr + hdr->e_phoff;
1145 /* Path to interpreter */
1146 if (interp != NULL) {
1147 uprintf("Multiple PT_INTERP headers\n");
1151 error = __elfN(get_interp)(imgp, &phdr[i], &interp,
1157 if (__elfN(nxstack))
1159 __elfN(trans_prot)(phdr[i].p_flags);
1160 imgp->stack_sz = phdr[i].p_memsz;
1162 case PT_PHDR: /* Program header table info */
1163 proghdr = phdr[i].p_vaddr;
1168 brand_info = __elfN(get_brandinfo)(imgp, interp, &osrel, &fctl0);
1169 if (brand_info == NULL) {
1170 uprintf("ELF binary type \"%u\" not known.\n",
1171 hdr->e_ident[EI_OSABI]);
1175 sv = brand_info->sysvec;
1177 if (hdr->e_type == ET_DYN) {
1178 if ((brand_info->flags & BI_CAN_EXEC_DYN) == 0) {
1179 uprintf("Cannot execute shared object\n");
1184 * Honour the base load address from the dso if it is
1185 * non-zero for some reason.
1188 if ((sv->sv_flags & SV_ASLR) == 0 ||
1189 (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0)
1190 et_dyn_addr = __elfN(pie_base);
1191 else if ((__elfN(pie_aslr_enabled) &&
1192 (imgp->proc->p_flag2 & P2_ASLR_DISABLE) == 0) ||
1193 (imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0)
1194 et_dyn_addr = ET_DYN_ADDR_RAND;
1196 et_dyn_addr = __elfN(pie_base);
1201 * Avoid a possible deadlock if the current address space is destroyed
1202 * and that address space maps the locked vnode. In the common case,
1203 * the locked vnode's v_usecount is decremented but remains greater
1204 * than zero. Consequently, the vnode lock is not needed by vrele().
1205 * However, in cases where the vnode lock is external, such as nullfs,
1206 * v_usecount may become zero.
1208 * The VV_TEXT flag prevents modifications to the executable while
1209 * the vnode is unlocked.
1211 VOP_UNLOCK(imgp->vp);
1214 * Decide whether to enable randomization of user mappings.
1215 * First, reset user preferences for the setid binaries.
1216 * Then, account for the support of the randomization by the
1217 * ABI, by user preferences, and make special treatment for
1218 * PIE binaries.
1220 if (imgp->credential_setid) {
1221 PROC_LOCK(imgp->proc);
1222 imgp->proc->p_flag2 &= ~(P2_ASLR_ENABLE | P2_ASLR_DISABLE |
1223 P2_WXORX_DISABLE | P2_WXORX_ENABLE_EXEC);
1224 PROC_UNLOCK(imgp->proc);
1226 if ((sv->sv_flags & SV_ASLR) == 0 ||
1227 (imgp->proc->p_flag2 & P2_ASLR_DISABLE) != 0 ||
1228 (fctl0 & NT_FREEBSD_FCTL_ASLR_DISABLE) != 0) {
1229 KASSERT(et_dyn_addr != ET_DYN_ADDR_RAND,
1230 ("et_dyn_addr == RAND and !ASLR"));
1231 } else if ((imgp->proc->p_flag2 & P2_ASLR_ENABLE) != 0 ||
1232 (__elfN(aslr_enabled) && hdr->e_type == ET_EXEC) ||
1233 et_dyn_addr == ET_DYN_ADDR_RAND) {
1234 imgp->map_flags |= MAP_ASLR;
1236 * If user does not care about sbrk, utilize the bss
1237 * grow region for mappings as well. We can select
1238 * the base for the image anywhere and still not suffer
1239 * from fragmentation.
1241 if (!__elfN(aslr_honor_sbrk) ||
1242 (imgp->proc->p_flag2 & P2_ASLR_IGNSTART) != 0)
1243 imgp->map_flags |= MAP_ASLR_IGNSTART;
1246 if ((!__elfN(allow_wx) && (fctl0 & NT_FREEBSD_FCTL_WXNEEDED) == 0 &&
1247 (imgp->proc->p_flag2 & P2_WXORX_DISABLE) == 0) ||
1248 (imgp->proc->p_flag2 & P2_WXORX_ENABLE_EXEC) != 0)
1249 imgp->map_flags |= MAP_WXORX;
1251 error = exec_new_vmspace(imgp, sv);
1252 vmspace = imgp->proc->p_vmspace;
1253 map = &vmspace->vm_map;
1255 imgp->proc->p_sysent = sv;
1257 maxv = vm_map_max(map) - lim_max(td, RLIMIT_STACK);
1258 if (et_dyn_addr == ET_DYN_ADDR_RAND) {
1259 KASSERT((map->flags & MAP_ASLR) != 0,
1260 ("ET_DYN_ADDR_RAND but !MAP_ASLR"));
1261 et_dyn_addr = __CONCAT(rnd_, __elfN(base))(map,
1262 vm_map_min(map) + mapsz + lim_max(td, RLIMIT_DATA),
1263 /* reserve half of the address space for the interpreter */
1264 maxv / 2, 1UL << flsl(maxalign));
1267 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
1271 error = __elfN(load_sections)(imgp, hdr, phdr, et_dyn_addr, NULL);
1275 error = __elfN(enforce_limits)(imgp, hdr, phdr, et_dyn_addr);
1279 entry = (u_long)hdr->e_entry + et_dyn_addr;
1282 * We load the dynamic linker where a userland call
1283 * to mmap(0, ...) would put it. The rationale behind this
1284 * calculation is that it leaves room for the heap to grow to
1285 * its maximum allowed size.
1287 addr = round_page((vm_offset_t)vmspace->vm_daddr + lim_max(td,
1289 if ((map->flags & MAP_ASLR) != 0) {
1290 maxv1 = maxv / 2 + addr / 2;
1291 MPASS(maxv1 >= addr); /* No overflow */
1292 map->anon_loc = __CONCAT(rnd_, __elfN(base))(map, addr, maxv1,
1293 (MAXPAGESIZES > 1 && pagesizes[1] != 0) ?
1294 pagesizes[1] : pagesizes[0]);
1296 map->anon_loc = addr;
1299 imgp->entry_addr = entry;
1301 if (interp != NULL) {
1302 VOP_UNLOCK(imgp->vp);
1303 if ((map->flags & MAP_ASLR) != 0) {
1304 /* Assume that interpreter fits into 1/4 of AS */
1305 maxv1 = maxv / 2 + addr / 2;
1306 MPASS(maxv1 >= addr); /* No overflow */
1307 addr = __CONCAT(rnd_, __elfN(base))(map, addr,
1310 error = __elfN(load_interp)(imgp, brand_info, interp, &addr,
1312 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
1319 * Construct auxargs table (used by the copyout_auxargs routine)
1321 elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_NOWAIT);
1322 if (elf_auxargs == NULL) {
1323 VOP_UNLOCK(imgp->vp);
1324 elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
1325 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
1327 elf_auxargs->execfd = -1;
1328 elf_auxargs->phdr = proghdr + et_dyn_addr;
1329 elf_auxargs->phent = hdr->e_phentsize;
1330 elf_auxargs->phnum = hdr->e_phnum;
1331 elf_auxargs->pagesz = PAGE_SIZE;
1332 elf_auxargs->base = addr;
1333 elf_auxargs->flags = 0;
1334 elf_auxargs->entry = entry;
1335 elf_auxargs->hdr_eflags = hdr->e_flags;
1337 imgp->auxargs = elf_auxargs;
1338 imgp->interpreted = 0;
1339 imgp->reloc_base = addr;
1340 imgp->proc->p_osrel = osrel;
1341 imgp->proc->p_fctl0 = fctl0;
1342 imgp->proc->p_elf_machine = hdr->e_machine;
1343 imgp->proc->p_elf_flags = hdr->e_flags;
1347 free(interp, M_TEMP);
1351 #define elf_suword __CONCAT(suword, __ELF_WORD_SIZE)
1354 __elfN(freebsd_copyout_auxargs)(struct image_params *imgp, uintptr_t base)
1356 Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
1357 Elf_Auxinfo *argarray, *pos;
1360 argarray = pos = malloc(AT_COUNT * sizeof(*pos), M_TEMP,
1363 if (args->execfd != -1)
1364 AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
1365 AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
1366 AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
1367 AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
1368 AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
1369 AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
1370 AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
1371 AUXARGS_ENTRY(pos, AT_BASE, args->base);
1372 AUXARGS_ENTRY(pos, AT_EHDRFLAGS, args->hdr_eflags);
1373 if (imgp->execpathp != 0)
1374 AUXARGS_ENTRY_PTR(pos, AT_EXECPATH, imgp->execpathp);
1375 AUXARGS_ENTRY(pos, AT_OSRELDATE,
1376 imgp->proc->p_ucred->cr_prison->pr_osreldate);
1377 if (imgp->canary != 0) {
1378 AUXARGS_ENTRY_PTR(pos, AT_CANARY, imgp->canary);
1379 AUXARGS_ENTRY(pos, AT_CANARYLEN, imgp->canarylen);
1381 AUXARGS_ENTRY(pos, AT_NCPUS, mp_ncpus);
1382 if (imgp->pagesizes != 0) {
1383 AUXARGS_ENTRY_PTR(pos, AT_PAGESIZES, imgp->pagesizes);
1384 AUXARGS_ENTRY(pos, AT_PAGESIZESLEN, imgp->pagesizeslen);
1386 if (imgp->sysent->sv_timekeep_base != 0) {
1387 AUXARGS_ENTRY(pos, AT_TIMEKEEP,
1388 imgp->sysent->sv_timekeep_base);
1390 AUXARGS_ENTRY(pos, AT_STACKPROT, imgp->sysent->sv_shared_page_obj
1391 != NULL && imgp->stack_prot != 0 ? imgp->stack_prot :
1392 imgp->sysent->sv_stackprot);
1393 if (imgp->sysent->sv_hwcap != NULL)
1394 AUXARGS_ENTRY(pos, AT_HWCAP, *imgp->sysent->sv_hwcap);
1395 if (imgp->sysent->sv_hwcap2 != NULL)
1396 AUXARGS_ENTRY(pos, AT_HWCAP2, *imgp->sysent->sv_hwcap2);
1397 AUXARGS_ENTRY(pos, AT_BSDFLAGS, __elfN(sigfastblock) ?
1398 ELF_BSDF_SIGFASTBLK : 0);
1399 AUXARGS_ENTRY(pos, AT_ARGC, imgp->args->argc);
1400 AUXARGS_ENTRY_PTR(pos, AT_ARGV, imgp->argv);
1401 AUXARGS_ENTRY(pos, AT_ENVC, imgp->args->envc);
1402 AUXARGS_ENTRY_PTR(pos, AT_ENVV, imgp->envv);
1403 AUXARGS_ENTRY_PTR(pos, AT_PS_STRINGS, imgp->ps_strings);
1404 if (imgp->sysent->sv_fxrng_gen_base != 0)
1405 AUXARGS_ENTRY(pos, AT_FXRNG, imgp->sysent->sv_fxrng_gen_base);
1406 AUXARGS_ENTRY(pos, AT_NULL, 0);
1408 free(imgp->auxargs, M_TEMP);
1409 imgp->auxargs = NULL;
1410 KASSERT(pos - argarray <= AT_COUNT, ("Too many auxargs"));
1412 error = copyout(argarray, (void *)base, sizeof(*argarray) * AT_COUNT);
1413 free(argarray, M_TEMP);
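/*
 * Illustrative userland sketch (not part of this file): consuming one
 * of the aux entries written above via elf_aux_info(3).
 *
 *	#include <sys/auxv.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int pagesz;
 *
 *		if (elf_aux_info(AT_PAGESZ, &pagesz, sizeof(pagesz)) != 0)
 *			return (1);
 *		printf("AT_PAGESZ: %d\n", pagesz);
 *		return (0);
 *	}
 */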
1418 __elfN(freebsd_fixup)(uintptr_t *stack_base, struct image_params *imgp)
1422 base = (Elf_Addr *)*stack_base;
1424 if (elf_suword(base, imgp->args->argc) == -1)
1426 *stack_base = (uintptr_t)base;
1431 * Code for generating ELF core dumps.
1434 typedef void (*segment_callback)(vm_map_entry_t, void *);
1436 /* Closure for cb_put_phdr(). */
1437 struct phdr_closure {
1438 Elf_Phdr *phdr; /* Program header to fill in */
1439 Elf_Off offset; /* Offset of segment in core file */
1442 /* Closure for cb_size_segment(). */
1443 struct sseg_closure {
1444 int count; /* Count of writable segments. */
1445 size_t size; /* Total size of all writable segments. */
1448 typedef void (*outfunc_t)(void *, struct sbuf *, size_t *);
1451 int type; /* Note type. */
1452 outfunc_t outfunc; /* Output function. */
1453 void *outarg; /* Argument for the output function. */
1454 size_t outsize; /* Output size. */
1455 TAILQ_ENTRY(note_info) link; /* Link to the next note info. */
1458 TAILQ_HEAD(note_info_list, note_info);
1460 /* Coredump output parameters. */
1461 struct coredump_params {
1463 struct ucred *active_cred;
1464 struct ucred *file_cred;
1467 struct compressor *comp;
1470 extern int compress_user_cores;
1471 extern int compress_user_cores_level;
1473 static void cb_put_phdr(vm_map_entry_t, void *);
1474 static void cb_size_segment(vm_map_entry_t, void *);
1475 static int core_write(struct coredump_params *, const void *, size_t, off_t,
1476 enum uio_seg, size_t *);
1477 static void each_dumpable_segment(struct thread *, segment_callback, void *,
1479 static int __elfN(corehdr)(struct coredump_params *, int, void *, size_t,
1480 struct note_info_list *, size_t, int);
1481 static void __elfN(prepare_notes)(struct thread *, struct note_info_list *,
1483 static void __elfN(puthdr)(struct thread *, void *, size_t, int, size_t, int);
1484 static void __elfN(putnote)(struct note_info *, struct sbuf *);
1485 static size_t register_note(struct note_info_list *, int, outfunc_t, void *);
1486 static int sbuf_drain_core_output(void *, const char *, int);
1488 static void __elfN(note_fpregset)(void *, struct sbuf *, size_t *);
1489 static void __elfN(note_prpsinfo)(void *, struct sbuf *, size_t *);
1490 static void __elfN(note_prstatus)(void *, struct sbuf *, size_t *);
1491 static void __elfN(note_threadmd)(void *, struct sbuf *, size_t *);
1492 static void __elfN(note_thrmisc)(void *, struct sbuf *, size_t *);
1493 static void __elfN(note_ptlwpinfo)(void *, struct sbuf *, size_t *);
1494 static void __elfN(note_procstat_auxv)(void *, struct sbuf *, size_t *);
1495 static void __elfN(note_procstat_proc)(void *, struct sbuf *, size_t *);
1496 static void __elfN(note_procstat_psstrings)(void *, struct sbuf *, size_t *);
1497 static void note_procstat_files(void *, struct sbuf *, size_t *);
1498 static void note_procstat_groups(void *, struct sbuf *, size_t *);
1499 static void note_procstat_osrel(void *, struct sbuf *, size_t *);
1500 static void note_procstat_rlimit(void *, struct sbuf *, size_t *);
1501 static void note_procstat_umask(void *, struct sbuf *, size_t *);
1502 static void note_procstat_vmmap(void *, struct sbuf *, size_t *);
1505 * Write out a core segment to the compression stream.
1508 compress_chunk(struct coredump_params *p, char *base, char *buf, u_int len)
1514 chunk_len = MIN(len, CORE_BUF_SIZE);
1517 * We can get an EFAULT error here.
1518 * In that case zero out the current chunk of the segment.
1520 error = copyin(base, buf, chunk_len);
1522 bzero(buf, chunk_len);
1523 error = compressor_write(p->comp, buf, chunk_len);
1533 core_compressed_write(void *base, size_t len, off_t offset, void *arg)
1536 return (core_write((struct coredump_params *)arg, base, len, offset,
1537 UIO_SYSSPACE, NULL));
1541 core_write(struct coredump_params *p, const void *base, size_t len,
1542 off_t offset, enum uio_seg seg, size_t *resid)
1545 return (vn_rdwr_inchunks(UIO_WRITE, p->vp, __DECONST(void *, base),
1546 len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED,
1547 p->active_cred, p->file_cred, resid, p->td));
1550 extern int core_dump_can_intr;
1553 core_output(char *base, size_t len, off_t offset, struct coredump_params *p,
1558 size_t resid, runlen;
1562 KASSERT((uintptr_t)base % PAGE_SIZE == 0,
1563 ("%s: user address %p is not page-aligned", __func__, base));
1565 if (p->comp != NULL)
1566 return (compress_chunk(p, base, tmpbuf, len));
1568 map = &p->td->td_proc->p_vmspace->vm_map;
1569 for (; len > 0; base += runlen, offset += runlen, len -= runlen) {
1571 * Attempt to page in all virtual pages in the range. If a
1572 * virtual page is not backed by the pager, it is represented as
1573 * a hole in the file. This can occur with zero-filled
1574 * anonymous memory or truncated files, for example.
1576 for (runlen = 0; runlen < len; runlen += PAGE_SIZE) {
1577 if (core_dump_can_intr && curproc_sigkilled())
1579 error = vm_fault(map, (uintptr_t)base + runlen,
1580 VM_PROT_READ, VM_FAULT_NOFILL, NULL);
1582 success = error == KERN_SUCCESS;
1583 else if ((error == KERN_SUCCESS) != success)
1588 error = core_write(p, base, runlen, offset,
1589 UIO_USERSPACE, &resid);
1591 if (error != EFAULT)
1595 * EFAULT may be returned if the user mapping
1596 * could not be accessed, e.g., because a mapped
1597 * file has been truncated. Skip the page if no
1598 * progress was made, to protect against a
1599 * hypothetical scenario where vm_fault() was
1600 * successful but core_write() returns EFAULT
1601 * anyway.
1611 error = vn_start_write(p->vp, &mp, V_WAIT);
1614 vn_lock(p->vp, LK_EXCLUSIVE | LK_RETRY);
1615 error = vn_truncate_locked(p->vp, offset + runlen,
1616 false, p->td->td_ucred);
1618 vn_finished_write(mp);
1627 * Drain into a core file.
1630 sbuf_drain_core_output(void *arg, const char *data, int len)
1632 struct coredump_params *p;
1635 p = (struct coredump_params *)arg;
1638 * Some kern_proc out routines that print to this sbuf may
1639 * call us with the process lock held. Draining with the
1640 * non-sleepable lock held is unsafe. The lock is needed for
1641 * those routines when dumping a live process. In our case we
1642 * can safely release the lock before draining and acquire
1643 * it again after.
1645 locked = PROC_LOCKED(p->td->td_proc);
1647 PROC_UNLOCK(p->td->td_proc);
1648 if (p->comp != NULL)
1649 error = compressor_write(p->comp, __DECONST(char *, data), len);
1651 error = core_write(p, __DECONST(void *, data), len, p->offset,
1652 UIO_SYSSPACE, NULL);
1654 PROC_LOCK(p->td->td_proc);
1662 __elfN(coredump)(struct thread *td, struct vnode *vp, off_t limit, int flags)
1664 struct ucred *cred = td->td_ucred;
1665 int compm, error = 0;
1666 struct sseg_closure seginfo;
1667 struct note_info_list notelst;
1668 struct coredump_params params;
1669 struct note_info *ninfo;
1671 size_t hdrsize, notesz, coresize;
1675 TAILQ_INIT(¬elst);
1677 /* Size the program segments. */
1680 each_dumpable_segment(td, cb_size_segment, &seginfo, flags);
1683 * Collect info about the core file header area.
1685 hdrsize = sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * (1 + seginfo.count);
1686 if (seginfo.count + 1 >= PN_XNUM)
1687 hdrsize += sizeof(Elf_Shdr);
1688 __elfN(prepare_notes)(td, ¬elst, ¬esz);
1689 coresize = round_page(hdrsize + notesz) + seginfo.size;
1691 /* Set up core dump parameters. */
1693 params.active_cred = cred;
1694 params.file_cred = NOCRED;
1701 PROC_LOCK(td->td_proc);
1702 error = racct_add(td->td_proc, RACCT_CORE, coresize);
1703 PROC_UNLOCK(td->td_proc);
1710 if (coresize >= limit) {
1715 /* Create a compression stream if necessary. */
1716 compm = compress_user_cores;
1717 if ((flags & (SVC_PT_COREDUMP | SVC_NOCOMPRESS)) == SVC_PT_COREDUMP &&
1719 compm = COMPRESS_GZIP;
1721 params.comp = compressor_init(core_compressed_write,
1722 compm, CORE_BUF_SIZE,
1723 compress_user_cores_level, ¶ms);
1724 if (params.comp == NULL) {
1728 tmpbuf = malloc(CORE_BUF_SIZE, M_TEMP, M_WAITOK | M_ZERO);
1732 * Allocate memory for building the header, fill it up,
1733 * and write it out following the notes.
1735 hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
1736 error = __elfN(corehdr)(¶ms, seginfo.count, hdr, hdrsize, ¬elst,
1739 /* Write the contents of all of the writable segments. */
1745 php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
1746 offset = round_page(hdrsize + notesz);
1747 for (i = 0; i < seginfo.count; i++) {
1748 error = core_output((char *)(uintptr_t)php->p_vaddr,
1749 php->p_filesz, offset, ¶ms, tmpbuf);
1752 offset += php->p_filesz;
1755 if (error == 0 && params.comp != NULL)
1756 error = compressor_flush(params.comp);
1760 "Failed to write core file for process %s (error %d)\n",
1761 curproc->p_comm, error);
1765 free(tmpbuf, M_TEMP);
1766 if (params.comp != NULL)
1767 compressor_fini(params.comp);
1768 while ((ninfo = TAILQ_FIRST(¬elst)) != NULL) {
1769 TAILQ_REMOVE(¬elst, ninfo, link);
1770 free(ninfo, M_TEMP);
1779 * A callback for each_dumpable_segment() to write out the segment's
1780 * program header entry.
1783 cb_put_phdr(vm_map_entry_t entry, void *closure)
1785 struct phdr_closure *phc = (struct phdr_closure *)closure;
1786 Elf_Phdr *phdr = phc->phdr;
1788 phc->offset = round_page(phc->offset);
1790 phdr->p_type = PT_LOAD;
1791 phdr->p_offset = phc->offset;
1792 phdr->p_vaddr = entry->start;
1794 phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
1795 phdr->p_align = PAGE_SIZE;
1796 phdr->p_flags = __elfN(untrans_prot)(entry->protection);
1798 phc->offset += phdr->p_filesz;
1803 * A callback for each_dumpable_segment() to gather information about
1804 * the number of segments and their total size.
1807 cb_size_segment(vm_map_entry_t entry, void *closure)
1809 struct sseg_closure *ssc = (struct sseg_closure *)closure;
1812 ssc->size += entry->end - entry->start;
1816 * For each writable segment in the process's memory map, call the given
1817 * function with a pointer to the map entry and some arbitrary
1818 * caller-supplied data.
1821 each_dumpable_segment(struct thread *td, segment_callback func, void *closure,
1824 struct proc *p = td->td_proc;
1825 vm_map_t map = &p->p_vmspace->vm_map;
1826 vm_map_entry_t entry;
1827 vm_object_t backing_object, object;
1830 vm_map_lock_read(map);
1831 VM_MAP_ENTRY_FOREACH(entry, map) {
1833 * Don't dump inaccessible mappings, deal with legacy
1834 * coredump mode.
1836 * Note that read-only segments related to the elf binary
1837 * are marked MAP_ENTRY_NOCOREDUMP now so we no longer
1838 * need to arbitrarily ignore such segments.
1840 if ((flags & SVC_ALL) == 0) {
1841 if (elf_legacy_coredump) {
1842 if ((entry->protection & VM_PROT_RW) !=
1846 if ((entry->protection & VM_PROT_ALL) == 0)
1852 * Don't include a memory segment in the coredump if
1853 * MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
1854 * madvise(2). Do not dump submaps (i.e. parts of the
1855 * kernel map).
1857 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
1859 if ((entry->eflags & MAP_ENTRY_NOCOREDUMP) != 0 &&
1860 (flags & SVC_ALL) == 0)
1862 if ((object = entry->object.vm_object) == NULL)
1865 /* Ignore memory-mapped devices and such things. */
1866 VM_OBJECT_RLOCK(object);
1867 while ((backing_object = object->backing_object) != NULL) {
1868 VM_OBJECT_RLOCK(backing_object);
1869 VM_OBJECT_RUNLOCK(object);
1870 object = backing_object;
1872 ignore_entry = (object->flags & OBJ_FICTITIOUS) != 0;
1873 VM_OBJECT_RUNLOCK(object);
1877 (*func)(entry, closure);
1879 vm_map_unlock_read(map);
1883 * Write the core file header to the file, including padding up to
1884 * the page boundary.
1887 __elfN(corehdr)(struct coredump_params *p, int numsegs, void *hdr,
1888 size_t hdrsize, struct note_info_list *notelst, size_t notesz,
1891 struct note_info *ninfo;
1895 /* Fill in the header. */
1896 bzero(hdr, hdrsize);
1897 __elfN(puthdr)(p->td, hdr, hdrsize, numsegs, notesz, flags);
1899 sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN);
1900 sbuf_set_drain(sb, sbuf_drain_core_output, p);
1901 sbuf_start_section(sb, NULL);
1902 sbuf_bcat(sb, hdr, hdrsize);
1903 TAILQ_FOREACH(ninfo, notelst, link)
1904 __elfN(putnote)(ninfo, sb);
1905 /* Align up to a page boundary for the program segments. */
1906 sbuf_end_section(sb, -1, PAGE_SIZE, 0);
1907 error = sbuf_finish(sb);
1914 __elfN(prepare_notes)(struct thread *td, struct note_info_list *list,
1924 size += register_note(list, NT_PRPSINFO, __elfN(note_prpsinfo), p);
1927 * To have the debugger select the right thread (LWP) as the initial
1928 * thread, we dump the state of the thread passed to us in td first.
1929 * This is the thread that causes the core dump and is thus likely to
1930 * be the right thread one wants to have selected in the debugger.
1933 while (thr != NULL) {
1934 size += register_note(list, NT_PRSTATUS,
1935 __elfN(note_prstatus), thr);
1936 size += register_note(list, NT_FPREGSET,
1937 __elfN(note_fpregset), thr);
1938 size += register_note(list, NT_THRMISC,
1939 __elfN(note_thrmisc), thr);
1940 size += register_note(list, NT_PTLWPINFO,
1941 __elfN(note_ptlwpinfo), thr);
1942 size += register_note(list, -1,
1943 __elfN(note_threadmd), thr);
1945 thr = (thr == td) ? TAILQ_FIRST(&p->p_threads) :
1946 TAILQ_NEXT(thr, td_plist);
1948 thr = TAILQ_NEXT(thr, td_plist);
1951 size += register_note(list, NT_PROCSTAT_PROC,
1952 __elfN(note_procstat_proc), p);
1953 size += register_note(list, NT_PROCSTAT_FILES,
1954 note_procstat_files, p);
1955 size += register_note(list, NT_PROCSTAT_VMMAP,
1956 note_procstat_vmmap, p);
1957 size += register_note(list, NT_PROCSTAT_GROUPS,
1958 note_procstat_groups, p);
1959 size += register_note(list, NT_PROCSTAT_UMASK,
1960 note_procstat_umask, p);
1961 size += register_note(list, NT_PROCSTAT_RLIMIT,
1962 note_procstat_rlimit, p);
1963 size += register_note(list, NT_PROCSTAT_OSREL,
1964 note_procstat_osrel, p);
1965 size += register_note(list, NT_PROCSTAT_PSSTRINGS,
1966 __elfN(note_procstat_psstrings), p);
1967 size += register_note(list, NT_PROCSTAT_AUXV,
1968 __elfN(note_procstat_auxv), p);
1974 __elfN(puthdr)(struct thread *td, void *hdr, size_t hdrsize, int numsegs,
1975 size_t notesz, int flags)
1980 struct phdr_closure phc;
1982 ehdr = (Elf_Ehdr *)hdr;
1984 ehdr->e_ident[EI_MAG0] = ELFMAG0;
1985 ehdr->e_ident[EI_MAG1] = ELFMAG1;
1986 ehdr->e_ident[EI_MAG2] = ELFMAG2;
1987 ehdr->e_ident[EI_MAG3] = ELFMAG3;
1988 ehdr->e_ident[EI_CLASS] = ELF_CLASS;
1989 ehdr->e_ident[EI_DATA] = ELF_DATA;
1990 ehdr->e_ident[EI_VERSION] = EV_CURRENT;
1991 ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
1992 ehdr->e_ident[EI_ABIVERSION] = 0;
1993 ehdr->e_ident[EI_PAD] = 0;
1994 ehdr->e_type = ET_CORE;
1995 ehdr->e_machine = td->td_proc->p_elf_machine;
1996 ehdr->e_version = EV_CURRENT;
1998 ehdr->e_phoff = sizeof(Elf_Ehdr);
1999 ehdr->e_flags = td->td_proc->p_elf_flags;
2000 ehdr->e_ehsize = sizeof(Elf_Ehdr);
2001 ehdr->e_phentsize = sizeof(Elf_Phdr);
2002 ehdr->e_shentsize = sizeof(Elf_Shdr);
2003 ehdr->e_shstrndx = SHN_UNDEF;
2004 if (numsegs + 1 < PN_XNUM) {
2005 ehdr->e_phnum = numsegs + 1;
2008 ehdr->e_phnum = PN_XNUM;
2011 ehdr->e_shoff = ehdr->e_phoff +
2012 (numsegs + 1) * ehdr->e_phentsize;
2013 KASSERT(ehdr->e_shoff == hdrsize - sizeof(Elf_Shdr),
2014 ("e_shoff: %zu, hdrsize - shdr: %zu",
2015 (size_t)ehdr->e_shoff, hdrsize - sizeof(Elf_Shdr)));
2017 shdr = (Elf_Shdr *)((char *)hdr + ehdr->e_shoff);
2018 memset(shdr, 0, sizeof(*shdr));
2020 * A special first section is used to hold large segment and
2021 * section counts. This was proposed by Sun Microsystems in
2022 * Solaris and has been adopted by Linux; the standard ELF
2023 * tools are already familiar with the technique.
2025 * See table 7-7 of the Solaris "Linker and Libraries Guide"
2026 * (or 12-7 depending on the version of the document) for more
2029 shdr->sh_type = SHT_NULL;
2030 shdr->sh_size = ehdr->e_shnum;
2031 shdr->sh_link = ehdr->e_shstrndx;
2032 shdr->sh_info = numsegs + 1;
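/*
 * Sketch of the consumer side of the PN_XNUM scheme set up above: a
 * reader that sees e_phnum == PN_XNUM recovers the real count from
 * section header 0, e.g.
 *
 *	phnum = (ehdr->e_phnum == PN_XNUM) ?
 *	    shdr[0].sh_info : ehdr->e_phnum;
 */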
2036 * Fill in the program header entries.
2038 phdr = (Elf_Phdr *)((char *)hdr + ehdr->e_phoff);
2040 /* The note segment. */
2041 phdr->p_type = PT_NOTE;
2042 phdr->p_offset = hdrsize;
2045 phdr->p_filesz = notesz;
2047 phdr->p_flags = PF_R;
2048 phdr->p_align = ELF_NOTE_ROUNDSIZE;
2051 /* All the writable segments from the program. */
2053 phc.offset = round_page(hdrsize + notesz);
2054 each_dumpable_segment(td, cb_put_phdr, &phc, flags);
2058 register_note(struct note_info_list *list, int type, outfunc_t out, void *arg)
2060 struct note_info *ninfo;
2061 size_t size, notesize;
2064 out(arg, NULL, &size);
2065 ninfo = malloc(sizeof(*ninfo), M_TEMP, M_ZERO | M_WAITOK);
2067 ninfo->outfunc = out;
2068 ninfo->outarg = arg;
2069 ninfo->outsize = size;
2070 TAILQ_INSERT_TAIL(list, ninfo, link);
2075 notesize = sizeof(Elf_Note) + /* note header */
2076 roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) +
2078 roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */
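/*
 * Worked example of the computation above, assuming a hypothetical
 * 4-byte descriptor: sizeof(Elf_Note) is 12 and
 * sizeof(FREEBSD_ABI_VENDOR) is 8 ("FreeBSD" plus NUL), so
 *	notesize = 12 + roundup2(8, 4) + roundup2(4, 4) = 12 + 8 + 4 = 24.
 */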
2084 append_note_data(const void *src, void *dst, size_t len)
2088 padded_len = roundup2(len, ELF_NOTE_ROUNDSIZE);
2090 bcopy(src, dst, len);
2091 bzero((char *)dst + len, padded_len - len);
2093 return (padded_len);
2097 __elfN(populate_note)(int type, void *src, void *dst, size_t size, void **descp)
2105 note = (Elf_Note *)buf;
2106 note->n_namesz = sizeof(FREEBSD_ABI_VENDOR);
2107 note->n_descsz = size;
2108 note->n_type = type;
2109 buf += sizeof(*note);
2110 buf += append_note_data(FREEBSD_ABI_VENDOR, buf,
2111 sizeof(FREEBSD_ABI_VENDOR));
2112 append_note_data(src, buf, size);
2117 notesize = sizeof(Elf_Note) + /* note header */
2118 roundup2(sizeof(FREEBSD_ABI_VENDOR), ELF_NOTE_ROUNDSIZE) +
2120 roundup2(size, ELF_NOTE_ROUNDSIZE); /* note description */
2126 __elfN(putnote)(struct note_info *ninfo, struct sbuf *sb)
2129 ssize_t old_len, sect_len;
2130 size_t new_len, descsz, i;
2132 if (ninfo->type == -1) {
2133 ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
2137 note.n_namesz = sizeof(FREEBSD_ABI_VENDOR);
2138 note.n_descsz = ninfo->outsize;
2139 note.n_type = ninfo->type;
2141 sbuf_bcat(sb, ¬e, sizeof(note));
2142 sbuf_start_section(sb, &old_len);
2143 sbuf_bcat(sb, FREEBSD_ABI_VENDOR, sizeof(FREEBSD_ABI_VENDOR));
2144 sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
2145 if (note.n_descsz == 0)
2147 sbuf_start_section(sb, &old_len);
2148 ninfo->outfunc(ninfo->outarg, sb, &ninfo->outsize);
2149 sect_len = sbuf_end_section(sb, old_len, ELF_NOTE_ROUNDSIZE, 0);
2153 new_len = (size_t)sect_len;
2154 descsz = roundup(note.n_descsz, ELF_NOTE_ROUNDSIZE);
2155 if (new_len < descsz) {
2157 * It is expected that individual note emitters will correctly
2158 * predict their expected output size and fill up to that size
2159 * themselves, padding in a format-specific way if needed.
2160 * However, in case they don't, just do it here with zeros.
2162 for (i = 0; i < descsz - new_len; i++)
2164 } else if (new_len > descsz) {
2166 * We can't always truncate sb -- we may have drained some
2167 * of it already.
2169 KASSERT(new_len == descsz, ("%s: Note type %u changed as we "
2170 "read it (%zu > %zu). Since it is longer than "
2171 "expected, this coredump's notes are corrupt. THIS "
2172 "IS A BUG in the note_procstat routine for type %u.\n",
2173 __func__, (unsigned)note.n_type, new_len, descsz,
2174 (unsigned)note.n_type));
2179 * Miscellaneous note out functions.
2182 #if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
2183 #include <compat/freebsd32/freebsd32.h>
2184 #include <compat/freebsd32/freebsd32_signal.h>
2186 typedef struct prstatus32 elf_prstatus_t;
2187 typedef struct prpsinfo32 elf_prpsinfo_t;
2188 typedef struct fpreg32 elf_prfpregset_t;
2189 typedef struct fpreg32 elf_fpregset_t;
2190 typedef struct reg32 elf_gregset_t;
2191 typedef struct thrmisc32 elf_thrmisc_t;
2192 #define ELF_KERN_PROC_MASK KERN_PROC_MASK32
2193 typedef struct kinfo_proc32 elf_kinfo_proc_t;
2194 typedef uint32_t elf_ps_strings_t;
2196 typedef prstatus_t elf_prstatus_t;
2197 typedef prpsinfo_t elf_prpsinfo_t;
2198 typedef prfpregset_t elf_prfpregset_t;
2199 typedef prfpregset_t elf_fpregset_t;
2200 typedef gregset_t elf_gregset_t;
2201 typedef thrmisc_t elf_thrmisc_t;
2202 #define ELF_KERN_PROC_MASK 0
2203 typedef struct kinfo_proc elf_kinfo_proc_t;
2204 typedef vm_offset_t elf_ps_strings_t;
static void
__elfN(note_prpsinfo)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct sbuf sbarg;
	size_t len;
	char *cp, *end;
	struct proc *p;
	elf_prpsinfo_t *psinfo;
	int error;

	p = (struct proc *)arg;
	if (sb != NULL) {
		KASSERT(*sizep == sizeof(*psinfo), ("invalid size"));
		psinfo = malloc(sizeof(*psinfo), M_TEMP, M_ZERO | M_WAITOK);
		psinfo->pr_version = PRPSINFO_VERSION;
		psinfo->pr_psinfosz = sizeof(elf_prpsinfo_t);
		strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
		PROC_LOCK(p);
		if (p->p_args != NULL) {
			len = sizeof(psinfo->pr_psargs) - 1;
			if (len > p->p_args->ar_length)
				len = p->p_args->ar_length;
			memcpy(psinfo->pr_psargs, p->p_args->ar_args, len);
			PROC_UNLOCK(p);
			error = 0;
		} else {
			_PHOLD(p);
			PROC_UNLOCK(p);
			sbuf_new(&sbarg, psinfo->pr_psargs,
			    sizeof(psinfo->pr_psargs), SBUF_FIXEDLEN);
			error = proc_getargv(curthread, p, &sbarg);
			PRELE(p);
			if (sbuf_finish(&sbarg) == 0)
				len = sbuf_len(&sbarg) - 1;
			else
				len = sizeof(psinfo->pr_psargs) - 1;
			sbuf_delete(&sbarg);
		}
		if (error || len == 0)
			strlcpy(psinfo->pr_psargs, p->p_comm,
			    sizeof(psinfo->pr_psargs));
		else {
			KASSERT(len < sizeof(psinfo->pr_psargs),
			    ("len is too long: %zu vs %zu", len,
			    sizeof(psinfo->pr_psargs)));
			cp = psinfo->pr_psargs;
			end = cp + len - 1;
			for (;;) {
				cp = memchr(cp, '\0', end - cp);
				if (cp == NULL)
					break;
				*cp = ' ';
			}
		}
		psinfo->pr_pid = p->p_pid;
		sbuf_bcat(sb, psinfo, sizeof(*psinfo));
		free(psinfo, M_TEMP);
	}
	*sizep = sizeof(*psinfo);
}

static void
__elfN(note_prstatus)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct thread *td;
	elf_prstatus_t *status;

	td = (struct thread *)arg;
	if (sb != NULL) {
		KASSERT(*sizep == sizeof(*status), ("invalid size"));
		status = malloc(sizeof(*status), M_TEMP, M_ZERO | M_WAITOK);
		status->pr_version = PRSTATUS_VERSION;
		status->pr_statussz = sizeof(elf_prstatus_t);
		status->pr_gregsetsz = sizeof(elf_gregset_t);
		status->pr_fpregsetsz = sizeof(elf_fpregset_t);
		status->pr_osreldate = osreldate;
		status->pr_cursig = td->td_proc->p_sig;
		status->pr_pid = td->td_tid;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
		fill_regs32(td, &status->pr_reg);
#else
		fill_regs(td, &status->pr_reg);
#endif
		sbuf_bcat(sb, status, sizeof(*status));
		free(status, M_TEMP);
	}
	*sizep = sizeof(*status);
}

static void
__elfN(note_fpregset)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct thread *td;
	elf_prfpregset_t *fpregset;

	td = (struct thread *)arg;
	if (sb != NULL) {
		KASSERT(*sizep == sizeof(*fpregset), ("invalid size"));
		fpregset = malloc(sizeof(*fpregset), M_TEMP, M_ZERO | M_WAITOK);
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
		fill_fpregs32(td, fpregset);
#else
		fill_fpregs(td, fpregset);
#endif
		sbuf_bcat(sb, fpregset, sizeof(*fpregset));
		free(fpregset, M_TEMP);
	}
	*sizep = sizeof(*fpregset);
}

static void
__elfN(note_thrmisc)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct thread *td;
	elf_thrmisc_t thrmisc;

	td = (struct thread *)arg;
	if (sb != NULL) {
		KASSERT(*sizep == sizeof(thrmisc), ("invalid size"));
		bzero(&thrmisc, sizeof(thrmisc));
		strcpy(thrmisc.pr_tname, td->td_name);
		sbuf_bcat(sb, &thrmisc, sizeof(thrmisc));
	}
	*sizep = sizeof(thrmisc);
}

static void
__elfN(note_ptlwpinfo)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct thread *td;
	size_t size;
	int structsize;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
	struct ptrace_lwpinfo32 pl;
#else
	struct ptrace_lwpinfo pl;
#endif

	td = (struct thread *)arg;
	size = sizeof(structsize) + sizeof(pl);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(pl);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		bzero(&pl, sizeof(pl));
		pl.pl_lwpid = td->td_tid;
		pl.pl_event = PL_EVENT_NONE;
		pl.pl_sigmask = td->td_sigmask;
		pl.pl_siglist = td->td_siglist;
		if (td->td_si.si_signo != 0) {
			pl.pl_event = PL_EVENT_SIGNAL;
			pl.pl_flags |= PL_FLAG_SI;
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
			siginfo_to_siginfo32(&td->td_si, &pl.pl_siginfo);
#else
			pl.pl_siginfo = td->td_si;
#endif
		}
		strcpy(pl.pl_tdname, td->td_name);
		/* XXX TODO: supply more information in struct ptrace_lwpinfo*/
		sbuf_bcat(sb, &pl, sizeof(pl));
	}
	*sizep = size;
}

/*
 * Allow for MD specific notes, as well as any MD
 * specific preparations for writing MI notes.
 */
static void
__elfN(note_threadmd)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct thread *td;
	void *buf;
	size_t size;

	td = (struct thread *)arg;
	size = *sizep;
	if (size != 0 && sb != NULL)
		buf = malloc(size, M_TEMP, M_ZERO | M_WAITOK);
	else
		buf = NULL;
	size = 0;
	__elfN(dump_thread)(td, buf, &size);
	KASSERT(sb == NULL || *sizep == size, ("invalid size"));
	if (size != 0 && sb != NULL)
		sbuf_bcat(sb, buf, size);
	free(buf, M_TEMP);
	*sizep = size;
}

#ifdef KINFO_PROC_SIZE
CTASSERT(sizeof(struct kinfo_proc) == KINFO_PROC_SIZE);
#endif

static void
__elfN(note_procstat_proc)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	size = sizeof(structsize) + p->p_numthreads *
	    sizeof(elf_kinfo_proc_t);

	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(elf_kinfo_proc_t);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sx_slock(&proctree_lock);
		PROC_LOCK(p);
		kern_proc_out(p, sb, ELF_KERN_PROC_MASK);
		sx_sunlock(&proctree_lock);
	}
	*sizep = size;
}

#ifdef KINFO_FILE_SIZE
CTASSERT(sizeof(struct kinfo_file) == KINFO_FILE_SIZE);
#endif
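
/*
 * For the variable-length procstat notes, the sizing pass (sb == NULL)
 * redirects output into a counting drain (sbuf_count_drain) so that
 * *sizep matches exactly what the emission pass will produce.
 */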
static void
note_procstat_files(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size, sect_sz, i;
	ssize_t start_len, sect_len;
	int structsize, filedesc_flags;

	if (coredump_pack_fileinfo)
		filedesc_flags = KERN_FILEDESC_PACK_KINFO;
	else
		filedesc_flags = 0;

	p = (struct proc *)arg;
	structsize = sizeof(struct kinfo_file);
	if (sb == NULL) {
		size = 0;
		sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
		sbuf_set_drain(sb, sbuf_count_drain, &size);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_filedesc_out(p, sb, -1, filedesc_flags);
		sbuf_finish(sb);
		sbuf_delete(sb);
		*sizep = size;
	} else {
		sbuf_start_section(sb, &start_len);

		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_filedesc_out(p, sb, *sizep - sizeof(structsize),
		    filedesc_flags);

		sect_len = sbuf_end_section(sb, start_len, 0, 0);
		if (sect_len < 0)
			return;
		sect_sz = sect_len;

		KASSERT(sect_sz <= *sizep,
		    ("kern_proc_filedesc_out did not respect maxlen; "
		    "requested %zu, got %zu", *sizep - sizeof(structsize),
		    sect_sz - sizeof(structsize)));

		/* Zero-fill any slack so the note keeps its sized length. */
		for (i = 0; i < *sizep - sect_sz && sb->s_error == 0; i++)
			sbuf_putc(sb, 0);
	}
}

#ifdef KINFO_VMENTRY_SIZE
CTASSERT(sizeof(struct kinfo_vmentry) == KINFO_VMENTRY_SIZE);
#endif

static void
note_procstat_vmmap(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize, vmmap_flags;

	if (coredump_pack_vmmapinfo)
		vmmap_flags = KERN_VMMAP_PACK_KINFO;
	else
		vmmap_flags = 0;

	p = (struct proc *)arg;
	structsize = sizeof(struct kinfo_vmentry);
	if (sb == NULL) {
		size = 0;
		sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
		sbuf_set_drain(sb, sbuf_count_drain, &size);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_vmmap_out(p, sb, -1, vmmap_flags);
		sbuf_finish(sb);
		sbuf_delete(sb);
		*sizep = size;
	} else {
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		kern_proc_vmmap_out(p, sb, *sizep - sizeof(structsize),
		    vmmap_flags);
	}
}

static void
note_procstat_groups(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	size = sizeof(structsize) + p->p_ucred->cr_ngroups * sizeof(gid_t);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(gid_t);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, p->p_ucred->cr_groups, p->p_ucred->cr_ngroups *
		    sizeof(gid_t));
	}
	*sizep = size;
}

static void
note_procstat_umask(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	size = sizeof(structsize) + sizeof(p->p_pd->pd_cmask);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(p->p_pd->pd_cmask);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, &p->p_pd->pd_cmask, sizeof(p->p_pd->pd_cmask));
	}
	*sizep = size;
}

static void
note_procstat_rlimit(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	struct rlimit rlim[RLIM_NLIMITS];
	size_t size;
	int structsize, i;

	p = (struct proc *)arg;
	size = sizeof(structsize) + sizeof(rlim);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(rlim);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PROC_LOCK(p);
		for (i = 0; i < RLIM_NLIMITS; i++)
			lim_rlimit_proc(p, i, &rlim[i]);
		PROC_UNLOCK(p);
		sbuf_bcat(sb, rlim, sizeof(rlim));
	}
	*sizep = size;
}

static void
note_procstat_osrel(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	size = sizeof(structsize) + sizeof(p->p_osrel);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(p->p_osrel);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, &p->p_osrel, sizeof(p->p_osrel));
	}
	*sizep = size;
}

static void
__elfN(note_procstat_psstrings)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	elf_ps_strings_t ps_strings;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	size = sizeof(structsize) + sizeof(ps_strings);
	if (sb != NULL) {
		KASSERT(*sizep == size, ("invalid size"));
		structsize = sizeof(ps_strings);
#if defined(COMPAT_FREEBSD32) && __ELF_WORD_SIZE == 32
		ps_strings = PTROUT(p->p_sysent->sv_psstrings);
#else
		ps_strings = p->p_sysent->sv_psstrings;
#endif
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		sbuf_bcat(sb, &ps_strings, sizeof(ps_strings));
	}
	*sizep = size;
}

static void
__elfN(note_procstat_auxv)(void *arg, struct sbuf *sb, size_t *sizep)
{
	struct proc *p;
	size_t size;
	int structsize;

	p = (struct proc *)arg;
	if (sb == NULL) {
		size = 0;
		sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
		sbuf_set_drain(sb, sbuf_count_drain, &size);
		/* Only the length matters here; the drain just counts bytes. */
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PHOLD(p);
		proc_getauxv(curthread, p, sb);
		PRELE(p);
		sbuf_finish(sb);
		sbuf_delete(sb);
		*sizep = size;
	} else {
		structsize = sizeof(Elf_Auxinfo);
		sbuf_bcat(sb, &structsize, sizeof(structsize));
		PHOLD(p);
		proc_getauxv(curthread, p, sb);
		PRELE(p);
	}
}
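
/*
 * Walk the notes in a PT_NOTE segment, looking for one whose header
 * sizes, type, and vendor string match checknote, and hand the first
 * match to cb.  The segment is limited to PAGE_SIZE and at most 100
 * notes are examined, which bounds the work done on malformed images.
 */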
static boolean_t
__elfN(parse_notes)(struct image_params *imgp, Elf_Note *checknote,
    const char *note_vendor, const Elf_Phdr *pnote,
    boolean_t (*cb)(const Elf_Note *, void *, boolean_t *), void *cb_arg)
{
	const Elf_Note *note, *note0, *note_end;
	const char *note_name;
	char *buf;
	int i, error;
	boolean_t res;

	/* We need some limit, might as well use PAGE_SIZE. */
	if (pnote == NULL || pnote->p_filesz > PAGE_SIZE)
		return (FALSE);
	ASSERT_VOP_LOCKED(imgp->vp, "parse_notes");
	if (pnote->p_offset > PAGE_SIZE ||
	    pnote->p_filesz > PAGE_SIZE - pnote->p_offset) {
		buf = malloc(pnote->p_filesz, M_TEMP, M_NOWAIT);
		if (buf == NULL) {
			VOP_UNLOCK(imgp->vp);
			buf = malloc(pnote->p_filesz, M_TEMP, M_WAITOK);
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		}
		error = vn_rdwr(UIO_READ, imgp->vp, buf, pnote->p_filesz,
		    pnote->p_offset, UIO_SYSSPACE, IO_NODELOCKED,
		    curthread->td_ucred, NOCRED, NULL, curthread);
		if (error != 0) {
			uprintf("i/o error PT_NOTE\n");
			goto retf;
		}
		note = note0 = (const Elf_Note *)buf;
		note_end = (const Elf_Note *)(buf + pnote->p_filesz);
	} else {
		note = note0 = (const Elf_Note *)(imgp->image_header +
		    pnote->p_offset);
		note_end = (const Elf_Note *)(imgp->image_header +
		    pnote->p_offset + pnote->p_filesz);
		buf = NULL;
	}
	for (i = 0; i < 100 && note >= note0 && note < note_end; i++) {
		if (!aligned(note, Elf32_Addr) || (const char *)note_end -
		    (const char *)note < sizeof(Elf_Note)) {
			goto retf;
		}
		if (note->n_namesz != checknote->n_namesz ||
		    note->n_descsz != checknote->n_descsz ||
		    note->n_type != checknote->n_type)
			goto nextnote;
		note_name = (const char *)(note + 1);
		if (note_name + checknote->n_namesz >=
		    (const char *)note_end || strncmp(note_vendor,
		    note_name, checknote->n_namesz) != 0)
			goto nextnote;

		if (cb(note, cb_arg, &res))
			goto ret;
nextnote:
		note = (const Elf_Note *)((const char *)(note + 1) +
		    roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE) +
		    roundup2(note->n_descsz, ELF_NOTE_ROUNDSIZE));
	}
retf:
	res = FALSE;
ret:
	free(buf, M_TEMP);
	return (res);
}

struct brandnote_cb_arg {
	Elf_Brandnote *brandnote;
	int32_t *osrel;
};

static boolean_t
brandnote_cb(const Elf_Note *note, void *arg0, boolean_t *res)
{
	struct brandnote_cb_arg *arg;

	arg = arg0;

	/*
	 * Fetch the osreldate for binary from the ELF OSABI-note if
	 * necessary.
	 */
	*res = (arg->brandnote->flags & BN_TRANSLATE_OSREL) != 0 &&
	    arg->brandnote->trans_osrel != NULL ?
	    arg->brandnote->trans_osrel(note, arg->osrel) : TRUE;

	return (TRUE);
}
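
/*
 * The NT_FREEBSD_FEATURE_CTL note carries a single 32-bit word of
 * feature control flags that let a binary opt out of kernel features
 * such as address randomization.
 */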
static Elf_Note fctl_note = {
	.n_namesz = sizeof(FREEBSD_ABI_VENDOR),
	.n_descsz = sizeof(uint32_t),
	.n_type = NT_FREEBSD_FEATURE_CTL,
};

struct fctl_cb_arg {
	boolean_t *has_fctl0;
	uint32_t *fctl0;
};

static boolean_t
note_fctl_cb(const Elf_Note *note, void *arg0, boolean_t *res)
{
	struct fctl_cb_arg *arg;
	const Elf32_Word *desc;
	uintptr_t p;

	arg = arg0;
	p = (uintptr_t)(note + 1);
	p += roundup2(note->n_namesz, ELF_NOTE_ROUNDSIZE);
	desc = (const Elf32_Word *)p;
	*arg->has_fctl0 = TRUE;
	*arg->fctl0 = desc[0];
	*res = TRUE;
	return (TRUE);
}

/*
 * Try to find the appropriate ABI-note section for checknote, fetch
 * the osreldate and feature control flags for binary from the ELF
 * OSABI-note.  Only the first page of the image is searched, the same
 * as for headers.
 */
static boolean_t
__elfN(check_note)(struct image_params *imgp, Elf_Brandnote *brandnote,
    int32_t *osrel, boolean_t *has_fctl0, uint32_t *fctl0)
{
	const Elf_Phdr *phdr;
	const Elf_Ehdr *hdr;
	struct brandnote_cb_arg b_arg;
	struct fctl_cb_arg f_arg;
	int i, j;

	hdr = (const Elf_Ehdr *)imgp->image_header;
	phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
	b_arg.brandnote = brandnote;
	b_arg.osrel = osrel;
	f_arg.has_fctl0 = has_fctl0;
	f_arg.fctl0 = fctl0;

	for (i = 0; i < hdr->e_phnum; i++) {
		if (phdr[i].p_type == PT_NOTE && __elfN(parse_notes)(imgp,
		    &brandnote->hdr, brandnote->vendor, &phdr[i], brandnote_cb,
		    &b_arg)) {
			for (j = 0; j < hdr->e_phnum; j++) {
				if (phdr[j].p_type == PT_NOTE &&
				    __elfN(parse_notes)(imgp, &fctl_note,
				    FREEBSD_ABI_VENDOR, &phdr[j],
				    note_fctl_cb, &f_arg))
					break;
			}
			return (TRUE);
		}
	}
	return (FALSE);
}

/*
 * Tell kern_execve.c about it, with a little help from the linker.
 */
static struct execsw __elfN(execsw) = {
	.ex_imgact = __CONCAT(exec_, __elfN(imgact)),
	.ex_name = __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
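
/*
 * Translate ELF segment flags (PF_R/PF_W/PF_X) to vm_prot_t and back.
 * For 32-bit binaries on x86, a readable segment is also mapped
 * executable when i386_read_exec is set, matching the historical
 * read-implies-exec behavior.
 */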
static vm_prot_t
__elfN(trans_prot)(Elf_Word flags)
{
	vm_prot_t prot;

	prot = 0;
	if (flags & PF_X)
		prot |= VM_PROT_EXECUTE;
	if (flags & PF_W)
		prot |= VM_PROT_WRITE;
	if (flags & PF_R)
		prot |= VM_PROT_READ;
#if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
	if (i386_read_exec && (flags & PF_R))
		prot |= VM_PROT_EXECUTE;
#endif
	return (prot);
}

static Elf_Word
__elfN(untrans_prot)(vm_prot_t prot)
{
	Elf_Word flags;

	flags = 0;
	if (prot & VM_PROT_EXECUTE)
		flags |= PF_X;
	if (prot & VM_PROT_READ)
		flags |= PF_R;
	if (prot & VM_PROT_WRITE)
		flags |= PF_W;
	return (flags);
}
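
/*
 * Randomly shift the stack base down by up to aslr_stack_gap percent
 * of the effective stack size, keeping the gap aligned to
 * sizeof(u_long).
 */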
static void
__elfN(stackgap)(struct image_params *imgp, uintptr_t *stack_base)
{
	uintptr_t range, rbase, gap;
	int pct;

	pct = __elfN(aslr_stack_gap);
	if (pct == 0)
		return;
	if (pct > 50)
		pct = 50;
	range = imgp->eff_stack_sz * pct / 100;
	arc4rand(&rbase, sizeof(rbase), 0);
	gap = rbase % range;
	gap &= ~(sizeof(u_long) - 1);
	*stack_base -= gap;
}