/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1993, David Greenman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include "opt_capsicum.h"
33 #include "opt_hwpmc_hooks.h"
34 #include "opt_ktrace.h"
37 #include <sys/param.h>
38 #include <sys/systm.h>
41 #include <sys/capsicum.h>
42 #include <sys/compressor.h>
43 #include <sys/eventhandler.h>
45 #include <sys/fcntl.h>
46 #include <sys/filedesc.h>
47 #include <sys/imgact.h>
48 #include <sys/imgact_elf.h>
49 #include <sys/kernel.h>
51 #include <sys/malloc.h>
53 #include <sys/mount.h>
54 #include <sys/mutex.h>
55 #include <sys/namei.h>
58 #include <sys/ptrace.h>
60 #include <sys/resourcevar.h>
61 #include <sys/rwlock.h>
62 #include <sys/sched.h>
64 #include <sys/sf_buf.h>
66 #include <sys/signalvar.h>
69 #include <sys/syscallsubr.h>
70 #include <sys/sysctl.h>
71 #include <sys/sysent.h>
72 #include <sys/sysproto.h>
73 #include <sys/timers.h>
74 #include <sys/umtxvar.h>
75 #include <sys/vnode.h>
78 #include <sys/ktrace.h>
82 #include <vm/vm_param.h>
84 #include <vm/vm_page.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_kern.h>
87 #include <vm/vm_extern.h>
88 #include <vm/vm_object.h>
89 #include <vm/vm_pager.h>
92 #include <sys/pmckern.h>
95 #include <security/audit/audit.h>
96 #include <security/mac/mac_framework.h>
99 #include <sys/dtrace_bsd.h>
100 dtrace_execexit_func_t dtrace_fasttrap_exec;
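/*
 * DTrace SDT probes: proc:::exec fires when an exec starts (with the image
 * path), exec__failure fires with the error number, and exec__success fires
 * with the image path once the new image is in place.
 */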
103 SDT_PROVIDER_DECLARE(proc);
104 SDT_PROBE_DEFINE1(proc, , , exec, "char *");
105 SDT_PROBE_DEFINE1(proc, , , exec__failure, "int");
106 SDT_PROBE_DEFINE1(proc, , , exec__success, "char *");
108 MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");
110 int coredump_pack_fileinfo = 1;
111 SYSCTL_INT(_kern, OID_AUTO, coredump_pack_fileinfo, CTLFLAG_RWTUN,
112 &coredump_pack_fileinfo, 0,
113 "Enable file path packing in 'procstat -f' coredump notes");
115 int coredump_pack_vmmapinfo = 1;
116 SYSCTL_INT(_kern, OID_AUTO, coredump_pack_vmmapinfo, CTLFLAG_RWTUN,
117 &coredump_pack_vmmapinfo, 0,
118 "Enable file path packing in 'procstat -v' coredump notes");
120 static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
121 static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
122 static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
123 static int do_execve(struct thread *td, struct image_args *args,
124 struct mac *mac_p, struct vmspace *oldvmspace);
126 /* XXX This should be vm_size_t. */
127 SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD|
128 CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_ps_strings, "LU",
129 "Location of process' ps_strings structure");
131 /* XXX This should be vm_size_t. */
132 SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD|
133 CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_usrstack, "LU",
134 "Top of process stack");
136 SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_MPSAFE,
137 NULL, 0, sysctl_kern_stackprot, "I",
138 "Stack memory permissions");
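/* Defaults to 256 bytes on systems with 4 KB pages (PAGE_SIZE / 16). */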
140 u_long ps_arg_cache_limit = PAGE_SIZE / 16;
141 SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
142 &ps_arg_cache_limit, 0,
143 "Process' command line characters cache limit");
145 static int disallow_high_osrel;
146 SYSCTL_INT(_kern, OID_AUTO, disallow_high_osrel, CTLFLAG_RW,
147 &disallow_high_osrel, 0,
148 "Disallow execution of binaries built for higher version of the world");
150 static int map_at_zero = 0;
151 SYSCTL_INT(_security_bsd, OID_AUTO, map_at_zero, CTLFLAG_RWTUN, &map_at_zero, 0,
152 "Permit processes to map an object at virtual address 0.");
154 static int core_dump_can_intr = 1;
155 SYSCTL_INT(_kern, OID_AUTO, core_dump_can_intr, CTLFLAG_RWTUN,
156 &core_dump_can_intr, 0,
157 "Core dumping interruptible with SIGKILL");
static int
sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
{
	struct proc *p = curproc;
	vm_offset_t ps_strings;
	unsigned int val;

	if (req->flags & SCTL_MASK32) {
		val = (unsigned int)PROC_PS_STRINGS(p);
		return (SYSCTL_OUT(req, &val, sizeof(val)));
	}
	ps_strings = PROC_PS_STRINGS(p);
	return (SYSCTL_OUT(req, &ps_strings, sizeof(ps_strings)));
}

static int
sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
{
	struct proc *p = curproc;
	vm_offset_t val;
	unsigned int val32;

	if (req->flags & SCTL_MASK32) {
		val32 = round_page((unsigned int)p->p_vmspace->vm_stacktop);
		return (SYSCTL_OUT(req, &val32, sizeof(val32)));
	}
	val = round_page(p->p_vmspace->vm_stacktop);
	return (SYSCTL_OUT(req, &val, sizeof(val)));
}

static int
sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
{
	struct proc *p = curproc;

	return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
	    sizeof(p->p_sysent->sv_stackprot)));
}
/*
 * Each of the items is a pointer to a `const struct execsw', hence the
 * double pointer here.
 */
static const struct execsw **execsw;
212 #ifndef _SYS_SYSPROTO_H_
221 sys_execve(struct thread *td, struct execve_args *uap)
223 struct image_args args;
224 struct vmspace *oldvmspace;
227 error = pre_execve(td, &oldvmspace);
230 error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
231 uap->argv, uap->envv);
233 error = kern_execve(td, &args, NULL, oldvmspace);
234 post_execve(td, error, oldvmspace);
235 AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
239 #ifndef _SYS_SYSPROTO_H_
240 struct fexecve_args {
247 sys_fexecve(struct thread *td, struct fexecve_args *uap)
249 struct image_args args;
250 struct vmspace *oldvmspace;
253 error = pre_execve(td, &oldvmspace);
256 error = exec_copyin_args(&args, NULL, UIO_SYSSPACE,
257 uap->argv, uap->envv);
260 error = kern_execve(td, &args, NULL, oldvmspace);
262 post_execve(td, error, oldvmspace);
263 AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
267 #ifndef _SYS_SYSPROTO_H_
268 struct __mac_execve_args {
277 sys___mac_execve(struct thread *td, struct __mac_execve_args *uap)
280 struct image_args args;
281 struct vmspace *oldvmspace;
284 error = pre_execve(td, &oldvmspace);
287 error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
288 uap->argv, uap->envv);
290 error = kern_execve(td, &args, uap->mac_p, oldvmspace);
291 post_execve(td, error, oldvmspace);
292 AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
300 pre_execve(struct thread *td, struct vmspace **oldvmspace)
305 KASSERT(td == curthread, ("non-current thread %p", td));
308 if ((p->p_flag & P_HADTHREADS) != 0) {
310 while (p->p_singlethr > 0) {
311 error = msleep(&p->p_singlethr, &p->p_mtx,
312 PWAIT | PCATCH, "exec1t", 0);
318 if (thread_single(p, SINGLE_BOUNDARY) != 0)
323 KASSERT(error != 0 || (td->td_pflags & TDP_EXECVMSPC) == 0,
325 *oldvmspace = p->p_vmspace;
330 post_execve(struct thread *td, int error, struct vmspace *oldvmspace)
334 KASSERT(td == curthread, ("non-current thread %p", td));
336 if ((p->p_flag & P_HADTHREADS) != 0) {
339 * If success, we upgrade to SINGLE_EXIT state to
340 * force other threads to suicide.
342 if (error == EJUSTRETURN)
343 thread_single(p, SINGLE_EXIT);
345 thread_single_end(p, SINGLE_BOUNDARY);
348 exec_cleanup(td, oldvmspace);
/*
 * kern_execve() has the astonishing property of not always returning to
 * the caller.  If sufficiently bad things happen during the call to
 * do_execve(), it can end up calling exit1(); as a result, callers must
 * avoid doing anything which they might need to undo (e.g., allocating
 * memory).
 */
359 kern_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
360 struct vmspace *oldvmspace)
363 TSEXEC(td->td_proc->p_pid, args->begin_argv);
364 AUDIT_ARG_ARGV(args->begin_argv, args->argc,
365 exec_args_get_begin_envv(args) - args->begin_argv);
366 AUDIT_ARG_ENVV(exec_args_get_begin_envv(args), args->envc,
367 args->endp - exec_args_get_begin_envv(args));
369 /* Must have at least one argument. */
370 if (args->argc == 0) {
371 exec_free_args(args);
374 return (do_execve(td, args, mac_p, oldvmspace));
378 execve_nosetid(struct image_params *imgp)
380 imgp->credential_setid = false;
381 if (imgp->newcred != NULL) {
382 crfree(imgp->newcred);
383 imgp->newcred = NULL;
388 * In-kernel implementation of execve(). All arguments are assumed to be
389 * userspace pointers from the passed thread.
392 do_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
393 struct vmspace *oldvmspace)
395 struct proc *p = td->td_proc;
397 struct ucred *oldcred;
398 struct uidinfo *euip = NULL;
399 uintptr_t stack_base;
400 struct image_params image_params, *imgp;
402 int (*img_first)(struct image_params *);
403 struct pargs *oldargs = NULL, *newargs = NULL;
404 struct sigacts *oldsigacts = NULL, *newsigacts = NULL;
406 struct ktr_io_params *kiop;
408 struct vnode *oldtextvp, *newtextvp;
409 struct vnode *oldtextdvp, *newtextdvp;
410 char *oldbinname, *newbinname;
411 bool credential_changing;
413 struct label *interpvplabel = NULL;
414 bool will_transition;
417 struct pmckern_procexec pe;
419 int error, i, orig_osrel;
421 Elf_Brandinfo *orig_brandinfo;
422 size_t freepath_size;
423 static const char fexecv_proc_title[] = "(fexecv)";
425 imgp = &image_params;
426 oldtextvp = oldtextdvp = NULL;
427 newtextvp = newtextdvp = NULL;
428 newbinname = oldbinname = NULL;
	/*
	 * Lock the process and set the P_INEXEC flag to indicate that
	 * it should be left alone until we're done here.  This is
	 * necessary to avoid race conditions - e.g. in ptrace() -
	 * that might allow a local user to illicitly obtain elevated
	 * privileges.
	 */
441 KASSERT((p->p_flag & P_INEXEC) == 0,
442 ("%s(): process already has P_INEXEC flag", __func__));
443 p->p_flag |= P_INEXEC;
447 * Initialize part of the common data
449 bzero(imgp, sizeof(*imgp));
453 oldcred = p->p_ucred;
454 orig_osrel = p->p_osrel;
455 orig_fctl0 = p->p_fctl0;
456 orig_brandinfo = p->p_elf_brandinfo;
459 error = mac_execve_enter(imgp, mac_p);
464 SDT_PROBE1(proc, , , exec, args->fname);
467 if (args->fname != NULL) {
468 #ifdef CAPABILITY_MODE
470 * While capability mode can't reach this point via direct
471 * path arguments to execve(), we also don't allow
472 * interpreters to be used in capability mode (for now).
473 * Catch indirect lookups and return a permissions error.
475 if (IN_CAPABILITY_MODE(td)) {
482 * Translate the file name. namei() returns a vnode
483 * pointer in ni_vp among other things.
485 NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | LOCKSHARED | FOLLOW |
486 SAVENAME | AUDITVNODE1 | WANTPARENT, UIO_SYSSPACE,
493 newtextvp = nd.ni_vp;
494 newtextdvp = nd.ni_dvp;
496 newbinname = malloc(nd.ni_cnd.cn_namelen + 1, M_PARGS,
498 memcpy(newbinname, nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen);
499 newbinname[nd.ni_cnd.cn_namelen] = '\0';
500 imgp->vp = newtextvp;
503 * Do the best to calculate the full path to the image file.
505 if (args->fname[0] == '/') {
506 imgp->execpath = args->fname;
508 VOP_UNLOCK(imgp->vp);
509 freepath_size = MAXPATHLEN;
510 if (vn_fullpath_hardlink(newtextvp, newtextdvp,
511 newbinname, nd.ni_cnd.cn_namelen, &imgp->execpath,
512 &imgp->freepath, &freepath_size) != 0)
513 imgp->execpath = args->fname;
514 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
517 AUDIT_ARG_FD(args->fd);
		/*
		 * If the descriptor was not opened with O_PATH, then
		 * we require that it was opened with O_EXEC or
		 * O_RDONLY.  In either case, exec_check_permissions()
		 * below checks the _current_ file access mode regardless
		 * of the permissions additionally checked at the
		 * open(2).
		 */
527 error = fgetvp_exec(td, args->fd, &cap_fexecve_rights,
532 if (vn_fullpath(newtextvp, &imgp->execpath,
533 &imgp->freepath) != 0)
534 imgp->execpath = args->fname;
535 vn_lock(newtextvp, LK_SHARED | LK_RETRY);
536 AUDIT_ARG_VNODE1(newtextvp);
537 imgp->vp = newtextvp;
541 * Check file permissions. Also 'opens' file and sets its vnode to
544 error = exec_check_permissions(imgp);
546 goto exec_fail_dealloc;
548 imgp->object = imgp->vp->v_object;
549 if (imgp->object != NULL)
550 vm_object_reference(imgp->object);
552 error = exec_map_first_page(imgp);
554 goto exec_fail_dealloc;
556 imgp->proc->p_osrel = 0;
557 imgp->proc->p_fctl0 = 0;
558 imgp->proc->p_elf_brandinfo = NULL;
561 * Implement image setuid/setgid.
563 * Determine new credentials before attempting image activators
564 * so that it can be used by process_exec handlers to determine
565 * credential/setid changes.
567 * Don't honor setuid/setgid if the filesystem prohibits it or if
568 * the process is being traced.
570 * We disable setuid/setgid/etc in capability mode on the basis
571 * that most setugid applications are not written with that
572 * environment in mind, and will therefore almost certainly operate
573 * incorrectly. In principle there's no reason that setugid
574 * applications might not be useful in capability mode, so we may want
575 * to reconsider this conservative design choice in the future.
577 * XXXMAC: For the time being, use NOSUID to also prohibit
578 * transitions on the file system.
580 credential_changing = false;
581 credential_changing |= (attr.va_mode & S_ISUID) &&
582 oldcred->cr_uid != attr.va_uid;
583 credential_changing |= (attr.va_mode & S_ISGID) &&
584 oldcred->cr_gid != attr.va_gid;
586 will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp,
587 interpvplabel, imgp) != 0;
588 credential_changing |= will_transition;
591 /* Don't inherit PROC_PDEATHSIG_CTL value if setuid/setgid. */
592 if (credential_changing)
593 imgp->proc->p_pdeathsig = 0;
595 if (credential_changing &&
596 #ifdef CAPABILITY_MODE
597 ((oldcred->cr_flags & CRED_FLAG_CAPMODE) == 0) &&
599 (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
600 (p->p_flag & P_TRACED) == 0) {
601 imgp->credential_setid = true;
602 VOP_UNLOCK(imgp->vp);
603 imgp->newcred = crdup(oldcred);
604 if (attr.va_mode & S_ISUID) {
605 euip = uifind(attr.va_uid);
606 change_euid(imgp->newcred, euip);
608 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
609 if (attr.va_mode & S_ISGID)
610 change_egid(imgp->newcred, attr.va_gid);
612 * Implement correct POSIX saved-id behavior.
614 * XXXMAC: Note that the current logic will save the
615 * uid and gid if a MAC domain transition occurs, even
616 * though maybe it shouldn't.
618 change_svuid(imgp->newcred, imgp->newcred->cr_uid);
619 change_svgid(imgp->newcred, imgp->newcred->cr_gid);
622 * Implement correct POSIX saved-id behavior.
624 * XXX: It's not clear that the existing behavior is
625 * POSIX-compliant. A number of sources indicate that the
626 * saved uid/gid should only be updated if the new ruid is
627 * not equal to the old ruid, or the new euid is not equal
628 * to the old euid and the new euid is not equal to the old
629 * ruid. The FreeBSD code always updates the saved uid/gid.
630 * Also, this code uses the new (replaced) euid and egid as
631 * the source, which may or may not be the right ones to use.
633 if (oldcred->cr_svuid != oldcred->cr_uid ||
634 oldcred->cr_svgid != oldcred->cr_gid) {
635 VOP_UNLOCK(imgp->vp);
636 imgp->newcred = crdup(oldcred);
637 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
638 change_svuid(imgp->newcred, imgp->newcred->cr_uid);
639 change_svgid(imgp->newcred, imgp->newcred->cr_gid);
642 /* The new credentials are installed into the process later. */
645 * If the current process has a special image activator it
646 * wants to try first, call it. For example, emulating shell
647 * scripts differently.
650 if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
651 error = img_first(imgp);
654 * Loop through the list of image activators, calling each one.
655 * An activator returns -1 if there is no match, 0 on success,
656 * and an error otherwise.
658 for (i = 0; error == -1 && execsw[i]; ++i) {
659 if (execsw[i]->ex_imgact == NULL ||
660 execsw[i]->ex_imgact == img_first) {
663 error = (*execsw[i]->ex_imgact)(imgp);
669 goto exec_fail_dealloc;
673 * Special interpreter operation, cleanup and loop up to try to
674 * activate the interpreter.
676 if (imgp->interpreted) {
677 exec_unmap_first_page(imgp);
679 * The text reference needs to be removed for scripts.
680 * There is a short period before we determine that
681 * something is a script where text reference is active.
682 * The vnode lock is held over this entire period
683 * so nothing should illegitimately be blocked.
685 MPASS(imgp->textset);
686 VOP_UNSET_TEXT_CHECKED(newtextvp);
687 imgp->textset = false;
688 /* free name buffer and old vnode */
690 mac_execve_interpreter_enter(newtextvp, &interpvplabel);
693 VOP_CLOSE(newtextvp, FREAD, td->td_ucred, td);
694 imgp->opened = false;
697 imgp->vp = newtextvp = NULL;
698 if (args->fname != NULL) {
699 if (newtextdvp != NULL) {
704 free(newbinname, M_PARGS);
707 vm_object_deallocate(imgp->object);
709 execve_nosetid(imgp);
710 imgp->execpath = NULL;
711 free(imgp->freepath, M_TEMP);
712 imgp->freepath = NULL;
713 /* set new name to that of the interpreter */
714 args->fname = imgp->interpreter_name;
719 * NB: We unlock the vnode here because it is believed that none
720 * of the sv_copyout_strings/sv_fixup operations require the vnode.
722 VOP_UNLOCK(imgp->vp);
724 if (disallow_high_osrel &&
725 P_OSREL_MAJOR(p->p_osrel) > P_OSREL_MAJOR(__FreeBSD_version)) {
727 uprintf("Osrel %d for image %s too high\n", p->p_osrel,
728 imgp->execpath != NULL ? imgp->execpath : "<unresolved>");
729 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
730 goto exec_fail_dealloc;
734 * Copy out strings (args and env) and initialize stack base.
736 error = (*p->p_sysent->sv_copyout_strings)(imgp, &stack_base);
738 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
739 goto exec_fail_dealloc;
745 error = (*p->p_sysent->sv_fixup)(&stack_base, imgp);
747 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
748 goto exec_fail_dealloc;
752 * For security and other reasons, the file descriptor table cannot be
753 * shared after an exec.
757 /* close files on exec */
761 * Malloc things before we need locks.
763 i = exec_args_get_begin_envv(imgp->args) - imgp->args->begin_argv;
764 /* Cache arguments if they fit inside our allowance */
765 if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
766 newargs = pargs_alloc(i);
767 bcopy(imgp->args->begin_argv, newargs->ar_args, i);
	/*
	 * For security and other reasons, signal handlers cannot
	 * be shared after an exec.  The new process gets a copy of the old
	 * handlers.  In execsigs(), the new process will have its signals
	 * reset.
	 */
776 if (sigacts_shared(p->p_sigacts)) {
777 oldsigacts = p->p_sigacts;
778 newsigacts = sigacts_alloc();
779 sigacts_copy(newsigacts, oldsigacts);
782 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
786 p->p_sigacts = newsigacts;
790 /* reset caught signals */
793 /* name this process - nameiexec(p, ndp) */
794 bzero(p->p_comm, sizeof(p->p_comm));
796 bcopy(nd.ni_cnd.cn_nameptr, p->p_comm,
797 min(nd.ni_cnd.cn_namelen, MAXCOMLEN));
798 else if (vn_commname(newtextvp, p->p_comm, sizeof(p->p_comm)) != 0)
799 bcopy(fexecv_proc_title, p->p_comm, sizeof(fexecv_proc_title));
800 bcopy(p->p_comm, td->td_name, sizeof(td->td_name));
802 sched_clear_tdname(td);
806 * mark as execed, wakeup the process that vforked (if any) and tell
807 * it that it now has its own resources back
810 if ((p->p_flag2 & P2_NOTRACE_EXEC) == 0)
811 p->p_flag2 &= ~P2_NOTRACE;
812 if ((p->p_flag2 & P2_STKGAP_DISABLE_EXEC) == 0)
813 p->p_flag2 &= ~P2_STKGAP_DISABLE;
814 if (p->p_flag & P_PPWAIT) {
815 p->p_flag &= ~(P_PPWAIT | P_PPTRACE);
816 cv_broadcast(&p->p_pwait);
817 /* STOPs are no longer ignored, arrange for AST */
821 if ((imgp->sysent->sv_setid_allowed != NULL &&
822 !(*imgp->sysent->sv_setid_allowed)(td, imgp)) ||
823 (p->p_flag2 & P2_NO_NEW_PRIVS) != 0)
824 execve_nosetid(imgp);
827 * Implement image setuid/setgid installation.
829 if (imgp->credential_setid) {
831 * Turn off syscall tracing for set-id programs, except for
832 * root. Record any set-id flags first to make sure that
833 * we do not regain any tracing during a possible block.
837 kiop = ktrprocexec(p);
840 * Close any file descriptors 0..2 that reference procfs,
841 * then make sure file descriptors 0..2 are in use.
843 * Both fdsetugidsafety() and fdcheckstd() may call functions
844 * taking sleepable locks, so temporarily drop our locks.
847 VOP_UNLOCK(imgp->vp);
849 error = fdcheckstd(td);
850 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
852 goto exec_fail_dealloc;
855 if (will_transition) {
856 mac_vnode_execve_transition(oldcred, imgp->newcred,
857 imgp->vp, interpvplabel, imgp);
861 if (oldcred->cr_uid == oldcred->cr_ruid &&
862 oldcred->cr_gid == oldcred->cr_rgid)
863 p->p_flag &= ~P_SUGID;
866 * Set the new credentials.
868 if (imgp->newcred != NULL) {
869 proc_set_cred(p, imgp->newcred);
875 * Store the vp for use in kern.proc.pathname. This vnode was
876 * referenced by namei() or by fexecve variant of fname handling.
878 oldtextvp = p->p_textvp;
879 p->p_textvp = newtextvp;
880 oldtextdvp = p->p_textdvp;
881 p->p_textdvp = newtextdvp;
883 oldbinname = p->p_binname;
884 p->p_binname = newbinname;
889 * Tell the DTrace fasttrap provider about the exec if it
890 * has declared an interest.
892 if (dtrace_fasttrap_exec)
893 dtrace_fasttrap_exec(p);
897 * Notify others that we exec'd, and clear the P_INEXEC flag
898 * as we're now a bona fide freshly-execed process.
900 KNOTE_LOCKED(p->p_klist, NOTE_EXEC);
901 p->p_flag &= ~P_INEXEC;
903 /* clear "fork but no exec" flag, as we _are_ execing */
904 p->p_acflag &= ~AFORK;
907 * Free any previous argument cache and replace it with
908 * the new argument cache, if any.
918 * Check if system-wide sampling is in effect or if the
919 * current process is using PMCs. If so, do exec() time
920 * processing. This processing needs to happen AFTER the
921 * P_INEXEC flag is cleared.
923 if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) {
924 VOP_UNLOCK(imgp->vp);
925 pe.pm_credentialschanged = credential_changing;
926 pe.pm_entryaddr = imgp->entry_addr;
928 PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe);
929 vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
933 /* Set values passed into the program in registers. */
934 (*p->p_sysent->sv_setregs)(td, imgp, stack_base);
936 VOP_MMAPPED(imgp->vp);
938 SDT_PROBE1(proc, , , exec__success, args->fname);
942 p->p_osrel = orig_osrel;
943 p->p_fctl0 = orig_fctl0;
944 p->p_elf_brandinfo = orig_brandinfo;
947 if (imgp->firstpage != NULL)
948 exec_unmap_first_page(imgp);
950 if (imgp->vp != NULL) {
952 VOP_CLOSE(imgp->vp, FREAD, td->td_ucred, td);
954 VOP_UNSET_TEXT_CHECKED(imgp->vp);
958 VOP_UNLOCK(imgp->vp);
959 if (args->fname != NULL)
961 if (newtextdvp != NULL)
963 free(newbinname, M_PARGS);
966 if (imgp->object != NULL)
967 vm_object_deallocate(imgp->object);
969 free(imgp->freepath, M_TEMP);
972 if (p->p_ptevents & PTRACE_EXEC) {
974 if (p->p_ptevents & PTRACE_EXEC)
975 td->td_dbgflags |= TDB_EXEC;
980 /* we're done here, clear P_INEXEC */
982 p->p_flag &= ~P_INEXEC;
985 SDT_PROBE1(proc, , , exec__failure, error);
988 if (imgp->newcred != NULL && oldcred != NULL)
989 crfree(imgp->newcred);
992 mac_execve_exit(imgp);
993 mac_execve_interpreter_exit(interpvplabel);
995 exec_free_args(args);
998 * Handle deferred decrement of ref counts.
1000 if (oldtextvp != NULL)
1002 if (oldtextdvp != NULL)
1004 free(oldbinname, M_PARGS);
1006 ktr_io_params_free(kiop);
1008 pargs_drop(oldargs);
1009 pargs_drop(newargs);
1010 if (oldsigacts != NULL)
1011 sigacts_free(oldsigacts);
1015 if (error && imgp->vmspace_destroyed) {
		/* Sorry, no process anymore: exit gracefully. */
1017 exec_cleanup(td, oldvmspace);
1018 exit1(td, 0, SIGABRT);
1028 * We don't want cpu_set_syscall_retval() to overwrite any of
1029 * the register values put in place by exec_setregs().
1030 * Implementations of cpu_set_syscall_retval() will leave
1031 * registers unmodified when returning EJUSTRETURN.
1033 return (error == 0 ? EJUSTRETURN : error);
1037 exec_cleanup(struct thread *td, struct vmspace *oldvmspace)
1039 if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
1040 KASSERT(td->td_proc->p_vmspace != oldvmspace,
1041 ("oldvmspace still used"));
1042 vmspace_free(oldvmspace);
1043 td->td_pflags &= ~TDP_EXECVMSPC;
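/*
 * Wire the first page of the image and map it into the kernel address
 * space with an sf_buf so that image activators can inspect the header.
 */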
1048 exec_map_first_page(struct image_params *imgp)
1054 if (imgp->firstpage != NULL)
1055 exec_unmap_first_page(imgp);
1057 object = imgp->vp->v_object;
1060 #if VM_NRESERVLEVEL > 0
1061 if ((object->flags & OBJ_COLORED) == 0) {
1062 VM_OBJECT_WLOCK(object);
1063 vm_object_color(object, 0);
1064 VM_OBJECT_WUNLOCK(object);
1067 error = vm_page_grab_valid_unlocked(&m, object, 0,
1068 VM_ALLOC_COUNT(VM_INITIAL_PAGEIN) |
1069 VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);
1071 if (error != VM_PAGER_OK)
1073 imgp->firstpage = sf_buf_alloc(m, 0);
1074 imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);
1080 exec_unmap_first_page(struct image_params *imgp)
1084 if (imgp->firstpage != NULL) {
1085 m = sf_buf_page(imgp->firstpage);
1086 sf_buf_free(imgp->firstpage);
1087 imgp->firstpage = NULL;
1088 vm_page_unwire(m, PQ_ACTIVE);
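/*
 * Hook run on the old image during exec: drop the thread's sigfastblock
 * page and perform per-process umtx exec-time cleanup.
 */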
1093 exec_onexec_old(struct thread *td)
1095 sigfastblock_clear(td);
1096 umtx_exec(td->td_proc);
1100 * This is an optimization which removes the unmanaged shared page
1101 * mapping. In combination with pmap_remove_pages(), which cleans all
1102 * managed mappings in the process' vmspace pmap, no work will be left
1103 * for pmap_remove(min, max).
1106 exec_free_abi_mappings(struct proc *p)
1108 struct vmspace *vmspace;
1109 struct sysentvec *sv;
1111 vmspace = p->p_vmspace;
1112 if (refcount_load(&vmspace->vm_refcnt) != 1)
1116 if (sv->sv_shared_page_obj == NULL)
1119 pmap_remove(vmspace_pmap(vmspace), sv->sv_shared_page_base,
1120 sv->sv_shared_page_base + sv->sv_shared_page_len);
/*
 * Run down the current address space and install a new one.  Map the shared
 * page.
 */
1128 exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv)
1131 struct proc *p = imgp->proc;
1132 struct vmspace *vmspace = p->p_vmspace;
1133 struct thread *td = curthread;
1135 vm_offset_t sv_minuser;
1138 imgp->vmspace_destroyed = true;
1141 if (p->p_sysent->sv_onexec_old != NULL)
1142 p->p_sysent->sv_onexec_old(td);
1145 EVENTHANDLER_DIRECT_INVOKE(process_exec, p, imgp);
	/*
	 * Blow away entire process VM, if address space not shared,
	 * otherwise, create a new VM space so that other threads are
	 * not disrupted.
	 */
1152 map = &vmspace->vm_map;
1154 sv_minuser = sv->sv_minuser;
1156 sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE);
1157 if (refcount_load(&vmspace->vm_refcnt) == 1 &&
1158 vm_map_min(map) == sv_minuser &&
1159 vm_map_max(map) == sv->sv_maxuser &&
1160 cpu_exec_vmspace_reuse(p, map)) {
1161 exec_free_abi_mappings(p);
1163 pmap_remove_pages(vmspace_pmap(vmspace));
1164 vm_map_remove(map, vm_map_min(map), vm_map_max(map));
1166 * An exec terminates mlockall(MCL_FUTURE).
1167 * ASLR and W^X states must be re-evaluated.
1170 vm_map_modflags(map, 0, MAP_WIREFUTURE | MAP_ASLR |
1171 MAP_ASLR_IGNSTART | MAP_ASLR_STACK | MAP_WXORX);
1174 error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
1177 vmspace = p->p_vmspace;
1178 map = &vmspace->vm_map;
1180 map->flags |= imgp->map_flags;
1182 /* Map a shared page */
1183 obj = sv->sv_shared_page_obj;
1185 vm_object_reference(obj);
1186 error = vm_map_fixed(map, obj, 0,
1187 sv->sv_shared_page_base, sv->sv_shared_page_len,
1188 VM_PROT_READ | VM_PROT_EXECUTE,
1189 VM_PROT_READ | VM_PROT_EXECUTE,
1190 MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
1191 if (error != KERN_SUCCESS) {
1192 vm_object_deallocate(obj);
1193 return (vm_mmap_to_errno(error));
1197 return (sv->sv_onexec != NULL ? sv->sv_onexec(p, imgp) : 0);
1201 * Compute the stack size limit and map the main process stack.
1204 exec_map_stack(struct image_params *imgp)
1206 struct rlimit rlim_stack;
1207 struct sysentvec *sv;
1210 struct vmspace *vmspace;
1211 vm_offset_t stack_addr, stack_top;
1213 int error, find_space, stack_off;
1214 vm_prot_t stack_prot;
1219 if (imgp->stack_sz != 0) {
1220 ssiz = trunc_page(imgp->stack_sz);
1222 lim_rlimit_proc(p, RLIMIT_STACK, &rlim_stack);
1224 if (ssiz > rlim_stack.rlim_max)
1225 ssiz = rlim_stack.rlim_max;
1226 if (ssiz > rlim_stack.rlim_cur) {
1227 rlim_stack.rlim_cur = ssiz;
1228 kern_setrlimit(curthread, RLIMIT_STACK, &rlim_stack);
1230 } else if (sv->sv_maxssiz != NULL) {
1231 ssiz = *sv->sv_maxssiz;
1236 vmspace = p->p_vmspace;
1237 map = &vmspace->vm_map;
1239 stack_prot = sv->sv_shared_page_obj != NULL && imgp->stack_prot != 0 ?
1240 imgp->stack_prot : sv->sv_stackprot;
1241 if ((map->flags & MAP_ASLR_STACK) != 0) {
1242 stack_addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
1243 lim_max(curthread, RLIMIT_DATA));
1244 find_space = VMFS_ANY_SPACE;
1246 stack_addr = sv->sv_usrstack - ssiz;
1247 find_space = VMFS_NO_SPACE;
1249 error = vm_map_find(map, NULL, 0, &stack_addr, (vm_size_t)ssiz,
1250 sv->sv_usrstack, find_space, stack_prot, VM_PROT_ALL,
1251 MAP_STACK_GROWS_DOWN);
1252 if (error != KERN_SUCCESS) {
1253 uprintf("exec_new_vmspace: mapping stack size %#jx prot %#x "
1254 "failed, mach error %d errno %d\n", (uintmax_t)ssiz,
1255 stack_prot, error, vm_mmap_to_errno(error));
1256 return (vm_mmap_to_errno(error));
1259 stack_top = stack_addr + ssiz;
1260 if ((map->flags & MAP_ASLR_STACK) != 0) {
1261 /* Randomize within the first page of the stack. */
1262 arc4rand(&stack_off, sizeof(stack_off), 0);
1263 stack_top -= rounddown2(stack_off & PAGE_MASK, sizeof(void *));
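		/*
		 * E.g., with 4 KB pages and 8-byte pointers this lowers
		 * stack_top by a pointer-aligned offset in the range
		 * [0, 4088] bytes.
		 */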
1267 * vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they
1268 * are still used to enforce the stack rlimit on the process stack.
1270 vmspace->vm_maxsaddr = (char *)stack_addr;
1271 vmspace->vm_stacktop = stack_top;
1272 vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
1278 * Copy out argument and environment strings from the old process address
1279 * space into the temporary string buffer.
1282 exec_copyin_args(struct image_args *args, const char *fname,
1283 enum uio_seg segflg, char **argv, char **envv)
1288 bzero(args, sizeof(*args));
1293 * Allocate demand-paged memory for the file name, argument, and
1294 * environment strings.
1296 error = exec_alloc_args(args);
1301 * Copy the file name.
1303 error = exec_args_add_fname(args, fname, segflg);
1308 * extract arguments first
1311 error = fueword(argv++, &arg);
1318 error = exec_args_add_arg(args, (char *)(uintptr_t)arg,
1325 * extract environment strings
1329 error = fueword(envv++, &env);
1336 error = exec_args_add_env(args,
1337 (char *)(uintptr_t)env, UIO_USERSPACE);
1346 exec_free_args(args);
1350 struct exec_args_kva {
1353 SLIST_ENTRY(exec_args_kva) next;
1356 DPCPU_DEFINE_STATIC(struct exec_args_kva *, exec_args_kva);
1358 static SLIST_HEAD(, exec_args_kva) exec_args_kva_freelist;
1359 static struct mtx exec_args_kva_mtx;
1360 static u_int exec_args_gen;
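/*
 * Argument buffers are kept in a small cache of preallocated KVA ranges:
 * a freed range is parked in the current CPU's DPCPU slot when possible and
 * otherwise pushed onto a global freelist protected by exec_args_kva_mtx.
 * exec_args_gen is bumped on low-memory events to mark cached ranges whose
 * backing pages should be discarded with MADV_FREE.
 */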
1363 exec_prealloc_args_kva(void *arg __unused)
1365 struct exec_args_kva *argkva;
1368 SLIST_INIT(&exec_args_kva_freelist);
1369 mtx_init(&exec_args_kva_mtx, "exec args kva", NULL, MTX_DEF);
1370 for (i = 0; i < exec_map_entries; i++) {
1371 argkva = malloc(sizeof(*argkva), M_PARGS, M_WAITOK);
1372 argkva->addr = kmap_alloc_wait(exec_map, exec_map_entry_size);
1373 argkva->gen = exec_args_gen;
1374 SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
1377 SYSINIT(exec_args_kva, SI_SUB_EXEC, SI_ORDER_ANY, exec_prealloc_args_kva, NULL);
1380 exec_alloc_args_kva(void **cookie)
1382 struct exec_args_kva *argkva;
1384 argkva = (void *)atomic_readandclear_ptr(
1385 (uintptr_t *)DPCPU_PTR(exec_args_kva));
1386 if (argkva == NULL) {
1387 mtx_lock(&exec_args_kva_mtx);
1388 while ((argkva = SLIST_FIRST(&exec_args_kva_freelist)) == NULL)
1389 (void)mtx_sleep(&exec_args_kva_freelist,
1390 &exec_args_kva_mtx, 0, "execkva", 0);
1391 SLIST_REMOVE_HEAD(&exec_args_kva_freelist, next);
1392 mtx_unlock(&exec_args_kva_mtx);
1394 kasan_mark((void *)argkva->addr, exec_map_entry_size,
1395 exec_map_entry_size, 0);
1396 *(struct exec_args_kva **)cookie = argkva;
1397 return (argkva->addr);
1401 exec_release_args_kva(struct exec_args_kva *argkva, u_int gen)
1405 base = argkva->addr;
1406 kasan_mark((void *)argkva->addr, 0, exec_map_entry_size,
1407 KASAN_EXEC_ARGS_FREED);
1408 if (argkva->gen != gen) {
1409 (void)vm_map_madvise(exec_map, base, base + exec_map_entry_size,
1413 if (!atomic_cmpset_ptr((uintptr_t *)DPCPU_PTR(exec_args_kva),
1414 (uintptr_t)NULL, (uintptr_t)argkva)) {
1415 mtx_lock(&exec_args_kva_mtx);
1416 SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
1417 wakeup_one(&exec_args_kva_freelist);
1418 mtx_unlock(&exec_args_kva_mtx);
1423 exec_free_args_kva(void *cookie)
1426 exec_release_args_kva(cookie, exec_args_gen);
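/*
 * Low-memory event handler: advance the generation count and apply
 * MADV_FREE to every cached argument KVA range so its backing pages can
 * be reclaimed.
 */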
1430 exec_args_kva_lowmem(void *arg __unused)
1432 SLIST_HEAD(, exec_args_kva) head;
1433 struct exec_args_kva *argkva;
1437 gen = atomic_fetchadd_int(&exec_args_gen, 1) + 1;
1440 * Force an madvise of each KVA range. Any currently allocated ranges
1441 * will have MADV_FREE applied once they are freed.
1444 mtx_lock(&exec_args_kva_mtx);
1445 SLIST_SWAP(&head, &exec_args_kva_freelist, exec_args_kva);
1446 mtx_unlock(&exec_args_kva_mtx);
1447 while ((argkva = SLIST_FIRST(&head)) != NULL) {
1448 SLIST_REMOVE_HEAD(&head, next);
1449 exec_release_args_kva(argkva, gen);
1453 argkva = (void *)atomic_readandclear_ptr(
1454 (uintptr_t *)DPCPU_ID_PTR(i, exec_args_kva));
1456 exec_release_args_kva(argkva, gen);
1459 EVENTHANDLER_DEFINE(vm_lowmem, exec_args_kva_lowmem, NULL,
1460 EVENTHANDLER_PRI_ANY);
1463 * Allocate temporary demand-paged, zero-filled memory for the file name,
1464 * argument, and environment strings.
1467 exec_alloc_args(struct image_args *args)
1470 args->buf = (char *)exec_alloc_args_kva(&args->bufkva);
1475 exec_free_args(struct image_args *args)
1478 if (args->buf != NULL) {
1479 exec_free_args_kva(args->bufkva);
1482 if (args->fname_buf != NULL) {
1483 free(args->fname_buf, M_TEMP);
1484 args->fname_buf = NULL;
/*
 * A set of functions to fill struct image args.
 *
 * NOTE: exec_args_add_fname() must be called (possibly with a NULL
 * fname) before the other functions.  All exec_args_add_arg() calls must
 * be made before any exec_args_add_env() calls.  exec_args_adjust_args()
 * may be called any time after exec_args_add_fname().
 *
 * exec_args_add_fname() - install path to be executed
 * exec_args_add_arg() - append an argument string
 * exec_args_add_env() - append an env string
 * exec_args_adjust_args() - adjust location of the argument list to
 *			     allow new arguments to be prepended
 */
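/*
 * Illustrative call sequence (a sketch, not taken from a real caller;
 * kernel-resident strings assumed):
 *
 *	error = exec_alloc_args(args);
 *	error = exec_args_add_fname(args, "/bin/sh", UIO_SYSSPACE);
 *	error = exec_args_add_arg(args, "sh", UIO_SYSSPACE);
 *	error = exec_args_add_arg(args, "-c", UIO_SYSSPACE);
 *	error = exec_args_add_env(args, "PATH=/bin", UIO_SYSSPACE);
 *
 * Error handling is elided; exec_free_args() releases the buffer.
 */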
1503 exec_args_add_fname(struct image_args *args, const char *fname,
1504 enum uio_seg segflg)
1509 KASSERT(args->fname == NULL, ("fname already appended"));
1510 KASSERT(args->endp == NULL, ("already appending to args"));
1512 if (fname != NULL) {
1513 args->fname = args->buf;
1514 error = segflg == UIO_SYSSPACE ?
1515 copystr(fname, args->fname, PATH_MAX, &length) :
1516 copyinstr(fname, args->fname, PATH_MAX, &length);
1518 return (error == ENAMETOOLONG ? E2BIG : error);
1522 /* Set up for _arg_*()/_env_*() */
1523 args->endp = args->buf + length;
1524 /* begin_argv must be set and kept updated */
1525 args->begin_argv = args->endp;
1526 KASSERT(exec_map_entry_size - length >= ARG_MAX,
1527 ("too little space remaining for arguments %zu < %zu",
1528 exec_map_entry_size - length, (size_t)ARG_MAX));
1529 args->stringspace = ARG_MAX;
1535 exec_args_add_str(struct image_args *args, const char *str,
1536 enum uio_seg segflg, int *countp)
1541 KASSERT(args->endp != NULL, ("endp not initialized"));
1542 KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));
1544 error = (segflg == UIO_SYSSPACE) ?
1545 copystr(str, args->endp, args->stringspace, &length) :
1546 copyinstr(str, args->endp, args->stringspace, &length);
1548 return (error == ENAMETOOLONG ? E2BIG : error);
1549 args->stringspace -= length;
1550 args->endp += length;
1557 exec_args_add_arg(struct image_args *args, const char *argp,
1558 enum uio_seg segflg)
1561 KASSERT(args->envc == 0, ("appending args after env"));
1563 return (exec_args_add_str(args, argp, segflg, &args->argc));
1567 exec_args_add_env(struct image_args *args, const char *envp,
1568 enum uio_seg segflg)
1571 if (args->envc == 0)
1572 args->begin_envv = args->endp;
1574 return (exec_args_add_str(args, envp, segflg, &args->envc));
1578 exec_args_adjust_args(struct image_args *args, size_t consume, ssize_t extend)
1582 KASSERT(args->endp != NULL, ("endp not initialized"));
1583 KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));
1585 offset = extend - consume;
1586 if (args->stringspace < offset)
1588 memmove(args->begin_argv + extend, args->begin_argv + consume,
1589 args->endp - args->begin_argv + consume);
1591 args->begin_envv += offset;
1592 args->endp += offset;
1593 args->stringspace -= offset;
1598 exec_args_get_begin_envv(struct image_args *args)
1601 KASSERT(args->endp != NULL, ("endp not initialized"));
1604 return (args->begin_envv);
1605 return (args->endp);
1609 * Copy strings out to the new process address space, constructing new arg
1610 * and env vector tables. Return a pointer to the base so that it can be used
1611 * as the initial stack pointer.
1614 exec_copyout_strings(struct image_params *imgp, uintptr_t *stack_base)
1619 uintptr_t destp, ustringp;
1620 struct ps_strings *arginfo;
1622 struct sysentvec *sysent;
1623 size_t execpath_len;
1624 int error, szsigcode;
1625 char canary[sizeof(long) * 8];
1628 sysent = p->p_sysent;
1630 destp = PROC_PS_STRINGS(p);
1631 arginfo = imgp->ps_strings = (void *)destp;
1636 if (sysent->sv_sigcode_base == 0 && sysent->sv_szsigcode != NULL) {
1637 szsigcode = *(sysent->sv_szsigcode);
1639 destp = rounddown2(destp, sizeof(void *));
1640 error = copyout(sysent->sv_sigcode, (void *)destp, szsigcode);
1646 * Copy the image path for the rtld.
1648 if (imgp->execpath != NULL && imgp->auxargs != NULL) {
1649 execpath_len = strlen(imgp->execpath) + 1;
1650 destp -= execpath_len;
1651 destp = rounddown2(destp, sizeof(void *));
1652 imgp->execpathp = (void *)destp;
1653 error = copyout(imgp->execpath, imgp->execpathp, execpath_len);
1659 * Prepare the canary for SSP.
1661 arc4rand(canary, sizeof(canary), 0);
1662 destp -= sizeof(canary);
1663 imgp->canary = (void *)destp;
1664 error = copyout(canary, imgp->canary, sizeof(canary));
1667 imgp->canarylen = sizeof(canary);
1670 * Prepare the pagesizes array.
1672 imgp->pagesizeslen = sizeof(pagesizes[0]) * MAXPAGESIZES;
1673 destp -= imgp->pagesizeslen;
1674 destp = rounddown2(destp, sizeof(void *));
1675 imgp->pagesizes = (void *)destp;
1676 error = copyout(pagesizes, imgp->pagesizes, imgp->pagesizeslen);
1681 * Allocate room for the argument and environment strings.
1683 destp -= ARG_MAX - imgp->args->stringspace;
1684 destp = rounddown2(destp, sizeof(void *));
1687 if (imgp->auxargs) {
1689 * Allocate room on the stack for the ELF auxargs
1690 * array. It has up to AT_COUNT entries.
1692 destp -= AT_COUNT * sizeof(Elf_Auxinfo);
1693 destp = rounddown2(destp, sizeof(void *));
1696 vectp = (char **)destp;
1699 * Allocate room for the argv[] and env vectors including the
1700 * terminating NULL pointers.
1702 vectp -= imgp->args->argc + 1 + imgp->args->envc + 1;
1705 * vectp also becomes our initial stack base
1707 *stack_base = (uintptr_t)vectp;
1709 stringp = imgp->args->begin_argv;
1710 argc = imgp->args->argc;
1711 envc = imgp->args->envc;
1714 * Copy out strings - arguments and environment.
1716 error = copyout(stringp, (void *)ustringp,
1717 ARG_MAX - imgp->args->stringspace);
1722 * Fill in "ps_strings" struct for ps, w, etc.
1725 if (suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp) != 0 ||
1726 suword32(&arginfo->ps_nargvstr, argc) != 0)
1730 * Fill in argument portion of vector table.
1732 for (; argc > 0; --argc) {
1733 if (suword(vectp++, ustringp) != 0)
1735 while (*stringp++ != 0)
1740 /* a null vector table pointer separates the argp's from the envp's */
1741 if (suword(vectp++, 0) != 0)
1745 if (suword(&arginfo->ps_envstr, (long)(intptr_t)vectp) != 0 ||
1746 suword32(&arginfo->ps_nenvstr, envc) != 0)
1750 * Fill in environment portion of vector table.
1752 for (; envc > 0; --envc) {
1753 if (suword(vectp++, ustringp) != 0)
1755 while (*stringp++ != 0)
1760 /* end of vector table is a null pointer */
1761 if (suword(vectp, 0) != 0)
1764 if (imgp->auxargs) {
1766 error = imgp->sysent->sv_copyout_auxargs(imgp,
1776 * Check permissions of file to execute.
1777 * Called with imgp->vp locked.
1778 * Return 0 for success or error code on failure.
1781 exec_check_permissions(struct image_params *imgp)
1783 struct vnode *vp = imgp->vp;
1784 struct vattr *attr = imgp->attr;
1790 /* Get file attributes */
1791 error = VOP_GETATTR(vp, attr, td->td_ucred);
1796 error = mac_vnode_check_exec(td->td_ucred, imgp->vp, imgp);
1802 * 1) Check if file execution is disabled for the filesystem that
1803 * this file resides on.
1804 * 2) Ensure that at least one execute bit is on. Otherwise, a
1805 * privileged user will always succeed, and we don't want this
1806 * to happen unless the file really is executable.
1807 * 3) Ensure that the file is a regular file.
1809 if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
1810 (attr->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0 ||
1811 (attr->va_type != VREG))
1815 * Zero length files can't be exec'd
1817 if (attr->va_size == 0)
1821 * Check for execute permission to file based on current credentials.
1823 error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	/*
	 * Check number of open-for-writes on the file and deny execution
	 * if there are any.
	 *
	 * Add a text reference now so no one can write to the
	 * executable while we're activating it.
	 *
	 * Remember if this was set before and unset it in case this is not
	 * actually an executable image.
	 */
1837 error = VOP_SET_TEXT(vp);
1840 imgp->textset = true;
	/*
	 * Call filesystem specific open routine (which does nothing in the
	 * general case).
	 */
1846 error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
1848 imgp->opened = true;
1853 * Exec handler registration
1856 exec_register(const struct execsw *execsw_arg)
1858 const struct execsw **es, **xs, **newexecsw;
1859 u_int count = 2; /* New slot and trailing NULL */
1862 for (es = execsw; *es; es++)
1864 newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1867 for (es = execsw; *es; es++)
1872 free(execsw, M_TEMP);
1878 exec_unregister(const struct execsw *execsw_arg)
1880 const struct execsw **es, **xs, **newexecsw;
1884 panic("unregister with no handlers left?\n");
1886 for (es = execsw; *es; es++) {
1887 if (*es == execsw_arg)
1892 for (es = execsw; *es; es++)
1893 if (*es != execsw_arg)
1895 newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1897 for (es = execsw; *es; es++)
1898 if (*es != execsw_arg)
1902 free(execsw, M_TEMP);
1908 * Write out a core segment to the compression stream.
1911 compress_chunk(struct coredump_params *cp, char *base, char *buf, size_t len)
1917 chunk_len = MIN(len, CORE_BUF_SIZE);
1920 * We can get EFAULT error here.
1921 * In that case zero out the current chunk of the segment.
1923 error = copyin(base, buf, chunk_len);
1925 bzero(buf, chunk_len);
1926 error = compressor_write(cp->comp, buf, chunk_len);
1936 core_write(struct coredump_params *cp, const void *base, size_t len,
1937 off_t offset, enum uio_seg seg, size_t *resid)
1940 return (vn_rdwr_inchunks(UIO_WRITE, cp->vp, __DECONST(void *, base),
1941 len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED,
1942 cp->active_cred, cp->file_cred, resid, cp->td));
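/*
 * Write a range of user addresses to the core file, faulting pages in as
 * needed; ranges with no backing are left as holes by truncating the file
 * past them instead of writing zeros.
 */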
1946 core_output(char *base, size_t len, off_t offset, struct coredump_params *cp,
1951 size_t resid, runlen;
1955 KASSERT((uintptr_t)base % PAGE_SIZE == 0,
1956 ("%s: user address %p is not page-aligned", __func__, base));
1958 if (cp->comp != NULL)
1959 return (compress_chunk(cp, base, tmpbuf, len));
1961 map = &cp->td->td_proc->p_vmspace->vm_map;
1962 for (; len > 0; base += runlen, offset += runlen, len -= runlen) {
1964 * Attempt to page in all virtual pages in the range. If a
1965 * virtual page is not backed by the pager, it is represented as
1966 * a hole in the file. This can occur with zero-filled
1967 * anonymous memory or truncated files, for example.
		for (runlen = 0; runlen < len; runlen += PAGE_SIZE) {
			if (core_dump_can_intr && curproc_sigkilled())
				return (EINTR);
			error = vm_fault(map, (uintptr_t)base + runlen,
			    VM_PROT_READ, VM_FAULT_NOFILL, NULL);
			if (runlen == 0)
				success = error == KERN_SUCCESS;
			else if ((error == KERN_SUCCESS) != success)
				break;
		}
1981 error = core_write(cp, base, runlen, offset,
1982 UIO_USERSPACE, &resid);
1984 if (error != EFAULT)
				/*
				 * EFAULT may be returned if the user mapping
				 * could not be accessed, e.g., because a mapped
				 * file has been truncated.  Skip the page if no
				 * progress was made, to protect against a
				 * hypothetical scenario where vm_fault() was
				 * successful but core_write() returns EFAULT
				 * anyway.
				 */
2004 error = vn_start_write(cp->vp, &mp, V_WAIT);
2007 vn_lock(cp->vp, LK_EXCLUSIVE | LK_RETRY);
2008 error = vn_truncate_locked(cp->vp, offset + runlen,
2009 false, cp->td->td_ucred);
2011 vn_finished_write(mp);
2020 * Drain into a core file.
2023 sbuf_drain_core_output(void *arg, const char *data, int len)
2025 struct coredump_params *cp;
2030 p = cp->td->td_proc;
	/*
	 * Some kern_proc out routines that print to this sbuf may
	 * call us with the process lock held.  Draining with the
	 * non-sleepable lock held is unsafe.  The lock is needed for
	 * those routines when dumping a live process.  In our case we
	 * can safely release the lock before draining and acquire
	 * it after.
	 */
2040 locked = PROC_LOCKED(p);
	if (cp->comp != NULL)
		error = compressor_write(cp->comp, __DECONST(char *, data),
		    len);
	else
		error = core_write(cp, __DECONST(void *, data), len, cp->offset,
		    UIO_SYSSPACE, NULL);