/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1993, David Greenman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_capsicum.h"
#include "opt_hwpmc_hooks.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/acct.h>
#include <sys/asan.h>
#include <sys/capsicum.h>
#include <sys/compressor.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/timers.h>
#include <sys/umtxvar.h>
#include <sys/vnode.h>
#include <sys/wait.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
dtrace_execexit_func_t	dtrace_fasttrap_exec;
#endif
SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE1(proc, , , exec, "char *");
SDT_PROBE_DEFINE1(proc, , , exec__failure, "int");
SDT_PROBE_DEFINE1(proc, , , exec__success, "char *");
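/*
 * These probes are visible to DTrace under the "proc" provider; SDT maps
 * "__" in a probe name to "-".  For example, the exec path and the error
 * number of a failed exec can be observed with:
 *
 *	# dtrace -n 'proc:::exec { trace(copyinstr(arg0)); }'
 *	# dtrace -n 'proc:::exec-failure { trace(arg0); }'
 */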
MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");

int coredump_pack_fileinfo = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_fileinfo, CTLFLAG_RWTUN,
    &coredump_pack_fileinfo, 0,
    "Enable file path packing in 'procstat -f' coredump notes");

int coredump_pack_vmmapinfo = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_vmmapinfo, CTLFLAG_RWTUN,
    &coredump_pack_vmmapinfo, 0,
    "Enable file path packing in 'procstat -v' coredump notes");

static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
static int do_execve(struct thread *td, struct image_args *args,
    struct mac *mac_p, struct vmspace *oldvmspace);
/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD|
    CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_ps_strings, "LU",
    "Location of process' ps_strings structure");

/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD|
    CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_usrstack, "LU",
    "Top of process stack");

SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_MPSAFE,
    NULL, 0, sysctl_kern_stackprot, "I",
    "Stack memory permissions");
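/*
 * Example: the read-only nodes above can be queried from userspace, e.g.:
 *
 *	$ sysctl kern.ps_strings kern.usrstack kern.stackprot
 *
 * Each handler below reports the value for the calling process (curproc).
 */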
u_long ps_arg_cache_limit = PAGE_SIZE / 16;
SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
    &ps_arg_cache_limit, 0,
    "Process' command line characters cache limit");

static int disallow_high_osrel;
SYSCTL_INT(_kern, OID_AUTO, disallow_high_osrel, CTLFLAG_RW,
    &disallow_high_osrel, 0,
    "Disallow execution of binaries built for higher version of the world");

static int map_at_zero = 0;
SYSCTL_INT(_security_bsd, OID_AUTO, map_at_zero, CTLFLAG_RWTUN, &map_at_zero, 0,
    "Permit processes to map an object at virtual address 0.");

static int core_dump_can_intr = 1;
SYSCTL_INT(_kern, OID_AUTO, core_dump_can_intr, CTLFLAG_RWTUN,
    &core_dump_can_intr, 0,
    "Core dumping interruptible with SIGKILL");
static int
sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
{
        struct proc *p;
        vm_offset_t ps_strings;

        p = curproc;
        if (req->flags & SCTL_MASK32) {
                unsigned int val;

                val = (unsigned int)PROC_PS_STRINGS(p);
                return (SYSCTL_OUT(req, &val, sizeof(val)));
        }
        ps_strings = PROC_PS_STRINGS(p);
        return (SYSCTL_OUT(req, &ps_strings, sizeof(ps_strings)));
}

static int
sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
{
        struct proc *p;
        int error;

        p = curproc;
        if (req->flags & SCTL_MASK32) {
                unsigned int val;

                val = (unsigned int)p->p_sysent->sv_usrstack;
                error = SYSCTL_OUT(req, &val, sizeof(val));
        } else {
                error = SYSCTL_OUT(req, &p->p_sysent->sv_usrstack,
                    sizeof(p->p_sysent->sv_usrstack));
        }
        return (error);
}

static int
sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
{
        struct proc *p;

        p = curproc;
        return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
            sizeof(p->p_sysent->sv_stackprot)));
}
/*
 * Each of the items is a pointer to a `const struct execsw', hence the
 * double pointer here.
 */
static const struct execsw **execsw;
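/*
 * For reference, struct execsw (declared in <sys/imgact.h>) pairs an
 * image-activator callback with a format name, roughly:
 *
 *	struct execsw {
 *		imgact_t *ex_imgact;	// function to handle this format
 *		const char *ex_name;	// name of executable format
 *	};
 */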
#ifndef _SYS_SYSPROTO_H_
struct execve_args {
        char    *fname;
        char    **argv;
        char    **envv;
};
#endif

int
sys_execve(struct thread *td, struct execve_args *uap)
{
        struct image_args args;
        struct vmspace *oldvmspace;
        int error;

        error = pre_execve(td, &oldvmspace);
        if (error != 0)
                return (error);
        error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
            uap->argv, uap->envv);
        if (error == 0)
                error = kern_execve(td, &args, NULL, oldvmspace);
        post_execve(td, error, oldvmspace);
        AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
        return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct fexecve_args {
        int     fd;
        char    **argv;
        char    **envv;
};
#endif

int
sys_fexecve(struct thread *td, struct fexecve_args *uap)
{
        struct image_args args;
        struct vmspace *oldvmspace;
        int error;

        error = pre_execve(td, &oldvmspace);
        if (error != 0)
                return (error);
        error = exec_copyin_args(&args, NULL, UIO_SYSSPACE,
            uap->argv, uap->envv);
        if (error == 0) {
                args.fd = uap->fd;
                error = kern_execve(td, &args, NULL, oldvmspace);
        }
        post_execve(td, error, oldvmspace);
        AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
        return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct __mac_execve_args {
        char    *fname;
        char    **argv;
        char    **envv;
        struct mac      *mac_p;
};
#endif

int
sys___mac_execve(struct thread *td, struct __mac_execve_args *uap)
{
#ifdef MAC
        struct image_args args;
        struct vmspace *oldvmspace;
        int error;

        error = pre_execve(td, &oldvmspace);
        if (error != 0)
                return (error);
        error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
            uap->argv, uap->envv);
        if (error == 0)
                error = kern_execve(td, &args, uap->mac_p, oldvmspace);
        post_execve(td, error, oldvmspace);
        AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
        return (error);
#else
        return (ENOSYS);
#endif
}
int
pre_execve(struct thread *td, struct vmspace **oldvmspace)
{
        struct proc *p;
        int error;

        KASSERT(td == curthread, ("non-current thread %p", td));
        error = 0;
        p = td->td_proc;
        if ((p->p_flag & P_HADTHREADS) != 0) {
                PROC_LOCK(p);
                if (thread_single(p, SINGLE_BOUNDARY) != 0)
                        error = ERESTART;
                PROC_UNLOCK(p);
        }
        KASSERT(error != 0 || (td->td_pflags & TDP_EXECVMSPC) == 0,
            ("nested execve"));
        *oldvmspace = p->p_vmspace;
        return (error);
}

void
post_execve(struct thread *td, int error, struct vmspace *oldvmspace)
{
        struct proc *p;

        KASSERT(td == curthread, ("non-current thread %p", td));
        p = td->td_proc;
        if ((p->p_flag & P_HADTHREADS) != 0) {
                PROC_LOCK(p);
                /*
                 * If success, we upgrade to SINGLE_EXIT state to
                 * force other threads to suicide.
                 */
                if (error == EJUSTRETURN)
                        thread_single(p, SINGLE_EXIT);
                else
                        thread_single_end(p, SINGLE_BOUNDARY);
                PROC_UNLOCK(p);
        }
        exec_cleanup(td, oldvmspace);
}
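/*
 * pre_execve() and post_execve() bracket every execve variant above:
 * pre_execve() single-threads the process at a boundary, and post_execve()
 * either commits (SINGLE_EXIT when kern_execve() returned EJUSTRETURN) or
 * rolls the boundary back, then releases the old vmspace via
 * exec_cleanup().
 */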
/*
 * kern_execve() has the astonishing property of not always returning to
 * the caller.  If sufficiently bad things happen during the call to
 * do_execve(), it can end up calling exit1(); as a result, callers must
 * avoid doing anything which they might need to undo (e.g., allocating
 * memory).
 */
int
kern_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
    struct vmspace *oldvmspace)
{
        TSEXEC(td->td_proc->p_pid, args->begin_argv);
        AUDIT_ARG_ARGV(args->begin_argv, args->argc,
            exec_args_get_begin_envv(args) - args->begin_argv);
        AUDIT_ARG_ENVV(exec_args_get_begin_envv(args), args->envc,
            args->endp - exec_args_get_begin_envv(args));
        return (do_execve(td, args, mac_p, oldvmspace));
}
static void
execve_nosetid(struct image_params *imgp)
{
        imgp->credential_setid = false;
        if (imgp->newcred != NULL) {
                crfree(imgp->newcred);
                imgp->newcred = NULL;
        }
}
/*
 * In-kernel implementation of execve().  All arguments are assumed to be
 * userspace pointers from the passed thread.
 */
static int
do_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
    struct vmspace *oldvmspace)
{
        struct proc *p = td->td_proc;
        struct nameidata nd;
        struct ucred *oldcred;
        struct uidinfo *euip = NULL;
        uintptr_t stack_base;
        struct image_params image_params, *imgp;
        struct vattr attr;
        int (*img_first)(struct image_params *);
        struct pargs *oldargs = NULL, *newargs = NULL;
        struct sigacts *oldsigacts = NULL, *newsigacts = NULL;
#ifdef KTRACE
        struct ktr_io_params *kiop;
#endif
        struct vnode *oldtextvp, *newtextvp;
        struct vnode *oldtextdvp, *newtextdvp;
        char *oldbinname, *newbinname;
        bool credential_changing;
#ifdef MAC
        struct label *interpvplabel = NULL;
        bool will_transition;
#endif
#ifdef HWPMC_HOOKS
        struct pmckern_procexec pe;
#endif
        int error, i, orig_osrel;
        uint32_t orig_fctl0;
        Elf_Brandinfo *orig_brandinfo;
        size_t freepath_size;
        static const char fexecv_proc_title[] = "(fexecv)";

        imgp = &image_params;
        oldtextvp = oldtextdvp = NULL;
        newtextvp = newtextdvp = NULL;
        newbinname = oldbinname = NULL;
#ifdef KTRACE
        kiop = NULL;
#endif
        /*
         * Lock the process and set the P_INEXEC flag to indicate that
         * it should be left alone until we're done here.  This is
         * necessary to avoid race conditions - e.g. in ptrace() -
         * that might allow a local user to illicitly obtain elevated
         * privileges.
         */
        PROC_LOCK(p);
        KASSERT((p->p_flag & P_INEXEC) == 0,
            ("%s(): process already has P_INEXEC flag", __func__));
        p->p_flag |= P_INEXEC;
        PROC_UNLOCK(p);

        /*
         * Initialize part of the common data
         */
        bzero(imgp, sizeof(*imgp));
        imgp->proc = p;
        imgp->attr = &attr;
        imgp->args = args;
        oldcred = p->p_ucred;
        orig_osrel = p->p_osrel;
        orig_fctl0 = p->p_fctl0;
        orig_brandinfo = p->p_elf_brandinfo;

#ifdef MAC
        error = mac_execve_enter(imgp, mac_p);
        if (error)
                goto exec_fail;
#endif

        SDT_PROBE1(proc, , , exec, args->fname);
interpret:
        if (args->fname != NULL) {
#ifdef CAPABILITY_MODE
                /*
                 * While capability mode can't reach this point via direct
                 * path arguments to execve(), we also don't allow
                 * interpreters to be used in capability mode (for now).
                 * Catch indirect lookups and return a permissions error.
                 */
                if (IN_CAPABILITY_MODE(td)) {
                        error = ECAPMODE;
                        goto exec_fail;
                }
#endif

                /*
                 * Translate the file name.  namei() returns a vnode
                 * pointer in ni_vp among other things.
                 */
                NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | LOCKSHARED | FOLLOW |
                    SAVENAME | AUDITVNODE1 | WANTPARENT, UIO_SYSSPACE,
                    args->fname, td);

                error = namei(&nd);
                if (error)
                        goto exec_fail;

                newtextvp = nd.ni_vp;
                newtextdvp = nd.ni_dvp;
                nd.ni_dvp = NULL;
                newbinname = malloc(nd.ni_cnd.cn_namelen + 1, M_PARGS,
                    M_WAITOK);
                memcpy(newbinname, nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen);
                newbinname[nd.ni_cnd.cn_namelen] = '\0';
                imgp->vp = newtextvp;

                /*
                 * Do the best to calculate the full path to the image file.
                 */
                if (args->fname[0] == '/') {
                        imgp->execpath = args->fname;
                } else {
                        VOP_UNLOCK(imgp->vp);
                        freepath_size = MAXPATHLEN;
                        if (vn_fullpath_hardlink(newtextvp, newtextdvp,
                            newbinname, nd.ni_cnd.cn_namelen, &imgp->execpath,
                            &imgp->freepath, &freepath_size) != 0)
                                imgp->execpath = args->fname;
                        vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
                }
        } else {
                AUDIT_ARG_FD(args->fd);

                /*
                 * If the descriptor was not opened with O_PATH, then
                 * we require that it was opened with O_EXEC or
                 * O_RDONLY.  In either case, exec_check_permissions()
                 * below checks the _current_ file access mode regardless
                 * of the permissions additionally checked at the
                 * open(2) call.
                 */
                error = fgetvp_exec(td, args->fd, &cap_fexecve_rights,
                    &newtextvp);
                if (error != 0)
                        goto exec_fail;

                if (vn_fullpath(newtextvp, &imgp->execpath,
                    &imgp->freepath) != 0)
                        imgp->execpath = args->fname;
                vn_lock(newtextvp, LK_SHARED | LK_RETRY);
                AUDIT_ARG_VNODE1(newtextvp);
                imgp->vp = newtextvp;
        }
        /*
         * Check file permissions.  Also 'opens' file and sets its vnode to
         * text mode.
         */
        error = exec_check_permissions(imgp);
        if (error)
                goto exec_fail_dealloc;

        imgp->object = imgp->vp->v_object;
        if (imgp->object != NULL)
                vm_object_reference(imgp->object);

        error = exec_map_first_page(imgp);
        if (error)
                goto exec_fail_dealloc;

        imgp->proc->p_osrel = 0;
        imgp->proc->p_fctl0 = 0;
        imgp->proc->p_elf_brandinfo = NULL;
        /*
         * Implement image setuid/setgid.
         *
         * Determine new credentials before attempting image activators
         * so that it can be used by process_exec handlers to determine
         * credential/setid changes.
         *
         * Don't honor setuid/setgid if the filesystem prohibits it or if
         * the process is being traced.
         *
         * We disable setuid/setgid/etc in capability mode on the basis
         * that most setugid applications are not written with that
         * environment in mind, and will therefore almost certainly operate
         * incorrectly.  In principle there's no reason that setugid
         * applications might not be useful in capability mode, so we may want
         * to reconsider this conservative design choice in the future.
         *
         * XXXMAC: For the time being, use NOSUID to also prohibit
         * transitions on the file system.
         */
        credential_changing = false;
        credential_changing |= (attr.va_mode & S_ISUID) &&
            oldcred->cr_uid != attr.va_uid;
        credential_changing |= (attr.va_mode & S_ISGID) &&
            oldcred->cr_gid != attr.va_gid;
#ifdef MAC
        will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp,
            interpvplabel, imgp) != 0;
        credential_changing |= will_transition;
#endif
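        /*
         * Example: for a root-owned, mode 4555 binary executed by an
         * unprivileged user, S_ISUID is set and cr_uid != va_uid, so
         * credential_changing is true and the setuid transition below is
         * attempted (subject to MNT_NOSUID, tracing, and capability mode).
         */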
        /* Don't inherit PROC_PDEATHSIG_CTL value if setuid/setgid. */
        if (credential_changing)
                imgp->proc->p_pdeathsig = 0;

        if (credential_changing &&
#ifdef CAPABILITY_MODE
            ((oldcred->cr_flags & CRED_FLAG_CAPMODE) == 0) &&
#endif
            (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
            (p->p_flag & P_TRACED) == 0) {
                imgp->credential_setid = true;
                VOP_UNLOCK(imgp->vp);
                imgp->newcred = crdup(oldcred);
                if (attr.va_mode & S_ISUID) {
                        euip = uifind(attr.va_uid);
                        change_euid(imgp->newcred, euip);
                }
                vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
                if (attr.va_mode & S_ISGID)
                        change_egid(imgp->newcred, attr.va_gid);
                /*
                 * Implement correct POSIX saved-id behavior.
                 *
                 * XXXMAC: Note that the current logic will save the
                 * uid and gid if a MAC domain transition occurs, even
                 * though maybe it shouldn't.
                 */
                change_svuid(imgp->newcred, imgp->newcred->cr_uid);
                change_svgid(imgp->newcred, imgp->newcred->cr_gid);
        } else {
                /*
                 * Implement correct POSIX saved-id behavior.
                 *
                 * XXX: It's not clear that the existing behavior is
                 * POSIX-compliant.  A number of sources indicate that the
                 * saved uid/gid should only be updated if the new ruid is
                 * not equal to the old ruid, or the new euid is not equal
                 * to the old euid and the new euid is not equal to the old
                 * ruid.  The FreeBSD code always updates the saved uid/gid.
                 * Also, this code uses the new (replaced) euid and egid as
                 * the source, which may or may not be the right ones to use.
                 */
                if (oldcred->cr_svuid != oldcred->cr_uid ||
                    oldcred->cr_svgid != oldcred->cr_gid) {
                        VOP_UNLOCK(imgp->vp);
                        imgp->newcred = crdup(oldcred);
                        vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
                        change_svuid(imgp->newcred, imgp->newcred->cr_uid);
                        change_svgid(imgp->newcred, imgp->newcred->cr_gid);
                }
        }
        /* The new credentials are installed into the process later. */
        /*
         * If the current process has a special image activator it
         * wants to try first, call it.  For example, emulating shell
         * scripts differently.
         */
        error = -1;
        if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
                error = img_first(imgp);

        /*
         * Loop through the list of image activators, calling each one.
         * An activator returns -1 if there is no match, 0 on success,
         * and an error otherwise.
         */
        for (i = 0; error == -1 && execsw[i]; ++i) {
                if (execsw[i]->ex_imgact == NULL ||
                    execsw[i]->ex_imgact == img_first) {
                        continue;
                }
                error = (*execsw[i]->ex_imgact)(imgp);
        }

        if (error) {
                if (error == -1)
                        error = ENOEXEC;
                goto exec_fail_dealloc;
        }
        /*
         * Special interpreter operation, cleanup and loop up to try to
         * activate the interpreter.
         */
        if (imgp->interpreted) {
                exec_unmap_first_page(imgp);
                /*
                 * The text reference needs to be removed for scripts.
                 * There is a short period before we determine that
                 * something is a script where text reference is active.
                 * The vnode lock is held over this entire period
                 * so nothing should illegitimately be blocked.
                 */
                MPASS(imgp->textset);
                VOP_UNSET_TEXT_CHECKED(newtextvp);
                imgp->textset = false;

                /* free name buffer and old vnode */
#ifdef MAC
                mac_execve_interpreter_enter(newtextvp, &interpvplabel);
#endif
                if (imgp->opened) {
                        VOP_CLOSE(newtextvp, FREAD, td->td_ucred, td);
                        imgp->opened = false;
                }
                vput(newtextvp);
                imgp->vp = newtextvp = NULL;
                if (args->fname != NULL) {
                        if (newtextdvp != NULL) {
                                vrele(newtextdvp);
                                newtextdvp = NULL;
                        }
                        NDFREE(&nd, NDF_ONLY_PNBUF);
                        free(newbinname, M_PARGS);
                        newbinname = NULL;
                }
                vm_object_deallocate(imgp->object);
                imgp->object = NULL;
                execve_nosetid(imgp);
                imgp->execpath = NULL;
                free(imgp->freepath, M_TEMP);
                imgp->freepath = NULL;
                /* set new name to that of the interpreter */
                args->fname = imgp->interpreter_name;
                goto interpret;
        }
        /*
         * NB: We unlock the vnode here because it is believed that none
         * of the sv_copyout_strings/sv_fixup operations require the vnode.
         */
        VOP_UNLOCK(imgp->vp);

        if (disallow_high_osrel &&
            P_OSREL_MAJOR(p->p_osrel) > P_OSREL_MAJOR(__FreeBSD_version)) {
                error = ENOEXEC;
                uprintf("Osrel %d for image %s too high\n", p->p_osrel,
                    imgp->execpath != NULL ? imgp->execpath : "<unresolved>");
                vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
                goto exec_fail_dealloc;
        }

        /*
         * Copy out strings (args and env) and initialize stack base.
         */
        error = (*p->p_sysent->sv_copyout_strings)(imgp, &stack_base);
        if (error != 0) {
                vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
                goto exec_fail_dealloc;
        }

        /*
         * Stack setup.
         */
        error = (*p->p_sysent->sv_fixup)(&stack_base, imgp);
        if (error != 0) {
                vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
                goto exec_fail_dealloc;
        }
        /*
         * For security and other reasons, the file descriptor table cannot be
         * shared after an exec.
         */
        fdunshare(td);
        pdunshare(td);
        /* close files on exec */
        fdcloseexec(td);

        /*
         * Malloc things before we need locks.
         */
        i = exec_args_get_begin_envv(imgp->args) - imgp->args->begin_argv;
        /* Cache arguments if they fit inside our allowance */
        if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
                newargs = pargs_alloc(i);
                bcopy(imgp->args->begin_argv, newargs->ar_args, i);
        }
        /*
         * For security and other reasons, signal handlers cannot
         * be shared after an exec.  The new process gets a copy of the old
         * handlers.  In execsigs(), the new process will have its signals
         * reset.
         */
        if (sigacts_shared(p->p_sigacts)) {
                oldsigacts = p->p_sigacts;
                newsigacts = sigacts_alloc();
                sigacts_copy(newsigacts, oldsigacts);
        }

        vn_lock(imgp->vp, LK_SHARED | LK_RETRY);

        PROC_LOCK(p);
        if (oldsigacts)
                p->p_sigacts = newsigacts;

        /* Stop profiling */
        stopprofclock(p);

        /* reset caught signals */
        execsigs(p);

        /* name this process - nameiexec(p, ndp) */
        bzero(p->p_comm, sizeof(p->p_comm));
        if (args->fname)
                bcopy(nd.ni_cnd.cn_nameptr, p->p_comm,
                    min(nd.ni_cnd.cn_namelen, MAXCOMLEN));
        else if (vn_commname(newtextvp, p->p_comm, sizeof(p->p_comm)) != 0)
                bcopy(fexecv_proc_title, p->p_comm, sizeof(fexecv_proc_title));
        bcopy(p->p_comm, td->td_name, sizeof(td->td_name));
#ifdef KTR
        sched_clear_tdname(td);
#endif
        /*
         * mark as execed, wakeup the process that vforked (if any) and tell
         * it that it now has its own resources back
         */
        p->p_flag |= P_EXEC;
        if ((p->p_flag2 & P2_NOTRACE_EXEC) == 0)
                p->p_flag2 &= ~P2_NOTRACE;
        if ((p->p_flag2 & P2_STKGAP_DISABLE_EXEC) == 0)
                p->p_flag2 &= ~P2_STKGAP_DISABLE;
        if (p->p_flag & P_PPWAIT) {
                p->p_flag &= ~(P_PPWAIT | P_PPTRACE);
                cv_broadcast(&p->p_pwait);
                /* STOPs are no longer ignored, arrange for AST */
                signotify(td);
        }

        if ((imgp->sysent->sv_setid_allowed != NULL &&
            !(*imgp->sysent->sv_setid_allowed)(td, imgp)) ||
            (p->p_flag2 & P2_NO_NEW_PRIVS) != 0)
                execve_nosetid(imgp);
        /*
         * Implement image setuid/setgid installation.
         */
        if (imgp->credential_setid) {
                /*
                 * Turn off syscall tracing for set-id programs, except for
                 * root.  Record any set-id flags first to make sure that
                 * we do not regain any tracing during a possible block.
                 */
                setsugid(p);
#ifdef KTRACE
                kiop = ktrprocexec(p);
#endif
                /*
                 * Close any file descriptors 0..2 that reference procfs,
                 * then make sure file descriptors 0..2 are in use.
                 *
                 * Both fdsetugidsafety() and fdcheckstd() may call functions
                 * taking sleepable locks, so temporarily drop our locks.
                 */
                PROC_UNLOCK(p);
                VOP_UNLOCK(imgp->vp);
                fdsetugidsafety(td);
                error = fdcheckstd(td);
                vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
                if (error != 0)
                        goto exec_fail_dealloc;
                PROC_LOCK(p);
#ifdef MAC
                if (will_transition) {
                        mac_vnode_execve_transition(oldcred, imgp->newcred,
                            imgp->vp, interpvplabel, imgp);
                }
#endif
        } else {
                if (oldcred->cr_uid == oldcred->cr_ruid &&
                    oldcred->cr_gid == oldcred->cr_rgid)
                        p->p_flag &= ~P_SUGID;
        }
        /*
         * Set the new credentials.
         */
        if (imgp->newcred != NULL)
                proc_set_cred(p, imgp->newcred);

        /*
         * Store the vp for use in kern.proc.pathname.  This vnode was
         * referenced by namei() or by the fexecve variant of fname handling.
         */
        oldtextvp = p->p_textvp;
        p->p_textvp = newtextvp;
        oldtextdvp = p->p_textdvp;
        p->p_textdvp = newtextdvp;
        newtextdvp = NULL;
        oldbinname = p->p_binname;
        p->p_binname = newbinname;
        newbinname = NULL;
#ifdef KDTRACE_HOOKS
        /*
         * Tell the DTrace fasttrap provider about the exec if it
         * has declared an interest.
         */
        if (dtrace_fasttrap_exec)
                dtrace_fasttrap_exec(p);
#endif

        /*
         * Notify others that we exec'd, and clear the P_INEXEC flag
         * as we're now a bona fide freshly-execed process.
         */
        KNOTE_LOCKED(p->p_klist, NOTE_EXEC);
        p->p_flag &= ~P_INEXEC;

        /* clear "fork but no exec" flag, as we _are_ execing */
        p->p_acflag &= ~AFORK;

        /*
         * Free any previous argument cache and replace it with
         * the new argument cache, if any.
         */
        oldargs = p->p_args;
        p->p_args = newargs;
        newargs = NULL;

        PROC_UNLOCK(p);

#ifdef HWPMC_HOOKS
        /*
         * Check if system-wide sampling is in effect or if the
         * current process is using PMCs.  If so, do exec() time
         * processing.  This processing needs to happen AFTER the
         * P_INEXEC flag is cleared.
         */
        if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) {
                VOP_UNLOCK(imgp->vp);
                pe.pm_credentialschanged = credential_changing;
                pe.pm_entryaddr = imgp->entry_addr;

                PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *)&pe);
                vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
        }
#endif

        /* Set values passed into the program in registers. */
        (*p->p_sysent->sv_setregs)(td, imgp, stack_base);

        VOP_MMAPPED(imgp->vp);

        SDT_PROBE1(proc, , , exec__success, args->fname);
exec_fail_dealloc:
        if (error != 0) {
                p->p_osrel = orig_osrel;
                p->p_fctl0 = orig_fctl0;
                p->p_elf_brandinfo = orig_brandinfo;
        }

        if (imgp->firstpage != NULL)
                exec_unmap_first_page(imgp);

        if (imgp->vp != NULL) {
                if (imgp->opened)
                        VOP_CLOSE(imgp->vp, FREAD, td->td_ucred, td);
                if (imgp->textset)
                        VOP_UNSET_TEXT_CHECKED(imgp->vp);
                if (error != 0)
                        vput(imgp->vp);
                else
                        VOP_UNLOCK(imgp->vp);
                if (args->fname != NULL)
                        NDFREE(&nd, NDF_ONLY_PNBUF);
                if (newtextdvp != NULL)
                        vrele(newtextdvp);
                free(newbinname, M_PARGS);
        }

        if (imgp->object != NULL)
                vm_object_deallocate(imgp->object);

        free(imgp->freepath, M_TEMP);

        if (error == 0) {
                if (p->p_ptevents & PTRACE_EXEC) {
                        PROC_LOCK(p);
                        if (p->p_ptevents & PTRACE_EXEC)
                                td->td_dbgflags |= TDB_EXEC;
                        PROC_UNLOCK(p);
                }
        } else {
exec_fail:
                /* we're done here, clear P_INEXEC */
                PROC_LOCK(p);
                p->p_flag &= ~P_INEXEC;
                PROC_UNLOCK(p);

                SDT_PROBE1(proc, , , exec__failure, error);
        }

        if (imgp->newcred != NULL && oldcred != NULL)
                crfree(imgp->newcred);

#ifdef MAC
        mac_execve_exit(imgp);
        mac_execve_interpreter_exit(interpvplabel);
#endif
        exec_free_args(args);

        /*
         * Handle deferred decrement of ref counts.
         */
        if (oldtextvp != NULL)
                vrele(oldtextvp);
        if (oldtextdvp != NULL)
                vrele(oldtextdvp);
        free(oldbinname, M_PARGS);
#ifdef KTRACE
        ktr_io_params_free(kiop);
#endif
        pargs_drop(oldargs);
        pargs_drop(newargs);
        if (oldsigacts != NULL)
                sigacts_free(oldsigacts);
        if (euip != NULL)
                uifree(euip);

        if (error && imgp->vmspace_destroyed) {
                /* sorry, no more process anymore. exit gracefully */
                exec_cleanup(td, oldvmspace);
                exit1(td, 0, SIGABRT);
                /* NOT REACHED */
        }

#ifdef KTRACE
        if (error == 0)
                ktrprocctor(p);
#endif

        /*
         * We don't want cpu_set_syscall_retval() to overwrite any of
         * the register values put in place by exec_setregs().
         * Implementations of cpu_set_syscall_retval() will leave
         * registers unmodified when returning EJUSTRETURN.
         */
        return (error == 0 ? EJUSTRETURN : error);
}
void
exec_cleanup(struct thread *td, struct vmspace *oldvmspace)
{
        if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
                KASSERT(td->td_proc->p_vmspace != oldvmspace,
                    ("oldvmspace still used"));
                vmspace_free(oldvmspace);
                td->td_pflags &= ~TDP_EXECVMSPC;
        }
}
int
exec_map_first_page(struct image_params *imgp)
{
        vm_object_t object;
        vm_page_t m;
        int error;

        if (imgp->firstpage != NULL)
                exec_unmap_first_page(imgp);

        object = imgp->vp->v_object;
        if (object == NULL)
                return (EACCES);
#if VM_NRESERVLEVEL > 0
        if ((object->flags & OBJ_COLORED) == 0) {
                VM_OBJECT_WLOCK(object);
                vm_object_color(object, 0);
                VM_OBJECT_WUNLOCK(object);
        }
#endif
        error = vm_page_grab_valid_unlocked(&m, object, 0,
            VM_ALLOC_COUNT(VM_INITIAL_PAGEIN) |
            VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);

        if (error != VM_PAGER_OK)
                return (EIO);

        imgp->firstpage = sf_buf_alloc(m, 0);
        imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);

        return (0);
}
void
exec_unmap_first_page(struct image_params *imgp)
{
        vm_page_t m;

        if (imgp->firstpage != NULL) {
                m = sf_buf_page(imgp->firstpage);
                sf_buf_free(imgp->firstpage);
                imgp->firstpage = NULL;
                vm_page_unwire(m, PQ_ACTIVE);
        }
}

void
exec_onexec_old(struct thread *td)
{
        sigfastblock_clear(td);
        umtx_exec(td->td_proc);
}
/*
 * This is an optimization which removes the unmanaged shared page
 * mapping.  In combination with pmap_remove_pages(), which cleans all
 * managed mappings in the process' vmspace pmap, no work will be left
 * for pmap_remove(min, max).
 */
void
exec_free_abi_mappings(struct proc *p)
{
        struct vmspace *vmspace;
        struct sysentvec *sv;

        vmspace = p->p_vmspace;
        if (refcount_load(&vmspace->vm_refcnt) != 1)
                return;

        sv = p->p_sysent;
        if (sv->sv_shared_page_obj == NULL)
                return;

        pmap_remove(vmspace_pmap(vmspace), sv->sv_shared_page_base,
            sv->sv_shared_page_base + sv->sv_shared_page_len);
}
/*
 * Destroy old address space, and allocate a new stack.
 *	The new stack is only sgrowsiz large because it is grown
 *	automatically on a page fault.
 */
int
exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv)
{
        int error;
        struct proc *p = imgp->proc;
        struct vmspace *vmspace = p->p_vmspace;
        struct thread *td = curthread;
        vm_object_t obj;
        struct rlimit rlim_stack;
        vm_offset_t sv_minuser, stack_addr;
        vm_map_t map;
        vm_prot_t stack_prot;
        u_long ssiz;

        imgp->vmspace_destroyed = true;
        imgp->sysent = sv;

        if (p->p_sysent->sv_onexec_old != NULL)
                p->p_sysent->sv_onexec_old(td);
        itimers_exec(p);

        EVENTHANDLER_DIRECT_INVOKE(process_exec, p, imgp);
        /*
         * Blow away entire process VM, if address space not shared,
         * otherwise, create a new VM space so that other threads are
         * not disrupted
         */
        map = &vmspace->vm_map;
        if (map_at_zero)
                sv_minuser = sv->sv_minuser;
        else
                sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE);
        if (refcount_load(&vmspace->vm_refcnt) == 1 &&
            vm_map_min(map) == sv_minuser &&
            vm_map_max(map) == sv->sv_maxuser &&
            cpu_exec_vmspace_reuse(p, map)) {
                exec_free_abi_mappings(p);
                shmexit(vmspace);
                pmap_remove_pages(vmspace_pmap(vmspace));
                vm_map_remove(map, vm_map_min(map), vm_map_max(map));
                /*
                 * An exec terminates mlockall(MCL_FUTURE).
                 * ASLR and W^X states must be re-evaluated.
                 */
                vm_map_lock(map);
                vm_map_modflags(map, 0, MAP_WIREFUTURE | MAP_ASLR |
                    MAP_ASLR_IGNSTART | MAP_WXORX);
                vm_map_unlock(map);
        } else {
                error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
                if (error)
                        return (error);
                vmspace = p->p_vmspace;
                map = &vmspace->vm_map;
        }
        map->flags |= imgp->map_flags;
        /* Map a shared page */
        obj = sv->sv_shared_page_obj;
        if (obj != NULL) {
                vm_object_reference(obj);
                error = vm_map_fixed(map, obj, 0,
                    sv->sv_shared_page_base, sv->sv_shared_page_len,
                    VM_PROT_READ | VM_PROT_EXECUTE,
                    VM_PROT_READ | VM_PROT_EXECUTE,
                    MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
                if (error != KERN_SUCCESS) {
                        vm_object_deallocate(obj);
                        return (vm_mmap_to_errno(error));
                }
        }
        /* Allocate a new stack */
        if (imgp->stack_sz != 0) {
                ssiz = trunc_page(imgp->stack_sz);
                PROC_LOCK(p);
                lim_rlimit_proc(p, RLIMIT_STACK, &rlim_stack);
                PROC_UNLOCK(p);
                if (ssiz > rlim_stack.rlim_max)
                        ssiz = rlim_stack.rlim_max;
                if (ssiz > rlim_stack.rlim_cur) {
                        rlim_stack.rlim_cur = ssiz;
                        kern_setrlimit(curthread, RLIMIT_STACK, &rlim_stack);
                }
        } else if (sv->sv_maxssiz != NULL) {
                ssiz = *sv->sv_maxssiz;
        } else {
                ssiz = maxssiz;
        }

        imgp->eff_stack_sz = lim_cur(curthread, RLIMIT_STACK);
        if (ssiz < imgp->eff_stack_sz)
                imgp->eff_stack_sz = ssiz;
        stack_addr = sv->sv_usrstack - ssiz;
        stack_prot = obj != NULL && imgp->stack_prot != 0 ?
            imgp->stack_prot : sv->sv_stackprot;
        error = vm_map_stack(map, stack_addr, (vm_size_t)ssiz, stack_prot,
            VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
        if (error != KERN_SUCCESS) {
                uprintf("exec_new_vmspace: mapping stack size %#jx prot %#x "
                    "failed mach error %d errno %d\n", (uintmax_t)ssiz,
                    stack_prot, error, vm_mmap_to_errno(error));
                return (vm_mmap_to_errno(error));
        }
        vmspace->vm_stkgap = 0;

        /*
         * vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they
         * are still used to enforce the stack rlimit on the process stack.
         */
        vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
        vmspace->vm_maxsaddr = (char *)stack_addr;

        return (sv->sv_onexec != NULL ? sv->sv_onexec(p, imgp) : 0);
}
/*
 * Copy out argument and environment strings from the old process address
 * space into the temporary string buffer.
 */
int
exec_copyin_args(struct image_args *args, const char *fname,
    enum uio_seg segflg, char **argv, char **envv)
{
        u_long arg, env;
        int error;

        bzero(args, sizeof(*args));
        if (argv == NULL)
                return (EFAULT);

        /*
         * Allocate demand-paged memory for the file name, argument, and
         * environment strings.
         */
        error = exec_alloc_args(args);
        if (error != 0)
                return (error);

        /*
         * Copy the file name.
         */
        error = exec_args_add_fname(args, fname, segflg);
        if (error != 0)
                goto err_exit;

        /*
         * extract arguments first
         */
        for (;;) {
                error = fueword(argv++, &arg);
                if (error == -1) {
                        error = EFAULT;
                        goto err_exit;
                }
                if (arg == 0)
                        break;
                error = exec_args_add_arg(args, (char *)(uintptr_t)arg,
                    UIO_USERSPACE);
                if (error != 0)
                        goto err_exit;
        }

        /*
         * extract environment strings
         */
        if (envv) {
                for (;;) {
                        error = fueword(envv++, &env);
                        if (error == -1) {
                                error = EFAULT;
                                goto err_exit;
                        }
                        if (env == 0)
                                break;
                        error = exec_args_add_env(args,
                            (char *)(uintptr_t)env, UIO_USERSPACE);
                        if (error != 0)
                                goto err_exit;
                }
        }

        return (0);

err_exit:
        exec_free_args(args);
        return (error);
}
struct exec_args_kva {
        vm_offset_t addr;
        u_int gen;
        SLIST_ENTRY(exec_args_kva) next;
};

DPCPU_DEFINE_STATIC(struct exec_args_kva *, exec_args_kva);

static SLIST_HEAD(, exec_args_kva) exec_args_kva_freelist;
static struct mtx exec_args_kva_mtx;
static u_int exec_args_gen;

static void
exec_prealloc_args_kva(void *arg __unused)
{
        struct exec_args_kva *argkva;
        u_int i;

        SLIST_INIT(&exec_args_kva_freelist);
        mtx_init(&exec_args_kva_mtx, "exec args kva", NULL, MTX_DEF);
        for (i = 0; i < exec_map_entries; i++) {
                argkva = malloc(sizeof(*argkva), M_PARGS, M_WAITOK);
                argkva->addr = kmap_alloc_wait(exec_map, exec_map_entry_size);
                argkva->gen = exec_args_gen;
                SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
        }
}
SYSINIT(exec_args_kva, SI_SUB_EXEC, SI_ORDER_ANY, exec_prealloc_args_kva, NULL);
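/*
 * Argument buffers come from the small pool preallocated above.  The
 * allocator below first tries a lock-free per-CPU slot, via
 * atomic_readandclear_ptr() on the DPCPU pointer, and only falls back to
 * the mutex-protected freelist, sleeping on "execkva" when the pool is
 * exhausted.
 */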
static vm_offset_t
exec_alloc_args_kva(void **cookie)
{
        struct exec_args_kva *argkva;

        argkva = (void *)atomic_readandclear_ptr(
            (uintptr_t *)DPCPU_PTR(exec_args_kva));
        if (argkva == NULL) {
                mtx_lock(&exec_args_kva_mtx);
                while ((argkva = SLIST_FIRST(&exec_args_kva_freelist)) == NULL)
                        (void)mtx_sleep(&exec_args_kva_freelist,
                            &exec_args_kva_mtx, 0, "execkva", 0);
                SLIST_REMOVE_HEAD(&exec_args_kva_freelist, next);
                mtx_unlock(&exec_args_kva_mtx);
        }
        kasan_mark((void *)argkva->addr, exec_map_entry_size,
            exec_map_entry_size, 0);
        *(struct exec_args_kva **)cookie = argkva;
        return (argkva->addr);
}
static void
exec_release_args_kva(struct exec_args_kva *argkva, u_int gen)
{
        vm_offset_t base;

        base = argkva->addr;
        kasan_mark((void *)argkva->addr, 0, exec_map_entry_size,
            KASAN_EXEC_ARGS_FREED);
        if (argkva->gen != gen) {
                (void)vm_map_madvise(exec_map, base, base + exec_map_entry_size,
                    MADV_FREE);
                argkva->gen = gen;
        }
        if (!atomic_cmpset_ptr((uintptr_t *)DPCPU_PTR(exec_args_kva),
            (uintptr_t)NULL, (uintptr_t)argkva)) {
                mtx_lock(&exec_args_kva_mtx);
                SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
                wakeup_one(&exec_args_kva_freelist);
                mtx_unlock(&exec_args_kva_mtx);
        }
}

static void
exec_free_args_kva(void *cookie)
{
        exec_release_args_kva(cookie, exec_args_gen);
}
static void
exec_args_kva_lowmem(void *arg __unused)
{
        SLIST_HEAD(, exec_args_kva) head;
        struct exec_args_kva *argkva;
        u_int gen;
        int i;

        gen = atomic_fetchadd_int(&exec_args_gen, 1) + 1;

        /*
         * Force an madvise of each KVA range.  Any currently allocated ranges
         * will have MADV_FREE applied once they are freed.
         */
        SLIST_INIT(&head);
        mtx_lock(&exec_args_kva_mtx);
        SLIST_SWAP(&head, &exec_args_kva_freelist, exec_args_kva);
        mtx_unlock(&exec_args_kva_mtx);
        while ((argkva = SLIST_FIRST(&head)) != NULL) {
                SLIST_REMOVE_HEAD(&head, next);
                exec_release_args_kva(argkva, gen);
        }

        CPU_FOREACH(i) {
                argkva = (void *)atomic_readandclear_ptr(
                    (uintptr_t *)DPCPU_ID_PTR(i, exec_args_kva));
                if (argkva != NULL)
                        exec_release_args_kva(argkva, gen);
        }
}
EVENTHANDLER_DEFINE(vm_lowmem, exec_args_kva_lowmem, NULL,
    EVENTHANDLER_PRI_ANY);
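/*
 * The generation counter makes low-memory reclamation lazy: bumping
 * exec_args_gen marks every cached range stale, and exec_release_args_kva()
 * applies MADV_FREE to a stale range the next time it is freed, so buffers
 * still in use need not be waited for.
 */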
/*
 * Allocate temporary demand-paged, zero-filled memory for the file name,
 * argument, and environment strings.
 */
int
exec_alloc_args(struct image_args *args)
{
        args->buf = (char *)exec_alloc_args_kva(&args->bufkva);
        return (0);
}

void
exec_free_args(struct image_args *args)
{
        if (args->buf != NULL) {
                exec_free_args_kva(args->bufkva);
                args->buf = NULL;
        }
        if (args->fname_buf != NULL) {
                free(args->fname_buf, M_TEMP);
                args->fname_buf = NULL;
        }
}
/*
 * A set of functions to fill struct image_args.
 *
 * NOTE: exec_args_add_fname() must be called (possibly with a NULL
 * fname) before the other functions.  All exec_args_add_arg() calls must
 * be made before any exec_args_add_env() calls.  exec_args_adjust_args()
 * may be called any time after exec_args_add_fname().
 *
 * exec_args_add_fname() - install path to be executed
 * exec_args_add_arg() - append an argument string
 * exec_args_add_env() - append an env string
 * exec_args_adjust_args() - adjust location of the argument list to
 *                           allow new arguments to be prepended
 */
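/*
 * Usage sketch (hypothetical in-kernel caller; error handling elided):
 *
 *	struct image_args args;
 *
 *	bzero(&args, sizeof(args));
 *	exec_alloc_args(&args);
 *	exec_args_add_fname(&args, "/bin/sh", UIO_SYSSPACE);
 *	exec_args_add_arg(&args, "sh", UIO_SYSSPACE);
 *	exec_args_add_arg(&args, "-c", UIO_SYSSPACE);
 *	exec_args_add_arg(&args, "echo hello", UIO_SYSSPACE);
 *	exec_args_add_env(&args, "PATH=/bin:/usr/bin", UIO_SYSSPACE);
 *	error = kern_execve(td, &args, NULL, oldvmspace);
 */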
int
exec_args_add_fname(struct image_args *args, const char *fname,
    enum uio_seg segflg)
{
        int error;
        size_t length;

        KASSERT(args->fname == NULL, ("fname already appended"));
        KASSERT(args->endp == NULL, ("already appending to args"));

        if (fname != NULL) {
                args->fname = args->buf;
                error = segflg == UIO_SYSSPACE ?
                    copystr(fname, args->fname, PATH_MAX, &length) :
                    copyinstr(fname, args->fname, PATH_MAX, &length);
                if (error != 0)
                        return (error == ENAMETOOLONG ? E2BIG : error);
        } else
                length = 0;

        /* Set up for _arg_*()/_env_*() */
        args->endp = args->buf + length;
        /* begin_argv must be set and kept updated */
        args->begin_argv = args->endp;
        KASSERT(exec_map_entry_size - length >= ARG_MAX,
            ("too little space remaining for arguments %zu < %zu",
            exec_map_entry_size - length, (size_t)ARG_MAX));
        args->stringspace = ARG_MAX;

        return (0);
}
static int
exec_args_add_str(struct image_args *args, const char *str,
    enum uio_seg segflg, int *countp)
{
        int error;
        size_t length;

        KASSERT(args->endp != NULL, ("endp not initialized"));
        KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));

        error = (segflg == UIO_SYSSPACE) ?
            copystr(str, args->endp, args->stringspace, &length) :
            copyinstr(str, args->endp, args->stringspace, &length);
        if (error != 0)
                return (error == ENAMETOOLONG ? E2BIG : error);
        args->stringspace -= length;
        args->endp += length;
        (*countp)++;

        return (0);
}

int
exec_args_add_arg(struct image_args *args, const char *argp,
    enum uio_seg segflg)
{
        KASSERT(args->envc == 0, ("appending args after env"));

        return (exec_args_add_str(args, argp, segflg, &args->argc));
}

int
exec_args_add_env(struct image_args *args, const char *envp,
    enum uio_seg segflg)
{
        if (args->envc == 0)
                args->begin_envv = args->endp;

        return (exec_args_add_str(args, envp, segflg, &args->envc));
}
int
exec_args_adjust_args(struct image_args *args, size_t consume, ssize_t extend)
{
        ssize_t offset;

        KASSERT(args->endp != NULL, ("endp not initialized"));
        KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));

        offset = extend - consume;
        if (args->stringspace < offset)
                return (E2BIG);
        memmove(args->begin_argv + extend, args->begin_argv + consume,
            args->endp - args->begin_argv + consume);
        if (args->envc > 0)
                args->begin_envv += offset;
        args->endp += offset;
        args->stringspace -= offset;
        return (0);
}

char *
exec_args_get_begin_envv(struct image_args *args)
{
        KASSERT(args->endp != NULL, ("endp not initialized"));

        if (args->envc > 0)
                return (args->begin_envv);
        return (args->endp);
}
void
exec_stackgap(struct image_params *imgp, uintptr_t *dp)
{
        struct proc *p = imgp->proc;

        if (imgp->sysent->sv_stackgap == NULL ||
            (p->p_fctl0 & (NT_FREEBSD_FCTL_ASLR_DISABLE |
            NT_FREEBSD_FCTL_ASG_DISABLE)) != 0 ||
            (imgp->map_flags & MAP_ASLR) == 0) {
                p->p_vmspace->vm_stkgap = 0;
                return;
        }
        p->p_vmspace->vm_stkgap = imgp->sysent->sv_stackgap(imgp, dp);
}
/*
 * Copy strings out to the new process address space, constructing new arg
 * and env vector tables.  Return a pointer to the base so that it can be used
 * as the initial stack pointer.
 */
int
exec_copyout_strings(struct image_params *imgp, uintptr_t *stack_base)
{
        int argc, envc;
        char **vectp;
        char *stringp;
        uintptr_t destp, ustringp;
        struct ps_strings *arginfo;
        struct proc *p;
        struct sysentvec *sysent;
        size_t execpath_len;
        int error, szsigcode;
        char canary[sizeof(long) * 8];

        p = imgp->proc;
        sysent = p->p_sysent;

        destp = PROC_PS_STRINGS(p);
        arginfo = imgp->ps_strings = (void *)destp;
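        /*
         * Layout built below, growing down from PROC_PS_STRINGS(p):
         * ps_strings, signal trampoline (when not in the shared page),
         * execpath for rtld, SSP canary, pagesizes array, argument and
         * environment strings, optional ASLR stack gap, ELF auxargs
         * vector, and finally the argv[]/envv[] pointer tables, whose
         * base becomes the initial stack pointer.
         */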
        /*
         * Install sigcode.
         */
        if (sysent->sv_sigcode_base == 0 && sysent->sv_szsigcode != NULL) {
                szsigcode = *(sysent->sv_szsigcode);
                destp -= szsigcode;
                destp = rounddown2(destp, sizeof(void *));
                error = copyout(sysent->sv_sigcode, (void *)destp, szsigcode);
                if (error != 0)
                        return (error);
        }

        /*
         * Copy the image path for the rtld.
         */
        if (imgp->execpath != NULL && imgp->auxargs != NULL) {
                execpath_len = strlen(imgp->execpath) + 1;
                destp -= execpath_len;
                destp = rounddown2(destp, sizeof(void *));
                imgp->execpathp = (void *)destp;
                error = copyout(imgp->execpath, imgp->execpathp, execpath_len);
                if (error != 0)
                        return (error);
        }

        /*
         * Prepare the canary for SSP.
         */
        arc4rand(canary, sizeof(canary), 0);
        destp -= sizeof(canary);
        imgp->canary = (void *)destp;
        error = copyout(canary, imgp->canary, sizeof(canary));
        if (error != 0)
                return (error);
        imgp->canarylen = sizeof(canary);

        /*
         * Prepare the pagesizes array.
         */
        imgp->pagesizeslen = sizeof(pagesizes[0]) * MAXPAGESIZES;
        destp -= imgp->pagesizeslen;
        destp = rounddown2(destp, sizeof(void *));
        imgp->pagesizes = (void *)destp;
        error = copyout(pagesizes, imgp->pagesizes, imgp->pagesizeslen);
        if (error != 0)
                return (error);

        /*
         * Allocate room for the argument and environment strings.
         */
        destp -= ARG_MAX - imgp->args->stringspace;
        destp = rounddown2(destp, sizeof(void *));
        ustringp = destp;

        exec_stackgap(imgp, &destp);
        if (imgp->auxargs) {
                /*
                 * Allocate room on the stack for the ELF auxargs
                 * array.  It has up to AT_COUNT entries.
                 */
                destp -= AT_COUNT * sizeof(Elf_Auxinfo);
                destp = rounddown2(destp, sizeof(void *));
        }

        vectp = (char **)destp;

        /*
         * Allocate room for the argv[] and env vectors including the
         * terminating NULL pointers.
         */
        vectp -= imgp->args->argc + 1 + imgp->args->envc + 1;

        /*
         * vectp also becomes our initial stack base
         */
        *stack_base = (uintptr_t)vectp;

        stringp = imgp->args->begin_argv;
        argc = imgp->args->argc;
        envc = imgp->args->envc;

        /*
         * Copy out strings - arguments and environment.
         */
        error = copyout(stringp, (void *)ustringp,
            ARG_MAX - imgp->args->stringspace);
        if (error != 0)
                return (error);

        /*
         * Fill in "ps_strings" struct for ps, w, etc.
         */
        if (suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp) != 0 ||
            suword32(&arginfo->ps_nargvstr, argc) != 0)
                return (EFAULT);

        /*
         * Fill in argument portion of vector table.
         */
        for (; argc > 0; --argc) {
                if (suword(vectp++, ustringp) != 0)
                        return (EFAULT);
                while (*stringp++ != 0)
                        ustringp++;
                ustringp++;
        }

        /* a null vector table pointer separates the argp's from the envp's */
        if (suword(vectp++, 0) != 0)
                return (EFAULT);

        if (suword(&arginfo->ps_envstr, (long)(intptr_t)vectp) != 0 ||
            suword32(&arginfo->ps_nenvstr, envc) != 0)
                return (EFAULT);

        /*
         * Fill in environment portion of vector table.
         */
        for (; envc > 0; --envc) {
                if (suword(vectp++, ustringp) != 0)
                        return (EFAULT);
                while (*stringp++ != 0)
                        ustringp++;
                ustringp++;
        }

        /* end of vector table is a null pointer */
        if (suword(vectp, 0) != 0)
                return (EFAULT);

        if (imgp->auxargs) {
                vectp++;
                error = imgp->sysent->sv_copyout_auxargs(imgp,
                    (uintptr_t)vectp);
                if (error != 0)
                        return (error);
        }

        return (0);
}
/*
 * Check permissions of file to execute.
 *	Called with imgp->vp locked.
 *	Return 0 for success or error code on failure.
 */
int
exec_check_permissions(struct image_params *imgp)
{
        struct vnode *vp = imgp->vp;
        struct vattr *attr = imgp->attr;
        struct thread *td;
        int error;

        td = curthread;

        /* Get file attributes */
        error = VOP_GETATTR(vp, attr, td->td_ucred);
        if (error)
                return (error);

#ifdef MAC
        error = mac_vnode_check_exec(td->td_ucred, imgp->vp, imgp);
        if (error)
                return (error);
#endif

        /*
         * 1) Check if file execution is disabled for the filesystem that
         *    this file resides on.
         * 2) Ensure that at least one execute bit is on.  Otherwise, a
         *    privileged user will always succeed, and we don't want this
         *    to happen unless the file really is executable.
         * 3) Ensure that the file is a regular file.
         */
        if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
            (attr->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0 ||
            (attr->va_type != VREG))
                return (EACCES);

        /*
         * Zero length files can't be exec'd
         */
        if (attr->va_size == 0)
                return (ENOEXEC);

        /*
         * Check for execute permission to file based on current credentials.
         */
        error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
        if (error)
                return (error);

        /*
         * Check number of open-for-writes on the file and deny execution
         * if there are any.
         *
         * Add a text reference now so no one can write to the
         * executable while we're activating it.
         *
         * Remember if this was set before and unset it in case this is not
         * actually an executable image.
         */
        error = VOP_SET_TEXT(vp);
        if (error != 0)
                return (error);
        imgp->textset = true;

        /*
         * Call filesystem specific open routine (which does nothing in the
         * general case).
         */
        error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
        if (error == 0)
                imgp->opened = true;
        return (error);
}
/*
 * Exec handler registration
 */
int
exec_register(const struct execsw *execsw_arg)
{
        const struct execsw **es, **xs, **newexecsw;
        u_int count = 2;	/* New slot and trailing NULL */

        if (execsw)
                for (es = execsw; *es; es++)
                        count++;
        newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
        xs = newexecsw;
        if (execsw)
                for (es = execsw; *es; es++)
                        *xs++ = *es;
        *xs++ = execsw_arg;
        *xs = NULL;
        if (execsw)
                free(execsw, M_TEMP);
        execsw = newexecsw;
        return (0);
}

int
exec_unregister(const struct execsw *execsw_arg)
{
        const struct execsw **es, **xs, **newexecsw;
        int count = 1;

        if (execsw == NULL)
                panic("unregister with no handlers left?\n");

        for (es = execsw; *es; es++) {
                if (*es == execsw_arg)
                        break;
        }
        if (*es == NULL)
                return (ENOENT);
        for (es = execsw; *es; es++)
                if (*es != execsw_arg)
                        count++;
        newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
        xs = newexecsw;
        for (es = execsw; *es; es++)
                if (*es != execsw_arg)
                        *xs++ = *es;
        *xs = NULL;
        free(execsw, M_TEMP);
        execsw = newexecsw;
        return (0);
}
/*
 * Write out a core segment to the compression stream.
 */
static int
compress_chunk(struct coredump_params *cp, char *base, char *buf, size_t len)
{
        size_t chunk_len;
        int error;

        error = 0;
        while (len > 0) {
                chunk_len = MIN(len, CORE_BUF_SIZE);

                /*
                 * We can get EFAULT error here.
                 * In that case zero out the current chunk of the segment.
                 */
                error = copyin(base, buf, chunk_len);
                if (error != 0)
                        bzero(buf, chunk_len);
                error = compressor_write(cp->comp, buf, chunk_len);
                if (error != 0)
                        break;
                base += chunk_len;
                len -= chunk_len;
        }
        return (error);
}
int
core_write(struct coredump_params *cp, const void *base, size_t len,
    off_t offset, enum uio_seg seg, size_t *resid)
{
        return (vn_rdwr_inchunks(UIO_WRITE, cp->vp, __DECONST(void *, base),
            len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED,
            cp->active_cred, cp->file_cred, resid, cp->td));
}
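/*
 * Roughly: IO_UNIT writes each chunk as an atomic unit, IO_DIRECT hints
 * that the data need not linger in the buffer cache, and IO_RANGELOCKED
 * indicates the caller already holds the range lock over the region being
 * written.
 */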
int
core_output(char *base, size_t len, off_t offset, struct coredump_params *cp,
    void *tmpbuf)
{
        vm_map_t map;
        struct mount *mp;
        size_t resid, runlen;
        int error;
        bool success;

        KASSERT((uintptr_t)base % PAGE_SIZE == 0,
            ("%s: user address %p is not page-aligned", __func__, base));

        if (cp->comp != NULL)
                return (compress_chunk(cp, base, tmpbuf, len));

        map = &cp->td->td_proc->p_vmspace->vm_map;
        for (; len > 0; base += runlen, offset += runlen, len -= runlen) {
                /*
                 * Attempt to page in all virtual pages in the range.  If a
                 * virtual page is not backed by the pager, it is represented as
                 * a hole in the file.  This can occur with zero-filled
                 * anonymous memory or truncated files, for example.
                 */
                for (runlen = 0; runlen < len; runlen += PAGE_SIZE) {
                        if (core_dump_can_intr && curproc_sigkilled())
                                return (EINTR);
                        error = vm_fault(map, (uintptr_t)base + runlen,
                            VM_PROT_READ, VM_FAULT_NOFILL, NULL);
                        if (runlen == 0)
                                success = error == KERN_SUCCESS;
                        else if ((error == KERN_SUCCESS) != success)
                                break;
                }

                if (success) {
                        error = core_write(cp, base, runlen, offset,
                            UIO_USERSPACE, &resid);
                        if (error != 0) {
                                if (error != EFAULT)
                                        break;

                                /*
                                 * EFAULT may be returned if the user mapping
                                 * could not be accessed, e.g., because a mapped
                                 * file has been truncated.  Skip the page if no
                                 * progress was made, to protect against a
                                 * hypothetical scenario where vm_fault() was
                                 * successful but core_write() returns EFAULT
                                 * anyway.
                                 */
                                runlen -= resid;
                                if (runlen == 0) {
                                        success = false;
                                        runlen = PAGE_SIZE;
                                }
                        }
                }
                if (!success) {
                        error = vn_start_write(cp->vp, &mp, V_WAIT);
                        if (error != 0)
                                break;
                        vn_lock(cp->vp, LK_EXCLUSIVE | LK_RETRY);
                        error = vn_truncate_locked(cp->vp, offset + runlen,
                            false, cp->td->td_ucred);
                        VOP_UNLOCK(cp->vp);
                        vn_finished_write(mp);
                        if (error != 0)
                                break;
                }
        }
        return (error);
}
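/*
 * Note that extending the file with vn_truncate_locked() above, instead of
 * writing zeros, leaves a hole in the core file for unreadable or unbacked
 * ranges, keeping sparse cores small on filesystems that support holes.
 */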
/*
 * Drain into a core file.
 */
int
sbuf_drain_core_output(void *arg, const char *data, int len)
{
        struct coredump_params *cp;
        struct proc *p;
        int error, locked;

        cp = arg;
        p = cp->td->td_proc;

        /*
         * Some kern_proc out routines that print to this sbuf may
         * call us with the process lock held.  Draining with the
         * non-sleepable lock held is unsafe.  The lock is needed for
         * those routines when dumping a live process.  In our case we
         * can safely release the lock before draining and acquire
         * it after.
         */
        locked = PROC_LOCKED(p);
        if (locked)
                PROC_UNLOCK(p);
        if (cp->comp != NULL)
                error = compressor_write(cp->comp, __DECONST(char *, data),
                    len);
        else
                error = core_write(cp, __DECONST(void *, data), len, cp->offset,
                    UIO_SYSSPACE, NULL);
        if (locked)
                PROC_LOCK(p);
        if (error != 0)
                return (-error);
        cp->offset += len;
        return (len);
}