/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 1993, David Greenman
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_capsicum.h"
#include "opt_hwpmc_hooks.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/compressor.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/sf_buf.h>
#include <sys/shm.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/timers.h>
#include <sys/umtxvar.h>
#include <sys/vnode.h>
#include <sys/wait.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#ifdef	HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
dtrace_execexit_func_t	dtrace_fasttrap_exec;
#endif

SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE1(proc, , , exec, "char *");
SDT_PROBE_DEFINE1(proc, , , exec__failure, "int");
SDT_PROBE_DEFINE1(proc, , , exec__success, "char *");

MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");

int coredump_pack_fileinfo = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_fileinfo, CTLFLAG_RWTUN,
    &coredump_pack_fileinfo, 0,
    "Enable file path packing in 'procstat -f' coredump notes");

int coredump_pack_vmmapinfo = 1;
SYSCTL_INT(_kern, OID_AUTO, coredump_pack_vmmapinfo, CTLFLAG_RWTUN,
    &coredump_pack_vmmapinfo, 0,
    "Enable file path packing in 'procstat -v' coredump notes");

static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
static int do_execve(struct thread *td, struct image_args *args,
    struct mac *mac_p, struct vmspace *oldvmspace);

/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD|
    CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_ps_strings, "LU",
    "Location of process' ps_strings structure");

/* XXX This should be vm_size_t. */
SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD|
    CTLFLAG_CAPRD|CTLFLAG_MPSAFE, NULL, 0, sysctl_kern_usrstack, "LU",
    "Top of process stack");

SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD|CTLFLAG_MPSAFE,
    NULL, 0, sysctl_kern_stackprot, "I",
    "Stack memory permissions");

u_long ps_arg_cache_limit = PAGE_SIZE / 16;
SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
    &ps_arg_cache_limit, 0,
    "Process' command line characters cache limit");

static int disallow_high_osrel;
SYSCTL_INT(_kern, OID_AUTO, disallow_high_osrel, CTLFLAG_RW,
    &disallow_high_osrel, 0,
    "Disallow execution of binaries built for higher version of the world");

static int map_at_zero = 0;
SYSCTL_INT(_security_bsd, OID_AUTO, map_at_zero, CTLFLAG_RWTUN, &map_at_zero, 0,
    "Permit processes to map an object at virtual address 0.");

static int core_dump_can_intr = 1;
SYSCTL_INT(_kern, OID_AUTO, core_dump_can_intr, CTLFLAG_RWTUN,
    &core_dump_can_intr, 0,
    "Core dumping interruptible with SIGKILL");

static int
sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;
	vm_offset_t ps_strings;

	p = curproc;
	if (req->flags & SCTL_MASK32) {
		unsigned int val;

		val = (unsigned int)PROC_PS_STRINGS(p);
		return (SYSCTL_OUT(req, &val, sizeof(val)));
	}
	ps_strings = PROC_PS_STRINGS(p);
	return (SYSCTL_OUT(req, &ps_strings, sizeof(ps_strings)));
}

static int
sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
{
	vm_offset_t val;
	struct proc *p;

	p = curproc;
	if (req->flags & SCTL_MASK32) {
		unsigned int val32;

		val32 = round_page((unsigned int)p->p_vmspace->vm_stacktop);
		return (SYSCTL_OUT(req, &val32, sizeof(val32)));
	}
	val = round_page(p->p_vmspace->vm_stacktop);
	return (SYSCTL_OUT(req, &val, sizeof(val)));
}

static int
sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
{
	struct proc *p;

	p = curproc;
	return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
	    sizeof(p->p_sysent->sv_stackprot)));
}
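
/*
 * Usage sketch (not part of the original file): the three handlers above
 * back read-only sysctls that userspace can query with sysctl(3), e.g.:
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	u_long usrstack;
	size_t len = sizeof(usrstack);

	/* kern.usrstack is served by sysctl_kern_usrstack() above. */
	if (sysctlbyname("kern.usrstack", &usrstack, &len, NULL, 0) == -1)
		err(1, "sysctlbyname");
	printf("stack top: %#lx\n", usrstack);
	return (0);
}
#endif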

/*
 * Each of the items is a pointer to a `const struct execsw', hence the
 * double pointer here.
 */
static const struct execsw **execsw;

#ifndef _SYS_SYSPROTO_H_
struct execve_args {
	char *fname;
	char **argv;
	char **envv;
};
#endif

int
sys_execve(struct thread *td, struct execve_args *uap)
{
	struct image_args args;
	struct vmspace *oldvmspace;
	int error;

	error = pre_execve(td, &oldvmspace);
	if (error != 0)
		return (error);
	error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
	    uap->argv, uap->envv);
	if (error == 0)
		error = kern_execve(td, &args, NULL, oldvmspace);
	post_execve(td, error, oldvmspace);
	AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct fexecve_args {
	int fd;
	char **argv;
	char **envv;
};
#endif

int
sys_fexecve(struct thread *td, struct fexecve_args *uap)
{
	struct image_args args;
	struct vmspace *oldvmspace;
	int error;

	error = pre_execve(td, &oldvmspace);
	if (error != 0)
		return (error);
	error = exec_copyin_args(&args, NULL, UIO_SYSSPACE,
	    uap->argv, uap->envv);
	if (error == 0) {
		args.fd = uap->fd;
		error = kern_execve(td, &args, NULL, oldvmspace);
	}
	post_execve(td, error, oldvmspace);
	AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct __mac_execve_args {
	char *fname;
	char **argv;
	char **envv;
	struct mac *mac_p;
};
#endif

int
sys___mac_execve(struct thread *td, struct __mac_execve_args *uap)
{
#ifdef MAC
	struct image_args args;
	struct vmspace *oldvmspace;
	int error;

	error = pre_execve(td, &oldvmspace);
	if (error != 0)
		return (error);
	error = exec_copyin_args(&args, uap->fname, UIO_USERSPACE,
	    uap->argv, uap->envv);
	if (error == 0)
		error = kern_execve(td, &args, uap->mac_p, oldvmspace);
	post_execve(td, error, oldvmspace);
	AUDIT_SYSCALL_EXIT(error == EJUSTRETURN ? 0 : error, td);
	return (error);
#else
	return (ENOSYS);
#endif
}

int
pre_execve(struct thread *td, struct vmspace **oldvmspace)
{
	struct proc *p;
	int error;

	KASSERT(td == curthread, ("non-current thread %p", td));
	error = 0;
	p = td->td_proc;
	if ((p->p_flag & P_HADTHREADS) != 0) {
		PROC_LOCK(p);
		if (thread_single(p, SINGLE_BOUNDARY) != 0)
			error = ERESTART;
		PROC_UNLOCK(p);
	}
	KASSERT(error != 0 || (td->td_pflags & TDP_EXECVMSPC) == 0,
	    ("nested execve"));
	*oldvmspace = p->p_vmspace;
	return (error);
}

void
post_execve(struct thread *td, int error, struct vmspace *oldvmspace)
{
	struct proc *p;

	KASSERT(td == curthread, ("non-current thread %p", td));
	p = td->td_proc;
	if ((p->p_flag & P_HADTHREADS) != 0) {
		PROC_LOCK(p);
		/*
		 * If success, we upgrade to SINGLE_EXIT state to
		 * force other threads to suicide.
		 */
		if (error == EJUSTRETURN)
			thread_single(p, SINGLE_EXIT);
		else
			thread_single_end(p, SINGLE_BOUNDARY);
		PROC_UNLOCK(p);
	}
	exec_cleanup(td, oldvmspace);
}

/*
 * kern_execve() has the astonishing property of not always returning to
 * the caller.  If sufficiently bad things happen during the call to
 * do_execve(), it can end up calling exit1(); as a result, callers must
 * avoid doing anything which they might need to undo (e.g., allocating
 * memory).
 */
int
kern_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
    struct vmspace *oldvmspace)
{

	TSEXEC(td->td_proc->p_pid, args->begin_argv);
	AUDIT_ARG_ARGV(args->begin_argv, args->argc,
	    exec_args_get_begin_envv(args) - args->begin_argv);
	AUDIT_ARG_ENVV(exec_args_get_begin_envv(args), args->envc,
	    args->endp - exec_args_get_begin_envv(args));

	/* Must have at least one argument. */
	if (args->argc == 0) {
		exec_free_args(args);
		return (EINVAL);
	}
	return (do_execve(td, args, mac_p, oldvmspace));
}
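
/*
 * Usage sketch (not part of the original file): because kern_execve()
 * consumes the image_args and may not return, in-kernel callers follow the
 * same pattern as the syscall wrappers above -- build the argument block,
 * call kern_execve(), and always run post_execve() afterwards.  The path
 * and helper below are hypothetical.
 */
#if 0
static int
example_kernel_exec(struct thread *td, const char *path)
{
	struct image_args args;
	struct vmspace *oldvmspace;
	int error;

	error = pre_execve(td, &oldvmspace);
	if (error != 0)
		return (error);
	bzero(&args, sizeof(args));
	error = exec_alloc_args(&args);
	if (error == 0)
		error = exec_args_add_fname(&args, path, UIO_SYSSPACE);
	if (error == 0)
		error = exec_args_add_arg(&args, path, UIO_SYSSPACE);
	if (error != 0)
		exec_free_args(&args);
	else
		/* Consumes args; returns EJUSTRETURN on success. */
		error = kern_execve(td, &args, NULL, oldvmspace);
	post_execve(td, error, oldvmspace);
	return (error);
}
#endif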

static void
execve_nosetid(struct image_params *imgp)
{
	imgp->credential_setid = false;
	if (imgp->newcred != NULL) {
		crfree(imgp->newcred);
		imgp->newcred = NULL;
	}
}

/*
 * In-kernel implementation of execve().  All arguments are assumed to be
 * userspace pointers from the passed thread.
 */
static int
do_execve(struct thread *td, struct image_args *args, struct mac *mac_p,
    struct vmspace *oldvmspace)
{
	struct proc *p = td->td_proc;
	struct nameidata nd;
	struct ucred *oldcred;
	struct uidinfo *euip = NULL;
	uintptr_t stack_base;
	struct image_params image_params, *imgp;
	struct vattr attr;
	struct pargs *oldargs = NULL, *newargs = NULL;
	struct sigacts *oldsigacts = NULL, *newsigacts = NULL;
#ifdef KTRACE
	struct ktr_io_params *kiop;
#endif
	struct vnode *oldtextvp, *newtextvp;
	struct vnode *oldtextdvp, *newtextdvp;
	char *oldbinname, *newbinname;
	bool credential_changing;
#ifdef MAC
	struct label *interpvplabel = NULL;
	bool will_transition;
#endif
#ifdef HWPMC_HOOKS
	struct pmckern_procexec pe;
#endif
	int error, i, orig_osrel;
	uint64_t orig_fctl0;
	Elf_Brandinfo *orig_brandinfo;
	size_t freepath_size;
	static const char fexecv_proc_title[] = "(fexecv)";

	imgp = &image_params;
	oldtextvp = oldtextdvp = NULL;
	newtextvp = newtextdvp = NULL;
	newbinname = oldbinname = NULL;
#ifdef KTRACE
	kiop = NULL;
#endif

	/*
	 * Lock the process and set the P_INEXEC flag to indicate that
	 * it should be left alone until we're done here.  This is
	 * necessary to avoid race conditions - e.g. in ptrace() -
	 * that might allow a local user to illicitly obtain elevated
	 * privileges.
	 */
	PROC_LOCK(p);
	KASSERT((p->p_flag & P_INEXEC) == 0,
	    ("%s(): process already has P_INEXEC flag", __func__));
	p->p_flag |= P_INEXEC;
	PROC_UNLOCK(p);

	/*
	 * Initialize part of the common data
	 */
	bzero(imgp, sizeof(*imgp));
	imgp->proc = p;
	imgp->attr = &attr;
	imgp->args = args;
	oldcred = p->p_ucred;
	orig_osrel = p->p_osrel;
	orig_fctl0 = p->p_fctl0;
	orig_brandinfo = p->p_elf_brandinfo;

#ifdef MAC
	error = mac_execve_enter(imgp, mac_p);
	if (error)
		goto exec_fail;
#endif

	SDT_PROBE1(proc, , , exec, args->fname);

interpret:
	if (args->fname != NULL) {
#ifdef CAPABILITY_MODE
		/*
		 * While capability mode can't reach this point via direct
		 * path arguments to execve(), we also don't allow
		 * interpreters to be used in capability mode (for now).
		 * Catch indirect lookups and return a permissions error.
		 */
		if (IN_CAPABILITY_MODE(td)) {
			error = ECAPMODE;
			goto exec_fail;
		}
#endif

		/*
		 * Translate the file name. namei() returns a vnode
		 * pointer in ni_vp among other things.
		 */
		NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | LOCKSHARED | FOLLOW |
		    AUDITVNODE1 | WANTPARENT, UIO_SYSSPACE,
		    args->fname);

		error = namei(&nd);
		if (error)
			goto exec_fail;

		newtextvp = nd.ni_vp;
		newtextdvp = nd.ni_dvp;
		newbinname = malloc(nd.ni_cnd.cn_namelen + 1, M_PARGS,
		    M_WAITOK);
		memcpy(newbinname, nd.ni_cnd.cn_nameptr, nd.ni_cnd.cn_namelen);
		newbinname[nd.ni_cnd.cn_namelen] = '\0';
		imgp->vp = newtextvp;

		/*
		 * Do the best to calculate the full path to the image file.
		 */
		if (args->fname[0] == '/') {
			imgp->execpath = args->fname;
		} else {
			VOP_UNLOCK(imgp->vp);
			freepath_size = MAXPATHLEN;
			if (vn_fullpath_hardlink(newtextvp, newtextdvp,
			    newbinname, nd.ni_cnd.cn_namelen, &imgp->execpath,
			    &imgp->freepath, &freepath_size) != 0)
				imgp->execpath = args->fname;
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		}
	} else if (imgp->interpreter_vp) {
		/*
		 * An image activator has already provided an open vnode
		 */
		newtextvp = imgp->interpreter_vp;
		imgp->interpreter_vp = NULL;
		if (vn_fullpath(newtextvp, &imgp->execpath,
		    &imgp->freepath) != 0)
			imgp->execpath = args->fname;
		vn_lock(newtextvp, LK_SHARED | LK_RETRY);
		AUDIT_ARG_VNODE1(newtextvp);
		imgp->vp = newtextvp;
	} else {
		AUDIT_ARG_FD(args->fd);

		/*
		 * If the descriptor was not opened with O_PATH, then
		 * we require that it was opened with O_EXEC or
		 * O_RDONLY.  In either case, exec_check_permissions()
		 * below checks the _current_ file access mode regardless
		 * of the permissions additionally checked at the
		 * open(2).
		 */
		error = fgetvp_exec(td, args->fd, &cap_fexecve_rights,
		    &newtextvp);
		if (error != 0)
			goto exec_fail;

		if (vn_fullpath(newtextvp, &imgp->execpath,
		    &imgp->freepath) != 0)
			imgp->execpath = args->fname;
		vn_lock(newtextvp, LK_SHARED | LK_RETRY);
		AUDIT_ARG_VNODE1(newtextvp);
		imgp->vp = newtextvp;
	}

	/*
	 * Check file permissions.  Also 'opens' file and sets its vnode to
	 * text mode.
	 */
	error = exec_check_permissions(imgp);
	if (error)
		goto exec_fail_dealloc;

	imgp->object = imgp->vp->v_object;
	if (imgp->object != NULL)
		vm_object_reference(imgp->object);

	error = exec_map_first_page(imgp);
	if (error)
		goto exec_fail_dealloc;

	imgp->proc->p_osrel = 0;
	imgp->proc->p_fctl0 = 0;
	imgp->proc->p_elf_brandinfo = NULL;

	/*
	 * Implement image setuid/setgid.
	 *
	 * Determine new credentials before attempting image activators
	 * so that it can be used by process_exec handlers to determine
	 * credential/setid changes.
	 *
	 * Don't honor setuid/setgid if the filesystem prohibits it or if
	 * the process is being traced.
	 *
	 * We disable setuid/setgid/etc in capability mode on the basis
	 * that most setugid applications are not written with that
	 * environment in mind, and will therefore almost certainly operate
	 * incorrectly. In principle there's no reason that setugid
	 * applications might not be useful in capability mode, so we may want
	 * to reconsider this conservative design choice in the future.
	 *
	 * XXXMAC: For the time being, use NOSUID to also prohibit
	 * transitions on the file system.
	 */
	credential_changing = false;
	credential_changing |= (attr.va_mode & S_ISUID) &&
	    oldcred->cr_uid != attr.va_uid;
	credential_changing |= (attr.va_mode & S_ISGID) &&
	    oldcred->cr_gid != attr.va_gid;
#ifdef MAC
	will_transition = mac_vnode_execve_will_transition(oldcred, imgp->vp,
	    interpvplabel, imgp) != 0;
	credential_changing |= will_transition;
#endif

	/* Don't inherit PROC_PDEATHSIG_CTL value if setuid/setgid. */
	if (credential_changing)
		imgp->proc->p_pdeathsig = 0;

	if (credential_changing &&
#ifdef CAPABILITY_MODE
	    ((oldcred->cr_flags & CRED_FLAG_CAPMODE) == 0) &&
#endif
	    (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
	    (p->p_flag & P_TRACED) == 0) {
		imgp->credential_setid = true;
		VOP_UNLOCK(imgp->vp);
		imgp->newcred = crdup(oldcred);
		if (attr.va_mode & S_ISUID) {
			euip = uifind(attr.va_uid);
			change_euid(imgp->newcred, euip);
		}
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		if (attr.va_mode & S_ISGID)
			change_egid(imgp->newcred, attr.va_gid);
		/*
		 * Implement correct POSIX saved-id behavior.
		 *
		 * XXXMAC: Note that the current logic will save the
		 * uid and gid if a MAC domain transition occurs, even
		 * though maybe it shouldn't.
		 */
		change_svuid(imgp->newcred, imgp->newcred->cr_uid);
		change_svgid(imgp->newcred, imgp->newcred->cr_gid);
	} else {
		/*
		 * Implement correct POSIX saved-id behavior.
		 *
		 * XXX: It's not clear that the existing behavior is
		 * POSIX-compliant.  A number of sources indicate that the
		 * saved uid/gid should only be updated if the new ruid is
		 * not equal to the old ruid, or the new euid is not equal
		 * to the old euid and the new euid is not equal to the old
		 * ruid.  The FreeBSD code always updates the saved uid/gid.
		 * Also, this code uses the new (replaced) euid and egid as
		 * the source, which may or may not be the right ones to use.
		 */
		if (oldcred->cr_svuid != oldcred->cr_uid ||
		    oldcred->cr_svgid != oldcred->cr_gid) {
			VOP_UNLOCK(imgp->vp);
			imgp->newcred = crdup(oldcred);
			vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
			change_svuid(imgp->newcred, imgp->newcred->cr_uid);
			change_svgid(imgp->newcred, imgp->newcred->cr_gid);
		}
	}
	/* The new credentials are installed into the process later. */

	/*
	 * Loop through the list of image activators, calling each one.
	 * An activator returns -1 if there is no match, 0 on success,
	 * and an error otherwise.
	 */
	error = -1;
	for (i = 0; error == -1 && execsw[i]; ++i) {
		if (execsw[i]->ex_imgact == NULL)
			continue;
		error = (*execsw[i]->ex_imgact)(imgp);
	}

	if (error) {
		if (error == -1)
			error = ENOEXEC;
		goto exec_fail_dealloc;
	}

	/*
	 * Special interpreter operation, cleanup and loop up to try to
	 * activate the interpreter.
	 */
	if (imgp->interpreted) {
		exec_unmap_first_page(imgp);
		/*
		 * The text reference needs to be removed for scripts.
		 * There is a short period before we determine that
		 * something is a script where text reference is active.
		 * The vnode lock is held over this entire period
		 * so nothing should illegitimately be blocked.
		 */
		MPASS(imgp->textset);
		VOP_UNSET_TEXT_CHECKED(newtextvp);
		imgp->textset = false;
		/* free name buffer and old vnode */
#ifdef MAC
		mac_execve_interpreter_enter(newtextvp, &interpvplabel);
#endif
		if (imgp->opened) {
			VOP_CLOSE(newtextvp, FREAD, td->td_ucred, td);
			imgp->opened = false;
		}
		vput(newtextvp);
		imgp->vp = newtextvp = NULL;
		if (args->fname != NULL) {
			if (newtextdvp != NULL) {
				vrele(newtextdvp);
				newtextdvp = NULL;
			}
			free(newbinname, M_PARGS);
			newbinname = NULL;
		}
		vm_object_deallocate(imgp->object);
		imgp->object = NULL;
		execve_nosetid(imgp);
		imgp->execpath = NULL;
		free(imgp->freepath, M_TEMP);
		imgp->freepath = NULL;
		/* set new name to that of the interpreter */
		if (imgp->interpreter_vp) {
			args->fname = NULL;
		} else {
			args->fname = imgp->interpreter_name;
		}
		goto interpret;
	}

	/*
	 * NB: We unlock the vnode here because it is believed that none
	 * of the sv_copyout_strings/sv_fixup operations require the vnode.
	 */
	VOP_UNLOCK(imgp->vp);

	if (disallow_high_osrel &&
	    P_OSREL_MAJOR(p->p_osrel) > P_OSREL_MAJOR(__FreeBSD_version)) {
		error = ENOEXEC;
		uprintf("Osrel %d for image %s too high\n", p->p_osrel,
		    imgp->execpath != NULL ? imgp->execpath : "<unresolved>");
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		goto exec_fail_dealloc;
	}

	/*
	 * Copy out strings (args and env) and initialize stack base.
	 */
	error = (*p->p_sysent->sv_copyout_strings)(imgp, &stack_base);
	if (error != 0) {
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		goto exec_fail_dealloc;
	}

	/*
	 * Stack setup.
	 */
	error = (*p->p_sysent->sv_fixup)(&stack_base, imgp);
	if (error != 0) {
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		goto exec_fail_dealloc;
	}

	/*
	 * For security and other reasons, the file descriptor table cannot be
	 * shared after an exec.
	 */
	fdunshare(td);
	pdunshare(td);
	/* close files on exec */
	fdcloseexec(td);

	/*
	 * Malloc things before we need locks.
	 */
	i = exec_args_get_begin_envv(imgp->args) - imgp->args->begin_argv;
	/* Cache arguments if they fit inside our allowance */
	if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
		newargs = pargs_alloc(i);
		bcopy(imgp->args->begin_argv, newargs->ar_args, i);
	}

	/*
	 * For security and other reasons, signal handlers cannot
	 * be shared after an exec. The new process gets a copy of the old
	 * handlers. In execsigs(), the new process will have its signals
	 * reset.
	 */
	if (sigacts_shared(p->p_sigacts)) {
		oldsigacts = p->p_sigacts;
		newsigacts = sigacts_alloc();
		sigacts_copy(newsigacts, oldsigacts);
	}

	vn_lock(imgp->vp, LK_SHARED | LK_RETRY);

	PROC_LOCK(p);
	if (oldsigacts)
		p->p_sigacts = newsigacts;

	/* reset caught signals */
	execsigs(p);

	/* name this process - nameiexec(p, ndp) */
	bzero(p->p_comm, sizeof(p->p_comm));
	if (args->fname)
		bcopy(nd.ni_cnd.cn_nameptr, p->p_comm,
		    min(nd.ni_cnd.cn_namelen, MAXCOMLEN));
	else if (vn_commname(newtextvp, p->p_comm, sizeof(p->p_comm)) != 0)
		bcopy(fexecv_proc_title, p->p_comm, sizeof(fexecv_proc_title));
	bcopy(p->p_comm, td->td_name, sizeof(td->td_name));
#ifdef KTR
	sched_clear_tdname(td);
#endif

	/*
	 * mark as execed, wakeup the process that vforked (if any) and tell
	 * it that it now has its own resources back
	 */
	p->p_flag |= P_EXEC;
	if ((p->p_flag2 & P2_NOTRACE_EXEC) == 0)
		p->p_flag2 &= ~P2_NOTRACE;
	if ((p->p_flag2 & P2_STKGAP_DISABLE_EXEC) == 0)
		p->p_flag2 &= ~P2_STKGAP_DISABLE;
	p->p_flag2 &= ~(P2_MEMBAR_PRIVE | P2_MEMBAR_PRIVE_SYNCORE |
	    P2_MEMBAR_GLOBE);
	if (p->p_flag & P_PPWAIT) {
		p->p_flag &= ~(P_PPWAIT | P_PPTRACE);
		cv_broadcast(&p->p_pwait);
		/* STOPs are no longer ignored, arrange for AST */
		signotify(td);
	}

	if ((imgp->sysent->sv_setid_allowed != NULL &&
	    !(*imgp->sysent->sv_setid_allowed)(td, imgp)) ||
	    (p->p_flag2 & P2_NO_NEW_PRIVS) != 0)
		execve_nosetid(imgp);

	/*
	 * Implement image setuid/setgid installation.
	 */
	if (imgp->credential_setid) {
		/*
		 * Turn off syscall tracing for set-id programs, except for
		 * root.  Record any set-id flags first to make sure that
		 * we do not regain any tracing during a possible block.
		 */
		setsugid(p);
#ifdef KTRACE
		kiop = ktrprocexec(p);
#endif
		/*
		 * Close any file descriptors 0..2 that reference procfs,
		 * then make sure file descriptors 0..2 are in use.
		 *
		 * Both fdsetugidsafety() and fdcheckstd() may call functions
		 * taking sleepable locks, so temporarily drop our locks.
		 */
		PROC_UNLOCK(p);
		VOP_UNLOCK(imgp->vp);
		fdsetugidsafety(td);
		error = fdcheckstd(td);
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
		if (error != 0)
			goto exec_fail_dealloc;
		PROC_LOCK(p);
#ifdef MAC
		if (will_transition) {
			mac_vnode_execve_transition(oldcred, imgp->newcred,
			    imgp->vp, interpvplabel, imgp);
		}
#endif
	} else {
		if (oldcred->cr_uid == oldcred->cr_ruid &&
		    oldcred->cr_gid == oldcred->cr_rgid)
			p->p_flag &= ~P_SUGID;
	}
	/*
	 * Set the new credentials.
	 */
	if (imgp->newcred != NULL) {
		proc_set_cred(p, imgp->newcred);
		crfree(oldcred);
		oldcred = NULL;
	}

	/*
	 * Store the vp for use in kern.proc.pathname.  This vnode was
	 * referenced by namei() or by fexecve variant of fname handling.
	 */
	oldtextvp = p->p_textvp;
	p->p_textvp = newtextvp;
	oldtextdvp = p->p_textdvp;
	p->p_textdvp = newtextdvp;
	newtextdvp = NULL;
	oldbinname = p->p_binname;
	p->p_binname = newbinname;
	newbinname = NULL;

#ifdef KDTRACE_HOOKS
	/*
	 * Tell the DTrace fasttrap provider about the exec if it
	 * has declared an interest.
	 */
	if (dtrace_fasttrap_exec)
		dtrace_fasttrap_exec(p);
#endif

	/*
	 * Notify others that we exec'd, and clear the P_INEXEC flag
	 * as we're now a bona fide freshly-execed process.
	 */
	KNOTE_LOCKED(p->p_klist, NOTE_EXEC);
	p->p_flag &= ~P_INEXEC;

	/* clear "fork but no exec" flag, as we _are_ execing */
	p->p_acflag &= ~AFORK;

	/*
	 * Free any previous argument cache and replace it with
	 * the new argument cache, if any.
	 */
	oldargs = p->p_args;
	p->p_args = newargs;
	newargs = NULL;

	PROC_UNLOCK(p);

#ifdef	HWPMC_HOOKS
	/*
	 * Check if system-wide sampling is in effect or if the
	 * current process is using PMCs.  If so, do exec() time
	 * processing.  This processing needs to happen AFTER the
	 * P_INEXEC flag is cleared.
	 */
	if (PMC_SYSTEM_SAMPLING_ACTIVE() || PMC_PROC_IS_USING_PMCS(p)) {
		VOP_UNLOCK(imgp->vp);
		pe.pm_credentialschanged = credential_changing;
		pe.pm_baseaddr = imgp->reloc_base;
		pe.pm_dynaddr = imgp->et_dyn_addr;

		PMC_CALL_HOOK_X(td, PMC_FN_PROCESS_EXEC, (void *) &pe);
		vn_lock(imgp->vp, LK_SHARED | LK_RETRY);
	}
#endif

	/* Set values passed into the program in registers. */
	(*p->p_sysent->sv_setregs)(td, imgp, stack_base);

	VOP_MMAPPED(imgp->vp);

	SDT_PROBE1(proc, , , exec__success, args->fname);

exec_fail_dealloc:
	if (error != 0) {
		p->p_osrel = orig_osrel;
		p->p_fctl0 = orig_fctl0;
		p->p_elf_brandinfo = orig_brandinfo;
	}

	if (imgp->firstpage != NULL)
		exec_unmap_first_page(imgp);

	if (imgp->vp != NULL) {
		if (imgp->opened)
			VOP_CLOSE(imgp->vp, FREAD, td->td_ucred, td);
		if (imgp->textset)
			VOP_UNSET_TEXT_CHECKED(imgp->vp);
		if (error != 0)
			vput(imgp->vp);
		else
			VOP_UNLOCK(imgp->vp);
		if (args->fname != NULL)
			NDFREE_PNBUF(&nd);
		if (newtextdvp != NULL)
			vrele(newtextdvp);
		free(newbinname, M_PARGS);
	}

	if (imgp->object != NULL)
		vm_object_deallocate(imgp->object);

	free(imgp->freepath, M_TEMP);

	if (error == 0) {
		if (p->p_ptevents & PTRACE_EXEC) {
			PROC_LOCK(p);
			if (p->p_ptevents & PTRACE_EXEC)
				td->td_dbgflags |= TDB_EXEC;
			PROC_UNLOCK(p);
		}
	} else {
exec_fail:
		/* we're done here, clear P_INEXEC */
		PROC_LOCK(p);
		p->p_flag &= ~P_INEXEC;
		PROC_UNLOCK(p);

		SDT_PROBE1(proc, , , exec__failure, error);
	}

	if (imgp->newcred != NULL && oldcred != NULL)
		crfree(imgp->newcred);

#ifdef MAC
	mac_execve_exit(imgp);
	mac_execve_interpreter_exit(interpvplabel);
#endif
	exec_free_args(args);

	/*
	 * Handle deferred decrement of ref counts.
	 */
	if (oldtextvp != NULL)
		vrele(oldtextvp);
	if (oldtextdvp != NULL)
		vrele(oldtextdvp);
	free(oldbinname, M_PARGS);
#ifdef KTRACE
	ktr_io_params_free(kiop);
#endif
	pargs_drop(oldargs);
	pargs_drop(newargs);
	if (oldsigacts != NULL)
		sigacts_free(oldsigacts);
	if (euip != NULL)
		uifree(euip);

	if (error && imgp->vmspace_destroyed) {
		/* sorry, no more process anymore. exit gracefully */
		exec_cleanup(td, oldvmspace);
		exit1(td, 0, SIGABRT);
		/* NOT REACHED */
	}

	/*
	 * We don't want cpu_set_syscall_retval() to overwrite any of
	 * the register values put in place by exec_setregs().
	 * Implementations of cpu_set_syscall_retval() will leave
	 * registers unmodified when returning EJUSTRETURN.
	 */
	return (error == 0 ? EJUSTRETURN : error);
}

void
exec_cleanup(struct thread *td, struct vmspace *oldvmspace)
{
	if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
		KASSERT(td->td_proc->p_vmspace != oldvmspace,
		    ("oldvmspace still used"));
		vmspace_free(oldvmspace);
		td->td_pflags &= ~TDP_EXECVMSPC;
	}
}

int
exec_map_first_page(struct image_params *imgp)
{
	vm_object_t object;
	vm_page_t m;
	int error;

	if (imgp->firstpage != NULL)
		exec_unmap_first_page(imgp);

	object = imgp->vp->v_object;
	if (object == NULL)
		return (EACCES);
#if VM_NRESERVLEVEL > 0
	if ((object->flags & OBJ_COLORED) == 0) {
		VM_OBJECT_WLOCK(object);
		vm_object_color(object, 0);
		VM_OBJECT_WUNLOCK(object);
	}
#endif
	error = vm_page_grab_valid_unlocked(&m, object, 0,
	    VM_ALLOC_COUNT(VM_INITIAL_PAGEIN) |
	    VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED);

	if (error != VM_PAGER_OK)
		return (EIO);
	imgp->firstpage = sf_buf_alloc(m, 0);
	imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);

	return (0);
}

void
exec_unmap_first_page(struct image_params *imgp)
{
	vm_page_t m;

	if (imgp->firstpage != NULL) {
		m = sf_buf_page(imgp->firstpage);
		sf_buf_free(imgp->firstpage);
		imgp->firstpage = NULL;
		vm_page_unwire(m, PQ_ACTIVE);
	}
}

void
exec_onexec_old(struct thread *td)
{
	sigfastblock_clear(td);
	umtx_exec(td->td_proc);
}

/*
 * This is an optimization which removes the unmanaged shared page
 * mapping. In combination with pmap_remove_pages(), which cleans all
 * managed mappings in the process' vmspace pmap, no work will be left
 * for pmap_remove(min, max).
 */
void
exec_free_abi_mappings(struct proc *p)
{
	struct vmspace *vmspace;

	vmspace = p->p_vmspace;
	if (refcount_load(&vmspace->vm_refcnt) != 1)
		return;

	if (!PROC_HAS_SHP(p))
		return;

	pmap_remove(vmspace_pmap(vmspace), vmspace->vm_shp_base,
	    vmspace->vm_shp_base + p->p_sysent->sv_shared_page_len);
}

/*
 * Run down the current address space and install a new one.
 */
int
exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv)
{
	int error;
	struct proc *p = imgp->proc;
	struct vmspace *vmspace = p->p_vmspace;
	struct thread *td = curthread;
	vm_offset_t sv_minuser;
	vm_map_t map;

	imgp->vmspace_destroyed = true;
	imgp->sysent = sv;

	if (p->p_sysent->sv_onexec_old != NULL)
		p->p_sysent->sv_onexec_old(td);
	itimers_exec(p);

	EVENTHANDLER_DIRECT_INVOKE(process_exec, p, imgp);

	/*
	 * Blow away entire process VM, if address space not shared,
	 * otherwise, create a new VM space so that other threads are
	 * not disrupted
	 */
	map = &vmspace->vm_map;
	if (map_at_zero)
		sv_minuser = sv->sv_minuser;
	else
		sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE);
	if (refcount_load(&vmspace->vm_refcnt) == 1 &&
	    vm_map_min(map) == sv_minuser &&
	    vm_map_max(map) == sv->sv_maxuser &&
	    cpu_exec_vmspace_reuse(p, map)) {
		exec_free_abi_mappings(p);
		shmexit(vmspace);
		pmap_remove_pages(vmspace_pmap(vmspace));
		vm_map_remove(map, vm_map_min(map), vm_map_max(map));
		/*
		 * An exec terminates mlockall(MCL_FUTURE).
		 * ASLR and W^X states must be re-evaluated.
		 */
		vm_map_lock(map);
		vm_map_modflags(map, 0, MAP_WIREFUTURE | MAP_ASLR |
		    MAP_ASLR_IGNSTART | MAP_ASLR_STACK | MAP_WXORX);
		vm_map_unlock(map);
	} else {
		error = vmspace_exec(p, sv_minuser, sv->sv_maxuser);
		if (error)
			return (error);
		vmspace = p->p_vmspace;
		map = &vmspace->vm_map;
	}
	map->flags |= imgp->map_flags;

	return (sv->sv_onexec != NULL ? sv->sv_onexec(p, imgp) : 0);
}
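
/*
 * Sketch (not part of the original file): exec_new_vmspace() finishes by
 * calling the ABI's optional sv_onexec hook with the signature used above.
 * A hypothetical hook looks like this; returning non-zero aborts the exec.
 */
#if 0
static int
example_onexec(struct proc *p, struct image_params *imgp)
{

	/* Per-ABI setup of the fresh address space would go here. */
	return (0);
}
#endif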

/*
 * Compute the stack size limit and map the main process stack.
 * Map the shared page.
 */
int
exec_map_stack(struct image_params *imgp)
{
	struct rlimit rlim_stack;
	struct sysentvec *sv;
	struct proc *p;
	vm_map_t map;
	struct vmspace *vmspace;
	vm_offset_t stack_addr, stack_top;
	vm_offset_t sharedpage_addr;
	u_long ssiz;
	int error, find_space, stack_off;
	vm_prot_t stack_prot;
	vm_object_t obj;

	p = imgp->proc;
	sv = p->p_sysent;

	if (imgp->stack_sz != 0) {
		ssiz = trunc_page(imgp->stack_sz);
		PROC_LOCK(p);
		lim_rlimit_proc(p, RLIMIT_STACK, &rlim_stack);
		PROC_UNLOCK(p);
		if (ssiz > rlim_stack.rlim_max)
			ssiz = rlim_stack.rlim_max;
		if (ssiz > rlim_stack.rlim_cur) {
			rlim_stack.rlim_cur = ssiz;
			kern_setrlimit(curthread, RLIMIT_STACK, &rlim_stack);
		}
	} else if (sv->sv_maxssiz != NULL) {
		ssiz = *sv->sv_maxssiz;
	} else {
		ssiz = maxssiz;
	}

	vmspace = p->p_vmspace;
	map = &vmspace->vm_map;

	stack_prot = sv->sv_shared_page_obj != NULL && imgp->stack_prot != 0 ?
	    imgp->stack_prot : sv->sv_stackprot;
	if ((map->flags & MAP_ASLR_STACK) != 0) {
		stack_addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
		    lim_max(curthread, RLIMIT_DATA));
		find_space = VMFS_ANY_SPACE;
	} else {
		stack_addr = sv->sv_usrstack - ssiz;
		find_space = VMFS_NO_SPACE;
	}
	error = vm_map_find(map, NULL, 0, &stack_addr, (vm_size_t)ssiz,
	    sv->sv_usrstack, find_space, stack_prot, VM_PROT_ALL,
	    MAP_STACK_GROWS_DOWN);
	if (error != KERN_SUCCESS) {
		uprintf("exec_new_vmspace: mapping stack size %#jx prot %#x "
		    "failed, mach error %d errno %d\n", (uintmax_t)ssiz,
		    stack_prot, error, vm_mmap_to_errno(error));
		return (vm_mmap_to_errno(error));
	}

	stack_top = stack_addr + ssiz;
	if ((map->flags & MAP_ASLR_STACK) != 0) {
		/* Randomize within the first page of the stack. */
		arc4rand(&stack_off, sizeof(stack_off), 0);
		stack_top -= rounddown2(stack_off & PAGE_MASK, sizeof(void *));
	}

	/* Map a shared page */
	obj = sv->sv_shared_page_obj;
	if (obj == NULL) {
		sharedpage_addr = 0;
		goto out;
	}

	/*
	 * If randomization is disabled then the shared page will
	 * be mapped at address specified in sysentvec.
	 * Otherwise any address above .data section can be selected.
	 * Same logic is used for stack address randomization.
	 * If the address randomization is applied map a guard page
	 * at the top of UVA.
	 */
	vm_object_reference(obj);
	if ((imgp->imgp_flags & IMGP_ASLR_SHARED_PAGE) != 0) {
		sharedpage_addr = round_page((vm_offset_t)p->p_vmspace->vm_daddr +
		    lim_max(curthread, RLIMIT_DATA));

		error = vm_map_fixed(map, NULL, 0,
		    sv->sv_maxuser - PAGE_SIZE, PAGE_SIZE,
		    VM_PROT_NONE, VM_PROT_NONE, MAP_CREATE_GUARD);
		if (error != KERN_SUCCESS) {
			/*
			 * This is not fatal, so let's just print a warning
			 * and carry on.
			 */
			uprintf("%s: Mapping guard page at the top of UVA failed"
			    " mach error %d errno %d",
			    __func__, error, vm_mmap_to_errno(error));
		}

		error = vm_map_find(map, obj, 0,
		    &sharedpage_addr, sv->sv_shared_page_len,
		    sv->sv_maxuser, VMFS_ANY_SPACE,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
	} else {
		sharedpage_addr = sv->sv_shared_page_base;
		vm_map_fixed(map, obj, 0,
		    sharedpage_addr, sv->sv_shared_page_len,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    VM_PROT_READ | VM_PROT_EXECUTE,
		    MAP_INHERIT_SHARE | MAP_ACC_NO_CHARGE);
	}
	if (error != KERN_SUCCESS) {
		uprintf("%s: mapping shared page at addr: %p"
		    "failed, mach error %d errno %d\n", __func__,
		    (void *)sharedpage_addr, error, vm_mmap_to_errno(error));
		vm_object_deallocate(obj);
		return (vm_mmap_to_errno(error));
	}
out:
	/*
	 * vm_ssize and vm_maxsaddr are somewhat antiquated concepts, but they
	 * are still used to enforce the stack rlimit on the process stack.
	 */
	vmspace->vm_maxsaddr = (char *)stack_addr;
	vmspace->vm_stacktop = stack_top;
	vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
	vmspace->vm_shp_base = sharedpage_addr;

	return (0);
}
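
/*
 * Worked example (not part of the original file): with MAP_ASLR_STACK set,
 * the code above lowers the initial stack top within the first page while
 * preserving pointer alignment.  For a hypothetical random stack_off of
 * 0x1237 on a 4K-page, 64-bit machine:
 *
 *	stack_off & PAGE_MASK             == 0x237
 *	rounddown2(0x237, sizeof(void *)) == 0x230
 *
 * so stack_top drops by 0x230 bytes and stays 8-byte aligned.
 */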

/*
 * Copy out argument and environment strings from the old process address
 * space into the temporary string buffer.
 */
int
exec_copyin_args(struct image_args *args, const char *fname,
    enum uio_seg segflg, char **argv, char **envv)
{
	long arg, env;
	int error;

	bzero(args, sizeof(*args));
	if (argv == NULL)
		return (EFAULT);

	/*
	 * Allocate demand-paged memory for the file name, argument, and
	 * environment strings.
	 */
	error = exec_alloc_args(args);
	if (error != 0)
		return (error);

	/*
	 * Copy the file name.
	 */
	error = exec_args_add_fname(args, fname, segflg);
	if (error != 0)
		goto err_exit;

	/*
	 * extract arguments first
	 */
	for (;;) {
		error = fueword(argv++, &arg);
		if (error == -1) {
			error = EFAULT;
			goto err_exit;
		}
		if (arg == 0)
			break;
		error = exec_args_add_arg(args, (char *)(uintptr_t)arg,
		    UIO_USERSPACE);
		if (error != 0)
			goto err_exit;
	}

	/*
	 * extract environment strings
	 */
	if (envv) {
		for (;;) {
			error = fueword(envv++, &env);
			if (error == -1) {
				error = EFAULT;
				goto err_exit;
			}
			if (env == 0)
				break;
			error = exec_args_add_env(args,
			    (char *)(uintptr_t)env, UIO_USERSPACE);
			if (error != 0)
				goto err_exit;
		}
	}

	return (0);

err_exit:
	exec_free_args(args);
	return (error);
}

struct exec_args_kva {
	vm_offset_t addr;
	u_int gen;
	SLIST_ENTRY(exec_args_kva) next;
};

DPCPU_DEFINE_STATIC(struct exec_args_kva *, exec_args_kva);

static SLIST_HEAD(, exec_args_kva) exec_args_kva_freelist;
static struct mtx exec_args_kva_mtx;
static u_int exec_args_gen;

static void
exec_prealloc_args_kva(void *arg __unused)
{
	struct exec_args_kva *argkva;
	u_int i;

	SLIST_INIT(&exec_args_kva_freelist);
	mtx_init(&exec_args_kva_mtx, "exec args kva", NULL, MTX_DEF);
	for (i = 0; i < exec_map_entries; i++) {
		argkva = malloc(sizeof(*argkva), M_PARGS, M_WAITOK);
		argkva->addr = kmap_alloc_wait(exec_map, exec_map_entry_size);
		argkva->gen = exec_args_gen;
		SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
	}
}
SYSINIT(exec_args_kva, SI_SUB_EXEC, SI_ORDER_ANY, exec_prealloc_args_kva, NULL);

static vm_offset_t
exec_alloc_args_kva(void **cookie)
{
	struct exec_args_kva *argkva;

	argkva = (void *)atomic_readandclear_ptr(
	    (uintptr_t *)DPCPU_PTR(exec_args_kva));
	if (argkva == NULL) {
		mtx_lock(&exec_args_kva_mtx);
		while ((argkva = SLIST_FIRST(&exec_args_kva_freelist)) == NULL)
			(void)mtx_sleep(&exec_args_kva_freelist,
			    &exec_args_kva_mtx, 0, "execkva", 0);
		SLIST_REMOVE_HEAD(&exec_args_kva_freelist, next);
		mtx_unlock(&exec_args_kva_mtx);
	}
	kasan_mark((void *)argkva->addr, exec_map_entry_size,
	    exec_map_entry_size, 0);
	*(struct exec_args_kva **)cookie = argkva;
	return (argkva->addr);
}

static void
exec_release_args_kva(struct exec_args_kva *argkva, u_int gen)
{
	vm_offset_t base;

	base = argkva->addr;
	kasan_mark((void *)argkva->addr, 0, exec_map_entry_size,
	    KASAN_EXEC_ARGS_FREED);
	if (argkva->gen != gen) {
		(void)vm_map_madvise(exec_map, base, base + exec_map_entry_size,
		    MADV_FREE);
		argkva->gen = gen;
	}
	if (!atomic_cmpset_ptr((uintptr_t *)DPCPU_PTR(exec_args_kva),
	    (uintptr_t)NULL, (uintptr_t)argkva)) {
		mtx_lock(&exec_args_kva_mtx);
		SLIST_INSERT_HEAD(&exec_args_kva_freelist, argkva, next);
		wakeup_one(&exec_args_kva_freelist);
		mtx_unlock(&exec_args_kva_mtx);
	}
}

static void
exec_free_args_kva(void *cookie)
{

	exec_release_args_kva(cookie, exec_args_gen);
}

static void
exec_args_kva_lowmem(void *arg __unused)
{
	SLIST_HEAD(, exec_args_kva) head;
	struct exec_args_kva *argkva;
	u_int gen;
	int i;

	gen = atomic_fetchadd_int(&exec_args_gen, 1) + 1;

	/*
	 * Force an madvise of each KVA range. Any currently allocated ranges
	 * will have MADV_FREE applied once they are freed.
	 */
	SLIST_INIT(&head);
	mtx_lock(&exec_args_kva_mtx);
	SLIST_SWAP(&head, &exec_args_kva_freelist, exec_args_kva);
	mtx_unlock(&exec_args_kva_mtx);
	while ((argkva = SLIST_FIRST(&head)) != NULL) {
		SLIST_REMOVE_HEAD(&head, next);
		exec_release_args_kva(argkva, gen);
	}

	CPU_FOREACH(i) {
		argkva = (void *)atomic_readandclear_ptr(
		    (uintptr_t *)DPCPU_ID_PTR(i, exec_args_kva));
		if (argkva != NULL)
			exec_release_args_kva(argkva, gen);
	}
}
EVENTHANDLER_DEFINE(vm_lowmem, exec_args_kva_lowmem, NULL,
    EVENTHANDLER_PRI_ANY);

/*
 * Allocate temporary demand-paged, zero-filled memory for the file name,
 * argument, and environment strings.
 */
int
exec_alloc_args(struct image_args *args)
{

	args->buf = (char *)exec_alloc_args_kva(&args->bufkva);
	return (args->buf != NULL ? 0 : ENOMEM);
}

void
exec_free_args(struct image_args *args)
{

	if (args->buf != NULL) {
		exec_free_args_kva(args->bufkva);
		args->buf = NULL;
	}
	if (args->fname_buf != NULL) {
		free(args->fname_buf, M_TEMP);
		args->fname_buf = NULL;
	}
}

/*
 * A set of functions to fill struct image args.
 *
 * NOTE: exec_args_add_fname() must be called (possibly with a NULL
 * fname) before the other functions.  All exec_args_add_arg() calls must
 * be made before any exec_args_add_env() calls.  exec_args_adjust_args()
 * may be called any time after exec_args_add_fname().
 *
 * exec_args_add_fname() - install path to be executed
 * exec_args_add_arg() - append an argument string
 * exec_args_add_env() - append an env string
 * exec_args_adjust_args() - adjust location of the argument list to
 *			     allow new arguments to be prepended
 *
 * (A usage sketch follows exec_args_get_begin_envv() below.)
 */
int
exec_args_add_fname(struct image_args *args, const char *fname,
    enum uio_seg segflg)
{
	int error;
	size_t length;

	KASSERT(args->fname == NULL, ("fname already appended"));
	KASSERT(args->endp == NULL, ("already appending to args"));

	if (fname != NULL) {
		args->fname = args->buf;
		error = segflg == UIO_SYSSPACE ?
		    copystr(fname, args->fname, PATH_MAX, &length) :
		    copyinstr(fname, args->fname, PATH_MAX, &length);
		if (error != 0)
			return (error == ENAMETOOLONG ? E2BIG : error);
	} else
		length = 0;

	/* Set up for _arg_*()/_env_*() */
	args->endp = args->buf + length;
	/* begin_argv must be set and kept updated */
	args->begin_argv = args->endp;
	KASSERT(exec_map_entry_size - length >= ARG_MAX,
	    ("too little space remaining for arguments %zu < %zu",
	    exec_map_entry_size - length, (size_t)ARG_MAX));
	args->stringspace = ARG_MAX;

	return (0);
}

static int
exec_args_add_str(struct image_args *args, const char *str,
    enum uio_seg segflg, int *countp)
{
	int error;
	size_t length;

	KASSERT(args->endp != NULL, ("endp not initialized"));
	KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));

	error = (segflg == UIO_SYSSPACE) ?
	    copystr(str, args->endp, args->stringspace, &length) :
	    copyinstr(str, args->endp, args->stringspace, &length);
	if (error != 0)
		return (error == ENAMETOOLONG ? E2BIG : error);
	args->stringspace -= length;
	args->endp += length;
	(*countp)++;

	return (0);
}

int
exec_args_add_arg(struct image_args *args, const char *argp,
    enum uio_seg segflg)
{

	KASSERT(args->envc == 0, ("appending args after env"));

	return (exec_args_add_str(args, argp, segflg, &args->argc));
}

int
exec_args_add_env(struct image_args *args, const char *envp,
    enum uio_seg segflg)
{

	if (args->envc == 0)
		args->begin_envv = args->endp;

	return (exec_args_add_str(args, envp, segflg, &args->envc));
}

int
exec_args_adjust_args(struct image_args *args, size_t consume, ssize_t extend)
{
	ssize_t offset;

	KASSERT(args->endp != NULL, ("endp not initialized"));
	KASSERT(args->begin_argv != NULL, ("begin_argp not initialized"));

	offset = extend - consume;
	if (args->stringspace < offset)
		return (E2BIG);
	memmove(args->begin_argv + extend, args->begin_argv + consume,
	    args->endp - args->begin_argv + consume);
	if (args->envc > 0)
		args->begin_envv += offset;
	args->endp += offset;
	args->stringspace -= offset;
	return (0);
}

char *
exec_args_get_begin_envv(struct image_args *args)
{

	KASSERT(args->endp != NULL, ("endp not initialized"));

	if (args->envc > 0)
		return (args->begin_envv);
	return (args->endp);
}
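
/*
 * Usage sketch (not part of the original file): the call order required by
 * the comment above -- fname first, then all args, then all envs -- shown
 * for hypothetical kernel-space strings:
 */
#if 0
static int
example_fill_args(struct image_args *args)
{
	int error;

	bzero(args, sizeof(*args));
	error = exec_alloc_args(args);
	if (error != 0)
		return (error);
	error = exec_args_add_fname(args, "/bin/sh", UIO_SYSSPACE);
	if (error == 0)
		error = exec_args_add_arg(args, "sh", UIO_SYSSPACE);
	if (error == 0)
		error = exec_args_add_arg(args, "-c", UIO_SYSSPACE);
	if (error == 0)	/* env strings only after the last argument */
		error = exec_args_add_env(args, "PATH=/bin", UIO_SYSSPACE);
	if (error != 0)
		exec_free_args(args);
	return (error);
}
#endif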

/*
 * Copy strings out to the new process address space, constructing new arg
 * and env vector tables. Return a pointer to the base so that it can be used
 * as the initial stack pointer.
 */
int
exec_copyout_strings(struct image_params *imgp, uintptr_t *stack_base)
{
	int argc, envc;
	char **vectp;
	char *stringp;
	uintptr_t destp, ustringp;
	struct ps_strings *arginfo;
	struct proc *p;
	struct sysentvec *sysent;
	size_t execpath_len;
	int error, szsigcode;
	char canary[sizeof(long) * 8];

	p = imgp->proc;
	sysent = p->p_sysent;

	destp = PROC_PS_STRINGS(p);
	arginfo = imgp->ps_strings = (void *)destp;

	/*
	 * Install sigcode.
	 */
	if (sysent->sv_shared_page_base == 0 && sysent->sv_szsigcode != NULL) {
		szsigcode = *(sysent->sv_szsigcode);
		destp -= szsigcode;
		destp = rounddown2(destp, sizeof(void *));
		error = copyout(sysent->sv_sigcode, (void *)destp, szsigcode);
		if (error != 0)
			return (error);
	}

	/*
	 * Copy the image path for the rtld.
	 */
	if (imgp->execpath != NULL && imgp->auxargs != NULL) {
		execpath_len = strlen(imgp->execpath) + 1;
		destp -= execpath_len;
		destp = rounddown2(destp, sizeof(void *));
		imgp->execpathp = (void *)destp;
		error = copyout(imgp->execpath, imgp->execpathp, execpath_len);
		if (error != 0)
			return (error);
	}

	/*
	 * Prepare the canary for SSP.
	 */
	arc4rand(canary, sizeof(canary), 0);
	destp -= sizeof(canary);
	imgp->canary = (void *)destp;
	error = copyout(canary, imgp->canary, sizeof(canary));
	if (error != 0)
		return (error);
	imgp->canarylen = sizeof(canary);

	/*
	 * Prepare the pagesizes array.
	 */
	imgp->pagesizeslen = sizeof(pagesizes[0]) * MAXPAGESIZES;
	destp -= imgp->pagesizeslen;
	destp = rounddown2(destp, sizeof(void *));
	imgp->pagesizes = (void *)destp;
	error = copyout(pagesizes, imgp->pagesizes, imgp->pagesizeslen);
	if (error != 0)
		return (error);

	/*
	 * Allocate room for the argument and environment strings.
	 */
	destp -= ARG_MAX - imgp->args->stringspace;
	destp = rounddown2(destp, sizeof(void *));
	ustringp = destp;

	if (imgp->auxargs) {
		/*
		 * Allocate room on the stack for the ELF auxargs
		 * array.  It has up to AT_COUNT entries.
		 */
		destp -= AT_COUNT * sizeof(Elf_Auxinfo);
		destp = rounddown2(destp, sizeof(void *));
	}

	vectp = (char **)destp;

	/*
	 * Allocate room for the argv[] and env vectors including the
	 * terminating NULL pointers.
	 */
	vectp -= imgp->args->argc + 1 + imgp->args->envc + 1;

	/*
	 * vectp also becomes our initial stack base
	 */
	*stack_base = (uintptr_t)vectp;

	stringp = imgp->args->begin_argv;
	argc = imgp->args->argc;
	envc = imgp->args->envc;

	/*
	 * Copy out strings - arguments and environment.
	 */
	error = copyout(stringp, (void *)ustringp,
	    ARG_MAX - imgp->args->stringspace);
	if (error != 0)
		return (error);

	/*
	 * Fill in "ps_strings" struct for ps, w, etc.
	 */
	if (suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp) != 0 ||
	    suword32(&arginfo->ps_nargvstr, argc) != 0)
		return (EFAULT);

	/*
	 * Fill in argument portion of vector table.
	 */
	for (; argc > 0; --argc) {
		if (suword(vectp++, ustringp) != 0)
			return (EFAULT);
		while (*stringp++ != 0)
			ustringp++;
		ustringp++;
	}

	/* a null vector table pointer separates the argp's from the envp's */
	if (suword(vectp++, 0) != 0)
		return (EFAULT);

	if (suword(&arginfo->ps_envstr, (long)(intptr_t)vectp) != 0 ||
	    suword32(&arginfo->ps_nenvstr, envc) != 0)
		return (EFAULT);

	/*
	 * Fill in environment portion of vector table.
	 */
	for (; envc > 0; --envc) {
		if (suword(vectp++, ustringp) != 0)
			return (EFAULT);
		while (*stringp++ != 0)
			ustringp++;
		ustringp++;
	}

	/* end of vector table is a null pointer */
	if (suword(vectp, 0) != 0)
		return (EFAULT);

	if (imgp->auxargs) {
		vectp++;
		error = imgp->sysent->sv_copyout_auxargs(imgp,
		    (uintptr_t)vectp);
		if (error != 0)
			return (error);
	}

	return (0);
}
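
/*
 * Usage sketch (not part of the original file): the vectors built above are
 * what userspace sees through struct ps_strings (<sys/exec.h>).  Assuming a
 * caller has already located the structure (e.g., via the kern.ps_strings
 * sysctl served earlier in this file), walking it looks like:
 */
#if 0
#include <sys/types.h>
#include <sys/exec.h>
#include <stdio.h>

static void
example_print_ps_strings(const struct ps_strings *ps)
{
	u_int i;

	for (i = 0; i < ps->ps_nargvstr; i++)
		printf("argv[%u] = %s\n", i, ps->ps_argvstr[i]);
	for (i = 0; i < ps->ps_nenvstr; i++)
		printf("envv[%u] = %s\n", i, ps->ps_envstr[i]);
}
#endif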

/*
 * Check permissions of file to execute.
 *	Called with imgp->vp locked.
 *	Return 0 for success or error code on failure.
 */
int
exec_check_permissions(struct image_params *imgp)
{
	struct vnode *vp = imgp->vp;
	struct vattr *attr = imgp->attr;
	struct thread *td;
	int error;

	td = curthread;

	/* Get file attributes */
	error = VOP_GETATTR(vp, attr, td->td_ucred);
	if (error)
		return (error);

#ifdef MAC
	error = mac_vnode_check_exec(td->td_ucred, imgp->vp, imgp);
	if (error)
		return (error);
#endif

	/*
	 * 1) Check if file execution is disabled for the filesystem that
	 *    this file resides on.
	 * 2) Ensure that at least one execute bit is on. Otherwise, a
	 *    privileged user will always succeed, and we don't want this
	 *    to happen unless the file really is executable.
	 * 3) Ensure that the file is a regular file.
	 */
	if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
	    (attr->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0 ||
	    (attr->va_type != VREG))
		return (EACCES);

	/*
	 * Zero length files can't be exec'd
	 */
	if (attr->va_size == 0)
		return (ENOEXEC);

	/*
	 * Check for execute permission to file based on current credentials.
	 */
	error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
	if (error)
		return (error);

	/*
	 * Check number of open-for-writes on the file and deny execution
	 * if there are any.
	 *
	 * Add a text reference now so no one can write to the
	 * executable while we're activating it.
	 *
	 * Remember if this was set before and unset it in case this is not
	 * actually an executable image.
	 */
	error = VOP_SET_TEXT(vp);
	if (error != 0)
		return (error);
	imgp->textset = true;

	/*
	 * Call filesystem specific open routine (which does nothing in the
	 * general case).
	 */
	error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL);
	if (error == 0)
		imgp->opened = true;
	return (error);
}

/*
 * Exec handler registration
 */
int
exec_register(const struct execsw *execsw_arg)
{
	const struct execsw **es, **xs, **newexecsw;
	u_int count = 2;	/* New slot and trailing NULL */

	if (execsw)
		for (es = execsw; *es; es++)
			count++;
	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
	xs = newexecsw;
	if (execsw)
		for (es = execsw; *es; es++)
			*xs++ = *es;
	*xs++ = execsw_arg;
	*xs = NULL;
	if (execsw)
		free(execsw, M_TEMP);
	execsw = newexecsw;
	return (0);
}

int
exec_unregister(const struct execsw *execsw_arg)
{
	const struct execsw **es, **xs, **newexecsw;
	int count = 1;

	if (execsw == NULL)
		panic("unregister with no handlers left?\n");

	for (es = execsw; *es; es++) {
		if (*es == execsw_arg)
			break;
	}
	if (*es == NULL)
		return (ENOENT);
	for (es = execsw; *es; es++)
		if (*es != execsw_arg)
			count++;
	newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
	xs = newexecsw;
	for (es = execsw; *es; es++)
		if (*es != execsw_arg)
			*xs++ = *es;
	*xs = NULL;
	free(execsw, M_TEMP);
	execsw = newexecsw;
	return (0);
}
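
/*
 * Usage sketch (not part of the original file): an image activator module
 * provides a struct execsw whose ex_imgact follows the protocol used by the
 * activator loop in do_execve(): return -1 for "not my format", 0 on
 * success, or an errno value to abort the exec.  The activator below is
 * hypothetical.
 */
#if 0
static int
example_imgact(struct image_params *imgp)
{

	/* Not our magic number: let the next activator have a look. */
	if (imgp->image_header[0] != (char)0x7f)
		return (-1);
	/* Recognized but unsupported in this sketch. */
	return (ENOEXEC);
}

static const struct execsw example_execsw = {
	.ex_imgact = example_imgact,
	.ex_name = "example"
};

/* Call exec_register(&example_execsw) from a module event handler. */
#endif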

/*
 * Write out a core segment to the compression stream.
 */
static int
compress_chunk(struct coredump_params *cp, char *base, char *buf, size_t len)
{
	size_t chunk_len;
	int error;

	error = 0;
	while (len > 0) {
		chunk_len = MIN(len, CORE_BUF_SIZE);

		/*
		 * We can get EFAULT error here.
		 * In that case zero out the current chunk of the segment.
		 */
		error = copyin(base, buf, chunk_len);
		if (error != 0)
			bzero(buf, chunk_len);
		error = compressor_write(cp->comp, buf, chunk_len);
		if (error != 0)
			break;
		base += chunk_len;
		len -= chunk_len;
	}
	return (error);
}

int
core_write(struct coredump_params *cp, const void *base, size_t len,
    off_t offset, enum uio_seg seg, size_t *resid)
{

	return (vn_rdwr_inchunks(UIO_WRITE, cp->vp, __DECONST(void *, base),
	    len, offset, seg, IO_UNIT | IO_DIRECT | IO_RANGELOCKED,
	    cp->active_cred, cp->file_cred, resid, cp->td));
}

int
core_output(char *base, size_t len, off_t offset, struct coredump_params *cp,
    void *tmpbuf)
{
	vm_map_t map;
	struct mount *mp;
	size_t resid, runlen;
	int error;
	bool success;

	KASSERT((uintptr_t)base % PAGE_SIZE == 0,
	    ("%s: user address %p is not page-aligned", __func__, base));

	if (cp->comp != NULL)
		return (compress_chunk(cp, base, tmpbuf, len));

	map = &cp->td->td_proc->p_vmspace->vm_map;
	for (; len > 0; base += runlen, offset += runlen, len -= runlen) {
		/*
		 * Attempt to page in all virtual pages in the range.  If a
		 * virtual page is not backed by the pager, it is represented as
		 * a hole in the file.  This can occur with zero-filled
		 * anonymous memory or truncated files, for example.
		 */
		for (runlen = 0; runlen < len; runlen += PAGE_SIZE) {
			if (core_dump_can_intr && curproc_sigkilled())
				return (EINTR);
			error = vm_fault(map, (uintptr_t)base + runlen,
			    VM_PROT_READ, VM_FAULT_NOFILL, NULL);
			if (runlen == 0)
				success = error == KERN_SUCCESS;
			else if ((error == KERN_SUCCESS) != success)
				break;
		}

		if (success) {
			error = core_write(cp, base, runlen, offset,
			    UIO_USERSPACE, &resid);
			if (error != 0) {
				if (error != EFAULT)
					break;

				/*
				 * EFAULT may be returned if the user mapping
				 * could not be accessed, e.g., because a mapped
				 * file has been truncated.  Skip the page if no
				 * progress was made, to protect against a
				 * hypothetical scenario where vm_fault() was
				 * successful but core_write() returns EFAULT
				 * anyway.
				 */
				runlen -= resid;
				if (runlen == 0) {
					success = false;
					runlen = PAGE_SIZE;
				}
			}
		}
		if (!success) {
			error = vn_start_write(cp->vp, &mp, V_WAIT);
			if (error != 0)
				break;
			vn_lock(cp->vp, LK_EXCLUSIVE | LK_RETRY);
			error = vn_truncate_locked(cp->vp, offset + runlen,
			    false, cp->td->td_ucred);
			VOP_UNLOCK(cp->vp);
			vn_finished_write(mp);
			if (error != 0)
				break;
		}
	}
	return (error);
}

/*
 * Drain into a core file.
 */
int
sbuf_drain_core_output(void *arg, const char *data, int len)
{
	struct coredump_params *cp;
	struct proc *p;
	int error, locked;

	cp = arg;
	p = cp->td->td_proc;

	/*
	 * Some kern_proc out routines that print to this sbuf may
	 * call us with the process lock held. Draining with the
	 * non-sleepable lock held is unsafe. The lock is needed for
	 * those routines when dumping a live process. In our case we
	 * can safely release the lock before draining and acquire
	 * it again after.
	 */
	locked = PROC_LOCKED(p);
	if (locked)
		PROC_UNLOCK(p);
	if (cp->comp != NULL)
		error = compressor_write(cp->comp, __DECONST(char *, data),
		    len);
	else
		error = core_write(cp, __DECONST(void *, data), len, cp->offset,
		    UIO_SYSSPACE, NULL);
	if (locked)
		PROC_LOCK(p);
	if (error != 0)
		return (-error);
	cp->offset += len;
	return (len);
}
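
/*
 * Usage sketch (not part of the original file): core note emitters attach
 * this drain to an sbuf with sbuf_set_drain(9) so that formatted data
 * streams straight into the core file; cp is a hypothetical, already
 * initialized coredump_params pointer.
 */
#if 0
	struct sbuf *sb;

	sb = sbuf_new(NULL, NULL, CORE_BUF_SIZE, SBUF_FIXEDLEN);
	sbuf_set_drain(sb, sbuf_drain_core_output, cp);
	sbuf_printf(sb, "example note payload");
	sbuf_finish(sb);
	sbuf_delete(sb);
#endif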