2 * Copyright (c) 1993, David Greenman
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include "opt_ktrace.h"
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/eventhandler.h>
37 #include <sys/mutex.h>
38 #include <sys/sysproto.h>
39 #include <sys/signalvar.h>
40 #include <sys/kernel.h>
42 #include <sys/mount.h>
43 #include <sys/filedesc.h>
44 #include <sys/fcntl.h>
47 #include <sys/imgact.h>
48 #include <sys/imgact_elf.h>
50 #include <sys/malloc.h>
52 #include <sys/pioctl.h>
53 #include <sys/namei.h>
54 #include <sys/sf_buf.h>
55 #include <sys/sysent.h>
57 #include <sys/sysctl.h>
59 #include <sys/vnode.h>
61 #include <sys/ktrace.h>
65 #include <vm/vm_param.h>
67 #include <vm/vm_page.h>
68 #include <vm/vm_map.h>
69 #include <vm/vm_kern.h>
70 #include <vm/vm_extern.h>
71 #include <vm/vm_object.h>
72 #include <vm/vm_pager.h>
74 #include <machine/reg.h>
/*
 * Malloc type for the cached copy of a process's argument strings
 * (used with ps_arg_cache_limit below).
 */
76 MALLOC_DEFINE(M_PARGS, "proc-args", "Process arguments");
/* Forward declarations: sysctl handlers and the in-kernel exec core. */
78 static int sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS);
79 static int sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS);
80 static int sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS);
81 static int kern_execve(struct thread *td, char *fname, char **argv,
82 char **envv, struct mac *mac_p);
/*
 * Read-only sysctls exporting per-ABI exec constants; the values come
 * from the calling process's sysentvec (see the handlers below).
 */
84 /* XXX This should be vm_size_t. */
85 SYSCTL_PROC(_kern, KERN_PS_STRINGS, ps_strings, CTLTYPE_ULONG|CTLFLAG_RD,
86 NULL, 0, sysctl_kern_ps_strings, "LU", "");
88 /* XXX This should be vm_size_t. */
89 SYSCTL_PROC(_kern, KERN_USRSTACK, usrstack, CTLTYPE_ULONG|CTLFLAG_RD,
90 NULL, 0, sysctl_kern_usrstack, "LU", "");
92 SYSCTL_PROC(_kern, OID_AUTO, stackprot, CTLTYPE_INT|CTLFLAG_RD,
93 NULL, 0, sysctl_kern_stackprot, "I", "");
/*
 * Largest argument-string block (bytes) that will be cached for ps(1);
 * tunable at runtime via kern.ps_arg_cache_limit.
 */
95 u_long ps_arg_cache_limit = PAGE_SIZE / 16;
96 SYSCTL_ULONG(_kern, OID_AUTO, ps_arg_cache_limit, CTLFLAG_RW,
97 &ps_arg_cache_limit, 0, "");
/*
 * Sysctl handler for kern.ps_strings: copy out the ps_strings address
 * from the caller's sysentvec (p_sysent->sv_psstrings).  On amd64/ia64
 * a request sized for an unsigned int gets the value truncated to
 * 32 bits, presumably for compat binaries.
 * NOTE(review): intermediate lines are elided in this view; the
 * function prologue/epilogue and error return are not visible here.
 */
100 sysctl_kern_ps_strings(SYSCTL_HANDLER_ARGS)
106 #if defined(__amd64__) || defined(__ia64__)
107 if (req->oldlen == sizeof(unsigned int)) {
109 val = (unsigned int)p->p_sysent->sv_psstrings;
110 error = SYSCTL_OUT(req, &val, sizeof(val));
/* Full-width copyout for native-sized requests. */
113 error = SYSCTL_OUT(req, &p->p_sysent->sv_psstrings,
114 sizeof(p->p_sysent->sv_psstrings));
/*
 * Sysctl handler for kern.usrstack: copy out the top-of-user-stack
 * address from the caller's sysentvec (p_sysent->sv_usrstack), with
 * the same amd64/ia64 32-bit truncation path as kern.ps_strings.
 * NOTE(review): surrounding lines are elided in this view.
 */
119 sysctl_kern_usrstack(SYSCTL_HANDLER_ARGS)
125 #if defined(__amd64__) || defined(__ia64__)
126 if (req->oldlen == sizeof(unsigned int)) {
128 val = (unsigned int)p->p_sysent->sv_usrstack;
129 error = SYSCTL_OUT(req, &val, sizeof(val));
/* Full-width copyout for native-sized requests. */
132 error = SYSCTL_OUT(req, &p->p_sysent->sv_usrstack,
133 sizeof(p->p_sysent->sv_usrstack));
/*
 * Sysctl handler for kern.stackprot: report the ABI's stack protection
 * bits (p_sysent->sv_stackprot) to userland.
 */
138 sysctl_kern_stackprot(SYSCTL_HANDLER_ARGS)
143 return (SYSCTL_OUT(req, &p->p_sysent->sv_stackprot,
144 sizeof(p->p_sysent->sv_stackprot)));
148 * Each of the items is a pointer to a `const struct execsw', hence the
149 * double pointer here.
/* NULL-terminated table of registered image activators. */
151 static const struct execsw **execsw;
/*
 * execve(2) system call entry point: a thin wrapper that forwards to
 * kern_execve() with no MAC label (mac_p == NULL).
 * NOTE(review): the args-struct body and function header are elided
 * in this view.
 */
153 #ifndef _SYS_SYSPROTO_H_
167 struct execve_args /* {
174 return (kern_execve(td, uap->fname, uap->argv, uap->envv, NULL));
/*
 * __mac_execve(2): like execve(2) but also passes a caller-supplied
 * MAC label through to kern_execve().
 * NOTE(review): the args-struct body and the trailing argument of the
 * kern_execve() call are elided in this view.
 */
177 #ifndef _SYS_SYSPROTO_H_
178 struct __mac_execve_args {
190 __mac_execve(td, uap)
192 struct __mac_execve_args /* {
201 return (kern_execve(td, uap->fname, uap->argv, uap->envv,
209 * In-kernel implementation of execve(). All arguments are assumed to be
210 * userspace pointers from the passed thread.
/*
 * Overall flow visible in this fragment: single-thread the process and
 * set P_INEXEC; look up and validate the image vnode; run the image
 * activators (looping for interpreters); copy out strings and build the
 * new stack; unshare fd table and sigacts; apply setuid/setgid and MAC
 * credential transitions; install the new text vnode; and finally set
 * the registers for the new image.  Error paths unwind via the
 * exec_fail_dealloc labels near the end.
 * NOTE(review): many intermediate lines (braces, returns, lock calls)
 * are elided in this view; do not infer locking order from what is
 * visible here alone.
 */
215 kern_execve(td, fname, argv, envv, mac_p)
222 struct proc *p = td->td_proc;
223 struct nameidata nd, *ndp;
224 struct ucred *newcred = NULL, *oldcred;
225 struct uidinfo *euip;
226 register_t *stack_base;
228 struct image_params image_params, *imgp;
230 int (*img_first)(struct image_params *);
231 struct pargs *oldargs = NULL, *newargs = NULL;
232 struct sigacts *oldsigacts, *newsigacts;
234 struct vnode *tracevp = NULL;
235 struct ucred *tracecred = NULL;
237 struct vnode *textvp = NULL;
238 int credential_changing;
241 struct label *interplabel = NULL;
245 imgp = &image_params;
248 * Lock the process and set the P_INEXEC flag to indicate that
249 * it should be left alone until we're done here. This is
250 * necessary to avoid race conditions - e.g. in ptrace() -
251 * that might allow a local user to illicitly obtain elevated
255 KASSERT((p->p_flag & P_INEXEC) == 0,
256 ("%s(): process already has P_INEXEC flag", __func__));
/* Force the process down to a single thread before exec proceeds. */
257 if (p->p_flag & P_SA || p->p_numthreads > 1) {
258 if (thread_single(SINGLE_EXIT)) {
261 return (ERESTART); /* Try again later. */
264 * If we get here all other threads are dead,
265 * so unset the associated flags and lose KSE mode.
268 td->td_mailbox = NULL;
269 td->td_pflags &= ~TDP_SA;
272 p->p_flag |= P_INEXEC;
276 * Initialize part of the common data
279 imgp->userspace_argv = argv;
280 imgp->userspace_envv = envv;
281 imgp->execlabel = NULL;
283 imgp->argc = imgp->envc = 0;
285 imgp->entry_addr = 0;
286 imgp->vmspace_destroyed = 0;
287 imgp->interpreted = 0;
288 imgp->interpreter_name[0] = '\0';
289 imgp->auxargs = NULL;
292 imgp->firstpage = NULL;
293 imgp->ps_strings = 0;
294 imgp->auxarg_size = 0;
/* MAC hook: may capture an exec label from mac_p before we start. */
297 error = mac_execve_enter(imgp, mac_p);
305 * Allocate temporary demand zeroed space for argument and
306 * environment strings
308 imgp->stringbase = (char *)kmem_alloc_wait(exec_map, ARG_MAX);
309 if (imgp->stringbase == NULL) {
314 imgp->stringp = imgp->stringbase;
315 imgp->stringspace = ARG_MAX;
316 imgp->image_header = NULL;
319 * Translate the file name. namei() returns a vnode pointer
320 * in ni_vp amoung other things.
323 NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
324 UIO_USERSPACE, fname, td);
/* Lookup-failure path: release the string buffer (rest elided). */
331 kmem_free_wakeup(exec_map, (vm_offset_t)imgp->stringbase,
336 imgp->vp = ndp->ni_vp;
340 * Check file permissions (also 'opens' file)
342 error = exec_check_permissions(imgp);
344 goto exec_fail_dealloc;
346 if (VOP_GETVOBJECT(imgp->vp, &imgp->object) == 0)
347 vm_object_reference(imgp->object);
350 * Set VV_TEXT now so no one can write to the executable while we're
353 * Remember if this was set before and unset it in case this is not
354 * actually an executable image.
356 textset = imgp->vp->v_vflag & VV_TEXT;
357 imgp->vp->v_vflag |= VV_TEXT;
/* Map the first page so activators can inspect the header. */
359 error = exec_map_first_page(imgp);
361 goto exec_fail_dealloc;
364 * If the current process has a special image activator it
365 * wants to try first, call it. For example, emulating shell
366 * scripts differently.
369 if ((img_first = imgp->proc->p_sysent->sv_imgact_try) != NULL)
370 error = img_first(imgp);
373 * Loop through the list of image activators, calling each one.
374 * An activator returns -1 if there is no match, 0 on success,
375 * and an error otherwise.
377 for (i = 0; error == -1 && execsw[i]; ++i) {
378 if (execsw[i]->ex_imgact == NULL ||
379 execsw[i]->ex_imgact == img_first) {
382 error = (*execsw[i]->ex_imgact)(imgp);
/* No activator matched: restore VV_TEXT state and fail. */
388 imgp->vp->v_vflag &= ~VV_TEXT;
391 goto exec_fail_dealloc;
395 * Special interpreter operation, cleanup and loop up to try to
396 * activate the interpreter.
398 if (imgp->interpreted) {
399 exec_unmap_first_page(imgp);
401 * VV_TEXT needs to be unset for scripts. There is a short
402 * period before we determine that something is a script where
403 * VV_TEXT will be set. The vnode lock is held over this
404 * entire period so nothing should illegitimately be blocked.
406 imgp->vp->v_vflag &= ~VV_TEXT;
407 /* free name buffer and old vnode */
408 NDFREE(ndp, NDF_ONLY_PNBUF);
410 interplabel = mac_vnode_label_alloc();
411 mac_copy_vnode_label(ndp->ni_vp->v_label, interplabel);
414 vm_object_deallocate(imgp->object);
416 /* set new name to that of the interpreter */
417 NDINIT(ndp, LOOKUP, LOCKLEAF | FOLLOW | SAVENAME,
418 UIO_SYSSPACE, imgp->interpreter_name, td);
423 * Copy out strings (args and env) and initialize stack base
425 if (p->p_sysent->sv_copyout_strings)
426 stack_base = (*p->p_sysent->sv_copyout_strings)(imgp);
428 stack_base = exec_copyout_strings(imgp);
431 * If custom stack fixup routine present for this process
432 * let it do the stack setup.
433 * Else stuff argument count as first item on stack
435 if (p->p_sysent->sv_fixup != NULL)
436 (*p->p_sysent->sv_fixup)(&stack_base, imgp);
438 suword(--stack_base, imgp->argc);
441 * For security and other reasons, the file descriptor table cannot
442 * be shared after an exec.
444 FILEDESC_LOCK(p->p_fd);
445 if (p->p_fd->fd_refcnt > 1) {
446 struct filedesc *tmp;
448 tmp = fdcopy(td->td_proc->p_fd);
449 FILEDESC_UNLOCK(p->p_fd);
453 FILEDESC_UNLOCK(p->p_fd);
456 * Malloc things before we need locks.
459 euip = uifind(attr.va_uid);
/* i = total byte length of the collected argument strings. */
460 i = imgp->endargs - imgp->stringbase;
461 if (ps_arg_cache_limit >= i + sizeof(struct pargs))
462 newargs = pargs_alloc(i);
464 /* close files on exec */
467 /* Get a reference to the vnode prior to locking the proc */
471 * For security and other reasons, signal handlers cannot
472 * be shared after an exec. The new process gets a copy of the old
473 * handlers. In execsigs(), the new process will have its signals
477 if (sigacts_shared(p->p_sigacts)) {
478 oldsigacts = p->p_sigacts;
480 newsigacts = sigacts_alloc();
481 sigacts_copy(newsigacts, oldsigacts);
483 p->p_sigacts = newsigacts;
490 /* reset caught signals */
493 /* name this process - nameiexec(p, ndp) */
494 len = min(ndp->ni_cnd.cn_namelen,MAXCOMLEN);
495 bcopy(ndp->ni_cnd.cn_nameptr, p->p_comm, len);
499 * mark as execed, wakeup the process that vforked (if any) and tell
500 * it that it now has its own resources back
503 if (p->p_pptr && (p->p_flag & P_PPWAIT)) {
504 p->p_flag &= ~P_PPWAIT;
509 * Implement image setuid/setgid.
511 * Don't honor setuid/setgid if the filesystem prohibits it or if
512 * the process is being traced.
514 * XXXMAC: For the time being, use NOSUID to also prohibit
515 * transitions on the file system.
517 oldcred = p->p_ucred;
518 credential_changing = 0;
519 credential_changing |= (attr.va_mode & VSUID) && oldcred->cr_uid !=
521 credential_changing |= (attr.va_mode & VSGID) && oldcred->cr_gid !=
524 will_transition = mac_execve_will_transition(oldcred, imgp->vp,
526 credential_changing |= will_transition;
529 if (credential_changing &&
530 (imgp->vp->v_mount->mnt_flag & MNT_NOSUID) == 0 &&
531 (p->p_flag & P_TRACED) == 0) {
533 * Turn off syscall tracing for set-id programs, except for
534 * root. Record any set-id flags first to make sure that
535 * we do not regain any tracing during a possible block.
539 if (p->p_tracevp != NULL && suser_cred(oldcred, PRISON_ROOT)) {
540 mtx_lock(&ktrace_mtx);
542 tracevp = p->p_tracevp;
544 tracecred = p->p_tracecred;
545 p->p_tracecred = NULL;
546 mtx_unlock(&ktrace_mtx);
550 * Close any file descriptors 0..2 that reference procfs,
551 * then make sure file descriptors 0..2 are in use.
553 * setugidsafety() may call closef() and then pfind()
554 * which may grab the process lock.
555 * fdcheckstd() may call falloc() which may block to
556 * allocate memory, so temporarily drop the process lock.
560 error = fdcheckstd(td);
565 * Set the new credentials.
567 crcopy(newcred, oldcred);
568 if (attr.va_mode & VSUID)
569 change_euid(newcred, euip);
570 if (attr.va_mode & VSGID)
571 change_egid(newcred, attr.va_gid);
573 if (will_transition) {
574 mac_execve_transition(oldcred, newcred, imgp->vp,
579 * Implement correct POSIX saved-id behavior.
581 * XXXMAC: Note that the current logic will save the
582 * uid and gid if a MAC domain transition occurs, even
583 * though maybe it shouldn't.
585 change_svuid(newcred, newcred->cr_uid);
586 change_svgid(newcred, newcred->cr_gid);
587 p->p_ucred = newcred;
/* Not changing credentials: clear P_SUGID if ids match real ids. */
590 if (oldcred->cr_uid == oldcred->cr_ruid &&
591 oldcred->cr_gid == oldcred->cr_rgid)
592 p->p_flag &= ~P_SUGID;
594 * Implement correct POSIX saved-id behavior.
596 * XXX: It's not clear that the existing behavior is
597 * POSIX-compliant. A number of sources indicate that the
598 * saved uid/gid should only be updated if the new ruid is
599 * not equal to the old ruid, or the new euid is not equal
600 * to the old euid and the new euid is not equal to the old
601 * ruid. The FreeBSD code always updates the saved uid/gid.
602 * Also, this code uses the new (replaced) euid and egid as
603 * the source, which may or may not be the right ones to use.
605 if (oldcred->cr_svuid != oldcred->cr_uid ||
606 oldcred->cr_svgid != oldcred->cr_gid) {
607 crcopy(newcred, oldcred);
608 change_svuid(newcred, newcred->cr_uid);
609 change_svgid(newcred, newcred->cr_gid);
610 p->p_ucred = newcred;
616 * Store the vp for use in procfs. This vnode was referenced prior
617 * to locking the proc lock.
619 textvp = p->p_textvp;
620 p->p_textvp = ndp->ni_vp;
623 * Notify others that we exec'd, and clear the P_INEXEC flag
624 * as we're now a bona fide freshly-execed process.
626 KNOTE(&p->p_klist, NOTE_EXEC);
627 p->p_flag &= ~P_INEXEC;
630 * If tracing the process, trap to debugger so breakpoints
631 * can be set before the program executes.
633 if (p->p_flag & P_TRACED)
636 /* clear "fork but no exec" flag, as we _are_ execing */
637 p->p_acflag &= ~AFORK;
639 /* Free any previous argument cache */
643 /* Cache arguments if they fit inside our allowance */
644 if (ps_arg_cache_limit >= i + sizeof(struct pargs)) {
645 bcopy(imgp->stringbase, newargs->ar_args, i);
651 /* Set values passed into the program in registers. */
652 if (p->p_sysent->sv_setregs)
653 (*p->p_sysent->sv_setregs)(td, imgp->entry_addr,
654 (u_long)(uintptr_t)stack_base, imgp->ps_strings);
656 exec_setregs(td, imgp->entry_addr,
657 (u_long)(uintptr_t)stack_base, imgp->ps_strings);
661 * Free any resources malloc'd earlier that we didn't use.
669 * Handle deferred decrement of ref counts.
673 if (ndp->ni_vp && error != 0)
678 if (tracecred != NULL)
685 if (oldsigacts != NULL)
686 sigacts_free(oldsigacts);
691 * free various allocated resources
693 if (imgp->firstpage != NULL)
694 exec_unmap_first_page(imgp);
696 if (imgp->vp != NULL) {
697 NDFREE(ndp, NDF_ONLY_PNBUF);
701 if (imgp->stringbase != NULL)
702 kmem_free_wakeup(exec_map, (vm_offset_t)imgp->stringbase,
705 if (imgp->object != NULL)
706 vm_object_deallocate(imgp->object);
710 * Stop the process here if its stop event mask has
711 * the S_EXEC bit set.
713 STOPEVENT(p, S_EXEC, 0);
718 /* we're done here, clear P_INEXEC */
720 p->p_flag &= ~P_INEXEC;
/*
 * If the old address space was already destroyed we cannot return
 * an error to the caller; the only recourse is to kill the process.
 */
723 if (imgp->vmspace_destroyed) {
724 /* sorry, no more process anymore. exit gracefully */
726 mac_execve_exit(imgp);
727 if (interplabel != NULL)
728 mac_vnode_label_free(interplabel);
730 exit1(td, W_EXITCODE(0, SIGABRT));
736 mac_execve_exit(imgp);
737 if (interplabel != NULL)
738 mac_vnode_label_free(interplabel);
/*
 * Map the first page of the executable's backing VM object into kernel
 * address space (via an sf_buf) so image activators can examine the
 * header through imgp->image_header.  If the page is not fully valid,
 * cluster in up to VM_INITIAL_PAGEIN pages from the pager first.
 * NOTE(review): error-return and intermediate lines are elided in this
 * view; the pager-failure branch appears to free/unbusy pages before
 * unlocking the object.
 */
745 exec_map_first_page(imgp)
746 struct image_params *imgp;
750 vm_page_t ma[VM_INITIAL_PAGEIN];
/* Drop any previously mapped first page before mapping a new one. */
755 if (imgp->firstpage != NULL)
756 exec_unmap_first_page(imgp);
758 VOP_GETVOBJECT(imgp->vp, &object);
759 VM_OBJECT_LOCK(object);
760 ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
761 if ((ma[0]->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
762 initial_pagein = VM_INITIAL_PAGEIN;
763 if (initial_pagein > object->size)
764 initial_pagein = object->size;
/* Gather the trailing pages of the cluster (skip busy ones). */
765 for (i = 1; i < initial_pagein; i++) {
766 if ((ma[i] = vm_page_lookup(object, i)) != NULL) {
769 vm_page_lock_queues();
770 if ((ma[i]->flags & PG_BUSY) || ma[i]->busy) {
771 vm_page_unlock_queues();
775 vm_page_unlock_queues();
777 ma[i] = vm_page_alloc(object, i,
784 rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
785 ma[0] = vm_page_lookup(object, 0);
786 if ((rv != VM_PAGER_OK) || (ma[0] == NULL) ||
787 (ma[0]->valid == 0)) {
789 vm_page_lock_queues();
790 pmap_remove_all(ma[0]);
792 vm_page_unlock_queues();
794 VM_OBJECT_UNLOCK(object);
798 vm_page_lock_queues();
800 vm_page_wakeup(ma[0]);
801 vm_page_unlock_queues();
802 VM_OBJECT_UNLOCK(object);
/* Map page 0 into KVA and expose it as the image header. */
804 imgp->firstpage = sf_buf_alloc(ma[0], 0);
805 imgp->image_header = (char *)sf_buf_kva(imgp->firstpage);
/*
 * Release the sf_buf mapping of the executable's first page created by
 * exec_map_first_page(), and drop the page reference under the page
 * queues lock.  Safe to call when no page is mapped.
 */
811 exec_unmap_first_page(imgp)
812 struct image_params *imgp;
816 if (imgp->firstpage != NULL) {
817 m = sf_buf_page(imgp->firstpage);
818 sf_buf_free(imgp->firstpage);
819 imgp->firstpage = NULL;
820 vm_page_lock_queues();
822 vm_page_unlock_queues();
827 * Destroy old address space, and allocate a new stack
828 * The new stack is only SGROWSIZ large because it is grown
829 * automatically in trap.c.
/*
 * Replace the process's address space for the new image described by
 * imgp, using the limits of the target sysentvec sv.  If the vmspace is
 * unshared and already spans [sv_minuser, sv_maxuser] it is wiped and
 * reused; otherwise a fresh vmspace is created with vmspace_exec().
 */
832 exec_new_vmspace(imgp, sv)
833 struct image_params *imgp;
834 struct sysentvec *sv;
837 struct proc *p = imgp->proc;
838 struct vmspace *vmspace = p->p_vmspace;
839 vm_offset_t stack_addr;
/* Past this point a failed exec can no longer return to the caller. */
844 imgp->vmspace_destroyed = 1;
846 /* Called with Giant held, do not depend on it! */
847 EVENTHANDLER_INVOKE(process_exec, p);
850 * Here is as good a place as any to do any resource limit cleanups.
851 * This is needed if a 64 bit binary exec's a 32 bit binary - the
852 * data size limit may need to be changed to a value that makes
853 * sense for the 32 bit binary.
855 if (sv->sv_fixlimits != NULL)
856 sv->sv_fixlimits(imgp);
859 * Blow away entire process VM, if address space not shared,
860 * otherwise, create a new VM space so that other threads are
863 map = &vmspace->vm_map;
864 if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv->sv_minuser &&
865 vm_map_max(map) == sv->sv_maxuser) {
867 vm_page_lock_queues();
868 pmap_remove_pages(vmspace_pmap(vmspace), vm_map_min(map),
870 vm_page_unlock_queues();
871 vm_map_remove(map, vm_map_min(map), vm_map_max(map));
873 vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser);
874 vmspace = p->p_vmspace;
875 map = &vmspace->vm_map;
878 /* Allocate a new stack */
879 stack_addr = sv->sv_usrstack - maxssiz;
880 error = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz,
881 sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_DOWN);
/* ia64 also needs an upward-growing register backing store. */
886 /* Allocate a new register stack */
887 stack_addr = IA64_BACKINGSTORE;
888 error = vm_map_stack(map, stack_addr, (vm_size_t)maxssiz,
889 sv->sv_stackprot, VM_PROT_ALL, MAP_STACK_GROWS_UP);
894 /* vm_ssize and vm_maxsaddr are somewhat antiquated concepts in the
895 * VM_STACK case, but they are still used to monitor the size of the
896 * process stack so we can check the stack rlimit.
898 vmspace->vm_ssize = sgrowsiz >> PAGE_SHIFT;
899 vmspace->vm_maxsaddr = (char *)sv->sv_usrstack - maxssiz;
905 * Copy out argument and environment strings from the old process
906 * address space into the temporary string buffer.
/*
 * Walks the userspace argv and envv vectors with fuword(), copying each
 * NUL-terminated string into imgp->stringbase via copyinstr() and
 * tracking remaining room in imgp->stringspace.  A (caddr_t)-1 result
 * from fuword indicates a fault on the user pointer; ENAMETOOLONG from
 * copyinstr means the combined strings exceed ARG_MAX.
 * NOTE(review): loop braces, argc/envc accounting, and return paths
 * are elided in this view.
 */
909 exec_extract_strings(imgp)
910 struct image_params *imgp;
918 * extract arguments first
921 argv = imgp->userspace_argv;
924 argp = (caddr_t)(intptr_t)fuword(argv);
925 if (argp == (caddr_t)-1)
933 if (argp == (caddr_t)-1)
935 if ((error = copyinstr(argp, imgp->stringp,
936 imgp->stringspace, &length))) {
937 if (error == ENAMETOOLONG)
941 imgp->stringspace -= length;
942 imgp->stringp += length;
944 } while ((argp = (caddr_t)(intptr_t)fuword(argv++)));
/* Mark the end of the argument block (used for pargs caching). */
949 imgp->endargs = imgp->stringp;
952 * extract environment strings
955 envv = imgp->userspace_envv;
958 while ((envp = (caddr_t)(intptr_t)fuword(envv++))) {
959 if (envp == (caddr_t)-1)
961 if ((error = copyinstr(envp, imgp->stringp,
962 imgp->stringspace, &length))) {
963 if (error == ENAMETOOLONG)
967 imgp->stringspace -= length;
968 imgp->stringp += length;
977 * Copy strings out to the new process address space, constructing
978 * new arg and env vector tables. Return a pointer to the base
979 * so that it can be used as the initial stack pointer.
/*
 * Layout built below the ps_strings structure at the top of the user
 * stack: signal trampoline, then the packed argument/environment
 * strings, then the argv/envp pointer vectors (plus ELF auxargs room
 * when imgp->auxargs is set).  The returned stack_base points at the
 * start of the vector area.
 * NOTE(review): several declarations and the argc/envc initialization
 * are elided in this view.
 */
982 exec_copyout_strings(imgp)
983 struct image_params *imgp;
987 char *stringp, *destp;
988 register_t *stack_base;
989 struct ps_strings *arginfo;
994 * Calculate string base and vector table pointers.
995 * Also deal with signal trampoline code for this exec type.
999 arginfo = (struct ps_strings *)p->p_sysent->sv_psstrings;
1000 if (p->p_sysent->sv_szsigcode != NULL)
1001 szsigcode = *(p->p_sysent->sv_szsigcode);
1002 destp = (caddr_t)arginfo - szsigcode - SPARE_USRSPACE -
1003 roundup((ARG_MAX - imgp->stringspace), sizeof(char *));
/* Install the signal trampoline just below ps_strings. */
1009 copyout(p->p_sysent->sv_sigcode, ((caddr_t)arginfo -
1010 szsigcode), szsigcode);
1013 * If we have a valid auxargs ptr, prepare some room
1016 if (imgp->auxargs) {
1018 * 'AT_COUNT*2' is size for the ELF Auxargs data. This is for
1019 * lower compatibility.
1021 imgp->auxarg_size = (imgp->auxarg_size) ? imgp->auxarg_size :
1024 * The '+ 2' is for the null pointers at the end of each of
1025 * the arg and env vector sets,and imgp->auxarg_size is room
1026 * for argument of Runtime loader.
1028 vectp = (char **)(destp - (imgp->argc + imgp->envc + 2 +
1029 imgp->auxarg_size) * sizeof(char *));
1033 * The '+ 2' is for the null pointers at the end of each of
1034 * the arg and env vector sets
1036 vectp = (char **)(destp - (imgp->argc + imgp->envc + 2) *
1040 * vectp also becomes our initial stack base
1042 stack_base = (register_t *)vectp;
1044 stringp = imgp->stringbase;
1049 * Copy out strings - arguments and environment.
1051 copyout(stringp, destp, ARG_MAX - imgp->stringspace);
1054 * Fill in "ps_strings" struct for ps, w, etc.
1056 suword(&arginfo->ps_argvstr, (long)(intptr_t)vectp);
1057 suword(&arginfo->ps_nargvstr, argc);
1060 * Fill in argument portion of vector table.
1062 for (; argc > 0; --argc) {
1063 suword(vectp++, (long)(intptr_t)destp);
/* Advance past the current NUL-terminated string. */
1064 while (*stringp++ != 0)
1069 /* a null vector table pointer separates the argp's from the envp's */
1072 suword(&arginfo->ps_envstr, (long)(intptr_t)vectp);
1073 suword(&arginfo->ps_nenvstr, envc);
1076 * Fill in environment portion of vector table.
1078 for (; envc > 0; --envc) {
1079 suword(vectp++, (long)(intptr_t)destp);
1080 while (*stringp++ != 0)
1085 /* end of vector table is a null pointer */
1088 return (stack_base);
1092 * Check permissions of file to execute.
1093 * Called with imgp->vp locked.
1094 * Return 0 for success or error code on failure.
/*
 * Fills imgp->attr via VOP_GETATTR, then rejects: MAC denial, noexec
 * mounts, files with no execute bit, non-regular files, zero-length
 * files, files the caller lacks VEXEC on, and files currently open for
 * writing.  Finally performs the filesystem-specific VOP_OPEN.
 * NOTE(review): the error-return statements between checks are elided
 * in this view.
 */
1097 exec_check_permissions(imgp)
1098 struct image_params *imgp;
1100 struct vnode *vp = imgp->vp;
1101 struct vattr *attr = imgp->attr;
1105 td = curthread; /* XXXKSE */
1107 /* Get file attributes */
1108 error = VOP_GETATTR(vp, attr, td->td_ucred, td);
/* MAC hook: policy modules may veto execution of this vnode. */
1113 error = mac_check_vnode_exec(td->td_ucred, imgp->vp, imgp);
1119 * 1) Check if file execution is disabled for the filesystem that this
1121 * 2) Insure that at least one execute bit is on - otherwise root
1122 * will always succeed, and we don't want to happen unless the
1123 * file really is executable.
1124 * 3) Insure that the file is a regular file.
1126 if ((vp->v_mount->mnt_flag & MNT_NOEXEC) ||
1127 ((attr->va_mode & 0111) == 0) ||
1128 (attr->va_type != VREG))
1132 * Zero length files can't be exec'd
1134 if (attr->va_size == 0)
1138 * Check for execute permission to file based on current credentials.
1140 error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td);
1145 * Check number of open-for-writes on the file and deny execution
1148 if (vp->v_writecount)
1152 * Call filesystem specific open routine (which does nothing in the
1155 error = VOP_OPEN(vp, FREAD, td->td_ucred, td, -1);
1160 * Exec handler registration
/*
 * Add an image activator to the global execsw table by allocating a
 * larger NULL-terminated copy, appending the new entry, and freeing
 * the old table.
 * NOTE(review): the copy loop bodies and the swap of the execsw
 * pointer are elided in this view.
 */
1163 exec_register(execsw_arg)
1164 const struct execsw *execsw_arg;
1166 const struct execsw **es, **xs, **newexecsw;
1167 int count = 2; /* New slot and trailing NULL */
/* Count existing entries to size the replacement table. */
1170 for (es = execsw; *es; es++)
1172 newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1173 if (newexecsw == NULL)
1177 for (es = execsw; *es; es++)
1182 free(execsw, M_TEMP);
/*
 * Remove an image activator from the global execsw table: verify the
 * entry is present (panic if the table would become empty), count the
 * survivors, build a smaller NULL-terminated copy without the entry,
 * and free the old table.
 * NOTE(review): the copy loop bodies and the swap of the execsw
 * pointer are elided in this view.
 */
1188 exec_unregister(execsw_arg)
1189 const struct execsw *execsw_arg;
1191 const struct execsw **es, **xs, **newexecsw;
1195 panic("unregister with no handlers left?\n");
/* Confirm the handler being removed is actually registered. */
1197 for (es = execsw; *es; es++) {
1198 if (*es == execsw_arg)
1203 for (es = execsw; *es; es++)
1204 if (*es != execsw_arg)
1206 newexecsw = malloc(count * sizeof(*es), M_TEMP, M_WAITOK);
1207 if (newexecsw == NULL)
1210 for (es = execsw; *es; es++)
1211 if (*es != execsw_arg)
1215 free(execsw, M_TEMP);