/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/acct.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/sx.h>
#include <sys/signalvar.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#ifndef _SYS_SYSPROTO_H_
struct fork_args {
	int	dummy;
};
#endif

static int forksleep; /* Place for fork1() to sleep on. */

/* ARGSUSED */
int
fork(td, uap)
	struct thread *td;
	struct fork_args *uap;
{
	int error;
	struct proc *p2;

	error = fork1(td, RFFDG | RFPROC, 0, &p2);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	return (error);
}
/* ARGSUSED */
int
vfork(td, uap)
	struct thread *td;
	struct vfork_args *uap;
{
	int error;
	struct proc *p2;

	error = fork1(td, RFFDG | RFPROC | RFPPWAIT | RFMEM, 0, &p2);
	if (error == 0) {
		td->td_retval[0] = p2->p_pid;
		td->td_retval[1] = 0;
	}
	return (error);
}
int
rfork(td, uap)
	struct thread *td;
	struct rfork_args *uap;
{
	struct proc *p2;
	int error;

	/* Don't allow kernel-only flags. */
	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	AUDIT_ARG(fflags, uap->flags);
	error = fork1(td, uap->flags, 0, &p2);
	if (error == 0) {
		td->td_retval[0] = p2 ? p2->p_pid : 0;
		td->td_retval[1] = 0;
	}
	return (error);
}
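/*
 * All three system calls above funnel into fork1(); only the RF* flag sets
 * differ.  As an illustrative sketch (not part of the original file), the
 * wrappers are equivalent to these rfork(2) invocations:
 *
 *	fork()  == rfork(RFFDG | RFPROC)
 *	vfork() == rfork(RFFDG | RFPROC | RFPPWAIT | RFMEM)
 *
 * where RFMEM shares the parent's address space with the child and
 * RFPPWAIT makes the parent sleep until the child execs or exits.
 */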
int nprocs = 1;		/* process 0 */
int lastpid = 0;
SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
    "Last used PID");
/*
 * Random component to lastpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;
static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error != 0)
		return (error);
	sx_xlock(&allproc_lock);
	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error == 0 && req->newptr != NULL) {
		if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
			pid = PID_MAX - 100;
		else if (pid < 2)			/* NOP */
			pid = 0;
		else if (pid < 100)			/* Make it reasonable */
			pid = 100;
		randompid = pid;
	}
	sx_xunlock(&allproc_lock);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
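/*
 * Example (assuming a running system): an administrator enables PID
 * randomization from userland with
 *
 *	# sysctl kern.randompid=100
 *
 * after which each fork offsets the PID search start by
 * arc4random() % 100 (see the non-RFHIGHPID branch in fork1() below).
 */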
int
fork1(td, flags, pages, procp)
	struct thread *td;
	int flags;
	int pages;
	struct proc **procp;
{
	struct proc *p1, *p2, *pptr;
	struct proc *newproc;
	int ok, trypid;
	static int curfail, pidchecked = 0;
	static struct timeval lastfail;
	struct filedesc *fd;
	struct filedesc_to_leader *fdtol;
	struct thread *td2;
	struct sigacts *newsigacts;
	int error;

	/* Can't copy and clear. */
	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	p1 = td->td_proc;
	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		if ((p1->p_flag & P_HADTHREADS) &&
		    (flags & (RFCFDG | RFFDG))) {
			PROC_LOCK(p1);
			if (thread_single(SINGLE_BOUNDARY)) {
				PROC_UNLOCK(p1);
				return (ERESTART);
			}
			PROC_UNLOCK(p1);
		}

		vm_forkproc(td, NULL, NULL, flags);

		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;
			fdtmp = fdinit(td->td_proc->p_fd);
			fdfree(td);
			p1->p_fd = fdtmp;
		}

		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG)
			fdunshare(p1, td);

		if ((p1->p_flag & P_HADTHREADS) &&
		    (flags & (RFCFDG | RFFDG))) {
			PROC_LOCK(p1);
			thread_single_end();
			PROC_UNLOCK(p1);
		}
		*procp = NULL;
		return (0);
	}
	/*
	 * Note 1:1 allows for forking with one thread coming out on the
	 * other side with the expectation that the process is about to
	 * exec.
	 */
	if (p1->p_flag & P_HADTHREADS) {
		/*
		 * Idle the other threads for a second.
		 * Since the user space is copied, it must remain stable.
		 * In addition, all threads (from the user perspective)
		 * need to either be suspended or in the kernel,
		 * where they will try to restart in the parent and will
		 * be aborted in the child.
		 */
		PROC_LOCK(p1);
		if (thread_single(SINGLE_NO_EXIT)) {
			/* Abort.  Someone else is single threading before us. */
			PROC_UNLOCK(p1);
			return (ERESTART);
		}
		PROC_UNLOCK(p1);
		/*
		 * All other activity in this process
		 * is now suspended at the user boundary
		 * (or other safe places if we think of any).
		 */
	}
	/* Allocate new proc. */
	newproc = uma_zalloc(proc_zone, M_WAITOK);
#ifdef MAC
	mac_init_proc(newproc);
#endif
#ifdef AUDIT
	audit_proc_alloc(newproc);
#endif
	knlist_init(&newproc->p_klist, &newproc->p_mtx, NULL, NULL, NULL);
	STAILQ_INIT(&newproc->p_ktr);

	/* We have to lock the process tree while we look for a pid. */
	sx_slock(&proctree_lock);
	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last ten processes; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	sx_xlock(&allproc_lock);
	if ((nprocs >= maxproc - 10 &&
	    priv_check_cred(td->td_ucred, PRIV_MAXPROC, SUSER_RUID) != 0) ||
	    nprocs >= maxproc) {
		error = EAGAIN;
		goto fail;
	}

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 *
	 * XXXRW: Can we avoid privilege here if it's not needed?
	 */
	error = priv_check_cred(td->td_ucred, PRIV_PROC_LIMIT, SUSER_RUID |
	    SUSER_ALLOWJAIL);
	if (error == 0)
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1, 0);
	else {
		PROC_LOCK(p1);
		ok = chgproccnt(td->td_ucred->cr_ruidinfo, 1,
		    lim_cur(p1, RLIMIT_NPROC));
		PROC_UNLOCK(p1);
	}
	if (!ok) {
		error = EAGAIN;
		goto fail;
	}
	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	nprocs++;
	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from lastpid+1 through pidchecked-1).
	 *
	 * If RFHIGHPID is set (used during system boot), do not allocate
	 * low-numbered pids.
	 */
	trypid = lastpid + 1;
	if (flags & RFHIGHPID) {
		if (trypid < 10)
			trypid = 10;
		pidchecked = 0;
	} else {
		if (randompid)
			trypid += arc4random() % randompid;
	}
retry:
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (trypid >= PID_MAX) {
		trypid = trypid % PID_MAX;
		if (trypid < 100)
			trypid += 100;
		pidchecked = 0;
	}
	if (trypid >= pidchecked) {
		int doingzomb = 0;

		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than trypid, so we can avoid checking for a while.
		 */
		p2 = LIST_FIRST(&allproc);
again:
		for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) {
			while (p2->p_pid == trypid ||
			    (p2->p_pgrp != NULL &&
			    (p2->p_pgrp->pg_id == trypid ||
			    (p2->p_session != NULL &&
			    p2->p_session->s_sid == trypid)))) {
				trypid++;
				if (trypid >= pidchecked)
					goto retry;
			}
			if (p2->p_pid > trypid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp != NULL) {
				if (p2->p_pgrp->pg_id > trypid &&
				    pidchecked > p2->p_pgrp->pg_id)
					pidchecked = p2->p_pgrp->pg_id;
				if (p2->p_session != NULL &&
				    p2->p_session->s_sid > trypid &&
				    pidchecked > p2->p_session->s_sid)
					pidchecked = p2->p_session->s_sid;
			}
		}
		if (!doingzomb) {
			doingzomb = 1;
			p2 = LIST_FIRST(&zombproc);
			goto again;
		}
	}
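	/*
	 * A worked example of the pidchecked cache: suppose trypid is 150
	 * and the scan finds pids/pgids/sids 100, 155, and 170 in use.  The
	 * loop above leaves trypid at 150 and pidchecked at 155, so the
	 * next several forks can hand out 151..154 without rescanning the
	 * allproc and zombproc lists at all.
	 */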
	sx_sunlock(&proctree_lock);

	/*
	 * RFHIGHPID does not mess with the lastpid counter during boot.
	 */
	if (flags & RFHIGHPID)
		pidchecked = 0;
	else
		lastpid = trypid;

	p2 = newproc;
	p2->p_state = PRS_NEW;		/* protect against others */
	p2->p_pid = trypid;
	AUDIT_ARG(pid, p2->p_pid);
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	sx_xunlock(&allproc_lock);
	/*
	 * Malloc things while we don't hold any locks.
	 */
	if (flags & RFSIGSHARE)
		newsigacts = NULL;
	else
		newsigacts = sigacts_alloc();

	/* Copy or share the current process's filedesc. */
	if (flags & RFCFDG) {
		fd = fdinit(p1->p_fd);
		fdtol = NULL;
	} else if (flags & RFFDG) {
		fd = fdcopy(p1->p_fd);
		fdtol = NULL;
	} else {
		fd = fdshare(p1->p_fd);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol =
				filedesc_to_leader_alloc(NULL,
							 NULL,
							 p1->p_leader);
		if ((flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table and
			 * shared process leaders.
			 */
			fdtol = p1->p_fdtol;
			FILEDESC_LOCK_FAST(p1->p_fd);
			fdtol->fdl_refcount++;
			FILEDESC_UNLOCK_FAST(p1->p_fd);
		} else {
			/*
			 * Shared file descriptor table, and
			 * different process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
							 p1->p_fd,
							 p2);
		}
	}
	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	td2 = FIRST_THREAD_IN_PROC(p2);

	/* Allocate and switch to an alternate kstack if specified. */
	if (pages != 0)
		vm_thread_new_altkstack(td2, pages);

	PROC_LOCK(p2);
	PROC_LOCK(p1);

	bzero(&p2->p_startzero,
	    __rangeof(struct proc, p_startzero, p_endzero));
	bzero(&td2->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    __rangeof(struct proc, p_startcopy, p_endcopy));
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));

	td2->td_sigstk = td->td_sigstk;
	td2->td_sigmask = td->td_sigmask;
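	/*
	 * The bzero/bcopy idiom above relies on marker fields laid out in
	 * struct proc and struct thread, plus __rangeof() from
	 * <sys/cdefs.h>, which reduces to offset arithmetic:
	 *
	 *	#define	__rangeof(type, start, end) \
	 *		(__offsetof(type, end) - __offsetof(type, start))
	 *
	 * Everything between p_startzero and p_endzero is cleared, and
	 * everything between p_startcopy and p_endcopy is inherited
	 * bitwise from the parent.
	 */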
	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	p2->p_flag = 0;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	mtx_lock_spin(&sched_lock);
	p2->p_sflag = PS_INMEM;
	/*
	 * Allow the scheduler to adjust the priority of the child and
	 * parent while we hold the sched_lock.
	 */
	sched_fork(td, td2);
	mtx_unlock_spin(&sched_lock);
	p2->p_ucred = crhold(td->td_ucred);
	td2->td_ucred = crhold(p2->p_ucred);
#ifdef AUDIT
	audit_proc_fork(p1, p2);
#endif
	pargs_hold(p2->p_args);
	if (flags & RFSIGSHARE) {
		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
	} else {
		sigacts_copy(newsigacts, p1->p_sigacts);
		p2->p_sigacts = newsigacts;
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	p2->p_textvp = p1->p_textvp;
	p2->p_fd = fd;
	p2->p_fdtol = fdtol;

	/*
	 * p_limit is copy-on-write.  Bump its refcount.
	 */
	p2->p_limit = lim_hold(p1->p_limit);

	pstats_fork(p1->p_stats, p2->p_stats);

	PROC_UNLOCK(p1);
	PROC_UNLOCK(p2);
	/* Bump references to the text vnode (for procfs). */
	if (p2->p_textvp)
		vref(p2->p_textvp);

	/*
	 * Set up linkage for kernel based threading.
	 */
	if ((flags & RFTHREAD) != 0) {
		mtx_lock(&ppeers_lock);
		p2->p_peers = p1->p_peers;
		p1->p_peers = p2;
		p2->p_leader = p1->p_leader;
		mtx_unlock(&ppeers_lock);
		PROC_LOCK(p1->p_leader);
		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(p1->p_leader);
			/*
			 * The task leader is exiting, so process p1 is
			 * going to be killed shortly.  Since p1 obviously
			 * isn't dead yet, we know that the leader is either
			 * sending SIGKILL's to all the processes in this
			 * task or is sleeping waiting for all the peers to
			 * exit.  We let p1 complete the fork, but we need
			 * to go ahead and kill the new process p2 since
			 * the task leader may not get a chance to send
			 * SIGKILL to it.  We leave it on the list so that
			 * the task leader will wait for this new process
			 * to commit suicide.
			 */
			PROC_LOCK(p2);
			psignal(p2, SIGKILL);
			PROC_UNLOCK(p2);
		} else
			PROC_UNLOCK(p1->p_leader);
	} else {
		p2->p_peers = NULL;
		p2->p_leader = p2;
	}
	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);
	PROC_LOCK(p2);
	PROC_LOCK(p1);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	td2->td_pflags |= td->td_pflags & TDP_ALTSTACK;
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	p2->p_pgrp = p1->p_pgrp;
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);

	callout_init(&p2->p_itcallout, CALLOUT_MPSAFE);
#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 */
	mtx_lock(&ktrace_mtx);
	KASSERT(p2->p_tracevp == NULL, ("new process has a ktrace vnode"));
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracevp = p1->p_tracevp) != NULL) {
			VREF(p2->p_tracevp);
			KASSERT(p1->p_tracecred != NULL,
			    ("ktrace vnode with no cred"));
			p2->p_tracecred = crhold(p1->p_tracecred);
		}
	}
	mtx_unlock(&ktrace_mtx);
#endif
	/*
	 * If PF_FORK is set, the child process inherits the
	 * procfs ioctl flags from its parent.
	 */
	if (p1->p_pfsflags & PF_FORK) {
		p2->p_stops = p1->p_stops;
		p2->p_pfsflags = p1->p_pfsflags;
	}
	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */
	_PHOLD(p1);
	PROC_UNLOCK(p1);

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	p2->p_pptr = pptr;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	sx_xunlock(&proctree_lock);

	/* Inform accounting that we have forked. */
	p2->p_acflag = AFORK;
	PROC_UNLOCK(p2);
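	/*
	 * For example, a hypothetical rfork(RFPROC | RFNOWAIT) caller ends
	 * up with a child parented to init, so init (not the caller) reaps
	 * it and the caller never sees it via wait(2).
	 */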
	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later.  (ie: directly into user mode)
	 */
	vm_forkproc(td, p2, td2, flags);

	if (flags == (RFFDG | RFPROC)) {
		atomic_add_int(&cnt.v_forks, 1);
		atomic_add_int(&cnt.v_forkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		atomic_add_int(&cnt.v_vforks, 1);
		atomic_add_int(&cnt.v_vforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (p1 == &proc0) {
		atomic_add_int(&cnt.v_kthreads, 1);
		atomic_add_int(&cnt.v_kthreadpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else {
		atomic_add_int(&cnt.v_rforks, 1);
		atomic_add_int(&cnt.v_rforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	}
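	/*
	 * These counters distinguish plain fork(), vfork(), kernel-thread
	 * creation by proc0, and everything else (rfork variants).  They
	 * should surface in userland statistics tools; e.g. "vmstat -s"
	 * reports the accumulated fork/vfork/rfork totals.
	 */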
	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	EVENTHANDLER_INVOKE(process_fork, p1, p2, flags);

	/*
	 * Set the child start time and mark the process as being complete.
	 */
	microuptime(&p2->p_stats->p_start);
	mtx_lock_spin(&sched_lock);
	p2->p_state = PRS_NORMAL;

	/*
	 * If RFSTOPPED not requested, make child runnable and add to
	 * run queue.
	 */
	if ((flags & RFSTOPPED) == 0) {
		TD_SET_CAN_RUN(td2);
		setrunqueue(td2, SRQ_BORING);
	}
	mtx_unlock_spin(&sched_lock);
	/*
	 * Now can be swapped.
	 */
	PROC_LOCK(p1);
	_PRELE(p1);

	/*
	 * Tell any interested parties about the new process.
	 */
	KNOTE_LOCKED(&p1->p_klist, NOTE_FORK | p2->p_pid);

	PROC_UNLOCK(p1);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	PROC_LOCK(p2);
	while (p2->p_flag & P_PPWAIT)
		msleep(p1, &p2->p_mtx, PWAIT, "ppwait", 0);
	PROC_UNLOCK(p2);

	/*
	 * If other threads are waiting, let them continue now.
	 */
	if (p1->p_flag & P_HADTHREADS) {
		PROC_LOCK(p1);
		thread_single_end();
		PROC_UNLOCK(p1);
	}
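	/*
	 * Sketch of the vfork(2) contract enforced by the msleep() loop
	 * above: the parent sleeps on its own proc pointer until the exec
	 * or exit paths clear P_PPWAIT in the child and wake it, so a
	 * vfork()ed child can safely borrow the parent's address space
	 * (RFMEM) in the window before execve(2).
	 */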
	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	return (0);
fail:
	sx_sunlock(&proctree_lock);
	if (ppsratecheck(&lastfail, &curfail, 1))
		printf("maxproc limit exceeded by uid %i, please see tuning(7) and login.conf(5).\n",
		    td->td_ucred->cr_ruid);
	sx_xunlock(&allproc_lock);
#ifdef MAC
	mac_destroy_proc(newproc);
#endif
#ifdef AUDIT
	audit_proc_free(newproc);
#endif
	uma_zfree(proc_zone, newproc);
	if (p1->p_flag & P_HADTHREADS) {
		PROC_LOCK(p1);
		thread_single_end();
		PROC_UNLOCK(p1);
	}
	tsleep(&forksleep, PUSER, "fork", hz / 2);
	return (error);
}
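/*
 * fork1() is also the backend for kernel thread creation: kthread_create(9)
 * is expected to call it with RFMEM set (plus RFSTOPPED and caller-supplied
 * flags) so the new process shares the kernel map; the p1 == &proc0
 * accounting case above covers exactly those forks.
 */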
/*
 * Handle the return of a child process from fork1().  This function
 * is called from the MD fork_trampoline() entry point.
 */
void
fork_exit(callout, arg, frame)
	void (*callout)(void *, struct trapframe *);
	void *arg;
	struct trapframe *frame;
{
	struct proc *p;
	struct thread *td;

	/*
	 * Finish setting up thread glue so that it begins execution in a
	 * non-nested critical section with sched_lock held but not recursed.
	 */
	td = curthread;
	p = td->td_proc;
	td->td_oncpu = PCPU_GET(cpuid);
	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));

	sched_lock.mtx_lock = (uintptr_t)td;
	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
	CTR4(KTR_PROC, "fork_exit: new thread %p (kse %p, pid %d, %s)",
		td, td->td_sched, p->p_pid, p->p_comm);

	/*
	 * Processes normally resume in mi_switch() after being
	 * cpu_switch()'ed to, but when children start up they arrive here
	 * instead, so we must do much the same things as mi_switch() would.
	 */
	if ((td = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(td);
	}
	td = curthread;
	mtx_unlock_spin(&sched_lock);

	/*
	 * cpu_set_fork_handler intercepts this function call to
	 * have this call a non-return function to stay in kernel mode.
	 * initproc has its own fork handler, but it does return.
	 */
	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
	callout(arg, frame);

	/*
	 * Check if a kernel thread misbehaved and returned from its main
	 * function.
	 */
	if (p->p_flag & P_KTHREAD) {
		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
		    p->p_comm, p->p_pid);
		kthread_exit(0);
	}
	mtx_assert(&Giant, MA_NOTOWNED);

	EVENTHANDLER_INVOKE(schedtail, p);
}
/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  Giant is not held on entry, and must not
 * be held on return.  This function is passed in to fork_exit() as the
 * first parameter and is called when returning to a new userland process.
 */
void
fork_return(td, frame)
	struct thread *td;
	struct trapframe *frame;
{

	userret(td, frame);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET))
		ktrsysret(SYS_fork, 0, 0);
#endif
	mtx_assert(&Giant, MA_NOTOWNED);
}