/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/syscall.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/kthread.h>
#include <sys/unistd.h>

#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <sys/vmmeter.h>
static MALLOC_DEFINE(M_ATFORK, "atfork", "atfork callback");

/*
 * These are the structures used to create a callout list for things to do
 * when forking a process.
 */
struct forklist {
	forklist_fn function;
	TAILQ_ENTRY(forklist) next;
};

static struct sx fork_list_lock;

TAILQ_HEAD(forklist_head, forklist);
static struct forklist_head fork_list = TAILQ_HEAD_INITIALIZER(fork_list);
#ifndef _SYS_SYSPROTO_H_
static void
init_fork_list(void *data __unused)
{
	sx_init(&fork_list_lock, "fork list");
}
SYSINIT(fork_list, SI_SUB_INTRINSIC, SI_ORDER_ANY, init_fork_list, NULL);
	struct fork_args *uap;

	error = fork1(td, RFFDG | RFPROC, &p2);
	td->td_retval[0] = p2->p_pid;
	td->td_retval[1] = 0;
	struct vfork_args *uap;

	error = fork1(td, RFFDG | RFPROC | RFPPWAIT | RFMEM, &p2);
	td->td_retval[0] = p2->p_pid;
	td->td_retval[1] = 0;
	struct rfork_args *uap;

	/* Don't allow kernel-only flags. */
	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);

	error = fork1(td, uap->flags, &p2);
	td->td_retval[0] = p2 ? p2->p_pid : 0;
	td->td_retval[1] = 0;
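/*
 * Note: as the wrappers above show, fork(2) and vfork(2) are just fixed-flag
 * front ends for fork1() -- fork() behaves like rfork(RFFDG | RFPROC) and
 * vfork() like rfork(RFFDG | RFPROC | RFPPWAIT | RFMEM) -- while rfork(2)
 * passes the caller's flags through after rejecting the kernel-only ones.
 */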
int nprocs = 1;				/* process 0 */
SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
/*
 * Random component to lastpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste entropy, and don't let it be impossibly large.  Using a modulus that
 * is too big causes a LOT more process table scans and slows down fork
 * processing as the pidchecked caching is defeated.
 */
static int randompid = 0;
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error || !req->newptr)
		return (error);
	if (pid < 0 || pid > PID_MAX - 100)	/* out of range */
		pid = PID_MAX - 100;
	else if (pid < 2)			/* NOP */
		pid = 0;
	else if (pid < 100)			/* Make it reasonable */
		pid = 100;

SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
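/*
 * Note: the handler above only sanity checks and clamps the value; the
 * modulus itself is applied in fork1() below, where a nonzero randompid
 * advances the candidate PID by arc4random() % randompid before the
 * free-PID scan.  Leaving kern.randompid at 0 keeps the traditional
 * sequential PID allocation.
 */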
kse_init(struct kse *kse1, struct kse *kse2)

thread_init(struct thread *thread1, struct thread *thread2)

ksegrp_init(struct ksegrp *ksegrp1, struct ksegrp *ksegrp2)
int
fork1(td, flags, procp)
	struct thread *td;			/* parent proc */
	int flags;
	struct proc **procp;			/* child proc */
{
	struct proc *p2, *pptr;
	struct proc *newproc;
	static int pidchecked = 0;
	struct proc *p1 = td->td_proc;
	/* Can't copy and clear. */
	if ((flags & (RFFDG|RFCFDG)) == (RFFDG|RFCFDG))
		return (EINVAL);

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		vm_forkproc(td, 0, flags);
		/*
		 * Close all file descriptors.
		 */
		if (flags & RFCFDG) {
			struct filedesc *fdtmp;

			fdtmp = fdinit(td);	/* XXXKSE */
			fdfree(td);		/* XXXKSE */
		/*
		 * Unshare file descriptors (from parent).
		 */
		if (flags & RFFDG) {
			FILEDESC_LOCK(p1->p_fd);
			if (p1->p_fd->fd_refcnt > 1) {
				struct filedesc *newfd;

				FILEDESC_UNLOCK(p1->p_fd);

				FILEDESC_UNLOCK(p1->p_fd);
	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.  Don't allow
	 * a nonprivileged user to use the last process; don't let root
	 * exceed the limit.  The variable nprocs is the current number of
	 * processes, maxproc is the limit.
	 */
	uid = p1->p_ucred->cr_ruid;
	if ((nprocs >= maxproc - 1 && uid != 0) || nprocs >= maxproc) {
	/*
	 * Increment the nprocs resource before blocking can occur.  There
	 * are hard-limits as to the number of processes that can run.
	 */
	nprocs++;

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	ok = chgproccnt(p1->p_ucred->cr_ruidinfo, 1,
	    (uid != 0) ? p1->p_rlimit[RLIMIT_NPROC].rlim_cur : 0);
	if (!ok) {
		/*
		 * Back out the process count.
		 */
		nprocs--;
	/* Allocate new proc. */
	newproc = zalloc(proc_zone);
	/*
	 * Setup linkage for kernel-based threading.
	 */
	if ((flags & RFTHREAD) != 0) {
		newproc->p_peers = p1->p_peers;
		p1->p_peers = newproc;
		newproc->p_leader = p1->p_leader;
	} else {
		newproc->p_peers = NULL;
		newproc->p_leader = newproc;
	}

	newproc->p_vmspace = NULL;
	/*
	 * Find an unused process ID.  We remember a range of unused IDs
	 * ready to use (from lastpid+1 through pidchecked-1).
	 *
	 * If RFHIGHPID is set (used during system boot), do not allocate
	 * low-numbered pids.
	 */
	sx_xlock(&allproc_lock);
	trypid = lastpid + 1;
	if (flags & RFHIGHPID) {
		pidchecked = 0;
	} else if (randompid)
		trypid += arc4random() % randompid;
	/*
	 * If the process ID prototype has wrapped around,
	 * restart somewhat above 0, as the low-numbered procs
	 * tend to include daemons that don't exit.
	 */
	if (trypid >= PID_MAX) {
		trypid = trypid % PID_MAX;

	if (trypid >= pidchecked) {
		pidchecked = PID_MAX;
		/*
		 * Scan the active and zombie procs to check whether this pid
		 * is in use.  Remember the lowest pid that's greater
		 * than trypid, so we can avoid checking for a while.
		 */
		p2 = LIST_FIRST(&allproc);

		for (; p2 != NULL; p2 = LIST_NEXT(p2, p_list)) {
			while (p2->p_pid == trypid ||
			    p2->p_pgrp->pg_id == trypid ||
			    p2->p_session->s_sid == trypid) {

				if (trypid >= pidchecked)

			if (p2->p_pid > trypid && pidchecked > p2->p_pid)
				pidchecked = p2->p_pid;
			if (p2->p_pgrp->pg_id > trypid &&
			    pidchecked > p2->p_pgrp->pg_id)
				pidchecked = p2->p_pgrp->pg_id;
			if (p2->p_session->s_sid > trypid &&
			    pidchecked > p2->p_session->s_sid)
				pidchecked = p2->p_session->s_sid;

		p2 = LIST_FIRST(&zombproc);
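		/*
		 * Note on the scan above: pidchecked ends up holding the
		 * lowest pid, process group id, or session id above trypid
		 * that is still in use, so everything in the range
		 * [trypid, pidchecked) is known to be free.  Later forks can
		 * hand out PIDs from that window without walking the proc
		 * lists again; the scan (over both allproc and zombproc) is
		 * only redone once trypid catches up to pidchecked.
		 */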
	/*
	 * RFHIGHPID does not mess with the lastpid counter during boot.
	 */
	if (flags & RFHIGHPID)
		pidchecked = 0;
	else
		lastpid = trypid;

	p2 = newproc;
	p2->p_pid = trypid;
	p2->p_stat = SIDL;		/* protect against others */
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	sx_xunlock(&allproc_lock);
	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p2->p_startzero,
	    (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
	bzero(&p2->p_kse.ke_startzero,
	    (unsigned) ((caddr_t)&p2->p_kse.ke_endzero
	    - (caddr_t)&p2->p_kse.ke_startzero));
	bzero(&p2->p_thread.td_startzero,
	    (unsigned) ((caddr_t)&p2->p_thread.td_endzero
	    - (caddr_t)&p2->p_thread.td_startzero));
	bzero(&p2->p_ksegrp.kg_startzero,
	    (unsigned) ((caddr_t)&p2->p_ksegrp.kg_endzero
	    - (caddr_t)&p2->p_ksegrp.kg_startzero));

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));
	bcopy(&p1->p_kse.ke_startcopy, &p2->p_kse.ke_startcopy,
	    (unsigned) ((caddr_t)&p2->p_kse.ke_endcopy
	    - (caddr_t)&p2->p_kse.ke_startcopy));
	bcopy(&p1->p_thread.td_startcopy, &p2->p_thread.td_startcopy,
	    (unsigned) ((caddr_t)&p2->p_thread.td_endcopy
	    - (caddr_t)&p2->p_thread.td_startcopy));
	bcopy(&p1->p_ksegrp.kg_startcopy, &p2->p_ksegrp.kg_startcopy,
	    (unsigned) ((caddr_t)&p2->p_ksegrp.kg_endcopy
	    - (caddr_t)&p2->p_ksegrp.kg_startcopy));
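	/*
	 * Note on the bzero()/bcopy() calls above: struct proc and the
	 * embedded kse, thread, and ksegrp structures group their members
	 * between startzero/endzero and startcopy/endcopy marker fields, so
	 * everything in the first range starts out zeroed in the child and
	 * everything in the second range is inherited verbatim from the
	 * parent.  Working on address ranges keeps fork1() correct when new
	 * members are added to those ranges, without listing each one here.
	 */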
	/*
	 * XXXKSE Theoretically only the running thread would get copied.
	 * Others in the kernel would be 'aborted' in the child,
	 * i.e. return E*something*.
	 */
	mtx_init(&p2->p_mtx, "process lock", MTX_DEF);

	/* Note: XXXKSE no pcb or u-area yet. */
	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructs are set in vm_forkproc.
	 */
	mtx_lock_spin(&sched_lock);
	p2->p_sflag = PS_INMEM;
	if (p1->p_sflag & PS_PROFIL)
		startprofclock(p2);
	mtx_unlock_spin(&sched_lock);
	p2->p_ucred = crhold(p1->p_ucred);
	p2->p_thread.td_ucred = crhold(p2->p_ucred);	/* XXXKSE */

	if (p2->p_args)
		p2->p_args->ar_ref++;

	if (flags & RFSIGSHARE) {
		p2->p_procsig = p1->p_procsig;
		p2->p_procsig->ps_refcnt++;
		if (p1->p_sigacts == &p1->p_uarea->u_sigacts) {
			struct sigacts *newsigacts;

			/* Create the shared sigacts structure */
			MALLOC(newsigacts, struct sigacts *,
			    sizeof(struct sigacts), M_SUBPROC, M_WAITOK);
			/*
			 * Set p_sigacts to the new shared structure.
			 * Note that this is updating p1->p_sigacts at the
			 * same time, since p_sigacts is just a pointer to
			 * the shared p_procsig->ps_sigacts.
			 */
			p2->p_sigacts = newsigacts;
			*p2->p_sigacts = p1->p_uarea->u_sigacts;
		}
	} else {
		MALLOC(p2->p_procsig, struct procsig *, sizeof(struct procsig),
		    M_SUBPROC, M_WAITOK);
		bcopy(p1->p_procsig, p2->p_procsig, sizeof(*p2->p_procsig));
		p2->p_procsig->ps_refcnt = 1;
		p2->p_sigacts = NULL;	/* finished in vm_forkproc() */
	}
	if (flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;
	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;

	else if (flags & RFFDG) {
		FILEDESC_LOCK(p1->p_fd);

		FILEDESC_UNLOCK(p1->p_fd);
	/*
	 * If p_limit is still copy-on-write, bump refcnt,
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
	if (p1->p_limit->p_lflags & PL_SHAREMOD)
		p2->p_limit = limcopy(p1->p_limit);
	else {
		p2->p_limit = p1->p_limit;
		p2->p_limit->p_refcnt++;
	}
	/*
	 * Preserve some more flags in subprocess.  PS_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & (P_SUGID | P_ALTSTACK);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	if (flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	LIST_INSERT_AFTER(p1, p2, p_pglist);
	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if (flags & RFNOWAIT)
		pptr = initproc;
	else
		pptr = p1;
	sx_xlock(&proctree_lock);
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	sx_xunlock(&proctree_lock);
	LIST_INIT(&p2->p_children);
	LIST_INIT(&p2->p_thread.td_contested);	/* XXXKSE only 1 thread? */

	callout_init(&p2->p_itcallout, 0);
	callout_init(&p2->p_thread.td_slpcallout, 1);	/* XXXKSE */
	/*
	 * Copy traceflag and tracefile if enabled.  If not inherited,
	 * these were zeroed above but we still could have a trace race
	 * so make sure p2's p_tracep is NULL.
	 */
	if ((p1->p_traceflag & KTRFAC_INHERIT) && p2->p_tracep == NULL) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL) {
	/*
	 * Set priority of child to be that of parent.
	 * XXXKSE hey! copying the estcpu seems dodgy.. should split it..
	 */
	mtx_lock_spin(&sched_lock);
	p2->p_ksegrp.kg_estcpu = p1->p_ksegrp.kg_estcpu;
	mtx_unlock_spin(&sched_lock);
	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */

	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later.  (ie: directly into user mode)
	 */
	vm_forkproc(td, p2, flags);
	if (flags == (RFFDG | RFPROC)) {
		cnt.v_forkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		cnt.v_vforkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else if (p1 == &proc0) {
		cnt.v_kthreadpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	} else {
		cnt.v_rforkpages += p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize;
	}
	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 *   What if they have an error? XXX
	 */
	sx_slock(&fork_list_lock);
	TAILQ_FOREACH(ep, &fork_list, next) {
		(*ep->function)(p1, p2, flags);
	}
	sx_sunlock(&fork_list_lock);
	/*
	 * If RFSTOPPED not requested, make child runnable and add to
	 * run queue.
	 */
	microtime(&(p2->p_stats->p_start));
	p2->p_acflag = AFORK;
	if ((flags & RFSTOPPED) == 0) {
		mtx_lock_spin(&sched_lock);
		setrunqueue(&p2->p_thread);
		mtx_unlock_spin(&sched_lock);
	}
	/*
	 * Now can be swapped.
	 */

	/*
	 * Tell any interested parties about the new process.
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);
	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	while (p2->p_flag & P_PPWAIT)
		msleep(p1, &p2->p_mtx, PWAIT, "ppwait", 0);
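	/*
	 * Note: P_PPWAIT is only set on the child earlier in fork1() when
	 * RFPPWAIT was requested (i.e. for vfork()), so a plain fork() never
	 * enters this loop.  The flag is cleared, and the parent woken,
	 * elsewhere once the child execs or exits.
	 */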
	/*
	 * Return child proc pointer to parent.
	 */
	*procp = p2;
	return (0);
}

/*
 * The next two functions are general routines to handle adding/deleting
 * items on the fork callout list.
 *
 * Take the arguments given and put them onto the fork callout list;
 * however, first make sure that it's not already there.
 * Returns 0 on success or a standard error number.
 */
	forklist_fn function;

	/* let the programmer know if he's been stupid */
	if (rm_at_fork(function))
		printf("WARNING: fork callout entry (%p) already present\n",
		    function);

	ep = malloc(sizeof(*ep), M_ATFORK, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	sx_xlock(&fork_list_lock);
	TAILQ_INSERT_TAIL(&fork_list, ep, next);
	sx_xunlock(&fork_list_lock);
	return (0);
}
/*
 * Scan the fork callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1).
 */
	forklist_fn function;

	sx_xlock(&fork_list_lock);
	TAILQ_FOREACH(ep, &fork_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&fork_list, ep, next);
			sx_xunlock(&fork_list_lock);
			free(ep, M_ATFORK);
			return (1);
		}
	}
	sx_xunlock(&fork_list_lock);
	return (0);
}
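/*
 * Hypothetical usage sketch (the my_subsys_* names are invented for
 * illustration and are not part of this file): a subsystem that wants to
 * run code every time a process forks registers a forklist_fn with
 * at_fork() and removes it again with rm_at_fork().  Judging from the
 * call site in fork1() above, the callback receives the parent, the new
 * child, and the fork flags.
 */
#if 0
static void
my_subsys_fork_hook(struct proc *p1, struct proc *p2, int flags)
{

	/* Inspect or adjust the freshly created child here. */
}

static void
my_subsys_load(void)
{

	if (at_fork(my_subsys_fork_hook) != 0)
		printf("my_subsys: could not register fork hook\n");
}

static void
my_subsys_unload(void)
{

	(void)rm_at_fork(my_subsys_fork_hook);
}
#endif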
/*
 * Handle the return of a child process from fork1().  This function
 * is called from the MD fork_trampoline() entry point.
 */
void
fork_exit(callout, arg, frame)
	void (*callout)(void *, struct trapframe *);
	void *arg;
	struct trapframe *frame;
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	td->td_kse->ke_oncpu = PCPU_GET(cpuid);

	/*
	 * Setup the sched_lock state so that we can release it.
	 */
	sched_lock.mtx_lock = (uintptr_t)td;
	sched_lock.mtx_recurse = 0;
	td->td_savecrit = CRITICAL_FORK;
	CTR3(KTR_PROC, "fork_exit: new proc %p (pid %d, %s)", p, p->p_pid,
	    p->p_comm);
	if (PCPU_GET(switchtime.tv_sec) == 0)
		microuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);
	mtx_unlock_spin(&sched_lock);
	/*
	 * cpu_set_fork_handler intercepts this function call to
	 * have this call a non-return function to stay in kernel mode.
	 * initproc has its own fork handler, but it does return.
	 */
	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
	callout(arg, frame);

	/*
	 * Check if a kernel thread misbehaved and returned from its main
	 * function.
	 */
	if (p->p_flag & P_KTHREAD) {
		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
		    p->p_comm, p->p_pid);
		kthread_exit(0);
	}
	crfree(td->td_ucred);

	mtx_assert(&Giant, MA_NOTOWNED);
}
/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  Giant is not held on entry, and must not
 * be held on return.  This function is passed in to fork_exit() as the
 * first parameter and is called when returning to a new userland process.
 */
void
fork_return(td, frame)
	struct thread *td;
	struct trapframe *frame;
{
	userret(td, frame, 0);
	if (KTRPOINT(td->td_proc, KTR_SYSRET)) {
		ktrsysret(td->td_proc->p_tracep, SYS_fork, 0, 0);
	}
	mtx_assert(&Giant, MA_NOTOWNED);
}