/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_fork.c	8.6 (Berkeley) 4/8/94
 */
#include <sys/cdefs.h>
#include "opt_ktrace.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitstring.h>
#include <sys/sysproto.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/procdesc.h>
#include <sys/ptrace.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/unistd.h>
#include <sys/sysent.h>
#include <sys/signalvar.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <sys/dtrace_bsd.h>
dtrace_fork_func_t	dtrace_fasttrap_fork;
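/*
 * Statically-defined tracing (SDT) probe for process creation; it is fired
 * from do_fork() via SDT_PROBE3() once the new process is fully set up.
 */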
SDT_PROVIDER_DECLARE(proc);
SDT_PROBE_DEFINE3(proc, , , create, "struct proc *", "struct proc *", "int");

#ifndef _SYS_SYSPROTO_H_
struct fork_args {
	int	dummy;
};
#endif
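/*
 * fork(2) system call entry point.  The child gets a copy of the parent's
 * file descriptor table (RFFDG) and a new process is created (RFPROC); the
 * child's pid is returned to the parent in td_retval[0].
 */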
int
sys_fork(struct thread *td, struct fork_args *uap)
{
	struct fork_req fr;
	int error, pid;

	bzero(&fr, sizeof(fr));
	fr.fr_flags = RFFDG | RFPROC;
	fr.fr_pidp = &pid;
	error = fork1(td, &fr);
	if (error == 0) {
		td->td_retval[0] = pid;
		td->td_retval[1] = 0;
	}
	return (error);
}
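/*
 * pdfork(2) system call entry point: like fork(2), but the child is also
 * given a process descriptor (RFPROCDESC) whose file descriptor number is
 * copied out to uap->fdp in the parent.
 */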
int
sys_pdfork(struct thread *td, struct pdfork_args *uap)
{
	struct fork_req fr;
	int error, fd, pid;

	bzero(&fr, sizeof(fr));
	fr.fr_flags = RFFDG | RFPROC | RFPROCDESC;
	fr.fr_pidp = &pid;
	fr.fr_pd_fd = &fd;
	fr.fr_pd_flags = uap->flags;
	AUDIT_ARG_FFLAGS(uap->flags);
	/*
	 * It is necessary to return fd by reference because 0 is a valid file
	 * descriptor number, and the child needs to be able to distinguish
	 * itself from the parent using the return value.
	 */
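	/*
	 * Illustrative userland sketch (not part of this file, error
	 * handling omitted):
	 *
	 *	int pd;
	 *	pid_t pid = pdfork(&pd, 0);
	 *	if (pid == 0) {
	 *		// child: identified by the zero return value
	 *	} else if (pid > 0) {
	 *		// parent: pd refers to the child and can be used
	 *		// with pdkill(2) or monitored with kevent(2)
	 *	}
	 */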
	error = fork1(td, &fr);
	if (error == 0) {
		td->td_retval[0] = pid;
		td->td_retval[1] = 0;
		error = copyout(&fd, uap->fdp, sizeof(fd));
	}
	return (error);
}
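/*
 * vfork(2) system call entry point.  The child shares the parent's address
 * space (RFMEM) and the parent is suspended until the child execs or exits
 * (RFPPWAIT).
 */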
int
sys_vfork(struct thread *td, struct vfork_args *uap)
{
	struct fork_req fr;
	int error, pid;

	bzero(&fr, sizeof(fr));
	fr.fr_flags = RFFDG | RFPROC | RFPPWAIT | RFMEM;
	fr.fr_pidp = &pid;
	error = fork1(td, &fr);
	if (error == 0) {
		td->td_retval[0] = pid;
		td->td_retval[1] = 0;
	}
	return (error);
}
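/*
 * rfork(2) system call entry point.  The caller supplies the RF* flags
 * directly; RFSPAWN is special-cased into a vfork-style flag set
 * (RFFDG | RFPROC | RFPPWAIT | RFMEM) plus FR2_DROPSIG_CAUGHT.
 */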
int
sys_rfork(struct thread *td, struct rfork_args *uap)
{
	struct fork_req fr;
	int error, pid;

	/* Don't allow kernel-only flags. */
	if ((uap->flags & RFKERNELONLY) != 0)
		return (EINVAL);
	/* RFSPAWN must not appear with others. */
	if ((uap->flags & RFSPAWN) != 0 && uap->flags != RFSPAWN)
		return (EINVAL);

	AUDIT_ARG_FFLAGS(uap->flags);
	bzero(&fr, sizeof(fr));
	if ((uap->flags & RFSPAWN) != 0) {
		fr.fr_flags = RFFDG | RFPROC | RFPPWAIT | RFMEM;
		fr.fr_flags2 = FR2_DROPSIG_CAUGHT;
	} else
		fr.fr_flags = uap->flags;
	fr.fr_pidp = &pid;
	error = fork1(td, &fr);
	if (error == 0) {
		td->td_retval[0] = pid;
		td->td_retval[1] = 0;
	}
	return (error);
}
int __exclusive_cache_line	nprocs = 1;	/* process 0 */
int	lastpid = 0;
SYSCTL_INT(_kern, OID_AUTO, lastpid, CTLFLAG_RD, &lastpid, 0,
    "Last used PID");

/*
 * Random component to lastpid generation.  We mix in a random factor to make
 * it a little harder to predict.  We sanity check the modulus value to avoid
 * doing it in critical paths.  Don't let it be too small or we pointlessly
 * waste randomness entropy, and don't let it be impossibly large.  Using a
 * modulus that is too big causes a LOT more process table scans and slows
 * down fork processing as the pidchecked caching is defeated.
 */
static int randompid = 0;

static int
sysctl_kern_randompid(SYSCTL_HANDLER_ARGS)
{
	int error, pid;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error != 0)
		return (error);
	sx_xlock(&allproc_lock);
	pid = randompid;
	error = sysctl_handle_int(oidp, &pid, 0, req);
	if (error == 0 && req->newptr != NULL) {
		if (pid == 0)
			randompid = 0;
		else if (pid == 1)
			/* generate a random PID modulus between 100 and 1123 */
			randompid = 100 + arc4random() % 1024;
		else if (pid < 0 || pid > pid_max - 100)
			randompid = pid_max - 100;
		else if (pid < 100)
			/* Make it reasonable */
			randompid = 100;
		else
			randompid = pid;
	}
	sx_xunlock(&allproc_lock);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, randompid,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
    sysctl_kern_randompid, "I",
    "Random PID modulus. Special values: 0: disable, 1: choose random value");
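/*
 * Example of how an administrator might drive the knob above (illustrative
 * values only):
 *
 *	sysctl kern.randompid=0		# sequential pid allocation
 *	sysctl kern.randompid=1		# kernel picks a modulus (100..1123)
 *	sysctl kern.randompid=1000	# pids advance by arc4random() % 1000
 */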
extern bitstr_t proc_id_pidmap;
extern bitstr_t proc_id_grpidmap;
extern bitstr_t proc_id_sessidmap;
extern bitstr_t proc_id_reapmap;
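/*
 * Bitmaps (maintained elsewhere in the kernel) tracking which IDs are in
 * use as pids, process group IDs, session IDs, and reaper subtree IDs;
 * fork_findpid() below consults all of them so that a new pid cannot
 * collide with any identifier that is still live.
 */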
/*
 * Find an unused process ID
 *
 * If RFHIGHPID is set (used during system boot), do not allocate
 * low-numbered pids.
 */
static int
fork_findpid(int flags)
{
	/*
	 * Avoid calling arc4random with procid_lock held.
	 */
	if (__predict_false(randompid))
		random = arc4random() % randompid;

	mtx_lock(&procid_lock);
	trypid = lastpid + 1;
	if (flags & RFHIGHPID) {
		/* ... */
	}
	if (trypid >= pid_max)
		trypid = 2;

	bit_ffc_at(&proc_id_pidmap, trypid, pid_max, &result);
	if (result == -1) {
		KASSERT(trypid != 2, ("unexpectedly ran out of IDs"));
		/* ... */
	}
	if (bit_test(&proc_id_grpidmap, result) ||
	    bit_test(&proc_id_sessidmap, result) ||
	    bit_test(&proc_id_reapmap, result)) {
		/* ... */
	}

	/*
	 * RFHIGHPID does not mess with the lastpid counter during boot.
	 */
	if ((flags & RFHIGHPID) == 0)
		lastpid = result;

	bit_set(&proc_id_pidmap, result);
	mtx_unlock(&procid_lock);
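/*
 * fork_norfproc() handles the rfork(2) flag combinations that do not
 * actually create a new process (RFPROC clear): it can still unshare the
 * file descriptor table and the address space of the calling process.
 */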
static int
fork_norfproc(struct thread *td, int flags)
{
	KASSERT((flags & RFPROC) == 0,
	    ("fork_norfproc called with RFPROC set"));

	/*
	 * Quiesce other threads if necessary.  If RFMEM is not specified we
	 * must ensure that other threads do not concurrently create a second
	 * process sharing the vmspace, see vmspace_unshare().
	 */
	if ((p1->p_flag & (P_HADTHREADS | P_SYSTEM)) == P_HADTHREADS &&
	    ((flags & (RFCFDG | RFFDG)) != 0 || (flags & RFMEM) == 0)) {
		if (thread_single(p1, SINGLE_BOUNDARY)) {
			/* ... */
		}
	}

	error = vm_forkproc(td, NULL, NULL, NULL, flags);

	/*
	 * Close all file descriptors.
	 */
	if ((flags & RFCFDG) != 0) {
		struct filedesc *fdtmp;
		struct pwddesc *pdtmp;

		pdtmp = pdinit(td->td_proc->p_pd, false);
		/* ... */
	}

	/*
	 * Unshare file descriptors (from parent).
	 */
	if ((flags & RFFDG) != 0) {
		/* ... */
	}

	if ((p1->p_flag & (P_HADTHREADS | P_SYSTEM)) == P_HADTHREADS &&
	    ((flags & (RFCFDG | RFFDG)) != 0 || (flags & RFMEM) == 0)) {
		thread_single_end(p1, SINGLE_BOUNDARY);
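/*
 * do_fork() is the "cannot fail" half of process creation: fork1() has
 * already allocated the new proc, thread, and (optionally) vmspace, and
 * here the child is linked into the allproc list, the pid hash, its
 * process group and parent, inherits per-process state, and is finally
 * made runnable unless RFSTOPPED was requested.
 */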
static void
do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread *td2,
    struct vmspace *vm2, struct file *fp_procdesc)
{
	struct proc *p1, *pptr;
	struct filedesc *fd;
	struct filedesc_to_leader *fdtol;
	struct pwddesc *pd;
	struct sigacts *newsigacts;

	p1 = td->td_proc;

	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    __rangeof(struct proc, p_startcopy, p_endcopy));
	pargs_hold(p2->p_args);

	bzero(&p2->p_startzero,
	    __rangeof(struct proc, p_startzero, p_endzero));

	/* Tell the prison that we exist. */
	prison_proc_hold(p2->p_ucred->cr_prison);

	p2->p_state = PRS_NEW;		/* protect against others */
	p2->p_pid = fork_findpid(fr->fr_flags);
	AUDIT_ARG_PID(p2->p_pid);
	TSFORK(p2->p_pid, p1->p_pid);

	sx_xlock(&allproc_lock);
	LIST_INSERT_HEAD(&allproc, p2, p_list);
	prison_proc_link(p2->p_ucred->cr_prison, p2);
	sx_xunlock(&allproc_lock);

	sx_xlock(PIDHASHLOCK(p2->p_pid));
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	sx_xunlock(PIDHASHLOCK(p2->p_pid));
	/*
	 * Malloc things while we don't hold any locks.
	 */
	if (fr->fr_flags & RFSIGSHARE)
		newsigacts = NULL;
	else
		newsigacts = sigacts_alloc();

	if (fr->fr_flags & RFCFDG) {
		pd = pdinit(p1->p_pd, false);
	} else if (fr->fr_flags & RFFDG) {
		if (fr->fr_flags2 & FR2_SHARE_PATHS)
			pd = pdshare(p1->p_pd);
		else
			pd = pdcopy(p1->p_pd);
		fd = fdcopy(p1->p_fd);
	} else {
		if (fr->fr_flags2 & FR2_SHARE_PATHS)
			pd = pdcopy(p1->p_pd);
		else
			pd = pdshare(p1->p_pd);
		fd = fdshare(p1->p_fd);
		if (p1->p_fdtol == NULL)
			p1->p_fdtol = filedesc_to_leader_alloc(NULL, NULL,
			    p1->p_leader);
		if ((fr->fr_flags & RFTHREAD) != 0) {
			/*
			 * Shared file descriptor table, and shared
			 * process leaders.
			 */
			fdtol = filedesc_to_leader_share(p1->p_fdtol, p1->p_fd);
		} else {
			/*
			 * Shared file descriptor table, and different
			 * process leaders.
			 */
			fdtol = filedesc_to_leader_alloc(p1->p_fdtol,
			    p1->p_fd, p2);
		}
	}

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */

	bzero(&td2->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));

	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));

	bcopy(&p2->p_comm, &td2->td_name, sizeof(td2->td_name));
	td2->td_sigstk = td->td_sigstk;
	td2->td_flags = TDF_INMEM;
	td2->td_lend_user_pri = PRI_MAX;

	td2->td_vnet_lpush = NULL;
	/*
	 * Allow the scheduler to initialize the child.
	 */

	/*
	 * Request AST to check for TDP_RFPPWAIT.  Do it here
	 * to avoid calling thread_lock() again.
	 */
	if ((fr->fr_flags & RFPPWAIT) != 0)
		ast_sched_locked(td, TDA_VFORK);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 */
	p2->p_flag = P_INMEM;
	p2->p_flag2 = p1->p_flag2 & (P2_ASLR_DISABLE | P2_ASLR_ENABLE |
	    P2_ASLR_IGNSTART | P2_NOTRACE | P2_NOTRACE_EXEC |
	    P2_PROTMAX_ENABLE | P2_PROTMAX_DISABLE | P2_TRAPCAP |
	    P2_STKGAP_DISABLE | P2_STKGAP_DISABLE_EXEC | P2_NO_NEW_PRIVS |
	    P2_WXORX_DISABLE | P2_WXORX_ENABLE_EXEC);
	p2->p_swtick = ticks;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);

	if (fr->fr_flags & RFSIGSHARE) {
		p2->p_sigacts = sigacts_hold(p1->p_sigacts);
	} else {
		sigacts_copy(newsigacts, p1->p_sigacts);
		p2->p_sigacts = newsigacts;
		if ((fr->fr_flags2 & (FR2_DROPSIG_CAUGHT | FR2_KPROC)) != 0) {
			mtx_lock(&p2->p_sigacts->ps_mtx);
			if ((fr->fr_flags2 & FR2_DROPSIG_CAUGHT) != 0)
				sig_drop_caught(p2);
			if ((fr->fr_flags2 & FR2_KPROC) != 0)
				p2->p_sigacts->ps_flag |= PS_NOCLDWAIT;
			mtx_unlock(&p2->p_sigacts->ps_mtx);
		}
	}

	if (fr->fr_flags & RFTSIGZMB)
		p2->p_sigparent = RFTSIGNUM(fr->fr_flags);
	else if (fr->fr_flags & RFLINUXTHPN)
		p2->p_sigparent = SIGUSR1;
	else
		p2->p_sigparent = SIGCHLD;

	if ((fr->fr_flags2 & FR2_KPROC) != 0) {
		p2->p_flag |= P_SYSTEM | P_KPROC;
		td2->td_pflags |= TDP_KTHREAD;
	}
	p2->p_textvp = p1->p_textvp;
	p2->p_textdvp = p1->p_textdvp;

	if (p1->p_flag2 & P2_INHERIT_PROTECTED) {
		p2->p_flag |= P_PROTECTED;
		p2->p_flag2 |= P2_INHERIT_PROTECTED;
	}

	/*
	 * p_limit is copy-on-write.  Bump its refcount.
	 */
	thread_cow_get_proc(td2, p2);

	pstats_fork(p1->p_stats, p2->p_stats);

	/*
	 * Bump references to the text vnode and directory, and copy
	 * the cached binary name.
	 */
	if (p2->p_textvp != NULL)
		vrefact(p2->p_textvp);
	if (p2->p_textdvp != NULL)
		vrefact(p2->p_textdvp);
	p2->p_binname = p1->p_binname == NULL ? NULL :
	    strdup(p1->p_binname, M_PARGS);

	/*
	 * Set up linkage for kernel based threading.
	 */
	if ((fr->fr_flags & RFTHREAD) != 0) {
		mtx_lock(&ppeers_lock);
		p2->p_peers = p1->p_peers;
		p2->p_leader = p1->p_leader;
		mtx_unlock(&ppeers_lock);
		PROC_LOCK(p1->p_leader);
		if ((p1->p_leader->p_flag & P_WEXIT) != 0) {
			PROC_UNLOCK(p1->p_leader);
			/*
			 * The task leader is exiting, so process p1 is
			 * going to be killed shortly.  Since p1 obviously
			 * isn't dead yet, we know that the leader is either
			 * sending SIGKILL's to all the processes in this
			 * task or is sleeping waiting for all the peers to
			 * exit.  We let p1 complete the fork, but we need
			 * to go ahead and kill the new process p2 since
			 * the task leader may not get a chance to send
			 * SIGKILL to it.  We leave it on the list so that
			 * the task leader will wait for this new process
			 * to exit.
			 */
			kern_psignal(p2, SIGKILL);
		} else
			PROC_UNLOCK(p1->p_leader);
	sx_xlock(&proctree_lock);
	PGRP_LOCK(p1->p_pgrp);

	/*
	 * Preserve some more flags in subprocess.  P_PROFIL has already
	 * been preserved.
	 */
	p2->p_flag |= p1->p_flag & P_SUGID;
	td2->td_pflags |= (td->td_pflags & (TDP_ALTSTACK | TDP_SIGFASTBLOCK));
	SESS_LOCK(p1->p_session);
	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		p2->p_flag |= P_CONTROLT;
	SESS_UNLOCK(p1->p_session);
	if (fr->fr_flags & RFPPWAIT)
		p2->p_flag |= P_PPWAIT;

	p2->p_pgrp = p1->p_pgrp;
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	PGRP_UNLOCK(p1->p_pgrp);
	LIST_INIT(&p2->p_children);
	LIST_INIT(&p2->p_orphans);

	callout_init_mtx(&p2->p_itcallout, &p2->p_mtx, 0);
	TAILQ_INIT(&p2->p_kqtim_stop);
	/*
	 * This begins the section where we must prevent the parent
	 * from being swapped.
	 */

	/*
	 * Attach the new process to its parent.
	 *
	 * If RFNOWAIT is set, the newly created process becomes a child
	 * of init.  This effectively disassociates the child from the
	 * parent.
	 */
	if ((fr->fr_flags & RFNOWAIT) != 0) {
		pptr = p1->p_reaper;
		p2->p_reaper = pptr;
	} else {
		p2->p_reaper = (p1->p_treeflag & P_TREE_REAPER) != 0 ?
		    p1 : p1->p_reaper;
		pptr = p1;
	}
	p2->p_pptr = pptr;
	p2->p_oppid = pptr->p_pid;
	LIST_INSERT_HEAD(&pptr->p_children, p2, p_sibling);
	LIST_INIT(&p2->p_reaplist);
	LIST_INSERT_HEAD(&p2->p_reaper->p_reaplist, p2, p_reapsibling);
	if (p2->p_reaper == p1 && p1 != initproc) {
		p2->p_reapsubtree = p2->p_pid;
		proc_id_set_cond(PROC_ID_REAP, p2->p_pid);
	}
	sx_xunlock(&proctree_lock);

	/* Inform accounting that we have forked. */
	p2->p_acflag = AFORK;

	/*
	 * Finish creating the child process.  It will return via a different
	 * execution path later.  (i.e., directly into user mode)
	 */
	vm_forkproc(td, p2, td2, vm2, fr->fr_flags);

	if (fr->fr_flags == (RFFDG | RFPROC)) {
		VM_CNT_INC(v_forks);
		VM_CNT_ADD(v_forkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (fr->fr_flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) {
		VM_CNT_INC(v_vforks);
		VM_CNT_ADD(v_vforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else if (p1 == &proc0) {
		VM_CNT_INC(v_kthreads);
		VM_CNT_ADD(v_kthreadpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	} else {
		VM_CNT_INC(v_rforks);
		VM_CNT_ADD(v_rforkpages, p2->p_vmspace->vm_dsize +
		    p2->p_vmspace->vm_ssize);
	}
	/*
	 * Associate the process descriptor with the process before anything
	 * can happen that might cause that process to need the descriptor.
	 * However, don't do this until after fork(2) can no longer fail.
	 */
	if (fr->fr_flags & RFPROCDESC)
		procdesc_new(p2, fr->fr_pd_flags);

	/*
	 * Both processes are set up, now check if any loadable modules want
	 * to adjust anything.
	 */
	EVENTHANDLER_DIRECT_INVOKE(process_fork, p1, p2, fr->fr_flags);

	/*
	 * Set the child start time and mark the process as being complete.
	 */
	microuptime(&p2->p_stats->p_start);
	p2->p_state = PRS_NORMAL;

	/*
	 * Tell the DTrace fasttrap provider about the new process so that any
	 * tracepoints inherited from the parent can be removed.  We have to do
	 * this only after p_state is PRS_NORMAL since the fasttrap module will
	 * use pfind() later on.
	 */
	if ((fr->fr_flags & RFMEM) == 0 && dtrace_fasttrap_fork)
		dtrace_fasttrap_fork(p1, p2);

	if (fr->fr_flags & RFPPWAIT) {
		td->td_pflags |= TDP_RFPPWAIT;
		td->td_rfppwait_p = p2;
		td->td_dbgflags |= TDB_VFORK;
	}

	/*
	 * Tell any interested parties about the new process.
	 */
	knote_fork(p1->p_klist, p2->p_pid);

	/*
	 * Now can be swapped.
	 */
	SDT_PROBE3(proc, , , create, p2, p1, fr->fr_flags);
	if (fr->fr_flags & RFPROCDESC) {
		procdesc_finit(p2->p_procdesc, fp_procdesc);
		fdrop(fp_procdesc, td);
	}

	/*
	 * Speculative check for PTRACE_FORK.  PTRACE_FORK is not
	 * synced with forks in progress so it is OK if we miss it
	 * if this is not a vfork.
	 */
	if ((p1->p_ptevents & PTRACE_FORK) != 0) {
		sx_xlock(&proctree_lock);

		/*
		 * p1->p_ptevents & p1->p_pptr are protected by both
		 * process and proctree locks for modifications,
		 * so owning proctree_lock allows the race-free read.
		 */
		if ((p1->p_ptevents & PTRACE_FORK) != 0) {
			/*
			 * Arrange for debugger to receive the fork event.
			 *
			 * We can report PL_FLAG_FORKED regardless of
			 * P_FOLLOWFORK settings, but it does not make sense
			 * for the debugger to request a different behavior.
			 */
			td->td_dbgflags |= TDB_FORK;
			td->td_dbg_forked = p2->p_pid;
			td2->td_dbgflags |= TDB_STOPATFORK;
			proc_set_traced(p2, true);
			CTR2(KTR_PTRACE,
			    "do_fork: attaching to new child pid %d: oppid %d",
			    p2->p_pid, p2->p_oppid);
			proc_reparent(p2, p1->p_pptr, false);
		}
		sx_xunlock(&proctree_lock);
	}

	racct_proc_fork_done(p2);

	if ((fr->fr_flags & RFSTOPPED) == 0) {
		if (fr->fr_pidp != NULL)
			*fr->fr_pidp = p2->p_pid;
		/*
		 * If RFSTOPPED not requested, make child runnable and
		 * add it to the run queue.
		 */
		sched_add(td2, SRQ_BORING);
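/*
 * AST handler run by the vfork(2) parent on its way back to user mode:
 * it waits for the child to clear P_PPWAIT (in exec or exit) and then,
 * if requested, reports a PTRACE_VFORK stop to the debugger.
 */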
static void
ast_vfork(struct thread *td, int tda __unused)
{
	struct proc *p, *p2;

	MPASS(td->td_pflags & TDP_RFPPWAIT);

	p = td->td_proc;

	/*
	 * Preserve synchronization semantics of vfork.  If
	 * waiting for child to exec or exit, fork set
	 * P_PPWAIT on child, and there we sleep on our proc.
	 *
	 * Do it after the ptracestop() above is finished, to
	 * not block our debugger until child execs or exits
	 * to finish vfork wait.
	 */
	td->td_pflags &= ~TDP_RFPPWAIT;
	p2 = td->td_rfppwait_p;

	while (p2->p_flag & P_PPWAIT) {
		if (thread_suspend_check_needed()) {
			thread_suspend_check(0);
			/* ... */
		}
		cv_timedwait(&p2->p_pwait, &p2->p_mtx, hz);
	}

	if (td->td_dbgflags & TDB_VFORK) {
		if (p->p_ptevents & PTRACE_VFORK)
			ptracestop(td, SIGTRAP, NULL);
		td->td_dbgflags &= ~TDB_VFORK;
	}
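/*
 * fork1() is the common back end for fork(2), vfork(2), rfork(2),
 * pdfork(2), and kernel thread creation: it validates the request,
 * allocates the new proc, thread, vmspace, and process descriptor as
 * needed, and calls do_fork() once failure is no longer possible.
 */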
int
fork1(struct thread *td, struct fork_req *fr)
{
	struct proc *p1, *newproc;
	struct file *fp_procdesc;
	vm_ooffset_t mem_charged;
	int error, nprocs_new;
	static struct timeval lastfail;
	bool killsx_locked, singlethreaded;

	flags = fr->fr_flags;
	pages = fr->fr_pages;

	if ((flags & RFSTOPPED) != 0)
		MPASS(fr->fr_procp != NULL && fr->fr_pidp == NULL);
	else
		MPASS(fr->fr_procp == NULL);

	/* Check for the undefined or unimplemented flags. */
	if ((flags & ~(RFFLAGS | RFTSIGFLAGS(RFTSIGMASK))) != 0)
		return (EINVAL);

	/* Signal value requires RFTSIGZMB. */
	if ((flags & RFTSIGFLAGS(RFTSIGMASK)) != 0 && (flags & RFTSIGZMB) == 0)
		return (EINVAL);

	/* Can't copy and clear. */
	if ((flags & (RFFDG | RFCFDG)) == (RFFDG | RFCFDG))
		return (EINVAL);

	/* Check the validity of the signal number. */
	if ((flags & RFTSIGZMB) != 0 && (u_int)RFTSIGNUM(flags) > _SIG_MAXSIG)
		return (EINVAL);

	if ((flags & RFPROCDESC) != 0) {
		/* Can't get a process descriptor without creating a process. */
		if ((flags & RFPROC) == 0)
			return (EINVAL);

		/* Must provide a place to put a procdesc if creating one. */
		if (fr->fr_pd_fd == NULL)
			return (EINVAL);

		/* Check if we are using supported flags. */
		if ((fr->fr_pd_flags & ~PD_ALLOWED_AT_FORK) != 0)
			return (EINVAL);
	}

	/*
	 * Here we don't create a new process, but we divorce
	 * certain parts of a process from itself.
	 */
	if ((flags & RFPROC) == 0) {
		if (fr->fr_procp != NULL)
			*fr->fr_procp = NULL;
		else if (fr->fr_pidp != NULL)
			*fr->fr_pidp = 0;
		return (fork_norfproc(td, flags));
	}
	killsx_locked = false;
	singlethreaded = false;

	/*
	 * Increment the nprocs resource before allocations occur.
	 * Although process entries are dynamically created, we still
	 * keep a global limit on the maximum number we will
	 * create.  There are hard-limits as to the number of processes
	 * that can run, established by the KVA and memory usage for
	 * the process data.
	 *
	 * Don't allow a nonprivileged user to use the last ten
	 * processes; don't let root exceed the limit.
	 */
	nprocs_new = atomic_fetchadd_int(&nprocs, 1) + 1;
	if (nprocs_new >= maxproc - 10) {
		if (priv_check_cred(td->td_ucred, PRIV_MAXPROC) != 0 ||
		    nprocs_new >= maxproc) {
			sx_xlock(&allproc_lock);
			if (ppsratecheck(&lastfail, &curfail, 1)) {
				printf("maxproc limit exceeded by uid %u "
				    "(pid %d); see tuning(7) and "
				    "login.conf(5)\n",
				    td->td_ucred->cr_ruid, p1->p_pid);
			}
			sx_xunlock(&allproc_lock);
			/* ... */
		}
	}
	/*
	 * If we are possibly multi-threaded, and there is a process
	 * sending a signal to our group right now, ensure that our
	 * other threads cannot be chosen for the signal queueing.
	 * Otherwise, this might delay signal action, and make the new
	 * child escape the signaling.
	 */
	if (p1->p_numthreads > 1) {
		if (sx_try_slock(&pg->pg_killsx) != 0) {
			killsx_locked = true;
		} else {
			if (thread_single(p1, SINGLE_BOUNDARY)) {
				/* ... */
			}
			singlethreaded = true;
		}
	}

	/*
	 * Atomically check for signals and block processes from sending
	 * a signal to our process group until the child is visible.
	 */
	if (!killsx_locked && sx_slock_sig(&pg->pg_killsx) != 0) {
		/* ... */
	}
	if (__predict_false(p1->p_pgrp != pg || sig_intr() != 0)) {
		/*
		 * Either the process was moved to other process
		 * group, or there is pending signal.  sx_slock_sig()
		 * does not check for signals if not sleeping for the
		 * lock.
		 */
		sx_sunlock(&pg->pg_killsx);
		killsx_locked = false;
		/* ... */
	}
	killsx_locked = true;

	/*
	 * If required, create a process descriptor in the parent first; we
	 * will abandon it if something goes wrong.  We don't finit() until
	 * later.
	 */
	if (flags & RFPROCDESC) {
		error = procdesc_falloc(td, &fp_procdesc, fr->fr_pd_fd,
		    fr->fr_pd_flags, fr->fr_pd_fcaps);
		AUDIT_ARG_FD(*fr->fr_pd_fd);
	}
	if (pages == 0)
		pages = kstack_pages;

	/* Allocate new proc. */
	newproc = uma_zalloc(proc_zone, M_WAITOK);
	td2 = FIRST_THREAD_IN_PROC(newproc);
	if (td2 == NULL) {
		td2 = thread_alloc(pages);
		/* ... */
		proc_linkup(newproc, td2);
	} else {
		kmsan_thread_alloc(td2);
		if (td2->td_kstack == 0 || td2->td_kstack_pages != pages) {
			if (td2->td_kstack != 0)
				vm_thread_dispose(td2);
			if (!thread_alloc_stack(td2, pages)) {
				/* ... */
			}
		}
	}

	if ((flags & RFMEM) == 0) {
		vm2 = vmspace_fork(p1->p_vmspace, &mem_charged);
		/* ... */
		if (!swap_reserve(mem_charged)) {
			/*
			 * The swap reservation failed.  The accounting
			 * from the entries of the copied vm2 will be
			 * subtracted in vmspace_free(), so force the
			 * reservation there.
			 */
			swap_reserve_force(mem_charged);
			/* ... */
		}
	}
	/*
	 * XXX: This is ugly; when we copy resource usage, we need to bump
	 * per-cred resource counters.
	 */
	newproc->p_ucred = crcowget(td->td_ucred);

	/*
	 * Initialize resource accounting for the child process.
	 */
	error = racct_proc_fork(p1, newproc);

	mac_proc_init(newproc);

	newproc->p_klist = knlist_alloc(&newproc->p_mtx);
	STAILQ_INIT(&newproc->p_ktr);

	/*
	 * Increment the count of procs running with this uid.  Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	cred = td->td_ucred;
	if (!chgproccnt(cred->cr_ruidinfo, 1, lim_cur(td, RLIMIT_NPROC))) {
		if (priv_check_cred(cred, PRIV_PROC_LIMIT) != 0) {
			/* ... fail with EAGAIN ... */
		}
		chgproccnt(cred->cr_ruidinfo, 1, 0);
	}

	do_fork(td, fr, newproc, td2, vm2, fp_procdesc);
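	/*
	 * What follows is the error-unwinding and common cleanup code for
	 * fork1(): the allocations above are released only when one of the
	 * earlier steps failed, since do_fork() itself cannot fail.
	 */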
	mac_proc_destroy(newproc);
	racct_proc_exit(newproc);
	proc_unset_cred(newproc);
	uma_zfree(proc_zone, newproc);
	if ((flags & RFPROCDESC) != 0 && fp_procdesc != NULL) {
		fdclose(td, fp_procdesc, *fr->fr_pd_fd);
		fdrop(fp_procdesc, td);
	}
	atomic_add_int(&nprocs, -1);
	sx_sunlock(&pg->pg_killsx);
	if (singlethreaded) {
		thread_single_end(p1, SINGLE_BOUNDARY);
	}
	pause("fork", hz / 2);
/*
 * Handle the return of a child process from fork1().  This function
 * is called from the MD fork_trampoline() entry point.
 */
void
fork_exit(void (*callout)(void *, struct trapframe *), void *arg,
    struct trapframe *frame)
{
	struct thread *td, *dtd;
	struct proc *p;

	kmsan_mark(frame, sizeof(*frame), KMSAN_STATE_INITED);

	td = curthread;
	p = td->td_proc;
	KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));

	CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)",
	    td, td_get_sched(td), p->p_pid, td->td_name);

	sched_fork_exit(td);

	/*
	 * Processes normally resume in mi_switch() after being
	 * cpu_switch()'ed to, but when children start up they arrive here
	 * instead, so we must do much the same things as mi_switch() would.
	 */
	if ((dtd = PCPU_GET(deadthread))) {
		PCPU_SET(deadthread, NULL);
		thread_stash(dtd);
	}

	/*
	 * cpu_fork_kthread_handler intercepts this function call to
	 * have this call a non-return function to stay in kernel mode.
	 * initproc has its own fork handler, but it does return.
	 */
	KASSERT(callout != NULL, ("NULL callout in fork_exit"));
	callout(arg, frame);

	/*
	 * Check if a kernel thread misbehaved and returned from its main
	 * function.
	 */
	if (p->p_flag & P_KPROC) {
		printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n",
		    td->td_name, p->p_pid);
		kthread_exit();
	}
	mtx_assert(&Giant, MA_NOTOWNED);

	/*
	 * Now going to return to userland.
	 */
	if (p->p_sysent->sv_schedtail != NULL)
		(p->p_sysent->sv_schedtail)(td);
/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  This function is passed in to fork_exit()
 * as the first parameter and is called when returning to a new
 * userland process.
 */
void
fork_return(struct thread *td, struct trapframe *frame)
{
	struct proc *p;

	p = td->td_proc;
	if (td->td_dbgflags & TDB_STOPATFORK) {
		if ((p->p_flag & P_TRACED) != 0) {
			/*
			 * Inform the debugger if one is still present.
			 */
			td->td_dbgflags |= TDB_CHILD | TDB_SCX | TDB_FSTP;
			ptracestop(td, SIGSTOP, NULL);
			td->td_dbgflags &= ~(TDB_CHILD | TDB_SCX);
		} else {
			/*
			 * ... otherwise clear the request.
			 */
			td->td_dbgflags &= ~TDB_STOPATFORK;
		}
	} else if (p->p_flag & P_TRACED) {
		/*
		 * This is the start of a new thread in a traced
		 * process.  Report a system call exit event.
		 */
		td->td_dbgflags |= TDB_SCX;
		if ((p->p_ptevents & PTRACE_SCX) != 0 ||
		    (td->td_dbgflags & TDB_BORN) != 0)
			ptracestop(td, SIGTRAP, NULL);
		td->td_dbgflags &= ~(TDB_SCX | TDB_BORN);
	}

	/*
	 * If the prison was killed mid-fork, die along with it.
	 */
	if (!prison_isalive(td->td_ucred->cr_prison))
		exit1(td, 0, SIGKILL);

	if (KTRPOINT(td, KTR_SYSRET))
		ktrsysret(td->td_sa.code, 0, 0);
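/*
 * Register the TDA_VFORK AST handler at boot so that ast_vfork() runs for
 * any thread that still has TDP_RFPPWAIT set when returning to user mode.
 */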
static void
fork_init(void *arg __unused)
{
	ast_register(TDA_VFORK, ASTR_ASTF_REQUIRED | ASTR_TDP, TDP_RFPPWAIT,
	    ast_vfork);
}
SYSINIT(fork, SI_SUB_INTRINSIC, SI_ORDER_ANY, fork_init, NULL);