/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/turnstile.h>
#include <sys/umtx.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
/*
 * Thread related storage.
 */
static uma_zone_t thread_zone;

SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

static int thread_debug = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, debug, CTLFLAG_RW,
	&thread_debug, 0, "thread debug");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");

int max_groups_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
	&max_groups_per_proc, 0, "Limit on thread groups per proc");

int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
	&max_threads_hits, 0, "");
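
/*
 * Threads and ksegrps cannot free themselves while they are still running,
 * since their stack and context are in use right up to the final switch
 * away from them. Instead they are parked on these zombie queues and
 * reclaimed later by thread_reap(), typically on the next thread_alloc().
 */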
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
struct mtx kse_zombie_lock;
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);

static int virtual_cpu;

static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	if (virtual_cpu == 0)
		new_val = mp_ncpus;
	else
		new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
	0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
	"debug virtual cpus");

static struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = alloc_unr(tid_unrhdr);

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch. A context switch must occur inside a
	 * critical section, and in fact, includes hand-off of the sched_lock.
	 * After a context switch to a newly created thread, it will release
	 * sched_lock for the first time, and its td_critnest will hit 0 for
	 * the first time. This happens on the far end of a context switch,
	 * and when it context switches away from itself, it will in fact go
	 * back into a critical section, and hand off the sched lock to the
	 * next thread.
	 */
	td->td_critnest = 1;

#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
	case TDS_RUNNING:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
#ifdef AUDIT
	audit_thread_free(td);
#endif
	free_unr(tid_unrhdr, td->td_tid);
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;

	vm_thread_new(td, 0);
	cpu_thread_setup(td);
	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	td->td_sched = (struct td_sched *)&td[1];
	umtx_thread_init(td);
	return (0);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
	vm_thread_dispose(td);
}
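
/*
 * Note on the UMA lifecycle above: thread_init()/thread_fini() run only
 * when an item first enters or finally leaves the zone, so type-stable
 * pieces such as the kernel stack, sleep queue and turnstile survive
 * ordinary frees, while thread_ctor()/thread_dtor() run on every
 * allocation and free of a thread.
 */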

/*
 * Initialize type-stable parts of a ksegrp (when newly created).
 */
static int
ksegrp_ctor(void *mem, int size, void *arg, int flags)
{
	struct ksegrp *kg;

	kg = (struct ksegrp *)mem;
	bzero(mem, size);
	kg->kg_sched = (struct kg_sched *)&kg[1];
	return (0);
}

void
ksegrp_link(struct ksegrp *kg, struct proc *p)
{
	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_upcalls);	/* all upcall structures in ksegrp */
	kg->kg_proc = p;
	/*
	 * The following counters are in the -zero- section
	 * and may not need clearing.
	 */
	kg->kg_numthreads = 0;
	kg->kg_numupcalls = 0;
	/* link it in now that it's consistent */
	p->p_numksegrps++;
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
}

void
ksegrp_unlink(struct ksegrp *kg)
{
	struct proc *p;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((kg->kg_numthreads == 0), ("ksegrp_unlink: residual threads"));
	KASSERT((kg->kg_numupcalls == 0), ("ksegrp_unlink: residual upcalls"));

	p = kg->kg_proc;
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	p->p_numksegrps--;
}

/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * Called from:
 *  {arch}/{arch}/machdep.c   ia64_init(), init386() etc.
 *  proc_dtor() (should go away)
 *  proc_init()
 */
void
proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td)
{
	TAILQ_INIT(&p->p_ksegrps);	/* all ksegrps in proc */
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	/* Threads suspended */
	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numksegrps = 0;
	p->p_numthreads = 0;

	ksegrp_link(kg, p);
	thread_link(td, kg);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{
	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	tid_unrhdr = new_unrhdr(PID_MAX + 1, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    ksegrp_ctor, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);
	kseinit();	/* set up kse specific stuff, e.g. the upcall zone */
}
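
/*
 * Thread IDs are drawn from a unit-number allocator that starts above
 * PID_MAX, so a TID can never collide with any process ID.
 */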

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
 */
void
ksegrp_stash(struct ksegrp *kg)
{
	mtx_lock_spin(&kse_zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&kse_zombie_lock);
}

/*
 * Reap zombie kse resources.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;
	struct ksegrp *kg_first, *kg_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant.
	 */
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))) {
		mtx_lock_spin(&kse_zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		if (kg_first)
			TAILQ_INIT(&zombie_ksegrps);
		mtx_unlock_spin(&kse_zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_runq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
		while (kg_first) {
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
			kg_first = kg_next;
		}
		/*
		 * there will always be a thread on the list if one of these
		 * is there.
		 */
		kse_GC();
	}
}

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap();	/* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *kg)
{
	uma_zfree(ksegrp_zone, kg);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{
	cpu_thread_clean(td);
	uma_zfree(thread_zone, td);
}
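
/*
 * Note that thread_free() does not release td_ucred; callers such as
 * thread_reap() and thread_wait() drop that reference with crfree()
 * before freeing the thread.
 */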

/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw(). This may not be needed now as we are under schedlock.
 * Maybe we can just do a thread_stash() as thr_exit1 does.
 *
 * XXX libthr expects its thread exit to return for the last
 * thread, meaning that the program is back to non-threaded
 * mode at that point. Because we do this (cpu_throw) unconditionally
 * here, libthr has its own version (thr_exit1()) that doesn't
 * do it all if this was the last thread.
 * It is also called from thread_suspend_check().
 * Of course in the end, they end up coming here through exit1()
 * anyhow. After fixing 'thr' to play by the rules we should be able
 * to merge these two functions together.
 *
 * Called from:
 *  exit1()
 *  kse_exit()
 *  thr_exit()
 *  thread_user_enter()
 *  thread_userret()
 *  thread_suspend_check()
 */
void
thread_exit(void)
{
	uint64_t new_switchtime;
	struct thread *td;
	struct proc *p;
	struct ksegrp *kg;

	td = curthread;
	p = td->td_proc;
	kg = td->td_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, p->p_comm);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));

#ifdef AUDIT
	AUDIT_SYSCALL_EXIT(0, td);
#endif

	if (td->td_standin != NULL) {
		/*
		 * Note that we don't need to free the cred here as it
		 * is done in thread_reap().
		 */
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}

	umtx_thread_exit(td);

	/*
	 * Drop FPU and debug register state storage, or any other
	 * architecture-specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The thread is exiting. The scheduler can release its stuff
	 * and collect stats, etc.
	 * XXX this is not very right, since PROC_UNLOCK may still
	 * need scheduler stuff.
	 */
	sched_thread_exit(td);

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
	p->p_rux.rux_uticks += td->td_uticks;
	p->p_rux.rux_sticks += td->td_sticks;
	p->p_rux.rux_iticks += td->td_iticks;
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);

	/* Add our usage into the usage of all our children. */
	if (p->p_numthreads == 1)
		ruadd(p->p_ru, &p->p_rux, &p->p_stats->p_cru, &p->p_crux);

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			thread_unlink(td);

			/* XXX first arg not used in 4BSD or ULE */
			sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_unsuspend_one(p->p_singlethread);
				}
			}

			/*
			 * Because each upcall structure has an owner thread,
			 * and owner threads exit only when the process is
			 * exiting, an upcall to userland is no longer needed
			 * and deleting the upcall structure is safe here.
			 * So when all threads in a group have exited, all
			 * upcalls in the group should be automatically freed.
			 * XXXKSE This is a KSE thing and should be exported
			 * there somehow.
			 */
			upcall_remove(td);

			/*
			 * If the thread we unlinked above was the last one,
			 * then this ksegrp should go away too.
			 */
			if (kg->kg_numthreads == 0) {
				/*
				 * Let the scheduler know about this in case
				 * it needs to recover stats or resources.
				 * Theoretically we could let
				 * sched_exit_ksegrp() do the equivalent of
				 * setting the concurrency to 0
				 * but don't do it yet to avoid changing
				 * the existing scheduler code until we
				 * are ready for it.
				 * We supply a random other ksegrp
				 * as the recipient of any built up
				 * cpu usage etc. (If the scheduler wants it).
				 * This is probably not fair so think of
				 * a better answer.
				 */
				sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
				sched_set_concurrency(kg, 0); /* XXX TEMP */
				ksegrp_unlink(kg);
				ksegrp_stash(kg);
			}
			PROC_UNLOCK(p);
			td->td_ksegrp = NULL;
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting.. but not through exit().
			 * Theoretically this can't happen:
			 *  exit1() - clears threading flags before coming here
			 *  kse_exit() - treats last thread specially
			 *  thr_exit() - treats last thread specially
			 *  thread_user_enter() - only if more exist
			 *  thread_userret() - only if more exist
			 *  thread_suspend_check() - only if more exist
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	} else {
		/*
		 * A non-threaded process comes here.
		 * This includes an ex-threaded process that is coming
		 * here via exit1(). (exit1 dethreads the proc first).
		 */
		PROC_UNLOCK(p);
	}
	td->td_state = TDS_INACTIVE;
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	cpu_throw(td, choosethread());
	panic("I'm a teapot!");
	/* NOTREACHED */
}
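
/*
 * cpu_throw() above never returns: the dying thread's context is abandoned
 * and the chosen successor runs. The dead thread itself is freed later,
 * either from the PCPU deadthread slot or via the zombie queue and
 * thread_reap().
 */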

/*
 * Do any thread-specific cleanups that may be needed in wait().
 * Called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_standin != NULL) {
			if (td->td_standin->td_ucred != NULL) {
				crfree(td->td_standin->td_ucred);
				td->td_standin->td_ucred = NULL;
			}
			thread_free(td->td_standin);
			td->td_standin = NULL;
		}
		cpu_thread_clean(td);
		crfree(td->td_ucred);
	}
	thread_reap();	/* check for zombie threads etc. */
}
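
/*
 * By the time thread_wait() runs, the process has already been through
 * exit1(), so only the single remaining thread (and its spare standin,
 * if any) is left to clean up here.
 */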

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 *
 * Called from:
 *  proc_linkup()
 *  thread_schedule_upcall()
 *  thr_create()
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_ksegrp = kg;
	td->td_flags = 0;

	LIST_INIT(&td->td_contested);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
}

/*
 * Convert a process with one thread to an unthreaded process.
 * Called from:
 *  thread_single(exit)  (called from execve and exit)
 *  kse_exit()  XXX may need cleaning up wrt KSE stuff
 */
void
thread_unthread(struct thread *td)
{
	struct proc *p = td->td_proc;

	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
	upcall_remove(td);
	p->p_flag &= ~(P_SA|P_HADTHREADS);
	td->td_mailbox = NULL;
	td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	}
	sched_set_concurrency(td->td_ksegrp, 1);
}

/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ksegrp *kg = td->td_ksegrp;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
	kg->kg_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc and ksegrp! */
}
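
/*
 * The proc and ksegrp back-pointers must survive thread_unlink() because
 * thread_exit() continues to use td->td_proc and td->td_ksegrp after the
 * thread has been taken off the lists.
 */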

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = td;
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else
		remaining = p->p_numthreads - p->p_suspcount;
	while (remaining != 1) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					if (td2->td_flags & TDF_DBSUSPEND)
						td2->td_flags &= ~TDF_DBSUSPEND;
					if (TD_IS_SUSPENDED(td2))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2, EINTR);
					break;
				case SINGLE_BOUNDARY:
					if (TD_IS_SUSPENDED(td2) &&
					    !(td2->td_flags & TDF_BOUNDARY))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2, ERESTART);
					break;
				default:
					if (TD_IS_SUSPENDED(td2))
						continue;
					/*
					 * maybe other inhibited states too?
					 */
					if ((td2->td_flags & TDF_SINTR) &&
					    (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED)))
						thread_suspend_one(td2);
					break;
				}
			}
#ifdef SMP
			else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
			}
#endif
		}
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;

		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if (remaining == 1)
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		 */
		thread_suspend_one(td);
		PROC_UNLOCK(p);
		mi_switch(SW_VOL, NULL);
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		mtx_lock_spin(&sched_lock);
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec. In either case,
		 * we try our utmost to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	mtx_unlock_spin(&sched_lock);
	return (0);
}
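
/*
 * In SINGLE_EXIT mode the process leaves thread_single() effectively
 * unthreaded: thread_unthread() above has cleared the P_SA and
 * P_HADTHREADS flags and reset the concurrency to 1.
 */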

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is non zero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       | immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    | immediately
 *
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/* If thread will exit, flush its pending signals */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);

		mtx_lock_spin(&sched_lock);
		thread_stopped(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount)
				thread_unsuspend_one(p->p_singlethread);
		}
		PROC_UNLOCK(p);
		mi_switch(SW_INVOL, NULL);
		if (return_instead == 0) {
			p->p_boundary_count--;
			td->td_flags &= ~TDF_BOUNDARY;
		}
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
	}
	return (0);
}

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
}

void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * so let the single threader continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

/*
 * End the single threading mode.
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	mtx_lock_spin(&sched_lock);
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	}
	mtx_unlock_spin(&sched_lock);
}

struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	mtx_unlock_spin(&sched_lock);
	return (td);
}
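
/*
 * The returned thread (or NULL if no TID matched) remains valid for as
 * long as the caller keeps the process locked, since thread_exit() itself
 * requires the process lock and so no thread can finish exiting in the
 * meantime.
 */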