/*-
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/sched.h>
#include <sys/sleepqueue.h>
#include <sys/selinfo.h>
#include <sys/turnstile.h>
#include <sys/ktr.h>
#include <sys/umtx.h>
#include <sys/cpuset.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <sys/eventhandler.h>
/*
 * thread related storage.
 */
static uma_zone_t thread_zone;
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "Times the per-process thread limit was hit");
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);

static void thread_zombie(struct thread *);
static int virtual_cpu;

static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
	int error, new_val;

	new_val = virtual_cpu;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val < 0)
		return (EINVAL);
	virtual_cpu = new_val;
	return (0);
}

SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
    0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
    "debug virtual cpus");
static struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;

/*
 * Prepare a thread for use.
 */
static int
thread_ctor(void *mem, int size, void *arg, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_oncpu = NOCPU;

	td->td_tid = alloc_unr(tid_unrhdr);

	/*
	 * Note that td_critnest begins life as 1 because the thread is not
	 * running and is thereby implicitly waiting to be on the receiving
	 * end of a context switch.
	 */
	td->td_critnest = 1;
	EVENTHANDLER_INVOKE(thread_ctor, td);
#ifdef AUDIT
	audit_thread_alloc(td);
#endif
	umtx_thread_alloc(td);
	return (0);
}
/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	td = (struct thread *)mem;

	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
	case TDS_RUNNING:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
	}
#ifdef AUDIT
	audit_thread_free(td);
#endif
	EVENTHANDLER_INVOKE(thread_dtor, td);
	free_unr(tid_unrhdr, td->td_tid);
}
/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static int
thread_init(void *mem, int size, int flags)
{
	struct thread *td;

	td = (struct thread *)mem;
	td->td_sleepqueue = sleepq_alloc();
	td->td_turnstile = turnstile_alloc();
	EVENTHANDLER_INVOKE(thread_init, td);
	td->td_sched = (struct td_sched *)&td[1];
	umtx_thread_init(td);
	return (0);
}
/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	td = (struct thread *)mem;
	EVENTHANDLER_INVOKE(thread_fini, td);
	turnstile_free(td->td_turnstile);
	sleepq_free(td->td_sleepqueue);
	umtx_thread_fini(td);
}
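/*
 * A note on the four hooks above (general UMA behavior, not specific to
 * this file): thread_ctor()/thread_dtor() run on every uma_zalloc()/
 * uma_zfree(), while thread_init()/thread_fini() run only when an item
 * is first created in, or finally reclaimed from, the zone's type-stable
 * backing store.  That is why the sleepqueue and turnstile allocated in
 * thread_init() survive ordinary thread_free()/thread_alloc() cycles.
 */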
/*
 * For a newly created process,
 * link up all the structures and its initial threads etc.
 * called from:
 * {arch}/{arch}/machdep.c   ia64_init(), init386() etc.
 * proc_dtor() (should go away)
 */
void
proc_linkup0(struct proc *p, struct thread *td)
{

	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	proc_linkup(p, td);
}

void
proc_linkup(struct proc *p, struct thread *td)
{

	TAILQ_INIT(&p->p_upcalls);	/* upcall list */
	sigqueue_init(&p->p_sigqueue, p);
	p->p_ksi = ksiginfo_alloc(1);
	if (p->p_ksi != NULL) {
		/* XXX p_ksi may be null if ksiginfo zone is not ready */
		p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
	}
	LIST_INIT(&p->p_mqnotifier);
	p->p_numthreads = 0;
	thread_link(td, p);
}
/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	mtx_init(&tid_lock, "TID lock", NULL, MTX_DEF);
	/* leave one number for thread0 */
	tid_unrhdr = new_unrhdr(PID_MAX + 2, INT_MAX, &tid_lock);

	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    16 - 1, 0);
	kseinit();	/* set up kse specific stuff e.g. upcall zone */
}
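/*
 * TID allocation note: the unit number space deliberately starts above
 * the PID range so that a TID can never collide with a PID.  With the
 * stock PID_MAX of 99999, the reserved number (PID_MAX + 1 == 100000)
 * is the one left for thread0, and the first TID handed out by
 * alloc_unr() above is 100001.
 */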
/*
 * Place an unused thread on the zombie list.
 * Use the slpq as that must be unused by now.
 */
static void
thread_zombie(struct thread *td)
{

	mtx_lock_spin(&zombie_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
	mtx_unlock_spin(&zombie_lock);
}
/*
 * Release a thread that has exited after cpu_throw().
 */
void
thread_stash(struct thread *td)
{

	atomic_subtract_rel_int(&td->td_proc->p_exitthreads, 1);
	thread_zombie(td);
}
/*
 * Reap zombie threads.
 */
void
thread_reap(void)
{
	struct thread *td_first, *td_next;

	/*
	 * Don't even bother to lock if none at this instant,
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		if (td_first)
			TAILQ_INIT(&zombie_threads);
		mtx_unlock_spin(&zombie_lock);
		while (td_first) {
			td_next = TAILQ_NEXT(td_first, td_slpq);
			if (td_first->td_ucred)
				crfree(td_first->td_ucred);
			thread_free(td_first);
			td_first = td_next;
		}
	}
}
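/*
 * How the zombie list is used (sketch): an exiting thread cannot free
 * its own stack while it is still running on it, so teardown is split
 * across two contexts:
 *
 *	thread_zombie(td);	(from the dying thread's context)
 *	...
 *	thread_reap();		(later, from any other thread, e.g. on
 *				 entry to thread_alloc())
 */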
/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	struct thread *td;

	thread_reap();	/* check if any zombies to get */

	td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
	KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
	if (!vm_thread_new(td, 0)) {
		uma_zfree(thread_zone, td);
		return (NULL);
	}
	cpu_thread_alloc(td);
	return (td);
}
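/*
 * Typical use (hypothetical caller, in the style of thr_create(); the
 * exact locking protocol is elided here):
 *
 *	newtd = thread_alloc();
 *	if (newtd == NULL)
 *		return (ENOMEM);	kstack allocation failed
 *	...copy/initialize register state, ucred, etc...
 *	thread_link(newtd, p);
 */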
/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{

	if (td->td_cpuset != NULL)
		cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_free(td);
	if (td->td_altkstack != 0)
		vm_thread_dispose_altkstack(td);
	if (td->td_kstack != 0)
		vm_thread_dispose(td);
	uma_zfree(thread_zone, td);
}
/*
 * Discard the current thread and exit from its context.
 * Always called with scheduler locked.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our CPU's deadthread holder. This means
 * we needn't worry about someone else grabbing our context before we
 * do a cpu_throw().  This may not be needed now as we are under schedlock.
 * Maybe we can just do a thread_stash() as thr_exit1 does.
 *
 * libthr expects its thread exit to return for the last
 * thread, meaning that the program is back to non-threaded
 * mode I guess.  Because we do this (cpu_throw) unconditionally
 * here, they have their own version of it (thr_exit1())
 * that doesn't do it all if this was the last thread.
 * It is also called from thread_suspend_check().
 * Of course in the end, they end up coming here through exit1()
 * anyhow.  After fixing 'thr' to play by the rules we should be able
 * to merge these two functions together.
 *
 * Called from:
 *  thread_user_enter()
 *  thread_suspend_check()
 */
void
thread_exit(void)
{
	uint64_t new_switchtime;
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	mtx_assert(&Giant, MA_NOTOWNED);

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
	    (long)p->p_pid, td->td_name);
	KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
	AUDIT_SYSCALL_EXIT(0, td);

	if (td->td_standin != NULL) {
		/*
		 * Note that we don't need to free the cred here as it
		 * is done in thread_reap().
		 */
		thread_zombie(td->td_standin);
		td->td_standin = NULL;
	}
	umtx_thread_exit(td);

	/*
	 * Drop FPU & debug register state storage, or any other
	 * architecture specific resources that
	 * would not be on a new untouched process.
	 */
	cpu_thread_exit(td);	/* XXXSMP */

	/* Do the same timestamp bookkeeping that mi_switch() would do. */
	new_switchtime = cpu_ticks();
	p->p_rux.rux_runtime += (new_switchtime - PCPU_GET(switchtime));
	PCPU_SET(switchtime, new_switchtime);
	PCPU_SET(switchticks, ticks);
	PCPU_INC(cnt.v_swtch);
	/* Save our resource usage in our process. */
	td->td_ru.ru_nvcsw++;
	rucollect(&p->p_ru, &td->td_ru);
	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all this stuff if we never had threads.
	 * EXIT clears all sign of other threads when
	 * it goes to single threading, so the last thread always
	 * takes the short path.
	 */
	if (p->p_flag & P_HADTHREADS) {
		if (p->p_numthreads > 1) {
			thread_lock(td);
			thread_unlink(td);
			thread_unlock(td);
			td2 = FIRST_THREAD_IN_PROC(p);
			sched_exit_thread(td2, td);

			/*
			 * The test below is NOT true if we are the
			 * sole exiting thread. P_STOPPED_SINGLE is unset
			 * in exit1() after it is the only survivor.
			 */
			if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
				if (p->p_numthreads == p->p_suspcount) {
					thread_lock(p->p_singlethread);
					thread_unsuspend_one(p->p_singlethread);
					thread_unlock(p->p_singlethread);
				}
			}

			atomic_add_int(&td->td_proc->p_exitthreads, 1);
			PCPU_SET(deadthread, td);
		} else {
			/*
			 * The last thread is exiting.. but not through exit().
			 * Theoretically this can't happen:
			 *  exit1() - clears threading flags before coming here
			 *  kse_exit() - treats last thread specially
			 *  thr_exit() - treats last thread specially
			 *  thread_user_enter() - only if more exist
			 *  thread_userret() - only if more exist
			 *  thread_suspend_check() - only if more exist
			 */
			panic("thread_exit: Last thread exiting on its own");
		}
	}
	PROC_UNLOCK(p);
	thread_lock(td);
	/* Save our tick information with both the thread and proc locked */
	ruxagg(&p->p_rux, td);
	PROC_SUNLOCK(p);
	td->td_state = TDS_INACTIVE;
	CTR1(KTR_PROC, "thread_exit: cpu_throw() thread %p", td);
	sched_throw(td);
	panic("I'm a teapot!");
	/* NOTREACHED */
}
/*
 * Do any thread specific cleanups that may be needed in wait()
 * called with Giant, proc and schedlock not held.
 */
void
thread_wait(struct proc *p)
{
	struct thread *td;

	mtx_assert(&Giant, MA_NOTOWNED);
	KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
	td = FIRST_THREAD_IN_PROC(p);
	if (td->td_standin != NULL) {
		if (td->td_standin->td_ucred != NULL) {
			crfree(td->td_standin->td_ucred);
			td->td_standin->td_ucred = NULL;
		}
		thread_free(td->td_standin);
		td->td_standin = NULL;
	}
	/* Lock the last thread so we spin until it exits cpu_throw(). */
	thread_lock(td);
	thread_unlock(td);
	/* Wait for any remaining threads to exit cpu_throw(). */
	while (p->p_exitthreads)
		sched_relinquish(curthread);
	cpuset_rel(td->td_cpuset);
	td->td_cpuset = NULL;
	cpu_thread_clean(td);
	crfree(td->td_ucred);
	thread_reap();	/* check for zombie threads etc. */
}
/*
 * Link a thread to a process.
 * set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 * Called from:
 *  proc_linkup()
 *  thread_schedule_upcall()
 */
void
thread_link(struct thread *td, struct proc *p)
{

	/*
	 * XXX This can't be enabled because it's called for proc0 before
	 * its spinlock has been created.
	 * PROC_SLOCK_ASSERT(p, MA_OWNED);
	 */
	td->td_state    = TDS_INACTIVE;
	td->td_proc     = p;
	td->td_flags    = TDF_INMEM;

	LIST_INIT(&td->td_contested);
	LIST_INIT(&td->td_lprof[0]);
	LIST_INIT(&td->td_lprof[1]);
	sigqueue_init(&td->td_sigqueue, p);
	callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	p->p_numthreads++;
}
/*
 * Convert a process with one thread to an unthreaded process.
 * Called from:
 *  thread_single(exit)  (called from execve and exit)
 *  kse_exit() XXX may need cleaning up wrt KSE stuff
 */
void
thread_unthread(struct thread *td)
{
	struct proc *p = td->td_proc;

	KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
#ifdef KSE
	p->p_flag &= ~(P_SA|P_HADTHREADS);
	td->td_mailbox = NULL;
	td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
	if (td->td_standin != NULL) {
		thread_zombie(td->td_standin);
		td->td_standin = NULL;
	}
#else
	p->p_flag &= ~P_HADTHREADS;
#endif
}
/*
 * Called from:
 *  thread_exit()
 */
void
thread_unlink(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	TAILQ_REMOVE(&p->p_threads, td, td_plist);
	p->p_numthreads--;
	/* could clear a few other things here */
	/* Must NOT clear links to proc! */
}
/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may
 * however be accelerated in reaching the user boundary as we will wake
 * up any sleeping threads that are interruptible (PCATCH).
 */
int
thread_single(int mode)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;
	int remaining;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_HADTHREADS) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread != NULL && p->p_singlethread != td)
		return (1);

	if (mode == SINGLE_EXIT) {
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_BOUNDARY;
	} else {
		p->p_flag &= ~P_SINGLE_EXIT;
		if (mode == SINGLE_BOUNDARY)
			p->p_flag |= P_SINGLE_BOUNDARY;
		else
			p->p_flag &= ~P_SINGLE_BOUNDARY;
	}
	p->p_flag |= P_STOPPED_SINGLE;
	PROC_SLOCK(p);
	p->p_singlethread = td;
	if (mode == SINGLE_EXIT)
		remaining = p->p_numthreads;
	else if (mode == SINGLE_BOUNDARY)
		remaining = p->p_numthreads - p->p_boundary_count;
	else
		remaining = p->p_numthreads - p->p_suspcount;
	while (remaining != 1) {
		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
			goto stopme;
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			thread_lock(td2);
			td2->td_flags |= TDF_ASTPENDING;
			if (TD_IS_INHIBITED(td2)) {
				switch (mode) {
				case SINGLE_EXIT:
					if (td->td_flags & TDF_DBSUSPEND)
						td->td_flags &= ~TDF_DBSUSPEND;
					if (TD_IS_SUSPENDED(td2))
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR))
						sleepq_abort(td2, EINTR);
					break;
				case SINGLE_BOUNDARY:
					break;
				default:
					if (TD_IS_SUSPENDED(td2)) {
						thread_unlock(td2);
						continue;
					}
					/*
					 * maybe other inhibited states too?
					 */
					if ((td2->td_flags & TDF_SINTR) &&
					    (td2->td_inhibitors &
					    (TDI_SLEEPING | TDI_SWAPPED)))
						thread_suspend_one(td2);
					break;
				}
			}
#ifdef SMP
			else if (TD_IS_RUNNING(td2) && td != td2) {
				forward_signal(td2);
			}
#endif
			thread_unlock(td2);
		}
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;

		/*
		 * Maybe we suspended some threads.. was it enough?
		 */
		if (remaining == 1)
			break;

stopme:
		/*
		 * Wake us up when everyone else has suspended.
		 * In the mean time we suspend as well.
		 */
		thread_suspend_switch(td);
		if (mode == SINGLE_EXIT)
			remaining = p->p_numthreads;
		else if (mode == SINGLE_BOUNDARY)
			remaining = p->p_numthreads - p->p_boundary_count;
		else
			remaining = p->p_numthreads - p->p_suspcount;
	}
	if (mode == SINGLE_EXIT) {
		/*
		 * We have gotten rid of all the other threads and we
		 * are about to either exit or exec. In either case,
		 * we try our utmost to revert to being a non-threaded
		 * process.
		 */
		p->p_singlethread = NULL;
		p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT);
		thread_unthread(td);
	}
	PROC_SUNLOCK(p);
	return (0);
}
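/*
 * Example (hypothetical caller, in the style of execve(); exact error
 * handling varies):
 *
 *	PROC_LOCK(p);
 *	if (p->p_flag & P_HADTHREADS) {
 *		if (thread_single(SINGLE_BOUNDARY)) {
 *			PROC_UNLOCK(p);
 *			return (ERESTART);	another single-threader won
 *		}
 *	}
 *	...no other thread can now be running in user mode...
 *	thread_single_end();	(on failure paths that keep the process)
 *	PROC_UNLOCK(p);
 */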
/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is nonzero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          |   returns 0 or 1
 *               | when ST ends       |   immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       |   returns 1
 *               |                    |   immediately
 *
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	mtx_assert(&Giant, MA_NOTOWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p) ||
	    ((p->p_flag & P_TRACED) && (td->td_flags & TDF_DBSUSPEND))) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if ((p->p_flag & P_SINGLE_EXIT) && return_instead)
			return (EINTR);

		/* Should we goto user boundary if we didn't come from there? */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE &&
		    (p->p_flag & P_SINGLE_BOUNDARY) && return_instead)
			return (ERESTART);

		/* If thread will exit, flush its pending signals */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			sigqueue_flush(&td->td_sigqueue);
		PROC_SLOCK(p);
		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td))
			thread_exit();
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount + 1) {
				thread_lock(p->p_singlethread);
				thread_unsuspend_one(p->p_singlethread);
				thread_unlock(p->p_singlethread);
			}
		}
		PROC_UNLOCK(p);
		thread_lock(td);
		/*
		 * When a thread suspends, it just
		 * gets taken off all queues.
		 */
		thread_suspend_one(td);
		if (return_instead == 0) {
			p->p_boundary_count++;
			td->td_flags |= TDF_BOUNDARY;
		}
		PROC_SUNLOCK(p);
		mi_switch(SW_INVOL, NULL);
		if (return_instead == 0)
			td->td_flags &= ~TDF_BOUNDARY;
		thread_unlock(td);
		PROC_LOCK(p);
		if (return_instead == 0)
			p->p_boundary_count--;
	}
	return (0);
}
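/*
 * Example (hypothetical, modeled on userret()): a thread about to return
 * to user mode throttles itself at the user boundary:
 *
 *	PROC_LOCK(p);
 *	thread_suspend_check(0);	may suspend, or never return if
 *					P_SINGLE_EXIT was requested
 *	PROC_UNLOCK(p);
 *
 * A sleep point deep inside a syscall would instead pass
 * return_instead = 1 and back out with EINTR/ERESTART so that it can
 * unwind to that boundary itself.
 */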
void
thread_suspend_switch(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * We implement thread_suspend_one in stages here to avoid
	 * dropping the proc lock while the thread lock is owned.
	 */
	p->p_suspcount++;
	PROC_UNLOCK(p);
	thread_lock(td);
	TD_SET_SUSPENDED(td);
	PROC_SUNLOCK(p);
	mi_switch(SW_VOL, NULL);
	thread_unlock(td);
	PROC_LOCK(p);
	PROC_SLOCK(p);
}
void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
}
void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}
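/*
 * Invariant note: thread_suspend_one() and thread_unsuspend_one() must
 * stay strictly paired; p_suspcount mirrors the number of suspended
 * threads, and thread_single() above compares it against p_numthreads
 * to decide when single-threading has been achieved.
 */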
/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_lock(p->p_singlethread);
		thread_unsuspend_one(p->p_singlethread);
		thread_unlock(p->p_singlethread);
	}
}
/*
 * End the single threading mode.
 */
void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
	PROC_SLOCK(p);
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		FOREACH_THREAD_IN_PROC(p, td) {
			thread_lock(td);
			if (TD_IS_SUSPENDED(td)) {
				thread_unsuspend_one(td);
			}
			thread_unlock(td);
		}
	}
	PROC_SUNLOCK(p);
}
/*
 * Find a thread in this process with the given thread ID.
 * Caller must hold the proc lock.
 */
struct thread *
thread_find(struct proc *p, lwpid_t tid)
{
	struct thread *td;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_tid == tid)
			break;
	}
	return (td);
}
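/*
 * Example (hypothetical caller, in the style of thr_kill()):
 *
 *	PROC_LOCK(p);
 *	ttd = thread_find(p, uap->id);
 *	if (ttd == NULL)
 *		error = ESRCH;		loop fell off the list: td is NULL
 *	else
 *		...target the thread, e.g. deliver a signal...
 *	PROC_UNLOCK(p);
 */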