 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/filedesc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/ucontext.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <machine/frame.h>
 * KSEGRP related storage.
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;
SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");
static int oiks_debug = 0;	/* 0 disable, 1 printf, 2 enter debugger */
SYSCTL_INT(_kern_threads, OID_AUTO, oiks, CTLFLAG_RW,
	&oiks_debug, 0, "OIKS thread debug");
static int oiks_max_threads_per_proc = 10;
SYSCTL_INT(_kern_threads, OID_AUTO, oiks_max_per_proc, CTLFLAG_RW,
	&oiks_max_threads_per_proc, 0, "Debug limit on threads per proc");
static int max_threads_per_proc = 30;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
	&max_threads_per_proc, 0, "Limit on threads per proc");
static int max_groups_per_proc = 5;
SYSCTL_INT(_kern_threads, OID_AUTO, max_groups_per_proc, CTLFLAG_RW,
	&max_groups_per_proc, 0, "Limit on thread groups per proc");
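/*
 * Annotation (not in the original source): the SYSCTL_NODE/SYSCTL_INT
 * declarations above export these limits under the kern.threads node,
 * so they can be read and tuned at run time with the standard sysctl(8)
 * tool, for example:
 *
 *	sysctl kern.threads.max_threads_per_proc
 *	sysctl kern.threads.max_threads_per_proc=150
 *
 * (The value 150 is only an illustration, not a recommendation.)
 */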
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
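/*
 * Annotation (not in the original source): RANGEOF() evaluates to the
 * number of bytes spanned by the members of 'type' from 'start' up to
 * (but not including) 'end', so a contiguous run of fields can be zeroed
 * or copied in a single call, as the ksegrp code below does:
 *
 *	bzero(&newkg->kg_startzero,
 *	    RANGEOF(struct ksegrp, kg_startzero, kg_endzero));
 */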
struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
TAILQ_HEAD(, kse) zombie_kses = TAILQ_HEAD_INITIALIZER(zombie_kses);
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
struct mtx zombie_thread_lock;
MTX_SYSINIT(zombie_thread_lock, &zombie_thread_lock,
    "zombie_thread_lock", MTX_SPIN);
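/*
 * Annotation (not in the original source): an exiting thread cannot free
 * its own stack, so discarded structures are parked on these zombie lists
 * under zombie_thread_lock and reclaimed later, roughly:
 *
 *	thread_stash(td);	(defer the free; safe from a dying context)
 *	...
 *	thread_reap();		(later, e.g. from thread_alloc(), does the free)
 */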
void kse_purge(struct proc *p, struct thread *td);
 * Prepare a thread for use.
thread_ctor(void *mem, int size, void *arg)
	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_flags |= TDF_UNBOUND;
 * Reclaim a thread after use.
thread_dtor(void *mem, int size, void *arg)
	mtx_assert(&Giant, MA_OWNED);
	td = (struct thread *)mem;
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		panic("bad state for thread unlinking");
		panic("bad thread state");
 * Initialize type-stable parts of a thread (when newly created).
thread_init(void *mem, int size)
	td = (struct thread *)mem;
	pmap_new_thread(td, 0);
	cpu_thread_setup(td);
	td->td_sched = (struct td_sched *)&td[1];
 * Tear down type-stable parts of a thread (just before being discarded).
thread_fini(void *mem, int size)
	td = (struct thread *)mem;
	pmap_dispose_thread(td);
 * Initialize type-stable parts of a kse (when newly created).
kse_init(void *mem, int size)
	ke = (struct kse *)mem;
	ke->ke_sched = (struct ke_sched *)&ke[1];
 * Initialize type-stable parts of a ksegrp (when newly created).
ksegrp_init(void *mem, int size)
	kg = (struct ksegrp *)mem;
	kg->kg_sched = (struct kg_sched *)&kg[1];
 * KSE is linked onto the idle queue.
kse_link(struct kse *ke, struct ksegrp *kg)
	struct proc *p = kg->kg_proc;
	TAILQ_INSERT_HEAD(&kg->kg_kseq, ke, ke_kglist);
	ke->ke_state = KES_UNQUEUED;
	ke->ke_thread = NULL;
	ke->ke_oncpu = NOCPU;
kse_unlink(struct kse *ke)
	mtx_assert(&sched_lock, MA_OWNED);
	if (ke->ke_state == KES_IDLE) {
		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
	TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
	if (--kg->kg_kses == 0) {
		 * Aggregate stats from the KSE
ksegrp_link(struct ksegrp *kg, struct proc *p)
	TAILQ_INIT(&kg->kg_threads);
	TAILQ_INIT(&kg->kg_runq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_slpq);	/* links with td_runq */
	TAILQ_INIT(&kg->kg_kseq);	/* all kses in ksegrp */
	TAILQ_INIT(&kg->kg_iq);		/* idle kses in ksegrp */
	TAILQ_INIT(&kg->kg_lq);		/* loan kses in ksegrp */
	/* the following counters are in the -zero- section and may not need clearing */
	kg->kg_numthreads = 0;
	kg->kg_idle_kses = 0;
	kg->kg_loan_kses = 0;
	kg->kg_runq_kses = 0;	/* XXXKSE change name */
	/* link it in now that it's consistent */
	TAILQ_INSERT_HEAD(&p->p_ksegrps, kg, kg_ksegrp);
ksegrp_unlink(struct ksegrp *kg)
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(((kg->kg_numthreads == 0) && (kg->kg_kses == 0)),
	    ("kseg_unlink: residual threads or KSEs"));
	TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
	 * Aggregate stats from the KSE
 * For a newly created process,
 * link up the structure and its initial threads etc.
proc_linkup(struct proc *p, struct ksegrp *kg,
    struct kse *ke, struct thread *td)
	TAILQ_INIT(&p->p_ksegrps);	/* all ksegrps in proc */
	TAILQ_INIT(&p->p_threads);	/* all threads in proc */
	TAILQ_INIT(&p->p_suspended);	/* Threads suspended */
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_KSES))
	if (uap->tmbx == NULL)
	mtx_lock_spin(&sched_lock);
	FOREACH_THREAD_IN_PROC(p, td2) {
		if (td2->td_mailbox == uap->tmbx) {
			td2->td_flags |= TDF_INTERRUPT;
			if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) {
				if (td2->td_flags & TDF_CVWAITQ)
			mtx_unlock_spin(&sched_lock);
			td->td_retval[0] = 0;
			td->td_retval[1] = 0;
	mtx_unlock_spin(&sched_lock);
kse_exit(struct thread *td, struct kse_exit_args *uap)
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_KSES))
	/* must be a bound thread */
	if (td->td_flags & TDF_UNBOUND)
	/* serialize killing kse */
	mtx_lock_spin(&sched_lock);
	if ((kg->kg_kses == 1) && (kg->kg_numthreads > 1)) {
		mtx_unlock_spin(&sched_lock);
	if ((p->p_numthreads == 1) && (p->p_numksegrps == 1)) {
		p->p_flag &= ~P_KSES;
		mtx_unlock_spin(&sched_lock);
	while (mtx_owned(&Giant))
	td->td_kse->ke_flags |= KEF_EXIT;
kse_release(struct thread *td, struct kse_release_args *uap)
	/* KSE-enabled processes only */
	if (!(p->p_flag & P_KSES))
	 * Must be a bound thread. And the kse must have a mailbox ready;
	 * if not, the kse cannot generate an upcall.
	if (!(td->td_flags & TDF_UNBOUND) && (td->td_kse->ke_mailbox != NULL)) {
		mtx_lock_spin(&sched_lock);
		/* prevent last thread from exiting */
		if (p->p_numthreads == 1) {
			mtx_unlock_spin(&sched_lock);
		if (td->td_standin == NULL) {
			td->td_standin = thread_alloc();
		msleep(p->p_sigacts, &p->p_mtx, PPAUSE|PCATCH,
		mtx_lock_spin(&sched_lock);
		td->td_flags |= TDF_UNBOUND;
		thread_schedule_upcall(td, td->td_kse);
/* struct kse_wakeup_args {
	struct kse_mailbox *mbx;
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
	struct kse *ke, *ke2;
	/* KSE-enabled processes only, please. */
	if (!(p->p_flag & P_KSES))
	if (td->td_standin == NULL)
		td->td_standin = thread_alloc();
	mtx_lock_spin(&sched_lock);
	FOREACH_KSEGRP_IN_PROC(p, kg) {
		FOREACH_KSE_IN_GROUP(kg, ke2) {
			if (ke2->ke_mailbox != uap->mbx)
			if (ke2->ke_state == KES_IDLE) {
	mtx_unlock_spin(&sched_lock);
	td->td_retval[0] = 0;
	td->td_retval[1] = 0;
	ke = TAILQ_FIRST(&kg->kg_iq);
	mtx_unlock_spin(&sched_lock);
	thread_schedule_upcall(td, ke);
	mtx_unlock_spin(&sched_lock);
	td->td_retval[0] = 0;
	td->td_retval[1] = 0;
 * No new KSEG: first call: use current KSE, don't schedule an upcall.
 * All other situations, do allocate a new KSE and schedule an upcall on it.
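/*
 * Illustrative sketch (not in the original source): a user-level threads
 * package is expected to fill in a struct kse_mailbox with an upcall entry
 * point and stack before calling kse_create().  Only km_func and km_stack
 * are known from the copy below; the other names here are assumptions:
 *
 *	struct kse_mailbox mbx;
 *	mbx.km_func = uts_entry;	(hypothetical UTS upcall function)
 *	mbx.km_stack = uts_stack;	(hypothetical stack_t for the UTS)
 *	error = kse_create(&mbx, ...);	(remaining arguments not shown here)
 */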
/* struct kse_create_args {
	struct kse_mailbox *mbx;
kse_create(struct thread *td, struct kse_create_args *uap)
	struct ksegrp *newkg;
	struct kse_mailbox mbx;
	if ((err = copyin(uap->mbx, &mbx, sizeof(mbx))))
	p->p_flag |= P_KSES;	/* easier to just set it than to test and set */
	if (p->p_numksegrps >= max_groups_per_proc)
	 * If we want a new KSEGRP it doesn't matter whether
	 * we have already fired up KSE mode before or not.
	 * We put the process in KSE mode and create a new KSEGRP
	 * and KSE. If our KSE has not got a mailbox yet then
	 * that doesn't matter, just leave it that way. It will
	 * ensure that this thread stays BOUND. It's possible
	 * that the call came from a threaded library and the main
	 * program knows nothing of threads.
	newkg = ksegrp_alloc();
	bzero(&newkg->kg_startzero, RANGEOF(struct ksegrp,
	    kg_startzero, kg_endzero));
	bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
	    RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
	 * Otherwise, if we have already set this KSE
	 * to have a mailbox, we want to make another KSE here,
	 * but only if there are not already the limit, which
	 * If the current KSE doesn't have a mailbox we just use it
	 * Because we don't like to access
	 * the KSE outside of schedlock if we are UNBOUND,
	 * (because it can change if we are preempted by an interrupt)
	 * we can deduce it as having a mailbox if we are UNBOUND,
	 * and only need to actually look at it if we are BOUND,
	if ((td->td_flags & TDF_UNBOUND) || td->td_kse->ke_mailbox) {
		if (oiks_debug == 0) {
			if (kg->kg_kses > mp_ncpus)
		bzero(&newke->ke_startzero, RANGEOF(struct kse,
		    ke_startzero, ke_endzero));
		bcopy(&ke->ke_startcopy, &newke->ke_startcopy,
		    RANGEOF(struct kse, ke_startcopy, ke_endcopy));
	/* For the first call this may not have been set */
	if (td->td_standin == NULL) {
		td->td_standin = thread_alloc();
	mtx_lock_spin(&sched_lock);
	if (p->p_numksegrps >= max_groups_per_proc) {
		mtx_unlock_spin(&sched_lock);
	ksegrp_link(newkg, p);
	kse_link(newke, newkg);
	if (p->p_sflag & PS_NEEDSIGCHK)
		newke->ke_flags |= KEF_ASTPENDING;
	newke->ke_mailbox = uap->mbx;
	newke->ke_upcall = mbx.km_func;
	bcopy(&mbx.km_stack, &newke->ke_stack, sizeof(stack_t));
	thread_schedule_upcall(td, newke);
	mtx_unlock_spin(&sched_lock);
	 * If we didn't allocate a new KSE then we are using
	 * the existing (BOUND) kse.
	ke->ke_mailbox = uap->mbx;
	ke->ke_upcall = mbx.km_func;
	bcopy(&mbx.km_stack, &ke->ke_stack, sizeof(stack_t));
	 * Fill out the KSE-mode specific fields of the new kse.
	td->td_retval[0] = 0;
	td->td_retval[1] = 0;
 * Fill a ucontext_t with a thread's context information.
 * This is an analogue to getcontext(3).
thread_getcontext(struct thread *td, ucontext_t *uc)
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
	get_mcontext(td, &uc->uc_mcontext);
	uc->uc_sigmask = td->td_proc->p_sigmask;
 * Set a thread's context from a ucontext_t.
 * This is an analogue to setcontext(3).
thread_setcontext(struct thread *td, ucontext_t *uc)
 * XXX this is declared in a MD include file, i386/include/ucontext.h but
 * is used in MI code.
	ret = set_mcontext(td, &uc->uc_mcontext);
	SIG_CANTMASK(uc->uc_sigmask);
	PROC_LOCK(td->td_proc);
	td->td_proc->p_sigmask = uc->uc_sigmask;
	PROC_UNLOCK(td->td_proc);
 * Initialize global thread allocation resources.
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	 * XXX the ia64 kstack allocator is really lame and is at the mercy
	 * of contigmalloc(). This hackery is to pre-construct a whole
	 * pile of thread structures with associated kernel stacks early
	 * in the system startup while contigmalloc() still works. Once we
	 * have them, keep them. Sigh.
	thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
	uma_prealloc(thread_zone, 512);	/* XXX arbitrary */
	ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
	    NULL, NULL, ksegrp_init, NULL,
	kse_zone = uma_zcreate("KSE", sched_sizeof_kse(),
	    NULL, NULL, kse_init, NULL,
 * Stash an embarrassingly extra thread into the zombie thread queue.
thread_stash(struct thread *td)
	mtx_lock_spin(&zombie_thread_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&zombie_thread_lock);
 * Stash an embarrassingly extra kse into the zombie kse queue.
kse_stash(struct kse *ke)
	mtx_lock_spin(&zombie_thread_lock);
	TAILQ_INSERT_HEAD(&zombie_kses, ke, ke_procq);
	mtx_unlock_spin(&zombie_thread_lock);
 * Stash an embarrassingly extra ksegrp into the zombie ksegrp queue.
ksegrp_stash(struct ksegrp *kg)
	mtx_lock_spin(&zombie_thread_lock);
	TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
	mtx_unlock_spin(&zombie_thread_lock);
 * Reap zombie threads.
	struct thread *td_first, *td_next;
	struct kse *ke_first, *ke_next;
	struct ksegrp *kg_first, *kg_next;
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	if ((!TAILQ_EMPTY(&zombie_threads))
	    || (!TAILQ_EMPTY(&zombie_kses))
	    || (!TAILQ_EMPTY(&zombie_ksegrps))) {
		mtx_lock_spin(&zombie_thread_lock);
		td_first = TAILQ_FIRST(&zombie_threads);
		ke_first = TAILQ_FIRST(&zombie_kses);
		kg_first = TAILQ_FIRST(&zombie_ksegrps);
		TAILQ_INIT(&zombie_threads);
		TAILQ_INIT(&zombie_kses);
		TAILQ_INIT(&zombie_ksegrps);
		mtx_unlock_spin(&zombie_thread_lock);
			td_next = TAILQ_NEXT(td_first, td_runq);
			thread_free(td_first);
			ke_next = TAILQ_NEXT(ke_first, ke_procq);
			kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
			ksegrp_free(kg_first);
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
	return (uma_zalloc(kse_zone, M_WAITOK));
	thread_reap();	/* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
 * Deallocate a ksegrp.
ksegrp_free(struct ksegrp *td)
	uma_zfree(ksegrp_zone, td);
kse_free(struct kse *td)
	uma_zfree(kse_zone, td);
 * Deallocate a thread.
thread_free(struct thread *td)
	uma_zfree(thread_zone, td);
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
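/*
 * Annotation (not in the original source): after the export, the
 * user-visible result is a singly linked list of struct kse_thr_mailbox
 * entries chained through tm_next, with kg_completed holding the newest
 * entry, conceptually:
 *
 *	kg_completed -> newest tm_next -> older tm_next -> ... -> NULL
 *
 * thread_link_mboxes() later hands this list to the UTS through the KSE
 * mailbox's km_completed field.
 */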
thread_export_context(struct thread *td)
	/* Export the user/machine context. */
	addr = (caddr_t)td->td_mailbox +
	    offsetof(struct kse_thr_mailbox, tm_context);
#else /* if user pointer arithmetic is valid in the kernel */
	addr = (void *)(&td->td_mailbox->tm_context);
	error = copyin(addr, &uc, sizeof(ucontext_t));
	thread_getcontext(td, &uc);
	error = copyout(&uc, addr, sizeof(ucontext_t));
	/* get address in latest mbox of list pointer */
	addr = (caddr_t)td->td_mailbox
	    + offsetof(struct kse_thr_mailbox, tm_next);
#else /* if user pointer arithmetic is valid in the kernel */
	addr = (void *)(&td->td_mailbox->tm_next);
	 * Put the saved address of the previous first
	 * entry into this one.
	mbx = (uintptr_t)kg->kg_completed;
	if (suword(addr, mbx)) {
	if (mbx == (uintptr_t)kg->kg_completed) {
		kg->kg_completed = td->td_mailbox;
	addr = (caddr_t)td->td_mailbox
	    + offsetof(struct kse_thr_mailbox, tm_sticks);
	temp = fuword(addr) + td->td_usticks;
	if (suword(addr, temp))
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * KSE's mailbox as it's the next one going up.
thread_link_mboxes(struct ksegrp *kg, struct kse *ke)
	struct proc *p = kg->kg_proc;
	addr = (caddr_t)ke->ke_mailbox
	    + offsetof(struct kse_mailbox, km_completed);
#else /* if user pointer arithmetic is valid in the kernel */
	addr = (void *)(&ke->ke_mailbox->km_completed);
	mbx = (uintptr_t)kg->kg_completed;
	if (suword(addr, mbx)) {
	/* XXXKSE could use atomic CMPXCH here */
	if (mbx == (uintptr_t)kg->kg_completed) {
		kg->kg_completed = NULL;
 * This function should be called at statclock interrupt time.
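/*
 * Annotation (not in the original source): the intended caller is the
 * statclock path, charging one tick per interrupt to either user or
 * system time; something along the lines of (caller behaviour assumed):
 *
 *	thread_add_ticks_intr(1, 1);	(a user-mode tick)
 *	thread_add_ticks_intr(0, 1);	(a kernel/system tick)
 */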
thread_add_ticks_intr(int user, uint ticks)
	struct thread *td = curthread;
	struct kse *ke = td->td_kse;
	if (ke->ke_mailbox == NULL)
	/* Currently always done via ast() */
	ke->ke_flags |= KEF_ASTPENDING;
	ke->ke_uuticks += ticks;
	if (td->td_mailbox != NULL)
		td->td_usticks += ticks;
	ke->ke_usticks += ticks;
thread_update_uticks(void)
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct kse *ke = td->td_kse;
	struct kse_thr_mailbox *tmbx;
	KASSERT(!(td->td_flags & TDF_UNBOUND), ("thread not bound."));
	if (ke->ke_mailbox == NULL)
	uticks = ke->ke_uuticks;
	sticks = ke->ke_usticks;
	tmbx = (void *)fuword((caddr_t)ke->ke_mailbox
	    + offsetof(struct kse_mailbox, km_curthread));
	if ((tmbx == NULL) || (tmbx == (void *)-1))
	addr = (caddr_t)tmbx + offsetof(struct kse_thr_mailbox, tm_uticks);
	uticks += fuword(addr);
	if (suword(addr, uticks))
	addr = (caddr_t)tmbx + offsetof(struct kse_thr_mailbox, tm_sticks);
	sticks += fuword(addr);
	if (suword(addr, sticks))
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our KSE's ke_tdspare slot, freeing the
 * thread that might be there currently. Because we know that only this
 * processor will run our KSE, we needn't worry about someone else grabbing
 * our context before we do a cpu_throw.
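/*
 * Annotation (not in the original source): the order that matters here is
 * "stash first, switch away second"; the stack is only reclaimed by a
 * later thread_reap(), e.g.:
 *
 *	ke->ke_tdspare = td;	(or stash with the lender / another KSE)
 *	cpu_throw();		(never returns; the stack is freed much later)
 */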
	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(ke != NULL, ("thread exiting without a kse"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR1(KTR_PROC, "thread_exit: thread %p", td);
	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));
	if (ke->ke_tdspare != NULL) {
		thread_stash(ke->ke_tdspare);
		ke->ke_tdspare = NULL;
	if (td->td_standin != NULL) {
		thread_stash(td->td_standin);
		td->td_standin = NULL;
	cpu_thread_exit(td);	/* XXXSMP */
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	if (p->p_numthreads > 1) {
		 * Unlink this thread from its proc and the kseg.
		 * In keeping with the other structs we probably should
		 * have a thread_unlink() that does some of this but it
		 * would only be called from here (I think) so it would
		 * be a waste. (might be useful for proc_fini() as well.)
		TAILQ_REMOVE(&p->p_threads, td, td_plist);
		TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
		kg->kg_numthreads--;
		 * The test below is NOT true if we are the
		 * sole exiting thread. P_STOPPED_SINGLE is unset
		 * in exit1() after it is the only survivor.
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
		/* Reassign this thread's KSE. */
		ke->ke_thread = NULL;
		ke->ke_state = KES_UNQUEUED;
		KASSERT((ke->ke_bound != td),
		    ("thread_exit: entered with ke_bound set"));
		 * The reason for all this hoopla is
		 * an attempt to stop our thread stack from being freed
		 * until AFTER we have stopped running on it.
		 * Since we are under schedlock, almost any method where
		 * it is eventually freed by someone else is probably ok.
		 * (Especially if they do it under schedlock.) We could
		 * almost free it here if we could be certain that
		 * the uma code wouldn't pull it apart immediately,
		 * but unfortunately we can not guarantee that.
		 *
		 * For threads that are exiting and NOT killing their
		 * KSEs we can just stash it in the KSE, however
		 * in the case where the KSE is also being deallocated,
		 * we need to store it somewhere else. It turns out that
		 * we will never free the last KSE, so there is always one
		 * other KSE available. We might as well just choose one
		 * and stash it there. Being under schedlock should make that
		 *
		 * In borrower threads, we can stash it in the lender,
		 * where it won't be needed until this thread is long gone.
		 * Borrower threads can't kill their KSE anyhow, so even
		 * the KSE would be a safe place for them. It is not
		 * necessary to have a KSE (or KSEGRP) at all beyond this
		 * point, while we are under the protection of schedlock.
		 *
		 * Either give the KSE to another thread to use (or make
		 * it idle), or free it entirely, possibly along with its
		 * ksegrp if it's the last one.
		if (ke->ke_flags & KEF_EXIT) {
			 * Designate another KSE to hold our thread.
			 * Safe as long as we abide by whatever lock
			 * we control it with.. The other KSE will not
			 * be able to run it until we release the schedlock,
			 * but we need to be careful about it deciding to
			 * write to the stack before then. Luckily
			 * I believe that while another thread's
			 * standin thread can be used in this way, the
			 * spare thread for the KSE cannot be used without
			 * holding schedlock at least once.
			ke = FIRST_KSE_IN_PROC(p);
			 * We are a borrower:
			 * stash our thread with the owner.
			if (ke->ke_bound->td_standin) {
				thread_stash(ke->ke_bound->td_standin);
			ke->ke_bound->td_standin = td;
			if (ke->ke_tdspare != NULL) {
				thread_stash(ke->ke_tdspare);
				ke->ke_tdspare = NULL;
			ke->ke_tdspare = td;
	td->td_state = TDS_INACTIVE;
	td->td_ksegrp = NULL;
	td->td_last_kse = NULL;
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
thread_link(struct thread *td, struct ksegrp *kg)
	td->td_state = TDS_INACTIVE;
	td->td_last_kse = NULL;
	LIST_INIT(&td->td_contested);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	kg->kg_numthreads++;
	if (oiks_debug && (p->p_numthreads > oiks_max_threads_per_proc)) {
		printf("OIKS %d\n", p->p_numthreads);
kse_purge(struct proc *p, struct thread *td)
	KASSERT(p->p_numthreads == 1, ("bad thread number"));
	mtx_lock_spin(&sched_lock);
	while ((kg = TAILQ_FIRST(&p->p_ksegrps)) != NULL) {
		while ((ke = TAILQ_FIRST(&kg->kg_iq)) != NULL) {
			TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
			TAILQ_REMOVE(&kg->kg_kseq, ke, ke_kglist);
				thread_stash(ke->ke_tdspare);
		TAILQ_REMOVE(&p->p_ksegrps, kg, kg_ksegrp);
		KASSERT(((kg->kg_kses == 0) && (kg != td->td_ksegrp)) ||
		    ((kg->kg_kses == 1) && (kg == td->td_ksegrp)),
		if (kg != td->td_ksegrp) {
	TAILQ_INSERT_HEAD(&p->p_ksegrps, td->td_ksegrp, kg_ksegrp);
	mtx_unlock_spin(&sched_lock);
 * Create a thread and schedule it for upcall on the KSE given.
thread_schedule_upcall(struct thread *td, struct kse *ke)
	mtx_assert(&sched_lock, MA_OWNED);
	newkse = (ke != td->td_kse);
	 * If the kse is already owned by another thread then we can't
	 * schedule an upcall because the other thread must be BOUND
	 * which means it is not in a position to take an upcall.
	 * We must be borrowing the KSE to allow us to complete some in-kernel
	 * work. When we complete, the bound thread will have the chance to
	 * complete. This thread will sleep as planned. Hopefully there will
	 * eventually be an unbound thread that can be converted to an
	 * upcall to report the completion of this thread.
	if (ke->ke_bound && ((ke->ke_bound->td_flags & TDF_UNBOUND) == 0)) {
	KASSERT((ke->ke_bound == NULL), ("kse already bound"));
	if (ke->ke_state == KES_IDLE) {
		TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
		ke->ke_state = KES_UNQUEUED;
	if ((td2 = td->td_standin) != NULL) {
		td->td_standin = NULL;
		panic("no reserve thread when called with a new kse");
		 * If called from (e.g.) sleep and we do not have
		 * a reserve thread, then we've used it, so do not
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	    td2, td->td_proc->p_pid, td->td_proc->p_comm);
	bzero(&td2->td_startzero,
	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    (unsigned)RANGEOF(struct thread, td_startcopy, td_endcopy));
	thread_link(td2, ke->ke_ksegrp);
	cpu_set_upcall(td2, td->td_pcb);
	 * XXXKSE do we really need this? (default values for the
	bcopy(td->td_frame, td2->td_frame, sizeof(struct trapframe));
	 * Bind the new thread to the KSE,
	 * and if it's our KSE, lend it back to ourself
	 * so we can continue running.
	td2->td_ucred = crhold(td->td_ucred);
	td2->td_flags = TDF_UPCALLING;	/* note: BOUND */
	td2->td_state = TDS_CAN_RUN;
	td2->td_inhibitors = 0;
	 * If called from msleep(), we are working on the current
	 * KSE so fake that we borrowed it. If called from
	 * kse_create(), don't, as we have a new kse too.
	 *
	 * This thread will be scheduled when the current thread
	 * blocks, exits or tries to enter userspace (whichever
	 * happens first). When that happens the KSE will "revert"
	 * to this thread in a BOUND manner. Since we are called
	 * from msleep() this is going to be "very soon" in nearly
	ke->ke_bound = NULL;
	ke->ke_thread = td2;
	ke->ke_state = KES_THREAD;
	return (td2);	/* bogus.. should be a void function */
 * Schedule an upcall to notify a KSE process that it received signals.
 *
 * XXX - Modifying a sigset_t like this is totally bogus.
signal_upcall(struct proc *p, int sig)
	struct thread *td, *td2;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	td = FIRST_THREAD_IN_PROC(p);
	error = copyin(&ke->ke_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
	error = copyout(&ss, &ke->ke_mailbox->km_sigscaught, sizeof(sigset_t));
	if (td->td_standin == NULL)
		td->td_standin = thread_alloc();
	mtx_lock_spin(&sched_lock);
	td2 = thread_schedule_upcall(td, ke);	/* Bogus JRE */
	mtx_unlock_spin(&sched_lock);
 * Setup done on the thread when it enters the kernel.
 * XXXKSE Presently only for syscalls but eventually all kernel entries.
thread_user_enter(struct proc *p, struct thread *td)
	 * First check that we shouldn't just abort.
	 * But check if we are the single thread first!
	 * XXX p_singlethread not locked, but should be safe.
	if ((p->p_flag & P_WEXIT) && (p->p_singlethread != td)) {
		mtx_lock_spin(&sched_lock);
	 * If we are doing a syscall in a KSE environment,
	 * note where our mailbox is. There is always the
	 * possibility that we could do this lazily (in sleep()),
	 * but for now do it every time.
	if (ke->ke_mailbox != NULL) {
		td->td_mailbox = (void *)fuword((caddr_t)ke->ke_mailbox
		    + offsetof(struct kse_mailbox, km_curthread));
#else /* if user pointer arithmetic is ok in the kernel */
		    (void *)fuword((void *)&ke->ke_mailbox->km_curthread);
		if ((td->td_mailbox == NULL) ||
		    (td->td_mailbox == (void *)-1)) {
			td->td_mailbox = NULL;	/* single thread it.. */
			mtx_lock_spin(&sched_lock);
			td->td_flags &= ~TDF_UNBOUND;
			mtx_unlock_spin(&sched_lock);
			 * When the thread limit has been reached, act as if
			 * the thread has already done an upcall.
			if (p->p_numthreads > max_threads_per_proc) {
				if (td->td_standin != NULL)
					thread_stash(td->td_standin);
				td->td_standin = NULL;
			if (td->td_standin == NULL)
				td->td_standin = thread_alloc();
			mtx_lock_spin(&sched_lock);
			td->td_flags |= TDF_UNBOUND;
			mtx_unlock_spin(&sched_lock);
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first). If we were in a critical section, we should
 * just return to let it finish. Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
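/*
 * Annotation (not in the original source): a rough sketch of the path an
 * unbound thread takes below when returning to user mode:
 *
 *	error = thread_export_context(td);	(save state for the UTS)
 *	if (the KSE is borrowed/owned by another thread)
 *		...				("just exit" per the comments below)
 *	else
 *		td->td_flags |= TDF_UPCALLING;	(become the bound upcall)
 *		... mi_switch() if another thread wants the KSE ...
 */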
thread_userret(struct thread *td, struct trapframe *frame)
	unbound = td->td_flags & TDF_UNBOUND;
	 * Originally bound threads never upcall but they may
	 * loan out their KSE at this point.
	 * Upcalls imply bound.. They also may want to do some Philanthropy.
	 * Unbound threads on the other hand either yield to other work
	 * or transform into an upcall.
	 * (having saved their context to user space in both cases)
	 * We are an unbound thread, looking to return to
	 * There are several possibilities:
	 * 1) we are using a borrowed KSE. save state and exit.
	 *    kse_reassign() will recycle the kse as needed,
	 * 2) we are not.. save state, and then convert ourself
	 *    to be an upcall, bound to the KSE.
	 *    if there are others that need the kse,
	 *    give them a chance by doing an mi_switch().
	 *    Because we are bound, control will eventually return
	 * Save the thread's context, and link it
	 * into the KSEGRP's list of completed threads.
	error = thread_export_context(td);
	td->td_mailbox = NULL;
	 * If we are not running on a borrowed KSE, then
	 * failing to do the KSE operation just defaults
	 * back to synchronous operation, so just return from
	 * the syscall. If it IS borrowed, there is nothing
	 * we can do. We just lose that context. We
	 * probably should note this somewhere and send
	 * the process a signal.
	PROC_LOCK(td->td_proc);
	psignal(td->td_proc, SIGSEGV);
	mtx_lock_spin(&sched_lock);
	if (td->td_kse->ke_bound == NULL) {
		td->td_flags &= ~TDF_UNBOUND;
		PROC_UNLOCK(td->td_proc);
		mtx_unlock_spin(&sched_lock);
		thread_update_uticks();
		return (error);	/* go sync */
	 * If the KSE is owned and we are borrowing it,
	 * don't make an upcall, just exit so that the owner
	 * can get its KSE if it wants it.
	 * Our context is already safely stored for later
	mtx_lock_spin(&sched_lock);
	if (td->td_kse->ke_bound) {
	 * Turn ourself into a bound upcall.
	 * We will rely on kse_reassign()
	 * to make us run at a later time.
	 * We should look just like a scheduled upcall
	 * from msleep() or cv_wait().
	td->td_flags &= ~TDF_UNBOUND;
	td->td_flags |= TDF_UPCALLING;
	/* Only get here if we have become an upcall */
	mtx_lock_spin(&sched_lock);
	 * We ARE going back to userland with this KSE.
	 * Check for threads that need to borrow it.
	 * Optimisation: don't call mi_switch if no-one wants the KSE.
	 * Any other thread that comes ready after this missed the boat.
	if ((td2 = kg->kg_last_assigned))
		td2 = TAILQ_NEXT(td2, td_runq);
		td2 = TAILQ_FIRST(&kg->kg_runq);
		 * force a switch to more urgent 'in kernel'
		 * work. Control will return to this thread
		 * when there is no more work to do.
		 * kse_reassign() will do that for us.
		ke->ke_thread = NULL;
		mi_switch();	/* kse_reassign() will (re)find td2 */
	mtx_unlock_spin(&sched_lock);
	 * Ensure that we have a spare thread available,
	 * for when we re-enter the kernel.
	if (td->td_standin == NULL) {
		if (ke->ke_tdspare) {
			td->td_standin = ke->ke_tdspare;
			ke->ke_tdspare = NULL;
			td->td_standin = thread_alloc();
	thread_update_uticks();
	 * To get here, we know there is no other need for our
	 * KSE so we can proceed. If not upcalling, go back to
	 * userspace. If we are, get the upcall set up.
	if ((td->td_flags & TDF_UPCALLING) == 0)
	 * We must be an upcall to get this far.
	 * There is no more work to do and we are going to ride
	 * this thread/KSE up to userland as an upcall.
	 * Do the last parts of the setup needed for the upcall.
	CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
	    td, td->td_proc->p_pid, td->td_proc->p_comm);
	 * Set user context to the UTS.
	cpu_set_upcall_kse(td, ke);
	 * Put any completed mailboxes on this KSE's list.
	error = thread_link_mboxes(kg, ke);
	 * Set state and mailbox.
	 * From now on we are just a bound outgoing process.
	 * **Problem** userret is often called several times.
	 * It would be nice if this all happened only on the first time
	 * through. (the scan for extra work etc.)
	mtx_lock_spin(&sched_lock);
	td->td_flags &= ~TDF_UPCALLING;
	mtx_unlock_spin(&sched_lock);
	error = suword((caddr_t)ke->ke_mailbox +
	    offsetof(struct kse_mailbox, km_curthread), 0);
#else /* if user pointer arithmetic is ok in the kernel */
	error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
	ke->ke_uuticks = ke->ke_usticks = 0;
	if (copyout(&ts, (caddr_t)&ke->ke_mailbox->km_timeofday,
	 * Things are going to be so screwed we should just kill the process.
	 * How do we do that?
	PROC_LOCK(td->td_proc);
	psignal(td->td_proc, SIGSEGV);
	PROC_UNLOCK(td->td_proc);
	return (error);	/* go sync */
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may however be
 * accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptible (PCATCH).
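/*
 * Annotation (not in the original source): a caller is expected to hold
 * the process lock and honour the return value, along these lines (the
 * surrounding code is a hypothetical sketch):
 *
 *	PROC_LOCK(p);
 *	if (thread_single(SINGLE_EXIT))
 *		... abort or retry as appropriate ...
 *	...
 *	thread_single_end();
 */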
thread_single(int force_exit)
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));
	if ((p->p_flag & P_KSES) == 0)
	/* Is someone already single threading? */
	if (p->p_singlethread)
	if (force_exit == SINGLE_EXIT)
		p->p_flag |= P_SINGLE_EXIT;
		p->p_flag &= ~P_SINGLE_EXIT;
	p->p_flag |= P_STOPPED_SINGLE;
	p->p_singlethread = td;
	/* XXXKSE Which lock protects the below values? */
	while ((p->p_numthreads - p->p_suspcount) != 1) {
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (TD_IS_INHIBITED(td2)) {
				if (force_exit == SINGLE_EXIT) {
					if (TD_IS_SUSPENDED(td2)) {
						thread_unsuspend_one(td2);
					if (TD_ON_SLEEPQ(td2) &&
					    (td2->td_flags & TDF_SINTR)) {
						if (td2->td_flags & TDF_CVWAITQ)
					if (TD_IS_SUSPENDED(td2))
					/* maybe other inhibited states too? */
					if (TD_IS_SLEEPING(td2))
						thread_suspend_one(td2);
		 * Maybe we suspended some threads.. was it enough?
		if ((p->p_numthreads - p->p_suspcount) == 1) {
			mtx_unlock_spin(&sched_lock);
		 * Wake us up when everyone else has suspended.
		 * In the meantime we suspend as well.
		thread_suspend_one(td);
		mtx_unlock_spin(&sched_lock);
	if (force_exit == SINGLE_EXIT)
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is nonzero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       | immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is in effect, even a single-threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
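/*
 * Annotation (not in the original source): per the table above, a caller
 * that cannot stop here passes a nonzero return_instead and backs out on
 * a nonzero result; a hypothetical sketch:
 *
 *	PROC_LOCK(p);
 *	error = thread_suspend_check(1);
 *	PROC_UNLOCK(p);
 *	if (error)
 *		return (ERESTART);	(caller-chosen reaction, illustrative)
 */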
thread_suspend_check(int return_instead)
	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p)) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			mtx_lock_spin(&sched_lock);
			while (mtx_owned(&Giant))
			 * Free extra kses and ksegrps. We needn't worry
			 * about the case where the current thread is in the
			 * same ksegrp as p_singlethread and the last kse in
			 * the group could be killed; that is protected by
			 * kg_numthreads, since in that case kg_numthreads
			 * must be > 1.
			if (ke->ke_bound == NULL &&
			    ((kg->kg_kses != 1) || (kg->kg_numthreads == 1)))
				ke->ke_flags |= KEF_EXIT;
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 *
		 * XXXKSE if TDF_BOUND is true
		 * it will not release its KSE which might
		 * lead to deadlock if there are not enough KSEs
		 * to complete all waiting threads.
		 * Maybe be able to 'lend' it out again.
		 * (lent kse's can not go back to userland?)
		 * and can only be lent in STOPPED state.
		mtx_lock_spin(&sched_lock);
		if ((p->p_flag & P_STOPPED_SIG) &&
		    (p->p_suspcount + 1 == p->p_numthreads)) {
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p->p_pptr);
			if ((p->p_pptr->p_procsig->ps_flag &
			    PS_NOCLDSTOP) == 0) {
				psignal(p->p_pptr, SIGCHLD);
			PROC_UNLOCK(p->p_pptr);
			mtx_lock_spin(&sched_lock);
		mtx_assert(&Giant, MA_NOTOWNED);
		thread_suspend_one(td);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
		p->p_stats->p_ru.ru_nivcsw++;
		mtx_unlock_spin(&sched_lock);
thread_suspend_one(struct thread *td)
	struct proc *p = td->td_proc;
	mtx_assert(&sched_lock, MA_OWNED);
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
	 * Hack: If we are suspending but are on the sleep queue
	 * then we are in msleep or the cv equivalent. We
	 * want to look like we have two Inhibitors.
	 * May already be set.. doesn't matter.
	if (TD_ON_SLEEPQ(td))
		TD_SET_SLEEPING(td);
thread_unsuspend_one(struct thread *td)
	struct proc *p = td->td_proc;
	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
 * Allow all threads blocked by single threading to continue running.
thread_unsuspend(struct proc *p)
	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		thread_unsuspend_one(p->p_singlethread);
thread_single_end(void)
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~P_STOPPED_SINGLE;
	p->p_singlethread = NULL;
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		mtx_lock_spin(&sched_lock);
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		mtx_unlock_spin(&sched_lock);