/*
 * Copyright (C) 2001 Julian Elischer <julian@freebsd.org>.
 *  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/signalvar.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ucontext.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
#include <vm/vm_map.h>

#include <machine/frame.h>

/*
 * KSEGRP related storage.
 */
static uma_zone_t ksegrp_zone;
static uma_zone_t kse_zone;
static uma_zone_t thread_zone;

SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0, "thread allocation");

static int oiks_debug = 1;	/* 0 disable, 1 printf, 2 enter debugger */
SYSCTL_INT(_kern_threads, OID_AUTO, oiks, CTLFLAG_RW,
    &oiks_debug, 0, "OIKS thread debug");

static int max_threads_per_proc = 6;
SYSCTL_INT(_kern_threads, OID_AUTO, max_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
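
/*
 * Example (illustrative, not from the original source): RANGEOF()
 * yields the size in bytes of the span of members between two marker
 * fields of a structure, so a sub-range can be cleared or copied in
 * one call, as thread_schedule_upcall() does below:
 *
 *	bzero(&td2->td_startzero,
 *	    RANGEOF(struct thread, td_startzero, td_endzero));
 */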

struct threadqueue zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
struct mtx zombie_thread_lock;
MTX_SYSINIT(zombie_thread_lock, &zombie_thread_lock,
    "zombie_thread_lock", MTX_SPIN);

/*
 * Prepare a thread for use.
 */
static void
thread_ctor(void *mem, int size, void *arg)
{
	struct thread *td;

	KASSERT((size == sizeof(struct thread)),
	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));

	td = (struct thread *)mem;
	td->td_state = TDS_INACTIVE;
	td->td_flags |= TDF_UNBOUND;
}

/*
 * Reclaim a thread after use.
 */
static void
thread_dtor(void *mem, int size, void *arg)
{
	struct thread *td;

	KASSERT((size == sizeof(struct thread)),
	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));

	td = (struct thread *)mem;

#ifdef INVARIANTS
	/* Verify that this thread is in a safe state to free. */
	switch (td->td_state) {
	case TDS_INHIBITED:
	case TDS_CAN_RUN:
	case TDS_RUNQ:
	case TDS_RUNNING:
		/*
		 * We must never unlink a thread that is in one of
		 * these states, because it is currently active.
		 */
		panic("bad state for thread unlinking");
		/* NOTREACHED */
	case TDS_INACTIVE:
		break;
	default:
		panic("bad thread state");
		/* NOTREACHED */
	}
#endif
}

/*
 * Initialize type-stable parts of a thread (when newly created).
 */
static void
thread_init(void *mem, int size)
{
	struct thread *td;

	KASSERT((size == sizeof(struct thread)),
	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));

	td = (struct thread *)mem;
	pmap_new_thread(td);	/* counterpart of pmap_dispose_thread() below */
	cpu_thread_setup(td);
}

/*
 * Tear down type-stable parts of a thread (just before being discarded).
 */
static void
thread_fini(void *mem, int size)
{
	struct thread *td;

	KASSERT((size == sizeof(struct thread)),
	    ("size mismatch: %d != %d\n", size, (int)sizeof(struct thread)));

	td = (struct thread *)mem;
	pmap_dispose_thread(td);
}

/*
 * Fill a ucontext_t with a thread's context information.
 *
 * This is an analogue to getcontext(3).
 */
void
thread_getcontext(struct thread *td, ucontext_t *uc)
{

	/*
	 * XXX this is declared in a MD include file, i386/include/ucontext.h but
	 * is used in MI code.
	 */
	get_mcontext(td, &uc->uc_mcontext);
	uc->uc_sigmask = td->td_proc->p_sigmask;
}

/*
 * Set a thread's context from a ucontext_t.
 *
 * This is an analogue to setcontext(3).
 */
int
thread_setcontext(struct thread *td, ucontext_t *uc)
{
	int ret;

	/*
	 * XXX this is declared in a MD include file, i386/include/ucontext.h but
	 * is used in MI code.
	 */
	ret = set_mcontext(td, &uc->uc_mcontext);
	if (ret == 0) {
		SIG_CANTMASK(uc->uc_sigmask);
		PROC_LOCK(td->td_proc);
		td->td_proc->p_sigmask = uc->uc_sigmask;
		PROC_UNLOCK(td->td_proc);
	}
	return (ret);
}

/*
 * Initialize global thread allocation resources.
 */
void
threadinit(void)
{

	thread_zone = uma_zcreate("THREAD", sizeof (struct thread),
	    thread_ctor, thread_dtor, thread_init, thread_fini,
	    UMA_ALIGN_CACHE, 0);
	ksegrp_zone = uma_zcreate("KSEGRP", sizeof (struct ksegrp),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);
	kse_zone = uma_zcreate("KSE", sizeof (struct kse),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);
}

/*
 * Stash an embarrassingly extra thread into the zombie thread queue.
 */
void
thread_stash(struct thread *td)
{
	mtx_lock_spin(&zombie_thread_lock);
	TAILQ_INSERT_HEAD(&zombie_threads, td, td_runq);
	mtx_unlock_spin(&zombie_thread_lock);
}

/*
 * Reap zombie threads.
 */
void
thread_reap(void)
{
	struct thread *td_reaped;

	/*
	 * Don't even bother to lock if none at this instant;
	 * we really don't care about the next instant.
	 */
	if (!TAILQ_EMPTY(&zombie_threads)) {
		mtx_lock_spin(&zombie_thread_lock);
		while (!TAILQ_EMPTY(&zombie_threads)) {
			td_reaped = TAILQ_FIRST(&zombie_threads);
			TAILQ_REMOVE(&zombie_threads, td_reaped, td_runq);
			/* Drop the spin lock; thread_free() may block. */
			mtx_unlock_spin(&zombie_thread_lock);
			thread_free(td_reaped);
			mtx_lock_spin(&zombie_thread_lock);
		}
		mtx_unlock_spin(&zombie_thread_lock);
	}
}
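
/*
 * Illustrative note (an addition, not original text): thread_stash()
 * and thread_reap() form a deferred-free pair.  A sketch of the
 * intended life cycle, using only calls that appear in this file:
 *
 *	thread_stash(ke->ke_tdspare);	// thread_exit(): defer the free
 *	...
 *	td2 = thread_alloc();		// runs thread_reap() first, so the
 *					// stashed zombie is freed here
 */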

/*
 * Allocate a ksegrp.
 */
struct ksegrp *
ksegrp_alloc(void)
{
	return (uma_zalloc(ksegrp_zone, M_WAITOK));
}

/*
 * Allocate a kse.
 */
struct kse *
kse_alloc(void)
{
	return (uma_zalloc(kse_zone, M_WAITOK));
}

/*
 * Allocate a thread.
 */
struct thread *
thread_alloc(void)
{
	thread_reap();	/* check if any zombies to get */
	return (uma_zalloc(thread_zone, M_WAITOK));
}

/*
 * Deallocate a ksegrp.
 */
void
ksegrp_free(struct ksegrp *kg)
{
	uma_zfree(ksegrp_zone, kg);
}

/*
 * Deallocate a kse.
 */
void
kse_free(struct kse *ke)
{
	uma_zfree(kse_zone, ke);
}

/*
 * Deallocate a thread.
 */
void
thread_free(struct thread *td)
{
	uma_zfree(thread_zone, td);
}

/*
 * Store the thread context in the UTS's mailbox,
 * then add the mailbox at the head of a list we are building in user space.
 * The list is anchored in the ksegrp structure.
 */
int
thread_export_context(struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ksegrp *kg = td->td_ksegrp;
	uintptr_t mbx;
	void *addr;
	ucontext_t uc;
	int error;

	/* Export the user/machine context. */
#if 0
	addr = (caddr_t)td->td_mailbox +
	    offsetof(struct kse_thr_mailbox, tm_context);
#else /* if user pointer arithmetic is valid in the kernel */
	addr = (void *)(&td->td_mailbox->tm_context);
#endif
	error = copyin(addr, &uc, sizeof(ucontext_t));
	if (error)
		return (error);
	thread_getcontext(td, &uc);
	error = copyout(&uc, addr, sizeof(ucontext_t));
	if (error)
		return (error);

	/* get address in latest mbox of list pointer */
#if 0
	addr = (caddr_t)td->td_mailbox
	    + offsetof(struct kse_thr_mailbox, tm_next);
#else /* if user pointer arithmetic is valid in the kernel */
	addr = (void *)(&td->td_mailbox->tm_next);
#endif
	/*
	 * Put the saved address of the previous first
	 * entry into this one.  Retry until the list head is
	 * unchanged while we publish the new link.
	 */
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx))
			return (EFAULT);
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = td->td_mailbox;
			PROC_UNLOCK(p);
			return (0);
		}
		PROC_UNLOCK(p);
	}
}
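
#if 0
/*
 * Illustrative sketch only (not part of the kernel): how a userland
 * UTS might consume the list built above.  "kmbx" is an assumed
 * pointer to the KSE's own struct kse_mailbox; the km_completed head
 * is published by thread_link_mboxes() below, and each entry is
 * chained through tm_next as written by the suword() above.
 */
	struct kse_thr_mailbox *tm;

	for (tm = kmbx->km_completed; tm != NULL; tm = tm->tm_next) {
		/* reschedule or recycle the completed user thread */
	}
#endif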

/*
 * Take the list of completed mailboxes for this KSEGRP and put them on this
 * KSE's mailbox as it's the next one going up.
 */
int
thread_link_mboxes(struct ksegrp *kg, struct kse *ke)
{
	struct proc *p = kg->kg_proc;
	void *addr;
	uintptr_t mbx;

#if 0
	addr = (caddr_t)ke->ke_mailbox
	    + offsetof(struct kse_mailbox, km_completed);
#else /* if user pointer arithmetic is valid in the kernel */
	addr = (void *)(&ke->ke_mailbox->km_completed);
#endif
	for (;;) {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx))
			return (EFAULT);
		/* XXXKSE could use atomic CMPXCH here */
		PROC_LOCK(p);
		if (mbx == (uintptr_t)kg->kg_completed) {
			kg->kg_completed = NULL;
			PROC_UNLOCK(p);
			return (0);
		}
		PROC_UNLOCK(p);
	}
}
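
#if 0
/*
 * Hypothetical alternative for the XXXKSE note above (an assumption,
 * not code from this tree): because kg_completed itself is kernel
 * memory, an atomic compare-and-swap such as atomic_cmpset_ptr()
 * could replace the PROC_LOCK()ed compare-and-clear:
 */
	do {
		mbx = (uintptr_t)kg->kg_completed;
		if (suword(addr, mbx))
			return (EFAULT);
	} while (atomic_cmpset_ptr(&kg->kg_completed, (void *)mbx, NULL) == 0);
	return (0);
#endif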

/*
 * Discard the current thread and exit from its context.
 *
 * Because we can't free a thread while we're operating under its context,
 * push the current thread into our KSE's ke_tdspare slot, freeing the
 * thread that might be there currently. Because we know that only this
 * processor will run our KSE, we needn't worry about someone else grabbing
 * our context before we do a cpu_throw.
 */
void
thread_exit(void)
{
	struct thread *td;
	struct kse *ke;
	struct proc *p;
	struct ksegrp *kg;

	td = curthread;
	kg = td->td_ksegrp;
	p = td->td_proc;
	ke = td->td_kse;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT(p != NULL, ("thread exiting without a process"));
	KASSERT(ke != NULL, ("thread exiting without a kse"));
	KASSERT(kg != NULL, ("thread exiting without a kse group"));
	PROC_LOCK_ASSERT(p, MA_OWNED);
	CTR1(KTR_PROC, "thread_exit: thread %p", td);
	KASSERT(!mtx_owned(&Giant), ("dying thread owns giant"));

	if (ke->ke_tdspare != NULL) {
		thread_stash(ke->ke_tdspare);
		ke->ke_tdspare = NULL;
	}
	cpu_thread_exit(td);	/* XXXSMP */

	/*
	 * The last thread is left attached to the process
	 * so that the whole bundle gets recycled. Skip
	 * all of this if we are the last thread.
	 */
	if (p->p_numthreads > 1) {
		/* Reassign this thread's KSE. */
		ke->ke_thread = NULL;
		td->td_kse = NULL;
		ke->ke_state = KES_UNQUEUED;
		kse_reassign(ke);

		/* Unlink this thread from its proc. and the kseg */
		TAILQ_REMOVE(&p->p_threads, td, td_plist);
		p->p_numthreads--;
		TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
		kg->kg_numthreads--;
		/*
		 * The test below is NOT true if we are the
		 * sole exiting thread. P_STOPPED_SINGLE is unset
		 * in exit1() after it is the only survivor.
		 */
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}
		PROC_UNLOCK(p);
		td->td_state = TDS_INACTIVE;
		td->td_proc = NULL;
		td->td_ksegrp = NULL;
		td->td_last_kse = NULL;
		ke->ke_tdspare = td;
	} else {
		PROC_UNLOCK(p);
	}

	cpu_throw();
	/* NOTREACHED */
}

/*
 * Link a thread to a process.
 * Set up anything that needs to be initialized for it to
 * be used by the process.
 *
 * Note that we do not link to the proc's ucred here.
 * The thread is linked as if running but no KSE assigned.
 */
void
thread_link(struct thread *td, struct ksegrp *kg)
{
	struct proc *p;

	p = kg->kg_proc;
	td->td_state = TDS_INACTIVE;
	td->td_proc = p;
	td->td_ksegrp = kg;
	td->td_last_kse = NULL;

	LIST_INIT(&td->td_contested);
	callout_init(&td->td_slpcallout, 1);
	TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
	TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
	p->p_numthreads++;
	kg->kg_numthreads++;
	if (oiks_debug && p->p_numthreads > max_threads_per_proc) {
		printf("OIKS %d\n", p->p_numthreads);
		if (oiks_debug > 1)
			Debugger("OIKS");
	}
	td->td_kse = NULL;
}

/*
 * Create a thread and schedule it for upcall on the KSE given.
 */
struct thread *
thread_schedule_upcall(struct thread *td, struct kse *ke)
{
	struct thread *td2;

	mtx_assert(&sched_lock, MA_OWNED);
	if (ke->ke_tdspare != NULL) {
		td2 = ke->ke_tdspare;
		ke->ke_tdspare = NULL;
	} else {
		mtx_unlock_spin(&sched_lock);
		td2 = thread_alloc();
		mtx_lock_spin(&sched_lock);
	}
	CTR3(KTR_PROC, "thread_schedule_upcall: thread %p (pid %d, %s)",
	    td, td->td_proc->p_pid, td->td_proc->p_comm);
	bzero(&td2->td_startzero,
	    (unsigned)RANGEOF(struct thread, td_startzero, td_endzero));
	bcopy(&td->td_startcopy, &td2->td_startcopy,
	    (unsigned)RANGEOF(struct thread, td_startcopy, td_endcopy));
	thread_link(td2, ke->ke_ksegrp);
	cpu_set_upcall(td2, td->td_pcb);
	bcopy(td->td_frame, td2->td_frame, sizeof(struct trapframe));
	/*
	 * The user context for this thread is selected when we choose
	 * a KSE and return to userland on it. All we need do here is
	 * note that the thread exists in order to perform an upcall.
	 *
	 * Since selecting a KSE to perform the upcall involves locking
	 * that KSE's context to our upcall, it's best to wait until the
	 * last possible moment before grabbing a KSE. We do this in
	 * thread_userret().
	 */
	td2->td_ucred = crhold(td->td_ucred);
	td2->td_flags = TDF_UNBOUND|TDF_UPCALLING;
	setrunqueue(td2);
	return (td2);
}

/*
 * Schedule an upcall to notify a KSE process that it has received signals.
 *
 * XXX - Modifying a sigset_t like this is totally bogus.
 */
struct thread *
signal_upcall(struct proc *p, int sig)
{
	struct thread *td, *td2;
	struct kse *ke;
	sigset_t ss;
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);

	td = FIRST_THREAD_IN_PROC(p);
	ke = td->td_kse;
	PROC_UNLOCK(p);
	error = copyin(&ke->ke_mailbox->km_sigscaught, &ss, sizeof(sigset_t));
	PROC_LOCK(p);
	if (error)
		return (NULL);
	SIGADDSET(ss, sig);
	PROC_UNLOCK(p);
	error = copyout(&ss, &ke->ke_mailbox->km_sigscaught, sizeof(sigset_t));
	PROC_LOCK(p);
	if (error)
		return (NULL);
	mtx_lock_spin(&sched_lock);
	td2 = thread_schedule_upcall(td, ke);
	mtx_unlock_spin(&sched_lock);
	return (td2);
}

/*
 * Consider whether or not an upcall should be made, and update the
 * TDF_UPCALLING flag appropriately.
 *
 * This function is called when the current thread had been bound to a user
 * thread that performed a syscall that blocked, and is now returning.
 * Got that? syscall -> msleep -> wakeup -> syscall_return -> us.
 *
 * This thread will be returned to the UTS in its mailbox as a completed
 * thread. We need to decide whether or not to perform an upcall now,
 * or simply queue the thread for later.
 *
 * XXXKSE Future enhancement: We could also return back to
 * the thread if we haven't had to do an upcall since then.
 * If the KSE's copy is == the thread's copy, and there are
 * no other completed threads.
 */
int
thread_consider_upcalling(struct thread *td)
{
	struct proc *p;
	struct ksegrp *kg;
	int error;

	/*
	 * Save the thread's context, and link it
	 * into the KSEGRP's list of completed threads.
	 */
	error = thread_export_context(td);
	td->td_flags &= ~TDF_UNBOUND;
	td->td_mailbox = NULL;
	if (error)
		/*
		 * Failing to do the KSE operation just defaults
		 * back to synchronous operation, so just return from
		 * the syscall.
		 */
		return (error);

	/*
	 * Decide whether to perform an upcall now.
	 */
	/* Make sure there are no other threads waiting to run. */
	p = td->td_proc;
	kg = td->td_ksegrp;
	PROC_LOCK(p);
	mtx_lock_spin(&sched_lock);
	/* bogus test, ok for testing though */
	if (TAILQ_FIRST(&kg->kg_runq) &&
	    (TAILQ_LAST(&kg->kg_runq, threadqueue)
		!= kg->kg_last_assigned)) {
		/*
		 * Another thread in this KSEG needs to run.
		 * Switch to it instead of performing an upcall,
		 * abandoning this thread. Perform the upcall
		 * later; discard this thread for now.
		 *
		 * XXXKSE - As for the other threads to run;
		 * we COULD rush through all the threads
		 * in this KSEG at this priority, or we
		 * could throw the ball back into the court
		 * and just run the highest prio kse available.
		 * What is OUR priority? The priority of the highest
		 * syscall waiting to be returned?
		 * For now, just let another KSE run (easiest).
		 */
		thread_exit(); /* Abandon current thread. */
		/* NOTREACHED */
	}
	/*
	 * Perform an upcall now.
	 *
	 * XXXKSE - Assumes we are going to userland, and not
	 * nested in the kernel.
	 */
	td->td_flags |= TDF_UPCALLING;
	mtx_unlock_spin(&sched_lock);
	PROC_UNLOCK(p);
	return (0);
}

/*
 * The extra work we go through if we are a threaded process when we
 * return to userland.
 *
 * If we are a KSE process and returning to user mode, check for
 * extra work to do before we return (e.g. for more syscalls
 * to complete first). If we were in a critical section, we should
 * just return to let it finish. Same if we were in the UTS (in
 * which case the mailbox's context's busy indicator will be set).
 * The only traps we support will have set the mailbox.
 * We will clear it here.
 */
int
thread_userret(struct thread *td, struct trapframe *frame)
{
	struct kse *ke;
	int error;
	int unbound;

	/* Make the thread bound from now on, but remember what it was. */
	unbound = td->td_flags & TDF_UNBOUND;
	td->td_flags &= ~TDF_UNBOUND;
	/*
	 * Ensure that we have a spare thread available.
	 */
	ke = td->td_kse;
	if (ke->ke_tdspare == NULL) {
		ke->ke_tdspare = thread_alloc();
	}
	/*
	 * Originally bound threads need no additional work.
	 */
	if (unbound == 0)
		return (0);
	error = 0;
	/*
	 * Decide whether or not we should perform an upcall now.
	 */
	if (((td->td_flags & TDF_UPCALLING) == 0) && unbound) {
		/* if we have other threads to run we will not return */
		if ((error = thread_consider_upcalling(td)))
			return (error);	/* couldn't go async, just go sync. */
	}
	if (td->td_flags & TDF_UPCALLING) {
		/*
		 * There is no more work to do and we are going to ride
		 * this thread/KSE up to userland as an upcall.
		 */
		CTR3(KTR_PROC, "userret: upcall thread %p (pid %d, %s)",
		    td, td->td_proc->p_pid, td->td_proc->p_comm);

		/*
		 * Set user context to the UTS.
		 */
		cpu_set_upcall_kse(td, ke);

		/*
		 * Put any completed mailboxes on this KSE's list.
		 */
		error = thread_link_mboxes(td->td_ksegrp, ke);
		if (error)
			goto bad;

		/*
		 * Set state and mailbox.
		 */
		td->td_flags &= ~TDF_UPCALLING;
#if 0
		error = suword((caddr_t)ke->ke_mailbox +
		    offsetof(struct kse_mailbox, km_curthread), 0);
#else /* if user pointer arithmetic is ok in the kernel */
		error = suword((caddr_t)&ke->ke_mailbox->km_curthread, 0);
#endif
		if (error)
			goto bad;
	}
	/*
	 * Stop any chance that we may be separated from
	 * the KSE we are currently on. This is "biting the bullet",
	 * we are committing to go to user space as this KSE here.
	 */
	return (error);

bad:
	/*
	 * Things are going to be so screwed we should just kill the process.
	 */
	panic("thread_userret.. need to kill proc..... how?");
}

/*
 * Enforce single-threading.
 *
 * Returns 1 if the caller must abort (another thread is waiting to
 * exit the process or similar). Process is locked!
 * Returns 0 when you are successfully the only thread running.
 * A process has successfully single-threaded in the suspend mode when
 * there are no threads in user mode. Threads in the kernel must be
 * allowed to continue until they get to the user boundary. They may even
 * copy out their return values and data before suspending. They may however be
 * accelerated in reaching the user boundary as we will wake up
 * any sleeping threads that are interruptible. (PCATCH).
 */
int
thread_single(int force_exit)
{
	struct thread *td;
	struct thread *td2;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT((td != NULL), ("curthread is NULL"));

	if ((p->p_flag & P_KSES) == 0)
		return (0);

	/* Is someone already single threading? */
	if (p->p_singlethread)
		return (1);

	if (force_exit == SINGLE_EXIT)
		p->p_flag |= P_SINGLE_EXIT;
	else
		p->p_flag &= ~P_SINGLE_EXIT;
	p->p_flag |= P_STOPPED_SINGLE;
	p->p_singlethread = td;
	while ((p->p_numthreads - p->p_suspcount) != 1) {
		mtx_lock_spin(&sched_lock);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (td2 == td)
				continue;
			if (TD_IS_INHIBITED(td2)) {
				if (TD_IS_SUSPENDED(td2)) {
					if (force_exit == SINGLE_EXIT) {
						thread_unsuspend_one(td2);
					}
				}
				if (TD_IS_SLEEPING(td2)) {
					if (td2->td_flags & TDF_CVWAITQ)
						cv_waitq_remove(td2);
					else
						unsleep(td2);
					TD_CLR_SLEEPING(td2);
				}
			}
		}
		/*
		 * Wake us up when everyone else has suspended.
		 * (In the meantime we suspend as well.)
		 */
		thread_suspend_one(td);
		PROC_UNLOCK(p);
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
	}
	return (0);
}
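
#if 0
/*
 * Illustrative usage (an added sketch, not original text): a caller
 * holding the proc lock brackets its critical work between
 * thread_single() and thread_single_end().  SINGLE_EXIT is the mode
 * tested above; the non-exiting mode is assumed here for symmetry.
 */
	PROC_LOCK(p);
	if (thread_single(SINGLE_NO_EXIT)) {
		/* Another thread won the race; caller must abort. */
	} else {
		/* ... work that requires being the only runner ... */
		thread_single_end();	/* defined at the end of this file */
	}
	PROC_UNLOCK(p);
#endif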

/*
 * Called in from locations that can safely check to see
 * whether we have to suspend or at least throttle for a
 * single-thread event (e.g. fork).
 *
 * Such locations include userret().
 * If the "return_instead" argument is nonzero, the thread must be able to
 * accept 0 (caller may continue), or 1 (caller must abort) as a result.
 *
 * The 'return_instead' argument tells the function if it may do a
 * thread_exit() or suspend, or whether the caller must abort and back
 * out instead.
 *
 * If the thread that set the single_threading request has set the
 * P_SINGLE_EXIT bit in the process flags then this call will never return
 * if 'return_instead' is false, but will exit.
 *
 * P_SINGLE_EXIT | return_instead == 0| return_instead != 0
 *---------------+--------------------+---------------------
 *       0       | returns 0          | returns 0 or 1
 *               | when ST ends       |  immediately
 *---------------+--------------------+---------------------
 *       1       | thread exits       | returns 1
 *               |                    |  immediately
 *
 * 0 = thread_exit() or suspension ok,
 * other = return error instead of stopping the thread.
 *
 * While a full suspension is under effect, even a single threading
 * thread would be suspended if it made this call (but it shouldn't).
 * This call should only be made from places where
 * thread_exit() would be safe as that may be the outcome unless
 * return_instead is set.
 */
int
thread_suspend_check(int return_instead)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	while (P_SHOULDSTOP(p)) {
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			KASSERT(p->p_singlethread != NULL,
			    ("singlethread not set"));
			/*
			 * The only suspension in action is a
			 * single-threading. Single threader need not stop.
			 * XXX Should be safe to access unlocked
			 * as it can only be set to be true by us.
			 */
			if (p->p_singlethread == td)
				return (0);	/* Exempt from stopping. */
		}
		if (return_instead)
			return (1);

		/*
		 * If the process is waiting for us to exit,
		 * this thread should just suicide.
		 * Assumes that P_SINGLE_EXIT implies P_STOPPED_SINGLE.
		 */
		if ((p->p_flag & P_SINGLE_EXIT) && (p->p_singlethread != td)) {
			mtx_lock_spin(&sched_lock);
			while (mtx_owned(&Giant))
				mtx_unlock(&Giant);
			thread_exit();
		}

		/*
		 * When a thread suspends, it just
		 * moves to the process's suspend queue
		 * and stays there.
		 *
		 * XXXKSE if TDF_BOUND is true
		 * it will not release its KSE which might
		 * lead to deadlock if there are not enough KSEs
		 * to complete all waiting threads.
		 * Maybe be able to 'lend' it out again.
		 * (lent kse's can not go back to userland?)
		 * and can only be lent in STOPPED state.
		 */
		mtx_lock_spin(&sched_lock);
		if ((p->p_flag & P_STOPPED_SIG) &&
		    (p->p_suspcount+1 == p->p_numthreads)) {
			mtx_unlock_spin(&sched_lock);
			PROC_LOCK(p->p_pptr);
			if ((p->p_pptr->p_procsig->ps_flag &
			    PS_NOCLDSTOP) == 0) {
				psignal(p->p_pptr, SIGCHLD);
			}
			PROC_UNLOCK(p->p_pptr);
			mtx_lock_spin(&sched_lock);
		}
		mtx_assert(&Giant, MA_NOTOWNED);
		thread_suspend_one(td);
		PROC_UNLOCK(p);
		if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) {
			if (p->p_numthreads == p->p_suspcount) {
				thread_unsuspend_one(p->p_singlethread);
			}
		}
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
	}
	return (0);
}
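
#if 0
/*
 * Illustrative usage (an added sketch, not original text): code at
 * the user boundary, e.g. on the way out of a syscall, polls with
 * return_instead == 0 and simply parks here until released:
 */
	PROC_LOCK(p);
	if (P_SHOULDSTOP(p))
		thread_suspend_check(0);	/* may suspend, or never return */
	PROC_UNLOCK(p);
#endif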

void
thread_suspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	p->p_suspcount++;
	TD_SET_SUSPENDED(td);
	TAILQ_INSERT_TAIL(&p->p_suspended, td, td_runq);
	/*
	 * Hack: If we are suspending but are on the sleep queue
	 * then we are in msleep or the cv equivalent. We
	 * want to look like we have two Inhibitors.
	 */
	if (TD_ON_SLEEPQ(td))
		TD_SET_SLEEPING(td);
}

void
thread_unsuspend_one(struct thread *td)
{
	struct proc *p = td->td_proc;

	mtx_assert(&sched_lock, MA_OWNED);
	TAILQ_REMOVE(&p->p_suspended, td, td_runq);
	TD_CLR_SUSPENDED(td);
	p->p_suspcount--;
	setrunnable(td);
}

/*
 * Allow all threads blocked by single threading to continue running.
 */
void
thread_unsuspend(struct proc *p)
{
	struct thread *td;

	mtx_assert(&sched_lock, MA_OWNED);
	PROC_LOCK_ASSERT(p, MA_OWNED);
	if (!P_SHOULDSTOP(p)) {
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
	    (p->p_numthreads == p->p_suspcount)) {
		/*
		 * Stopping everything also did the job for the single
		 * threading request. Now we've downgraded to single-threaded,
		 * let it continue.
		 */
		thread_unsuspend_one(p->p_singlethread);
	}
}

void
thread_single_end(void)
{
	struct thread *td;
	struct proc *p;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_flag &= ~P_STOPPED_SINGLE;
	p->p_singlethread = NULL;
	/*
	 * If there are other threads they may now run,
	 * unless of course there is a blanket 'stop order'
	 * on the process. The single threader must be allowed
	 * to continue however as this is a bad place to stop.
	 */
	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
		mtx_lock_spin(&sched_lock);
		while ((td = TAILQ_FIRST(&p->p_suspended))) {
			thread_unsuspend_one(td);
		}
		mtx_unlock_spin(&sched_lock);
	}
}