/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implementation of sleep queues used to hold the queue of threads blocked
 * on a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
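/*
 * Illustrative sketch (not part of this file): a minimal consumer of
 * this API.  The example_* names are hypothetical; SLEEPQ_SLEEP and
 * queue index 0 mirror what the sleep/wakeup code uses.  The waker
 * relies on sleepq_signal() dropping the chain lock itself.
 */
#if 0
static void
example_block(void *chan)
{
	sleepq_lock(chan);			/* lock the sleepq chain */
	sleepq_add(chan, NULL, "example", SLEEPQ_SLEEP, 0);
	sleepq_wait(chan);			/* block until awakened */
}

static void
example_wakeup(void *chan)
{
	sleepq_lock(chan);
	/* Resume the highest priority waiter; -1 = no priority boost. */
	sleepq_signal(chan, SLEEPQ_SLEEP, -1, 0);
}
#endif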
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sleepqueue_profiling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
/*
 * Constants for the hash table of sleep queue chains.  These constants are
 * the same ones that 4BSD (and possibly earlier versions of BSD) used.
 * Basically, we ignore the lower 8 bits of the address since most wait
 * channel pointers are aligned and only look at the next 7 bits for the
 * hash.  SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define	SC_TABLESIZE	128			/* Must be power of 2. */
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	(((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
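/*
 * For example, a hypothetical wait channel at 0xc2a4e540 hashes to
 * chain ((0xc2a4e540 >> 8) & 127) == 0x65 == 101, so it shares a
 * chain (and chain lock) with every other wait channel whose bits
 * 8-14 match.
 */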
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS];	/* (c) Blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
#ifdef INVARIANTS
	int	sq_type;			/* (c) Queue type. */
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};
struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
};

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");
#endif

static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];

static MALLOC_DEFINE(M_SLEEPQUEUE, "sleepqueue", "sleep queues");
/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(void *wchan);
static int	sleepq_check_signals(void);
static int	sleepq_check_timeout(void);
static void	sleepq_switch(void *wchan);
static void	sleepq_timeout(void *arg);
static void	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);
/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * function in kern_synch.c.
 */
void
init_sleepqueues(void)
{
#ifdef SLEEPQUEUE_PROFILING
	struct sysctl_oid *chain_oid;
	char chain_name[10];
#endif
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN);
#ifdef SLEEPQUEUE_PROFILING
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
#endif
	}
	thread0.td_sleepqueue = sleepq_alloc();
}
/*
 * Malloc and initialize a new sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{
	struct sleepqueue *sq;
	int i;

	sq = malloc(sizeof(struct sleepqueue), M_SLEEPQUEUE, M_WAITOK | M_ZERO);
	for (i = 0; i < NR_SLEEPQS; i++)
		TAILQ_INIT(&sq->sq_blocked[i]);
	LIST_INIT(&sq->sq_free);
	return (sq);
}
/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{
	int i;

	MPASS(sq != NULL);
	for (i = 0; i < NR_SLEEPQS; i++)
		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
	free(sq, M_SLEEPQUEUE);
}
/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}
/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The associated sleep queue chain must already be locked.  If no
 * queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}
/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}
/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
    int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	KASSERT(!(td->td_pflags & TDP_NOSLEEPING),
	    ("Trying to sleep, but thread marked as sleeping prohibited"));

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++)
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
		sq->sq_type = flags & SLEEPQ_TYPE;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	td->td_sleepqueue = NULL;
	mtx_lock_spin(&sched_lock);
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_flags |= TDF_SINTR;
		td->td_flags &= ~TDF_SLEEPABORT;
	}
	mtx_unlock_spin(&sched_lock);
}
/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
sleepq_set_timeout(void *wchan, int timo)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	callout_reset(&td->td_slpcallout, timo, sleepq_timeout, td);
}
/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Returns with the sleep queue and scheduler locks held.
 */
static int
sleepq_catch_signals(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	struct sigacts *ps;
	int sig, ret;

	td = curthread;
	p = curproc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
	    (void *)td, (long)p->p_pid, p->p_comm);

	MPASS(td->td_flags & TDF_SINTR);
	mtx_unlock_spin(&sc->sc_lock);

	/* See if there are any pending signals for this thread. */
	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	sig = cursig(td);
	if (sig == 0) {
		mtx_unlock(&ps->ps_mtx);
		ret = thread_suspend_check(1);
		MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
	} else {
		if (SIGISMEMBER(ps->ps_sigintr, sig))
			ret = EINTR;
		else
			ret = ERESTART;
		mtx_unlock(&ps->ps_mtx);
	}

	if (ret == 0) {
		mtx_lock_spin(&sc->sc_lock);
		/*
		 * Lock sched_lock before unlocking proc lock,
		 * without this, we could lose a race.
		 */
		mtx_lock_spin(&sched_lock);
		PROC_UNLOCK(p);
		if (!(td->td_flags & TDF_INTERRUPT))
			return (0);
		/* KSE threads tried unblocking us. */
		ret = td->td_intrval;
		mtx_unlock_spin(&sched_lock);
		MPASS(ret == EINTR || ret == ERESTART);
	} else {
		PROC_UNLOCK(p);
		mtx_lock_spin(&sc->sc_lock);
	}
	/*
	 * There were pending signals and this thread is still
	 * on the sleep queue, remove it from the sleep queue.
	 */
	sq = sleepq_lookup(wchan);
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td))
		sleepq_resume_thread(sq, td, -1);
	return (ret);
}
/*
 * Switches to another thread if we are still asleep on a sleep queue and
 * drops the lock on the sleep queue chain.  Returns with sched_lock held.
 */
static void
sleepq_switch(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		MPASS(!TD_ON_SLEEPQ(td));
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}

	/*
	 * Otherwise, actually go to sleep.
	 */
	mtx_unlock_spin(&sc->sc_lock);
	sched_sleep(td);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
}
/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;

	td = curthread;
	mtx_assert(&sched_lock, MA_OWNED);

	/*
	 * If TDF_TIMEOUT is set, we timed out.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		return (EWOULDBLOCK);
	}

	/*
	 * If TDF_TIMOFAIL is set, the timeout ran after we had
	 * already been woken up.
	 */
	if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;

	/*
	 * If callout_stop() fails, then the timeout is running on
	 * another CPU, so synchronize with it to avoid having it
	 * accidentally wake up a subsequent sleep.
	 */
	else if (callout_stop(&td->td_slpcallout) == 0) {
		td->td_flags |= TDF_TIMEOUT;
		TD_SET_SLEEPING(td);
		mi_switch(SW_INVOL, NULL);
	}
	return (0);
}
/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	mtx_assert(&sched_lock, MA_OWNED);

	/* We are no longer in an interruptible sleep. */
	if (td->td_flags & TDF_SINTR)
		td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_SLEEPABORT) {
		td->td_flags &= ~TDF_SLEEPABORT;
		return (td->td_intrval);
	}

	if (td->td_flags & TDF_INTERRUPT)
		return (td->td_intrval);

	return (0);
}
/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan)
{

	MPASS(!(curthread->td_flags & TDF_SINTR));
	mtx_lock_spin(&sched_lock);
	sleepq_switch(wchan);
	mtx_unlock_spin(&sched_lock);
}
/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan)
{
	int rcatch;
	int rval;

	rcatch = sleepq_catch_signals(wchan);
	if (rcatch == 0)
		sleepq_switch(wchan);
	else
		sleepq_release(wchan);
	rval = sleepq_check_signals();
	mtx_unlock_spin(&sched_lock);
	if (rcatch)
		return (rcatch);
	return (rval);
}
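/*
 * Illustrative sketch (hypothetical helper): an interruptible sleep
 * must pass SLEEPQ_INTERRUPTIBLE to sleepq_add() so that TDF_SINTR is
 * set; the caller then sees 0, EINTR, or ERESTART.
 */
#if 0
static int
example_block_sig(void *chan)
{
	sleepq_lock(chan);
	sleepq_add(chan, NULL, "exsig", SLEEPQ_SLEEP | SLEEPQ_INTERRUPTIBLE, 0);
	return (sleepq_wait_sig(chan));
}
#endif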
/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan)
{
	int rval;

	MPASS(!(curthread->td_flags & TDF_SINTR));
	mtx_lock_spin(&sched_lock);
	sleepq_switch(wchan);
	rval = sleepq_check_timeout();
	mtx_unlock_spin(&sched_lock);

	return (rval);
}
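/*
 * Illustrative sketch (hypothetical helper): a bounded sleep arms the
 * per-thread callout with sleepq_set_timeout() before blocking;
 * EWOULDBLOCK indicates that the timeout fired first.
 */
#if 0
static int
example_block_timed(void *chan, int timo)
{
	sleepq_lock(chan);
	sleepq_add(chan, NULL, "extimo", SLEEPQ_SLEEP, 0);
	sleepq_set_timeout(chan, timo);
	return (sleepq_timedwait(chan));	/* 0 or EWOULDBLOCK */
}
#endif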
/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan);
	if (rcatch == 0)
		sleepq_switch(wchan);
	else
		sleepq_release(wchan);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	mtx_unlock_spin(&sched_lock);
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}
/*
 * Removes a thread from a sleep queue and makes it runnable.
 */
static void
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	mtx_assert(&sched_lock, MA_OWNED);

	/* Remove the thread from the queue. */
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else {
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
		LIST_REMOVE(td->td_sleepqueue, sq_hash);
	}

	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	td->td_flags &= ~TDF_SINTR;

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on
	 * its proc lock to check signals.  It doesn't hurt to clear
	 * the sleeping flag if it isn't set though, so we just always
	 * do it.  However, we can't assert that it is set.
	 */
	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_proc->p_comm);
	TD_CLR_SLEEPING(td);

	/* Adjust priority if requested. */
	MPASS(pri == -1 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != -1 && td->td_priority > pri)
		sched_prio(td, pri);
	setrunnable(td);
}
/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
void
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *besttd;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return;
	}
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/*
	 * Find the highest priority thread on the queue.  If there is a
	 * tie, use the thread that first appears in the queue as it has
	 * been sleeping the longest since threads are always added to
	 * the tail of sleep queues.
	 */
	besttd = NULL;
	TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
		if (besttd == NULL || td->td_priority < besttd->td_priority)
			besttd = td;
	}
	MPASS(besttd != NULL);
	mtx_lock_spin(&sched_lock);
	sleepq_resume_thread(sq, besttd, pri);
	mtx_unlock_spin(&sched_lock);
	sleepq_release(wchan);
}
/*
 * Resume all threads sleeping on a specified wait channel.
 */
void
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return;
	}
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/* Resume all blocked threads on the sleep queue. */
	mtx_lock_spin(&sched_lock);
	while (!TAILQ_EMPTY(&sq->sq_blocked[queue]))
		sleepq_resume_thread(sq, TAILQ_FIRST(&sq->sq_blocked[queue]),
		    pri);
	mtx_unlock_spin(&sched_lock);
	sleepq_release(wchan);
}
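/*
 * Illustrative sketch (hypothetical helper): condition variables wake
 * all waiters with sleepq_broadcast(); the SLEEPQ_CONDVAR type must
 * match the type the waiters passed to sleepq_add(), or the KASSERT
 * above fires.
 */
#if 0
static void
example_wake_all(void *chan)
{
	sleepq_lock(chan);
	/* Resumes every waiter on queue 0 and unlocks the chain. */
	sleepq_broadcast(chan, SLEEPQ_CONDVAR, -1, 0);
}
#endif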
/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;

	td = arg;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);

	/*
	 * First, see if the thread is asleep and get the wait channel if
	 * it is.
	 */
	mtx_lock_spin(&sched_lock);
	if (TD_ON_SLEEPQ(td)) {
		wchan = td->td_wchan;
		mtx_unlock_spin(&sched_lock);
		sleepq_lock(wchan);
		sq = sleepq_lookup(wchan);
		mtx_lock_spin(&sched_lock);
	} else {
		wchan = NULL;
		sq = NULL;
	}

	/*
	 * At this point, if the thread is still on the sleep queue,
	 * we have that sleep queue locked as it cannot migrate sleep
	 * queues while we dropped sched_lock.  If it had resumed and
	 * was on another CPU while the lock was dropped, it would have
	 * seen that TDF_TIMEOUT and TDF_TIMOFAIL are clear and the
	 * call to callout_stop() to stop this routine would have failed
	 * meaning that it would have already set TDF_TIMEOUT to
	 * synchronize with this function.
	 */
	if (TD_ON_SLEEPQ(td)) {
		MPASS(td->td_wchan == wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		sleepq_resume_thread(sq, td, -1);
		mtx_unlock_spin(&sched_lock);
		sleepq_release(wchan);
		return;
	} else if (wchan != NULL)
		sleepq_release(wchan);

	/*
	 * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
	 * then the other thread has already yielded to us, so clear
	 * the flag and resume it.  If TDF_TIMEOUT is not set, then we
	 * know that the other thread is not on a sleep queue, but it
	 * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
	 * to let it know that the timeout has already run and doesn't
	 * need to be canceled.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_IS_SLEEPING(td));
		td->td_flags &= ~TDF_TIMEOUT;
		TD_CLR_SLEEPING(td);
		setrunnable(td);
	} else
		td->td_flags |= TDF_TIMOFAIL;
	mtx_unlock_spin(&sched_lock);
}
/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel, if it is not, then
	 * bail.
	 */
	MPASS(wchan != NULL);
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	mtx_lock_spin(&sched_lock);
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		mtx_unlock_spin(&sched_lock);
		sleepq_release(wchan);
		return;
	}
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	sleepq_resume_thread(sq, td, -1);
	sleepq_release(wchan);
	mtx_unlock_spin(&sched_lock);
}
/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 *
 * XXX: What in the world does the comment below mean?
 * Also, whatever the signal code does...
 */
void
sleepq_abort(struct thread *td, int intrval)
{
	void *wchan;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return;

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm);
	wchan = td->td_wchan;
	if (wchan != NULL) {
		td->td_intrval = intrval;
		td->td_flags |= TDF_SLEEPABORT;
	}
	mtx_unlock_spin(&sched_lock);
	sleepq_remove(td, wchan);
	mtx_lock_spin(&sched_lock);
}
#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated by the passed in address.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}
	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
#ifdef INVARIANTS
	db_printf("Queue type: %d\n", sq->sq_type);
	if (sq->sq_lock) {
		lock = sq->sq_lock;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	for (i = 0; i < NR_SLEEPQS; i++) {
		db_printf("\nQueue[%d]:\n", i);
		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
			db_printf("\tempty\n");
		else
			TAILQ_FOREACH(td, &sq->sq_blocked[i], td_slpq)
				db_printf("\t%p (tid %d, pid %d, \"%s\")\n",
				    td, td->td_tid, td->td_proc->p_pid,
				    td->td_name[0] != '\0' ? td->td_name :
				    td->td_proc->p_comm);
	}
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SET(sleepqueue, db_show_sleepqueue, db_show_cmd_set, 0, NULL);
#endif