/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implementation of sleep queues used to hold the queue of threads blocked
 * on a wait channel.  Sleep queues are different from turnstiles in that
 * wait channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
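
/*
 * To make the flow concrete, a typical voluntary-sleep consumer follows
 * a pattern similar to the sketch below.  This is a simplified,
 * hypothetical rendition of what _sleep() and wakeup_one() in
 * kern_synch.c do; priority handling, flags, and error paths are
 * omitted.  'wchan' is any stable kernel address and 'm' is the mutex
 * protecting the sleep condition:
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, &m->lock_object, "examp", SLEEPQ_SLEEP, 0);
 *	mtx_unlock(m);
 *	sleepq_wait(wchan, 0);
 *
 * and on the wakeup side:
 *
 *	sleepq_lock(wchan);
 *	wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(wchan);
 *	if (wakeup_swapper)
 *		kick_proc0();
 */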
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"
#include "opt_sched.h"
#include "opt_sdt.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
/*
 * Constants for the hash table of sleep queue chains.
 * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define	SC_TABLESIZE	256			/* Must be power of 2. */
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
			    SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
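
/*
 * For example (illustrative only): with SC_SHIFT of 8 and SC_MASK of
 * 0xff, a wait channel address of 0xabcd hashes to
 * ((0xabcd >> 8) ^ 0xabcd) & 0xff = (0xab ^ 0xcd) & 0xff = 0x66,
 * so SC_LOOKUP() yields &sleepq_chains[0x66].
 */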
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS];	/* (c) Blocked threads. */
	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
	int	sq_type;			/* (c) Queue type. */
#ifdef INVARIANTS
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};
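
/*
 * One possible arrangement, sketched for illustration: two sleep queues
 * hang off a chain bucket via their sq_hash entries, and extra waiters
 * on wait channel A have donated their queues to the first queue's free
 * list:
 *
 *	sleepq_chains[i].sc_queues -> sq(A) -> sq(B)
 *	                               |
 *	                               +- sq_free: sq -> sq
 */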
struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
};
#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");

static void	sleepq_profile(const char *wmesg);
static int	prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;
/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(void *wchan, int pri);
static int	sleepq_check_signals(void);
static int	sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);
static void	sleepq_switch(void *wchan, int pri);
static void	sleepq_timeout(void *arg);

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);
/*
 * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes.
 * Note that it must happen after sleepinit() has been fully executed, so
 * it must happen after SI_SUB_KMEM SYSINIT() subsystem setup.
 */
#ifdef SLEEPQUEUE_PROFILING
static void
init_sleepqueue_profiling(void)
{
	char chain_name[10];
	struct sysctl_oid *chain_oid;
	u_int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%u", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
	}
}

SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_sleepqueue_profiling, NULL);
#endif
/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * routine.
 */
void
init_sleepqueues(void)
{
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN | MTX_RECURSE);
	}
	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

	thread0.td_sleepqueue = sleepq_alloc();
}
/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	uma_zfree(sleepq_zone, sq);
}
/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}
/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table locking the associated sleep queue chain.  If no queue is found in
 * the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}
/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}
/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
    int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	KASSERT(td->td_no_sleeping == 0,
	    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
	    __func__, td, wchan));

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++) {
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
			KASSERT(sq->sq_blockedcnt[i] == 0,
			    ("thread's sleep queue %d count mismatches", i));
		}
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
		sq->sq_type = flags & SLEEPQ_TYPE;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	thread_lock(td);
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	sq->sq_blockedcnt[queue]++;
	td->td_sleepqueue = NULL;
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_flags |= TDF_SINTR;
		td->td_flags &= ~TDF_SLEEPABORT;
	}
	thread_unlock(td);
}
/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
    int flags)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	callout_reset_sbt_on(&td->td_slpcallout, sbt, pr,
	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_DIRECT_EXEC);
}
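
/*
 * A hypothetical timed sleep, sketched for illustration (compare the
 * timeout path of _sleep(); the chain lock is held across both calls
 * and the interlock is dropped before blocking):
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, &m->lock_object, "examp", SLEEPQ_SLEEP, 0);
 *	sleepq_set_timeout_sbt(wchan, SBT_1S, 0, C_HARDCLOCK);
 *	mtx_unlock(m);
 *	error = sleepq_timedwait(wchan, 0);	(EWOULDBLOCK on timeout)
 */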
/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(void *wchan, int queue)
{
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	return (sq->sq_blockedcnt[queue]);
}
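
/*
 * For instance, a hypothetical caller could skip a broadcast when the
 * queue is empty (the chain lock pins the count while it is held):
 *
 *	sleepq_lock(wchan);
 *	if (sleepq_sleepcnt(wchan, 0) > 0)
 *		wakeup_swapper = sleepq_broadcast(wchan, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(wchan);
 */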
/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Enters and exits with the thread lock held.  Thread lock
 * may have transitioned from the sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	struct sigacts *ps;
	int sig, ret;

	td = curthread;
	p = curproc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	if ((td->td_pflags & TDP_WAKEUP) != 0) {
		td->td_pflags &= ~TDP_WAKEUP;
		ret = EINTR;
		thread_lock(td);
		goto out;
	}

	/*
	 * See if there are any pending signals for this thread.  If not
	 * we can switch immediately.  Otherwise do the signal processing
	 * directly.
	 */
	thread_lock(td);
	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
	thread_unlock(td);
	mtx_unlock_spin(&sc->sc_lock);
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
		(void *)td, (long)p->p_pid, td->td_name);
	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	sig = cursig(td);
	if (sig == 0) {
		mtx_unlock(&ps->ps_mtx);
		ret = thread_suspend_check(1);
		MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
	} else {
		if (SIGISMEMBER(ps->ps_sigintr, sig))
			ret = EINTR;
		else
			ret = ERESTART;
		mtx_unlock(&ps->ps_mtx);
	}
	/*
	 * Lock the per-process spinlock prior to dropping the PROC_LOCK
	 * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
	 * thread_lock() are currently held in tdsendsignal().
	 */
	PROC_SLOCK(p);
	mtx_lock_spin(&sc->sc_lock);
	PROC_UNLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);
	if (ret == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
out:
	/*
	 * There were pending signals and this thread is still
	 * on the sleep queue, remove it from the sleep queue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
	}
	mtx_unlock_spin(&sc->sc_lock);
	MPASS(td->td_lock != &sc->sc_lock);
	return (ret);
}
/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with thread lock.
 */
static void
sleepq_switch(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}

	/*
	 * If TDF_TIMEOUT is set, then our sleep has been timed out
	 * already but we are still on the sleep queue, so dequeue the
	 * thread and return.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_ON_SLEEPQ(td));
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}
#ifdef SLEEPQUEUE_PROFILING
	if (prof_enabled)
		sleepq_profile(td->td_wmesg);
#endif
	MPASS(td->td_sleepqueue == NULL);
	sched_sleep(td, pri);
	thread_lock_set(td, &sc->sc_lock);
	SDT_PROBE0(sched, , , sleep);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}
/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If TDF_TIMEOUT is set, we timed out.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		return (EWOULDBLOCK);
	}

	/*
	 * If TDF_TIMOFAIL is set, the timeout ran after we had
	 * already been woken up.
	 */
	if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;

	/*
	 * If callout_stop() fails, then the timeout is running on
	 * another CPU, so synchronize with it to avoid having it
	 * accidentally wake up a subsequent sleep.
	 */
	else if (callout_stop(&td->td_slpcallout) == 0) {
		td->td_flags |= TDF_TIMEOUT;
		TD_SET_SLEEPING(td);
		mi_switch(SW_INVOL | SWT_SLEEPQTIMO, NULL);
	}
	return (0);
}
/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/* We are no longer in an interruptible sleep. */
	if (td->td_flags & TDF_SINTR)
		td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_SLEEPABORT) {
		td->td_flags &= ~TDF_SLEEPABORT;
		return (td->td_intrval);
	}

	return (0);
}
/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	thread_unlock(td);
}
/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan, int pri)
{
	int rcatch;
	int rval;

	rcatch = sleepq_catch_signals(wchan, pri);
	rval = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	return (rval);
}
/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan, int pri)
{
	struct thread *td;
	int rval;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	rval = sleepq_check_timeout();
	thread_unlock(td);

	return (rval);
}
/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}
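
/*
 * Taken together, the four wait routines above correspond roughly to
 * the public sleep API as dispatched by _sleep() (simplified view):
 *
 *	no timeout, no PCATCH	-> sleepq_wait()
 *	no timeout, PCATCH	-> sleepq_wait_sig()
 *	timeout, no PCATCH	-> sleepq_timedwait()
 *	timeout, PCATCH		-> sleepq_timedwait_sig()
 */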
/*
 * Returns the type of sleepqueue given a waitchannel.
 */
int
sleepq_type(void *wchan)
{
	struct sleepqueue *sq;
	int type;

	MPASS(wchan != NULL);

	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return (-1);
	}
	type = sq->sq_type;
	sleepq_release(wchan);
	return (type);
}
/*
 * Removes a thread from a sleep queue and makes it
 * runnable.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

	/* Remove the thread from the queue. */
	sq->sq_blockedcnt[td->td_sqqueue]--;
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	td->td_flags &= ~TDF_SINTR;

	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_name);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri &&
	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on its
	 * proc lock to check signals.  There's no need to mark the
	 * thread runnable in that case.
	 */
	if (TD_IS_SLEEPING(td)) {
		TD_CLR_SLEEPING(td);
		return (setrunnable(td));
	}
	return (0);
}
#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
	struct sleepqueue *sq;
	int i;

	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
		MPASS(sq->sq_blockedcnt[i] == 0);
	}
}
#endif
/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
	struct sleepqueue *sq;
	int i;

	bzero(mem, size);
	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		TAILQ_INIT(&sq->sq_blocked[i]);
		sq->sq_blockedcnt[i] = 0;
	}
	LIST_INIT(&sq->sq_free);
	return (0);
}
/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *besttd;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/*
	 * Find the highest priority thread on the queue.  If there is a
	 * tie, use the thread that first appears in the queue as it has
	 * been sleeping the longest since threads are always added to
	 * the tail of sleep queues.
	 */
	besttd = NULL;
	TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
		if (besttd == NULL || td->td_priority < besttd->td_priority)
			besttd = td;
	}
	MPASS(besttd != NULL);
	thread_lock(besttd);
	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
	thread_unlock(besttd);
	return (wakeup_swapper);
}
/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *tdn;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/* Resume all blocked threads on the sleep queue. */
	wakeup_swapper = 0;
	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
		thread_lock(td);
		if (sleepq_resume_thread(sq, td, pri))
			wakeup_swapper = 1;
		thread_unlock(td);
	}
	return (wakeup_swapper);
}
/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;
	int wakeup_swapper;

	td = arg;
	wakeup_swapper = 0;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

	/*
	 * First, see if the thread is asleep and get the wait channel if
	 * it is.
	 */
	thread_lock(td);
	if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
		wchan = td->td_wchan;
		sc = SC_LOOKUP(wchan);
		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
		sq = sleepq_lookup(wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		wakeup_swapper = sleepq_resume_thread(sq, td, 0);
		thread_unlock(td);
		if (wakeup_swapper)
			kick_proc0();
		return;
	}

	/*
	 * If the thread is on the SLEEPQ but isn't sleeping yet, it
	 * can either be on another CPU in between sleepq_add() and
	 * one of the sleepq_*wait*() routines or it can be in
	 * sleepq_catch_signals().
	 */
	if (TD_ON_SLEEPQ(td)) {
		td->td_flags |= TDF_TIMEOUT;
		thread_unlock(td);
		return;
	}

	/*
	 * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
	 * then the other thread has already yielded to us, so clear
	 * the flag and resume it.  If TDF_TIMEOUT is not set, then we
	 * know that the other thread is not on a sleep queue, but it
	 * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
	 * to let it know that the timeout has already run and doesn't
	 * need to be canceled.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_IS_SLEEPING(td));
		td->td_flags &= ~TDF_TIMEOUT;
		TD_CLR_SLEEPING(td);
		wakeup_swapper = setrunnable(td);
	} else
		td->td_flags |= TDF_TIMOFAIL;
	thread_unlock(td);
	if (wakeup_swapper)
		kick_proc0();
}
/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;
	int wakeup_swapper;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel, if it is not, then
	 * bail.
	 */
	MPASS(wchan != NULL);
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	/*
	 * We can not lock the thread here as it may be sleeping on a
	 * different sleepq.  However, holding the sleepq lock for this
	 * wchan can guarantee that we do not miss a wakeup for this
	 * channel.  The asserts below will catch any false positives.
	 */
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		sleepq_release(wchan);
		return;
	}
	/* Thread is asleep on sleep queue sq, so wake it up. */
	thread_lock(td);
	MPASS(sq != NULL);
	MPASS(td->td_wchan == wchan);
	wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	thread_unlock(td);
	sleepq_release(wchan);
	if (wakeup_swapper)
		kick_proc0();
}
/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 */
int
sleepq_abort(struct thread *td, int intrval)
{
	struct sleepqueue *sq;
	void *wchan;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return (0);

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
	td->td_intrval = intrval;
	td->td_flags |= TDF_SLEEPABORT;
	/*
	 * If the thread has not slept yet it will find the signal in
	 * sleepq_catch_signals() and call sleepq_resume_thread.  Otherwise
	 * we have to do it here.
	 */
	if (!TD_IS_SLEEPING(td))
		return (0);
	wchan = td->td_wchan;
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	return (sleepq_resume_thread(sq, td, 0));
}
#ifdef SLEEPQUEUE_PROFILING
#define	SLEEPQ_PROF_LOCATIONS	1024
#define	SLEEPQ_SBUFSIZE		512
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;
	const char	*sp_wmesg;
	long		sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);
static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
	return;
}
static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}
static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}
static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}
static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
	sbuf_printf(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
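
/*
 * With SLEEPQUEUE_PROFILING compiled in, the knobs above can be driven
 * from userland, e.g. (illustrative sysctl(8) session):
 *
 *	# sysctl debug.sleepq.enable=1
 *	# sysctl debug.sleepq.stats
 *	# sysctl debug.sleepq.reset=1
 */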
#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated by the user.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}

	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
	db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
	if (sq->sq_lock) {
		lock = sq->sq_lock;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	for (i = 0; i < NR_SLEEPQS; i++) {
		db_printf("\nQueue[%d]:\n", i);
		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
			db_printf("\tempty\n");
		else
			TAILQ_FOREACH(td, &sq->sq_blocked[i], td_slpq) {
				db_printf("\t%p (tid %d, pid %d, \"%s\")\n",
				    td, td->td_tid, td->td_proc->p_pid,
				    td->td_name);
			}
		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
	}
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif