/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implementation of sleep queues used to hold the queue of threads blocked
 * on a wait channel.  Sleep queues are different from turnstiles in that
 * wait channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
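
/*
 * Typical use of this API, as a rough sketch (illustrative only, not code
 * compiled in this file; the wmesg "examp" and the caller's 'lock' are
 * hypothetical, and error handling is elided).  A sleeper blocks on a wait
 * channel and a waker resumes the highest priority sleeper:
 *
 *	// Sleeper
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, &lock->lock_object, "examp", SLEEPQ_SLEEP, 0);
 *	sleepq_wait(wchan, 0);		// returns with the chain unlocked
 *
 *	// Waker; sleepq_signal() reports whether the swapper needs a kick
 *	sleepq_lock(wchan);
 *	wakeup_swapper = sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(wchan);
 *	if (wakeup_swapper)
 *		kick_proc0();
 */
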
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"
#include "opt_sched.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <machine/atomic.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of sleep queue chains.
 * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define	SC_TABLESIZE	256
CTASSERT(powerof2(SC_TABLESIZE));
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
			    SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
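
/*
 * For example (illustrative arithmetic only): a wait channel at address
 * 0x1234 hashes to chain ((0x1234 >> SC_SHIFT) ^ 0x1234) & SC_MASK =
 * (0x12 ^ 0x1234) & 0xff = 0x26.  SC_HASH() folds the high bits of the
 * pointer into the low bits so that nearby wait channel addresses tend to
 * spread across different chains.
 */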

/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS];	/* (c) Blocked threads. */
	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
	int	sq_type;			/* (c) Queue type. */
#ifdef INVARIANTS
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};
struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
} __aligned(CACHE_LINE_SIZE);
#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");

static void	sleepq_profile(const char *wmesg);
static int	prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;
/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(void *wchan, int pri);
static int	sleepq_check_signals(void);
static int	sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);
static void	sleepq_switch(void *wchan, int pri);
static void	sleepq_timeout(void *arg);

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);
/*
 * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes.
 * Note that it must happen after sleepinit() has been fully executed, so
 * it must happen after SI_SUB_KMEM SYSINIT() subsystem setup.
 */
#ifdef SLEEPQUEUE_PROFILING
static void
init_sleepqueue_profiling(void)
{
	char chain_name[10];
	struct sysctl_oid *chain_oid;
	u_int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%u", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
	}
}

SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_sleepqueue_profiling, NULL);
#endif
/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * routine.
 */
void
init_sleepqueues(void)
{
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN | MTX_RECURSE);
	}
	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

	thread0.td_sleepqueue = sleepq_alloc();
}
/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}
/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	uma_zfree(sleepq_zone, sq);
}
/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}
/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The associated sleep queue chain must already be locked.  If no
 * queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}
/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}
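
/*
 * The three routines above are meant to be used together.  A sketch of
 * the pattern (illustrative only): the chain lock must be held across the
 * lookup and any use of the queue so that it cannot be torn down while it
 * is being examined.
 *
 *	sleepq_lock(wchan);
 *	sq = sleepq_lookup(wchan);
 *	if (sq != NULL)
 *		... examine sq while the chain stays locked ...
 *	sleepq_release(wchan);
 */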

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
    int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	KASSERT(td->td_no_sleeping == 0,
	    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
	    __func__, td, wchan));

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++) {
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
			KASSERT(sq->sq_blockedcnt[i] == 0,
			    ("thread's sleep queue %d count mismatches", i));
		}
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
		sq->sq_type = flags & SLEEPQ_TYPE;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	thread_lock(td);
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	sq->sq_blockedcnt[queue]++;
	td->td_sleepqueue = NULL;
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_flags |= TDF_SINTR;
		td->td_flags &= ~TDF_SLEEPABORT;
	}
	thread_unlock(td);
}
/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after the timeout expires if the thread has not already
 * been awakened.
 */
void
sleepq_set_timeout_sbt(void *wchan, sbintime_t sbt, sbintime_t pr,
    int flags)
{
	struct sleepqueue_chain *sc;
	struct thread *td;
	sbintime_t pr1;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	if (cold && td == &thread0)
		panic("timed sleep before timers are working");
	KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx",
	    td->td_tid, td, (uintmax_t)td->td_sleeptimo));
	thread_lock(td);
	callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
	thread_unlock(td);
	callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC |
	    C_DIRECT_EXEC);
}
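
/*
 * Illustrative sketch of a timed sleep using the routine above (not code
 * compiled here; 'lock' and 'wchan' belong to a hypothetical caller):
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, &lock->lock_object, "timed", SLEEPQ_SLEEP, 0);
 *	sleepq_set_timeout_sbt(wchan, SBT_1S, 0, 0);	// wake after ~1s
 *	error = sleepq_timedwait(wchan, 0);
 *	// error is 0 if awakened normally, EWOULDBLOCK on timeout
 */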

/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(void *wchan, int queue)
{
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	return (sq->sq_blockedcnt[queue]);
}
/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Enters and exits with the thread lock held.  Thread lock
 * may have transitioned from the sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	struct sigacts *ps;
	int sig, ret;

	ret = 0;
	td = curthread;
	p = curproc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	if ((td->td_pflags & TDP_WAKEUP) != 0) {
		td->td_pflags &= ~TDP_WAKEUP;
		ret = EINTR;
		thread_lock(td);
		goto out;
	}

	/*
	 * See if there are any pending signals or suspension requests for this
	 * thread.  If not, we can switch immediately.
	 */
	thread_lock(td);
	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) != 0) {
		thread_unlock(td);
		mtx_unlock_spin(&sc->sc_lock);
		CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
			(void *)td, (long)p->p_pid, td->td_name);
		PROC_LOCK(p);
		/*
		 * Check for suspension first.  Checking for signals and then
		 * suspending could result in a missed signal, since a signal
		 * can be delivered while this thread is suspended.
		 */
		if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
			ret = thread_suspend_check(1);
			MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
			if (ret != 0) {
				PROC_UNLOCK(p);
				mtx_lock_spin(&sc->sc_lock);
				thread_lock(td);
				goto out;
			}
		}
		if ((td->td_flags & TDF_NEEDSIGCHK) != 0) {
			ps = p->p_sigacts;
			mtx_lock(&ps->ps_mtx);
			sig = cursig(td);
			if (sig == -1) {
				mtx_unlock(&ps->ps_mtx);
				KASSERT((td->td_flags & TDF_SBDRY) != 0,
				    ("lost TDF_SBDRY"));
				KASSERT(TD_SBDRY_INTR(td),
				    ("lost TDF_SERESTART or TDF_SEINTR"));
				KASSERT((td->td_flags &
				    (TDF_SEINTR | TDF_SERESTART)) !=
				    (TDF_SEINTR | TDF_SERESTART),
				    ("both TDF_SEINTR and TDF_SERESTART"));
				ret = TD_SBDRY_ERRNO(td);
			} else if (sig != 0) {
				ret = SIGISMEMBER(ps->ps_sigintr, sig) ?
				    EINTR : ERESTART;
				mtx_unlock(&ps->ps_mtx);
			} else {
				mtx_unlock(&ps->ps_mtx);
			}
		}
		/*
		 * Lock the per-process spinlock prior to dropping the PROC_LOCK
		 * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
		 * thread_lock() are currently held in tdsendsignal().
		 */
		PROC_SLOCK(p);
		mtx_lock_spin(&sc->sc_lock);
		PROC_UNLOCK(p);
		thread_lock(td);
		PROC_SUNLOCK(p);
	}
	if (ret == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
out:
	/*
	 * There were pending signals and this thread is still
	 * on the sleep queue, remove it from the sleep queue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
	}
	mtx_unlock_spin(&sc->sc_lock);
	MPASS(td->td_lock != &sc->sc_lock);
	return (ret);
}
/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with thread lock.
 */
static void
sleepq_switch(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	bool rtc_changed;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}

	/*
	 * If TDF_TIMEOUT is set, then our sleep has been timed out
	 * already but we are still on the sleep queue, so dequeue the
	 * thread and return.
	 *
	 * Do the same if the real-time clock has been adjusted since this
	 * thread calculated its timeout based on that clock.  This handles
	 * the following race:
	 * - The Ts thread needs to sleep until an absolute real-clock time.
	 *   It copies the global rtc_generation into curthread->td_rtcgen,
	 *   reads the RTC, and calculates a sleep duration based on that time.
	 *   See umtxq_sleep() for an example.
	 * - The Tc thread adjusts the RTC, bumps rtc_generation, and wakes
	 *   threads that are sleeping until an absolute real-clock time.
	 *   See tc_setclock() and the POSIX specification of clock_settime().
	 * - Ts reaches the code below.  It holds the sleepqueue chain lock,
	 *   so Tc has finished waking, so this thread must test td_rtcgen.
	 *   (The declaration of td_rtcgen refers to this comment.)
	 */
	rtc_changed = td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation;
	if ((td->td_flags & TDF_TIMEOUT) || rtc_changed) {
		if (rtc_changed) {
			td->td_rtcgen = 0;
		}
		MPASS(TD_ON_SLEEPQ(td));
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}
#ifdef SLEEPQUEUE_PROFILING
	if (prof_enabled)
		sleepq_profile(td->td_wmesg);
#endif
	MPASS(td->td_sleepqueue == NULL);
	sched_sleep(td, pri);
	thread_lock_set(td, &sc->sc_lock);
	SDT_PROBE0(sched, , , sleep);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}
/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;
	int res;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If TDF_TIMEOUT is set, we timed out.  But recheck
	 * td_sleeptimo anyway.
	 */
	res = 0;
	if (td->td_sleeptimo != 0) {
		if (td->td_sleeptimo <= sbinuptime())
			res = EWOULDBLOCK;
		td->td_sleeptimo = 0;
	}
	if (td->td_flags & TDF_TIMEOUT)
		td->td_flags &= ~TDF_TIMEOUT;
	else
		/*
		 * We ignore the situation where timeout subsystem was
		 * unable to stop our callout.  The struct thread is
		 * type-stable, the callout will use the correct
		 * memory when running.  The checks of the
		 * td_sleeptimo value in this function and in
		 * sleepq_timeout() ensure that the thread does not
		 * get spurious wakeups, even if the callout was reset
		 * or thread stopped before callout scheduled wakeup.
		 */
		callout_stop(&td->td_slpcallout);
	return (res);
}
/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/* We are no longer in an interruptible sleep. */
	if (td->td_flags & TDF_SINTR)
		td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_SLEEPABORT) {
		td->td_flags &= ~TDF_SLEEPABORT;
		return (td->td_intrval);
	}

	return (0);
}
/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	thread_unlock(td);
}
/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan, int pri)
{
	int rcatch;
	int rval;

	rcatch = sleepq_catch_signals(wchan, pri);
	rval = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	return (rval);
}
/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan, int pri)
{
	struct thread *td;
	int rval;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	rval = sleepq_check_timeout();
	thread_unlock(td);

	return (rval);
}
/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}
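
/*
 * Caller-side handling, as a sketch: a catch result (signal or suspension)
 * takes precedence over a timeout, so EINTR/ERESTART win over EWOULDBLOCK:
 *
 *	error = sleepq_timedwait_sig(wchan, 0);
 *	if (error == EWOULDBLOCK)
 *		... timed out ...
 *	else if (error == EINTR || error == ERESTART)
 *		... interrupted by a signal ...
 */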

/*
 * Returns the type of sleepqueue given a waitchannel.
 */
int
sleepq_type(void *wchan)
{
	struct sleepqueue *sq;
	int type;

	MPASS(wchan != NULL);

	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return (-1);
	}
	type = sq->sq_type;
	sleepq_release(wchan);
	return (type);
}
/*
 * Removes a thread from a sleep queue and makes it
 * runnable.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

	/* Remove the thread from the queue. */
	sq->sq_blockedcnt[td->td_sqqueue]--;
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	td->td_flags &= ~TDF_SINTR;

	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_name);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri &&
	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on its
	 * proc lock to check signals.  There's no need to mark the
	 * thread runnable in that case.
	 */
	if (TD_IS_SLEEPING(td)) {
		TD_CLR_SLEEPING(td);
		return (setrunnable(td));
	}
	return (0);
}
#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
	struct sleepqueue *sq;
	int i;

	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
		MPASS(sq->sq_blockedcnt[i] == 0);
	}
}
#endif
/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
	struct sleepqueue *sq;
	int i;

	bzero(mem, size);
	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		TAILQ_INIT(&sq->sq_blocked[i]);
		sq->sq_blockedcnt[i] = 0;
	}
	LIST_INIT(&sq->sq_free);
	return (0);
}
/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *besttd;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/*
	 * Find the highest priority thread on the queue.  If there is a
	 * tie, use the thread that first appears in the queue as it has
	 * been sleeping the longest since threads are always added to
	 * the tail of sleep queues.
	 */
	besttd = TAILQ_FIRST(&sq->sq_blocked[queue]);
	TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
		if (td->td_priority < besttd->td_priority)
			besttd = td;
	}
	MPASS(besttd != NULL);
	thread_lock(besttd);
	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
	thread_unlock(besttd);
	return (wakeup_swapper);
}
static bool
match_any(struct thread *td __unused)
{

	return (true);
}
/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	return (sleepq_remove_matching(sq, queue, match_any, pri));
}
/*
 * Resume threads on the sleep queue that match the given predicate.
 */
int
sleepq_remove_matching(struct sleepqueue *sq, int queue,
    bool (*matches)(struct thread *), int pri)
{
	struct thread *td, *tdn;
	int wakeup_swapper;

	/*
	 * The last thread will be given ownership of sq and may
	 * re-enqueue itself before sleepq_resume_thread() returns,
	 * so we must cache the "next" queue item at the beginning
	 * of the final iteration.
	 */
	wakeup_swapper = 0;
	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
		thread_lock(td);
		if (matches(td))
			wakeup_swapper |= sleepq_resume_thread(sq, td, pri);
		thread_unlock(td);
	}

	return (wakeup_swapper);
}
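
/*
 * A hypothetical caller-defined predicate (illustrative only; the real
 * in-tree user is sleepq_chains_remove_matching() below): resume only the
 * threads of one process, recorded in a file-scope variable because the
 * predicate takes no extra argument.
 *
 *	static struct proc *match_proc_target;
 *
 *	static bool
 *	match_proc(struct thread *td)
 *	{
 *		return (td->td_proc == match_proc_target);
 *	}
 */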

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;
	int wakeup_swapper;

	td = arg;
	wakeup_swapper = 0;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

	thread_lock(td);

	if (td->td_sleeptimo > sbinuptime() || td->td_sleeptimo == 0) {
		/*
		 * The thread does not want a timeout (yet).
		 */
	} else if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
		/*
		 * See if the thread is asleep and get the wait
		 * channel if it is.
		 */
		wchan = td->td_wchan;
		sc = SC_LOOKUP(wchan);
		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
		sq = sleepq_lookup(wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	} else if (TD_ON_SLEEPQ(td)) {
		/*
		 * If the thread is on the SLEEPQ but isn't sleeping
		 * yet, it can either be on another CPU in between
		 * sleepq_add() and one of the sleepq_*wait*()
		 * routines or it can be in sleepq_catch_signals().
		 */
		td->td_flags |= TDF_TIMEOUT;
	}

	thread_unlock(td);
	if (wakeup_swapper)
		kick_proc0();
}
/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;
	int wakeup_swapper;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel, if it is not, then
	 * bail.
	 */
	MPASS(wchan != NULL);
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	/*
	 * We can not lock the thread here as it may be sleeping on a
	 * different sleepq.  However, holding the sleepq lock for this
	 * wchan can guarantee that we do not miss a wakeup for this
	 * channel.  The asserts below will catch any false positives.
	 */
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		sleepq_release(wchan);
		return;
	}
	/* Thread is asleep on sleep queue sq, so wake it up. */
	thread_lock(td);
	MPASS(sq != NULL);
	MPASS(td->td_wchan == wchan);
	wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	thread_unlock(td);
	sleepq_release(wchan);
	if (wakeup_swapper)
		kick_proc0();
}
/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 */
int
sleepq_abort(struct thread *td, int intrval)
{
	struct sleepqueue *sq;
	void *wchan;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return (0);

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
	td->td_intrval = intrval;
	td->td_flags |= TDF_SLEEPABORT;
	/*
	 * If the thread has not slept yet it will find the signal in
	 * sleepq_catch_signals() and call sleepq_resume_thread.  Otherwise
	 * we have to do it here.
	 */
	if (!TD_IS_SLEEPING(td))
		return (0);
	wchan = td->td_wchan;
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	return (sleepq_resume_thread(sq, td, 0));
}
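
/*
 * Illustrative use of sleepq_abort() (a sketch of the caller's side, not
 * code compiled here): the signal code interrupts a thread in an
 * interruptible sleep with the thread's lock held, roughly:
 *
 *	if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR) != 0)
 *		wakeup_swapper = sleepq_abort(td, EINTR);
 */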

void
sleepq_chains_remove_matching(bool (*matches)(struct thread *))
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	int i, wakeup_swapper;

	wakeup_swapper = 0;
	for (sc = &sleepq_chains[0]; sc < sleepq_chains + SC_TABLESIZE; ++sc) {
		if (LIST_EMPTY(&sc->sc_queues)) {
			continue;
		}
		mtx_lock_spin(&sc->sc_lock);
		LIST_FOREACH(sq, &sc->sc_queues, sq_hash) {
			for (i = 0; i < NR_SLEEPQS; ++i) {
				wakeup_swapper |= sleepq_remove_matching(sq, i,
				    matches, 0);
			}
		}
		mtx_unlock_spin(&sc->sc_lock);
	}
	if (wakeup_swapper) {
		kick_proc0();
	}
}
#ifdef STACK
/*
 * Prints the stacks of all threads presently sleeping on wchan/queue to
 * the sbuf sb.  Sets count_stacks_printed to the number of stacks actually
 * printed.  Typically, this will equal the number of threads sleeping on the
 * queue, but may be less if sb overflowed before all stacks were printed.
 */
int
sleepq_sbuf_print_stacks(struct sbuf *sb, void *wchan, int queue,
    int *count_stacks_printed)
{
	struct thread *td, *td_next;
	struct sleepqueue *sq;
	struct stack **st;
	struct sbuf **td_infos;
	int i, stack_idx, error, stacks_to_allocate;
	bool finished, partial_print;

	error = 0;
	finished = false;
	partial_print = false;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	stacks_to_allocate = 10;
	for (i = 0; i < 3 && !finished ; i++) {
		/* We cannot malloc while holding the queue's spinlock, so
		 * we do our mallocs now, and hope it is enough.  If it
		 * isn't, we will free these, drop the lock, malloc more,
		 * and try again, up to a point.  After that point we will
		 * give up and report ENOMEM.  We also cannot write to sb
		 * during this time since the client may have set the
		 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a
		 * malloc as we print to it.  So we defer actually printing
		 * to sb until after we drop the spinlock.
		 */

		/* Where we will store the stacks. */
		st = malloc(sizeof(struct stack *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			st[stack_idx] = stack_create(M_WAITOK);

		/* Where we will store the td name, tid, etc. */
		td_infos = malloc(sizeof(struct sbuf *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			td_infos[stack_idx] = sbuf_new(NULL, NULL,
			    MAXCOMLEN + sizeof(struct thread *) * 2 + 40,
			    SBUF_FIXEDLEN);

		sleepq_lock(wchan);
		sq = sleepq_lookup(wchan);
		if (sq == NULL) {
			/* This sleepq does not exist; exit and return ENOENT. */
			error = ENOENT;
			finished = true;
			sleepq_release(wchan);
			goto loop_end;
		}

		stack_idx = 0;
		/* Save thread info */
		TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq,
		    td_next) {
			if (stack_idx >= stacks_to_allocate)
				goto loop_end;

			/* Note the td_lock is equal to the sleepq_lock here. */
			stack_save_td(st[stack_idx], td);

			sbuf_printf(td_infos[stack_idx], "%d: %s %p",
			    td->td_tid, td->td_name, td);

			++stack_idx;
		}

		finished = true;
		sleepq_release(wchan);

		/* Print the stacks */
		for (i = 0; i < stack_idx; i++) {
			sbuf_finish(td_infos[i]);
			sbuf_printf(sb, "--- thread %s: ---\n", sbuf_data(td_infos[i]));
			stack_sbuf_print(sb, st[i]);
			sbuf_printf(sb, "\n");

			error = sbuf_error(sb);
			if (error == 0)
				*count_stacks_printed = stack_idx;
		}

loop_end:
		if (!finished)
			sleepq_release(wchan);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			stack_destroy(st[stack_idx]);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			sbuf_delete(td_infos[stack_idx]);
		free(st, M_TEMP);
		free(td_infos, M_TEMP);
		stacks_to_allocate *= 10;
	}

	if (!finished && error == 0)
		error = ENOMEM;

	return (error);
}
#endif
#ifdef SLEEPQUEUE_PROFILING
#define	SLEEPQ_PROF_LOCATIONS	1024
#define	SLEEPQ_SBUFSIZE		512
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;
	const char	*sp_wmesg;
	long		sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);
static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
}
static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}
static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}
static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}
static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
	sbuf_printf(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
#endif
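
/*
 * Example of driving the profiler from userland via the sysctl nodes
 * defined above (illustrative shell commands, assuming a kernel built
 * with options SLEEPQUEUE_PROFILING):
 *
 *	sysctl debug.sleepq.enable=1	# start profiling
 *	sysctl debug.sleepq.stats	# dump the wmesg/count table
 *	sysctl debug.sleepq.reset=1	# clear the statistics
 */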

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated by the passed in address.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}

	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
	db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
	if (sq->sq_lock) {
		lock = sq->sq_lock;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	for (i = 0; i < NR_SLEEPQS; i++) {
		db_printf("\nQueue[%d]:\n", i);
		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
			db_printf("\tempty\n");
		else
			TAILQ_FOREACH(td, &sq->sq_blocked[i],
				      td_slpq) {
				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
					  td->td_tid, td->td_proc->p_pid,
					  td->td_name);
			}
		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
	}
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif