/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implementation of sleep queues used to hold queues of threads blocked on
 * a wait channel.  Sleep queues are different from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
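
/*
 * As a rough sketch of the consumer-side API (an illustration, not code
 * from this file; the canonical consumers are _sleep() in kern_synch.c
 * and the condition variable code in kern_condvar.c), a thread typically
 * blocks on a wait channel like this:
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, &m->lock_object, "examplewmesg", SLEEPQ_SLEEP, 0);
 *	sleepq_wait(wchan, 0);
 *
 * while a waker, mirroring wakeup(9), does:
 *
 *	sleepq_lock(wchan);
 *	wakeup_swapper = sleepq_broadcast(wchan, SLEEPQ_SLEEP, 0, 0);
 *	sleepq_release(wchan);
 *	if (wakeup_swapper)
 *		kick_proc0();
 *
 * Here "m" and "wchan" are hypothetical names; note that sleepq_wait()
 * both drops the chain lock and puts the thread to sleep.
 */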
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"
#include "opt_sched.h"
#include "opt_stack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#ifdef EPOCH_TRACE
#include <sys/epoch.h>
#endif

#include <machine/atomic.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
/*
 * Constants for the hash table of sleep queue chains.
 * SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define	SC_TABLESIZE	256
CTASSERT(powerof2(SC_TABLESIZE));
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	((((uintptr_t)(wc) >> SC_SHIFT) ^ (uintptr_t)(wc)) & \
			    SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
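
/*
 * A worked example of the hash (illustrative only, using a made-up wait
 * channel pointer): because SC_TABLESIZE is a power of two, "& SC_MASK"
 * reduces the hash to a table index without a division.  For
 * wc = 0x12345678, SC_HASH(wc) computes
 *	((0x12345678 >> 8) ^ 0x12345678) & 0xff = (0x123456 ^ 0x12345678) & 0xff
 * which is 0x2e, so SC_LOOKUP(wc) is &sleepq_chains[0x2e].  Folding the
 * shifted value back in mixes higher-order pointer bits into the index.
 */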
/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	struct threadqueue sq_blocked[NR_SLEEPQS]; /* (c) Blocked threads. */
	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	const void	*sq_wchan;		/* (c) Wait channel. */
	int	sq_type;			/* (c) Queue type. */
#ifdef INVARIANTS
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};
struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
} __aligned(CACHE_LINE_SIZE);
#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "sleepq profiling");
static SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved by a single chain");

static void	sleepq_profile(const char *wmesg);
static int	prof_enabled;
#endif
static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;
/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(const void *wchan, int pri);
static inline int sleepq_check_signals(void);
static inline int sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri, int srqflags);
static void	sleepq_remove_thread(struct sleepqueue *sq, struct thread *td);
static void	sleepq_switch(const void *wchan, int pri);
static void	sleepq_timeout(void *arg);

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);
/*
 * Initialize SLEEPQUEUE_PROFILING specific sysctl nodes.
 * Note that it must happen after sleepinit() has been fully executed, so
 * it must happen after SI_SUB_KMEM SYSINIT() subsystem setup.
 */
#ifdef SLEEPQUEUE_PROFILING
static void
init_sleepqueue_profiling(void)
{
	char chain_name[10];
	struct sysctl_oid *chain_oid;
	u_int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%u", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
		    "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
	}
}

SYSINIT(sleepqueue_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_sleepqueue_profiling, NULL);
#endif
/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * routine.
 */
void
init_sleepqueues(void)
{
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN);
	}
	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif

	thread0.td_sleepqueue = sleepq_alloc();
}
/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	uma_zfree(sleepq_zone, sq);
}
/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(const void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}
/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The associated sleep queue chain must already be locked.  If no
 * queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(const void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}
/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(const void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}
/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * woken up.
 */
void
sleepq_add(const void *wchan, struct lock_object *lock, const char *wmesg,
    int flags, int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	if (__predict_false(!THREAD_CAN_SLEEP())) {
#ifdef EPOCH_TRACE
		epoch_trace_list(curthread);
#endif
		KASSERT(0,
		    ("%s: td %p to sleep on wchan %p with sleeping prohibited",
		    __func__, td, wchan));
	}

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++) {
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
			KASSERT(sq->sq_blockedcnt[i] == 0,
			    ("thread's sleep queue %d count mismatches", i));
		}
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
		sq->sq_type = flags & SLEEPQ_TYPE;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	thread_lock(td);
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	sq->sq_blockedcnt[queue]++;
	td->td_sleepqueue = NULL;
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_intrval = 0;
		td->td_flags |= TDF_SINTR;
	}
	td->td_flags &= ~TDF_TIMEOUT;
	thread_unlock(td);
}
/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
sleepq_set_timeout_sbt(const void *wchan, sbintime_t sbt, sbintime_t pr,
    int flags)
{
	struct sleepqueue_chain *sc __unused;
	struct thread *td;
	sbintime_t pr1;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	if (cold && td == &thread0)
		panic("timed sleep before timers are working");
	KASSERT(td->td_sleeptimo == 0, ("td %d %p td_sleeptimo %jx",
	    td->td_tid, td, (uintmax_t)td->td_sleeptimo));
	thread_lock(td);
	callout_when(sbt, pr, flags, &td->td_sleeptimo, &pr1);
	thread_unlock(td);
	callout_reset_sbt_on(&td->td_slpcallout, td->td_sleeptimo, pr1,
	    sleepq_timeout, td, PCPU_GET(cpuid), flags | C_PRECALC |
	    C_DIRECT_EXEC);
}
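
/*
 * A minimal sketch of a timed sleep (illustrative only; the wait channel,
 * wmesg, and NULL interlock here are hypothetical).  The timeout must be
 * set after sleepq_add() and before the thread switches out:
 *
 *	sleepq_lock(wchan);
 *	sleepq_add(wchan, NULL, "tmowait", SLEEPQ_SLEEP, 0);
 *	sleepq_set_timeout_sbt(wchan, SBT_1S, 0, C_HARDCLOCK);
 *	if (sleepq_timedwait(wchan, 0) == EWOULDBLOCK)
 *		... the one second timeout expired ...
 */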
/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(const void *wchan, int queue)
{
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	return (sq->sq_blockedcnt[queue]);
}
/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Enters and exits with the thread lock held.  Thread lock
 * may have transitioned from the sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(const void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	struct sigacts *ps;
	int sig, ret;

	ret = 0;
	td = curthread;
	p = curproc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);
	if ((td->td_pflags & TDP_WAKEUP) != 0) {
		td->td_pflags &= ~TDP_WAKEUP;
		ret = EINTR;
		thread_lock(td);
		goto out;
	}

	/*
	 * See if there are any pending signals or suspension requests for this
	 * thread.  If not, we can switch immediately.
	 */
	thread_lock(td);
	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) != 0) {
		thread_unlock(td);
		mtx_unlock_spin(&sc->sc_lock);
		CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
			(void *)td, (long)p->p_pid, td->td_name);
		PROC_LOCK(p);
		/*
		 * Check for suspension first.  Checking for signals and then
		 * suspending could result in a missed signal, since a signal
		 * can be delivered while this thread is suspended.
		 */
		if ((td->td_flags & TDF_NEEDSUSPCHK) != 0) {
			ret = thread_suspend_check(1);
			MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
			if (ret != 0) {
				PROC_UNLOCK(p);
				mtx_lock_spin(&sc->sc_lock);
				thread_lock(td);
				goto out;
			}
		}
		if ((td->td_flags & TDF_NEEDSIGCHK) != 0) {
			ps = p->p_sigacts;
			mtx_lock(&ps->ps_mtx);
			sig = cursig(td);
			if (sig == -1) {
				mtx_unlock(&ps->ps_mtx);
				KASSERT((td->td_flags & TDF_SBDRY) != 0,
				    ("lost TDF_SBDRY"));
				KASSERT(TD_SBDRY_INTR(td),
				    ("lost TDF_SERESTART or TDF_SEINTR"));
				KASSERT((td->td_flags &
				    (TDF_SEINTR | TDF_SERESTART)) !=
				    (TDF_SEINTR | TDF_SERESTART),
				    ("both TDF_SEINTR and TDF_SERESTART"));
				ret = TD_SBDRY_ERRNO(td);
			} else if (sig != 0) {
				ret = SIGISMEMBER(ps->ps_sigintr, sig) ?
				    EINTR : ERESTART;
				mtx_unlock(&ps->ps_mtx);
			} else {
				mtx_unlock(&ps->ps_mtx);
			}

			/*
			 * Do not go into sleep if this thread was the
			 * ptrace(2) attach leader.  cursig() consumed
			 * SIGSTOP from PT_ATTACH, but we usually act
			 * on the signal by interrupting sleep, and
			 * should do that here as well.
			 */
			if ((td->td_dbgflags & TDB_FSTP) != 0) {
				if (ret == 0)
					ret = EINTR;
				td->td_dbgflags &= ~TDB_FSTP;
			}
		}
		/*
		 * Lock the per-process spinlock prior to dropping the PROC_LOCK
		 * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
		 * thread_lock() are currently held in tdsendsignal().
		 */
		PROC_SLOCK(p);
		mtx_lock_spin(&sc->sc_lock);
		PROC_UNLOCK(p);
		thread_lock(td);
		PROC_SUNLOCK(p);
	}
	if (ret == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
out:
	/*
	 * There were pending signals and this thread is still
	 * on the sleep queue, remove it from the sleep queue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		sq = sleepq_lookup(wchan);
		sleepq_remove_thread(sq, td);
	}
	MPASS(td->td_lock != &sc->sc_lock);
	mtx_unlock_spin(&sc->sc_lock);
	return (ret);
}
/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with thread lock.
 */
static void
sleepq_switch(const void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	bool rtc_changed;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		mtx_unlock_spin(&sc->sc_lock);
		thread_unlock(td);
		return;
	}

	/*
	 * If TDF_TIMEOUT is set, then our sleep has been timed out
	 * already but we are still on the sleep queue, so dequeue the
	 * thread and return.
	 *
	 * Do the same if the real-time clock has been adjusted since this
	 * thread calculated its timeout based on that clock.  This handles
	 * the following race:
	 * - The Ts thread needs to sleep until an absolute real-clock time.
	 *   It copies the global rtc_generation into curthread->td_rtcgen,
	 *   reads the RTC, and calculates a sleep duration based on that time.
	 *   See umtxq_sleep() for an example.
	 * - The Tc thread adjusts the RTC, bumps rtc_generation, and wakes
	 *   threads that are sleeping until an absolute real-clock time.
	 *   See tc_setclock() and the POSIX specification of clock_settime().
	 * - Ts reaches the code below.  It holds the sleepqueue chain lock,
	 *   so Tc has finished waking, so this thread must test td_rtcgen.
	 * (The declaration of td_rtcgen refers to this comment.)
	 */
	rtc_changed = td->td_rtcgen != 0 && td->td_rtcgen != rtc_generation;
	if ((td->td_flags & TDF_TIMEOUT) || rtc_changed) {
		if (rtc_changed)
			td->td_rtcgen = 0;
		MPASS(TD_ON_SLEEPQ(td));
		sq = sleepq_lookup(wchan);
		sleepq_remove_thread(sq, td);
		mtx_unlock_spin(&sc->sc_lock);
		setrunnable(td, SRQ_HOLDTD);
		thread_unlock(td);
		return;
	}
#ifdef SLEEPQUEUE_PROFILING
	if (prof_enabled)
		sleepq_profile(td->td_wmesg);
#endif
	MPASS(td->td_sleepqueue == NULL);
	sched_sleep(td, pri);
	thread_lock_set(td, &sc->sc_lock);
	SDT_PROBE0(sched, , , sleep);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL | SWT_SLEEPQ);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}
/*
 * Check to see if we timed out.
 */
static inline int
sleepq_check_timeout(void)
{
	struct thread *td;
	int res;

	res = 0;
	td = curthread;
	if (td->td_sleeptimo != 0) {
		if (td->td_sleeptimo <= sbinuptime())
			res = EWOULDBLOCK;
		td->td_sleeptimo = 0;
	}
	return (res);
}

/*
 * Check to see if we were awoken by a signal.
 */
static inline int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	KASSERT((td->td_flags & TDF_SINTR) == 0,
	    ("thread %p still in interruptible sleep?", td));

	return (td->td_intrval);
}
/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(const void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(const void *wchan, int pri)
{
	int rcatch;

	rcatch = sleepq_catch_signals(wchan, pri);
	if (rcatch)
		return (rcatch);
	return (sleepq_check_signals());
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(const void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));

	thread_lock(td);
	sleepq_switch(wchan, pri);

	return (sleepq_check_timeout());
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(const void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	/* We must always call check_timeout() to clear sleeptimo. */
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}
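
/*
 * Note the precedence encoded above: a suspension or signal caught before
 * sleeping (rcatch) wins over a signal that interrupted the sleep (rvals),
 * which in turn wins over an expired timeout (rvalt).  A hedged sketch of
 * how a caller distinguishes the outcomes (mirroring what _sleep() does
 * with these values):
 *
 *	error = sleepq_timedwait_sig(wchan, 0);
 *	if (error == EWOULDBLOCK)
 *		... the sleep timed out ...
 *	else if (error == EINTR || error == ERESTART)
 *		... the sleep was interrupted by a signal ...
 *	else
 *		... normal wakeup, error == 0 ...
 */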
/*
 * Returns the type of sleep queue given a wait channel.
 */
int
sleepq_type(const void *wchan)
{
	struct sleepqueue *sq;
	int type;

	MPASS(wchan != NULL);

	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (-1);
	type = sq->sq_type;

	return (type);
}
/*
 * Removes a thread from a sleep queue and makes it runnable.
 *
 * Requires the sc chain locked on entry.  If SRQ_HOLD is specified it will
 * be locked on return.  Returns without the thread lock held.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri,
    int srqflags)
{
	struct sleepqueue_chain *sc;
	bool drop;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);

	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	/*
	 * Avoid recursing on the chain lock.  If the locks don't match we
	 * need to acquire the thread lock which setrunnable will drop for
	 * us.  In this case we need to drop the chain lock afterwards.
	 *
	 * There is no race that will make td_lock equal to sc_lock because
	 * we hold sc_lock.
	 */
	drop = false;
	if (!TD_IS_SLEEPING(td)) {
		thread_lock(td);
		drop = true;
	} else
		thread_lock_block_wait(td);

	/* Remove thread from the sleepq. */
	sleepq_remove_thread(sq, td);

	/* If we're done with the sleepqueue release it. */
	if ((srqflags & SRQ_HOLD) == 0 && drop)
		mtx_unlock_spin(&sc->sc_lock);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri &&
	    PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
		sched_prio(td, pri);

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on its
	 * proc lock to check signals.  There's no need to mark the
	 * thread runnable in that case.
	 */
	if (TD_IS_SLEEPING(td)) {
		MPASS(!drop);
		TD_CLR_SLEEPING(td);
		return (setrunnable(td, srqflags));
	}
	MPASS(drop);
	thread_unlock(td);

	return (0);
}
/*
 * Remove a thread from a sleep queue.  The caller must hold both the
 * thread lock and the lock of the chain the queue hangs off of.
 */
static void
sleepq_remove_thread(struct sleepqueue *sq, struct thread *td)
{
	struct sleepqueue_chain *sc __unused;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

	/* Remove the thread from the queue. */
	sq->sq_blockedcnt[td->td_sqqueue]--;
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	if ((td->td_flags & TDF_TIMEOUT) == 0 && td->td_sleeptimo != 0)
		/*
		 * We ignore the situation where timeout subsystem was
		 * unable to stop our callout.  The struct thread is
		 * type-stable, the callout will use the correct
		 * memory when running.  The checks of the
		 * td_sleeptimo value in this function and in
		 * sleepq_timeout() ensure that the thread does not
		 * get spurious wakeups, even if the callout was reset
		 * or thread stopped before callout started.
		 */
		callout_stop(&td->td_slpcallout);

	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	td->td_flags &= ~(TDF_SINTR | TDF_TIMEOUT);

	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_name);
}
#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
	struct sleepqueue *sq;
	int i;

	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
		MPASS(sq->sq_blockedcnt[i] == 0);
	}
}
#endif

/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
	struct sleepqueue *sq;
	int i;

	bzero(mem, size);
	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		TAILQ_INIT(&sq->sq_blocked[i]);
		sq->sq_blockedcnt[i] = 0;
	}
	LIST_INIT(&sq->sq_free);
	return (0);
}
/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(const void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct threadqueue *head;
	struct thread *td, *besttd;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	head = &sq->sq_blocked[queue];
	if (flags & SLEEPQ_UNFAIR) {
		/*
		 * Find the most recently sleeping thread, but try to
		 * skip threads still in process of context switch to
		 * avoid spinning on the thread lock.
		 */
		sc = SC_LOOKUP(wchan);
		besttd = TAILQ_LAST_FAST(head, thread, td_slpq);
		while (besttd->td_lock != &sc->sc_lock) {
			td = TAILQ_PREV_FAST(besttd, head, thread, td_slpq);
			if (td == NULL)
				break;
			besttd = td;
		}
	} else {
		/*
		 * Find the highest priority thread on the queue.  If there
		 * is a tie, use the thread that first appears in the queue
		 * as it has been sleeping the longest since threads are
		 * always added to the tail of sleep queues.
		 */
		besttd = td = TAILQ_FIRST(head);
		while ((td = TAILQ_NEXT(td, td_slpq)) != NULL) {
			if (td->td_priority < besttd->td_priority)
				besttd = td;
		}
	}
	MPASS(besttd != NULL);
	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri, SRQ_HOLD);
	return (wakeup_swapper);
}
static bool
match_any(struct thread *td __unused)
{

	return (true);
}

/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(const void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	return (sleepq_remove_matching(sq, queue, match_any, pri));
}
/*
 * Resume threads on the sleep queue that match the given predicate.
 */
int
sleepq_remove_matching(struct sleepqueue *sq, int queue,
    bool (*matches)(struct thread *), int pri)
{
	struct thread *td, *tdn;
	int wakeup_swapper;

	/*
	 * The last thread will be given ownership of sq and may
	 * re-enqueue itself before sleepq_resume_thread() returns,
	 * so we must cache the "next" queue item at the beginning
	 * of the final iteration.
	 */
	wakeup_swapper = 0;
	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
		if (matches(td))
			wakeup_swapper |= sleepq_resume_thread(sq, td, pri,
			    SRQ_HOLD);
	}

	return (wakeup_swapper);
}
/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue_chain *sc __unused;
	struct sleepqueue *sq;
	struct thread *td;
	const void *wchan;
	int wakeup_swapper;

	td = arg;
	wakeup_swapper = 0;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

	thread_lock(td);
	if (td->td_sleeptimo == 0 || td->td_sleeptimo > sbinuptime()) {
		/*
		 * The thread does not want a timeout (yet).
		 */
	} else if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
		/*
		 * See if the thread is asleep and get the wait
		 * channel if it is.
		 */
		wchan = td->td_wchan;
		sc = SC_LOOKUP(wchan);
		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
		sq = sleepq_lookup(wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		wakeup_swapper = sleepq_resume_thread(sq, td, 0, 0);
		if (wakeup_swapper)
			kick_proc0();
		return;
	} else if (TD_ON_SLEEPQ(td)) {
		/*
		 * If the thread is on the SLEEPQ but isn't sleeping
		 * yet, it can either be on another CPU in between
		 * sleepq_add() and one of the sleepq_*wait*()
		 * routines or it can be in sleepq_catch_signals().
		 */
		td->td_flags |= TDF_TIMEOUT;
	}
	thread_unlock(td);
}
/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, const void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	int wakeup_swapper;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not, bail.
	 */
	MPASS(wchan != NULL);
	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
	/*
	 * We can not lock the thread here as it may be sleeping on a
	 * different sleepq.  However, holding the sleepq lock for this
	 * wchan can guarantee that we do not miss a wakeup for this
	 * channel.  The asserts below will catch any false positives.
	 */
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}

	/* Thread is asleep on sleep queue sq, so wake it up. */
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);
	MPASS(td->td_wchan == wchan);
	wakeup_swapper = sleepq_resume_thread(sq, td, 0, 0);
	if (wakeup_swapper)
		kick_proc0();
}
/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 *
 * Requires thread lock on entry, releases on return.
 */
int
sleepq_abort(struct thread *td, int intrval)
{
	struct sleepqueue *sq;
	const void *wchan;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		thread_unlock(td);
		return (0);
	}

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
	td->td_intrval = intrval;

	/*
	 * If the thread has not slept yet it will find the signal in
	 * sleepq_catch_signals() and call sleepq_resume_thread.  Otherwise
	 * we have to do it here.
	 */
	if (!TD_IS_SLEEPING(td)) {
		thread_unlock(td);
		return (0);
	}
	wchan = td->td_wchan;
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	return (sleepq_resume_thread(sq, td, 0, 0));
}
/*
 * Resume all sleeping threads, across every sleep queue chain, that match
 * the given predicate.
 */
void
sleepq_chains_remove_matching(bool (*matches)(struct thread *))
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq, *sq1;
	int i, wakeup_swapper;

	wakeup_swapper = 0;
	for (sc = &sleepq_chains[0]; sc < sleepq_chains + SC_TABLESIZE; ++sc) {
		if (LIST_EMPTY(&sc->sc_queues)) {
			continue;
		}
		mtx_lock_spin(&sc->sc_lock);
		LIST_FOREACH_SAFE(sq, &sc->sc_queues, sq_hash, sq1) {
			for (i = 0; i < NR_SLEEPQS; ++i) {
				wakeup_swapper |= sleepq_remove_matching(sq, i,
				    matches, 0);
			}
		}
		mtx_unlock_spin(&sc->sc_lock);
	}
	if (wakeup_swapper) {
		kick_proc0();
	}
}
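
/*
 * A hedged sketch of predicate-based wakeup (the predicate and the
 * "some_proc" variable are hypothetical, not from this file): to resume
 * every sleeping thread belonging to one process, a caller could write
 *
 *	static bool
 *	match_proc(struct thread *td)
 *	{
 *		return (td->td_proc == some_proc);
 *	}
 *
 * and then call sleepq_chains_remove_matching(match_proc).  Note that
 * the predicate runs with a sleep queue chain spin lock held, so it must
 * not block or sleep.
 */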
/*
 * Prints the stacks of all threads presently sleeping on wchan/queue to
 * the sbuf sb.  Sets count_stacks_printed to the number of stacks actually
 * printed.  Typically, this will equal the number of threads sleeping on the
 * queue, but may be less if sb overflowed before all stacks were printed.
 */
#ifdef STACK
int
sleepq_sbuf_print_stacks(struct sbuf *sb, const void *wchan, int queue,
    int *count_stacks_printed)
{
	struct thread *td, *td_next;
	struct sleepqueue *sq;
	struct stack **st;
	struct sbuf **td_infos;
	int i, stack_idx, error, stacks_to_allocate;
	bool finished;

	error = 0;
	finished = false;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	stacks_to_allocate = 10;
	for (i = 0; i < 3 && !finished ; i++) {
		/* We cannot malloc while holding the queue's spinlock, so
		 * we do our mallocs now, and hope it is enough.  If it
		 * isn't, we will free these, drop the lock, malloc more,
		 * and try again, up to a point.  After that point we will
		 * give up and report ENOMEM.  We also cannot write to sb
		 * during this time since the client may have set the
		 * SBUF_AUTOEXTEND flag on their sbuf, which could cause a
		 * malloc as we print to it.  So we defer actually printing
		 * to sb until after we drop the spinlock.
		 */

		/* Where we will store the stacks. */
		st = malloc(sizeof(struct stack *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			st[stack_idx] = stack_create(M_WAITOK);

		/* Where we will store the td name, tid, etc. */
		td_infos = malloc(sizeof(struct sbuf *) * stacks_to_allocate,
		    M_TEMP, M_WAITOK);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			td_infos[stack_idx] = sbuf_new(NULL, NULL,
			    MAXCOMLEN + sizeof(struct thread *) * 2 + 40,
			    SBUF_FIXEDLEN);

		sleepq_lock(wchan);
		sq = sleepq_lookup(wchan);
		if (sq == NULL) {
			/* This sleepq does not exist; exit and return ENOENT. */
			error = ENOENT;
			finished = true;
			sleepq_release(wchan);
			goto loop_end;
		}

		stack_idx = 0;
		/* Save thread info */
		TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq,
		    td_next) {
			if (stack_idx >= stacks_to_allocate)
				goto loop_end;

			/* Note the td_lock is equal to the sleepq_lock here. */
			(void)stack_save_td(st[stack_idx], td);

			sbuf_printf(td_infos[stack_idx], "%d: %s %p",
			    td->td_tid, td->td_name, td);

			++stack_idx;
		}

		finished = true;
		sleepq_release(wchan);

		/* Print the stacks */
		for (i = 0; i < stack_idx; i++) {
			sbuf_finish(td_infos[i]);
			sbuf_printf(sb, "--- thread %s: ---\n", sbuf_data(td_infos[i]));
			stack_sbuf_print(sb, st[i]);
			sbuf_printf(sb, "\n");

			error = sbuf_error(sb);
			if (error == 0)
				*count_stacks_printed = stack_idx;
		}

loop_end:
		if (!finished)
			sleepq_release(wchan);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			stack_destroy(st[stack_idx]);
		for (stack_idx = 0; stack_idx < stacks_to_allocate;
		    stack_idx++)
			sbuf_delete(td_infos[stack_idx]);
		free(st, M_TEMP);
		free(td_infos, M_TEMP);
		stacks_to_allocate *= 10;
	}

	if (!finished && error == 0)
		error = ENOMEM;

	return (error);
}
#endif
#ifdef SLEEPQUEUE_PROFILING
#define	SLEEPQ_PROF_LOCATIONS	1024
#define	SLEEPQ_SBUFSIZE		512
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;
	const char	*sp_wmesg;
	long		sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);
static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
}
static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}
static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}
static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sb = sbuf_new_for_sysctl(NULL, NULL, SLEEPQ_SBUFSIZE, req);
	sbuf_printf(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	error = sbuf_finish(sb);
	sbuf_delete(sb);
	return (error);
}
SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0,
    dump_sleepq_prof_stats, "A",
    "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
    enable_sleepq_prof, "I",
    "Enable sleepqueue profiling");
#endif

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated by the passed in address.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}

	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
	db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
	if (sq->sq_lock) {
		lock = sq->sq_lock;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	for (i = 0; i < NR_SLEEPQS; i++) {
		db_printf("\nQueue[%d]:\n", i);
		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
			db_printf("\tempty\n");
		else
			TAILQ_FOREACH(td, &sq->sq_blocked[i],
			    td_slpq) {
				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
				    td->td_tid, td->td_proc->p_pid,
				    td->td_name);
			}
		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
	}
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif