/*-
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Implementation of sleep queues used to hold the queue of threads blocked
 * on a wait channel.  Sleep queues differ from turnstiles in that wait
 * channels are not owned by anyone, so there is no priority propagation.
 * Sleep queues can also provide a timeout and can also be interrupted by
 * signals.  That said, there are several similarities between the turnstile
 * and sleep queue implementations.  (Note: turnstiles were implemented
 * first.)  For example, both use a hash table of the same size where each
 * bucket is referred to as a "chain" that contains both a spin lock and
 * a linked list of queues.  An individual queue is located by using a hash
 * to pick a chain, locking the chain, and then walking the chain searching
 * for the queue.  This means that a wait channel object does not need to
 * embed its queue head just as locks do not embed their turnstile queue
 * head.  Threads also carry around a sleep queue that they lend to the
 * wait channel when blocking.  Just as in turnstiles, the queue includes
 * a free list of the sleep queues of other threads blocked on the same
 * wait channel in the case of multiple waiters.
 *
 * Some additional functionality provided by sleep queues includes the
 * ability to set a timeout.  The timeout is managed using a per-thread
 * callout that resumes a thread if it is asleep.  A thread may also
 * catch signals while it is asleep (aka an interruptible sleep).  The
 * signal code uses sleepq_abort() to interrupt a sleeping thread.  Finally,
 * sleep queues also provide some extra assertions.  One is not allowed to
 * mix the sleep/wakeup and cv APIs for a given wait channel.  Also, one
 * must consistently use the same lock to synchronize with a wait channel,
 * though this check is currently only a warning for sleep/wakeup due to
 * pre-existing abuse of that API.  The same lock must also be held when
 * awakening threads, though that is currently only enforced for condition
 * variables.
 */
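
/*
 * Example usage of the core API (an illustrative sketch, not part of the
 * original file): a minimal uninterruptible sleep on a hypothetical wait
 * channel "chan", modeled on how sleep(9)/_sleep() is layered on top of
 * sleep queues.  The wakeup side is sketched near sleepq_signal() below.
 *
 *	void
 *	example_sleep(void *chan)
 *	{
 *
 *		sleepq_lock(chan);
 *		sleepq_add(chan, NULL, "examp", SLEEPQ_SLEEP, 0);
 *		sleepq_wait(chan, 0);
 *	}
 */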

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_sleepqueue_profiling.h"
#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of sleep queue chains.  These constants are
 * the same ones that 4BSD (and possibly earlier versions of BSD) used.
 * Basically, we ignore the lower 8 bits of the address since most wait
 * channel pointers are aligned and only look at the next 7 bits for the
 * hash.  SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define	SC_TABLESIZE	128			/* Must be power of 2. */
#define	SC_MASK		(SC_TABLESIZE - 1)
#define	SC_SHIFT	8
#define	SC_HASH(wc)	(((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
#define	SC_LOOKUP(wc)	&sleepq_chains[SC_HASH(wc)]
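
/*
 * For example (illustrative arithmetic only): a wait channel pointer of
 * 0xc0a81234 hashes to ((0xc0a81234 >> 8) & 0x7f) = 0x12, so that channel
 * lands on chain 18 of the 128 chains.
 */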

/*
 * There are two different lists of sleep queues.  Both lists are connected
 * via the sq_hash entries.  The first list is the sleep queue chain list
 * that a sleep queue is on when it is attached to a wait channel.  The
 * second list is the free list hung off of a sleep queue that is attached
 * to a wait channel.
 *
 * Each sleep queue also contains the wait channel it is attached to, the
 * list of threads blocked on that wait channel, flags specific to the
 * wait channel, and the lock used to synchronize with a wait channel.
 * The flags are used to catch mismatches between the various consumers
 * of the sleep queue API (e.g. sleep/wakeup and condition variables).
 * The lock pointer is only used when invariants are enabled for various
 * debugging checks.
 *
 * Locking key:
 *  c - sleep queue chain lock
 */
struct sleepqueue {
	TAILQ_HEAD(, thread) sq_blocked[NR_SLEEPQS];	/* (c) Blocked threads. */
	u_int sq_blockedcnt[NR_SLEEPQS];	/* (c) N. of blocked threads. */
	LIST_ENTRY(sleepqueue) sq_hash;		/* (c) Chain and free list. */
	LIST_HEAD(, sleepqueue) sq_free;	/* (c) Free queues. */
	void	*sq_wchan;			/* (c) Wait channel. */
	int	sq_type;			/* (c) Queue type. */
#ifdef INVARIANTS
	struct lock_object *sq_lock;		/* (c) Associated lock. */
#endif
};

struct sleepqueue_chain {
	LIST_HEAD(, sleepqueue) sc_queues;	/* List of sleep queues. */
	struct mtx sc_lock;			/* Spin lock for this chain. */
#ifdef SLEEPQUEUE_PROFILING
	u_int	sc_depth;			/* Length of sc_queues. */
	u_int	sc_max_depth;			/* Max length of sc_queues. */
#endif
};

#ifdef SLEEPQUEUE_PROFILING
u_int sleepq_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, sleepq, CTLFLAG_RD, 0, "sleepq profiling");
SYSCTL_NODE(_debug_sleepq, OID_AUTO, chains, CTLFLAG_RD, 0,
    "sleepq chain stats");
SYSCTL_UINT(_debug_sleepq, OID_AUTO, max_depth, CTLFLAG_RD, &sleepq_max_depth,
    0, "maximum depth achieved of a single chain");

static void	sleepq_profile(const char *wmesg);
static int	prof_enabled;
#endif

static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static uma_zone_t sleepq_zone;

/*
 * Prototypes for non-exported routines.
 */
static int	sleepq_catch_signals(void *wchan, int pri);
static int	sleepq_check_signals(void);
static int	sleepq_check_timeout(void);
#ifdef INVARIANTS
static void	sleepq_dtor(void *mem, int size, void *arg);
#endif
static int	sleepq_init(void *mem, int size, int flags);
static int	sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
		    int pri);
static void	sleepq_switch(void *wchan, int pri);
static void	sleepq_timeout(void *arg);

SDT_PROBE_DECLARE(sched, , , sleep);
SDT_PROBE_DECLARE(sched, , , wakeup);

/*
 * Early initialization of sleep queues that is called from the sleepinit()
 * routine.
 */
void
init_sleepqueues(void)
{
#ifdef SLEEPQUEUE_PROFILING
	struct sysctl_oid *chain_oid;
	char chain_name[30];
#endif
	int i;

	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_INIT(&sleepq_chains[i].sc_queues);
		mtx_init(&sleepq_chains[i].sc_lock, "sleepq chain", NULL,
		    MTX_SPIN | MTX_RECURSE);
#ifdef SLEEPQUEUE_PROFILING
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_sleepq_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "sleepq chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &sleepq_chains[i].sc_depth, 0, NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &sleepq_chains[i].sc_max_depth, 0,
		    NULL);
#endif
	}
	sleepq_zone = uma_zcreate("SLEEPQUEUE", sizeof(struct sleepqueue),
#ifdef INVARIANTS
	    NULL, sleepq_dtor, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#else
	    NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
#endif
	thread0.td_sleepqueue = sleepq_alloc();
}

/*
 * Get a sleep queue for a new thread.
 */
struct sleepqueue *
sleepq_alloc(void)
{

	return (uma_zalloc(sleepq_zone, M_WAITOK));
}

/*
 * Free a sleep queue when a thread is destroyed.
 */
void
sleepq_free(struct sleepqueue *sq)
{

	uma_zfree(sleepq_zone, sq);
}

/*
 * Lock the sleep queue chain associated with the specified wait channel.
 */
void
sleepq_lock(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_lock_spin(&sc->sc_lock);
}

/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table.  The associated sleep queue chain must already be locked.  If no
 * queue is found in the table, NULL is returned.
 */
struct sleepqueue *
sleepq_lookup(void *wchan)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			return (sq);
	return (NULL);
}

/*
 * Unlock the sleep queue chain associated with a given wait channel.
 */
void
sleepq_release(void *wchan)
{
	struct sleepqueue_chain *sc;

	sc = SC_LOOKUP(wchan);
	mtx_unlock_spin(&sc->sc_lock);
}

/*
 * Places the current thread on the sleep queue for the specified wait
 * channel.  If INVARIANTS is enabled, then it associates the passed in
 * lock with the sleepq to make sure it is held when that sleep queue is
 * awakened.
 */
void
sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
    int queue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(td->td_sleepqueue != NULL);
	MPASS(wchan != NULL);
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

	/* If this thread is not allowed to sleep, die a horrible death. */
	KASSERT(!(td->td_pflags & TDP_NOSLEEPING),
	    ("Trying sleep, but thread marked as sleeping prohibited"));

	/* Look up the sleep queue associated with the wait channel 'wchan'. */
	sq = sleepq_lookup(wchan);

	/*
	 * If the wait channel does not already have a sleep queue, use
	 * this thread's sleep queue.  Otherwise, insert the current thread
	 * into the sleep queue already in use by this wait channel.
	 */
	if (sq == NULL) {
#ifdef INVARIANTS
		int i;

		sq = td->td_sleepqueue;
		for (i = 0; i < NR_SLEEPQS; i++) {
			KASSERT(TAILQ_EMPTY(&sq->sq_blocked[i]),
			    ("thread's sleep queue %d is not empty", i));
			KASSERT(sq->sq_blockedcnt[i] == 0,
			    ("thread's sleep queue %d count mismatches", i));
		}
		KASSERT(LIST_EMPTY(&sq->sq_free),
		    ("thread's sleep queue has a non-empty free list"));
		KASSERT(sq->sq_wchan == NULL, ("stale sq_wchan pointer"));
		sq->sq_lock = lock;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth++;
		if (sc->sc_depth > sc->sc_max_depth) {
			sc->sc_max_depth = sc->sc_depth;
			if (sc->sc_max_depth > sleepq_max_depth)
				sleepq_max_depth = sc->sc_max_depth;
		}
#endif
		sq = td->td_sleepqueue;
		LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
		sq->sq_wchan = wchan;
		sq->sq_type = flags & SLEEPQ_TYPE;
	} else {
		MPASS(wchan == sq->sq_wchan);
		MPASS(lock == sq->sq_lock);
		MPASS((flags & SLEEPQ_TYPE) == sq->sq_type);
		LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
	}
	thread_lock(td);
	TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
	sq->sq_blockedcnt[queue]++;
	td->td_sleepqueue = NULL;
	td->td_sqqueue = queue;
	td->td_wchan = wchan;
	td->td_wmesg = wmesg;
	if (flags & SLEEPQ_INTERRUPTIBLE) {
		td->td_flags |= TDF_SINTR;
		td->td_flags &= ~TDF_SLEEPABORT;
	}
	thread_unlock(td);
}

/*
 * Sets a timeout that will remove the current thread from the specified
 * sleep queue after timo ticks if the thread has not already been awakened.
 */
void
sleepq_set_timeout(void *wchan, int timo)
{
	struct sleepqueue_chain *sc;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_sleepqueue == NULL);
	MPASS(wchan != NULL);
	callout_reset_curcpu(&td->td_slpcallout, timo, sleepq_timeout, td);
}
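
/*
 * Example usage (an illustrative sketch): a bounded wait built from
 * sleepq_add(), sleepq_set_timeout() and sleepq_timedwait(), roughly the
 * shape of a timo != 0 _sleep() with no interlock.  "chan" is hypothetical;
 * the return value is 0 on wakeup or EWOULDBLOCK on timeout.
 *
 *	int
 *	example_timedsleep(void *chan, int timo)
 *	{
 *
 *		sleepq_lock(chan);
 *		sleepq_add(chan, NULL, "examp", SLEEPQ_SLEEP, 0);
 *		sleepq_set_timeout(chan, timo);
 *		return (sleepq_timedwait(chan, 0));
 *	}
 */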

/*
 * Return the number of actual sleepers for the specified queue.
 */
u_int
sleepq_sleepcnt(void *wchan, int queue)
{
	struct sleepqueue *sq;

	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	return (sq->sq_blockedcnt[queue]);
}

/*
 * Marks the pending sleep of the current thread as interruptible and
 * makes an initial check for pending signals before putting a thread
 * to sleep.  Enters and exits with the thread lock held.  Thread lock
 * may have transitioned from the sleepq lock to a run lock.
 */
static int
sleepq_catch_signals(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	struct proc *p;
	struct sigacts *ps;
	int sig, ret, stop_allowed;

	td = curthread;
	p = curproc;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	MPASS(wchan != NULL);

	/*
	 * See if there are any pending signals for this thread.  If not
	 * we can switch immediately.  Otherwise do the signal processing
	 * directly.
	 */
	thread_lock(td);
	if ((td->td_flags & (TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK)) == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
	stop_allowed = (td->td_flags & TDF_SBDRY) ? SIG_STOP_NOT_ALLOWED :
	    SIG_STOP_ALLOWED;
	thread_unlock(td);
	mtx_unlock_spin(&sc->sc_lock);
	CTR3(KTR_PROC, "sleepq catching signals: thread %p (pid %ld, %s)",
		(void *)td, (long)p->p_pid, td->td_name);
	PROC_LOCK(p);
	ps = p->p_sigacts;
	mtx_lock(&ps->ps_mtx);
	sig = cursig(td, stop_allowed);
	if (sig == 0) {
		mtx_unlock(&ps->ps_mtx);
		ret = thread_suspend_check(1);
		MPASS(ret == 0 || ret == EINTR || ret == ERESTART);
	} else {
		if (SIGISMEMBER(ps->ps_sigintr, sig))
			ret = EINTR;
		else
			ret = ERESTART;
		mtx_unlock(&ps->ps_mtx);
	}
	/*
	 * Lock the per-process spinlock prior to dropping the PROC_LOCK
	 * to avoid a signal delivery race.  PROC_LOCK, PROC_SLOCK, and
	 * thread_lock() are currently held in tdsignal().
	 */
	PROC_SLOCK(p);
	mtx_lock_spin(&sc->sc_lock);
	PROC_UNLOCK(p);
	thread_lock(td);
	PROC_SUNLOCK(p);
	if (ret == 0) {
		sleepq_switch(wchan, pri);
		return (0);
	}
	/*
	 * There were pending signals and this thread is still
	 * on the sleep queue, so remove it from the sleep queue.
	 */
	if (TD_ON_SLEEPQ(td)) {
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
	}
	mtx_unlock_spin(&sc->sc_lock);
	MPASS(td->td_lock != &sc->sc_lock);
	return (ret);
}

/*
 * Switches to another thread if we are still asleep on a sleep queue.
 * Returns with thread lock.
 */
static void
sleepq_switch(void *wchan, int pri)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;

	td = curthread;
	sc = SC_LOOKUP(wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If we have a sleep queue, then we've already been woken up, so
	 * just return.
	 */
	if (td->td_sleepqueue != NULL) {
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}

	/*
	 * If TDF_TIMEOUT is set, then our sleep has been timed out
	 * already but we are still on the sleep queue, so dequeue the
	 * thread and return.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_ON_SLEEPQ(td));
		sq = sleepq_lookup(wchan);
		if (sleepq_resume_thread(sq, td, 0)) {
#ifdef INVARIANTS
			/*
			 * This thread hasn't gone to sleep yet, so it
			 * should not be swapped out.
			 */
			panic("not waking up swapper");
#endif
		}
		mtx_unlock_spin(&sc->sc_lock);
		return;
	}
#ifdef SLEEPQUEUE_PROFILING
	if (prof_enabled)
		sleepq_profile(td->td_wmesg);
#endif
	MPASS(td->td_sleepqueue == NULL);
	sched_sleep(td, pri);
	thread_lock_set(td, &sc->sc_lock);
	SDT_PROBE0(sched, , , sleep);
	TD_SET_SLEEPING(td);
	mi_switch(SW_VOL | SWT_SLEEPQ, NULL);
	KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
	CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
}

/*
 * Check to see if we timed out.
 */
static int
sleepq_check_timeout(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/*
	 * If TDF_TIMEOUT is set, we timed out.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		td->td_flags &= ~TDF_TIMEOUT;
		return (EWOULDBLOCK);
	}

	/*
	 * If TDF_TIMOFAIL is set, the timeout ran after we had
	 * already been woken up.
	 */
	if (td->td_flags & TDF_TIMOFAIL)
		td->td_flags &= ~TDF_TIMOFAIL;

	/*
	 * If callout_stop() fails, then the timeout is running on
	 * another CPU, so synchronize with it to avoid having it
	 * accidentally wake up a subsequent sleep.
	 */
	else if (callout_stop(&td->td_slpcallout) == 0) {
		td->td_flags |= TDF_TIMEOUT;
		TD_SET_SLEEPING(td);
		mi_switch(SW_INVOL | SWT_SLEEPQTIMO, NULL);
	}
	return (0);
}

/*
 * Check to see if we were awoken by a signal.
 */
static int
sleepq_check_signals(void)
{
	struct thread *td;

	td = curthread;
	THREAD_LOCK_ASSERT(td, MA_OWNED);

	/* We are no longer in an interruptible sleep. */
	if (td->td_flags & TDF_SINTR)
		td->td_flags &= ~TDF_SINTR;

	if (td->td_flags & TDF_SLEEPABORT) {
		td->td_flags &= ~TDF_SLEEPABORT;
		return (td->td_intrval);
	}

	return (0);
}

/*
 * Block the current thread until it is awakened from its sleep queue.
 */
void
sleepq_wait(void *wchan, int pri)
{
	struct thread *td;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	thread_unlock(td);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it is interrupted by a signal.
 */
int
sleepq_wait_sig(void *wchan, int pri)
{
	int rcatch;
	int rval;

	rcatch = sleepq_catch_signals(wchan, pri);
	rval = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue
 * or it times out while waiting.
 */
int
sleepq_timedwait(void *wchan, int pri)
{
	struct thread *td;
	int rval;

	td = curthread;
	MPASS(!(td->td_flags & TDF_SINTR));
	thread_lock(td);
	sleepq_switch(wchan, pri);
	rval = sleepq_check_timeout();
	thread_unlock(td);

	return (rval);
}

/*
 * Block the current thread until it is awakened from its sleep queue,
 * it is interrupted by a signal, or it times out waiting to be awakened.
 */
int
sleepq_timedwait_sig(void *wchan, int pri)
{
	int rcatch, rvalt, rvals;

	rcatch = sleepq_catch_signals(wchan, pri);
	rvalt = sleepq_check_timeout();
	rvals = sleepq_check_signals();
	thread_unlock(curthread);
	if (rcatch)
		return (rcatch);
	if (rvals)
		return (rvals);
	return (rvalt);
}

/*
 * Returns the type of sleep queue given a wait channel.
 */
int
sleepq_type(void *wchan)
{
	struct sleepqueue *sq;
	int type;

	MPASS(wchan != NULL);

	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	if (sq == NULL) {
		sleepq_release(wchan);
		return (-1);
	}
	type = sq->sq_type;
	sleepq_release(wchan);
	return (type);
}

/*
 * Removes a thread from a sleep queue and makes it runnable.
 */
static int
sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
{
	struct sleepqueue_chain *sc;

	MPASS(td != NULL);
	MPASS(sq->sq_wchan != NULL);
	MPASS(td->td_wchan == sq->sq_wchan);
	MPASS(td->td_sqqueue < NR_SLEEPQS && td->td_sqqueue >= 0);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	sc = SC_LOOKUP(sq->sq_wchan);
	mtx_assert(&sc->sc_lock, MA_OWNED);

	SDT_PROBE2(sched, , , wakeup, td, td->td_proc);

	/* Remove the thread from the queue. */
	sq->sq_blockedcnt[td->td_sqqueue]--;
	TAILQ_REMOVE(&sq->sq_blocked[td->td_sqqueue], td, td_slpq);

	/*
	 * Get a sleep queue for this thread.  If this is the last waiter,
	 * use the queue itself and take it out of the chain, otherwise,
	 * remove a queue from the free list.
	 */
	if (LIST_EMPTY(&sq->sq_free)) {
		td->td_sleepqueue = sq;
#ifdef INVARIANTS
		sq->sq_wchan = NULL;
#endif
#ifdef SLEEPQUEUE_PROFILING
		sc->sc_depth--;
#endif
	} else
		td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
	LIST_REMOVE(td->td_sleepqueue, sq_hash);

	td->td_wmesg = NULL;
	td->td_wchan = NULL;
	td->td_flags &= ~TDF_SINTR;

	CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, td->td_name);

	/* Adjust priority if requested. */
	MPASS(pri == 0 || (pri >= PRI_MIN && pri <= PRI_MAX));
	if (pri != 0 && td->td_priority > pri)
		sched_prio(td, pri);

	/*
	 * Note that thread td might not be sleeping if it is running
	 * sleepq_catch_signals() on another CPU or is blocked on its
	 * proc lock to check signals.  There's no need to mark the
	 * thread runnable in that case.
	 */
	if (TD_IS_SLEEPING(td)) {
		TD_CLR_SLEEPING(td);
		return (setrunnable(td));
	}
	return (0);
}

#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
sleepq_dtor(void *mem, int size, void *arg)
{
	struct sleepqueue *sq;
	int i;

	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		MPASS(TAILQ_EMPTY(&sq->sq_blocked[i]));
		MPASS(sq->sq_blockedcnt[i] == 0);
	}
}
#endif

/*
 * UMA zone item initializer.
 */
static int
sleepq_init(void *mem, int size, int flags)
{
	struct sleepqueue *sq;
	int i;

	bzero(mem, size);
	sq = mem;
	for (i = 0; i < NR_SLEEPQS; i++) {
		TAILQ_INIT(&sq->sq_blocked[i]);
		sq->sq_blockedcnt[i] = 0;
	}
	LIST_INIT(&sq->sq_free);
	return (0);
}

/*
 * Find the highest priority thread sleeping on a wait channel and resume it.
 */
int
sleepq_signal(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *besttd;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/*
	 * Find the highest priority thread on the queue.  If there is a
	 * tie, use the thread that first appears in the queue as it has
	 * been sleeping the longest since threads are always added to
	 * the tail of sleep queues.
	 */
	besttd = NULL;
	TAILQ_FOREACH(td, &sq->sq_blocked[queue], td_slpq) {
		if (besttd == NULL || td->td_priority < besttd->td_priority)
			besttd = td;
	}
	MPASS(besttd != NULL);
	thread_lock(besttd);
	wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
	thread_unlock(besttd);
	return (wakeup_swapper);
}
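
/*
 * Example usage (an illustrative sketch): this is essentially the shape of
 * wakeup_one(9) in kern_synch.c, including the kick_proc0() call made when
 * sleepq_signal() reports that a swapped-out thread needs the swapper.
 *
 *	void
 *	example_wakeup_one(void *chan)
 *	{
 *		int wakeup_swapper;
 *
 *		sleepq_lock(chan);
 *		wakeup_swapper = sleepq_signal(chan, SLEEPQ_SLEEP, 0, 0);
 *		sleepq_release(chan);
 *		if (wakeup_swapper)
 *			kick_proc0();
 *	}
 */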

/*
 * Resume all threads sleeping on a specified wait channel.
 */
int
sleepq_broadcast(void *wchan, int flags, int pri, int queue)
{
	struct sleepqueue *sq;
	struct thread *td, *tdn;
	int wakeup_swapper;

	CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags);
	KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
	MPASS((queue >= 0) && (queue < NR_SLEEPQS));
	sq = sleepq_lookup(wchan);
	if (sq == NULL)
		return (0);
	KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
	    ("%s: mismatch between sleep/wakeup and cv_*", __func__));

	/* Resume all blocked threads on the sleep queue. */
	wakeup_swapper = 0;
	TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
		thread_lock(td);
		if (sleepq_resume_thread(sq, td, pri))
			wakeup_swapper = 1;
		thread_unlock(td);
	}
	return (wakeup_swapper);
}

/*
 * Time sleeping threads out.  When the timeout expires, the thread is
 * removed from the sleep queue and made runnable if it is still asleep.
 */
static void
sleepq_timeout(void *arg)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
	struct thread *td;
	void *wchan;
	int wakeup_swapper;

	td = arg;
	wakeup_swapper = 0;
	CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);

	/*
	 * First, see if the thread is asleep and get the wait channel if
	 * it is.
	 */
	thread_lock(td);
	if (TD_IS_SLEEPING(td) && TD_ON_SLEEPQ(td)) {
		wchan = td->td_wchan;
		sc = SC_LOOKUP(wchan);
		THREAD_LOCKPTR_ASSERT(td, &sc->sc_lock);
		sq = sleepq_lookup(wchan);
		MPASS(sq != NULL);
		td->td_flags |= TDF_TIMEOUT;
		wakeup_swapper = sleepq_resume_thread(sq, td, 0);
		thread_unlock(td);
		if (wakeup_swapper)
			kick_proc0();
		return;
	}

	/*
	 * If the thread is on the SLEEPQ but isn't sleeping yet, it
	 * can either be on another CPU in between sleepq_add() and
	 * one of the sleepq_*wait*() routines or it can be in
	 * sleepq_catch_signals().
	 */
	if (TD_ON_SLEEPQ(td)) {
		td->td_flags |= TDF_TIMEOUT;
		thread_unlock(td);
		return;
	}

	/*
	 * Now check for the edge cases.  First, if TDF_TIMEOUT is set,
	 * then the other thread has already yielded to us, so clear
	 * the flag and resume it.  If TDF_TIMEOUT is not set, then we
	 * know that the other thread is not on a sleep queue, but it
	 * hasn't resumed execution yet.  In that case, set TDF_TIMOFAIL
	 * to let it know that the timeout has already run and doesn't
	 * need to be canceled.
	 */
	if (td->td_flags & TDF_TIMEOUT) {
		MPASS(TD_IS_SLEEPING(td));
		td->td_flags &= ~TDF_TIMEOUT;
		TD_CLR_SLEEPING(td);
		wakeup_swapper = setrunnable(td);
	} else
		td->td_flags |= TDF_TIMOFAIL;
	thread_unlock(td);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Resumes a specific thread from the sleep queue associated with a specific
 * wait channel if it is on that queue.
 */
void
sleepq_remove(struct thread *td, void *wchan)
{
	struct sleepqueue *sq;
	int wakeup_swapper;

	/*
	 * Look up the sleep queue for this wait channel, then re-check
	 * that the thread is asleep on that channel; if it is not, bail.
	 */
	MPASS(wchan != NULL);
	sleepq_lock(wchan);
	sq = sleepq_lookup(wchan);
	/*
	 * We cannot lock the thread here as it may be sleeping on a
	 * different sleepq.  However, holding the sleepq lock for this
	 * wchan can guarantee that we do not miss a wakeup for this
	 * channel.  The asserts below will catch any false positives.
	 */
	if (!TD_ON_SLEEPQ(td) || td->td_wchan != wchan) {
		sleepq_release(wchan);
		return;
	}
	/* Thread is asleep on sleep queue sq, so wake it up. */
	thread_lock(td);
	MPASS(sq != NULL);
	MPASS(td->td_wchan == wchan);
	wakeup_swapper = sleepq_resume_thread(sq, td, 0);
	thread_unlock(td);
	sleepq_release(wchan);
	if (wakeup_swapper)
		kick_proc0();
}

/*
 * Abort a thread as if an interrupt had occurred.  Only abort
 * interruptible waits (unfortunately it isn't safe to abort others).
 */
int
sleepq_abort(struct thread *td, int intrval)
{
	struct sleepqueue *sq;
	void *wchan;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_SLEEPQ(td));
	MPASS(td->td_flags & TDF_SINTR);
	MPASS(intrval == EINTR || intrval == ERESTART);

	/*
	 * If the TDF_TIMEOUT flag is set, just leave.  A
	 * timeout is scheduled anyhow.
	 */
	if (td->td_flags & TDF_TIMEOUT)
		return (0);

	CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)",
	    (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
	td->td_intrval = intrval;
	td->td_flags |= TDF_SLEEPABORT;
	/*
	 * If the thread has not slept yet it will find the signal in
	 * sleepq_catch_signals() and call sleepq_resume_thread().  Otherwise
	 * we have to do it here.
	 */
	if (!TD_IS_SLEEPING(td))
		return (0);
	wchan = td->td_wchan;
	MPASS(wchan != NULL);
	sq = sleepq_lookup(wchan);
	MPASS(sq != NULL);

	/* Thread is asleep on sleep queue sq, so wake it up. */
	return (sleepq_resume_thread(sq, td, 0));
}

#ifdef SLEEPQUEUE_PROFILING
#define	SLEEPQ_PROF_LOCATIONS	1024
#define	SLEEPQ_SBUFSIZE		(40 * 512)
struct sleepq_prof {
	LIST_ENTRY(sleepq_prof) sp_link;
	const char	*sp_wmesg;
	long		sp_count;
};

LIST_HEAD(sqphead, sleepq_prof);

struct sqphead sleepq_prof_free;
struct sqphead sleepq_hash[SC_TABLESIZE];
static struct sleepq_prof sleepq_profent[SLEEPQ_PROF_LOCATIONS];
static struct mtx sleepq_prof_lock;
MTX_SYSINIT(sleepq_prof_lock, &sleepq_prof_lock, "sleepq_prof", MTX_SPIN);

static void
sleepq_profile(const char *wmesg)
{
	struct sleepq_prof *sp;

	mtx_lock_spin(&sleepq_prof_lock);
	if (prof_enabled == 0)
		goto unlock;
	LIST_FOREACH(sp, &sleepq_hash[SC_HASH(wmesg)], sp_link)
		if (sp->sp_wmesg == wmesg)
			goto done;
	sp = LIST_FIRST(&sleepq_prof_free);
	if (sp == NULL)
		goto unlock;
	sp->sp_wmesg = wmesg;
	LIST_REMOVE(sp, sp_link);
	LIST_INSERT_HEAD(&sleepq_hash[SC_HASH(wmesg)], sp, sp_link);
done:
	sp->sp_count++;
unlock:
	mtx_unlock_spin(&sleepq_prof_lock);
	return;
}

static void
sleepq_prof_reset(void)
{
	struct sleepq_prof *sp;
	int enabled;
	int i;

	mtx_lock_spin(&sleepq_prof_lock);
	enabled = prof_enabled;
	prof_enabled = 0;
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_INIT(&sleepq_hash[i]);
	LIST_INIT(&sleepq_prof_free);
	for (i = 0; i < SLEEPQ_PROF_LOCATIONS; i++) {
		sp = &sleepq_profent[i];
		sp->sp_wmesg = NULL;
		sp->sp_count = 0;
		LIST_INSERT_HEAD(&sleepq_prof_free, sp, sp_link);
	}
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);
}

static int
enable_sleepq_prof(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = prof_enabled;
	error = sysctl_handle_int(oidp, &v, v, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == prof_enabled)
		return (0);
	if (v == 1)
		sleepq_prof_reset();
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = !!v;
	mtx_unlock_spin(&sleepq_prof_lock);

	return (0);
}

static int
reset_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	int error, v;

	v = 0;
	error = sysctl_handle_int(oidp, &v, 0, req);
	if (error)
		return (error);
	if (req->newptr == NULL)
		return (error);
	if (v == 0)
		return (0);
	sleepq_prof_reset();

	return (0);
}

static int
dump_sleepq_prof_stats(SYSCTL_HANDLER_ARGS)
{
	static int multiplier = 1;
	struct sleepq_prof *sp;
	struct sbuf *sb;
	int enabled;
	int error;
	int i;

retry_sbufops:
	sb = sbuf_new(NULL, NULL, SLEEPQ_SBUFSIZE * multiplier, SBUF_FIXEDLEN);
	sbuf_printf(sb, "\nwmesg\tcount\n");
	enabled = prof_enabled;
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = 0;
	mtx_unlock_spin(&sleepq_prof_lock);
	for (i = 0; i < SC_TABLESIZE; i++) {
		LIST_FOREACH(sp, &sleepq_hash[i], sp_link) {
			sbuf_printf(sb, "%s\t%ld\n",
			    sp->sp_wmesg, sp->sp_count);
			if (sbuf_overflowed(sb)) {
				sbuf_delete(sb);
				multiplier++;
				goto retry_sbufops;
			}
		}
	}
	mtx_lock_spin(&sleepq_prof_lock);
	prof_enabled = enabled;
	mtx_unlock_spin(&sleepq_prof_lock);

	sbuf_finish(sb);
	error = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
	sbuf_delete(sb);
	return (error);
}

SYSCTL_PROC(_debug_sleepq, OID_AUTO, stats, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, dump_sleepq_prof_stats, "A", "Sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, reset, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, reset_sleepq_prof_stats, "I",
    "Reset sleepqueue profiling statistics");
SYSCTL_PROC(_debug_sleepq, OID_AUTO, enable, CTLTYPE_INT | CTLFLAG_RW,
    NULL, 0, enable_sleepq_prof, "I", "Enable sleepqueue profiling");
#endif
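
/*
 * Example use of the profiling knobs from userland (illustrative only;
 * requires a kernel built with "options SLEEPQUEUE_PROFILING"):
 *
 *	# sysctl debug.sleepq.enable=1
 *	... run the workload of interest ...
 *	# sysctl debug.sleepq.enable=0
 *	# sysctl debug.sleepq.stats
 *	# sysctl debug.sleepq.reset=1
 */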

#ifdef DDB
DB_SHOW_COMMAND(sleepq, db_show_sleepqueue)
{
	struct sleepqueue_chain *sc;
	struct sleepqueue *sq;
#ifdef INVARIANTS
	struct lock_object *lock;
#endif
	struct thread *td;
	void *wchan;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active sleep queue for the wait channel
	 * indicated by the address.
	 */
	wchan = (void *)addr;
	sc = SC_LOOKUP(wchan);
	LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
		if (sq->sq_wchan == wchan)
			goto found;

	/*
	 * Second, see if there is an active sleep queue at the address
	 * indicated by the passed in address.
	 */
	for (i = 0; i < SC_TABLESIZE; i++)
		LIST_FOREACH(sq, &sleepq_chains[i].sc_queues, sq_hash) {
			if (sq == (struct sleepqueue *)addr)
				goto found;
		}

	db_printf("Unable to locate a sleep queue via %p\n", (void *)addr);
	return;
found:
	db_printf("Wait channel: %p\n", sq->sq_wchan);
	db_printf("Queue type: %d\n", sq->sq_type);
#ifdef INVARIANTS
	if (sq->sq_lock) {
		lock = sq->sq_lock;
		db_printf("Associated Interlock: %p - (%s) %s\n", lock,
		    LOCK_CLASS(lock)->lc_name, lock->lo_name);
	}
#endif
	db_printf("Blocked threads:\n");
	for (i = 0; i < NR_SLEEPQS; i++) {
		db_printf("\nQueue[%d]:\n", i);
		if (TAILQ_EMPTY(&sq->sq_blocked[i]))
			db_printf("\tempty\n");
		else
			TAILQ_FOREACH(td, &sq->sq_blocked[i], td_slpq) {
				db_printf("\t%p (tid %d, pid %d, \"%s\")\n", td,
				    td->td_tid, td->td_proc->p_pid,
				    td->td_name);
			}
		db_printf("(expected: %u)\n", sq->sq_blockedcnt[i]);
	}
}

/* Alias 'show sleepqueue' to 'show sleepq'. */
DB_SHOW_ALIAS(sleepqueue, db_show_sleepqueue);
#endif
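
/*
 * Example ddb usage (illustrative): given a wait channel or sleep queue
 * address, either spelling of the command dumps the queue defined above.
 *
 *	db> show sleepq <addr>
 *	db> show sleepqueue <addr>
 */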