/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */
/*
 * Implementation of turnstiles used to hold the queue of threads blocked on
 * non-sleepable locks.  Sleepable locks use condition variables to
 * implement their queues.  Turnstiles differ from a sleep queue in that
 * turnstile queues are assigned to a lock held by an owning thread.  Thus,
 * when one thread is enqueued onto a turnstile, it can lend its priority
 * to the owning thread.
 *
 * We wish to avoid bloating locks with an embedded turnstile, and we do not
 * want to use back-pointers in the locks for the same reason.  Thus, we
 * use a similar approach to that of Solaris 7 as described in Solaris
 * Internals by Jim Mauro and Richard McDougall.  Turnstiles are looked up
 * in a hash table based on the address of the lock.  Each entry in the
 * hash table is a linked list of turnstiles and is called a turnstile
 * chain.  Each chain contains a spin mutex that protects all of the
 * turnstiles in the chain.
 *
 * Each time a thread is created, a turnstile is allocated from a UMA zone
 * and attached to that thread.  When a thread blocks on a lock, if it is the
 * first thread to block, it lends its turnstile to the lock.  If the lock
 * already has a turnstile, then it gives its turnstile to the lock's
 * turnstile's free list.  When a thread is woken up, it takes a turnstile
 * from the free list if there are any other waiters.  If it is the only
 * thread blocked on the lock, then it reclaims the turnstile associated with
 * the lock and removes it from the hash table.
 */
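/*
 * As an illustrative sketch (not part of the original source), a lock
 * implementation typically drives this interface roughly as follows when a
 * thread must block; the lock "m" and its try_acquire() helper are
 * hypothetical:
 *
 *	ts = turnstile_trywait(&m->lock_object);
 *	if (try_acquire(m)) {
 *		turnstile_cancel(ts);	(lock was released in the window)
 *		return;
 *	}
 *	turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
 *
 * and roughly as follows when releasing a contested lock:
 *
 *	turnstile_chain_lock(&m->lock_object);
 *	ts = turnstile_lookup(&m->lock_object);
 *	turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
 *	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
 *	turnstile_chain_unlock(&m->lock_object);
 */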
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_turnstile_profiling.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#include <sys/lockmgr.h>
#include <sys/sx.h>
#endif
/*
 * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
 * number chosen because the sleep queues use the same value for the
 * shift.  Basically, we ignore the lower 8 bits of the address.
 * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
 */
#define	TC_TABLESIZE	128			/* Must be power of 2. */
#define	TC_MASK		(TC_TABLESIZE - 1)
#define	TC_SHIFT	8
#define	TC_HASH(lock)	(((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
#define	TC_LOOKUP(lock)	&turnstile_chains[TC_HASH(lock)]
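/*
 * Worked example (illustrative address only): for a lock object at the
 * hypothetical address 0xc4056e40,
 *
 *	(uintptr_t)lock >> TC_SHIFT  == 0xc4056e
 *	0xc4056e & TC_MASK (0x7f)    == 0x6e
 *
 * so TC_HASH(lock) is 110 and TC_LOOKUP(lock) yields
 * &turnstile_chains[110].  Locks within the same 256-byte region of the
 * address space share a chain.
 */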
/*
 * There are three different lists of turnstiles as follows.  The list
 * connected by ts_link entries is a per-thread list of all the turnstiles
 * attached to locks that we own.  This is used to fix up our priority when
 * a lock is released.  The other two lists use the ts_hash entries.  The
 * first of these two is the turnstile chain list that a turnstile is on
 * when it is attached to a lock.  The second list to use ts_hash is the
 * free list hung off of a turnstile that is attached to a lock.
 *
 * Each turnstile contains three lists of threads.  The two ts_blocked lists
 * are linked lists of threads blocked on the turnstile's lock.  One list is
 * for exclusive waiters, and the other is for shared waiters.  The
 * ts_pending list is a linked list of threads previously awakened by
 * turnstile_signal() or turnstile_wait() that are waiting to be put on
 * the run queue.
 *
 * Locking key:
 *  c - turnstile chain lock
 *  q - td_contested lock
 */
struct turnstile {
	struct mtx ts_lock;			/* Spin lock for self. */
	struct threadqueue ts_blocked[2];	/* (c + q) Blocked threads. */
	struct threadqueue ts_pending;		/* (c) Pending threads. */
	LIST_ENTRY(turnstile) ts_hash;		/* (c) Chain and free list. */
	LIST_ENTRY(turnstile) ts_link;		/* (q) Contested locks. */
	LIST_HEAD(, turnstile) ts_free;		/* (c) Free turnstiles. */
	struct lock_object *ts_lockobj;		/* (c) Lock we reference. */
	struct thread *ts_owner;		/* (c + q) Who owns the lock. */
};
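/*
 * Sketch of the list relationships described above (illustrative):
 *
 *	turnstile_chains[i] --tc_turnstiles--> tsA --ts_hash--> tsB --> ...
 *	tsA --ts_free--> tsC --ts_hash--> tsD	(donated by later waiters)
 *	owning thread --td_contested--> tsA --ts_link--> tsE --> ...
 *
 * tsA and tsB sit on the same hash chain because their locks hash to the
 * same bucket; tsC and tsD were lent by the second and third waiters on
 * tsA's lock; tsA and tsE are contested locks owned by the same thread.
 */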
struct turnstile_chain {
	LIST_HEAD(, turnstile) tc_turnstiles;	/* List of turnstiles. */
	struct mtx tc_lock;			/* Spin lock for this chain. */
#ifdef TURNSTILE_PROFILING
	u_int	tc_depth;		/* Length of tc_turnstiles. */
	u_int	tc_max_depth;		/* Max length of tc_turnstiles. */
#endif
};
#ifdef TURNSTILE_PROFILING
u_int turnstile_max_depth;
static SYSCTL_NODE(_debug, OID_AUTO, turnstile, CTLFLAG_RD, 0,
    "turnstile profiling");
static SYSCTL_NODE(_debug_turnstile, OID_AUTO, chains, CTLFLAG_RD, 0,
    "turnstile chain stats");
SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD,
    &turnstile_max_depth, 0, "maximum depth achieved of a single chain");
#endif
static struct mtx td_contested_lock;
static struct turnstile_chain turnstile_chains[TC_TABLESIZE];
static uma_zone_t turnstile_zone;
/*
 * Prototypes for non-exported routines.
 */
static void	init_turnstile0(void *dummy);
#ifdef TURNSTILE_PROFILING
static void	init_turnstile_profiling(void *arg);
#endif
static void	propagate_priority(struct thread *td);
static int	turnstile_adjust_thread(struct turnstile *ts,
		    struct thread *td);
static struct thread *turnstile_first_waiter(struct turnstile *ts);
static void	turnstile_setowner(struct turnstile *ts, struct thread *owner);
#ifdef INVARIANTS
static void	turnstile_dtor(void *mem, int size, void *arg);
#endif
static int	turnstile_init(void *mem, int size, int flags);
static void	turnstile_fini(void *mem, int size);
SDT_PROVIDER_DECLARE(sched);
SDT_PROBE_DEFINE(sched, , , sleep);
SDT_PROBE_DEFINE2(sched, , , wakeup, "struct thread *",
    "struct proc *");
/*
 * Walks the chain of turnstiles and their owners to propagate the priority
 * of the thread being blocked to all the threads holding locks that must be
 * released before this thread can run again.
 */
static void
propagate_priority(struct thread *td)
{
	struct turnstile *ts;
	int pri;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	pri = td->td_priority;
	ts = td->td_blocked;
	THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
	/*
	 * Grab a recursive lock on this turnstile chain so it stays locked
	 * for the whole operation.  The caller expects us to return with
	 * the original lock held.  We only ever lock down the chain so
	 * the lock order is constant.
	 */
	mtx_lock_spin(&ts->ts_lock);
	for (;;) {
		td = ts->ts_owner;

		if (td == NULL) {
			/*
			 * This might be a read lock with no owner.  There's
			 * not much we can do, so just bail.
			 */
			mtx_unlock_spin(&ts->ts_lock);
			return;
		}

		thread_lock_flags(td, MTX_DUPOK);
		mtx_unlock_spin(&ts->ts_lock);
		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		/*
		 * If the thread is asleep, then we are probably about
		 * to deadlock.  To make debugging this easier, show a
		 * backtrace of the misbehaving thread and panic so as
		 * not to leave the kernel deadlocked.
		 */
		if (TD_IS_SLEEPING(td)) {
			printf(
		"Sleeping thread (tid %d, pid %d) owns a non-sleepable lock\n",
			    td->td_tid, td->td_proc->p_pid);
			kdb_backtrace_thread(td);
			panic("sleeping thread");
		}
		/*
		 * If this thread already has higher priority than the
		 * thread that is being blocked, we are finished.
		 */
		if (td->td_priority <= pri) {
			thread_unlock(td);
			return;
		}

		/*
		 * Bump this thread's priority.
		 */
		sched_lend_prio(td, pri);

		/*
		 * If the lock holder is actually running or on the run
		 * queue then we are done.
		 */
		if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
			thread_unlock(td);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check whether td is curthread (this should
		 * never happen, however, as it would mean we are in a
		 * deadlock).
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif
		/*
		 * If we aren't blocked on a lock, we should be.
		 */
		KASSERT(TD_ON_LOCK(td), (
		    "thread %d(%s):%d holds %s but isn't blocked on a lock\n",
		    td->td_tid, td->td_name, td->td_state,
		    ts->ts_lockobj->lo_name));

		/*
		 * Pick up the lock that td is blocked on.
		 */
		ts = td->td_blocked;
		MPASS(ts != NULL);
		THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
		/* Resort td on the list if needed. */
		if (!turnstile_adjust_thread(ts, td)) {
			mtx_unlock_spin(&ts->ts_lock);
			return;
		}
		/* The thread lock is released as the ts lock above. */
	}
}
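/*
 * For example (illustrative numbers): if thread A at priority 100 blocks on
 * a mutex held by B at priority 120, and B is itself blocked on a mutex held
 * by C at priority 140, propagate_priority() lends priority 100 first to B
 * and then to C, so the whole chain runs well enough for A's lock to be
 * released.  (Lower numeric values denote more important priorities.)
 */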
/*
 * Adjust the thread's position on a turnstile after its priority has been
 * changed.
 */
static int
turnstile_adjust_thread(struct turnstile *ts, struct thread *td)
{
	struct thread *td1, *td2;
	int queue;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_LOCK(td));

	/*
	 * This thread may not be blocked on this turnstile anymore
	 * but instead might already be woken up on another CPU
	 * that is waiting on the thread lock in turnstile_unpend() to
	 * finish waking this thread up.  We can detect this case
	 * by checking to see if this thread has been given a
	 * turnstile by either turnstile_signal() or
	 * turnstile_broadcast().  In this case, treat the thread as
	 * if it was already running.
	 */
	if (td->td_turnstile != NULL)
		return (0);

	/*
	 * Check if the thread needs to be moved on the blocked chain.
	 * It needs to be moved if either its priority is lower than
	 * the previous thread or higher than the next thread.
	 */
	THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
	td1 = TAILQ_PREV(td, threadqueue, td_lockq);
	td2 = TAILQ_NEXT(td, td_lockq);
	if ((td1 != NULL && td->td_priority < td1->td_priority) ||
	    (td2 != NULL && td->td_priority > td2->td_priority)) {

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved to.
		 */
		queue = td->td_tsqueue;
		MPASS(queue == TS_EXCLUSIVE_QUEUE || queue == TS_SHARED_QUEUE);
		mtx_lock_spin(&td_contested_lock);
		TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > td->td_priority)
				break;
		}

		if (td1 == NULL)
			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		else
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		if (td1 == NULL)
			CTR3(KTR_LOCK,
		"turnstile_adjust_thread: td %d put at tail on [%p] %s",
			    td->td_tid, ts->ts_lockobj,
			    ts->ts_lockobj->lo_name);
		else
			CTR4(KTR_LOCK,
		"turnstile_adjust_thread: td %d moved before %d on [%p] %s",
			    td->td_tid, td1->td_tid, ts->ts_lockobj,
			    ts->ts_lockobj->lo_name);
	}
	return (1);
}
/*
 * Early initialization of turnstiles.  This is not done via a SYSINIT()
 * since this needs to be initialized very early when mutexes are first
 * initialized.
 */
void
init_turnstiles(void)
{
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		LIST_INIT(&turnstile_chains[i].tc_turnstiles);
		mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
		    NULL, MTX_SPIN);
	}
	mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
	LIST_INIT(&thread0.td_contested);
	thread0.td_turnstile = NULL;
}
#ifdef TURNSTILE_PROFILING
static void
init_turnstile_profiling(void *arg)
{
	struct sysctl_oid *chain_oid;
	char chain_name[10];
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_turnstile_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "turnstile chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &turnstile_chains[i].tc_depth, 0,
		    NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &turnstile_chains[i].tc_max_depth,
		    0, NULL);
	}
}
SYSINIT(turnstile_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_turnstile_profiling, NULL);
#endif
static void
init_turnstile0(void *dummy)
{

	turnstile_zone = uma_zcreate("TURNSTILE", sizeof(struct turnstile),
	    NULL,
#ifdef INVARIANTS
	    turnstile_dtor,
#else
	    NULL,
#endif
	    turnstile_init, turnstile_fini, UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
	thread0.td_turnstile = turnstile_alloc();
}
SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);
/*
 * Update a thread on the turnstile list after its priority has been changed.
 * The old priority is passed in as an argument.
 */
void
turnstile_adjust(struct thread *td, u_char oldpri)
{
	struct turnstile *ts;

	MPASS(TD_ON_LOCK(td));

	/*
	 * Pick up the lock that td is blocked on.
	 */
	ts = td->td_blocked;
	MPASS(ts != NULL);
	THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
	mtx_assert(&ts->ts_lock, MA_OWNED);

	/* Resort the turnstile on the list. */
	if (!turnstile_adjust_thread(ts, td))
		return;
	/*
	 * If our priority was lowered and we are at the head of the
	 * turnstile, then propagate our new priority up the chain.
	 * Note that we currently don't try to revoke lent priorities
	 * when our priority goes up.
	 */
	MPASS(td->td_tsqueue == TS_EXCLUSIVE_QUEUE ||
	    td->td_tsqueue == TS_SHARED_QUEUE);
	if (td == TAILQ_FIRST(&ts->ts_blocked[td->td_tsqueue]) &&
	    td->td_priority < oldpri) {
		propagate_priority(td);
	}
}
/*
 * Set the owner of the lock this turnstile is attached to.
 */
static void
turnstile_setowner(struct turnstile *ts, struct thread *owner)
{

	mtx_assert(&td_contested_lock, MA_OWNED);
	MPASS(ts->ts_owner == NULL);

	/* A shared lock might not have an owner. */
	if (owner == NULL)
		return;

	MPASS(owner->td_proc->p_magic == P_MAGIC);
	ts->ts_owner = owner;
	LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
}
#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
turnstile_dtor(void *mem, int size, void *arg)
{
	struct turnstile *ts;

	ts = mem;
	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]));
	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
	MPASS(TAILQ_EMPTY(&ts->ts_pending));
}
#endif
/*
 * UMA zone item initializer.
 */
static int
turnstile_init(void *mem, int size, int flags)
{
	struct turnstile *ts;

	bzero(mem, size);
	ts = mem;
	TAILQ_INIT(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
	TAILQ_INIT(&ts->ts_blocked[TS_SHARED_QUEUE]);
	TAILQ_INIT(&ts->ts_pending);
	LIST_INIT(&ts->ts_free);
	mtx_init(&ts->ts_lock, "turnstile lock", NULL, MTX_SPIN | MTX_RECURSE);
	return (0);
}
static void
turnstile_fini(void *mem, int size)
{
	struct turnstile *ts;

	ts = mem;
	mtx_destroy(&ts->ts_lock);
}

/*
 * Get a turnstile for a new thread.
 */
struct turnstile *
turnstile_alloc(void)
{

	return (uma_zalloc(turnstile_zone, M_WAITOK));
}

/*
 * Free a turnstile when a thread is destroyed.
 */
void
turnstile_free(struct turnstile *ts)
{

	uma_zfree(turnstile_zone, ts);
}
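/*
 * Illustrative pairing (a sketch, not code from this file): the thread
 * lifecycle code attaches a turnstile when a thread is created and
 * releases it on destruction, e.g.
 *
 *	td->td_turnstile = turnstile_alloc();	(at thread creation)
 *	turnstile_free(td->td_turnstile);	(at thread destruction)
 */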
/*
 * Lock the turnstile chain associated with the specified lock.
 */
void
turnstile_chain_lock(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	tc = TC_LOOKUP(lock);
	mtx_lock_spin(&tc->tc_lock);
}
struct turnstile *
turnstile_trywait(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;

	tc = TC_LOOKUP(lock);
	mtx_lock_spin(&tc->tc_lock);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock) {
			mtx_lock_spin(&ts->ts_lock);
			return (ts);
		}

	ts = curthread->td_turnstile;
	MPASS(ts != NULL);
	mtx_lock_spin(&ts->ts_lock);
	KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
	ts->ts_lockobj = lock;

	return (ts);
}
void
turnstile_cancel(struct turnstile *ts)
{
	struct turnstile_chain *tc;
	struct lock_object *lock;

	mtx_assert(&ts->ts_lock, MA_OWNED);

	mtx_unlock_spin(&ts->ts_lock);
	lock = ts->ts_lockobj;
	if (ts == curthread->td_turnstile)
		ts->ts_lockobj = NULL;
	tc = TC_LOOKUP(lock);
	mtx_unlock_spin(&tc->tc_lock);
}
/*
 * Look up the turnstile for a lock in the hash table, locking the associated
 * turnstile chain along the way.  If no turnstile is found in the hash
 * table, NULL is returned.
 */
struct turnstile *
turnstile_lookup(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;

	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock) {
			mtx_lock_spin(&ts->ts_lock);
			return (ts);
		}
	return (NULL);
}
/*
 * Unlock the turnstile chain associated with a given lock.
 */
void
turnstile_chain_unlock(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	tc = TC_LOOKUP(lock);
	mtx_unlock_spin(&tc->tc_lock);
}
/*
 * Return a pointer to the thread waiting on this turnstile with the
 * most important priority, or NULL if the turnstile has no waiters.
 */
static struct thread *
turnstile_first_waiter(struct turnstile *ts)
{
	struct thread *std, *xtd;

	std = TAILQ_FIRST(&ts->ts_blocked[TS_SHARED_QUEUE]);
	xtd = TAILQ_FIRST(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
	if (xtd == NULL || (std != NULL && std->td_priority < xtd->td_priority))
		return (std);
	return (xtd);
}
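/*
 * For example (illustrative): with a shared waiter at priority 80 and an
 * exclusive waiter at priority 90, the shared waiter is returned, since
 * lower numeric values are more important priorities.  With no shared
 * waiters at all, the head of the exclusive queue is returned instead.
 */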
/*
 * Take ownership of a turnstile and adjust the priority of the new
 * owner appropriately.
 */
void
turnstile_claim(struct turnstile *ts)
{
	struct thread *td, *owner;
	struct turnstile_chain *tc;

	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(ts != curthread->td_turnstile);

	owner = curthread;
	mtx_lock_spin(&td_contested_lock);
	turnstile_setowner(ts, owner);
	mtx_unlock_spin(&td_contested_lock);

	td = turnstile_first_waiter(ts);
	MPASS(td != NULL);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);

	/*
	 * Update the priority of the new owner if needed.
	 */
	thread_lock(owner);
	if (td->td_priority < owner->td_priority)
		sched_lend_prio(owner, td->td_priority);
	thread_unlock(owner);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_unlock_spin(&ts->ts_lock);
	mtx_unlock_spin(&tc->tc_lock);
}
/*
 * Block the current thread on the turnstile associated with 'lock'.  This
 * function will context switch and not return until this thread has been
 * woken back up.  This function must be called with the appropriate
 * turnstile chain locked and will return with it unlocked.
 */
void
turnstile_wait(struct turnstile *ts, struct thread *owner, int queue)
{
	struct turnstile_chain *tc;
	struct thread *td, *td1;
	struct lock_object *lock;

	td = curthread;
	mtx_assert(&ts->ts_lock, MA_OWNED);
	if (owner)
		MPASS(owner->td_proc->p_magic == P_MAGIC);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/*
	 * If the lock does not already have a turnstile, use this thread's
	 * turnstile.  Otherwise insert the current thread into the
	 * turnstile already in use by this lock.
	 */
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	if (ts == td->td_turnstile) {
#ifdef TURNSTILE_PROFILING
		tc->tc_depth++;
		if (tc->tc_depth > tc->tc_max_depth) {
			tc->tc_max_depth = tc->tc_depth;
			if (tc->tc_max_depth > turnstile_max_depth)
				turnstile_max_depth = tc->tc_max_depth;
		}
#endif
		LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
		KASSERT(TAILQ_EMPTY(&ts->ts_pending),
		    ("thread's turnstile has pending threads"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]),
		    ("thread's turnstile has exclusive waiters"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]),
		    ("thread's turnstile has shared waiters"));
		KASSERT(LIST_EMPTY(&ts->ts_free),
		    ("thread's turnstile has a non-empty free list"));
		MPASS(ts->ts_lockobj != NULL);
		mtx_lock_spin(&td_contested_lock);
		TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		turnstile_setowner(ts, owner);
		mtx_unlock_spin(&td_contested_lock);
	} else {
		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq)
			if (td1->td_priority > td->td_priority)
				break;
		mtx_lock_spin(&td_contested_lock);
		if (td1 != NULL)
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		else
			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		MPASS(owner == ts->ts_owner);
		mtx_unlock_spin(&td_contested_lock);
		MPASS(td->td_turnstile != NULL);
		LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
	}
	thread_lock(td);
	thread_lock_set(td, &ts->ts_lock);
	td->td_turnstile = NULL;
	/* Save who we are blocked on and switch. */
	lock = ts->ts_lockobj;
	td->td_tsqueue = queue;
	td->td_blocked = ts;
	td->td_lockname = lock->lo_name;
	td->td_blktick = ticks;
	TD_SET_LOCK(td);
	mtx_unlock_spin(&tc->tc_lock);
	propagate_priority(td);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d blocked on [%p] %s", __func__,
		    td->td_tid, lock, lock->lo_name);

	SDT_PROBE0(sched, , , sleep);

	THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
	mi_switch(SW_VOL | SWT_TURNSTILE, NULL);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d free from blocked on [%p] %s",
		    __func__, td->td_tid, lock, lock->lo_name);
	thread_unlock(td);
}
/*
 * Pick the highest priority thread on this turnstile and put it on the
 * pending list.  This must be called with the turnstile chain locked.
 */
int
turnstile_signal(struct turnstile *ts, int queue)
{
	struct turnstile_chain *tc;
	struct thread *td;
	int empty;

	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/*
	 * Pick the highest priority thread blocked on this lock and
	 * move it to the pending list.
	 */
	td = TAILQ_FIRST(&ts->ts_blocked[queue]);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_lock_spin(&td_contested_lock);
	TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
	mtx_unlock_spin(&td_contested_lock);
	TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);

	/*
	 * If the turnstile is now empty, remove it from its chain and
	 * give it to the about-to-be-woken thread.  Otherwise take a
	 * turnstile from the free list and give it to the thread.
	 */
	empty = TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]);
	if (empty) {
		tc = TC_LOOKUP(ts->ts_lockobj);
		mtx_assert(&tc->tc_lock, MA_OWNED);
		MPASS(LIST_EMPTY(&ts->ts_free));
#ifdef TURNSTILE_PROFILING
		tc->tc_depth--;
#endif
	} else
		ts = LIST_FIRST(&ts->ts_free);
	MPASS(ts != NULL);
	LIST_REMOVE(ts, ts_hash);
	td->td_turnstile = ts;

	return (empty);
}
/*
 * Put all blocked threads on the pending list.  This must be called with
 * the turnstile chain locked.
 */
void
turnstile_broadcast(struct turnstile *ts, int queue)
{
	struct turnstile_chain *tc;
	struct turnstile *ts1;
	struct thread *td;

	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
	/*
	 * We must have the chain locked so that we can remove the empty
	 * turnstile from the hash queue.
	 */
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/*
	 * Transfer the blocked list to the pending list.
	 */
	mtx_lock_spin(&td_contested_lock);
	TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked[queue], td_lockq);
	mtx_unlock_spin(&td_contested_lock);

	/*
	 * Give a turnstile to each thread.  The last thread gets
	 * this turnstile if the turnstile is empty.
	 */
	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
		if (LIST_EMPTY(&ts->ts_free)) {
			MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
			ts1 = ts;
#ifdef TURNSTILE_PROFILING
			tc->tc_depth--;
#endif
		} else
			ts1 = LIST_FIRST(&ts->ts_free);
		MPASS(ts1 != NULL);
		LIST_REMOVE(ts1, ts_hash);
		td->td_turnstile = ts1;
	}
}
/*
 * Wake up all threads on the pending list and adjust the priority of the
 * current thread appropriately.  This must be called with the turnstile
 * chain locked.
 */
void
turnstile_unpend(struct turnstile *ts, int owner_type)
{
	TAILQ_HEAD(, thread) pending_threads;
	struct turnstile *nts;
	struct thread *td;
	u_char cp, pri;

	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
	MPASS(!TAILQ_EMPTY(&ts->ts_pending));

	/*
	 * Move the list of pending threads out of the turnstile and
	 * into a local variable.
	 */
	TAILQ_INIT(&pending_threads);
	TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]))
		ts->ts_lockobj = NULL;
#endif
	/*
	 * Adjust the priority of curthread based on other contested
	 * locks it owns.  Don't lower the priority below the base
	 * priority, however.
	 */
	td = curthread;
	pri = PRI_MAX;
	thread_lock(td);
	mtx_lock_spin(&td_contested_lock);
	/*
	 * Remove the turnstile from this thread's list of contested locks
	 * since this thread doesn't own it anymore.  New threads will
	 * not be blocking on the turnstile until it is claimed by a new
	 * owner.  There might not be a current owner if this is a shared
	 * lock.
	 */
	if (ts->ts_owner != NULL) {
		ts->ts_owner = NULL;
		LIST_REMOVE(ts, ts_link);
	}
	LIST_FOREACH(nts, &td->td_contested, ts_link) {
		cp = turnstile_first_waiter(nts)->td_priority;
		if (cp < pri)
			pri = cp;
	}
	mtx_unlock_spin(&td_contested_lock);
	sched_unlend_prio(td, pri);
	thread_unlock(td);
	/*
	 * Wake up all the pending threads.  If a thread is not blocked
	 * on a lock, then it is currently executing on another CPU in
	 * turnstile_wait() or sitting on a run queue waiting to resume
	 * in turnstile_wait().  Set a flag to force it to try to acquire
	 * the lock again instead of blocking.
	 */
	while (!TAILQ_EMPTY(&pending_threads)) {
		td = TAILQ_FIRST(&pending_threads);
		TAILQ_REMOVE(&pending_threads, td, td_lockq);
		SDT_PROBE2(sched, , , wakeup, td, td->td_proc);
		thread_lock(td);
		THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		MPASS(TD_ON_LOCK(td));
		TD_CLR_LOCK(td);
		MPASS(TD_CAN_RUN(td));
		td->td_blocked = NULL;
		td->td_lockname = NULL;
		td->td_blktick = 0;
#ifdef INVARIANTS
		td->td_tsqueue = 0xff;	/* deliberately bogus */
#endif
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}
	mtx_unlock_spin(&ts->ts_lock);
}
/*
 * Give up ownership of a turnstile.  This must be called with the
 * turnstile chain locked.
 */
void
turnstile_disown(struct turnstile *ts)
{
	struct thread *td;
	u_char cp, pri;

	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(ts->ts_owner == curthread);
	MPASS(TAILQ_EMPTY(&ts->ts_pending));
	MPASS(!TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) ||
	    !TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));

	/*
	 * Remove the turnstile from this thread's list of contested locks
	 * since this thread doesn't own it anymore.  New threads will
	 * not be blocking on the turnstile until it is claimed by a new
	 * owner.
	 */
	mtx_lock_spin(&td_contested_lock);
	ts->ts_owner = NULL;
	LIST_REMOVE(ts, ts_link);
	mtx_unlock_spin(&td_contested_lock);

	/*
	 * Adjust the priority of curthread based on other contested
	 * locks it owns.  Don't lower the priority below the base
	 * priority, however.
	 */
	td = curthread;
	pri = PRI_MAX;
	thread_lock(td);
	mtx_unlock_spin(&ts->ts_lock);
	mtx_lock_spin(&td_contested_lock);
	LIST_FOREACH(ts, &td->td_contested, ts_link) {
		cp = turnstile_first_waiter(ts)->td_priority;
		if (cp < pri)
			pri = cp;
	}
	mtx_unlock_spin(&td_contested_lock);
	sched_unlend_prio(td, pri);
	thread_unlock(td);
}
/*
 * Return the first thread in a turnstile.
 */
struct thread *
turnstile_head(struct turnstile *ts, int queue)
{
#ifdef INVARIANTS

	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
	mtx_assert(&ts->ts_lock, MA_OWNED);
#endif
	return (TAILQ_FIRST(&ts->ts_blocked[queue]));
}

/*
 * Returns true if a sub-queue of a turnstile is empty.
 */
int
turnstile_empty(struct turnstile *ts, int queue)
{
#ifdef INVARIANTS

	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
	mtx_assert(&ts->ts_lock, MA_OWNED);
#endif
	return (TAILQ_EMPTY(&ts->ts_blocked[queue]));
}
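/*
 * An illustrative sketch (not from this file) of how a reader/writer style
 * lock might use these queries when releasing a contested write hold; the
 * lock "rw" is hypothetical:
 *
 *	turnstile_chain_lock(&rw->lock_object);
 *	ts = turnstile_lookup(&rw->lock_object);
 *	if (!turnstile_empty(ts, TS_EXCLUSIVE_QUEUE))
 *		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
 *	else
 *		turnstile_broadcast(ts, TS_SHARED_QUEUE);
 *	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
 *	turnstile_chain_unlock(&rw->lock_object);
 */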
#ifdef DDB
static void
print_thread(struct thread *td, const char *prefix)
{

	db_printf("%s%p (tid %d, pid %d, \"%s\")\n", prefix, td, td->td_tid,
	    td->td_proc->p_pid, td->td_name);
}

static void
print_queue(struct threadqueue *queue, const char *header, const char *prefix)
{
	struct thread *td;

	db_printf("%s:\n", header);
	if (TAILQ_EMPTY(queue)) {
		db_printf("%sempty\n", prefix);
		return;
	}
	TAILQ_FOREACH(td, queue, td_lockq) {
		print_thread(td, prefix);
	}
}
DB_SHOW_COMMAND(turnstile, db_show_turnstile)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct lock_object *lock;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active turnstile for the lock indicated
	 * by the address.
	 */
	lock = (struct lock_object *)addr;
	tc = TC_LOOKUP(lock);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock)
			goto found;

	/*
	 * Second, see if there is an active turnstile at the address
	 * indicated.
	 */
	for (i = 0; i < TC_TABLESIZE; i++)
		LIST_FOREACH(ts, &turnstile_chains[i].tc_turnstiles, ts_hash) {
			if (ts == (struct turnstile *)addr)
				goto found;
		}

	db_printf("Unable to locate a turnstile via %p\n", (void *)addr);
	return;
found:
	lock = ts->ts_lockobj;
	db_printf("Lock: %p - (%s) %s\n", lock, LOCK_CLASS(lock)->lc_name,
	    lock->lo_name);
	if (ts->ts_owner)
		print_thread(ts->ts_owner, "Lock Owner: ");
	else
		db_printf("Lock Owner: none\n");
	print_queue(&ts->ts_blocked[TS_SHARED_QUEUE], "Shared Waiters", "\t");
	print_queue(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE], "Exclusive Waiters",
	    "\t");
	print_queue(&ts->ts_pending, "Pending Threads", "\t");
}
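/*
 * Illustrative DDB usage (a sketch of the command, not sample output):
 * "show turnstile <addr>", where <addr> may be either the address of a
 * lock object or the address of the turnstile itself, as handled by the
 * two lookups above.
 */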
/*
 * Show all the threads a particular thread is waiting on based on
 * non-sleepable and non-spin locks.
 */
static void
print_lockchain(struct thread *td, const char *prefix)
{
	struct lock_object *lock;
	struct lock_class *class;
	struct turnstile *ts;

	/*
	 * Follow the chain.  We keep walking as long as the thread is
	 * blocked on a turnstile that has an owner.
	 */
	while (!db_pager_quit) {
		db_printf("%sthread %d (pid %d, %s) ", prefix, td->td_tid,
		    td->td_proc->p_pid, td->td_name);
		switch (td->td_state) {
		case TDS_INACTIVE:
			db_printf("is inactive\n");
			return;
		case TDS_CAN_RUN:
			db_printf("can run\n");
			return;
		case TDS_RUNQ:
			db_printf("is on a run queue\n");
			return;
		case TDS_RUNNING:
			db_printf("running on CPU %d\n", td->td_oncpu);
			return;
		case TDS_INHIBITED:
			if (TD_ON_LOCK(td)) {
				ts = td->td_blocked;
				lock = ts->ts_lockobj;
				class = LOCK_CLASS(lock);
				db_printf("blocked on lock %p (%s) \"%s\"\n",
				    lock, class->lc_name, lock->lo_name);
				if (ts->ts_owner == NULL)
					return;
				td = ts->ts_owner;
				break;
			}
			db_printf("inhibited\n");
			return;
		default:
			db_printf("??? (%#x)\n", td->td_state);
			return;
		}
	}
}
DB_SHOW_COMMAND(lockchain, db_show_lockchain)
{
	struct thread *td;

	/* Figure out which thread to start with. */
	if (have_addr)
		td = db_lookup_thread(addr, true);
	else
		td = kdb_thread;

	print_lockchain(td, "");
}
DB_SHOW_ALL_COMMAND(chains, db_show_allchains)
{
	struct thread *td;
	struct proc *p;
	int i;

	i = 1;
	FOREACH_PROC_IN_SYSTEM(p) {
		FOREACH_THREAD_IN_PROC(p, td) {
			if (TD_ON_LOCK(td) && LIST_EMPTY(&td->td_contested)) {
				db_printf("chain %d:\n", i++);
				print_lockchain(td, " ");
			}
		}
	}
}
DB_SHOW_ALIAS(allchains, db_show_allchains)
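/*
 * Illustrative DDB usage: "show lockchain <thread>" follows a single
 * thread's chain of turnstile owners, while "show allchains" scans every
 * thread and prints a chain for each thread that is blocked on a lock but
 * owns no contested locks of its own (i.e., the outermost waiter).
 */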
/*
 * Show all the threads a particular thread is waiting on based on
 * sleepable locks.
 */
static void
print_sleepchain(struct thread *td, const char *prefix)
{
	struct thread *owner;

	/*
	 * Follow the chain.  We keep walking as long as the thread is
	 * blocked on a sleep lock that has an owner.
	 */
	while (!db_pager_quit) {
		db_printf("%sthread %d (pid %d, %s) ", prefix, td->td_tid,
		    td->td_proc->p_pid, td->td_name);
		switch (td->td_state) {
		case TDS_INACTIVE:
			db_printf("is inactive\n");
			return;
		case TDS_CAN_RUN:
			db_printf("can run\n");
			return;
		case TDS_RUNQ:
			db_printf("is on a run queue\n");
			return;
		case TDS_RUNNING:
			db_printf("running on CPU %d\n", td->td_oncpu);
			return;
		case TDS_INHIBITED:
			if (TD_ON_SLEEPQ(td)) {
				if (lockmgr_chain(td, &owner) ||
				    sx_chain(td, &owner)) {
					if (owner == NULL)
						return;
					td = owner;
					break;
				}
				db_printf("sleeping on %p \"%s\"\n",
				    td->td_wchan, td->td_wmesg);
				return;
			}
			db_printf("inhibited\n");
			return;
		default:
			db_printf("??? (%#x)\n", td->td_state);
			return;
		}
	}
}
DB_SHOW_COMMAND(sleepchain, db_show_sleepchain)
{
	struct thread *td;

	/* Figure out which thread to start with. */
	if (have_addr)
		td = db_lookup_thread(addr, true);
	else
		td = kdb_thread;

	print_sleepchain(td, "");
}
static void print_waiters(struct turnstile *ts, int indent);

static void
print_waiter(struct thread *td, int indent)
{
	struct turnstile *ts;
	int i;

	if (db_pager_quit)
		return;
	for (i = 0; i < indent; i++)
		db_printf(" ");
	print_thread(td, "thread ");
	LIST_FOREACH(ts, &td->td_contested, ts_link)
		print_waiters(ts, indent + 1);
}
static void
print_waiters(struct turnstile *ts, int indent)
{
	struct lock_object *lock;
	struct lock_class *class;
	struct thread *td;
	int i;

	if (db_pager_quit)
		return;
	lock = ts->ts_lockobj;
	class = LOCK_CLASS(lock);
	for (i = 0; i < indent; i++)
		db_printf(" ");
	db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name, lock->lo_name);
	TAILQ_FOREACH(td, &ts->ts_blocked[TS_EXCLUSIVE_QUEUE], td_lockq)
		print_waiter(td, indent + 1);
	TAILQ_FOREACH(td, &ts->ts_blocked[TS_SHARED_QUEUE], td_lockq)
		print_waiter(td, indent + 1);
	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq)
		print_waiter(td, indent + 1);
}
DB_SHOW_COMMAND(locktree, db_show_locktree)
{
	struct lock_object *lock;
	struct lock_class *class;
	struct turnstile_chain *tc;
	struct turnstile *ts;

	if (!have_addr)
		return;
	lock = (struct lock_object *)addr;
	tc = TC_LOOKUP(lock);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock)
			break;
	if (ts == NULL) {
		class = LOCK_CLASS(lock);
		db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name,
		    lock->lo_name);
	} else
		print_waiters(ts, 0);
}
#endif