/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 *	and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */
/*
 * Implementation of turnstiles used to hold queues of threads blocked on
 * non-sleepable locks.  Sleepable locks use condition variables to
 * implement their queues.  Turnstiles differ from sleep queues in that
 * turnstile queues are assigned to a lock held by an owning thread.  Thus,
 * when one thread is enqueued onto a turnstile, it can lend its priority
 * to the owning thread.
 *
 * We wish to avoid bloating locks with an embedded turnstile and we do not
 * want to use back-pointers in the locks for the same reason.  Thus, we
 * use a similar approach to that of Solaris 7 as described in Solaris
 * Internals by Jim Mauro and Richard McDougall.  Turnstiles are looked up
 * in a hash table based on the address of the lock.  Each entry in the
 * hash table is a linked list of turnstiles and is called a turnstile
 * chain.  Each chain contains a spin mutex that protects all of the
 * turnstiles in the chain.
 *
 * Each time a thread is created, a turnstile is allocated from a UMA zone
 * and attached to that thread.  When a thread blocks on a lock, if it is the
 * first thread to block, it lends its turnstile to the lock.  If the lock
 * already has a turnstile, then it donates its turnstile to that
 * turnstile's free list.  When a thread is woken up, it takes a turnstile from
 * the free list if there are any other waiters.  If it is the only thread
 * blocked on the lock, then it reclaims the turnstile associated with the lock
 * and removes it from the hash table.
 */
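
/*
 * Illustrative sketch (not part of the original file): a lock
 * implementation typically drives this interface as below.  The
 * example_lock_acquire(), try_acquire(), and lock_owner() names are
 * hypothetical stand-ins for what a real consumer (e.g. the mutex
 * code) provides; see turnstile_trywait(), turnstile_cancel(), and
 * turnstile_wait() later in this file.
 */
#if 0
static void
example_lock_acquire(struct lock_object *lo)
{
	struct turnstile *ts;
	struct thread *owner;

	while (!try_acquire(lo)) {
		/* Lock the chain and reserve a turnstile for 'lo'. */
		ts = turnstile_trywait(lo);

		/* Retry in case the lock was released in the meantime. */
		if (try_acquire(lo)) {
			turnstile_cancel(ts);
			break;
		}

		/* Block; this lends our priority to the owning thread. */
		owner = lock_owner(lo);
		turnstile_wait(ts, owner, TS_EXCLUSIVE_QUEUE);
	}
}
#endif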
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_turnstile_profiling.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>

#include <vm/uma.h>

#ifdef DDB
#include <ddb/ddb.h>
#include <sys/lockmgr.h>
#include <sys/sx.h>
#endif
/*
 * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
 * number chosen because the sleep queues use the same value for the
 * shift.  Basically, we ignore the lower 8 bits of the address.
 * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
 */
#define	TC_TABLESIZE	128			/* Must be power of 2. */
#define	TC_MASK		(TC_TABLESIZE - 1)
#define	TC_SHIFT	8
#define	TC_HASH(lock)	(((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
#define	TC_LOOKUP(lock)	&turnstile_chains[TC_HASH(lock)]
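
/*
 * Example: with TC_SHIFT == 8 and TC_TABLESIZE == 128, a lock at
 * address 0xc0de4200 hashes to (0xc0de4200 >> 8) & 127 == 0x42 & 127
 * == 66, so its turnstile lives on turnstile_chains[66].
 */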
/*
 * There are three different lists of turnstiles as follows.  The list
 * connected by ts_link entries is a per-thread list of all the turnstiles
 * attached to locks that we own.  This is used to fix up our priority when
 * a lock is released.  The other two lists use the ts_hash entries.  The
 * first of these two is the turnstile chain list that a turnstile is on
 * when it is attached to a lock.  The second list to use ts_hash is the
 * free list hung off of a turnstile that is attached to a lock.
 *
 * Each turnstile contains three lists of threads.  The two ts_blocked lists
 * are linked lists of threads blocked on the turnstile's lock.  One list is
 * for exclusive waiters, and the other is for shared waiters.  The
 * ts_pending list is a linked list of threads previously awakened by
 * turnstile_signal() or turnstile_broadcast() that are waiting to be put on
 * the run queue.
 *
 * Locking key:
 *  c - turnstile chain lock
 *  q - td_contested lock
 */
struct turnstile {
	struct mtx ts_lock;			/* Spin lock for self. */
	struct threadqueue ts_blocked[2];	/* (c + q) Blocked threads. */
	struct threadqueue ts_pending;		/* (c) Pending threads. */
	LIST_ENTRY(turnstile) ts_hash;		/* (c) Chain and free list. */
	LIST_ENTRY(turnstile) ts_link;		/* (q) Contested locks. */
	LIST_HEAD(, turnstile) ts_free;		/* (c) Free turnstiles. */
	struct lock_object *ts_lockobj;		/* (c) Lock we reference. */
	struct thread *ts_owner;		/* (c + q) Who owns the lock. */
};
struct turnstile_chain {
	LIST_HEAD(, turnstile) tc_turnstiles;	/* List of turnstiles. */
	struct mtx tc_lock;			/* Spin lock for this chain. */
#ifdef TURNSTILE_PROFILING
	u_int	tc_depth;			/* Length of tc_queues. */
	u_int	tc_max_depth;			/* Max length of tc_queues. */
#endif
};
#ifdef TURNSTILE_PROFILING
u_int turnstile_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, turnstile, CTLFLAG_RD, 0, "turnstile profiling");
SYSCTL_NODE(_debug_turnstile, OID_AUTO, chains, CTLFLAG_RD, 0,
    "turnstile chain stats");
SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD,
    &turnstile_max_depth, 0, "maximum depth achieved of a single chain");
#endif
static struct mtx td_contested_lock;
static struct turnstile_chain turnstile_chains[TC_TABLESIZE];
static uma_zone_t turnstile_zone;
/*
 * Prototypes for non-exported routines.
 */
static void	init_turnstile0(void *dummy);
#ifdef TURNSTILE_PROFILING
static void	init_turnstile_profiling(void *arg);
#endif
static void	propagate_priority(struct thread *td);
static int	turnstile_adjust_thread(struct turnstile *ts,
		    struct thread *td);
static struct thread *turnstile_first_waiter(struct turnstile *ts);
static void	turnstile_setowner(struct turnstile *ts, struct thread *owner);
#ifdef INVARIANTS
static void	turnstile_dtor(void *mem, int size, void *arg);
#endif
static int	turnstile_init(void *mem, int size, int flags);
static void	turnstile_fini(void *mem, int size);
/*
 * Walks the chain of turnstiles and their owners to propagate the priority
 * of the thread being blocked to all the threads holding locks that have to
 * release their locks before this thread can run again.
 */
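/*
 * For example: if thread A (priority 4) blocks on a mutex owned by
 * thread B (priority 100), and B is in turn blocked on a mutex owned
 * by thread C (priority 120), this walk lends priority 4 first to B
 * and then to C so that C can run and unwind the chain.  (Smaller
 * values are better priorities.)
 */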
static void
propagate_priority(struct thread *td)
{
	struct turnstile *ts;
	int pri;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	pri = td->td_priority;
	ts = td->td_blocked;
	THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
	/*
	 * Grab a recursive lock on this turnstile chain so it stays locked
	 * for the whole operation.  The caller expects us to return with
	 * the original lock held.  We only ever lock down the chain so
	 * the lock order is constant.
	 */
	mtx_lock_spin(&ts->ts_lock);
	for (;;) {
		td = ts->ts_owner;
		if (td == NULL) {
			/*
			 * This might be a read lock with no owner.  There's
			 * not much we can do, so just bail.
			 */
			mtx_unlock_spin(&ts->ts_lock);
			return;
		}
		thread_lock_flags(td, MTX_DUPOK);
		mtx_unlock_spin(&ts->ts_lock);
		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		/*
		 * If the thread is asleep, then we are probably about
		 * to deadlock.  To make debugging this easier, just
		 * panic and tell the user which thread misbehaved so
		 * they can hopefully get a stack trace from the truly
		 * misbehaving thread.
		 */
		if (TD_IS_SLEEPING(td)) {
			printf(
		"Sleeping thread (tid %d, pid %d) owns a non-sleepable lock\n",
			    td->td_tid, td->td_proc->p_pid);
			kdb_backtrace_thread(td);
			panic("sleeping thread");
		}

		/*
		 * If this thread already has higher priority than the
		 * thread that is being blocked, we are finished.
		 */
		if (td->td_priority <= pri) {
			thread_unlock(td);
			return;
		}

		/*
		 * Bump this thread's priority.
		 */
		sched_lend_prio(td, pri);

		/*
		 * If lock holder is actually running or on the run queue
		 * then we are done.
		 */
		if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
			thread_unlock(td);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this shouldn't
		 * ever happen, however, as it would mean we are in a
		 * deadlock).
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If we aren't blocked on a lock, we should be.
		 */
		KASSERT(TD_ON_LOCK(td), (
		    "thread %d(%s):%d holds %s but isn't blocked on a lock\n",
		    td->td_tid, td->td_name, td->td_state,
		    ts->ts_lockobj->lo_name));

		/*
		 * Pick up the lock that td is blocked on.
		 */
		ts = td->td_blocked;
		MPASS(ts != NULL);
		THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
		/* Resort td on the list if needed. */
		if (!turnstile_adjust_thread(ts, td)) {
			mtx_unlock_spin(&ts->ts_lock);
			return;
		}
		/* The thread lock is released as ts lock above. */
	}
}
/*
 * Adjust the thread's position on a turnstile after its priority has been
 * changed.
 */
static int
turnstile_adjust_thread(struct turnstile *ts, struct thread *td)
{
	struct thread *td1, *td2;
	int queue;

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	MPASS(TD_ON_LOCK(td));

	/*
	 * This thread may not be blocked on this turnstile anymore
	 * but instead might already be woken up on another CPU
	 * that is waiting on the thread lock in turnstile_unpend() to
	 * finish waking this thread up.  We can detect this case
	 * by checking to see if this thread has been given a
	 * turnstile by either turnstile_signal() or
	 * turnstile_broadcast().  In this case, treat the thread as
	 * if it was already running.
	 */
	if (td->td_turnstile != NULL)
		return (0);

	/*
	 * Check if the thread needs to be moved on the blocked chain.
	 * It needs to be moved if either its priority is lower than
	 * the previous thread or higher than the next thread.
	 */
	THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
	td1 = TAILQ_PREV(td, threadqueue, td_lockq);
	td2 = TAILQ_NEXT(td, td_lockq);
	if ((td1 != NULL && td->td_priority < td1->td_priority) ||
	    (td2 != NULL && td->td_priority > td2->td_priority)) {
		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved to.
		 */
		queue = td->td_tsqueue;
		MPASS(queue == TS_EXCLUSIVE_QUEUE || queue == TS_SHARED_QUEUE);
		mtx_lock_spin(&td_contested_lock);
		TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > td->td_priority)
				break;
		}

		if (td1 == NULL)
			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		else
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		if (td1 == NULL)
			CTR3(KTR_LOCK,
		"turnstile_adjust_thread: td %d put at tail on [%p] %s",
			    td->td_tid, ts->ts_lockobj, ts->ts_lockobj->lo_name);
		else
			CTR4(KTR_LOCK,
		"turnstile_adjust_thread: td %d moved before %d on [%p] %s",
			    td->td_tid, td1->td_tid, ts->ts_lockobj,
			    ts->ts_lockobj->lo_name);
	}
	return (1);
}
/*
 * Early initialization of turnstiles.  This is not done via a SYSINIT()
 * since this needs to be initialized very early when mutexes are first
 * initialized.
 */
void
init_turnstiles(void)
{
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		LIST_INIT(&turnstile_chains[i].tc_turnstiles);
		mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
		    NULL, MTX_SPIN);
	}
	mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
	LIST_INIT(&thread0.td_contested);
	thread0.td_turnstile = NULL;
}
#ifdef TURNSTILE_PROFILING
static void
init_turnstile_profiling(void *arg)
{
	struct sysctl_oid *chain_oid;
	char chain_name[10];
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_turnstile_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "turnstile chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &turnstile_chains[i].tc_depth, 0,
		    NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &turnstile_chains[i].tc_max_depth,
		    0, NULL);
	}
}
SYSINIT(turnstile_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_turnstile_profiling, NULL);
#endif
static void
init_turnstile0(void *dummy)
{

	turnstile_zone = uma_zcreate("TURNSTILE", sizeof(struct turnstile),
	    NULL,
#ifdef INVARIANTS
	    turnstile_dtor,
#else
	    NULL,
#endif
	    turnstile_init, turnstile_fini, UMA_ALIGN_CACHE, UMA_ZONE_NOFREE);
	thread0.td_turnstile = turnstile_alloc();
}
SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);
/*
 * Update a thread on the turnstile list after its priority has been changed.
 * The old priority is passed in as an argument.
 */
void
turnstile_adjust(struct thread *td, u_char oldpri)
{
	struct turnstile *ts;

	MPASS(TD_ON_LOCK(td));

	/*
	 * Pick up the lock that td is blocked on.
	 */
	ts = td->td_blocked;
	MPASS(ts != NULL);
	THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
	mtx_assert(&ts->ts_lock, MA_OWNED);

	/* Resort the turnstile on the list. */
	if (!turnstile_adjust_thread(ts, td))
		return;
	/*
	 * If our priority was lowered and we are at the head of the
	 * turnstile, then propagate our new priority up the chain.
	 * Note that we currently don't try to revoke lent priorities
	 * when our priority goes up.
	 */
	MPASS(td->td_tsqueue == TS_EXCLUSIVE_QUEUE ||
	    td->td_tsqueue == TS_SHARED_QUEUE);
	if (td == TAILQ_FIRST(&ts->ts_blocked[td->td_tsqueue]) &&
	    td->td_priority < oldpri) {
		propagate_priority(td);
	}
}
/*
 * Set the owner of the lock this turnstile is attached to.
 */
static void
turnstile_setowner(struct turnstile *ts, struct thread *owner)
{

	mtx_assert(&td_contested_lock, MA_OWNED);
	MPASS(ts->ts_owner == NULL);

	/* A shared lock might not have an owner. */
	if (owner == NULL)
		return;

	MPASS(owner->td_proc->p_magic == P_MAGIC);
	ts->ts_owner = owner;
	LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
}
#ifdef INVARIANTS
/*
 * UMA zone item deallocator.
 */
static void
turnstile_dtor(void *mem, int size, void *arg)
{
	struct turnstile *ts;

	ts = mem;
	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]));
	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
	MPASS(TAILQ_EMPTY(&ts->ts_pending));
}
#endif
/*
 * UMA zone item initializer.
 */
static int
turnstile_init(void *mem, int size, int flags)
{
	struct turnstile *ts;

	bzero(mem, size);
	ts = mem;
	TAILQ_INIT(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
	TAILQ_INIT(&ts->ts_blocked[TS_SHARED_QUEUE]);
	TAILQ_INIT(&ts->ts_pending);
	LIST_INIT(&ts->ts_free);
	mtx_init(&ts->ts_lock, "turnstile lock", NULL, MTX_SPIN | MTX_RECURSE);
	return (0);
}
static void
turnstile_fini(void *mem, int size)
{
	struct turnstile *ts;

	ts = mem;
	mtx_destroy(&ts->ts_lock);
}
507 * Get a turnstile for a new thread.
510 turnstile_alloc(void)
513 return (uma_zalloc(turnstile_zone, M_WAITOK));
/*
 * Free a turnstile when a thread is destroyed.
 */
void
turnstile_free(struct turnstile *ts)
{

	uma_zfree(turnstile_zone, ts);
}
/*
 * Lock the turnstile chain associated with the specified lock.
 */
void
turnstile_chain_lock(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	tc = TC_LOOKUP(lock);
	mtx_lock_spin(&tc->tc_lock);
}
struct turnstile *
turnstile_trywait(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;

	tc = TC_LOOKUP(lock);
	mtx_lock_spin(&tc->tc_lock);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock) {
			mtx_lock_spin(&ts->ts_lock);
			return (ts);
		}

	ts = curthread->td_turnstile;
	MPASS(ts != NULL);
	mtx_lock_spin(&ts->ts_lock);
	KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
	ts->ts_lockobj = lock;

	return (ts);
}
void
turnstile_cancel(struct turnstile *ts)
{
	struct turnstile_chain *tc;
	struct lock_object *lock;

	mtx_assert(&ts->ts_lock, MA_OWNED);

	mtx_unlock_spin(&ts->ts_lock);
	lock = ts->ts_lockobj;
	if (ts == curthread->td_turnstile)
		ts->ts_lockobj = NULL;
	tc = TC_LOOKUP(lock);
	mtx_unlock_spin(&tc->tc_lock);
}
/*
 * Look up the turnstile for a lock in the hash table locking the associated
 * turnstile chain along the way.  If no turnstile is found in the hash
 * table, NULL is returned.
 */
struct turnstile *
turnstile_lookup(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;

	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock) {
			mtx_lock_spin(&ts->ts_lock);
			return (ts);
		}
	return (NULL);
}
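
/*
 * Illustrative sketch (not part of the original file): an unlock slow
 * path typically pairs turnstile_lookup() with turnstile_broadcast()
 * (or turnstile_signal()) and turnstile_unpend(), all under the chain
 * lock.  example_lock_release_hard() is a hypothetical name.
 */
#if 0
static void
example_lock_release_hard(struct lock_object *lo)
{
	struct turnstile *ts;

	turnstile_chain_lock(lo);
	ts = turnstile_lookup(lo);
	if (ts != NULL) {
		/* Wake all exclusive waiters and give up ownership. */
		turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
	}
	turnstile_chain_unlock(lo);
}
#endif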
/*
 * Unlock the turnstile chain associated with a given lock.
 */
void
turnstile_chain_unlock(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	tc = TC_LOOKUP(lock);
	mtx_unlock_spin(&tc->tc_lock);
}
/*
 * Return a pointer to the thread waiting on this turnstile with the
 * most important priority or NULL if the turnstile has no waiters.
 */
static struct thread *
turnstile_first_waiter(struct turnstile *ts)
{
	struct thread *std, *xtd;

	std = TAILQ_FIRST(&ts->ts_blocked[TS_SHARED_QUEUE]);
	xtd = TAILQ_FIRST(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
	if (xtd == NULL || (std != NULL && std->td_priority < xtd->td_priority))
		return (std);
	return (xtd);
}
/*
 * Take ownership of a turnstile and adjust the priority of the new
 * owner appropriately.
 */
void
turnstile_claim(struct turnstile *ts)
{
	struct thread *td, *owner;
	struct turnstile_chain *tc;

	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(ts != curthread->td_turnstile);

	owner = curthread;
	mtx_lock_spin(&td_contested_lock);
	turnstile_setowner(ts, owner);
	mtx_unlock_spin(&td_contested_lock);

	td = turnstile_first_waiter(ts);
	MPASS(td != NULL);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);

	/*
	 * Update the priority of the new owner if needed.
	 */
	thread_lock(owner);
	if (td->td_priority < owner->td_priority)
		sched_lend_prio(owner, td->td_priority);
	thread_unlock(owner);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_unlock_spin(&ts->ts_lock);
	mtx_unlock_spin(&tc->tc_lock);
}
/*
 * Block the current thread on the turnstile associated with 'lock'.  This
 * function will context switch and not return until this thread has been
 * woken back up.  This function must be called with the appropriate
 * turnstile chain locked and will return with it unlocked.
 */
void
turnstile_wait(struct turnstile *ts, struct thread *owner, int queue)
{
	struct turnstile_chain *tc;
	struct thread *td, *td1;
	struct lock_object *lock;

	td = curthread;
	mtx_assert(&ts->ts_lock, MA_OWNED);
	if (owner)
		MPASS(owner->td_proc->p_magic == P_MAGIC);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/*
	 * If the lock does not already have a turnstile, use this thread's
	 * turnstile.  Otherwise insert the current thread into the
	 * turnstile already in use by this lock.
	 */
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	if (ts == td->td_turnstile) {
#ifdef TURNSTILE_PROFILING
		tc->tc_depth++;
		if (tc->tc_depth > tc->tc_max_depth) {
			tc->tc_max_depth = tc->tc_depth;
			if (tc->tc_max_depth > turnstile_max_depth)
				turnstile_max_depth = tc->tc_max_depth;
		}
#endif
		LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
		KASSERT(TAILQ_EMPTY(&ts->ts_pending),
		    ("thread's turnstile has pending threads"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]),
		    ("thread's turnstile has exclusive waiters"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]),
		    ("thread's turnstile has shared waiters"));
		KASSERT(LIST_EMPTY(&ts->ts_free),
		    ("thread's turnstile has a non-empty free list"));
		MPASS(ts->ts_lockobj != NULL);
		mtx_lock_spin(&td_contested_lock);
		TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		turnstile_setowner(ts, owner);
		mtx_unlock_spin(&td_contested_lock);
	} else {
		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq)
			if (td1->td_priority > td->td_priority)
				break;
		mtx_lock_spin(&td_contested_lock);
		if (td1 != NULL)
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		else
			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		MPASS(owner == ts->ts_owner);
		mtx_unlock_spin(&td_contested_lock);
		MPASS(td->td_turnstile != NULL);
		LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
	}
	thread_lock(td);
	thread_lock_set(td, &ts->ts_lock);
	td->td_turnstile = NULL;

	/* Save who we are blocked on and switch. */
	lock = ts->ts_lockobj;
	td->td_tsqueue = queue;
	td->td_blocked = ts;
	td->td_lockname = lock->lo_name;
	td->td_blktick = ticks;
	TD_SET_LOCK(td);
	mtx_unlock_spin(&tc->tc_lock);
	propagate_priority(td);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d blocked on [%p] %s", __func__,
		    td->td_tid, lock, lock->lo_name);

	THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
	mi_switch(SW_VOL | SWT_TURNSTILE, NULL);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d free from blocked on [%p] %s",
		    __func__, td->td_tid, lock, lock->lo_name);
	thread_unlock(td);
}
/*
 * Pick the highest priority thread on this turnstile and put it on the
 * pending list.  This must be called with the turnstile chain locked.
 */
int
turnstile_signal(struct turnstile *ts, int queue)
{
	struct turnstile_chain *tc;
	struct thread *td;
	int empty;

	MPASS(ts != NULL);
	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/*
	 * Pick the highest priority thread blocked on this lock and
	 * move it to the pending list.
	 */
	td = TAILQ_FIRST(&ts->ts_blocked[queue]);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_lock_spin(&td_contested_lock);
	TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
	mtx_unlock_spin(&td_contested_lock);
	TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);

	/*
	 * If the turnstile is now empty, remove it from its chain and
	 * give it to the about-to-be-woken thread.  Otherwise take a
	 * turnstile from the free list and give it to the thread.
	 */
	empty = TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]);
	if (empty) {
		tc = TC_LOOKUP(ts->ts_lockobj);
		mtx_assert(&tc->tc_lock, MA_OWNED);
		MPASS(LIST_EMPTY(&ts->ts_free));
#ifdef TURNSTILE_PROFILING
		tc->tc_depth--;
#endif
	} else
		ts = LIST_FIRST(&ts->ts_free);
	MPASS(ts != NULL);
	LIST_REMOVE(ts, ts_hash);
	td->td_turnstile = ts;

	return (empty);
}
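
/*
 * Usage note: turnstile_signal() moves only the highest-priority
 * waiter on the given queue to the pending list, while
 * turnstile_broadcast() below moves all of them; either must be
 * followed by turnstile_unpend() to actually wake the pending threads.
 */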
/*
 * Put all blocked threads on the pending list.  This must be called with
 * the turnstile chain locked.
 */
void
turnstile_broadcast(struct turnstile *ts, int queue)
{
	struct turnstile_chain *tc;
	struct turnstile *ts1;
	struct thread *td;

	MPASS(ts != NULL);
	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
	/*
	 * We must have the chain locked so that we can remove the empty
	 * turnstile from the hash queue.
	 */
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/*
	 * Transfer the blocked list to the pending list.
	 */
	mtx_lock_spin(&td_contested_lock);
	TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked[queue], td_lockq);
	mtx_unlock_spin(&td_contested_lock);

	/*
	 * Give a turnstile to each thread.  The last thread gets
	 * this turnstile if the turnstile is empty.
	 */
	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
		if (LIST_EMPTY(&ts->ts_free)) {
			MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
			ts1 = ts;
#ifdef TURNSTILE_PROFILING
			tc->tc_depth--;
#endif
		} else
			ts1 = LIST_FIRST(&ts->ts_free);
		MPASS(ts1 != NULL);
		LIST_REMOVE(ts1, ts_hash);
		td->td_turnstile = ts1;
	}
}
/*
 * Wake up all threads on the pending list and adjust the priority of the
 * current thread appropriately.  This must be called with the turnstile
 * chain locked.
 */
void
turnstile_unpend(struct turnstile *ts, int owner_type)
{
	TAILQ_HEAD( ,thread) pending_threads;
	struct turnstile *nts;
	struct thread *td;
	u_char cp, pri;

	MPASS(ts != NULL);
	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(ts->ts_owner == curthread || ts->ts_owner == NULL);
	MPASS(!TAILQ_EMPTY(&ts->ts_pending));

	/*
	 * Move the list of pending threads out of the turnstile and
	 * into a local variable.
	 */
	TAILQ_INIT(&pending_threads);
	TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]))
		ts->ts_lockobj = NULL;
#endif
	/*
	 * Adjust the priority of curthread based on other contested
	 * locks it owns.  Don't lower the priority below the base
	 * priority however.
	 */
	td = curthread;
	pri = PRI_MAX;
	thread_lock(td);
	mtx_lock_spin(&td_contested_lock);
	/*
	 * Remove the turnstile from this thread's list of contested locks
	 * since this thread doesn't own it anymore.  New threads will
	 * not be blocking on the turnstile until it is claimed by a new
	 * owner.  There might not be a current owner if this is a shared
	 * lock.
	 */
	if (ts->ts_owner != NULL) {
		ts->ts_owner = NULL;
		LIST_REMOVE(ts, ts_link);
	}
	LIST_FOREACH(nts, &td->td_contested, ts_link) {
		cp = turnstile_first_waiter(nts)->td_priority;
		if (cp < pri)
			pri = cp;
	}
	mtx_unlock_spin(&td_contested_lock);
	sched_unlend_prio(td, pri);
	thread_unlock(td);
	/*
	 * Wake up all the pending threads.  If a thread is not blocked
	 * on a lock, then it is currently executing on another CPU in
	 * turnstile_wait() or sitting on a run queue waiting to resume
	 * in turnstile_wait().  Set a flag to force it to try to acquire
	 * the lock again instead of blocking.
	 */
	while (!TAILQ_EMPTY(&pending_threads)) {
		td = TAILQ_FIRST(&pending_threads);
		TAILQ_REMOVE(&pending_threads, td, td_lockq);
		thread_lock(td);
		THREAD_LOCKPTR_ASSERT(td, &ts->ts_lock);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		MPASS(TD_ON_LOCK(td));
		TD_CLR_LOCK(td);
		MPASS(TD_CAN_RUN(td));
		td->td_blocked = NULL;
		td->td_lockname = NULL;
		td->td_blktick = 0;
#ifdef INVARIANTS
		td->td_tsqueue = 0xff;
#endif
		sched_add(td, SRQ_BORING);
		thread_unlock(td);
	}
	mtx_unlock_spin(&ts->ts_lock);
}
/*
 * Give up ownership of a turnstile.  This must be called with the
 * turnstile chain locked.
 */
void
turnstile_disown(struct turnstile *ts)
{
	struct thread *td;
	u_char cp, pri;

	MPASS(ts != NULL);
	mtx_assert(&ts->ts_lock, MA_OWNED);
	MPASS(ts->ts_owner == curthread);
	MPASS(TAILQ_EMPTY(&ts->ts_pending));
	MPASS(!TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) ||
	    !TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));

	/*
	 * Remove the turnstile from this thread's list of contested locks
	 * since this thread doesn't own it anymore.  New threads will
	 * not be blocking on the turnstile until it is claimed by a new
	 * owner.
	 */
	mtx_lock_spin(&td_contested_lock);
	ts->ts_owner = NULL;
	LIST_REMOVE(ts, ts_link);
	mtx_unlock_spin(&td_contested_lock);

	/*
	 * Adjust the priority of curthread based on other contested
	 * locks it owns.  Don't lower the priority below the base
	 * priority however.
	 */
	td = curthread;
	pri = PRI_MAX;
	thread_lock(td);
	mtx_unlock_spin(&ts->ts_lock);
	mtx_lock_spin(&td_contested_lock);
	LIST_FOREACH(ts, &td->td_contested, ts_link) {
		cp = turnstile_first_waiter(ts)->td_priority;
		if (cp < pri)
			pri = cp;
	}
	mtx_unlock_spin(&td_contested_lock);
	sched_unlend_prio(td, pri);
	thread_unlock(td);
}
/*
 * Return the first thread in a turnstile.
 */
struct thread *
turnstile_head(struct turnstile *ts, int queue)
{
#ifdef INVARIANTS

	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
	mtx_assert(&ts->ts_lock, MA_OWNED);
#endif
	return (TAILQ_FIRST(&ts->ts_blocked[queue]));
}
/*
 * Returns true if a sub-queue of a turnstile is empty.
 */
int
turnstile_empty(struct turnstile *ts, int queue)
{
#ifdef INVARIANTS

	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
	mtx_assert(&ts->ts_lock, MA_OWNED);
#endif
	return (TAILQ_EMPTY(&ts->ts_blocked[queue]));
}
#ifdef DDB
static void
print_thread(struct thread *td, const char *prefix)
{

	db_printf("%s%p (tid %d, pid %d, \"%s\")\n", prefix, td, td->td_tid,
	    td->td_proc->p_pid, td->td_name[0] != '\0' ? td->td_name :
	    td->td_proc->p_comm);
}

static void
print_queue(struct threadqueue *queue, const char *header, const char *prefix)
{
	struct thread *td;

	db_printf("%s:\n", header);
	if (TAILQ_EMPTY(queue)) {
		db_printf("%sempty\n", prefix);
		return;
	}
	TAILQ_FOREACH(td, queue, td_lockq) {
		print_thread(td, prefix);
	}
}
DB_SHOW_COMMAND(turnstile, db_show_turnstile)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct lock_object *lock;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active turnstile for the lock indicated
	 * by the address.
	 */
	lock = (struct lock_object *)addr;
	tc = TC_LOOKUP(lock);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock)
			goto found;

	/*
	 * Second, see if there is an active turnstile at the address
	 * indicated.
	 */
	for (i = 0; i < TC_TABLESIZE; i++)
		LIST_FOREACH(ts, &turnstile_chains[i].tc_turnstiles, ts_hash) {
			if (ts == (struct turnstile *)addr)
				goto found;
		}

	db_printf("Unable to locate a turnstile via %p\n", (void *)addr);
	return;
found:
	lock = ts->ts_lockobj;
	db_printf("Lock: %p - (%s) %s\n", lock, LOCK_CLASS(lock)->lc_name,
	    lock->lo_name);
	if (ts->ts_owner)
		print_thread(ts->ts_owner, "Lock Owner: ");
	else
		db_printf("Lock Owner: none\n");
	print_queue(&ts->ts_blocked[TS_SHARED_QUEUE], "Shared Waiters", "\t");
	print_queue(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE], "Exclusive Waiters",
	    "\t");
	print_queue(&ts->ts_pending, "Pending Threads", "\t");
}
/*
 * Show all the threads a particular thread is waiting on based on
 * non-sleepable and non-spin locks.
 */
static void
print_lockchain(struct thread *td, const char *prefix)
{
	struct lock_object *lock;
	struct lock_class *class;
	struct turnstile *ts;

	/*
	 * Follow the chain.  We keep walking as long as the thread is
	 * blocked on a turnstile that has an owner.
	 */
	while (!db_pager_quit) {
		db_printf("%sthread %d (pid %d, %s) ", prefix, td->td_tid,
		    td->td_proc->p_pid, td->td_name[0] != '\0' ? td->td_name :
		    td->td_proc->p_comm);
		switch (td->td_state) {
		case TDS_INACTIVE:
			db_printf("is inactive\n");
			return;
		case TDS_CAN_RUN:
			db_printf("can run\n");
			return;
		case TDS_RUNQ:
			db_printf("is on a run queue\n");
			return;
		case TDS_RUNNING:
			db_printf("running on CPU %d\n", td->td_oncpu);
			return;
		case TDS_INHIBITED:
			if (TD_ON_LOCK(td)) {
				ts = td->td_blocked;
				lock = ts->ts_lockobj;
				class = LOCK_CLASS(lock);
				db_printf("blocked on lock %p (%s) \"%s\"\n",
				    lock, class->lc_name, lock->lo_name);
				if (ts->ts_owner == NULL)
					return;
				td = ts->ts_owner;
				break;
			}
			db_printf("inhibited\n");
			return;
		default:
			db_printf("??? (%#x)\n", td->td_state);
			return;
		}
	}
}
DB_SHOW_COMMAND(lockchain, db_show_lockchain)
{
	struct thread *td;

	/* Figure out which thread to start with. */
	if (have_addr)
		td = db_lookup_thread(addr, TRUE);
	else
		td = kdb_thread;

	print_lockchain(td, "");
}
DB_SHOW_ALL_COMMAND(chains, db_show_allchains)
{
	struct thread *td;
	struct proc *p;
	int i;

	i = 1;
	FOREACH_PROC_IN_SYSTEM(p) {
		FOREACH_THREAD_IN_PROC(p, td) {
			if (TD_ON_LOCK(td) && LIST_EMPTY(&td->td_contested)) {
				db_printf("chain %d:\n", i++);
				print_lockchain(td, " ");
			}
		}
	}
}
DB_SHOW_ALIAS(allchains, db_show_allchains)
/*
 * Show all the threads a particular thread is waiting on based on
 * sleepable locks.
 */
static void
print_sleepchain(struct thread *td, const char *prefix)
{
	struct thread *owner;

	/*
	 * Follow the chain.  We keep walking as long as the thread is
	 * blocked on a sleep lock that has an owner.
	 */
	while (!db_pager_quit) {
		db_printf("%sthread %d (pid %d, %s) ", prefix, td->td_tid,
		    td->td_proc->p_pid, td->td_name[0] != '\0' ? td->td_name :
		    td->td_proc->p_comm);
		switch (td->td_state) {
		case TDS_INACTIVE:
			db_printf("is inactive\n");
			return;
		case TDS_CAN_RUN:
			db_printf("can run\n");
			return;
		case TDS_RUNQ:
			db_printf("is on a run queue\n");
			return;
		case TDS_RUNNING:
			db_printf("running on CPU %d\n", td->td_oncpu);
			return;
		case TDS_INHIBITED:
			if (TD_ON_SLEEPQ(td)) {
				if (lockmgr_chain(td, &owner) ||
				    sx_chain(td, &owner)) {
					if (owner == NULL)
						return;
					td = owner;
					break;
				}
				db_printf("sleeping on %p \"%s\"\n",
				    td->td_wchan, td->td_wmesg);
				return;
			}
			db_printf("inhibited\n");
			return;
		default:
			db_printf("??? (%#x)\n", td->td_state);
			return;
		}
	}
}
DB_SHOW_COMMAND(sleepchain, db_show_sleepchain)
{
	struct thread *td;

	/* Figure out which thread to start with. */
	if (have_addr)
		td = db_lookup_thread(addr, TRUE);
	else
		td = kdb_thread;

	print_sleepchain(td, "");
}
static void	print_waiters(struct turnstile *ts, int indent);

static void
print_waiter(struct thread *td, int indent)
{
	struct turnstile *ts;
	int i;

	if (db_pager_quit)
		return;
	for (i = 0; i < indent; i++)
		db_printf(" ");
	print_thread(td, "thread ");
	LIST_FOREACH(ts, &td->td_contested, ts_link)
		print_waiters(ts, indent + 1);
}
static void
print_waiters(struct turnstile *ts, int indent)
{
	struct lock_object *lock;
	struct lock_class *class;
	struct thread *td;
	int i;

	if (db_pager_quit)
		return;
	lock = ts->ts_lockobj;
	class = LOCK_CLASS(lock);
	for (i = 0; i < indent; i++)
		db_printf(" ");
	db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name, lock->lo_name);
	TAILQ_FOREACH(td, &ts->ts_blocked[TS_EXCLUSIVE_QUEUE], td_lockq)
		print_waiter(td, indent + 1);
	TAILQ_FOREACH(td, &ts->ts_blocked[TS_SHARED_QUEUE], td_lockq)
		print_waiter(td, indent + 1);
	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq)
		print_waiter(td, indent + 1);
}
DB_SHOW_COMMAND(locktree, db_show_locktree)
{
	struct lock_object *lock;
	struct lock_class *class;
	struct turnstile_chain *tc;
	struct turnstile *ts;

	if (!have_addr)
		return;
	lock = (struct lock_object *)addr;
	tc = TC_LOOKUP(lock);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock)
			break;
	if (ts == NULL) {
		class = LOCK_CLASS(lock);
		db_printf("lock %p (%s) \"%s\"\n", lock, class->lc_name,
		    lock->lo_name);
	} else
		print_waiters(ts, 0);
}
#endif