/*-
 * Copyright (c) 1998 Berkeley Software Design, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Berkeley Software Design Inc's name may not be used to endorse or
 *    promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BERKELEY SOFTWARE DESIGN INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL BERKELEY SOFTWARE DESIGN INC BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from BSDI $Id: mutex_witness.c,v 1.1.2.20 2000/04/27 03:10:27 cp Exp $
 * and BSDI $Id: synch_machdep.c,v 2.3.2.39 2000/04/27 03:10:25 cp Exp $
 */

/*
 * Implementation of turnstiles used to hold the queues of threads blocked
 * on non-sleepable locks.  Sleepable locks use condition variables to
 * implement their queues.  Turnstiles differ from a sleep queue in that
 * turnstile queues are assigned to a lock held by an owning thread.  Thus,
 * when one thread is enqueued onto a turnstile, it can lend its priority
 * to the owning thread.
 *
 * We wish to avoid bloating locks with an embedded turnstile and we do not
 * want to use back-pointers in the locks for the same reason.  Thus, we
 * use a similar approach to that of Solaris 7 as described in Solaris
 * Internals by Jim Mauro and Richard McDougall.  Turnstiles are looked up
 * in a hash table based on the address of the lock.  Each entry in the
 * hash table is a linked list of turnstiles and is called a turnstile
 * chain.  Each chain contains a spin mutex that protects all of the
 * turnstiles in the chain.
 *
 * Each time a thread is created, a turnstile is malloc'd and attached to
 * that thread.  When a thread blocks on a lock, if it is the first thread
 * to block, it lends its turnstile to the lock.  If the lock already has
 * a turnstile, then it donates its turnstile to the free list of the
 * lock's turnstile.  When a thread is woken up, it takes a turnstile from
 * the free list if there are any other waiters.  If it is the only thread
 * blocked on the lock, then it reclaims the turnstile associated with the
 * lock and removes it from the hash table.
 */

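/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * lifecycle described above as seen from a lock implementation's
 * contested-acquire path, where 'lock' is the lock_object embedded in
 * the lock and 'owner' is the thread currently holding it:
 *
 *	turnstile_lock(lock);
 *	ts = turnstile_lookup(lock);
 *	(if ts == NULL we are the first waiter, so turnstile_wait() will
 *	lend our own turnstile to the lock; otherwise it goes on the
 *	existing turnstile's free list)
 *	turnstile_wait(lock, owner, TS_EXCLUSIVE_QUEUE);
 *	(on return we hold a turnstile again: one from the free list, or
 *	the lock's own turnstile if we were the last waiter)
 */
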
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_turnstile_profiling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/turnstile.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * Constants for the hash table of turnstile chains.  TC_SHIFT is a magic
 * number chosen because the sleep queues use the same value for the
 * shift.  Basically, we ignore the lower 8 bits of the address.
 * TC_TABLESIZE must be a power of two for TC_MASK to work properly.
 */
#define	TC_TABLESIZE	128			/* Must be power of 2. */
#define	TC_MASK		(TC_TABLESIZE - 1)
#define	TC_SHIFT	8
#define	TC_HASH(lock)	(((uintptr_t)(lock) >> TC_SHIFT) & TC_MASK)
#define	TC_LOOKUP(lock)	&turnstile_chains[TC_HASH(lock)]

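/*
 * For example, a lock object at address 0x80c41a40 hashes to chain
 * ((0x80c41a40 >> 8) & 127) == 26, so any two locks whose addresses
 * differ only in their low TC_SHIFT bits share a chain and therefore
 * contend on the same chain spin lock.
 */
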
/*
 * There are three different lists of turnstiles as follows.  The list
 * connected by ts_link entries is a per-thread list of all the turnstiles
 * attached to locks that we own.  This is used to fix up our priority when
 * a lock is released.  The other two lists use the ts_hash entries.  The
 * first of these two is the turnstile chain list that a turnstile is on
 * when it is attached to a lock.  The second list to use ts_hash is the
 * free list hung off of a turnstile that is attached to a lock.
 *
 * Each turnstile contains three lists of threads.  The two ts_blocked lists
 * are linked lists of threads blocked on the turnstile's lock.  One list is
 * for exclusive waiters, and the other is for shared waiters.  The
 * ts_pending list is a linked list of threads previously awakened by
 * turnstile_signal() or turnstile_broadcast() that are waiting to be put on
 * the run queue.
 *
 * Locking key:
 *  c - turnstile chain lock
 *  q - td_contested lock
 */
struct turnstile {
	struct threadqueue ts_blocked[2];	/* (c + q) Blocked threads. */
	struct threadqueue ts_pending;		/* (c) Pending threads. */
	LIST_ENTRY(turnstile) ts_hash;		/* (c) Chain and free list. */
	LIST_ENTRY(turnstile) ts_link;		/* (q) Contested locks. */
	LIST_HEAD(, turnstile) ts_free;		/* (c) Free turnstiles. */
	struct lock_object *ts_lockobj;		/* (c) Lock we reference. */
	struct thread *ts_owner;		/* (c + q) Who owns the lock. */
};

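/*
 * For example, a mutex with three blocked exclusive waiters has a single
 * turnstile on the mutex's chain with all three threads on that
 * turnstile's ts_blocked[TS_EXCLUSIVE_QUEUE] list.  The turnstiles donated
 * by the second and third waiters sit on its ts_free list, and the
 * turnstile itself is on the owning thread's td_contested list via
 * ts_link.
 */
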
struct turnstile_chain {
	LIST_HEAD(, turnstile) tc_turnstiles;	/* List of turnstiles. */
	struct mtx tc_lock;			/* Spin lock for this chain. */
#ifdef TURNSTILE_PROFILING
	u_int	tc_depth;			/* Length of tc_turnstiles. */
	u_int	tc_max_depth;		/* Max length of tc_turnstiles. */
#endif
};

#ifdef TURNSTILE_PROFILING
u_int turnstile_max_depth;
SYSCTL_NODE(_debug, OID_AUTO, turnstile, CTLFLAG_RD, 0, "turnstile profiling");
SYSCTL_NODE(_debug_turnstile, OID_AUTO, chains, CTLFLAG_RD, 0,
    "turnstile chain stats");
SYSCTL_UINT(_debug_turnstile, OID_AUTO, max_depth, CTLFLAG_RD,
    &turnstile_max_depth, 0, "maximum depth achieved of a single chain");
#endif
static struct mtx td_contested_lock;
static struct turnstile_chain turnstile_chains[TC_TABLESIZE];

static MALLOC_DEFINE(M_TURNSTILE, "turnstiles", "turnstiles");

/*
 * Prototypes for non-exported routines.
 */
static void	init_turnstile0(void *dummy);
#ifdef TURNSTILE_PROFILING
static void	init_turnstile_profiling(void *arg);
#endif
static void	propagate_priority(struct thread *td);
static int	turnstile_adjust_thread(struct turnstile *ts,
		    struct thread *td);
static struct thread *turnstile_first_waiter(struct turnstile *ts);
static void	turnstile_setowner(struct turnstile *ts, struct thread *owner);

/*
 * Walks the chain of turnstiles and their owners to propagate the priority
 * of the thread being blocked to all the threads holding locks that have to
 * release their locks before this thread can run again.
 */
static void
propagate_priority(struct thread *td)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	int pri;

	mtx_assert(&sched_lock, MA_OWNED);
	pri = td->td_priority;
	ts = td->td_blocked;
	for (;;) {
		td = ts->ts_owner;

		if (td == NULL) {
			/*
			 * This might be a read lock with no owner.  There's
			 * not much we can do, so just bail.
			 */
			return;
		}

		MPASS(td->td_proc != NULL);
		MPASS(td->td_proc->p_magic == P_MAGIC);

		/*
		 * XXX: The owner of a turnstile can be stale if it is the
		 * first thread to grab a rlock of a rw lock.  In that case
		 * it is possible for us to be at SSLEEP or some other
		 * weird state.  We should probably just return if the state
		 * isn't SRUN or SLOCK.
		 */
		KASSERT(!TD_IS_SLEEPING(td),
		    ("sleeping thread (tid %d) owns a non-sleepable lock",
		    td->td_tid));

		/*
		 * If this thread already has higher priority than the
		 * thread that is being blocked, we are finished.
		 */
		if (td->td_priority <= pri)
			return;

		/*
		 * Bump this thread's priority.
		 */
		sched_lend_prio(td, pri);

		/*
		 * If lock holder is actually running or on the run queue
		 * then we are done.
		 */
		if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
			MPASS(td->td_blocked == NULL);
			return;
		}

#ifndef SMP
		/*
		 * For UP, we check to see if td is curthread (this shouldn't
		 * ever happen however as it would mean we are in a deadlock.)
		 */
		KASSERT(td != curthread, ("Deadlock detected"));
#endif

		/*
		 * If we aren't blocked on a lock, we should be.
		 */
		KASSERT(TD_ON_LOCK(td), (
		    "thread %d(%s):%d holds %s but isn't blocked on a lock\n",
		    td->td_tid, td->td_proc->p_comm, td->td_state,
		    ts->ts_lockobj->lo_name));

		/*
		 * Pick up the lock that td is blocked on.
		 */
		ts = td->td_blocked;
		MPASS(ts != NULL);
		tc = TC_LOOKUP(ts->ts_lockobj);
		mtx_lock_spin(&tc->tc_lock);

		/* Resort td on the list if needed. */
		if (!turnstile_adjust_thread(ts, td)) {
			mtx_unlock_spin(&tc->tc_lock);
			return;
		}
		mtx_unlock_spin(&tc->tc_lock);
	}
}

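/*
 * Example of the propagation above (hypothetical scenario): if thread A
 * (priority 4) blocks on a mutex owned by B (priority 8), and B is itself
 * blocked on a mutex owned by C (priority 12), the loop lends priority 4
 * first to B and then to C, stopping once it reaches a thread that is
 * running or already at least as important.  (Lower numeric values are
 * more important priorities.)
 */
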
/*
 * Adjust the thread's position on a turnstile after its priority has been
 * changed.
 */
static int
turnstile_adjust_thread(struct turnstile *ts, struct thread *td)
{
	struct turnstile_chain *tc;
	struct thread *td1, *td2;
	int queue;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_LOCK(td));

	/*
	 * This thread may not be blocked on this turnstile anymore
	 * but instead might already be woken up on another CPU
	 * that is waiting on sched_lock in turnstile_unpend() to
	 * finish waking this thread up.  We can detect this case
	 * by checking to see if this thread has been given a
	 * turnstile by either turnstile_signal() or
	 * turnstile_broadcast().  In this case, treat the thread as
	 * if it was already running.
	 */
	if (td->td_turnstile != NULL)
		return (0);

	/*
	 * Check if the thread needs to be moved on the blocked chain.
	 * It needs to be moved if either its priority is lower than
	 * the previous thread or higher than the next thread.
	 */
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	td1 = TAILQ_PREV(td, threadqueue, td_lockq);
	td2 = TAILQ_NEXT(td, td_lockq);
	if ((td1 != NULL && td->td_priority < td1->td_priority) ||
	    (td2 != NULL && td->td_priority > td2->td_priority)) {

		/*
		 * Remove thread from blocked chain and determine where
		 * it should be moved to.
		 */
		queue = td->td_tsqueue;
		MPASS(queue == TS_EXCLUSIVE_QUEUE || queue == TS_SHARED_QUEUE);
		mtx_lock_spin(&td_contested_lock);
		TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq) {
			MPASS(td1->td_proc->p_magic == P_MAGIC);
			if (td1->td_priority > td->td_priority)
				break;
		}

		if (td1 == NULL)
			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		else
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		mtx_unlock_spin(&td_contested_lock);
		if (td1 == NULL)
			CTR3(KTR_LOCK,
		    "turnstile_adjust_thread: td %d put at tail on [%p] %s",
			    td->td_tid, ts->ts_lockobj,
			    ts->ts_lockobj->lo_name);
		else
			CTR4(KTR_LOCK,
		    "turnstile_adjust_thread: td %d moved before %d on [%p] %s",
			    td->td_tid, td1->td_tid, ts->ts_lockobj,
			    ts->ts_lockobj->lo_name);
	}
	return (1);
}

/*
 * Early initialization of turnstiles.  This is not done via a SYSINIT()
 * since this needs to be initialized very early when mutexes are first
 * initialized.
 */
void
init_turnstiles(void)
{
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		LIST_INIT(&turnstile_chains[i].tc_turnstiles);
		mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
		    NULL, MTX_SPIN);
	}
	mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
	LIST_INIT(&thread0.td_contested);
	thread0.td_turnstile = NULL;
}

#ifdef TURNSTILE_PROFILING
static void
init_turnstile_profiling(void *arg)
{
	struct sysctl_oid *chain_oid;
	char chain_name[10];
	int i;

	for (i = 0; i < TC_TABLESIZE; i++) {
		snprintf(chain_name, sizeof(chain_name), "%d", i);
		chain_oid = SYSCTL_ADD_NODE(NULL,
		    SYSCTL_STATIC_CHILDREN(_debug_turnstile_chains), OID_AUTO,
		    chain_name, CTLFLAG_RD, NULL, "turnstile chain stats");
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "depth", CTLFLAG_RD, &turnstile_chains[i].tc_depth, 0,
		    NULL);
		SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(chain_oid), OID_AUTO,
		    "max_depth", CTLFLAG_RD, &turnstile_chains[i].tc_max_depth,
		    0, NULL);
	}
}
SYSINIT(turnstile_profiling, SI_SUB_LOCK, SI_ORDER_ANY,
    init_turnstile_profiling, NULL);
#endif

static void
init_turnstile0(void *dummy)
{

	thread0.td_turnstile = turnstile_alloc();
}
SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);

/*
 * Update a thread on the turnstile list after its priority has been changed.
 * The old priority is passed in as an argument.
 */
void
turnstile_adjust(struct thread *td, u_char oldpri)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;

	mtx_assert(&sched_lock, MA_OWNED);
	MPASS(TD_ON_LOCK(td));

	/*
	 * Pick up the lock that td is blocked on.
	 */
	ts = td->td_blocked;
	MPASS(ts != NULL);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_lock_spin(&tc->tc_lock);

	/* Resort the turnstile on the list. */
	if (!turnstile_adjust_thread(ts, td)) {
		mtx_unlock_spin(&tc->tc_lock);
		return;
	}

	/*
	 * If our priority was lowered and we are at the head of the
	 * turnstile, then propagate our new priority up the chain.
	 * Note that we currently don't try to revoke lent priorities
	 * when our priority goes up.
	 */
	MPASS(td->td_tsqueue == TS_EXCLUSIVE_QUEUE ||
	    td->td_tsqueue == TS_SHARED_QUEUE);
	if (td == TAILQ_FIRST(&ts->ts_blocked[td->td_tsqueue]) &&
	    td->td_priority < oldpri) {
		mtx_unlock_spin(&tc->tc_lock);
		propagate_priority(td);
	} else
		mtx_unlock_spin(&tc->tc_lock);
}

/*
 * Set the owner of the lock this turnstile is attached to.
 */
static void
turnstile_setowner(struct turnstile *ts, struct thread *owner)
{

	mtx_assert(&td_contested_lock, MA_OWNED);
	MPASS(ts->ts_owner == NULL);

	/* A shared lock might not have an owner. */
	if (owner == NULL)
		return;

	MPASS(owner->td_proc->p_magic == P_MAGIC);
	ts->ts_owner = owner;
	LIST_INSERT_HEAD(&owner->td_contested, ts, ts_link);
}

/*
 * Malloc a turnstile for a new thread, initialize it and return it.
 */
struct turnstile *
turnstile_alloc(void)
{
	struct turnstile *ts;

	ts = malloc(sizeof(struct turnstile), M_TURNSTILE, M_WAITOK | M_ZERO);
	TAILQ_INIT(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
	TAILQ_INIT(&ts->ts_blocked[TS_SHARED_QUEUE]);
	TAILQ_INIT(&ts->ts_pending);
	LIST_INIT(&ts->ts_free);
	return (ts);
}

/*
 * Free a turnstile when a thread is destroyed.
 */
void
turnstile_free(struct turnstile *ts)
{

	MPASS(ts != NULL);
	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]));
	MPASS(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]));
	MPASS(TAILQ_EMPTY(&ts->ts_pending));
	free(ts, M_TURNSTILE);
}

/*
 * Lock the turnstile chain associated with the specified lock.
 */
void
turnstile_lock(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	tc = TC_LOOKUP(lock);
	mtx_lock_spin(&tc->tc_lock);
}

/*
 * Look up the turnstile for a lock in the hash table.  The turnstile
 * chain associated with the lock must already be locked via
 * turnstile_lock().  If no turnstile is found in the hash table, NULL
 * is returned.
 */
struct turnstile *
turnstile_lookup(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;

	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock)
			return (ts);
	return (NULL);
}

/*
 * Unlock the turnstile chain associated with a given lock.
 */
void
turnstile_release(struct lock_object *lock)
{
	struct turnstile_chain *tc;

	tc = TC_LOOKUP(lock);
	mtx_unlock_spin(&tc->tc_lock);
}

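/*
 * Together, turnstile_lock(), turnstile_lookup(), and turnstile_release()
 * bracket any inspection of a lock's turnstile.  A minimal sketch of a
 * hypothetical unlock path checking for waiters ('m' is a struct mtx and
 * mtx_object its embedded lock_object):
 *
 *	turnstile_lock(&m->mtx_object);
 *	ts = turnstile_lookup(&m->mtx_object);
 *	if (ts == NULL)
 *		turnstile_release(&m->mtx_object);	(no waiters)
 *	else
 *		... wake waiters via turnstile_signal()/turnstile_unpend()
 */
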
/*
 * Return a pointer to the thread waiting on this turnstile with the
 * most important priority or NULL if the turnstile has no waiters.
 */
static struct thread *
turnstile_first_waiter(struct turnstile *ts)
{
	struct thread *std, *xtd;

	std = TAILQ_FIRST(&ts->ts_blocked[TS_SHARED_QUEUE]);
	xtd = TAILQ_FIRST(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]);
	if (xtd == NULL || (std != NULL && std->td_priority < xtd->td_priority))
		return (std);
	return (xtd);
}

/*
 * Take ownership of a turnstile and adjust the priority of the new
 * owner appropriately.
 */
void
turnstile_claim(struct lock_object *lock)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct thread *td, *owner;

	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	ts = turnstile_lookup(lock);
	MPASS(ts != NULL);

	owner = curthread;
	mtx_lock_spin(&td_contested_lock);
	turnstile_setowner(ts, owner);
	mtx_unlock_spin(&td_contested_lock);

	td = turnstile_first_waiter(ts);
	MPASS(td != NULL);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_unlock_spin(&tc->tc_lock);

	/*
	 * Update the priority of the new owner if needed.
	 */
	mtx_lock_spin(&sched_lock);
	if (td->td_priority < owner->td_priority)
		sched_lend_prio(owner, td->td_priority);
	mtx_unlock_spin(&sched_lock);
}

/*
 * Block the current thread on the turnstile associated with 'lock'.  This
 * function will context switch and not return until this thread has been
 * woken back up.  This function must be called with the appropriate
 * turnstile chain locked and will return with it unlocked.
 */
void
turnstile_wait(struct lock_object *lock, struct thread *owner, int queue)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct thread *td, *td1;

	td = curthread;
	tc = TC_LOOKUP(lock);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(td->td_turnstile != NULL);
	if (queue == TS_SHARED_QUEUE)
		MPASS(owner != NULL);
	if (owner)
		MPASS(owner->td_proc->p_magic == P_MAGIC);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/* Look up the turnstile associated with the lock 'lock'. */
	ts = turnstile_lookup(lock);

	/*
	 * If the lock does not already have a turnstile, use this thread's
	 * turnstile.  Otherwise insert the current thread into the
	 * turnstile already in use by this lock.
	 */
	if (ts == NULL) {
#ifdef TURNSTILE_PROFILING
		tc->tc_depth++;
		if (tc->tc_depth > tc->tc_max_depth) {
			tc->tc_max_depth = tc->tc_depth;
			if (tc->tc_max_depth > turnstile_max_depth)
				turnstile_max_depth = tc->tc_max_depth;
		}
#endif
		ts = td->td_turnstile;
		LIST_INSERT_HEAD(&tc->tc_turnstiles, ts, ts_hash);
		KASSERT(TAILQ_EMPTY(&ts->ts_pending),
		    ("thread's turnstile has pending threads"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]),
		    ("thread's turnstile has exclusive waiters"));
		KASSERT(TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]),
		    ("thread's turnstile has shared waiters"));
		KASSERT(LIST_EMPTY(&ts->ts_free),
		    ("thread's turnstile has a non-empty free list"));
		KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
		ts->ts_lockobj = lock;
		mtx_lock_spin(&td_contested_lock);
		TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		turnstile_setowner(ts, owner);
		mtx_unlock_spin(&td_contested_lock);
	} else {
		TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq)
			if (td1->td_priority > td->td_priority)
				break;
		mtx_lock_spin(&td_contested_lock);
		if (td1 != NULL)
			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
		else
			TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
		MPASS(owner == ts->ts_owner);
		mtx_unlock_spin(&td_contested_lock);
		MPASS(td->td_turnstile != NULL);
		LIST_INSERT_HEAD(&ts->ts_free, td->td_turnstile, ts_hash);
	}
	td->td_turnstile = NULL;
	mtx_unlock_spin(&tc->tc_lock);

	mtx_lock_spin(&sched_lock);
	/*
	 * Handle race condition where a thread on another CPU that owns
	 * lock 'lock' could have woken us in between us dropping the
	 * turnstile chain lock and acquiring the sched_lock.
	 */
	if (td->td_flags & TDF_TSNOBLOCK) {
		td->td_flags &= ~TDF_TSNOBLOCK;
		mtx_unlock_spin(&sched_lock);
		return;
	}

#ifdef notyet
	/*
	 * If we're borrowing an interrupted thread's VM context, we
	 * must clean up before going to sleep.
	 */
	if (td->td_ithd != NULL) {
		struct ithd *it = td->td_ithd;

		if (it->it_interrupted) {
			if (LOCK_LOG_TEST(lock, 0))
				CTR3(KTR_LOCK, "%s: %p interrupted %p",
				    __func__, it, it->it_interrupted);
			intr_thd_fixup(it);
		}
	}
#endif

	/* Save who we are blocked on and switch. */
	td->td_tsqueue = queue;
	td->td_blocked = ts;
	td->td_lockname = lock->lo_name;
	TD_SET_LOCK(td);
	propagate_priority(td);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d blocked on [%p] %s", __func__,
		    td->td_tid, lock, lock->lo_name);

	mi_switch(SW_VOL, NULL);

	if (LOCK_LOG_TEST(lock, 0))
		CTR4(KTR_LOCK, "%s: td %d free from blocked on [%p] %s",
		    __func__, td->td_tid, lock, lock->lo_name);

	mtx_unlock_spin(&sched_lock);
}

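/*
 * A hypothetical contested-acquire loop built on turnstile_wait() (a
 * sketch of how a lock implementation might drive this function;
 * try_acquire() and lock_owner() are made-up helpers, details elided):
 *
 *	for (;;) {
 *		if (try_acquire(lock))
 *			break;
 *		turnstile_lock(&lock->lock_object);
 *		owner = lock_owner(lock);
 *		if (try_acquire(lock)) {	(recheck under chain lock)
 *			turnstile_release(&lock->lock_object);
 *			break;
 *		}
 *		turnstile_wait(&lock->lock_object, owner,
 *		    TS_EXCLUSIVE_QUEUE);	(returns, chain unlocked)
 *	}
 */
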
/*
 * Pick the highest priority thread on this turnstile and put it on the
 * pending list.  This must be called with the turnstile chain locked.
 */
int
turnstile_signal(struct turnstile *ts, int queue)
{
	struct turnstile_chain *tc;
	struct thread *td;
	int empty;

	MPASS(ts != NULL);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread ||
	    (queue == TS_EXCLUSIVE_QUEUE && ts->ts_owner == NULL));
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/*
	 * Pick the highest priority thread blocked on this lock and
	 * move it to the pending list.
	 */
	td = TAILQ_FIRST(&ts->ts_blocked[queue]);
	MPASS(td->td_proc->p_magic == P_MAGIC);
	mtx_lock_spin(&td_contested_lock);
	TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
	mtx_unlock_spin(&td_contested_lock);
	TAILQ_INSERT_TAIL(&ts->ts_pending, td, td_lockq);

	/*
	 * If the turnstile is now empty, remove it from its chain and
	 * give it to the about-to-be-woken thread.  Otherwise take a
	 * turnstile from the free list and give it to the thread.
	 */
	empty = TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]);
	if (empty) {
		MPASS(LIST_EMPTY(&ts->ts_free));
#ifdef TURNSTILE_PROFILING
		tc->tc_depth--;
#endif
	} else
		ts = LIST_FIRST(&ts->ts_free);
	MPASS(ts != NULL);
	LIST_REMOVE(ts, ts_hash);
	td->td_turnstile = ts;

	return (empty);
}

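/*
 * The return value tells the caller whether it just moved the last
 * waiter.  A hypothetical unlock path might use it to decide whether the
 * lock should keep advertising contention, e.g.:
 *
 *	if (turnstile_signal(ts, TS_EXCLUSIVE_QUEUE))
 *		... turnstile now empty: mark the lock uncontested ...
 *	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
 */
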
/*
 * Put all blocked threads on the pending list.  This must be called with
 * the turnstile chain locked.
 */
void
turnstile_broadcast(struct turnstile *ts, int queue)
{
	struct turnstile_chain *tc;
	struct turnstile *ts1;
	struct thread *td;

	MPASS(ts != NULL);
	MPASS(curthread->td_proc->p_magic == P_MAGIC);
	MPASS(ts->ts_owner == curthread ||
	    (queue == TS_EXCLUSIVE_QUEUE && ts->ts_owner == NULL));
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);

	/*
	 * Transfer the blocked list to the pending list.
	 */
	mtx_lock_spin(&td_contested_lock);
	TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked[queue], td_lockq);
	mtx_unlock_spin(&td_contested_lock);

	/*
	 * Give a turnstile to each thread.  The last thread gets
	 * this turnstile if the turnstile is empty.
	 */
	TAILQ_FOREACH(td, &ts->ts_pending, td_lockq) {
		if (LIST_EMPTY(&ts->ts_free)) {
			MPASS(TAILQ_NEXT(td, td_lockq) == NULL);
			ts1 = ts;
#ifdef TURNSTILE_PROFILING
			tc->tc_depth--;
#endif
		} else
			ts1 = LIST_FIRST(&ts->ts_free);
		MPASS(ts1 != NULL);
		LIST_REMOVE(ts1, ts_hash);
		td->td_turnstile = ts1;
	}
}

/*
 * Wake up all threads on the pending list and adjust the priority of the
 * current thread appropriately.  This must be called with the turnstile
 * chain locked.
 */
void
turnstile_unpend(struct turnstile *ts, int owner_type)
{
	TAILQ_HEAD( ,thread) pending_threads;
	struct turnstile_chain *tc;
	struct thread *td;
	u_char cp, pri;

	MPASS(ts != NULL);
	MPASS(ts->ts_owner == curthread ||
	    (owner_type == TS_SHARED_LOCK && ts->ts_owner == NULL));
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
	MPASS(!TAILQ_EMPTY(&ts->ts_pending));

	/*
	 * Move the list of pending threads out of the turnstile and
	 * into a local variable.
	 */
	TAILQ_INIT(&pending_threads);
	TAILQ_CONCAT(&pending_threads, &ts->ts_pending, td_lockq);
#ifdef INVARIANTS
	if (TAILQ_EMPTY(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE]) &&
	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]))
		ts->ts_lockobj = NULL;
#endif

	/*
	 * Remove the turnstile from this thread's list of contested locks
	 * since this thread doesn't own it anymore.  New threads will
	 * not be blocking on the turnstile until it is claimed by a new
	 * owner.  There might not be a current owner if this is a shared
	 * lock.
	 */
	if (ts->ts_owner != NULL) {
		mtx_lock_spin(&td_contested_lock);
		ts->ts_owner = NULL;
		LIST_REMOVE(ts, ts_link);
		mtx_unlock_spin(&td_contested_lock);
	}

	mtx_unlock_spin(&tc->tc_lock);

	/*
	 * Adjust the priority of curthread based on other contested
	 * locks it owns.  Don't lower the priority below the base
	 * priority however.
	 */
	td = curthread;
	pri = PRI_MAX;
	mtx_lock_spin(&sched_lock);
	mtx_lock_spin(&td_contested_lock);
	LIST_FOREACH(ts, &td->td_contested, ts_link) {
		cp = turnstile_first_waiter(ts)->td_priority;
		if (cp < pri)
			pri = cp;
	}
	mtx_unlock_spin(&td_contested_lock);
	sched_unlend_prio(td, pri);

	/*
	 * Wake up all the pending threads.  If a thread is not blocked
	 * on a lock, then it is currently executing on another CPU in
	 * turnstile_wait() or sitting on a run queue waiting to resume
	 * in turnstile_wait().  Set a flag to force it to try to acquire
	 * the lock again instead of blocking.
	 */
	while (!TAILQ_EMPTY(&pending_threads)) {
		td = TAILQ_FIRST(&pending_threads);
		TAILQ_REMOVE(&pending_threads, td, td_lockq);
		MPASS(td->td_proc->p_magic == P_MAGIC);
		if (TD_ON_LOCK(td)) {
			td->td_blocked = NULL;
			td->td_lockname = NULL;
#ifdef INVARIANTS
			td->td_tsqueue = 0xff;
#endif
			TD_CLR_LOCK(td);
			MPASS(TD_CAN_RUN(td));
			setrunqueue(td, SRQ_BORING);
		} else {
			td->td_flags |= TDF_TSNOBLOCK;
			MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));
		}
	}
	mtx_unlock_spin(&sched_lock);
}

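/*
 * Putting the wakeup pieces together, a hypothetical exclusive-unlock
 * path runs, in order: turnstile_lock() on the lock's chain,
 * turnstile_lookup() to find the turnstile, turnstile_signal() or
 * turnstile_broadcast() to move waiters to ts_pending, and finally
 * turnstile_unpend(), which drops the chain lock, trims curthread's
 * lent priority, and puts the pending threads on the run queue.
 */
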
/*
 * Return the first thread in a turnstile.
 */
struct thread *
turnstile_head(struct turnstile *ts, int queue)
{
#ifdef INVARIANTS
	struct turnstile_chain *tc;

	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
	tc = TC_LOOKUP(ts->ts_lockobj);
	mtx_assert(&tc->tc_lock, MA_OWNED);
#endif
	return (TAILQ_FIRST(&ts->ts_blocked[queue]));
}

#ifdef DDB
static void
print_thread(struct thread *td, const char *prefix)
{

	db_printf("%s%p (tid %d, pid %d, \"%s\")\n", prefix, td, td->td_tid,
	    td->td_proc->p_pid, td->td_proc->p_comm);
}

static void
print_queue(struct threadqueue *queue, const char *header, const char *prefix)
{
	struct thread *td;

	db_printf("%s:\n", header);
	if (TAILQ_EMPTY(queue)) {
		db_printf("%sempty\n", prefix);
		return;
	}
	TAILQ_FOREACH(td, queue, td_lockq) {
		print_thread(td, prefix);
	}
}

DB_SHOW_COMMAND(turnstile, db_show_turnstile)
{
	struct turnstile_chain *tc;
	struct turnstile *ts;
	struct lock_object *lock;
	int i;

	if (!have_addr)
		return;

	/*
	 * First, see if there is an active turnstile for the lock indicated
	 * by the address.
	 */
	lock = (struct lock_object *)addr;
	tc = TC_LOOKUP(lock);
	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
		if (ts->ts_lockobj == lock)
			goto found;

	/*
	 * Second, see if there is an active turnstile at the address
	 * indicated.
	 */
	for (i = 0; i < TC_TABLESIZE; i++)
		LIST_FOREACH(ts, &turnstile_chains[i].tc_turnstiles, ts_hash) {
			if (ts == (struct turnstile *)addr)
				goto found;
		}

	db_printf("Unable to locate a turnstile via %p\n", (void *)addr);
	return;
found:
	lock = ts->ts_lockobj;
	db_printf("Lock: %p - (%s) %s\n", lock, LOCK_CLASS(lock)->lc_name,
	    lock->lo_name);
	if (ts->ts_owner)
		print_thread(ts->ts_owner, "Lock Owner: ");
	else
		db_printf("Lock Owner: none\n");
	print_queue(&ts->ts_blocked[TS_SHARED_QUEUE], "Shared Waiters", "\t");
	print_queue(&ts->ts_blocked[TS_EXCLUSIVE_QUEUE], "Exclusive Waiters",
	    "\t");
	print_queue(&ts->ts_pending, "Pending Threads", "\t");
}
#endif