2 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice(s), this list of conditions and the following disclaimer as
10 * the first lines of this file unmodified other than the possible
11 * addition of one or more copyright notices.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice(s), this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
17 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
20 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
29 #include "opt_adaptive_lockmgrs.h"
31 #include "opt_kdtrace.h"
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <sys/param.h>
39 #include <sys/lock_profile.h>
40 #include <sys/lockmgr.h>
41 #include <sys/mutex.h>
43 #include <sys/sleepqueue.h>
45 #include <sys/stack.h>
47 #include <sys/sysctl.h>
48 #include <sys/systm.h>
50 #include <machine/cpu.h>
56 CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
57 (LK_ADAPTIVE | LK_NOSHARE));
58 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
59 ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
61 #define SQ_EXCLUSIVE_QUEUE 0
62 #define SQ_SHARED_QUEUE 1
65 #define _lockmgr_assert(lk, what, file, line)
66 #define TD_LOCKS_INC(td)
67 #define TD_LOCKS_DEC(td)
69 #define TD_LOCKS_INC(td) ((td)->td_locks++)
70 #define TD_LOCKS_DEC(td) ((td)->td_locks--)
72 #define TD_SLOCKS_INC(td) ((td)->td_lk_slocks++)
73 #define TD_SLOCKS_DEC(td) ((td)->td_lk_slocks--)
76 #define STACK_PRINT(lk)
77 #define STACK_SAVE(lk)
78 #define STACK_ZERO(lk)
80 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
81 #define STACK_SAVE(lk) stack_save(&(lk)->lk_stack)
82 #define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack)
85 #define LOCK_LOG2(lk, string, arg1, arg2) \
86 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
87 CTR2(KTR_LOCK, (string), (arg1), (arg2))
88 #define LOCK_LOG3(lk, string, arg1, arg2, arg3) \
89 if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \
90 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
92 #define GIANT_DECLARE \
94 WITNESS_SAVE_DECL(Giant)
95 #define GIANT_RESTORE() do { \
99 WITNESS_RESTORE(&Giant.lock_object, Giant); \
102 #define GIANT_SAVE() do { \
103 if (mtx_owned(&Giant)) { \
104 WITNESS_SAVE(&Giant.lock_object, Giant); \
105 while (mtx_owned(&Giant)) { \
107 mtx_unlock(&Giant); \
112 #define LK_CAN_SHARE(x) \
113 (((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 || \
114 ((x) & LK_EXCLUSIVE_SPINNERS) == 0 || \
115 curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
116 #define LK_TRYOP(x) \
119 #define LK_CAN_WITNESS(x) \
120 (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
121 #define LK_TRYWIT(x) \
122 (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
124 #define LK_CAN_ADAPT(lk, f) \
125 (((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 && \
126 ((f) & LK_SLEEPFAIL) == 0)
128 #define lockmgr_disowned(lk) \
129 (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
131 #define lockmgr_xlocked(lk) \
132 (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
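/*
 * Reader's aid (a sketch inferred from the macros above, not text from the
 * original source): the lk_lock word holds either LK_UNLOCKED, or LK_SHARE
 * together with a sharer count, or the owning thread pointer (LK_KERNPROC
 * once disowned), while the low flag bits (LK_SHARED_WAITERS,
 * LK_EXCLUSIVE_WAITERS, LK_EXCLUSIVE_SPINNERS) record queued waiters and
 * spinners.  lockmgr_xlocked() and lockmgr_disowned() simply mask those
 * flag bits off before comparing against the holder.
 */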
134 static void assert_lockmgr(const struct lock_object *lock, int how);
136 static void db_show_lockmgr(const struct lock_object *lock);
138 static void lock_lockmgr(struct lock_object *lock, int how);
140 static int owner_lockmgr(const struct lock_object *lock,
141 struct thread **owner);
143 static int unlock_lockmgr(struct lock_object *lock);
145 struct lock_class lock_class_lockmgr = {
146 .lc_name = "lockmgr",
147 .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
148 .lc_assert = assert_lockmgr,
150 .lc_ddb_show = db_show_lockmgr,
152 .lc_lock = lock_lockmgr,
153 .lc_unlock = unlock_lockmgr,
155 .lc_owner = owner_lockmgr,
159 #ifdef ADAPTIVE_LOCKMGRS
160 static u_int alk_retries = 10;
161 static u_int alk_loops = 10000;
162 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL,
163 "lockmgr debugging");
164 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
165 SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
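/*
 * A tuning note (an inference from the CTLFLAG_RW knobs above, not text from
 * the original file): when the kernel is built with ADAPTIVE_LOCKMGRS, the
 * adaptive spin behaviour can be adjusted at run time through the
 * debug.lockmgr.retries and debug.lockmgr.loops sysctl variables.
 */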
168 static __inline struct thread *
169 lockmgr_xholder(const struct lock *lk)
174 return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
178 * Assumes the sleepqueue lock is held on entry and returns with it released.
179 * It also assumes the generic interlock is sane and has already been checked.
180 * If LK_INTERLOCK is specified the interlock is not reacquired after the
184 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
185 const char *wmesg, int pri, int timo, int queue)
188 struct lock_class *class;
191 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
192 catch = pri & PCATCH;
196 LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
197 (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
199 if (flags & LK_INTERLOCK)
200 class->lc_unlock(ilk);
201 if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
204 sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
205 SLEEPQ_INTERRUPTIBLE : 0), queue);
206 if ((flags & LK_TIMELOCK) && timo)
207 sleepq_set_timeout(&lk->lock_object, timo);
210 * Decide which sleep primitive to use.
212 if ((flags & LK_TIMELOCK) && timo && catch)
213 error = sleepq_timedwait_sig(&lk->lock_object, pri);
214 else if ((flags & LK_TIMELOCK) && timo)
215 error = sleepq_timedwait(&lk->lock_object, pri);
217 error = sleepq_wait_sig(&lk->lock_object, pri);
219 sleepq_wait(&lk->lock_object, pri);
221 if ((flags & LK_SLEEPFAIL) && error == 0)
228 wakeupshlk(struct lock *lk, const char *file, int line)
232 int queue, wakeup_swapper;
234 TD_LOCKS_DEC(curthread);
235 TD_SLOCKS_DEC(curthread);
236 WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
237 LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
244 * If there is more than one shared lock held, just drop one
247 if (LK_SHARERS(x) > 1) {
248 if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
255 * If there are no waiters on the exclusive queue, drop the
258 if ((x & LK_ALL_WAITERS) == 0) {
259 MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
261 if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
267 * We should have a sharer with waiters, so enter the hard
268 * path in order to handle wakeups correctly.
270 sleepq_lock(&lk->lock_object);
271 x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
275 * If the lock has exclusive waiters, give them preference in
276 * order to avoid deadlock with shared runners-up.
277 * If interruptible sleeps left the exclusive queue empty,
278 * avoid starvation of the threads sleeping on the shared
279 * queue by giving them precedence and clearing the
280 * exclusive waiters bit anyway.
281 * Please note that the lk_exslpfail count may overstate
282 * the real number of waiters with the LK_SLEEPFAIL flag on,
283 * because LK_SLEEPFAIL may be used in conjunction with
284 * interruptible sleeps, so lk_exslpfail should be treated as
285 * an upper bound, including the edge cases.
287 realexslp = sleepq_sleepcnt(&lk->lock_object,
289 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
290 if (lk->lk_exslpfail < realexslp) {
291 lk->lk_exslpfail = 0;
292 queue = SQ_EXCLUSIVE_QUEUE;
293 v |= (x & LK_SHARED_WAITERS);
295 lk->lk_exslpfail = 0;
297 "%s: %p has only LK_SLEEPFAIL sleepers",
300 "%s: %p waking up threads on the exclusive queue",
303 sleepq_broadcast(&lk->lock_object,
304 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
305 queue = SQ_SHARED_QUEUE;
311 * Exclusive waiters sleeping with LK_SLEEPFAIL on
312 * and using interruptible sleeps/timeouts may have
313 * left spurious lk_exslpfail counts, so clean
316 lk->lk_exslpfail = 0;
317 queue = SQ_SHARED_QUEUE;
320 if (!atomic_cmpset_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
322 sleepq_release(&lk->lock_object);
325 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
326 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
328 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
330 sleepq_release(&lk->lock_object);
334 lock_profile_release_lock(&lk->lock_object);
335 return (wakeup_swapper);
339 assert_lockmgr(const struct lock_object *lock, int what)
342 panic("lockmgr locks do not support assertions");
346 lock_lockmgr(struct lock_object *lock, int how)
349 panic("lockmgr locks do not support sleep interlocking");
353 unlock_lockmgr(struct lock_object *lock)
356 panic("lockmgr locks do not support sleep interlocking");
361 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
364 panic("lockmgr locks do not support owner inquiring");
369 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
373 MPASS((flags & ~LK_INIT_MASK) == 0);
374 ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
375 ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
378 iflags = LO_SLEEPABLE | LO_UPGRADABLE;
379 if (flags & LK_CANRECURSE)
380 iflags |= LO_RECURSABLE;
381 if ((flags & LK_NODUP) == 0)
383 if (flags & LK_NOPROFILE)
384 iflags |= LO_NOPROFILE;
385 if ((flags & LK_NOWITNESS) == 0)
386 iflags |= LO_WITNESS;
387 if (flags & LK_QUIET)
389 iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);
391 lk->lk_lock = LK_UNLOCKED;
393 lk->lk_exslpfail = 0;
396 lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
401 * XXX: Gross hacks to manipulate external lock flags after
402 * initialization. Used for certain vnode and buf locks.
405 lockallowshare(struct lock *lk)
408 lockmgr_assert(lk, KA_XLOCKED);
409 lk->lock_object.lo_flags &= ~LK_NOSHARE;
413 lockallowrecurse(struct lock *lk)
416 lockmgr_assert(lk, KA_XLOCKED);
417 lk->lock_object.lo_flags |= LO_RECURSABLE;
421 lockdisablerecurse(struct lock *lk)
424 lockmgr_assert(lk, KA_XLOCKED);
425 lk->lock_object.lo_flags &= ~LO_RECURSABLE;
429 lockdestroy(struct lock *lk)
432 KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
433 KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
434 KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
435 lock_destroy(&lk->lock_object);
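/*
 * Illustrative sketch of the consumer-side life cycle (not part of the
 * original file; "struct foo", "foo_example" and the PRIBIO priority are
 * hypothetical choices): lockinit() prepares the lock, lockmgr() acquires
 * and releases it, and lockdestroy() tears it down once it is unlocked.
 */
#if 0	/* illustrative only */
struct foo {
	struct lock	f_lock;
	int		f_data;
};

static void
foo_example(struct foo *fp)
{

	lockinit(&fp->f_lock, PRIBIO, "foolck", 0, 0);

	/* Exclusive acquisition for modification. */
	lockmgr(&fp->f_lock, LK_EXCLUSIVE, NULL);
	fp->f_data++;
	lockmgr(&fp->f_lock, LK_RELEASE, NULL);

	/* Shared acquisition for read-only access. */
	lockmgr(&fp->f_lock, LK_SHARED, NULL);
	(void)fp->f_data;
	lockmgr(&fp->f_lock, LK_RELEASE, NULL);

	lockdestroy(&fp->f_lock);
}
#endif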
439 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
440 const char *wmesg, int pri, int timo, const char *file, int line)
443 struct lock_class *class;
447 int error, ipri, itimo, queue, wakeup_swapper;
448 #ifdef LOCK_PROFILING
449 uint64_t waittime = 0;
452 #ifdef ADAPTIVE_LOCKMGRS
453 volatile struct thread *owner;
454 u_int i, spintries = 0;
458 tid = (uintptr_t)curthread;
459 op = (flags & LK_TYPE_MASK);
460 iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
461 ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
462 itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
464 MPASS((flags & ~LK_TOTAL_MASK) == 0);
465 KASSERT((op & (op - 1)) == 0,
466 ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
467 KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
468 (op != LK_DOWNGRADE && op != LK_RELEASE),
469 ("%s: Invalid flags in regard of the operation desired @ %s:%d",
470 __func__, file, line));
471 KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
472 ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
473 __func__, file, line));
475 class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
476 if (panicstr != NULL) {
477 if (flags & LK_INTERLOCK)
478 class->lc_unlock(ilk);
482 if (lk->lock_object.lo_flags & LK_NOSHARE) {
489 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
498 if (LK_CAN_WITNESS(flags))
499 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
505 * If no other thread has an exclusive lock, or
506 * no exclusive waiter is present, bump the count of
507 * sharers. Since we have to preserve the state of
508 * waiters, if we fail to acquire the shared lock
509 * loop back and retry.
511 if (LK_CAN_SHARE(x)) {
512 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
517 lock_profile_obtain_lock_failed(&lk->lock_object,
518 &contested, &waittime);
521 * If the lock is already held by curthread in
522 * exclusive mode, avoid a deadlock.
524 if (LK_HOLDER(x) == tid) {
526 "%s: %p already held in exclusive mode",
533 * If the lock is expected to not sleep just give up
536 if (LK_TRYOP(flags)) {
537 LOCK_LOG2(lk, "%s: %p fails the try operation",
543 #ifdef ADAPTIVE_LOCKMGRS
545 * If the owner is running on another CPU, spin until
546 * the owner stops running or the state of the lock
547 * changes.  We need to handle two states here
548 * because, for a failed acquisition, the lock can be
549 * held either in exclusive mode or in shared mode
550 * (for the writer starvation avoidance technique).
552 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
553 LK_HOLDER(x) != LK_KERNPROC) {
554 owner = (struct thread *)LK_HOLDER(x);
555 if (LOCK_LOG_TEST(&lk->lock_object, 0))
557 "%s: spinning on %p held by %p",
558 __func__, lk, owner);
561 * If we are also holding an interlock, drop it
562 * in order to avoid a deadlock if the lockmgr
563 * owner is adaptively spinning on the
566 if (flags & LK_INTERLOCK) {
567 class->lc_unlock(ilk);
568 flags &= ~LK_INTERLOCK;
571 while (LK_HOLDER(lk->lk_lock) ==
572 (uintptr_t)owner && TD_IS_RUNNING(owner))
576 } else if (LK_CAN_ADAPT(lk, flags) &&
577 (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
578 spintries < alk_retries) {
579 if (flags & LK_INTERLOCK) {
580 class->lc_unlock(ilk);
581 flags &= ~LK_INTERLOCK;
585 for (i = 0; i < alk_loops; i++) {
586 if (LOCK_LOG_TEST(&lk->lock_object, 0))
588 "%s: shared spinning on %p with %u and %u",
589 __func__, lk, spintries, i);
591 if ((x & LK_SHARE) == 0 ||
592 LK_CAN_SHARE(x) != 0)
603 * Acquire the sleepqueue chain lock because we
604 * probably will need to manipulate waiters flags.
606 sleepq_lock(&lk->lock_object);
610 * if the lock can be acquired in shared mode, try
613 if (LK_CAN_SHARE(x)) {
614 sleepq_release(&lk->lock_object);
618 #ifdef ADAPTIVE_LOCKMGRS
620 * The current lock owner might have started executing
621 * on another CPU (or the lock could have changed
622 * owner) while we were waiting on the sleepqueue
623 * chain lock.  If so, drop the sleepqueue lock and try
626 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
627 LK_HOLDER(x) != LK_KERNPROC) {
628 owner = (struct thread *)LK_HOLDER(x);
629 if (TD_IS_RUNNING(owner)) {
630 sleepq_release(&lk->lock_object);
637 * Try to set the LK_SHARED_WAITERS flag. If we fail,
638 * loop back and retry.
640 if ((x & LK_SHARED_WAITERS) == 0) {
641 if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
642 x | LK_SHARED_WAITERS)) {
643 sleepq_release(&lk->lock_object);
646 LOCK_LOG2(lk, "%s: %p set shared waiters flag",
651 * Since we have been unable to acquire the
652 * shared lock and the shared waiters flag is set,
655 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
657 flags &= ~LK_INTERLOCK;
660 "%s: interrupted sleep for %p with %d",
661 __func__, lk, error);
664 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
668 lock_profile_obtain_lock_success(&lk->lock_object,
669 contested, waittime, file, line);
670 LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
672 WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
674 TD_LOCKS_INC(curthread);
675 TD_SLOCKS_INC(curthread);
680 _lockmgr_assert(lk, KA_SLOCKED, file, line);
682 x = v & LK_ALL_WAITERS;
683 v &= LK_EXCLUSIVE_SPINNERS;
686 * Try to switch from one shared lock to an exclusive one.
687 * We need to preserve waiters flags during the operation.
689 if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
691 LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
693 WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
694 LK_TRYWIT(flags), file, line);
695 TD_SLOCKS_DEC(curthread);
700 * We have been unable to upgrade, so just
701 * give up the shared lock.
703 wakeup_swapper |= wakeupshlk(lk, file, line);
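/*
 * A caller-side note (an inference from the code above, not original text):
 * when the atomic switch fails the shared hold is dropped here and the
 * request continues as a plain exclusive acquisition, so any state examined
 * under the shared hold must be revalidated once LK_UPGRADE returns.
 */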
707 if (LK_CAN_WITNESS(flags))
708 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
709 LOP_EXCLUSIVE, file, line, ilk);
712 * If curthread already holds the lock and this one is
713 * allowed to recurse, simply recurse on it.
715 if (lockmgr_xlocked(lk)) {
716 if ((flags & LK_CANRECURSE) == 0 &&
717 (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
720 * If this is a try operation, just give up
721 * and return instead of panicking.
723 if (LK_TRYOP(flags)) {
725 "%s: %p fails the try operation",
730 if (flags & LK_INTERLOCK)
731 class->lc_unlock(ilk);
732 panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
733 __func__, iwmesg, file, line);
736 LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
737 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
738 lk->lk_recurse, file, line);
739 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
740 LK_TRYWIT(flags), file, line);
741 TD_LOCKS_INC(curthread);
745 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
747 lock_profile_obtain_lock_failed(&lk->lock_object,
748 &contested, &waittime);
751 * If the lock is expected to not sleep just give up
754 if (LK_TRYOP(flags)) {
755 LOCK_LOG2(lk, "%s: %p fails the try operation",
761 #ifdef ADAPTIVE_LOCKMGRS
763 * If the owner is running on another CPU, spin until
764 * the owner stops running or the state of the lock
768 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
769 LK_HOLDER(x) != LK_KERNPROC) {
770 owner = (struct thread *)LK_HOLDER(x);
771 if (LOCK_LOG_TEST(&lk->lock_object, 0))
773 "%s: spinning on %p held by %p",
774 __func__, lk, owner);
777 * If we are also holding an interlock, drop it
778 * in order to avoid a deadlock if the lockmgr
779 * owner is adaptively spinning on the
782 if (flags & LK_INTERLOCK) {
783 class->lc_unlock(ilk);
784 flags &= ~LK_INTERLOCK;
787 while (LK_HOLDER(lk->lk_lock) ==
788 (uintptr_t)owner && TD_IS_RUNNING(owner))
792 } else if (LK_CAN_ADAPT(lk, flags) &&
793 (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
794 spintries < alk_retries) {
795 if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
796 !atomic_cmpset_ptr(&lk->lk_lock, x,
797 x | LK_EXCLUSIVE_SPINNERS))
799 if (flags & LK_INTERLOCK) {
800 class->lc_unlock(ilk);
801 flags &= ~LK_INTERLOCK;
805 for (i = 0; i < alk_loops; i++) {
806 if (LOCK_LOG_TEST(&lk->lock_object, 0))
808 "%s: shared spinning on %p with %u and %u",
809 __func__, lk, spintries, i);
811 LK_EXCLUSIVE_SPINNERS) == 0)
822 * Acquire the sleepqueue chain lock because we
823 * probably will need to manipulate waiters flags.
825 sleepq_lock(&lk->lock_object);
829 * if the lock has been released while we spun on
830 * the sleepqueue chain lock just try again.
832 if (x == LK_UNLOCKED) {
833 sleepq_release(&lk->lock_object);
837 #ifdef ADAPTIVE_LOCKMGRS
839 * The current lock owner might have started executing
840 * on another CPU (or the lock could have changed
841 * owner) while we were waiting on the sleepqueue
842 * chain lock.  If so, drop the sleepqueue lock and try
845 if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
846 LK_HOLDER(x) != LK_KERNPROC) {
847 owner = (struct thread *)LK_HOLDER(x);
848 if (TD_IS_RUNNING(owner)) {
849 sleepq_release(&lk->lock_object);
856 * The lock can be in the state where there is a
857 * pending queue of waiters, but still no owner.
858 * This happens when the lock is contested and an
859 * owner is going to claim the lock.
860 * If curthread is the one that successfully acquires it,
861 * claim lock ownership and return, preserving the waiters
864 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
865 if ((x & ~v) == LK_UNLOCKED) {
866 v &= ~LK_EXCLUSIVE_SPINNERS;
867 if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
869 sleepq_release(&lk->lock_object);
871 "%s: %p claimed by a new writer",
875 sleepq_release(&lk->lock_object);
880 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
881 * fail, loop back and retry.
883 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
884 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
885 x | LK_EXCLUSIVE_WAITERS)) {
886 sleepq_release(&lk->lock_object);
889 LOCK_LOG2(lk, "%s: %p set excl waiters flag",
894 * Since we have been unable to acquire the
895 * exclusive lock and the exclusive waiters flag
896 * is set, we will sleep.
898 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
900 flags &= ~LK_INTERLOCK;
903 "%s: interrupted sleep for %p with %d",
904 __func__, lk, error);
907 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
911 lock_profile_obtain_lock_success(&lk->lock_object,
912 contested, waittime, file, line);
913 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
914 lk->lk_recurse, file, line);
915 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
916 LK_TRYWIT(flags), file, line);
917 TD_LOCKS_INC(curthread);
922 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
923 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
924 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
925 TD_SLOCKS_INC(curthread);
928 * In order to preserve waiters flags, just spin.
932 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
934 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
935 LK_SHARERS_LOCK(1) | x))
941 _lockmgr_assert(lk, KA_LOCKED, file, line);
944 if ((x & LK_SHARE) == 0) {
947 * As a first option, treat the lock as if it had not
949 * Fix up the tid variable if the lock has been disowned.
951 if (LK_HOLDER(x) == LK_KERNPROC)
954 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
956 TD_LOCKS_DEC(curthread);
958 LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
959 lk->lk_recurse, file, line);
962 * The lock is held in exclusive mode.
963 * If the lock is recursed also, then unrecurse it.
965 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
966 LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
971 if (tid != LK_KERNPROC)
972 lock_profile_release_lock(&lk->lock_object);
974 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
978 sleepq_lock(&lk->lock_object);
983 * If the lock has exclusive waiters, give them
984 * preference in order to avoid deadlock with
986 * If interruptible sleeps left the exclusive queue
987 * empty, avoid starvation of the threads sleeping
988 * on the shared queue by giving them precedence
989 * and clearing the exclusive waiters bit anyway.
990 * Please note that the lk_exslpfail count may
991 * overstate the real number of waiters with the
992 * LK_SLEEPFAIL flag on because it may be used in
993 * conjunction with interruptible sleeps, so
994 * lk_exslpfail should be treated as an upper
995 * bound, including the edge cases.
997 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
998 realexslp = sleepq_sleepcnt(&lk->lock_object,
1000 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1001 if (lk->lk_exslpfail < realexslp) {
1002 lk->lk_exslpfail = 0;
1003 queue = SQ_EXCLUSIVE_QUEUE;
1004 v |= (x & LK_SHARED_WAITERS);
1006 lk->lk_exslpfail = 0;
1008 "%s: %p has only LK_SLEEPFAIL sleepers",
1011 "%s: %p waking up threads on the exclusive queue",
1014 sleepq_broadcast(&lk->lock_object,
1015 SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1016 queue = SQ_SHARED_QUEUE;
1021 * Exclusive waiters sleeping with LK_SLEEPFAIL
1022 * on and using interruptible sleeps/timeouts
1023 * may have left spurious lk_exslpfail counts,
1024 * so clean them up anyway.
1026 lk->lk_exslpfail = 0;
1027 queue = SQ_SHARED_QUEUE;
1031 "%s: %p waking up threads on the %s queue",
1032 __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1034 atomic_store_rel_ptr(&lk->lk_lock, v);
1035 wakeup_swapper |= sleepq_broadcast(&lk->lock_object,
1036 SLEEPQ_LK, 0, queue);
1037 sleepq_release(&lk->lock_object);
1040 wakeup_swapper = wakeupshlk(lk, file, line);
1043 if (LK_CAN_WITNESS(flags))
1044 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1045 LOP_EXCLUSIVE, file, line, ilk);
1048 * Trying to drain a lock we already own will result in a
1051 if (lockmgr_xlocked(lk)) {
1052 if (flags & LK_INTERLOCK)
1053 class->lc_unlock(ilk);
1054 panic("%s: draining %s with the lock held @ %s:%d\n",
1055 __func__, iwmesg, file, line);
1058 while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1059 lock_profile_obtain_lock_failed(&lk->lock_object,
1060 &contested, &waittime);
1063 * If the lock is expected to not sleep just give up
1066 if (LK_TRYOP(flags)) {
1067 LOCK_LOG2(lk, "%s: %p fails the try operation",
1074 * Acquire the sleepqueue chain lock because we
1075 * probably will need to manipulate waiters flags.
1077 sleepq_lock(&lk->lock_object);
1081 * if the lock has been released while we spun on
1082 * the sleepqueue chain lock just try again.
1084 if (x == LK_UNLOCKED) {
1085 sleepq_release(&lk->lock_object);
1089 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1090 if ((x & ~v) == LK_UNLOCKED) {
1091 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1094 * If interruptible sleeps left the exclusive
1095 * queue empty, avoid starvation of the
1096 * threads sleeping on the shared queue by
1097 * giving them precedence and clearing the
1098 * exclusive waiters bit anyway.
1099 * Please note that the lk_exslpfail count may
1100 * overstate the real number of waiters with
1101 * the LK_SLEEPFAIL flag on because it may
1102 * be used in conjunction with interruptible
1103 * sleeps, so lk_exslpfail should be treated
1104 * as an upper bound, including the edge
1107 if (v & LK_EXCLUSIVE_WAITERS) {
1108 queue = SQ_EXCLUSIVE_QUEUE;
1109 v &= ~LK_EXCLUSIVE_WAITERS;
1113 * Exclusive waiters sleeping with
1114 * LK_SLEEPFAIL on and using
1115 * interruptible sleeps/timeouts may
1116 * have left spurious lk_exslpfail
1117 * counts, so clean them up anyway.
1119 MPASS(v & LK_SHARED_WAITERS);
1120 lk->lk_exslpfail = 0;
1121 queue = SQ_SHARED_QUEUE;
1122 v &= ~LK_SHARED_WAITERS;
1124 if (queue == SQ_EXCLUSIVE_QUEUE) {
1126 sleepq_sleepcnt(&lk->lock_object,
1127 SQ_EXCLUSIVE_QUEUE);
1128 if (lk->lk_exslpfail >= realexslp) {
1129 lk->lk_exslpfail = 0;
1130 queue = SQ_SHARED_QUEUE;
1131 v &= ~LK_SHARED_WAITERS;
1132 if (realexslp != 0) {
1134 "%s: %p has only LK_SLEEPFAIL sleepers",
1137 "%s: %p waking up threads on the exclusive queue",
1143 SQ_EXCLUSIVE_QUEUE);
1146 lk->lk_exslpfail = 0;
1148 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1149 sleepq_release(&lk->lock_object);
1153 "%s: %p waking up all threads on the %s queue",
1154 __func__, lk, queue == SQ_SHARED_QUEUE ?
1155 "shared" : "exclusive");
1156 wakeup_swapper |= sleepq_broadcast(
1157 &lk->lock_object, SLEEPQ_LK, 0, queue);
1160 * If shared waiters have been woken up, we need
1161 * to wait for one of them to acquire the lock
1162 * before setting the exclusive waiters flag in
1163 * order to avoid a deadlock.
1165 if (queue == SQ_SHARED_QUEUE) {
1166 for (v = lk->lk_lock;
1167 (v & LK_SHARE) && !LK_SHARERS(v);
1174 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
1175 * fail, loop back and retry.
1177 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1178 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1179 x | LK_EXCLUSIVE_WAITERS)) {
1180 sleepq_release(&lk->lock_object);
1183 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1188 * Since we have been unable to acquire the
1189 * exclusive lock and the exclusive waiters flag
1190 * is set, we will sleep.
1192 if (flags & LK_INTERLOCK) {
1193 class->lc_unlock(ilk);
1194 flags &= ~LK_INTERLOCK;
1197 sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1198 SQ_EXCLUSIVE_QUEUE);
1199 sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1201 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1206 lock_profile_obtain_lock_success(&lk->lock_object,
1207 contested, waittime, file, line);
1208 LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1209 lk->lk_recurse, file, line);
1210 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1211 LK_TRYWIT(flags), file, line);
1212 TD_LOCKS_INC(curthread);
1217 if (flags & LK_INTERLOCK)
1218 class->lc_unlock(ilk);
1219 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1222 if (flags & LK_INTERLOCK)
1223 class->lc_unlock(ilk);
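/*
 * Illustrative sketch of LK_INTERLOCK usage (not part of the original file;
 * "foo_mtx" and "foo_interlock_example" are hypothetical): the caller passes
 * in a held interlock, and lockmgr() releases it before any sleep, closing
 * the race between checking the protected state and blocking on the lock.
 */
#if 0	/* illustrative only */
static void
foo_interlock_example(struct lock *lk, struct mtx *foo_mtx)
{

	mtx_lock(foo_mtx);
	/* ... examine state protected by foo_mtx ... */
	lockmgr(lk, LK_EXCLUSIVE | LK_INTERLOCK, foo_mtx);
	/* foo_mtx has been dropped by lockmgr(); lk is now held. */
	lockmgr(lk, LK_RELEASE, NULL);
}
#endif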
1231 _lockmgr_disown(struct lock *lk, const char *file, int line)
1235 if (SCHEDULER_STOPPED())
1238 tid = (uintptr_t)curthread;
1239 _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
1242 * If the owner is already LK_KERNPROC just skip the whole operation.
1244 if (LK_HOLDER(lk->lk_lock) != tid)
1246 lock_profile_release_lock(&lk->lock_object);
1247 LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1248 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1249 TD_LOCKS_DEC(curthread);
1253 * In order to preserve waiters flags, just spin.
1257 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1258 x &= LK_ALL_WAITERS;
1259 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1267 lockmgr_printinfo(const struct lock *lk)
1272 if (lk->lk_lock == LK_UNLOCKED)
1273 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1274 else if (lk->lk_lock & LK_SHARE)
1275 printf("lock type %s: SHARED (count %ju)\n",
1276 lk->lock_object.lo_name,
1277 (uintmax_t)LK_SHARERS(lk->lk_lock));
1279 td = lockmgr_xholder(lk);
1280 printf("lock type %s: EXCL by thread %p "
1281 "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name, td,
1282 td->td_proc->p_pid, td->td_proc->p_comm, td->td_tid);
1286 if (x & LK_EXCLUSIVE_WAITERS)
1287 printf(" with exclusive waiters pending\n");
1288 if (x & LK_SHARED_WAITERS)
1289 printf(" with shared waiters pending\n");
1290 if (x & LK_EXCLUSIVE_SPINNERS)
1291 printf(" with exclusive spinners pending\n");
1297 lockstatus(const struct lock *lk)
1306 if ((x & LK_SHARE) == 0) {
1307 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1311 } else if (x == LK_UNLOCKED)
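/*
 * A usage note (not part of the original file): based on the checks above,
 * lockstatus() reports LK_EXCLUSIVE when curthread (or LK_KERNPROC) is the
 * exclusive owner and LK_SHARED when the lock is share-locked, so callers
 * that receive an already-locked object typically use it to verify the kind
 * of hold they were given, e.g. (reusing the hypothetical "fp" consumer
 * from the earlier sketch):
 *
 *	KASSERT(lockstatus(&fp->f_lock) == LK_EXCLUSIVE,
 *	    ("foo_modify: lock not exclusively held"));
 */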
1317 #ifdef INVARIANT_SUPPORT
1319 FEATURE(invariant_support,
1320 "Support for modules compiled with INVARIANTS option");
1323 #undef _lockmgr_assert
1327 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1331 if (panicstr != NULL)
1335 case KA_SLOCKED | KA_NOTRECURSED:
1336 case KA_SLOCKED | KA_RECURSED:
1339 case KA_LOCKED | KA_NOTRECURSED:
1340 case KA_LOCKED | KA_RECURSED:
1344 * We cannot trust WITNESS if the lock is held in exclusive
1345 * mode and a call to lockmgr_disown() happened.
1346 * Work around this by skipping the check if the lock is held in
1347 * exclusive mode even for the KA_LOCKED case.
1349 if (slocked || (lk->lk_lock & LK_SHARE)) {
1350 witness_assert(&lk->lock_object, what, file, line);
1354 if (lk->lk_lock == LK_UNLOCKED ||
1355 ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1356 (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1357 panic("Lock %s not %slocked @ %s:%d\n",
1358 lk->lock_object.lo_name, slocked ? "share" : "",
1361 if ((lk->lk_lock & LK_SHARE) == 0) {
1362 if (lockmgr_recursed(lk)) {
1363 if (what & KA_NOTRECURSED)
1364 panic("Lock %s recursed @ %s:%d\n",
1365 lk->lock_object.lo_name, file,
1367 } else if (what & KA_RECURSED)
1368 panic("Lock %s not recursed @ %s:%d\n",
1369 lk->lock_object.lo_name, file, line);
1373 case KA_XLOCKED | KA_NOTRECURSED:
1374 case KA_XLOCKED | KA_RECURSED:
1375 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1376 panic("Lock %s not exclusively locked @ %s:%d\n",
1377 lk->lock_object.lo_name, file, line);
1378 if (lockmgr_recursed(lk)) {
1379 if (what & KA_NOTRECURSED)
1380 panic("Lock %s recursed @ %s:%d\n",
1381 lk->lock_object.lo_name, file, line);
1382 } else if (what & KA_RECURSED)
1383 panic("Lock %s not recursed @ %s:%d\n",
1384 lk->lock_object.lo_name, file, line);
1387 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1388 panic("Lock %s exclusively locked @ %s:%d\n",
1389 lk->lock_object.lo_name, file, line);
1392 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1400 lockmgr_chain(struct thread *td, struct thread **ownerp)
1406 if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1408 db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1409 if (lk->lk_lock & LK_SHARE)
1410 db_printf("SHARED (count %ju)\n",
1411 (uintmax_t)LK_SHARERS(lk->lk_lock));
1413 db_printf("EXCL\n");
1414 *ownerp = lockmgr_xholder(lk);
1420 db_show_lockmgr(const struct lock_object *lock)
1423 const struct lock *lk;
1425 lk = (const struct lock *)lock;
1427 db_printf(" state: ");
1428 if (lk->lk_lock == LK_UNLOCKED)
1429 db_printf("UNLOCKED\n");
1430 else if (lk->lk_lock & LK_SHARE)
1431 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1433 td = lockmgr_xholder(lk);
1434 if (td == (struct thread *)LK_KERNPROC)
1435 db_printf("XLOCK: LK_KERNPROC\n");
1437 db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1438 td->td_tid, td->td_proc->p_pid,
1439 td->td_proc->p_comm);
1440 if (lockmgr_recursed(lk))
1441 db_printf(" recursed: %d\n", lk->lk_recurse);
1443 db_printf(" waiters: ");
1444 switch (lk->lk_lock & LK_ALL_WAITERS) {
1445 case LK_SHARED_WAITERS:
1446 db_printf("shared\n");
1448 case LK_EXCLUSIVE_WAITERS:
1449 db_printf("exclusive\n");
1451 case LK_ALL_WAITERS:
1452 db_printf("shared and exclusive\n");
1455 db_printf("none\n");
1457 db_printf(" spinners: ");
1458 if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1459 db_printf("exclusive\n");
1461 db_printf("none\n");