/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
CTASSERT(((LK_CANRECURSE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_CANRECURSE | LK_NOSHARE));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1
#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif
#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)
#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
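
/*
 * Illustrative note (not from the original sources): LK_CAN_SHARE()
 * implements a writer-preference policy.  A new shared request is refused
 * while LK_EXCLUSIVE_WAITERS is set in the lock word, unless the
 * requesting thread already holds shared lockmgr locks
 * (td_lk_slocks != 0) or is marked TDP_DEADLKTREAT; in those exceptional
 * cases blocking the thread behind a pending writer could deadlock, so
 * briefly starving the writer is the lesser evil.
 */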
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)
#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)
#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
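
/*
 * Illustrative note (a sketch derived from the flag usage above): both
 * macros strip the waiters bits (LK_FLAGMASK minus LK_SHARE) from the
 * lock word before comparing, so they keep working while waiters come
 * and go.  For example, a disowned exclusive lock with pending exclusive
 * waiters has lk_lock == LK_KERNPROC | LK_EXCLUSIVE_WAITERS and still
 * satisfies lockmgr_disowned().
 */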
static void	assert_lockmgr(struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, int how);
static int	unlock_lockmgr(struct lock_object *lock);
struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr
};
static __inline struct thread *
lockmgr_xholder(struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}
/*
 * This function assumes the sleepqueue chain lock is held on entry and
 * returns with it released.  It also assumes the generic interlock is sane
 * and has already been checked by the caller.  If LK_INTERLOCK is specified,
 * the interlock is not reacquired after the sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Pick the sleepqueue wait variant matching the timeout and
	 * signal-catching requirements.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}
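
/*
 * Usage note (an illustrative sketch): the priority argument carries both
 * the sleep priority and the PCATCH flag, so a caller wanting an
 * interruptible sleep at VFS priority would pass, e.g., "PVFS | PCATCH";
 * sleeplk() strips PCATCH via PRIMASK before handing the bare priority to
 * the sleepqueue primitives.
 */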
static __inline void
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	int queue;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	for (;;) {
		x = lk->lk_lock;

		/*
		 * If there is more than one shared lock held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS(x == LK_SHARERS_LOCK(1));
			if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1),
			    LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & LK_ALL_WAITERS;
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid deadlock with shared runners-up.
		 */
		if (x & LK_EXCLUSIVE_WAITERS) {
			queue = SQ_EXCLUSIVE_QUEUE;
			v |= (x & LK_SHARED_WAITERS);
		} else {
			MPASS(x == LK_SHARED_WAITERS);
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
}
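
/*
 * Worked example of the wakeup policy above (an illustrative sketch,
 * values schematic): if the last sharer drops the lock while both waiter
 * bits are set, lk_lock == LK_SHARERS_LOCK(1) | LK_ALL_WAITERS.  The CAS
 * installs LK_UNLOCKED | LK_SHARED_WAITERS and broadcasts only the
 * exclusive queue, so woken writers keep their preference and new readers
 * are refused by LK_CAN_SHARE() until the writers are through.
 */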
static void
assert_lockmgr(struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}
void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);

	iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	iflags |= flags & (LK_CANRECURSE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	STACK_ZERO(lk);
}
void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	lock_destroy(&lk->lock_object);
}
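
/*
 * Usage sketch (illustrative only; "ex_lock" and the flag choices are
 * hypothetical, not from this file): a typical consumer pairs these entry
 * points as follows, driving acquire and release through the lockmgr()
 * front-end macro.
 *
 *	struct lock ex_lock;
 *
 *	lockinit(&ex_lock, PVFS, "examplk", 0, 0);
 *	(void)lockmgr(&ex_lock, LK_EXCLUSIVE, NULL);
 *	...modify the protected object...
 *	(void)lockmgr(&ex_lock, LK_RELEASE, NULL);
 *	lockdestroy(&ex_lock);
 */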
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op;
	uint64_t waittime;
	int contested, error, ipri, itimo, queue;

	contested = 0;
	error = 0;
	waittime = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags in regard of the operation desired @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}

	if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
		op = LK_EXCLUSIVE;

	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the operation is not allowed to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_UPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		x = lk->lk_lock & LK_ALL_WAITERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * We have been unable to succeed in upgrading, so just
		 * give up the shared lock.
		 */
		wakeupshlk(lk, file, line);

		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line);

		/*
		 * If curthread already holds the lock and it is allowed
		 * to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LK_CANRECURSE) == 0) {

				/*
				 * If the operation is not expected to panic,
				 * just give up and return.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
				panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation is not allowed to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = x & LK_ALL_WAITERS;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one successfully acquiring it,
			 * claim lock ownership and return, preserving waiters
			 * flags.
			 */
			if (x == (LK_UNLOCKED | v)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock & LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first option, treat the lock as if it has no
			 * waiters.
			 * Fix up the tid var if the lock has been disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is recursed also, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock & LK_ALL_WAITERS;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid deadlock with
			 * shared runners-up.
			 */
			if (x & LK_EXCLUSIVE_WAITERS) {
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				MPASS(x == LK_SHARED_WAITERS);
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeupshlk(lk, file, line);
		break;
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation is not allowed to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = x & LK_ALL_WAITERS;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			if (x == (LK_UNLOCKED | v)) {
				v = x;
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {
					MPASS(v & LK_SHARED_WAITERS);
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				"%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
				    0, queue);

				/*
				 * If shared waiters have been woken up, we
				 * need to wait for one of them to acquire
				 * the lock before setting the exclusive
				 * waiters flag in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the
			 * exclusive lock and the exclusive waiters flag
			 * is set, we will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);

	return (error);
}
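
/*
 * Usage sketch for the interlock protocol (illustrative only; "obj" and
 * its fields are hypothetical): a caller that inspects state under a
 * mutex can atomically trade that mutex for the lockmgr lock.  Whether or
 * not the thread sleeps, the interlock is released before lockmgr()
 * returns, so the caller must not unlock it again.
 *
 *	mtx_lock(&obj->o_mtx);
 *	...inspect object state...
 *	(void)lockmgr(&obj->o_lock, LK_EXCLUSIVE | LK_INTERLOCK,
 *	    &obj->o_mtx);
 *	// obj->o_mtx is now unlocked; obj->o_lock is held exclusively.
 */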
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);

	/*
	 * If the owner is already LK_KERNPROC, just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);

	/*
	 * In order to preserve waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock & LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}
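
/*
 * Usage sketch (illustrative only; "bp" is hypothetical and the
 * lockmgr_disown() front-end macro is assumed): disowning suits hand-off
 * protocols such as async I/O, where the thread starting the operation
 * will not be the one completing it.
 *
 *	(void)lockmgr(&bp->b_lock, LK_EXCLUSIVE, NULL);
 *	lockmgr_disown(&bp->b_lock);
 *	// ...later, any thread may release the disowned lock:
 *	(void)lockmgr(&bp->b_lock, LK_RELEASE, NULL);
 */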
void
lockmgr_printinfo(struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf(" lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf(" lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		printf(" lock type %s: EXCL by thread %p (pid %d)\n",
		    lk->lock_object.lo_name, td, td->td_proc->p_pid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");

	STACK_PRINT(lk);
}
int
lockstatus(struct lock *lk)
{
	uintptr_t v, x;
	int ret;

	ret = LK_SHARED;
	x = lk->lk_lock;
	v = LK_HOLDER(x);

	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
			ret = LK_EXCLUSIVE;
		else
			ret = LK_EXCLOTHER;
	} else if (x == LK_UNLOCKED)
		ret = 0;

	return (ret);
}
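
/*
 * Usage sketch (illustrative only; "obj" is hypothetical): lockstatus()
 * classifies the lock relative to curthread, so consumers typically use
 * it in assertions, e.g.:
 *
 *	KASSERT(lockstatus(&obj->o_lock) == LK_EXCLUSIVE,
 *	    ("object %p not exclusively locked", obj));
 */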
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif

void
_lockmgr_assert(struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
		/* FALLTHROUGH */
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is held
		 * in exclusive mode, even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif
#ifdef DDB
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lk;

	lk = td->td_wchan;

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		db_printf("EXCL\n");
		*ownerp = lockmgr_xholder(lk);
	}

	return (1);
}
static void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lk;

	lk = (struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
}
#endif