/*-
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
#include "opt_adaptive_lockmgrs.h"
#include "opt_ddb.h"
#include "opt_kdtrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/linker_set.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#include <sys/stack.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif
CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
    (LK_ADAPTIVE | LK_NOSHARE));
CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define	SQ_EXCLUSIVE_QUEUE	0
#define	SQ_SHARED_QUEUE		1
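
/*
 * Blocked threads are parked on two separate sleepqueue queues, one for
 * threads waiting for an exclusive lock and one for threads waiting for a
 * shared lock.  The wakeup paths below prefer the exclusive queue whenever
 * both have waiters, so that writers are not starved by a steady stream of
 * readers.
 */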
#ifndef INVARIANTS
#define	_lockmgr_assert(lk, what, file, line)
#define	TD_LOCKS_INC(td)
#define	TD_LOCKS_DEC(td)
#else
#define	TD_LOCKS_INC(td)	((td)->td_locks++)
#define	TD_LOCKS_DEC(td)	((td)->td_locks--)
#endif
#define	TD_SLOCKS_INC(td)	((td)->td_lk_slocks++)
#define	TD_SLOCKS_DEC(td)	((td)->td_lk_slocks--)
#ifndef DEBUG_LOCKS
#define	STACK_PRINT(lk)
#define	STACK_SAVE(lk)
#define	STACK_ZERO(lk)
#else
#define	STACK_PRINT(lk)	stack_print_ddb(&(lk)->lk_stack)
#define	STACK_SAVE(lk)	stack_save(&(lk)->lk_stack)
#define	STACK_ZERO(lk)	stack_zero(&(lk)->lk_stack)
#endif
#define	LOCK_LOG2(lk, string, arg1, arg2)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define	LOCK_LOG3(lk, string, arg1, arg2, arg3)				\
	if (LOCK_LOG_TEST(&(lk)->lock_object, 0))			\
		CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
#define	GIANT_DECLARE							\
	int _i = 0;							\
	WITNESS_SAVE_DECL(Giant)
#define	GIANT_RESTORE() do {						\
	if (_i > 0) {							\
		while (_i--)						\
			mtx_lock(&Giant);				\
		WITNESS_RESTORE(&Giant.lock_object, Giant);		\
	}								\
} while (0)
#define	GIANT_SAVE() do {						\
	if (mtx_owned(&Giant)) {					\
		WITNESS_SAVE(&Giant.lock_object, Giant);		\
		while (mtx_owned(&Giant)) {				\
			_i++;						\
			mtx_unlock(&Giant);				\
		}							\
	}								\
} while (0)
#define	LK_CAN_SHARE(x)							\
	(((x) & LK_SHARE) && (((x) & LK_EXCLUSIVE_WAITERS) == 0 ||	\
	((x) & LK_EXCLUSIVE_SPINNERS) == 0 ||				\
	curthread->td_lk_slocks || (curthread->td_pflags & TDP_DEADLKTREAT)))
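
/*
 * A shared request can be granted while the lock word still has LK_SHARE set
 * (the lock is unlocked or already shared) and either no exclusive waiter or
 * spinner is pending, or the requesting thread already holds shared lockmgr
 * locks, or it is marked with TDP_DEADLKTREAT; the last two exceptions keep
 * a reader that a pending writer is waiting behind from deadlocking against
 * that writer.
 */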
#define	LK_TRYOP(x)							\
	((x) & LK_NOWAIT)

#define	LK_CAN_WITNESS(x)						\
	(((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define	LK_TRYWIT(x)							\
	(LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define	LK_CAN_ADAPT(lk, f)						\
	(((lk)->lock_object.lo_flags & LK_ADAPTIVE) != 0 &&		\
	((f) & LK_SLEEPFAIL) == 0)

#define	lockmgr_disowned(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define	lockmgr_xlocked(lk)						\
	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
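
/*
 * The lock word (lk_lock) packs the whole state into a single uintptr_t: the
 * low bits are flags (LK_SHARE, the two waiters bits and
 * LK_EXCLUSIVE_SPINNERS) and the remaining bits hold either the owning
 * thread pointer, when the lock is held exclusively, or the count of
 * sharers, when LK_SHARE is set.  The two macros above recover the "pure"
 * owner by masking away every flag except LK_SHARE, which is always clear
 * for an exclusive holder.
 */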
static void	assert_lockmgr(struct lock_object *lock, int how);
#ifdef DDB
static void	db_show_lockmgr(struct lock_object *lock);
#endif
static void	lock_lockmgr(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int	owner_lockmgr(struct lock_object *lock, struct thread **owner);
#endif
static int	unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
	.lc_name = "lockmgr",
	.lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
	.lc_assert = assert_lockmgr,
#ifdef DDB
	.lc_ddb_show = db_show_lockmgr,
#endif
	.lc_lock = lock_lockmgr,
	.lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
	.lc_owner = owner_lockmgr,
#endif
};
#ifdef ADAPTIVE_LOCKMGRS
static u_int alk_retries = 10;
static u_int alk_loops = 10000;
SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif
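
/*
 * With ADAPTIVE_LOCKMGRS compiled in, the spinning behaviour can be
 * inspected and tuned at run time through the knobs declared above, e.g.
 * (shell session shown purely as an illustration):
 *
 *	# sysctl debug.lockmgr.retries
 *	# sysctl debug.lockmgr.loops=5000
 *
 * alk_retries bounds how many times a thread re-enters the shared spinning
 * loop and alk_loops bounds the iterations of each spin before the thread
 * falls back to sleeping.
 */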
static __inline struct thread *
lockmgr_xholder(struct lock *lk)
{
	uintptr_t x;

	x = lk->lk_lock;
	return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}
/*
 * This function is called with the sleepqueue chain lock held and returns
 * with it released.  It also assumes that the generic interlock is sane and
 * has previously been checked.  If LK_INTERLOCK is specified, the interlock
 * is not reacquired after the sleep.
 */
static int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
	GIANT_DECLARE;
	struct lock_class *class;
	int catch, error;

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	catch = pri & PCATCH;
	pri &= PRIMASK;
	error = 0;

	LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
	    (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	GIANT_SAVE();
	sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
	    SLEEPQ_INTERRUPTIBLE : 0), queue);
	if ((flags & LK_TIMELOCK) && timo)
		sleepq_set_timeout(&lk->lock_object, timo);

	/*
	 * Decide which flavour of sleep is appropriate.
	 */
	if ((flags & LK_TIMELOCK) && timo && catch)
		error = sleepq_timedwait_sig(&lk->lock_object, pri);
	else if ((flags & LK_TIMELOCK) && timo)
		error = sleepq_timedwait(&lk->lock_object, pri);
	else if (catch)
		error = sleepq_wait_sig(&lk->lock_object, pri);
	else
		sleepq_wait(&lk->lock_object, pri);
	GIANT_RESTORE();
	if ((flags & LK_SLEEPFAIL) && error == 0)
		error = ENOLCK;

	return (error);
}
static int
wakeupshlk(struct lock *lk, const char *file, int line)
{
	uintptr_t v, x;
	int queue, wakeup_swapper;

	TD_LOCKS_DEC(curthread);
	TD_SLOCKS_DEC(curthread);
	WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
	LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);

	wakeup_swapper = 0;
	for (;;) {
		x = lk->lk_lock;

		/*
		 * If more than one shared lock is held, just drop one
		 * and return.
		 */
		if (LK_SHARERS(x) > 1) {
			if (atomic_cmpset_ptr(&lk->lk_lock, x,
			    x - LK_ONE_SHARER))
				break;
			continue;
		}

		/*
		 * If there are no waiters on the exclusive queue, drop the
		 * lock quickly.
		 */
		if ((x & LK_ALL_WAITERS) == 0) {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARERS_LOCK(1));
			if (atomic_cmpset_ptr(&lk->lk_lock, x, LK_UNLOCKED))
				break;
			continue;
		}

		/*
		 * We should have a sharer with waiters, so enter the hard
		 * path in order to handle wakeups correctly.
		 */
		sleepq_lock(&lk->lock_object);
		x = lk->lk_lock & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
		v = LK_UNLOCKED;

		/*
		 * If the lock has exclusive waiters, give them preference in
		 * order to avoid a deadlock with the shared waiters queued
		 * up behind them.
		 */
		if (x & LK_EXCLUSIVE_WAITERS) {
			queue = SQ_EXCLUSIVE_QUEUE;
			v |= (x & LK_SHARED_WAITERS);
		} else {
			MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
			    LK_SHARED_WAITERS);
			queue = SQ_SHARED_QUEUE;
		}

		if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x,
		    v)) {
			sleepq_release(&lk->lock_object);
			continue;
		}
		LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
		    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
		    "exclusive");
		wakeup_swapper = sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
		    0, queue);
		sleepq_release(&lk->lock_object);
		break;
	}

	lock_profile_release_lock(&lk->lock_object);
	return (wakeup_swapper);
}
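
/*
 * The value returned here (and by __lockmgr_args() below) records whether
 * sleepq_broadcast() woke up the swapper; callers propagate it so that
 * kick_proc0() can be invoked once every sleepqueue lock has been dropped.
 */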
static void
assert_lockmgr(struct lock_object *lock, int what)
{

	panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, int how)
{

	panic("lockmgr locks do not support sleep interlocking");
}

static int
unlock_lockmgr(struct lock_object *lock)
{

	panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(struct lock_object *lock, struct thread **owner)
{

	panic("lockmgr locks do not support owner inquiring");
}
#endif
void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
	int iflags;

	MPASS((flags & ~LK_INIT_MASK) == 0);
	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
	    &lk->lk_lock));

	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
	if (flags & LK_CANRECURSE)
		iflags |= LO_RECURSABLE;
	if ((flags & LK_NODUP) == 0)
		iflags |= LO_DUPOK;
	if (flags & LK_NOPROFILE)
		iflags |= LO_NOPROFILE;
	if ((flags & LK_NOWITNESS) == 0)
		iflags |= LO_WITNESS;
	if (flags & LK_QUIET)
		iflags |= LO_QUIET;
	iflags |= flags & (LK_ADAPTIVE | LK_NOSHARE);

	lk->lk_lock = LK_UNLOCKED;
	lk->lk_recurse = 0;
	lk->lk_timo = timo;
	lk->lk_pri = pri;
	lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
	STACK_ZERO(lk);
}
void
lockdestroy(struct lock *lk)
{

	KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
	KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
	lock_destroy(&lk->lock_object);
}
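
/*
 * A minimal usage sketch of the lockmgr API implemented in this file; the
 * lock name, the PVFS priority and the zero flags below are illustrative
 * only:
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PVFS, "examplelk", 0, 0);
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	...				critical section
 *	lockmgr(&lk, LK_RELEASE, NULL);
 *	lockdestroy(&lk);
 *
 * The lockmgr() macro from <sys/lockmgr.h> ends up in __lockmgr_args()
 * below with the default wmesg, priority and timeout arguments.
 */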
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
	GIANT_DECLARE;
	struct lock_class *class;
	const char *iwmesg;
	uintptr_t tid, v, x;
	u_int op;
	int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
	uint64_t waittime = 0;
	int contested = 0;
#endif
#ifdef ADAPTIVE_LOCKMGRS
	volatile struct thread *owner;
	u_int i, spintries = 0;
#endif

	error = 0;
	tid = (uintptr_t)curthread;
	op = (flags & LK_TYPE_MASK);
	iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
	ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
	itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

	MPASS((flags & ~LK_TOTAL_MASK) == 0);
	KASSERT((op & (op - 1)) == 0,
	    ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
	KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
	    (op != LK_DOWNGRADE && op != LK_RELEASE),
	    ("%s: Invalid flags for the requested operation @ %s:%d",
	    __func__, file, line));
	KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
	    ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
	    __func__, file, line));

	class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
	if (panicstr != NULL) {
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		return (0);
	}
	/* LK_NOSHARE locks are served with an exclusive request. */
	if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE))
		op = LK_EXCLUSIVE;

	wakeup_swapper = 0;
	switch (op) {
	case LK_SHARED:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
			    file, line, ilk);
		for (;;) {
			x = lk->lk_lock;

			/*
			 * If no other thread has an exclusive lock, or
			 * no exclusive waiter is present, bump the count of
			 * sharers.  Since we have to preserve the state of
			 * the waiters, if we fail to acquire the shared lock
			 * loop back and retry.
			 */
			if (LK_CAN_SHARE(x)) {
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x + LK_ONE_SHARER))
					break;
				continue;
			}
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the lock is already held by curthread in
			 * exclusive mode, avoid a deadlock.
			 */
			if (LK_HOLDER(x) == tid) {
				LOCK_LOG2(lk,
				    "%s: %p already held in exclusive mode",
				    __func__, lk);
				error = EDEADLK;
				break;
			}

			/*
			 * If the operation is not expected to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					x = lk->lk_lock;
					if ((x & LK_SHARE) == 0 ||
					    LK_CAN_SHARE(x) != 0)
						break;
					cpu_spinwait();
				}
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate the waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock can be acquired in shared mode, try
			 * again.
			 */
			if (LK_CAN_SHARE(x)) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
			 * loop back and retry.
			 */
			if ((x & LK_SHARED_WAITERS) == 0) {
				if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    x | LK_SHARED_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set shared waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the shared
			 * lock and the shared waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_SHARED_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
			    line);
			TD_LOCKS_INC(curthread);
			TD_SLOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_UPGRADE:
		_lockmgr_assert(lk, KA_SLOCKED, file, line);
		v = lk->lk_lock;
		x = v & LK_ALL_WAITERS;
		v &= LK_EXCLUSIVE_SPINNERS;

		/*
		 * Try to switch from one shared lock to an exclusive one.
		 * We need to preserve the waiters flags during the operation.
		 */
		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
		    tid | x)) {
			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
			    line);
			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_SLOCKS_DEC(curthread);
			break;
		}

		/*
		 * We were unable to upgrade, so just give up the shared
		 * lock.
		 */
		wakeup_swapper |= wakeupshlk(lk, file, line);
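
		/*
		 * The upgrade can only succeed while the caller is the sole
		 * sharer; when the compare-and-set above fails, the shared
		 * lock is released and the request falls through to the
		 * LK_EXCLUSIVE path below to compete for the lock like any
		 * other writer.
		 */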
		/* FALLTHROUGH */
	case LK_EXCLUSIVE:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * If curthread already holds the lock and this one is
		 * allowed to recurse, simply recurse on it.
		 */
		if (lockmgr_xlocked(lk)) {
			if ((flags & LK_CANRECURSE) == 0 &&
			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {

				/*
				 * If the operation is not expected to panic,
				 * just give up and return.
				 */
				if (LK_TRYOP(flags)) {
					LOCK_LOG2(lk,
					    "%s: %p fails the try operation",
					    __func__, lk);
					error = EBUSY;
					break;
				}
				if (flags & LK_INTERLOCK)
					class->lc_unlock(ilk);
				panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n",
				    __func__, iwmesg, file, line);
			}
			lk->lk_recurse++;
			LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			break;
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED,
		    tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation is not expected to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * If the owner is running on another CPU, spin until
			 * the owner stops running or the state of the lock
			 * changes.
			 */
			x = lk->lk_lock;
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (LOCK_LOG_TEST(&lk->lock_object, 0))
					CTR3(KTR_LOCK,
					    "%s: spinning on %p held by %p",
					    __func__, lk, owner);

				/*
				 * If we are also holding an interlock, drop
				 * it in order to avoid a deadlock if the
				 * lockmgr owner is adaptively spinning on the
				 * interlock itself.
				 */
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				while (LK_HOLDER(lk->lk_lock) ==
				    (uintptr_t)owner && TD_IS_RUNNING(owner))
					cpu_spinwait();
			} else if (LK_CAN_ADAPT(lk, flags) &&
			    (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
			    spintries < alk_retries) {
				if ((x & LK_EXCLUSIVE_SPINNERS) == 0 &&
				    !atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_SPINNERS))
					continue;
				if (flags & LK_INTERLOCK) {
					class->lc_unlock(ilk);
					flags &= ~LK_INTERLOCK;
				}
				GIANT_SAVE();
				spintries++;
				for (i = 0; i < alk_loops; i++) {
					if (LOCK_LOG_TEST(&lk->lock_object, 0))
						CTR4(KTR_LOCK,
				    "%s: shared spinning on %p with %u and %u",
						    __func__, lk, spintries, i);
					if ((lk->lk_lock &
					    LK_EXCLUSIVE_SPINNERS) == 0)
						break;
					cpu_spinwait();
				}
				if (i != alk_loops)
					continue;
			}
#endif

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate the waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

#ifdef ADAPTIVE_LOCKMGRS
			/*
			 * The current lock owner might have started executing
			 * on another CPU (or the lock could have changed
			 * owner) while we were waiting on the sleepqueue
			 * chain lock.  If so, drop the sleepqueue lock and
			 * try again.
			 */
			if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
			    LK_HOLDER(x) != LK_KERNPROC) {
				owner = (struct thread *)LK_HOLDER(x);
				if (TD_IS_RUNNING(owner)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
			}
#endif

			/*
			 * The lock can be in the state where there is a
			 * pending queue of waiters, but still no owner.
			 * This happens when the lock is contested and an
			 * owner is going to claim the lock.
			 * If curthread is the one that successfully acquires
			 * it, claim lock ownership and return, preserving the
			 * waiters flags.
			 */
			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v &= ~LK_EXCLUSIVE_SPINNERS;
				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
				    tid | v)) {
					sleepq_release(&lk->lock_object);
					LOCK_LOG2(lk,
					    "%s: %p claimed by a new writer",
					    __func__, lk);
					break;
				}
				sleepq_release(&lk->lock_object);
				continue;
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set excl waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the exclusive
			 * lock and the exclusive waiters flag is set, we will
			 * sleep.
			 */
			error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
			    SQ_EXCLUSIVE_QUEUE);
			flags &= ~LK_INTERLOCK;
			if (error) {
				LOCK_LOG3(lk,
				    "%s: interrupted sleep for %p with %d",
				    __func__, lk, error);
				break;
			}
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}
		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
	case LK_DOWNGRADE:
		_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
		LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
		WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
		TD_SLOCKS_INC(curthread);

		/*
		 * In order to preserve the waiters flags, just spin.
		 */
		for (;;) {
			x = lk->lk_lock;
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			x &= LK_ALL_WAITERS;
			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
			    LK_SHARERS_LOCK(1) | x))
				break;
			cpu_spinwait();
		}
		break;
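
		/*
		 * The downgrade above rewrites the owner field from
		 * curthread to a count of one sharer while carrying the
		 * waiters bits over unchanged, so threads already queued on
		 * the sleepqueues are not lost across the transition.
		 */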
	case LK_RELEASE:
		_lockmgr_assert(lk, KA_LOCKED, file, line);
		x = lk->lk_lock;

		if ((x & LK_SHARE) == 0) {

			/*
			 * As a first option, treat the lock as if it has not
			 * been recursed.
			 * Fix up the tid variable if the lock has been
			 * disowned.
			 */
			if (LK_HOLDER(x) == LK_KERNPROC)
				tid = LK_KERNPROC;
			else {
				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
				    file, line);
				TD_LOCKS_DEC(curthread);
			}
			LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);

			/*
			 * The lock is held in exclusive mode.
			 * If the lock is also recursed, then unrecurse it.
			 */
			if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
				LOCK_LOG2(lk, "%s: %p unrecursing", __func__,
				    lk);
				lk->lk_recurse--;
				break;
			}
			if (tid != LK_KERNPROC)
				lock_profile_release_lock(&lk->lock_object);

			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
			    LK_UNLOCKED))
				break;

			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;
			v = LK_UNLOCKED;

			/*
			 * If the lock has exclusive waiters, give them
			 * preference in order to avoid a deadlock with the
			 * shared waiters queued up behind them.
			 */
			MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
			if (x & LK_EXCLUSIVE_WAITERS) {
				queue = SQ_EXCLUSIVE_QUEUE;
				v |= (x & LK_SHARED_WAITERS);
			} else {
				MPASS((x & LK_ALL_WAITERS) ==
				    LK_SHARED_WAITERS);
				queue = SQ_SHARED_QUEUE;
			}

			LOCK_LOG3(lk,
			    "%s: %p waking up threads on the %s queue",
			    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
			    "exclusive");
			atomic_store_rel_ptr(&lk->lk_lock, v);
			wakeup_swapper = sleepq_broadcast(&lk->lock_object,
			    SLEEPQ_LK, 0, queue);
			sleepq_release(&lk->lock_object);
			break;
		} else
			wakeup_swapper = wakeupshlk(lk, file, line);
		break;
	case LK_DRAIN:
		if (LK_CAN_WITNESS(flags))
			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
			    LOP_EXCLUSIVE, file, line, ilk);

		/*
		 * Trying to drain a lock we already own will result in a
		 * deadlock.
		 */
		if (lockmgr_xlocked(lk)) {
			if (flags & LK_INTERLOCK)
				class->lc_unlock(ilk);
			panic("%s: draining %s with the lock held @ %s:%d\n",
			    __func__, iwmesg, file, line);
		}

		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
			lock_profile_obtain_lock_failed(&lk->lock_object,
			    &contested, &waittime);

			/*
			 * If the operation is not expected to sleep, just
			 * give up and return.
			 */
			if (LK_TRYOP(flags)) {
				LOCK_LOG2(lk, "%s: %p fails the try operation",
				    __func__, lk);
				error = EBUSY;
				break;
			}

			/*
			 * Acquire the sleepqueue chain lock because we
			 * probably will need to manipulate the waiters flags.
			 */
			sleepq_lock(&lk->lock_object);
			x = lk->lk_lock;

			/*
			 * If the lock has been released while we spun on
			 * the sleepqueue chain lock, just try again.
			 */
			if (x == LK_UNLOCKED) {
				sleepq_release(&lk->lock_object);
				continue;
			}

			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
			if ((x & ~v) == LK_UNLOCKED) {
				v = (x & ~LK_EXCLUSIVE_SPINNERS);
				if (v & LK_EXCLUSIVE_WAITERS) {
					queue = SQ_EXCLUSIVE_QUEUE;
					v &= ~LK_EXCLUSIVE_WAITERS;
				} else {
					MPASS(v & LK_SHARED_WAITERS);
					queue = SQ_SHARED_QUEUE;
					v &= ~LK_SHARED_WAITERS;
				}
				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG3(lk,
				    "%s: %p waking up all threads on the %s queue",
				    __func__, lk, queue == SQ_SHARED_QUEUE ?
				    "shared" : "exclusive");
				wakeup_swapper |= sleepq_broadcast(
				    &lk->lock_object, SLEEPQ_LK, 0, queue);

				/*
				 * If shared waiters have been woken up, we
				 * need to wait for one of them to acquire the
				 * lock before setting the exclusive waiters
				 * flag, in order to avoid a deadlock.
				 */
				if (queue == SQ_SHARED_QUEUE) {
					for (v = lk->lk_lock;
					    (v & LK_SHARE) && !LK_SHARERS(v);
					    v = lk->lk_lock)
						cpu_spinwait();
				}
			}

			/*
			 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
			 * fail, loop back and retry.
			 */
			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
				    x | LK_EXCLUSIVE_WAITERS)) {
					sleepq_release(&lk->lock_object);
					continue;
				}
				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
				    __func__, lk);
			}

			/*
			 * Since we have been unable to acquire the exclusive
			 * lock and the exclusive waiters flag is set, we
			 * will sleep.
			 */
			if (flags & LK_INTERLOCK) {
				class->lc_unlock(ilk);
				flags &= ~LK_INTERLOCK;
			}
			GIANT_SAVE();
			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
			    SQ_EXCLUSIVE_QUEUE);
			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
			GIANT_RESTORE();
			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
			    __func__, lk);
		}

		if (error == 0) {
			lock_profile_obtain_lock_success(&lk->lock_object,
			    contested, waittime, file, line);
			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
			    lk->lk_recurse, file, line);
			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
			    LK_TRYWIT(flags), file, line);
			TD_LOCKS_INC(curthread);
			STACK_SAVE(lk);
		}
		break;
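
		/*
		 * A successful LK_DRAIN leaves the caller holding the lock
		 * exclusively with every previously queued waiter already
		 * flushed from the sleepqueues; it is typically used right
		 * before lockdestroy(), so that the lock can be torn down
		 * with no holders and no waiters left behind.
		 */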
	default:
		if (flags & LK_INTERLOCK)
			class->lc_unlock(ilk);
		panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
	}

	if (flags & LK_INTERLOCK)
		class->lc_unlock(ilk);
	if (wakeup_swapper)
		kick_proc0();

	return (error);
}
void
_lockmgr_disown(struct lock *lk, const char *file, int line)
{
	uintptr_t tid, x;

	tid = (uintptr_t)curthread;
	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);

	/*
	 * If the owner is already LK_KERNPROC, just skip the whole operation.
	 */
	if (LK_HOLDER(lk->lk_lock) != tid)
		return;
	lock_profile_release_lock(&lk->lock_object);
	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
	TD_LOCKS_DEC(curthread);

	/*
	 * In order to preserve the waiters flags, just spin.
	 */
	for (;;) {
		x = lk->lk_lock;
		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
		x &= LK_ALL_WAITERS;
		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
		    LK_KERNPROC | x))
			return;
		cpu_spinwait();
	}
}
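
/*
 * After a disown the owner field of the lock word is LK_KERNPROC, so the
 * lock is no longer attributed to any particular thread and may be released
 * by a different thread later on; lockmgr_disowned() above checks for
 * exactly this state.
 */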
void
lockmgr_printinfo(struct lock *lk)
{
	struct thread *td;
	uintptr_t x;

	if (lk->lk_lock == LK_UNLOCKED)
		printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
	else if (lk->lk_lock & LK_SHARE)
		printf("lock type %s: SHARED (count %ju)\n",
		    lk->lock_object.lo_name,
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		printf("lock type %s: EXCL by thread %p (pid %d)\n",
		    lk->lock_object.lo_name, td, td->td_proc->p_pid);
	}

	x = lk->lk_lock;
	if (x & LK_EXCLUSIVE_WAITERS)
		printf(" with exclusive waiters pending\n");
	if (x & LK_SHARED_WAITERS)
		printf(" with shared waiters pending\n");
	if (x & LK_EXCLUSIVE_SPINNERS)
		printf(" with exclusive spinners pending\n");

	STACK_PRINT(lk);
}
lockstatus(struct lock *lk)
	if ((x & LK_SHARE) == 0) {
		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
	} else if (x == LK_UNLOCKED)
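
/*
 * lockstatus() reports the state of the lock with respect to curthread:
 * LK_EXCLUSIVE when curthread (or LK_KERNPROC, after a disown) owns it
 * exclusively, LK_EXCLOTHER when another thread owns it exclusively,
 * LK_SHARED when it is held in shared mode and 0 when it is unlocked.
 */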
#ifdef INVARIANT_SUPPORT
#ifndef INVARIANTS
#undef	_lockmgr_assert
#endif

void
_lockmgr_assert(struct lock *lk, int what, const char *file, int line)
{
	int slocked = 0;

	if (panicstr != NULL)
		return;
	switch (what) {
	case KA_SLOCKED:
	case KA_SLOCKED | KA_NOTRECURSED:
	case KA_SLOCKED | KA_RECURSED:
		slocked = 1;
	case KA_LOCKED:
	case KA_LOCKED | KA_NOTRECURSED:
	case KA_LOCKED | KA_RECURSED:
#ifdef WITNESS

		/*
		 * We cannot trust WITNESS if the lock is held in exclusive
		 * mode and a call to lockmgr_disown() happened.
		 * Work around this by skipping the check if the lock is held
		 * in exclusive mode, even for the KA_LOCKED case.
		 */
		if (slocked || (lk->lk_lock & LK_SHARE)) {
			witness_assert(&lk->lock_object, what, file, line);
			break;
		}
#endif
		if (lk->lk_lock == LK_UNLOCKED ||
		    ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
		    (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
			panic("Lock %s not %slocked @ %s:%d\n",
			    lk->lock_object.lo_name, slocked ? "share" : "",
			    file, line);

		if ((lk->lk_lock & LK_SHARE) == 0) {
			if (lockmgr_recursed(lk)) {
				if (what & KA_NOTRECURSED)
					panic("Lock %s recursed @ %s:%d\n",
					    lk->lock_object.lo_name, file,
					    line);
			} else if (what & KA_RECURSED)
				panic("Lock %s not recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		}
		break;
	case KA_XLOCKED:
	case KA_XLOCKED | KA_NOTRECURSED:
	case KA_XLOCKED | KA_RECURSED:
		if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
			panic("Lock %s not exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		if (lockmgr_recursed(lk)) {
			if (what & KA_NOTRECURSED)
				panic("Lock %s recursed @ %s:%d\n",
				    lk->lock_object.lo_name, file, line);
		} else if (what & KA_RECURSED)
			panic("Lock %s not recursed @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	case KA_UNLOCKED:
		if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
			panic("Lock %s exclusively locked @ %s:%d\n",
			    lk->lock_object.lo_name, file, line);
		break;
	default:
		panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
		    line);
	}
}
#endif
#ifdef DDB
int
lockmgr_chain(struct thread *td, struct thread **ownerp)
{
	struct lock *lk;

	lk = td->td_wchan;

	if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
		return (0);
	db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
	if (lk->lk_lock & LK_SHARE)
		db_printf("SHARED (count %ju)\n",
		    (uintmax_t)LK_SHARERS(lk->lk_lock));
	else
		db_printf("EXCL\n");
	*ownerp = lockmgr_xholder(lk);

	return (1);
}
static void
db_show_lockmgr(struct lock_object *lock)
{
	struct thread *td;
	struct lock *lk;

	lk = (struct lock *)lock;

	db_printf(" state: ");
	if (lk->lk_lock == LK_UNLOCKED)
		db_printf("UNLOCKED\n");
	else if (lk->lk_lock & LK_SHARE)
		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
	else {
		td = lockmgr_xholder(lk);
		if (td == (struct thread *)LK_KERNPROC)
			db_printf("XLOCK: LK_KERNPROC\n");
		else
			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
			    td->td_tid, td->td_proc->p_pid,
			    td->td_proc->p_comm);
		if (lockmgr_recursed(lk))
			db_printf(" recursed: %d\n", lk->lk_recurse);
	}
	db_printf(" waiters: ");
	switch (lk->lk_lock & LK_ALL_WAITERS) {
	case LK_SHARED_WAITERS:
		db_printf("shared\n");
		break;
	case LK_EXCLUSIVE_WAITERS:
		db_printf("exclusive\n");
		break;
	case LK_ALL_WAITERS:
		db_printf("shared and exclusive\n");
		break;
	default:
		db_printf("none\n");
	}
	db_printf(" spinners: ");
	if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
		db_printf("exclusive\n");
	else
		db_printf("none\n");
}
#endif