1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice(s), this list of conditions and the following disclaimer as
12  *    the first lines of this file unmodified other than the possible
13  *    addition of one or more copyright notices.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice(s), this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28  * DAMAGE.
29  */
30
31 #include "opt_ddb.h"
32 #include "opt_hwpmc_hooks.h"
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include <sys/param.h>
38 #include <sys/kdb.h>
39 #include <sys/ktr.h>
40 #include <sys/limits.h>
41 #include <sys/lock.h>
42 #include <sys/lock_profile.h>
43 #include <sys/lockmgr.h>
44 #include <sys/lockstat.h>
45 #include <sys/mutex.h>
46 #include <sys/proc.h>
47 #include <sys/sleepqueue.h>
48 #ifdef DEBUG_LOCKS
49 #include <sys/stack.h>
50 #endif
51 #include <sys/sysctl.h>
52 #include <sys/systm.h>
53
54 #include <machine/cpu.h>
55
56 #ifdef DDB
57 #include <ddb/ddb.h>
58 #endif
59
60 #ifdef HWPMC_HOOKS
61 #include <sys/pmckern.h>
62 PMC_SOFT_DECLARE( , , lock, failed);
63 #endif
64
65 /*
66  * Hack. There should be prio_t or similar so that this is not necessary.
67  */
68 _Static_assert((PRILASTFLAG * 2) - 1 <= USHRT_MAX,
69     "prio flags won't fit in u_short pri in struct lock");
70
71 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
72     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
73
74 #define SQ_EXCLUSIVE_QUEUE      0
75 #define SQ_SHARED_QUEUE         1
76
77 #ifndef INVARIANTS
78 #define _lockmgr_assert(lk, what, file, line)
79 #endif
80
81 #define TD_SLOCKS_INC(td)       ((td)->td_lk_slocks++)
82 #define TD_SLOCKS_DEC(td)       ((td)->td_lk_slocks--)
83
84 #ifndef DEBUG_LOCKS
85 #define STACK_PRINT(lk)
86 #define STACK_SAVE(lk)
87 #define STACK_ZERO(lk)
88 #else
89 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
90 #define STACK_SAVE(lk)  stack_save(&(lk)->lk_stack)
91 #define STACK_ZERO(lk)  stack_zero(&(lk)->lk_stack)
92 #endif
93
94 #define LOCK_LOG2(lk, string, arg1, arg2)                               \
95         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
96                 CTR2(KTR_LOCK, (string), (arg1), (arg2))
97 #define LOCK_LOG3(lk, string, arg1, arg2, arg3)                         \
98         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
99                 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
100
101 #define GIANT_DECLARE                                                   \
102         int _i = 0;                                                     \
103         WITNESS_SAVE_DECL(Giant)
104 #define GIANT_RESTORE() do {                                            \
105         if (__predict_false(_i > 0)) {                                  \
106                 while (_i--)                                            \
107                         mtx_lock(&Giant);                               \
108                 WITNESS_RESTORE(&Giant.lock_object, Giant);             \
109         }                                                               \
110 } while (0)
111 #define GIANT_SAVE() do {                                               \
112         if (__predict_false(mtx_owned(&Giant))) {                       \
113                 WITNESS_SAVE(&Giant.lock_object, Giant);                \
114                 while (mtx_owned(&Giant)) {                             \
115                         _i++;                                           \
116                         mtx_unlock(&Giant);                             \
117                 }                                                       \
118         }                                                               \
119 } while (0)
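
/*
 * The three Giant helpers above are meant to be used together; a minimal
 * sketch of the pattern, mirroring their use in sleeplk() below:
 *
 *        GIANT_DECLARE;
 *        ...
 *        GIANT_SAVE();            drop Giant completely before blocking
 *        sleepq_wait(...);        or any other sleeping point
 *        GIANT_RESTORE();         reacquire Giant as many times as it was held
 */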
120
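/*
 * LK_CAN_SHARE() reports whether a new shared acquisition is permitted for
 * the lock state 'x': the lock must be share-locked with no exclusive
 * waiters or spinners.  Outside of the fast path ('fp' false), a share-locked
 * lock with exclusive waiters may still be shared when the calling thread
 * already holds shared lockmgr locks (and LK_NODDLKTREAT was not passed) or
 * has TDP_DEADLKTREAT set, letting it jump ahead of the exclusive waiters
 * instead of deadlocking against itself.
 */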
121 static bool __always_inline
122 LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
123 {
124
125         if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
126             LK_SHARE)
127                 return (true);
128         if (fp || (!(x & LK_SHARE)))
129                 return (false);
130         if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
131             (curthread->td_pflags & TDP_DEADLKTREAT))
132                 return (true);
133         return (false);
134 }
135
136 #define LK_TRYOP(x)                                                     \
137         ((x) & LK_NOWAIT)
138
139 #define LK_CAN_WITNESS(x)                                               \
140         (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
141 #define LK_TRYWIT(x)                                                    \
142         (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
143
144 #define lockmgr_disowned(lk)                                            \
145         (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
146
147 #define lockmgr_xlocked_v(v)                                            \
148         (((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
149
150 #define lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))
151
152 static void     assert_lockmgr(const struct lock_object *lock, int how);
153 #ifdef DDB
154 static void     db_show_lockmgr(const struct lock_object *lock);
155 #endif
156 static void     lock_lockmgr(struct lock_object *lock, uintptr_t how);
157 #ifdef KDTRACE_HOOKS
158 static int      owner_lockmgr(const struct lock_object *lock,
159                     struct thread **owner);
160 #endif
161 static uintptr_t unlock_lockmgr(struct lock_object *lock);
162
163 struct lock_class lock_class_lockmgr = {
164         .lc_name = "lockmgr",
165         .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
166         .lc_assert = assert_lockmgr,
167 #ifdef DDB
168         .lc_ddb_show = db_show_lockmgr,
169 #endif
170         .lc_lock = lock_lockmgr,
171         .lc_unlock = unlock_lockmgr,
172 #ifdef KDTRACE_HOOKS
173         .lc_owner = owner_lockmgr,
174 #endif
175 };
176
177 static __read_mostly bool lk_adaptive = true;
178 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
179 SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
180     0, "");
181 #define lockmgr_delay  locks_delay
182
183 struct lockmgr_wait {
184         const char *iwmesg;
185         int ipri;
186         int itimo;
187 };
188
189 static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
190     int flags, bool fp);
191 static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);
192
193 static void
194 lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
195 {
196         struct lock_class *class;
197
198         if (flags & LK_INTERLOCK) {
199                 class = LOCK_CLASS(ilk);
200                 class->lc_unlock(ilk);
201         }
202
203         if (__predict_false(wakeup_swapper))
204                 kick_proc0();
205 }
206
207 static void
208 lockmgr_note_shared_acquire(struct lock *lk, int contested,
209     uint64_t waittime, const char *file, int line, int flags)
210 {
211
212         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
213             waittime, file, line, LOCKSTAT_READER);
214         LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
215         WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
216         TD_LOCKS_INC(curthread);
217         TD_SLOCKS_INC(curthread);
218         STACK_SAVE(lk);
219 }
220
221 static void
222 lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
223 {
224
225         WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
226         LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
227         TD_LOCKS_DEC(curthread);
228         TD_SLOCKS_DEC(curthread);
229 }
230
231 static void
232 lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
233     uint64_t waittime, const char *file, int line, int flags)
234 {
235
236         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
237             waittime, file, line, LOCKSTAT_WRITER);
238         LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
239         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
240             line);
241         TD_LOCKS_INC(curthread);
242         STACK_SAVE(lk);
243 }
244
245 static void
246 lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
247 {
248
249         if (LK_HOLDER(lockmgr_read_value(lk)) != LK_KERNPROC) {
250                 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
251                 TD_LOCKS_DEC(curthread);
252         }
253         LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
254             line);
255 }
256
257 static __inline struct thread *
258 lockmgr_xholder(const struct lock *lk)
259 {
260         uintptr_t x;
261
262         x = lockmgr_read_value(lk);
263         return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
264 }
265
266 /*
267  * Assumes the sleepqueue chain lock is held on entry and returns with it
268  * released.  It also assumes the generic interlock is sane and has been
269  * checked by the caller.  If LK_INTERLOCK is specified the interlock is
270  * not reacquired after the sleep.
271  */
272 static __inline int
273 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
274     const char *wmesg, int pri, int timo, int queue)
275 {
276         GIANT_DECLARE;
277         struct lock_class *class;
278         int catch, error;
279
280         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
281         catch = pri & PCATCH;
282         pri &= PRIMASK;
283         error = 0;
284
285         LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
286             (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
287
288         if (flags & LK_INTERLOCK)
289                 class->lc_unlock(ilk);
290         if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0) {
291                 if (lk->lk_exslpfail < USHRT_MAX)
292                         lk->lk_exslpfail++;
293         }
294         GIANT_SAVE();
295         sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
296             SLEEPQ_INTERRUPTIBLE : 0), queue);
297         if ((flags & LK_TIMELOCK) && timo)
298                 sleepq_set_timeout(&lk->lock_object, timo);
299
300         /*
301          * Decide which flavor of sleep to perform.
302          */
303         if ((flags & LK_TIMELOCK) && timo && catch)
304                 error = sleepq_timedwait_sig(&lk->lock_object, pri);
305         else if ((flags & LK_TIMELOCK) && timo)
306                 error = sleepq_timedwait(&lk->lock_object, pri);
307         else if (catch)
308                 error = sleepq_wait_sig(&lk->lock_object, pri);
309         else
310                 sleepq_wait(&lk->lock_object, pri);
311         GIANT_RESTORE();
312         if ((flags & LK_SLEEPFAIL) && error == 0)
313                 error = ENOLCK;
314
315         return (error);
316 }
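
/*
 * Callers follow the sequence used by the slow paths below; a rough sketch
 * (only names already defined in this file):
 *
 *        sleepq_lock(&lk->lock_object);
 *        x = lockmgr_read_value(lk);
 *        ... recheck the state and set the relevant LK_*_WAITERS bit ...
 *        error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo, SQ_SHARED_QUEUE);
 *
 * sleeplk() returns with the sleepqueue chain lock released and, if
 * LK_INTERLOCK was passed, with the interlock left unlocked as well.
 */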
317
318 static __inline int
319 wakeupshlk(struct lock *lk, const char *file, int line)
320 {
321         uintptr_t v, x, orig_x;
322         u_int realexslp;
323         int queue, wakeup_swapper;
324
325         wakeup_swapper = 0;
326         for (;;) {
327                 x = lockmgr_read_value(lk);
328                 if (lockmgr_sunlock_try(lk, &x))
329                         break;
330
331                 /*
332                  * We should have a sharer with waiters, so enter the hard
333                  * path in order to handle wakeups correctly.
334                  */
335                 sleepq_lock(&lk->lock_object);
336                 orig_x = lockmgr_read_value(lk);
337 retry_sleepq:
338                 x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
339                 v = LK_UNLOCKED;
340
341                 /*
342                  * If the lock has exclusive waiters, give them preference
343                  * in order to avoid a deadlock with the shared runners-up.
344                  * If interruptible sleeps left the exclusive queue empty,
345                  * avoid starving the threads sleeping on the shared queue
346                  * by giving them precedence and clearing the exclusive
347                  * waiters bit anyway.
348                  * Note that the lk_exslpfail count may overstate the real
349                  * number of waiters carrying the LK_SLEEPFAIL flag, since
350                  * such waiters may also be using interruptible sleeps or
351                  * timeouts; treat it as an upper bound, including the
352                  * edge cases.
353                  */
354                 realexslp = sleepq_sleepcnt(&lk->lock_object,
355                     SQ_EXCLUSIVE_QUEUE);
356                 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
357                         if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
358                                 lk->lk_exslpfail = 0;
359                                 queue = SQ_EXCLUSIVE_QUEUE;
360                                 v |= (x & LK_SHARED_WAITERS);
361                         } else {
362                                 lk->lk_exslpfail = 0;
363                                 LOCK_LOG2(lk,
364                                     "%s: %p has only LK_SLEEPFAIL sleepers",
365                                     __func__, lk);
366                                 LOCK_LOG2(lk,
367                             "%s: %p waking up threads on the exclusive queue",
368                                     __func__, lk);
369                                 wakeup_swapper =
370                                     sleepq_broadcast(&lk->lock_object,
371                                     SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
372                                 queue = SQ_SHARED_QUEUE;
373                         }
374                 } else {
375                         /*
376                          * Exclusive waiters sleeping with LK_SLEEPFAIL on
377                          * and using interruptible sleeps/timeout may have
378                          * left spurious lk_exslpfail counts on, so clean
379                          * it up anyway.
380                          */
381                         lk->lk_exslpfail = 0;
382                         queue = SQ_SHARED_QUEUE;
383                 }
384
385                 if (lockmgr_sunlock_try(lk, &orig_x)) {
386                         sleepq_release(&lk->lock_object);
387                         break;
388                 }
389
390                 x |= LK_SHARERS_LOCK(1);
391                 if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
392                         orig_x = x;
393                         goto retry_sleepq;
394                 }
395                 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
396                     __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
397                     "exclusive");
398                 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
399                     0, queue);
400                 sleepq_release(&lk->lock_object);
401                 break;
402         }
403
404         LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
405         return (wakeup_swapper);
406 }
407
408 static void
409 assert_lockmgr(const struct lock_object *lock, int what)
410 {
411
412         panic("lockmgr locks do not support assertions");
413 }
414
415 static void
416 lock_lockmgr(struct lock_object *lock, uintptr_t how)
417 {
418
419         panic("lockmgr locks do not support sleep interlocking");
420 }
421
422 static uintptr_t
423 unlock_lockmgr(struct lock_object *lock)
424 {
425
426         panic("lockmgr locks do not support sleep interlocking");
427 }
428
429 #ifdef KDTRACE_HOOKS
430 static int
431 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
432 {
433
434         panic("lockmgr locks do not support owner inquiring");
435 }
436 #endif
437
438 void
439 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
440 {
441         int iflags;
442
443         MPASS((flags & ~LK_INIT_MASK) == 0);
444         ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
445             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
446             &lk->lk_lock));
447
448         iflags = LO_SLEEPABLE | LO_UPGRADABLE;
449         if (flags & LK_CANRECURSE)
450                 iflags |= LO_RECURSABLE;
451         if ((flags & LK_NODUP) == 0)
452                 iflags |= LO_DUPOK;
453         if (flags & LK_NOPROFILE)
454                 iflags |= LO_NOPROFILE;
455         if ((flags & LK_NOWITNESS) == 0)
456                 iflags |= LO_WITNESS;
457         if (flags & LK_QUIET)
458                 iflags |= LO_QUIET;
459         if (flags & LK_IS_VNODE)
460                 iflags |= LO_IS_VNODE;
461         if (flags & LK_NEW)
462                 iflags |= LO_NEW;
463         iflags |= flags & LK_NOSHARE;
464
465         lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
466         lk->lk_lock = LK_UNLOCKED;
467         lk->lk_recurse = 0;
468         lk->lk_exslpfail = 0;
469         lk->lk_timo = timo;
470         lk->lk_pri = pri;
471         STACK_ZERO(lk);
472 }
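
/*
 * A typical initialization, using a hypothetical lock and wait message
 * purely for illustration:
 *
 *        struct lock foo_lock;
 *
 *        lockinit(&foo_lock, PVFS, "foolk", 0, LK_CANRECURSE);
 *        ...
 *        lockdestroy(&foo_lock);
 */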
473
474 /*
475  * XXX: Gross hacks to manipulate external lock flags after
476  * initialization.  Used for certain vnode and buf locks.
477  */
478 void
479 lockallowshare(struct lock *lk)
480 {
481
482         lockmgr_assert(lk, KA_XLOCKED);
483         lk->lock_object.lo_flags &= ~LK_NOSHARE;
484 }
485
486 void
487 lockdisableshare(struct lock *lk)
488 {
489
490         lockmgr_assert(lk, KA_XLOCKED);
491         lk->lock_object.lo_flags |= LK_NOSHARE;
492 }
493
494 void
495 lockallowrecurse(struct lock *lk)
496 {
497
498         lockmgr_assert(lk, KA_XLOCKED);
499         lk->lock_object.lo_flags |= LO_RECURSABLE;
500 }
501
502 void
503 lockdisablerecurse(struct lock *lk)
504 {
505
506         lockmgr_assert(lk, KA_XLOCKED);
507         lk->lock_object.lo_flags &= ~LO_RECURSABLE;
508 }
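
/*
 * The four helpers above must be called with the lock exclusively held;
 * a short sketch (hypothetical lock name):
 *
 *        lockmgr(&foo_lock, LK_EXCLUSIVE, NULL);
 *        lockallowrecurse(&foo_lock);    (same effect as LK_CANRECURSE at init)
 *        lockmgr(&foo_lock, LK_RELEASE, NULL);
 */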
509
510 void
511 lockdestroy(struct lock *lk)
512 {
513
514         KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
515         KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
516         KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
517         lock_destroy(&lk->lock_object);
518 }
519
520 static bool __always_inline
521 lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
522 {
523
524         /*
525          * If no other thread has an exclusive lock and
526          * no exclusive waiter is present, bump the count of
527          * sharers.  Since we have to preserve the state of
528          * waiters, if we fail to acquire the shared lock
529          * loop back and retry.
530          */
531         while (LK_CAN_SHARE(*xp, flags, fp)) {
532                 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
533                     *xp + LK_ONE_SHARER)) {
534                         return (true);
535                 }
536         }
537         return (false);
538 }
539
540 static bool __always_inline
541 lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
542 {
543
544         for (;;) {
545                 if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
546                         if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
547                             *xp - LK_ONE_SHARER))
548                                 return (true);
549                         continue;
550                 }
551                 break;
552         }
553         return (false);
554 }
555
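
/*
 * Adaptive spinning for the shared slow path: as long as the exclusive
 * owner is running on another CPU and nobody is queued on the lock, spin
 * with lock_delay() in the hope that the owner releases it soon.  Returns
 * true, with *xp refreshed, once a shared acquisition looks possible;
 * returns false to make the caller fall back to sleeping.
 * lockmgr_xlock_adaptive() below is the exclusive-mode counterpart.
 */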
556 static bool
557 lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
558     int flags)
559 {
560         struct thread *owner;
561         uintptr_t x;
562
563         x = *xp;
564         MPASS(x != LK_UNLOCKED);
565         owner = (struct thread *)LK_HOLDER(x);
566         for (;;) {
567                 MPASS(owner != curthread);
568                 if (owner == (struct thread *)LK_KERNPROC)
569                         return (false);
570                 if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
571                         return (false);
572                 if (owner == NULL)
573                         return (false);
574                 if (!TD_IS_RUNNING(owner))
575                         return (false);
576                 if ((x & LK_ALL_WAITERS) != 0)
577                         return (false);
578                 lock_delay(lda);
579                 x = lockmgr_read_value(lk);
580                 if (LK_CAN_SHARE(x, flags, false)) {
581                         *xp = x;
582                         return (true);
583                 }
584                 owner = (struct thread *)LK_HOLDER(x);
585         }
586 }
587
588 static __noinline int
589 lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
590     const char *file, int line, struct lockmgr_wait *lwa)
591 {
592         uintptr_t tid, x;
593         int error = 0;
594         const char *iwmesg;
595         int ipri, itimo;
596
597 #ifdef KDTRACE_HOOKS
598         uint64_t sleep_time = 0;
599 #endif
600 #ifdef LOCK_PROFILING
601         uint64_t waittime = 0;
602         int contested = 0;
603 #endif
604         struct lock_delay_arg lda;
605
606         if (KERNEL_PANICKED())
607                 goto out;
608
609         tid = (uintptr_t)curthread;
610
611         if (LK_CAN_WITNESS(flags))
612                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
613                     file, line, flags & LK_INTERLOCK ? ilk : NULL);
614         x = lockmgr_read_value(lk);
615         lock_delay_arg_init(&lda, &lockmgr_delay);
616         if (!lk_adaptive)
617                 flags &= ~LK_ADAPTIVE;
618         /*
619          * The lock may already be locked exclusive by curthread,
620          * avoid deadlock.
621          */
622         if (LK_HOLDER(x) == tid) {
623                 LOCK_LOG2(lk,
624                     "%s: %p already held in exclusive mode",
625                     __func__, lk);
626                 error = EDEADLK;
627                 goto out;
628         }
629
630         for (;;) {
631                 if (lockmgr_slock_try(lk, &x, flags, false))
632                         break;
633
634                 if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
635                         if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
636                                 continue;
637                 }
638
639 #ifdef HWPMC_HOOKS
640                 PMC_SOFT_CALL( , , lock, failed);
641 #endif
642                 lock_profile_obtain_lock_failed(&lk->lock_object, false,
643                     &contested, &waittime);
644
645                 /*
646                  * If the caller requested a non-blocking operation,
647                  * just give up and return.
648                  */
649                 if (LK_TRYOP(flags)) {
650                         LOCK_LOG2(lk, "%s: %p fails the try operation",
651                             __func__, lk);
652                         error = EBUSY;
653                         break;
654                 }
655
656                 /*
657                  * Acquire the sleepqueue chain lock because we
658                  * probably will need to manipulate waiters flags.
659                  */
660                 sleepq_lock(&lk->lock_object);
661                 x = lockmgr_read_value(lk);
662 retry_sleepq:
663
664                 /*
665                  * if the lock can be acquired in shared mode, try
666                  * again.
667                  */
668                 if (LK_CAN_SHARE(x, flags, false)) {
669                         sleepq_release(&lk->lock_object);
670                         continue;
671                 }
672
673                 /*
674                  * Try to set the LK_SHARED_WAITERS flag.  If we fail,
675                  * loop back and retry.
676                  */
677                 if ((x & LK_SHARED_WAITERS) == 0) {
678                         if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
679                             x | LK_SHARED_WAITERS)) {
680                                 goto retry_sleepq;
681                         }
682                         LOCK_LOG2(lk, "%s: %p set shared waiters flag",
683                             __func__, lk);
684                 }
685
686                 if (lwa == NULL) {
687                         iwmesg = lk->lock_object.lo_name;
688                         ipri = lk->lk_pri;
689                         itimo = lk->lk_timo;
690                 } else {
691                         iwmesg = lwa->iwmesg;
692                         ipri = lwa->ipri;
693                         itimo = lwa->itimo;
694                 }
695
696                 /*
697                  * Since we have been unable to acquire the
698                  * shared lock and the shared waiters flag is set,
699                  * we will sleep.
700                  */
701 #ifdef KDTRACE_HOOKS
702                 sleep_time -= lockstat_nsecs(&lk->lock_object);
703 #endif
704                 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
705                     SQ_SHARED_QUEUE);
706 #ifdef KDTRACE_HOOKS
707                 sleep_time += lockstat_nsecs(&lk->lock_object);
708 #endif
709                 flags &= ~LK_INTERLOCK;
710                 if (error) {
711                         LOCK_LOG3(lk,
712                             "%s: interrupted sleep for %p with %d",
713                             __func__, lk, error);
714                         break;
715                 }
716                 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
717                     __func__, lk);
718                 x = lockmgr_read_value(lk);
719         }
720         if (error == 0) {
721 #ifdef KDTRACE_HOOKS
722                 if (sleep_time != 0)
723                         LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
724                             LOCKSTAT_READER, (x & LK_SHARE) == 0,
725                             (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
726 #endif
727 #ifdef LOCK_PROFILING
728                 lockmgr_note_shared_acquire(lk, contested, waittime,
729                     file, line, flags);
730 #else
731                 lockmgr_note_shared_acquire(lk, 0, 0, file, line,
732                     flags);
733 #endif
734         }
735
736 out:
737         lockmgr_exit(flags, ilk, 0);
738         return (error);
739 }
740
741 static bool
742 lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
743 {
744         struct thread *owner;
745         uintptr_t x;
746
747         x = *xp;
748         MPASS(x != LK_UNLOCKED);
749         owner = (struct thread *)LK_HOLDER(x);
750         for (;;) {
751                 MPASS(owner != curthread);
752                 if (owner == NULL)
753                         return (false);
754                 if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
755                         return (false);
756                 if (owner == (struct thread *)LK_KERNPROC)
757                         return (false);
758                 if (!TD_IS_RUNNING(owner))
759                         return (false);
760                 if ((x & LK_ALL_WAITERS) != 0)
761                         return (false);
762                 lock_delay(lda);
763                 x = lockmgr_read_value(lk);
764                 if (x == LK_UNLOCKED) {
765                         *xp = x;
766                         return (true);
767                 }
768                 owner = (struct thread *)LK_HOLDER(x);
769         }
770 }
771
772 static __noinline int
773 lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
774     const char *file, int line, struct lockmgr_wait *lwa)
775 {
776         struct lock_class *class;
777         uintptr_t tid, x, v;
778         int error = 0;
779         const char *iwmesg;
780         int ipri, itimo;
781
782 #ifdef KDTRACE_HOOKS
783         uint64_t sleep_time = 0;
784 #endif
785 #ifdef LOCK_PROFILING
786         uint64_t waittime = 0;
787         int contested = 0;
788 #endif
789         struct lock_delay_arg lda;
790
791         if (KERNEL_PANICKED())
792                 goto out;
793
794         tid = (uintptr_t)curthread;
795
796         if (LK_CAN_WITNESS(flags))
797                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
798                     LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
799                     ilk : NULL);
800
801         /*
802          * If curthread already holds the lock and this one is
803          * allowed to recurse, simply recurse on it.
804          */
805         if (lockmgr_xlocked(lk)) {
806                 if ((flags & LK_CANRECURSE) == 0 &&
807                     (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
808                         /*
809                          * For a try operation, just give up and
810                          * return instead of panicking.
811                          */
812                         if (LK_TRYOP(flags)) {
813                                 LOCK_LOG2(lk,
814                                     "%s: %p fails the try operation",
815                                     __func__, lk);
816                                 error = EBUSY;
817                                 goto out;
818                         }
819                         if (flags & LK_INTERLOCK) {
820                                 class = LOCK_CLASS(ilk);
821                                 class->lc_unlock(ilk);
822                         }
823                         STACK_PRINT(lk);
824                         panic("%s: recursing on non recursive lockmgr %p "
825                             "@ %s:%d\n", __func__, lk, file, line);
826                 }
827                 atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
828                 lk->lk_recurse++;
829                 LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
830                 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
831                     lk->lk_recurse, file, line);
832                 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
833                     LK_TRYWIT(flags), file, line);
834                 TD_LOCKS_INC(curthread);
835                 goto out;
836         }
837
838         x = LK_UNLOCKED;
839         lock_delay_arg_init(&lda, &lockmgr_delay);
840         if (!lk_adaptive)
841                 flags &= ~LK_ADAPTIVE;
842         for (;;) {
843                 if (x == LK_UNLOCKED) {
844                         if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
845                                 break;
846                         continue;
847                 }
848                 if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
849                         if (lockmgr_xlock_adaptive(&lda, lk, &x))
850                                 continue;
851                 }
852 #ifdef HWPMC_HOOKS
853                 PMC_SOFT_CALL( , , lock, failed);
854 #endif
855                 lock_profile_obtain_lock_failed(&lk->lock_object, false,
856                     &contested, &waittime);
857
858                 /*
859                  * If the caller requested a non-blocking operation,
860                  * just give up and return.
861                  */
862                 if (LK_TRYOP(flags)) {
863                         LOCK_LOG2(lk, "%s: %p fails the try operation",
864                             __func__, lk);
865                         error = EBUSY;
866                         break;
867                 }
868
869                 /*
870                  * Acquire the sleepqueue chain lock because we
871                  * probably will need to manipulate waiters flags.
872                  */
873                 sleepq_lock(&lk->lock_object);
874                 x = lockmgr_read_value(lk);
875 retry_sleepq:
876
877                 /*
878                  * if the lock has been released while we spun on
879                  * the sleepqueue chain lock just try again.
880                  */
881                 if (x == LK_UNLOCKED) {
882                         sleepq_release(&lk->lock_object);
883                         continue;
884                 }
885
886                 /*
887                  * The lock can be in the state where there is a
888                  * pending queue of waiters, but still no owner.
889                  * This happens when the lock is contested and an
890                  * owner is going to claim the lock.
891                  * If curthread is the one that successfully acquires
892                  * it, claim lock ownership and return, preserving the
893                  * waiters flags.
894                  */
895                 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
896                 if ((x & ~v) == LK_UNLOCKED) {
897                         v &= ~LK_EXCLUSIVE_SPINNERS;
898                         if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
899                             tid | v)) {
900                                 sleepq_release(&lk->lock_object);
901                                 LOCK_LOG2(lk,
902                                     "%s: %p claimed by a new writer",
903                                     __func__, lk);
904                                 break;
905                         }
906                         goto retry_sleepq;
907                 }
908
909                 /*
910                  * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
911                  * fail, loop back and retry.
912                  */
913                 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
914                         if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
915                             x | LK_EXCLUSIVE_WAITERS)) {
916                                 goto retry_sleepq;
917                         }
918                         LOCK_LOG2(lk, "%s: %p set excl waiters flag",
919                             __func__, lk);
920                 }
921
922                 if (lwa == NULL) {
923                         iwmesg = lk->lock_object.lo_name;
924                         ipri = lk->lk_pri;
925                         itimo = lk->lk_timo;
926                 } else {
927                         iwmesg = lwa->iwmesg;
928                         ipri = lwa->ipri;
929                         itimo = lwa->itimo;
930                 }
931
932                 /*
933                  * Since we have been unable to acquire the
934                  * exclusive lock and the exclusive waiters flag
935                  * is set, we will sleep.
936                  */
937 #ifdef KDTRACE_HOOKS
938                 sleep_time -= lockstat_nsecs(&lk->lock_object);
939 #endif
940                 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
941                     SQ_EXCLUSIVE_QUEUE);
942 #ifdef KDTRACE_HOOKS
943                 sleep_time += lockstat_nsecs(&lk->lock_object);
944 #endif
945                 flags &= ~LK_INTERLOCK;
946                 if (error) {
947                         LOCK_LOG3(lk,
948                             "%s: interrupted sleep for %p with %d",
949                             __func__, lk, error);
950                         break;
951                 }
952                 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
953                     __func__, lk);
954                 x = lockmgr_read_value(lk);
955         }
956         if (error == 0) {
957 #ifdef KDTRACE_HOOKS
958                 if (sleep_time != 0)
959                         LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
960                             LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
961                             (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
962 #endif
963 #ifdef LOCK_PROFILING
964                 lockmgr_note_exclusive_acquire(lk, contested, waittime,
965                     file, line, flags);
966 #else
967                 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
968                     flags);
969 #endif
970         }
971
972 out:
973         lockmgr_exit(flags, ilk, 0);
974         return (error);
975 }
976
977 static __noinline int
978 lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
979     const char *file, int line, struct lockmgr_wait *lwa)
980 {
981         uintptr_t tid, v, setv;
982         int error = 0;
983         int op;
984
985         if (KERNEL_PANICKED())
986                 goto out;
987
988         tid = (uintptr_t)curthread;
989
990         _lockmgr_assert(lk, KA_SLOCKED, file, line);
991
992         op = flags & LK_TYPE_MASK;
993         v = lockmgr_read_value(lk);
994         for (;;) {
995                 if (LK_SHARERS(v) > 1) {
996                         if (op == LK_TRYUPGRADE) {
997                                 LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
998                                     __func__, lk);
999                                 error = EBUSY;
1000                                 goto out;
1001                         }
1002                         if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &v,
1003                             v - LK_ONE_SHARER)) {
1004                                 lockmgr_note_shared_release(lk, file, line);
1005                                 goto out_xlock;
1006                         }
1007                         continue;
1008                 }
1009                 MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));
1010
1011                 setv = tid;
1012                 setv |= (v & LK_ALL_WAITERS);
1013
1014                 /*
1015                  * Try to switch from one shared lock to an exclusive one.
1016                  * We need to preserve waiters flags during the operation.
1017                  */
1018                 if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
1019                         LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
1020                             line);
1021                         WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
1022                             LK_TRYWIT(flags), file, line);
1023                         LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
1024                         TD_SLOCKS_DEC(curthread);
1025                         goto out;
1026                 }
1027         }
1028
1029 out_xlock:
1030         error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
1031         flags &= ~LK_INTERLOCK;
1032 out:
1033         lockmgr_exit(flags, ilk, 0);
1034         return (error);
1035 }
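
/*
 * Upgrade semantics in short: LK_TRYUPGRADE never sleeps and fails with
 * EBUSY while other sharers exist, whereas plain LK_UPGRADE drops the
 * shared lock in that case and falls through to lockmgr_xlock_hard().
 * A sketch with a hypothetical lock name:
 *
 *        if (lockmgr(&foo_lock, LK_TRYUPGRADE, NULL) != 0) {
 *                lockmgr(&foo_lock, LK_RELEASE, NULL);
 *                lockmgr(&foo_lock, LK_EXCLUSIVE, NULL);
 *        }
 */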
1036
1037 int
1038 lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
1039     const char *file, int line)
1040 {
1041         struct lock_class *class;
1042         uintptr_t x, tid;
1043         u_int op;
1044         bool locked;
1045
1046         if (KERNEL_PANICKED())
1047                 return (0);
1048
1049         op = flags & LK_TYPE_MASK;
1050         locked = false;
1051         switch (op) {
1052         case LK_SHARED:
1053                 if (LK_CAN_WITNESS(flags))
1054                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1055                             file, line, flags & LK_INTERLOCK ? ilk : NULL);
1056                 if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
1057                         break;
1058                 x = lockmgr_read_value(lk);
1059                 if (lockmgr_slock_try(lk, &x, flags, true)) {
1060                         lockmgr_note_shared_acquire(lk, 0, 0,
1061                             file, line, flags);
1062                         locked = true;
1063                 } else {
1064                         return (lockmgr_slock_hard(lk, flags, ilk, file, line,
1065                             NULL));
1066                 }
1067                 break;
1068         case LK_EXCLUSIVE:
1069                 if (LK_CAN_WITNESS(flags))
1070                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1071                             LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1072                             ilk : NULL);
1073                 tid = (uintptr_t)curthread;
1074                 if (lockmgr_read_value(lk) == LK_UNLOCKED &&
1075                     atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1076                         lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1077                             flags);
1078                         locked = true;
1079                 } else {
1080                         return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
1081                             NULL));
1082                 }
1083                 break;
1084         case LK_UPGRADE:
1085         case LK_TRYUPGRADE:
1086                 return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
1087         default:
1088                 break;
1089         }
1090         if (__predict_true(locked)) {
1091                 if (__predict_false(flags & LK_INTERLOCK)) {
1092                         class = LOCK_CLASS(ilk);
1093                         class->lc_unlock(ilk);
1094                 }
1095                 return (0);
1096         } else {
1097                 return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
1098                     LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
1099         }
1100 }
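
/*
 * A typical interlocked acquisition through this entry point; the mutex
 * and lock names are hypothetical:
 *
 *        mtx_lock(&foo_interlock);
 *        ... examine state protected by the interlock ...
 *        error = lockmgr_lock_flags(&foo_lock, LK_EXCLUSIVE | LK_INTERLOCK,
 *            &foo_interlock.lock_object, __FILE__, __LINE__);
 *
 * With LK_INTERLOCK the interlock is always released before returning,
 * whether or not the lockmgr lock was obtained.
 */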
1101
1102 static __noinline int
1103 lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1104     const char *file, int line)
1105
1106 {
1107         int wakeup_swapper = 0;
1108
1109         if (KERNEL_PANICKED())
1110                 goto out;
1111
1112         wakeup_swapper = wakeupshlk(lk, file, line);
1113
1114 out:
1115         lockmgr_exit(flags, ilk, wakeup_swapper);
1116         return (0);
1117 }
1118
1119 static __noinline int
1120 lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1121     const char *file, int line)
1122 {
1123         uintptr_t tid, v;
1124         int wakeup_swapper = 0;
1125         u_int realexslp;
1126         int queue;
1127
1128         if (KERNEL_PANICKED())
1129                 goto out;
1130
1131         tid = (uintptr_t)curthread;
1132
1133         /*
1134          * As a first attempt, treat the lock as if it has no
1135          * waiters.
1136          * Fix up the tid variable if the lock has been disowned.
1137          */
1138         if (LK_HOLDER(x) == LK_KERNPROC)
1139                 tid = LK_KERNPROC;
1140
1141         /*
1142          * The lock is held in exclusive mode.
1143          * If the lock is recursed also, then unrecurse it.
1144          */
1145         if (lockmgr_recursed_v(x)) {
1146                 LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
1147                 lk->lk_recurse--;
1148                 if (lk->lk_recurse == 0)
1149                         atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
1150                 goto out;
1151         }
1152         if (tid != LK_KERNPROC)
1153                 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
1154                     LOCKSTAT_WRITER);
1155
1156         if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
1157                 goto out;
1158
1159         sleepq_lock(&lk->lock_object);
1160         x = lockmgr_read_value(lk);
1161         v = LK_UNLOCKED;
1162
1163         /*
1164          * If the lock has exclusive waiters, give them
1165          * preference in order to avoid a deadlock with the
1166          * shared runners-up.
1167          * If interruptible sleeps left the exclusive queue
1168          * empty, avoid starving the threads sleeping on the
1169          * shared queue by giving them precedence and
1170          * clearing the exclusive waiters bit anyway.
1171          * Note that the lk_exslpfail count may overstate
1172          * the real number of waiters carrying the
1173          * LK_SLEEPFAIL flag, since such waiters may also be
1174          * using interruptible sleeps or timeouts; treat
1175          * lk_exslpfail as an upper bound, including the
1176          * edge cases.
1177          */
1178         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1179         realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
1180         if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1181                 if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
1182                         lk->lk_exslpfail = 0;
1183                         queue = SQ_EXCLUSIVE_QUEUE;
1184                         v |= (x & LK_SHARED_WAITERS);
1185                 } else {
1186                         lk->lk_exslpfail = 0;
1187                         LOCK_LOG2(lk,
1188                             "%s: %p has only LK_SLEEPFAIL sleepers",
1189                             __func__, lk);
1190                         LOCK_LOG2(lk,
1191                             "%s: %p waking up threads on the exclusive queue",
1192                             __func__, lk);
1193                         wakeup_swapper = sleepq_broadcast(&lk->lock_object,
1194                             SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1195                         queue = SQ_SHARED_QUEUE;
1196                 }
1197         } else {
1198                 /*
1199                  * Exclusive waiters sleeping with LK_SLEEPFAIL
1200                  * on and using interruptible sleeps/timeout
1201                  * may have left spurious lk_exslpfail counts
1202                  * on, so clean it up anyway.
1203                  */
1204                 lk->lk_exslpfail = 0;
1205                 queue = SQ_SHARED_QUEUE;
1206         }
1207
1208         LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
1209             __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1210             "exclusive");
1211         atomic_store_rel_ptr(&lk->lk_lock, v);
1212         wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
1213         sleepq_release(&lk->lock_object);
1214
1215 out:
1216         lockmgr_exit(flags, ilk, wakeup_swapper);
1217         return (0);
1218 }
1219
1220 /*
1221  * Lightweight entry points for common operations.
1222  *
1223  * Functionality is similar to sx locks, in that none of the additional lockmgr
1224  * features are supported. To be clear, these are NOT supported:
1225  * 1. shared locking disablement
1226  * 2. returning with an error after sleep
1227  * 3. unlocking the interlock
1228  *
1229  * If in doubt, use lockmgr_lock_flags.
1230  */
1231 int
1232 lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
1233 {
1234         uintptr_t x;
1235
1236         MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
1237         MPASS((flags & LK_INTERLOCK) == 0);
1238         MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
1239
1240         if (LK_CAN_WITNESS(flags))
1241                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1242                     file, line, NULL);
1243         x = lockmgr_read_value(lk);
1244         if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
1245                 lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
1246                 return (0);
1247         }
1248
1249         return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1250 }
1251
1252 int
1253 lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
1254 {
1255         uintptr_t tid;
1256
1257         MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
1258         MPASS((flags & LK_INTERLOCK) == 0);
1259
1260         if (LK_CAN_WITNESS(flags))
1261                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1262                     LOP_EXCLUSIVE, file, line, NULL);
1263         tid = (uintptr_t)curthread;
1264         if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1265                 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1266                     flags);
1267                 return (0);
1268         }
1269
1270         return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1271 }
1272
1273 int
1274 lockmgr_unlock(struct lock *lk)
1275 {
1276         uintptr_t x, tid;
1277         const char *file;
1278         int line;
1279
1280         file = __FILE__;
1281         line = __LINE__;
1282
1283         _lockmgr_assert(lk, KA_LOCKED, file, line);
1284         x = lockmgr_read_value(lk);
1285         if (__predict_true(x & LK_SHARE) != 0) {
1286                 lockmgr_note_shared_release(lk, file, line);
1287                 if (lockmgr_sunlock_try(lk, &x)) {
1288                         LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
1289                 } else {
1290                         return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1291                 }
1292         } else {
1293                 tid = (uintptr_t)curthread;
1294                 lockmgr_note_exclusive_release(lk, file, line);
1295                 if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1296                         LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
1297                 } else {
1298                         return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1299                 }
1300         }
1301         return (0);
1302 }
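
/*
 * Typical use of the lightweight entry points, with a hypothetical lock
 * name; note the restrictions listed in the comment above lockmgr_slock():
 *
 *        if (lockmgr_slock(&foo_lock, LK_SHARED, __FILE__, __LINE__) == 0) {
 *                ... read-side work ...
 *                lockmgr_unlock(&foo_lock);
 *        }
 */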
1303
1304 int
1305 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
1306     const char *wmesg, int pri, int timo, const char *file, int line)
1307 {
1308         GIANT_DECLARE;
1309         struct lockmgr_wait lwa;
1310         struct lock_class *class;
1311         const char *iwmesg;
1312         uintptr_t tid, v, x;
1313         u_int op, realexslp;
1314         int error, ipri, itimo, queue, wakeup_swapper;
1315 #ifdef LOCK_PROFILING
1316         uint64_t waittime = 0;
1317         int contested = 0;
1318 #endif
1319
1320         if (KERNEL_PANICKED())
1321                 return (0);
1322
1323         error = 0;
1324         tid = (uintptr_t)curthread;
1325         op = (flags & LK_TYPE_MASK);
1326         iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
1327         ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
1328         itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
1329
1330         lwa.iwmesg = iwmesg;
1331         lwa.ipri = ipri;
1332         lwa.itimo = itimo;
1333
1334         MPASS((flags & ~LK_TOTAL_MASK) == 0);
1335         KASSERT((op & (op - 1)) == 0,
1336             ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
1337         KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
1338             (op != LK_DOWNGRADE && op != LK_RELEASE),
1339             ("%s: Invalid flags in regard of the operation desired @ %s:%d",
1340             __func__, file, line));
1341         KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
1342             ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
1343             __func__, file, line));
1344         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
1345             ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
1346             lk->lock_object.lo_name, file, line));
1347
1348         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
1349
1350         if (lk->lock_object.lo_flags & LK_NOSHARE) {
1351                 switch (op) {
1352                 case LK_SHARED:
1353                         op = LK_EXCLUSIVE;
1354                         break;
1355                 case LK_UPGRADE:
1356                 case LK_TRYUPGRADE:
1357                 case LK_DOWNGRADE:
1358                         _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
1359                             file, line);
1360                         if (flags & LK_INTERLOCK)
1361                                 class->lc_unlock(ilk);
1362                         return (0);
1363                 }
1364         }
1365
1366         wakeup_swapper = 0;
1367         switch (op) {
1368         case LK_SHARED:
1369                 return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
1370                 break;
1371         case LK_UPGRADE:
1372         case LK_TRYUPGRADE:
1373                 return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
1374                 break;
1375         case LK_EXCLUSIVE:
1376                 return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
1377                 break;
1378         case LK_DOWNGRADE:
1379                 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1380                 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
1381
1382                 /*
1383                  * Panic if the lock is recursed.
1384                  */
1385                 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1386                         if (flags & LK_INTERLOCK)
1387                                 class->lc_unlock(ilk);
1388                         panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
1389                             __func__, iwmesg, file, line);
1390                 }
1391                 TD_SLOCKS_INC(curthread);
1392
1393                 /*
1394                  * In order to preserve waiters flags, just spin.
1395                  */
1396                 for (;;) {
1397                         x = lockmgr_read_value(lk);
1398                         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1399                         x &= LK_ALL_WAITERS;
1400                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1401                             LK_SHARERS_LOCK(1) | x))
1402                                 break;
1403                         cpu_spinwait();
1404                 }
1405                 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
1406                 LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
1407                 break;
1408         case LK_RELEASE:
1409                 _lockmgr_assert(lk, KA_LOCKED, file, line);
1410                 x = lockmgr_read_value(lk);
1411
1412                 if (__predict_true(x & LK_SHARE) != 0) {
1413                         lockmgr_note_shared_release(lk, file, line);
1414                         return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1415                 } else {
1416                         lockmgr_note_exclusive_release(lk, file, line);
1417                         return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
1418                 }
1419                 break;
1420         case LK_DRAIN:
1421                 if (LK_CAN_WITNESS(flags))
1422                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1423                             LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1424                             ilk : NULL);
1425
1426                 /*
1427                  * Trying to drain a lock we already own will result in a
1428                  * deadlock.
1429                  */
1430                 if (lockmgr_xlocked(lk)) {
1431                         if (flags & LK_INTERLOCK)
1432                                 class->lc_unlock(ilk);
1433                         panic("%s: draining %s with the lock held @ %s:%d\n",
1434                             __func__, iwmesg, file, line);
1435                 }
1436
1437                 for (;;) {
1438                         if (lk->lk_lock == LK_UNLOCKED &&
1439                             atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1440                                 break;
1441
1442 #ifdef HWPMC_HOOKS
1443                         PMC_SOFT_CALL( , , lock, failed);
1444 #endif
1445                         lock_profile_obtain_lock_failed(&lk->lock_object, false,
1446                             &contested, &waittime);
1447
1448                         /*
1449                          * If the operation is not expected to sleep, just
1450                          * give up and return.
1451                          */
1452                         if (LK_TRYOP(flags)) {
1453                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
1454                                     __func__, lk);
1455                                 error = EBUSY;
1456                                 break;
1457                         }
1458
1459                         /*
1460                          * Acquire the sleepqueue chain lock because we
1461                          * probably will need to manipulate waiters flags.
1462                          */
1463                         sleepq_lock(&lk->lock_object);
1464                         x = lockmgr_read_value(lk);
1465
1466                         /*
1467                          * If the lock has been released while we spun on
1468                          * the sleepqueue chain lock, just try again.
1469                          */
1470                         if (x == LK_UNLOCKED) {
1471                                 sleepq_release(&lk->lock_object);
1472                                 continue;
1473                         }
1474
1475                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1476                         if ((x & ~v) == LK_UNLOCKED) {
1477                                 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1478
1479                                 /*
1480                                  * If interruptible sleeps left the exclusive
1481                                  * queue empty, avoid starving the threads
1482                                  * sleeping on the shared queue by giving them
1483                                  * precedence and cleaning up the exclusive
1484                                  * waiters bit anyway.
1485                                  * Note that the lk_exslpfail count may
1486                                  * overstate the real number of waiters with
1487                                  * the LK_SLEEPFAIL flag set, because such
1488                                  * waiters may also be using interruptible
1489                                  * sleeps, so lk_exslpfail should be treated
1490                                  * as an 'upper limit' bound, including the
1491                                  * edge cases.
1492                                  */
1493                                 if (v & LK_EXCLUSIVE_WAITERS) {
1494                                         queue = SQ_EXCLUSIVE_QUEUE;
1495                                         v &= ~LK_EXCLUSIVE_WAITERS;
1496                                 } else {
1497                                         /*
1498                                          * Exclusive waiters sleeping with
1499                                          * LK_SLEEPFAIL on and using
1500                                          * interruptible sleeps/timeout may
1501                                          * have left spurious lk_exslpfail
1502                                          * counts behind, so clean them up anyway.
1503                                          */
1504                                         MPASS(v & LK_SHARED_WAITERS);
1505                                         lk->lk_exslpfail = 0;
1506                                         queue = SQ_SHARED_QUEUE;
1507                                         v &= ~LK_SHARED_WAITERS;
1508                                 }
1509                                 if (queue == SQ_EXCLUSIVE_QUEUE) {
1510                                         realexslp =
1511                                             sleepq_sleepcnt(&lk->lock_object,
1512                                             SQ_EXCLUSIVE_QUEUE);
1513                                         if (lk->lk_exslpfail >= realexslp) {
1514                                                 lk->lk_exslpfail = 0;
1515                                                 queue = SQ_SHARED_QUEUE;
1516                                                 v &= ~LK_SHARED_WAITERS;
1517                                                 if (realexslp != 0) {
1518                                                         LOCK_LOG2(lk,
1519                                         "%s: %p has only LK_SLEEPFAIL sleepers",
1520                                                             __func__, lk);
1521                                                         LOCK_LOG2(lk,
1522                         "%s: %p waking up threads on the exclusive queue",
1523                                                             __func__, lk);
1524                                                         wakeup_swapper =
1525                                                             sleepq_broadcast(
1526                                                             &lk->lock_object,
1527                                                             SLEEPQ_LK, 0,
1528                                                             SQ_EXCLUSIVE_QUEUE);
1529                                                 }
1530                                         } else
1531                                                 lk->lk_exslpfail = 0;
1532                                 }
1533                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1534                                         sleepq_release(&lk->lock_object);
1535                                         continue;
1536                                 }
1537                                 LOCK_LOG3(lk,
1538                                 "%s: %p waking up all threads on the %s queue",
1539                                     __func__, lk, queue == SQ_SHARED_QUEUE ?
1540                                     "shared" : "exclusive");
1541                                 wakeup_swapper |= sleepq_broadcast(
1542                                     &lk->lock_object, SLEEPQ_LK, 0, queue);
1543
1544                                 /*
1545                                  * If shared waiters have been woken up, we need
1546                                  * to wait for one of them to acquire the lock
1547                                  * before setting the exclusive waiters flag,
1548                                  * in order to avoid a deadlock.
1549                                  */
1550                                 if (queue == SQ_SHARED_QUEUE) {
1551                                         for (v = lk->lk_lock;
1552                                             (v & LK_SHARE) && !LK_SHARERS(v);
1553                                             v = lk->lk_lock)
1554                                                 cpu_spinwait();
1555                                 }
1556                         }
1557
1558                         /*
1559                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1560                          * fail, loop back and retry.
1561                          */
1562                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1563                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1564                                     x | LK_EXCLUSIVE_WAITERS)) {
1565                                         sleepq_release(&lk->lock_object);
1566                                         continue;
1567                                 }
1568                                 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1569                                     __func__, lk);
1570                         }
1571
1572                         /*
1573                          * As long as we have been unable to acquire the
1574                          * exclusive lock and the exclusive waiters flag
1575                          * is set, we will sleep.
1576                          */
1577                         if (flags & LK_INTERLOCK) {
1578                                 class->lc_unlock(ilk);
1579                                 flags &= ~LK_INTERLOCK;
1580                         }
1581                         GIANT_SAVE();
1582                         sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1583                             SQ_EXCLUSIVE_QUEUE);
1584                         sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1585                         GIANT_RESTORE();
1586                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1587                             __func__, lk);
1588                 }
1589
1590                 if (error == 0) {
1591                         lock_profile_obtain_lock_success(&lk->lock_object,
1592                             false, contested, waittime, file, line);
1593                         LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1594                             lk->lk_recurse, file, line);
1595                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1596                             LK_TRYWIT(flags), file, line);
1597                         TD_LOCKS_INC(curthread);
1598                         STACK_SAVE(lk);
1599                 }
1600                 break;
1601         default:
1602                 if (flags & LK_INTERLOCK)
1603                         class->lc_unlock(ilk);
1604                 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1605         }
1606
1607         if (flags & LK_INTERLOCK)
1608                 class->lc_unlock(ilk);
1609         if (wakeup_swapper)
1610                 kick_proc0();
1611
1612         return (error);
1613 }
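
/*
 * Illustrative sketch (hypothetical, not compiled): how the operations
 * dispatched by the switch above are normally driven through the lockmgr(9)
 * wrapper macro.  xd_lock_example() and the "xdlock" name are made up for
 * the example; real lockmgr locks typically live in a softc or vnode.
 */
#if 0
static void
xd_lock_example(void)
{
        struct lock xd_lock;

        lockinit(&xd_lock, PRIBIO, "xdlock", 0, 0);

        /*
         * Acquire exclusively.  A caller holding an interlock would pass it
         * as the third argument together with LK_INTERLOCK so that
         * lockmgr() drops it on its behalf.
         */
        lockmgr(&xd_lock, LK_EXCLUSIVE, NULL);

        /* Demote the exclusive hold to a shared one, then drop it. */
        lockmgr(&xd_lock, LK_DOWNGRADE, NULL);
        lockmgr(&xd_lock, LK_RELEASE, NULL);

        /* Drain all waiters, then release the resulting exclusive hold. */
        lockmgr(&xd_lock, LK_DRAIN, NULL);
        lockmgr(&xd_lock, LK_RELEASE, NULL);
        lockdestroy(&xd_lock);
}
#endif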
1614
1615 void
1616 _lockmgr_disown(struct lock *lk, const char *file, int line)
1617 {
1618         uintptr_t tid, x;
1619
1620         if (SCHEDULER_STOPPED())
1621                 return;
1622
1623         tid = (uintptr_t)curthread;
1624         _lockmgr_assert(lk, KA_XLOCKED, file, line);
1625
1626         /*
1627          * Panic if the lock is recursed.
1628          */
1629         if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1630                 panic("%s: disown a recursed lockmgr @ %s:%d\n",
1631                     __func__, file, line);
1632
1633         /*
1634          * If the owner is already LK_KERNPROC, just skip the whole operation.
1635          */
1636         if (LK_HOLDER(lk->lk_lock) != tid)
1637                 return;
1638         lock_profile_release_lock(&lk->lock_object, false);
1639         LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1640         LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1641         WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1642         TD_LOCKS_DEC(curthread);
1643         STACK_SAVE(lk);
1644
1645         /*
1646          * In order to preserve waiters flags, just spin.
1647          */
1648         for (;;) {
1649                 x = lockmgr_read_value(lk);
1650                 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1651                 x &= LK_ALL_WAITERS;
1652                 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1653                     LK_KERNPROC | x))
1654                         return;
1655                 cpu_spinwait();
1656         }
1657 }
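
/*
 * Illustrative sketch (hypothetical, not compiled): the disown pattern that
 * _lockmgr_disown() above implements.  The thread that acquired the lock
 * hands ownership to LK_KERNPROC so that a different context may legally
 * release it later; xd_start_io() and xd_io_done() are made-up helpers and
 * lockmgr_disown() is assumed to be the sys/lockmgr.h wrapper macro.
 */
#if 0
static void
xd_start_io(struct lock *lk)
{

        lockmgr(lk, LK_EXCLUSIVE, NULL);
        /* ... hand the locked object off to an asynchronous consumer ... */
        lockmgr_disown(lk);     /* owner becomes LK_KERNPROC */
}

static void
xd_io_done(struct lock *lk)
{

        /* Any thread may now drop the disowned exclusive lock. */
        lockmgr(lk, LK_RELEASE, NULL);
}
#endif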
1658
1659 void
1660 lockmgr_printinfo(const struct lock *lk)
1661 {
1662         struct thread *td;
1663         uintptr_t x;
1664
1665         if (lk->lk_lock == LK_UNLOCKED)
1666                 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1667         else if (lk->lk_lock & LK_SHARE)
1668                 printf("lock type %s: SHARED (count %ju)\n",
1669                     lk->lock_object.lo_name,
1670                     (uintmax_t)LK_SHARERS(lk->lk_lock));
1671         else {
1672                 td = lockmgr_xholder(lk);
1673                 if (td == (struct thread *)LK_KERNPROC)
1674                         printf("lock type %s: EXCL by KERNPROC\n",
1675                             lk->lock_object.lo_name);
1676                 else
1677                         printf("lock type %s: EXCL by thread %p "
1678                             "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1679                             td, td->td_proc->p_pid, td->td_proc->p_comm,
1680                             td->td_tid);
1681         }
1682
1683         x = lk->lk_lock;
1684         if (x & LK_EXCLUSIVE_WAITERS)
1685                 printf(" with exclusive waiters pending\n");
1686         if (x & LK_SHARED_WAITERS)
1687                 printf(" with shared waiters pending\n");
1688         if (x & LK_EXCLUSIVE_SPINNERS)
1689                 printf(" with exclusive spinners pending\n");
1690
1691         STACK_PRINT(lk);
1692 }
1693
1694 int
1695 lockstatus(const struct lock *lk)
1696 {
1697         uintptr_t v, x;
1698         int ret;
1699
1700         ret = LK_SHARED;
1701         x = lockmgr_read_value(lk);
1702         v = LK_HOLDER(x);
1703
1704         if ((x & LK_SHARE) == 0) {
1705                 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1706                         ret = LK_EXCLUSIVE;
1707                 else
1708                         ret = LK_EXCLOTHER;
1709         } else if (x == LK_UNLOCKED)
1710                 ret = 0;
1711
1712         return (ret);
1713 }
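
/*
 * Illustrative sketch (hypothetical, not compiled): lockstatus() reports the
 * lock state relative to curthread; this shows one way a caller might
 * interpret the return value.  xd_owns_excl() is a made-up name.
 */
#if 0
static bool
xd_owns_excl(const struct lock *lk)
{

        switch (lockstatus(lk)) {
        case LK_EXCLUSIVE:      /* held exclusively by curthread or KERNPROC */
                return (true);
        case LK_EXCLOTHER:      /* held exclusively by another thread */
        case LK_SHARED:         /* held in shared mode (by any thread) */
        default:                /* 0: unlocked */
                return (false);
        }
}
#endif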
1714
1715 #ifdef INVARIANT_SUPPORT
1716
1717 FEATURE(invariant_support,
1718     "Support for modules compiled with INVARIANTS option");
1719
1720 #ifndef INVARIANTS
1721 #undef  _lockmgr_assert
1722 #endif
1723
1724 void
1725 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1726 {
1727         int slocked = 0;
1728
1729         if (KERNEL_PANICKED())
1730                 return;
1731         switch (what) {
1732         case KA_SLOCKED:
1733         case KA_SLOCKED | KA_NOTRECURSED:
1734         case KA_SLOCKED | KA_RECURSED:
1735                 slocked = 1;
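                     /* FALLTHROUGH */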
1736         case KA_LOCKED:
1737         case KA_LOCKED | KA_NOTRECURSED:
1738         case KA_LOCKED | KA_RECURSED:
1739 #ifdef WITNESS
1740
1741                 /*
1742                  * We cannot trust WITNESS if the lock is held in exclusive
1743                  * mode and a call to lockmgr_disown() happened.
1744                  * Work around this by skipping the check if the lock is held
1745                  * in exclusive mode, even for the KA_LOCKED case.
1746                  */
1747                 if (slocked || (lk->lk_lock & LK_SHARE)) {
1748                         witness_assert(&lk->lock_object, what, file, line);
1749                         break;
1750                 }
1751 #endif
1752                 if (lk->lk_lock == LK_UNLOCKED ||
1753                     ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1754                     (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1755                         panic("Lock %s not %slocked @ %s:%d\n",
1756                             lk->lock_object.lo_name, slocked ? "share" : "",
1757                             file, line);
1758
1759                 if ((lk->lk_lock & LK_SHARE) == 0) {
1760                         if (lockmgr_recursed(lk)) {
1761                                 if (what & KA_NOTRECURSED)
1762                                         panic("Lock %s recursed @ %s:%d\n",
1763                                             lk->lock_object.lo_name, file,
1764                                             line);
1765                         } else if (what & KA_RECURSED)
1766                                 panic("Lock %s not recursed @ %s:%d\n",
1767                                     lk->lock_object.lo_name, file, line);
1768                 }
1769                 break;
1770         case KA_XLOCKED:
1771         case KA_XLOCKED | KA_NOTRECURSED:
1772         case KA_XLOCKED | KA_RECURSED:
1773                 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1774                         panic("Lock %s not exclusively locked @ %s:%d\n",
1775                             lk->lock_object.lo_name, file, line);
1776                 if (lockmgr_recursed(lk)) {
1777                         if (what & KA_NOTRECURSED)
1778                                 panic("Lock %s recursed @ %s:%d\n",
1779                                     lk->lock_object.lo_name, file, line);
1780                 } else if (what & KA_RECURSED)
1781                         panic("Lock %s not recursed @ %s:%d\n",
1782                             lk->lock_object.lo_name, file, line);
1783                 break;
1784         case KA_UNLOCKED:
1785                 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1786                         panic("Lock %s exclusively locked @ %s:%d\n",
1787                             lk->lock_object.lo_name, file, line);
1788                 break;
1789         default:
1790                 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1791                     line);
1792         }
1793 }
1794 #endif
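
/*
 * Illustrative sketch (hypothetical, not compiled): with INVARIANTS enabled,
 * the lockmgr_assert() macro from sys/lockmgr.h is assumed to expand to
 * _lockmgr_assert() above, letting callers document their locking contract.
 * xd_modify() is a made-up name.
 */
#if 0
static void
xd_modify(struct lock *lk)
{

        /* The caller must hold lk exclusively and without recursion. */
        lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED);
        /* ... modify state protected by lk ... */
}
#endif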
1795
1796 #ifdef DDB
1797 int
1798 lockmgr_chain(struct thread *td, struct thread **ownerp)
1799 {
1800         const struct lock *lk;
1801
1802         lk = td->td_wchan;
1803
1804         if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1805                 return (0);
1806         db_printf("blocked on lockmgr %s ", lk->lock_object.lo_name);
1807         if (lk->lk_lock & LK_SHARE)
1808                 db_printf("SHARED (count %ju)\n",
1809                     (uintmax_t)LK_SHARERS(lk->lk_lock));
1810         else
1811                 db_printf("EXCL\n");
1812         *ownerp = lockmgr_xholder(lk);
1813
1814         return (1);
1815 }
1816
1817 static void
1818 db_show_lockmgr(const struct lock_object *lock)
1819 {
1820         struct thread *td;
1821         const struct lock *lk;
1822
1823         lk = (const struct lock *)lock;
1824
1825         db_printf(" state: ");
1826         if (lk->lk_lock == LK_UNLOCKED)
1827                 db_printf("UNLOCKED\n");
1828         else if (lk->lk_lock & LK_SHARE)
1829                 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1830         else {
1831                 td = lockmgr_xholder(lk);
1832                 if (td == (struct thread *)LK_KERNPROC)
1833                         db_printf("XLOCK: LK_KERNPROC\n");
1834                 else
1835                         db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1836                             td->td_tid, td->td_proc->p_pid,
1837                             td->td_proc->p_comm);
1838                 if (lockmgr_recursed(lk))
1839                         db_printf(" recursed: %d\n", lk->lk_recurse);
1840         }
1841         db_printf(" waiters: ");
1842         switch (lk->lk_lock & LK_ALL_WAITERS) {
1843         case LK_SHARED_WAITERS:
1844                 db_printf("shared\n");
1845                 break;
1846         case LK_EXCLUSIVE_WAITERS:
1847                 db_printf("exclusive\n");
1848                 break;
1849         case LK_ALL_WAITERS:
1850                 db_printf("shared and exclusive\n");
1851                 break;
1852         default:
1853                 db_printf("none\n");
1854         }
1855         db_printf(" spinners: ");
1856         if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1857                 db_printf("exclusive\n");
1858         else
1859                 db_printf("none\n");
1860 }
1861 #endif