/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/lockstat.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

/*
 * Hack. There should be prio_t or similar so that this is not necessary.
 */
_Static_assert((PRILASTFLAG * 2) - 1 <= USHRT_MAX,
    "prio flags won't fit in u_short pri in struct lock");

CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define SQ_EXCLUSIVE_QUEUE      0
#define SQ_SHARED_QUEUE         1

#ifndef INVARIANTS
#define _lockmgr_assert(lk, what, file, line)
#endif

#define TD_SLOCKS_INC(td)       ((td)->td_lk_slocks++)
#define TD_SLOCKS_DEC(td)       ((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define STACK_PRINT(lk)
#define STACK_SAVE(lk)
#define STACK_ZERO(lk)
#else
#define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
#define STACK_SAVE(lk)  stack_save(&(lk)->lk_stack)
#define STACK_ZERO(lk)  stack_zero(&(lk)->lk_stack)
#endif

#define LOCK_LOG2(lk, string, arg1, arg2)                               \
        if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
                CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define LOCK_LOG3(lk, string, arg1, arg2, arg3)                         \
        if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
                CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

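/*
 * Giant handling: a thread may sleep on a lockmgr lock while
 * (recursively) holding Giant.  GIANT_SAVE() drops every recursion
 * level of Giant before the sleep, remembering the count in _i, and
 * GIANT_RESTORE() reacquires that many levels afterwards.
 */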
#define GIANT_DECLARE                                                   \
        int _i = 0;                                                     \
        WITNESS_SAVE_DECL(Giant)
#define GIANT_RESTORE() do {                                            \
        if (__predict_false(_i > 0)) {                                  \
                while (_i--)                                            \
                        mtx_lock(&Giant);                               \
                WITNESS_RESTORE(&Giant.lock_object, Giant);             \
        }                                                               \
} while (0)
#define GIANT_SAVE() do {                                               \
        if (__predict_false(mtx_owned(&Giant))) {                       \
                WITNESS_SAVE(&Giant.lock_object, Giant);                \
                while (mtx_owned(&Giant)) {                             \
                        _i++;                                           \
                        mtx_unlock(&Giant);                             \
                }                                                       \
        }                                                               \
} while (0)

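/*
 * Decide whether a new shared lock request can be granted given lock
 * state 'x'.  The common case is a share-locked lock with no exclusive
 * waiters or spinners.  Otherwise the request fails, except that a
 * thread which already holds shared locks (or has TDP_DEADLKTREAT set)
 * may jump ahead of the exclusive waiters to avoid deadlock, unless
 * LK_NODDLKTREAT is given or this is the fast path (fp).
 */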
static bool __always_inline
LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
{

        if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
            LK_SHARE)
                return (true);
        if (fp || (!(x & LK_SHARE)))
                return (false);
        if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
            (curthread->td_pflags & TDP_DEADLKTREAT))
                return (true);
        return (false);
}

#define LK_TRYOP(x)                                                     \
        ((x) & LK_NOWAIT)

#define LK_CAN_WITNESS(x)                                               \
        (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define LK_TRYWIT(x)                                                    \
        (LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define lockmgr_disowned(lk)                                            \
        (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define lockmgr_xlocked_v(v)                                            \
        (((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

#define lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))

static void     assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void     db_show_lockmgr(const struct lock_object *lock);
#endif
static void     lock_lockmgr(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int      owner_lockmgr(const struct lock_object *lock,
                    struct thread **owner);
#endif
static uintptr_t unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
        .lc_name = "lockmgr",
        .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
        .lc_assert = assert_lockmgr,
#ifdef DDB
        .lc_ddb_show = db_show_lockmgr,
#endif
        .lc_lock = lock_lockmgr,
        .lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_lockmgr,
#endif
};

static __read_mostly bool lk_adaptive = true;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
    0, "");
#define lockmgr_delay  locks_delay

struct lockmgr_wait {
        const char *iwmesg;
        int ipri;
        int itimo;
};

static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
    int flags, bool fp);
static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);

static void
lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
{
        struct lock_class *class;

        if (flags & LK_INTERLOCK) {
                class = LOCK_CLASS(ilk);
                class->lc_unlock(ilk);
        }

        if (__predict_false(wakeup_swapper))
                kick_proc0();
}

static void
lockmgr_note_shared_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

        LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
            waittime, file, line, LOCKSTAT_READER);
        LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
        WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
        TD_LOCKS_INC(curthread);
        TD_SLOCKS_INC(curthread);
        STACK_SAVE(lk);
}

static void
lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
{

        WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
        LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
        TD_LOCKS_DEC(curthread);
        TD_SLOCKS_DEC(curthread);
}

static void
lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

        LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
            waittime, file, line, LOCKSTAT_WRITER);
        LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
        WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
            line);
        TD_LOCKS_INC(curthread);
        STACK_SAVE(lk);
}

static void
lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
{

        if (LK_HOLDER(lockmgr_read_value(lk)) != LK_KERNPROC) {
                WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
                TD_LOCKS_DEC(curthread);
        }
        LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
            line);
}

static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
        uintptr_t x;

        x = lockmgr_read_value(lk);
        return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * Assumes the sleepqueue lock is held on entry and returns with it
 * released.  Also assumes that the generic interlock is sane and has
 * been checked by the caller.
 * If LK_INTERLOCK is specified, the interlock is not reacquired after
 * the sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
        GIANT_DECLARE;
        struct lock_class *class;
        int catch, error;

        class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
        catch = pri & PCATCH;
        pri &= PRIMASK;
        error = 0;

        LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
            (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

        if (flags & LK_INTERLOCK)
                class->lc_unlock(ilk);
        if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0) {
                if (lk->lk_exslpfail < USHRT_MAX)
                        lk->lk_exslpfail++;
        }
        GIANT_SAVE();
        sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
            SLEEPQ_INTERRUPTIBLE : 0), queue);
        if ((flags & LK_TIMELOCK) && timo)
                sleepq_set_timeout(&lk->lock_object, timo);

        /*
         * Pick the sleep primitive matching the requested timeout and
         * signal-catching behavior.
         */
        if ((flags & LK_TIMELOCK) && timo && catch)
                error = sleepq_timedwait_sig(&lk->lock_object, pri);
        else if ((flags & LK_TIMELOCK) && timo)
                error = sleepq_timedwait(&lk->lock_object, pri);
        else if (catch)
                error = sleepq_wait_sig(&lk->lock_object, pri);
        else
                sleepq_wait(&lk->lock_object, pri);
        GIANT_RESTORE();
        if ((flags & LK_SLEEPFAIL) && error == 0)
                error = ENOLCK;

        return (error);
}

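/*
 * Release a shared lock, entering the hard path when waiters have to
 * be woken up.  Exclusive waiters are preferred over shared ones,
 * subject to the LK_SLEEPFAIL bookkeeping described below.
 */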
static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
        uintptr_t v, x, orig_x;
        u_int realexslp;
        int queue, wakeup_swapper;

        wakeup_swapper = 0;
        for (;;) {
                x = lockmgr_read_value(lk);
                if (lockmgr_sunlock_try(lk, &x))
                        break;

                /*
                 * We should have a sharer with waiters, so enter the hard
                 * path in order to handle wakeups correctly.
                 */
                sleepq_lock(&lk->lock_object);
                orig_x = lockmgr_read_value(lk);
retry_sleepq:
                x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
                v = LK_UNLOCKED;

                /*
                 * If the lock has exclusive waiters, give them preference in
                 * order to avoid deadlock with the shared runners-up.
                 * If interruptible sleeps left the exclusive queue empty,
                 * avoid starvation of the threads sleeping on the shared
                 * queue by giving them precedence and clearing the
                 * exclusive waiters bit anyway.
                 * Note that the lk_exslpfail count may overstate the real
                 * number of waiters with the LK_SLEEPFAIL flag on, because
                 * such sleeps may also be interruptible, so lk_exslpfail
                 * should be treated as an upper bound.
                 */
                realexslp = sleepq_sleepcnt(&lk->lock_object,
                    SQ_EXCLUSIVE_QUEUE);
                if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
                        if (lk->lk_exslpfail != USHRT_MAX &&
                            lk->lk_exslpfail < realexslp) {
                                lk->lk_exslpfail = 0;
                                queue = SQ_EXCLUSIVE_QUEUE;
                                v |= (x & LK_SHARED_WAITERS);
                        } else {
                                lk->lk_exslpfail = 0;
                                LOCK_LOG2(lk,
                                    "%s: %p has only LK_SLEEPFAIL sleepers",
                                    __func__, lk);
                                LOCK_LOG2(lk,
                            "%s: %p waking up threads on the exclusive queue",
                                    __func__, lk);
                                wakeup_swapper =
                                    sleepq_broadcast(&lk->lock_object,
                                    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
                                queue = SQ_SHARED_QUEUE;
                        }
                } else {
                        /*
                         * Exclusive waiters sleeping with LK_SLEEPFAIL on
                         * and using interruptible sleeps/timeout may have
                         * left spurious lk_exslpfail counts on, so clean
                         * them up anyway.
                         */
                        lk->lk_exslpfail = 0;
                        queue = SQ_SHARED_QUEUE;
                }

                if (lockmgr_sunlock_try(lk, &orig_x)) {
                        sleepq_release(&lk->lock_object);
                        break;
                }

                x |= LK_SHARERS_LOCK(1);
                if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
                        orig_x = x;
                        goto retry_sleepq;
                }
                LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
                    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
                    "exclusive");
                wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
                    0, queue);
                sleepq_release(&lk->lock_object);
                break;
        }

        LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
        return (wakeup_swapper);
}

static void
assert_lockmgr(const struct lock_object *lock, int what)
{

        panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, uintptr_t how)
{

        panic("lockmgr locks do not support sleep interlocking");
}

static uintptr_t
unlock_lockmgr(struct lock_object *lock)
{

        panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

        panic("lockmgr locks do not support owner inquiring");
}
#endif

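/*
 * Initialize a lockmgr lock.  'pri' and 'timo' become the default sleep
 * priority and timeout, and 'flags' may contain any of the LK_INIT_MASK
 * flags.  An illustrative (hypothetical) call could be:
 *
 *      lockinit(&lk, PVFS, "mylock", 0, LK_CANRECURSE);
 */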
void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
        int iflags;

        MPASS((flags & ~LK_INIT_MASK) == 0);
        ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
            ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
            &lk->lk_lock));

        iflags = LO_SLEEPABLE | LO_UPGRADABLE;
        if (flags & LK_CANRECURSE)
                iflags |= LO_RECURSABLE;
        if ((flags & LK_NODUP) == 0)
                iflags |= LO_DUPOK;
        if (flags & LK_NOPROFILE)
                iflags |= LO_NOPROFILE;
        if ((flags & LK_NOWITNESS) == 0)
                iflags |= LO_WITNESS;
        if (flags & LK_QUIET)
                iflags |= LO_QUIET;
        if (flags & LK_IS_VNODE)
                iflags |= LO_IS_VNODE;
        if (flags & LK_NEW)
                iflags |= LO_NEW;
        iflags |= flags & LK_NOSHARE;

        lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
        lk->lk_lock = LK_UNLOCKED;
        lk->lk_recurse = 0;
        lk->lk_exslpfail = 0;
        lk->lk_timo = timo;
        lk->lk_pri = pri;
        STACK_ZERO(lk);
}

/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

        lockmgr_assert(lk, KA_XLOCKED);
        lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockdisableshare(struct lock *lk)
{

        lockmgr_assert(lk, KA_XLOCKED);
        lk->lock_object.lo_flags |= LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

        lockmgr_assert(lk, KA_XLOCKED);
        lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

        lockmgr_assert(lk, KA_XLOCKED);
        lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}

void
lockdestroy(struct lock *lk)
{

        KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
        KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
        KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
        lock_destroy(&lk->lock_object);
}

static bool __always_inline
lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
{

        /*
         * If no other thread has an exclusive lock, or
         * no exclusive waiter is present, bump the count of
         * sharers.  Since we have to preserve the state of
         * waiters, if we fail to acquire the shared lock
         * loop back and retry.
         */
        while (LK_CAN_SHARE(*xp, flags, fp)) {
                if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
                    *xp + LK_ONE_SHARER)) {
                        return (true);
                }
        }
        return (false);
}

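/*
 * Fast path for shared unlock: drop one sharer reference, unless we are
 * the last sharer and there are waiters to wake up, in which case the
 * caller has to take the hard path.
 */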
static bool __always_inline
lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
{

        for (;;) {
                if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
                        if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
                            *xp - LK_ONE_SHARER))
                                return (true);
                        continue;
                }
                break;
        }
        return (false);
}

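/*
 * Adaptively spin while the exclusive owner is running on another CPU,
 * in the hope that the lock will be released soon.  Returns true once
 * the lock appears acquirable in shared mode, false when spinning is
 * pointless (disowned lock, sharers present, sleeping owner or queued
 * waiters).
 */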
static bool
lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
    int flags)
{
        struct thread *owner;
        uintptr_t x;

        x = *xp;
        MPASS(x != LK_UNLOCKED);
        owner = (struct thread *)LK_HOLDER(x);
        for (;;) {
                MPASS(owner != curthread);
                if (owner == (struct thread *)LK_KERNPROC)
                        return (false);
                if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
                        return (false);
                if (owner == NULL)
                        return (false);
                if (!TD_IS_RUNNING(owner))
                        return (false);
                if ((x & LK_ALL_WAITERS) != 0)
                        return (false);
                lock_delay(lda);
                x = lockmgr_read_value(lk);
                if (LK_CAN_SHARE(x, flags, false)) {
                        *xp = x;
                        return (true);
                }
                owner = (struct thread *)LK_HOLDER(x);
        }
}

static __noinline int
lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
        uintptr_t tid, x;
        int error = 0;
        const char *iwmesg;
        int ipri, itimo;

#ifdef KDTRACE_HOOKS
        uint64_t sleep_time = 0;
#endif
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        struct lock_delay_arg lda;

        if (KERNEL_PANICKED())
                goto out;

        tid = (uintptr_t)curthread;

        if (LK_CAN_WITNESS(flags))
                WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
                    file, line, flags & LK_INTERLOCK ? ilk : NULL);
        x = lockmgr_read_value(lk);
        lock_delay_arg_init(&lda, &lockmgr_delay);
        if (!lk_adaptive)
                flags &= ~LK_ADAPTIVE;
        /*
         * The lock may already be locked exclusive by curthread,
         * avoid deadlock.
         */
        if (LK_HOLDER(x) == tid) {
                LOCK_LOG2(lk,
                    "%s: %p already held in exclusive mode",
                    __func__, lk);
                error = EDEADLK;
                goto out;
        }

        for (;;) {
                if (lockmgr_slock_try(lk, &x, flags, false))
                        break;

                lock_profile_obtain_lock_failed(&lk->lock_object, false,
                    &contested, &waittime);

                if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
                        if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
                                continue;
                }

#ifdef HWPMC_HOOKS
                PMC_SOFT_CALL( , , lock, failed);
#endif

                /*
                 * If the caller does not want to sleep, just give up
                 * and return.
                 */
                if (LK_TRYOP(flags)) {
                        LOCK_LOG2(lk, "%s: %p fails the try operation",
                            __func__, lk);
                        error = EBUSY;
                        break;
                }

                /*
                 * Acquire the sleepqueue chain lock because we will
                 * probably need to manipulate the waiters flags.
                 */
                sleepq_lock(&lk->lock_object);
                x = lockmgr_read_value(lk);
retry_sleepq:

                /*
                 * If the lock can be acquired in shared mode, try
                 * again.
                 */
                if (LK_CAN_SHARE(x, flags, false)) {
                        sleepq_release(&lk->lock_object);
                        continue;
                }

                /*
                 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
                 * loop back and retry.
                 */
                if ((x & LK_SHARED_WAITERS) == 0) {
                        if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
                            x | LK_SHARED_WAITERS)) {
                                goto retry_sleepq;
                        }
                        LOCK_LOG2(lk, "%s: %p set shared waiters flag",
                            __func__, lk);
                }

                if (lwa == NULL) {
                        iwmesg = lk->lock_object.lo_name;
                        ipri = lk->lk_pri;
                        itimo = lk->lk_timo;
                } else {
                        iwmesg = lwa->iwmesg;
                        ipri = lwa->ipri;
                        itimo = lwa->itimo;
                }

                /*
                 * Since we were unable to acquire the shared lock
                 * and the shared waiters flag is set, we will sleep.
                 */
#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs(&lk->lock_object);
#endif
                error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
                    SQ_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs(&lk->lock_object);
#endif
                flags &= ~LK_INTERLOCK;
                if (error) {
                        LOCK_LOG3(lk,
                            "%s: interrupted sleep for %p with %d",
                            __func__, lk, error);
                        break;
                }
                LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
                    __func__, lk);
                x = lockmgr_read_value(lk);
        }
        if (error == 0) {
#ifdef KDTRACE_HOOKS
                if (sleep_time != 0)
                        LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
                            LOCKSTAT_READER, (x & LK_SHARE) == 0,
                            (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#endif
#ifdef LOCK_PROFILING
                lockmgr_note_shared_acquire(lk, contested, waittime,
                    file, line, flags);
#else
                lockmgr_note_shared_acquire(lk, 0, 0, file, line,
                    flags);
#endif
        }

out:
        lockmgr_exit(flags, ilk, 0);
        return (error);
}

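/*
 * Exclusive-mode counterpart of lockmgr_slock_adaptive(): spin while
 * the owner is running, returning true once the lock is observed
 * unlocked.
 */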
static bool
lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
{
        struct thread *owner;
        uintptr_t x;

        x = *xp;
        MPASS(x != LK_UNLOCKED);
        owner = (struct thread *)LK_HOLDER(x);
        for (;;) {
                MPASS(owner != curthread);
                if (owner == NULL)
                        return (false);
                if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
                        return (false);
                if (owner == (struct thread *)LK_KERNPROC)
                        return (false);
                if (!TD_IS_RUNNING(owner))
                        return (false);
                if ((x & LK_ALL_WAITERS) != 0)
                        return (false);
                lock_delay(lda);
                x = lockmgr_read_value(lk);
                if (x == LK_UNLOCKED) {
                        *xp = x;
                        return (true);
                }
                owner = (struct thread *)LK_HOLDER(x);
        }
}

static __noinline int
lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
        struct lock_class *class;
        uintptr_t tid, x, v;
        int error = 0;
        const char *iwmesg;
        int ipri, itimo;

#ifdef KDTRACE_HOOKS
        uint64_t sleep_time = 0;
#endif
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        struct lock_delay_arg lda;

        if (KERNEL_PANICKED())
                goto out;

        tid = (uintptr_t)curthread;

        if (LK_CAN_WITNESS(flags))
                WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
                    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
                    ilk : NULL);

        /*
         * If curthread already holds the lock and this one is
         * allowed to recurse, simply recurse on it.
         */
        if (lockmgr_xlocked(lk)) {
                if ((flags & LK_CANRECURSE) == 0 &&
                    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
                        /*
                         * If this is a try operation, just give up
                         * and return instead of panicking.
                         */
                        if (LK_TRYOP(flags)) {
                                LOCK_LOG2(lk,
                                    "%s: %p fails the try operation",
                                    __func__, lk);
                                error = EBUSY;
                                goto out;
                        }
                        if (flags & LK_INTERLOCK) {
                                class = LOCK_CLASS(ilk);
                                class->lc_unlock(ilk);
                        }
                        STACK_PRINT(lk);
                        panic("%s: recursing on non recursive lockmgr %p "
                            "@ %s:%d\n", __func__, lk, file, line);
                }
                atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
                lk->lk_recurse++;
                LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
                LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
                    lk->lk_recurse, file, line);
                WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
                    LK_TRYWIT(flags), file, line);
                TD_LOCKS_INC(curthread);
                goto out;
        }

        x = LK_UNLOCKED;
        lock_delay_arg_init(&lda, &lockmgr_delay);
        if (!lk_adaptive)
                flags &= ~LK_ADAPTIVE;
        for (;;) {
                if (x == LK_UNLOCKED) {
                        if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
                                break;
                        continue;
                }

                lock_profile_obtain_lock_failed(&lk->lock_object, false,
                    &contested, &waittime);

                if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
                        if (lockmgr_xlock_adaptive(&lda, lk, &x))
                                continue;
                }
#ifdef HWPMC_HOOKS
                PMC_SOFT_CALL( , , lock, failed);
#endif

                /*
                 * If the caller does not want to sleep, just give up
                 * and return.
                 */
                if (LK_TRYOP(flags)) {
                        LOCK_LOG2(lk, "%s: %p fails the try operation",
                            __func__, lk);
                        error = EBUSY;
                        break;
                }

                /*
                 * Acquire the sleepqueue chain lock because we will
                 * probably need to manipulate the waiters flags.
                 */
                sleepq_lock(&lk->lock_object);
                x = lockmgr_read_value(lk);
retry_sleepq:

                /*
                 * If the lock has been released while we spun on
                 * the sleepqueue chain lock, just try again.
                 */
                if (x == LK_UNLOCKED) {
                        sleepq_release(&lk->lock_object);
                        continue;
                }

                /*
                 * The lock can be in a state where there is a pending
                 * queue of waiters but still no owner.  This happens
                 * when the lock is contested and a new owner is about
                 * to claim it.  If curthread is the one that succeeds
                 * in acquiring it, claim lock ownership and return,
                 * preserving the waiters flags.
                 */
                v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
                if ((x & ~v) == LK_UNLOCKED) {
                        v &= ~LK_EXCLUSIVE_SPINNERS;
                        if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
                            tid | v)) {
                                sleepq_release(&lk->lock_object);
                                LOCK_LOG2(lk,
                                    "%s: %p claimed by a new writer",
                                    __func__, lk);
                                break;
                        }
                        goto retry_sleepq;
                }

                /*
                 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
                 * fail, loop back and retry.
                 */
                if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
                        if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
                            x | LK_EXCLUSIVE_WAITERS)) {
                                goto retry_sleepq;
                        }
                        LOCK_LOG2(lk, "%s: %p set excl waiters flag",
                            __func__, lk);
                }

                if (lwa == NULL) {
                        iwmesg = lk->lock_object.lo_name;
                        ipri = lk->lk_pri;
                        itimo = lk->lk_timo;
                } else {
                        iwmesg = lwa->iwmesg;
                        ipri = lwa->ipri;
                        itimo = lwa->itimo;
                }

                /*
                 * Since we were unable to acquire the exclusive lock
                 * and the exclusive waiters flag is set, we will
                 * sleep.
                 */
#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs(&lk->lock_object);
#endif
                error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
                    SQ_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs(&lk->lock_object);
#endif
                flags &= ~LK_INTERLOCK;
                if (error) {
                        LOCK_LOG3(lk,
                            "%s: interrupted sleep for %p with %d",
                            __func__, lk, error);
                        break;
                }
                LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
                    __func__, lk);
                x = lockmgr_read_value(lk);
        }
        if (error == 0) {
#ifdef KDTRACE_HOOKS
                if (sleep_time != 0)
                        LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
                            LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
                            (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#endif
#ifdef LOCK_PROFILING
                lockmgr_note_exclusive_acquire(lk, contested, waittime,
                    file, line, flags);
#else
                lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
                    flags);
#endif
        }

out:
        lockmgr_exit(flags, ilk, 0);
        return (error);
}

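/*
 * Upgrade a shared lock to an exclusive one.  Only the last remaining
 * sharer can upgrade in place while preserving the waiters flags; with
 * multiple sharers LK_TRYUPGRADE fails with EBUSY, while LK_UPGRADE
 * drops the shared lock and falls back to a full exclusive acquisition.
 */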
static __noinline int
lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
        uintptr_t tid, v, setv;
        int error = 0;
        int op;

        if (KERNEL_PANICKED())
                goto out;

        tid = (uintptr_t)curthread;

        _lockmgr_assert(lk, KA_SLOCKED, file, line);

        op = flags & LK_TYPE_MASK;
        v = lockmgr_read_value(lk);
        for (;;) {
                if (LK_SHARERS(v) > 1) {
                        if (op == LK_TRYUPGRADE) {
                                LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
                                    __func__, lk);
                                error = EBUSY;
                                goto out;
                        }
                        if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &v,
                            v - LK_ONE_SHARER)) {
                                lockmgr_note_shared_release(lk, file, line);
                                goto out_xlock;
                        }
                        continue;
                }
                MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));

                setv = tid;
                setv |= (v & LK_ALL_WAITERS);

                /*
                 * Try to switch from one shared lock to an exclusive one.
                 * We need to preserve waiters flags during the operation.
                 */
                if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
                        LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
                            line);
                        WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
                            LK_TRYWIT(flags), file, line);
                        LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
                        TD_SLOCKS_DEC(curthread);
                        goto out;
                }
        }

out_xlock:
        error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
        flags &= ~LK_INTERLOCK;
out:
        lockmgr_exit(flags, ilk, 0);
        return (error);
}

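/*
 * Flag-driven lock entry point: handles the fast paths for shared and
 * exclusive acquisitions as well as upgrades, falling back to the hard
 * paths (and to __lockmgr_args() for everything else) as needed.
 */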
int
lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
        struct lock_class *class;
        uintptr_t x, tid;
        u_int op;
        bool locked;

        if (KERNEL_PANICKED())
                return (0);

        op = flags & LK_TYPE_MASK;
        locked = false;
        switch (op) {
        case LK_SHARED:
                if (LK_CAN_WITNESS(flags))
                        WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
                            file, line, flags & LK_INTERLOCK ? ilk : NULL);
                if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
                        break;
                x = lockmgr_read_value(lk);
                if (lockmgr_slock_try(lk, &x, flags, true)) {
                        lockmgr_note_shared_acquire(lk, 0, 0,
                            file, line, flags);
                        locked = true;
                } else {
                        return (lockmgr_slock_hard(lk, flags, ilk, file, line,
                            NULL));
                }
                break;
        case LK_EXCLUSIVE:
                if (LK_CAN_WITNESS(flags))
                        WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
                            LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
                            ilk : NULL);
                tid = (uintptr_t)curthread;
                if (lockmgr_read_value(lk) == LK_UNLOCKED &&
                    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
                        lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
                            flags);
                        locked = true;
                } else {
                        return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
                            NULL));
                }
                break;
        case LK_UPGRADE:
        case LK_TRYUPGRADE:
                return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
        default:
                break;
        }
        if (__predict_true(locked)) {
                if (__predict_false(flags & LK_INTERLOCK)) {
                        class = LOCK_CLASS(ilk);
                        class->lc_unlock(ilk);
                }
                return (0);
        } else {
                return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
                    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
        }
}

static __noinline int
lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
        int wakeup_swapper = 0;

        if (KERNEL_PANICKED())
                goto out;

        wakeup_swapper = wakeupshlk(lk, file, line);

out:
        lockmgr_exit(flags, ilk, wakeup_swapper);
        return (0);
}

static __noinline int
lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
        uintptr_t tid, v;
        int wakeup_swapper = 0;
        u_int realexslp;
        int queue;

        if (KERNEL_PANICKED())
                goto out;

        tid = (uintptr_t)curthread;

        /*
         * As a first attempt, treat the lock as if it has no
         * waiters.
         * Fix up the tid variable if the lock has been disowned.
         */
        if (LK_HOLDER(x) == LK_KERNPROC)
                tid = LK_KERNPROC;

        /*
         * The lock is held in exclusive mode.
         * If the lock is recursed also, then unrecurse it.
         */
        if (lockmgr_recursed_v(x)) {
                LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
                lk->lk_recurse--;
                if (lk->lk_recurse == 0)
                        atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
                goto out;
        }
        if (tid != LK_KERNPROC)
                LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
                    LOCKSTAT_WRITER);

        if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
                goto out;

        sleepq_lock(&lk->lock_object);
        x = lockmgr_read_value(lk);
        v = LK_UNLOCKED;

        /*
         * If the lock has exclusive waiters, give them
         * preference in order to avoid deadlock with the
         * shared runners-up.
         * If interruptible sleeps left the exclusive queue
         * empty, avoid starvation of the threads sleeping
         * on the shared queue by giving them precedence
         * and clearing the exclusive waiters bit anyway.
         * Note that the lk_exslpfail count may overstate
         * the real number of waiters with the LK_SLEEPFAIL
         * flag on, because such sleeps may also be
         * interruptible, so lk_exslpfail should be treated
         * as an upper bound.
         */
        MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
        realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
        if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
                if (lk->lk_exslpfail != USHRT_MAX &&
                    lk->lk_exslpfail < realexslp) {
                        lk->lk_exslpfail = 0;
                        queue = SQ_EXCLUSIVE_QUEUE;
                        v |= (x & LK_SHARED_WAITERS);
                } else {
                        lk->lk_exslpfail = 0;
                        LOCK_LOG2(lk,
                            "%s: %p has only LK_SLEEPFAIL sleepers",
                            __func__, lk);
                        LOCK_LOG2(lk,
                            "%s: %p waking up threads on the exclusive queue",
                            __func__, lk);
                        wakeup_swapper = sleepq_broadcast(&lk->lock_object,
                            SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
                        queue = SQ_SHARED_QUEUE;
                }
        } else {
                /*
                 * Exclusive waiters sleeping with LK_SLEEPFAIL
                 * on and using interruptible sleeps/timeout
                 * may have left spurious lk_exslpfail counts
                 * on, so clean them up anyway.
                 */
                lk->lk_exslpfail = 0;
                queue = SQ_SHARED_QUEUE;
        }

        LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
            __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
            "exclusive");
        atomic_store_rel_ptr(&lk->lk_lock, v);
        wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
        sleepq_release(&lk->lock_object);

out:
        lockmgr_exit(flags, ilk, wakeup_swapper);
        return (0);
}

/*
 * Lightweight entry points for common operations.
 *
 * Functionality is similar to sx locks, in that none of the additional lockmgr
 * features are supported.  To be clear, these are NOT supported:
 * 1. shared locking disablement
 * 2. returning with an error after sleep
 * 3. unlocking the interlock
 *
 * If in doubt, use lockmgr_lock_flags.
 */
int
lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
{
        uintptr_t x;

        MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
        MPASS((flags & LK_INTERLOCK) == 0);
        MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);

        if (LK_CAN_WITNESS(flags))
                WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
                    file, line, NULL);
        x = lockmgr_read_value(lk);
        if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
                lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
                return (0);
        }

        return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
}

int
lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
{
        uintptr_t tid;

        MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
        MPASS((flags & LK_INTERLOCK) == 0);

        if (LK_CAN_WITNESS(flags))
                WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
                    LOP_EXCLUSIVE, file, line, NULL);
        tid = (uintptr_t)curthread;
        if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
                lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
                    flags);
                return (0);
        }

        return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
}

int
lockmgr_unlock(struct lock *lk)
{
        uintptr_t x, tid;
        const char *file;
        int line;

        file = __FILE__;
        line = __LINE__;

        _lockmgr_assert(lk, KA_LOCKED, file, line);
        x = lockmgr_read_value(lk);
        if (__predict_true((x & LK_SHARE) != 0)) {
                lockmgr_note_shared_release(lk, file, line);
                if (lockmgr_sunlock_try(lk, &x)) {
                        LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
                } else {
                        return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
                }
        } else {
                tid = (uintptr_t)curthread;
                lockmgr_note_exclusive_release(lk, file, line);
                if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
                        LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
                } else {
                        return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
                }
        }
        return (0);
}

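/*
 * Generic lockmgr entry point.  Unlike the lightweight entry points
 * above, it accepts custom wmesg/pri/timo values and an optional
 * interlock, and dispatches on the requested operation (LK_SHARED,
 * LK_EXCLUSIVE, LK_UPGRADE, LK_DOWNGRADE, LK_RELEASE, LK_DRAIN).
 */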
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
        GIANT_DECLARE;
        struct lockmgr_wait lwa;
        struct lock_class *class;
        const char *iwmesg;
        uintptr_t tid, v, x;
        u_int op, realexslp;
        int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif

        if (KERNEL_PANICKED())
                return (0);

        error = 0;
        tid = (uintptr_t)curthread;
        op = (flags & LK_TYPE_MASK);
        iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
        ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
        itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

        lwa.iwmesg = iwmesg;
        lwa.ipri = ipri;
        lwa.itimo = itimo;

        MPASS((flags & ~LK_TOTAL_MASK) == 0);
        KASSERT((op & (op - 1)) == 0,
            ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
        KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
            (op != LK_DOWNGRADE && op != LK_RELEASE),
            ("%s: Invalid flags in regard of the operation desired @ %s:%d",
            __func__, file, line));
        KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
            ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
            __func__, file, line));
        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
            lk->lock_object.lo_name, file, line));

        class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;

        if (lk->lock_object.lo_flags & LK_NOSHARE) {
                switch (op) {
                case LK_SHARED:
                        op = LK_EXCLUSIVE;
                        break;
                case LK_UPGRADE:
                case LK_TRYUPGRADE:
                case LK_DOWNGRADE:
                        _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
                            file, line);
                        if (flags & LK_INTERLOCK)
                                class->lc_unlock(ilk);
                        return (0);
                }
        }

        wakeup_swapper = 0;
        switch (op) {
        case LK_SHARED:
                return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
                break;
        case LK_UPGRADE:
        case LK_TRYUPGRADE:
                return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
                break;
        case LK_EXCLUSIVE:
                return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
                break;
        case LK_DOWNGRADE:
                _lockmgr_assert(lk, KA_XLOCKED, file, line);
                WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

                /*
                 * Panic if the lock is recursed.
                 */
                if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
                        if (flags & LK_INTERLOCK)
                                class->lc_unlock(ilk);
                        panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
                            __func__, iwmesg, file, line);
                }
                TD_SLOCKS_INC(curthread);

                /*
                 * In order to preserve waiters flags, just spin.
                 */
                for (;;) {
                        x = lockmgr_read_value(lk);
                        MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
                        x &= LK_ALL_WAITERS;
                        if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
                            LK_SHARERS_LOCK(1) | x))
                                break;
                        cpu_spinwait();
                }
                LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
                LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
                break;
        case LK_RELEASE:
                _lockmgr_assert(lk, KA_LOCKED, file, line);
                x = lockmgr_read_value(lk);

                if (__predict_true((x & LK_SHARE) != 0)) {
                        lockmgr_note_shared_release(lk, file, line);
                        return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
                } else {
                        lockmgr_note_exclusive_release(lk, file, line);
                        return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
                }
                break;
1423         case LK_DRAIN:
1424                 if (LK_CAN_WITNESS(flags))
1425                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1426                             LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1427                             ilk : NULL);
1428
1429                 /*
1430                  * Trying to drain a lock we already own will result in a
1431                  * deadlock.
1432                  */
1433                 if (lockmgr_xlocked(lk)) {
1434                         if (flags & LK_INTERLOCK)
1435                                 class->lc_unlock(ilk);
1436                         panic("%s: draining %s with the lock held @ %s:%d\n",
1437                             __func__, iwmesg, file, line);
1438                 }
1439
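                /*
                 * Main drain loop: keep retrying until the lock can be
                 * acquired while completely unlocked.  Every failed attempt
                 * records the contention, wakes up already-queued threads
                 * once only waiter bits are left, sets the exclusive waiters
                 * flag and then sleeps on the exclusive queue until the lock
                 * drains.
                 */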
1440                 for (;;) {
1441                         if (lk->lk_lock == LK_UNLOCKED &&
1442                             atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1443                                 break;
1444
1445 #ifdef HWPMC_HOOKS
1446                         PMC_SOFT_CALL( , , lock, failed);
1447 #endif
1448                         lock_profile_obtain_lock_failed(&lk->lock_object, false,
1449                             &contested, &waittime);
1450
1451                         /*
1452                          * If the operation is not allowed to sleep,
1453                          * just give up and return.
1454                          */
1455                         if (LK_TRYOP(flags)) {
1456                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
1457                                     __func__, lk);
1458                                 error = EBUSY;
1459                                 break;
1460                         }
1461
1462                         /*
1463                          * Acquire the sleepqueue chain lock because we will
1464                          * probably need to manipulate the waiters flags.
1465                          */
1466                         sleepq_lock(&lk->lock_object);
1467                         x = lockmgr_read_value(lk);
1468
1469                         /*
1470                          * If the lock has been released while we spun on
1471                          * the sleepqueue chain lock, just try again.
1472                          */
1473                         if (x == LK_UNLOCKED) {
1474                                 sleepq_release(&lk->lock_object);
1475                                 continue;
1476                         }
1477
1478                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1479                         if ((x & ~v) == LK_UNLOCKED) {
1480                                 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1481
1482                                 /*
1483                                  * If interruptible sleeps left the exclusive
1484                                  * queue empty, avoid starvation of the
1485                                  * threads sleeping on the shared queue by
1486                                  * giving them precedence and cleaning up the
1487                                  * exclusive waiters bit anyway.
1488                                  * Please note that the lk_exslpfail count
1489                                  * may not match the real number of waiters
1490                                  * with the LK_SLEEPFAIL flag set, because
1491                                  * such waiters may also be using
1492                                  * interruptible sleeps, so lk_exslpfail
1493                                  * should only be treated as an upper bound,
1494                                  * including the edge cases.
1495                                  */
1496                                 if (v & LK_EXCLUSIVE_WAITERS) {
1497                                         queue = SQ_EXCLUSIVE_QUEUE;
1498                                         v &= ~LK_EXCLUSIVE_WAITERS;
1499                                 } else {
1500                                         /*
1501                                          * Exclusive waiters sleeping with
1502                                          * LK_SLEEPFAIL on and using
1503                                          * interruptible sleeps/timeouts may have
1504                                          * left a spurious lk_exslpfail count
1505                                          * behind, so clean it up anyway.
1506                                          */
1507                                         MPASS(v & LK_SHARED_WAITERS);
1508                                         lk->lk_exslpfail = 0;
1509                                         queue = SQ_SHARED_QUEUE;
1510                                         v &= ~LK_SHARED_WAITERS;
1511                                 }
1512                                 if (queue == SQ_EXCLUSIVE_QUEUE) {
1513                                         realexslp =
1514                                             sleepq_sleepcnt(&lk->lock_object,
1515                                             SQ_EXCLUSIVE_QUEUE);
1516                                         if (lk->lk_exslpfail >= realexslp) {
1517                                                 lk->lk_exslpfail = 0;
1518                                                 queue = SQ_SHARED_QUEUE;
1519                                                 v &= ~LK_SHARED_WAITERS;
1520                                                 if (realexslp != 0) {
1521                                                         LOCK_LOG2(lk,
1522                                         "%s: %p has only LK_SLEEPFAIL sleepers",
1523                                                             __func__, lk);
1524                                                         LOCK_LOG2(lk,
1525                         "%s: %p waking up threads on the exclusive queue",
1526                                                             __func__, lk);
1527                                                         wakeup_swapper =
1528                                                             sleepq_broadcast(
1529                                                             &lk->lock_object,
1530                                                             SLEEPQ_LK, 0,
1531                                                             SQ_EXCLUSIVE_QUEUE);
1532                                                 }
1533                                         } else
1534                                                 lk->lk_exslpfail = 0;
1535                                 }
1536                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1537                                         sleepq_release(&lk->lock_object);
1538                                         continue;
1539                                 }
1540                                 LOCK_LOG3(lk,
1541                                 "%s: %p waking up all threads on the %s queue",
1542                                     __func__, lk, queue == SQ_SHARED_QUEUE ?
1543                                     "shared" : "exclusive");
1544                                 wakeup_swapper |= sleepq_broadcast(
1545                                     &lk->lock_object, SLEEPQ_LK, 0, queue);
1546
1547                                 /*
1548                                  * If shared waiters have been woken up, we need
1549                                  * to wait for one of them to acquire the lock
1550                                  * before setting the exclusive waiters flag, in
1551                                  * order to avoid a deadlock.
1552                                  */
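                                /*
                                 * (The concern here is that, with the
                                 * exclusive waiters bit already set and no
                                 * owner, freshly woken shared waiters would
                                 * typically be denied the lock again and go
                                 * back to sleep, leaving no future unlock to
                                 * wake the draining thread.)
                                 */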
1553                                 if (queue == SQ_SHARED_QUEUE) {
1554                                         for (v = lk->lk_lock;
1555                                             (v & LK_SHARE) && !LK_SHARERS(v);
1556                                             v = lk->lk_lock)
1557                                                 cpu_spinwait();
1558                                 }
1559                         }
1560
1561                         /*
1562                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1563                          * fail, loop back and retry.
1564                          */
1565                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1566                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1567                                     x | LK_EXCLUSIVE_WAITERS)) {
1568                                         sleepq_release(&lk->lock_object);
1569                                         continue;
1570                                 }
1571                                 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1572                                     __func__, lk);
1573                         }
1574
1575                         /*
1576                          * Since we have been unable to acquire the
1577                          * exclusive lock and the exclusive waiters flag
1578                          * is set, we will sleep.
1579                          */
1580                         if (flags & LK_INTERLOCK) {
1581                                 class->lc_unlock(ilk);
1582                                 flags &= ~LK_INTERLOCK;
1583                         }
1584                         GIANT_SAVE();
1585                         sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1586                             SQ_EXCLUSIVE_QUEUE);
1587                         sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1588                         GIANT_RESTORE();
1589                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1590                             __func__, lk);
1591                 }
1592
1593                 if (error == 0) {
1594                         lock_profile_obtain_lock_success(&lk->lock_object,
1595                             false, contested, waittime, file, line);
1596                         LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1597                             lk->lk_recurse, file, line);
1598                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1599                             LK_TRYWIT(flags), file, line);
1600                         TD_LOCKS_INC(curthread);
1601                         STACK_SAVE(lk);
1602                 }
1603                 break;
1604         default:
1605                 if (flags & LK_INTERLOCK)
1606                         class->lc_unlock(ilk);
1607                 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1608         }
1609
1610         if (flags & LK_INTERLOCK)
1611                 class->lc_unlock(ilk);
1612         if (wakeup_swapper)
1613                 kick_proc0();
1614
1615         return (error);
1616 }
1617
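/*
 * Hand ownership of an exclusively held lock over to LK_KERNPROC, so that it
 * no longer belongs to curthread and may later be released by any thread.
 * The waiters flags are preserved across the handover.
 */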
1618 void
1619 _lockmgr_disown(struct lock *lk, const char *file, int line)
1620 {
1621         uintptr_t tid, x;
1622
1623         if (SCHEDULER_STOPPED())
1624                 return;
1625
1626         tid = (uintptr_t)curthread;
1627         _lockmgr_assert(lk, KA_XLOCKED, file, line);
1628
1629         /*
1630          * Panic if the lock is recursed.
1631          */
1632         if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1633                 panic("%s: disown a recursed lockmgr @ %s:%d\n",
1634                     __func__,  file, line);
1635
1636         /*
1637          * If the owner is already LK_KERNPROC, just skip the whole operation.
1638          */
1639         if (LK_HOLDER(lk->lk_lock) != tid)
1640                 return;
1641         lock_profile_release_lock(&lk->lock_object, false);
1642         LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1643         LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1644         WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1645         TD_LOCKS_DEC(curthread);
1646         STACK_SAVE(lk);
1647
1648         /*
1649          * In order to preserve the waiters flags, just spin.
1650          */
1651         for (;;) {
1652                 x = lockmgr_read_value(lk);
1653                 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1654                 x &= LK_ALL_WAITERS;
1655                 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1656                     LK_KERNPROC | x))
1657                         return;
1658                 cpu_spinwait();
1659         }
1660 }
1661
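/*
 * Print a human-readable description of the lock state (owner or share count
 * plus any pending waiters/spinners) to the console, along with the saved
 * stack trace when DEBUG_LOCKS is enabled.
 */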
1662 void
1663 lockmgr_printinfo(const struct lock *lk)
1664 {
1665         struct thread *td;
1666         uintptr_t x;
1667
1668         if (lk->lk_lock == LK_UNLOCKED)
1669                 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1670         else if (lk->lk_lock & LK_SHARE)
1671                 printf("lock type %s: SHARED (count %ju)\n",
1672                     lk->lock_object.lo_name,
1673                     (uintmax_t)LK_SHARERS(lk->lk_lock));
1674         else {
1675                 td = lockmgr_xholder(lk);
1676                 if (td == (struct thread *)LK_KERNPROC)
1677                         printf("lock type %s: EXCL by KERNPROC\n",
1678                             lk->lock_object.lo_name);
1679                 else
1680                         printf("lock type %s: EXCL by thread %p "
1681                             "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1682                             td, td->td_proc->p_pid, td->td_proc->p_comm,
1683                             td->td_tid);
1684         }
1685
1686         x = lk->lk_lock;
1687         if (x & LK_EXCLUSIVE_WAITERS)
1688                 printf(" with exclusive waiters pending\n");
1689         if (x & LK_SHARED_WAITERS)
1690                 printf(" with shared waiters pending\n");
1691         if (x & LK_EXCLUSIVE_SPINNERS)
1692                 printf(" with exclusive spinners pending\n");
1693
1694         STACK_PRINT(lk);
1695 }
1696
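/*
 * Report how the lock is held relative to the calling thread: LK_EXCLUSIVE if
 * it is exclusively held by curthread (or has been disowned to LK_KERNPROC),
 * LK_EXCLOTHER if it is exclusively held by another thread, LK_SHARED if it
 * is share-locked and 0 if it is unlocked.
 *
 * A minimal usage sketch (the lock name below is hypothetical):
 *
 *	if (lockstatus(&foo_lock) == LK_EXCLOTHER)
 *		printf("foo_lock is exclusively held by another thread\n");
 */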
1697 int
1698 lockstatus(const struct lock *lk)
1699 {
1700         uintptr_t v, x;
1701         int ret;
1702
1703         ret = LK_SHARED;
1704         x = lockmgr_read_value(lk);
1705         v = LK_HOLDER(x);
1706
1707         if ((x & LK_SHARE) == 0) {
1708                 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1709                         ret = LK_EXCLUSIVE;
1710                 else
1711                         ret = LK_EXCLOTHER;
1712         } else if (x == LK_UNLOCKED)
1713                 ret = 0;
1714
1715         return (ret);
1716 }
1717
1718 #ifdef INVARIANT_SUPPORT
1719
1720 FEATURE(invariant_support,
1721     "Support for modules compiled with INVARIANTS option");
1722
1723 #ifndef INVARIANTS
1724 #undef  _lockmgr_assert
1725 #endif
1726
1727 void
1728 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1729 {
1730         int slocked = 0;
1731
1732         if (KERNEL_PANICKED())
1733                 return;
1734         switch (what) {
1735         case KA_SLOCKED:
1736         case KA_SLOCKED | KA_NOTRECURSED:
1737         case KA_SLOCKED | KA_RECURSED:
1738                 slocked = 1;
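                /* FALLTHROUGH */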
1739         case KA_LOCKED:
1740         case KA_LOCKED | KA_NOTRECURSED:
1741         case KA_LOCKED | KA_RECURSED:
1742 #ifdef WITNESS
1743
1744                 /*
1745                  * We cannot trust WITNESS if the lock is held in exclusive
1746                  * mode and a call to lockmgr_disown() happened.
1747                  * Work around this by skipping the check if the lock is held
1748                  * in exclusive mode, even for the KA_LOCKED case.
1749                  */
1750                 if (slocked || (lk->lk_lock & LK_SHARE)) {
1751                         witness_assert(&lk->lock_object, what, file, line);
1752                         break;
1753                 }
1754 #endif
1755                 if (lk->lk_lock == LK_UNLOCKED ||
1756                     ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1757                     (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1758                         panic("Lock %s not %slocked @ %s:%d\n",
1759                             lk->lock_object.lo_name, slocked ? "share" : "",
1760                             file, line);
1761
1762                 if ((lk->lk_lock & LK_SHARE) == 0) {
1763                         if (lockmgr_recursed(lk)) {
1764                                 if (what & KA_NOTRECURSED)
1765                                         panic("Lock %s recursed @ %s:%d\n",
1766                                             lk->lock_object.lo_name, file,
1767                                             line);
1768                         } else if (what & KA_RECURSED)
1769                                 panic("Lock %s not recursed @ %s:%d\n",
1770                                     lk->lock_object.lo_name, file, line);
1771                 }
1772                 break;
1773         case KA_XLOCKED:
1774         case KA_XLOCKED | KA_NOTRECURSED:
1775         case KA_XLOCKED | KA_RECURSED:
1776                 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1777                         panic("Lock %s not exclusively locked @ %s:%d\n",
1778                             lk->lock_object.lo_name, file, line);
1779                 if (lockmgr_recursed(lk)) {
1780                         if (what & KA_NOTRECURSED)
1781                                 panic("Lock %s recursed @ %s:%d\n",
1782                                     lk->lock_object.lo_name, file, line);
1783                 } else if (what & KA_RECURSED)
1784                         panic("Lock %s not recursed @ %s:%d\n",
1785                             lk->lock_object.lo_name, file, line);
1786                 break;
1787         case KA_UNLOCKED:
1788                 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1789                         panic("Lock %s exclusively locked @ %s:%d\n",
1790                             lk->lock_object.lo_name, file, line);
1791                 break;
1792         default:
1793                 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1794                     line);
1795         }
1796 }
1797 #endif
1798
1799 #ifdef DDB
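/*
 * DDB helper: if the given thread is blocked on a lockmgr lock, describe that
 * lock, report its exclusive owner (if any) through *ownerp and return 1;
 * otherwise return 0.
 */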
1800 int
1801 lockmgr_chain(struct thread *td, struct thread **ownerp)
1802 {
1803         const struct lock *lk;
1804
1805         lk = td->td_wchan;
1806
1807         if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1808                 return (0);
1809         db_printf("blocked on lockmgr %s ", lk->lock_object.lo_name);
1810         if (lk->lk_lock & LK_SHARE)
1811                 db_printf("SHARED (count %ju)\n",
1812                     (uintmax_t)LK_SHARERS(lk->lk_lock));
1813         else
1814                 db_printf("EXCL\n");
1815         *ownerp = lockmgr_xholder(lk);
1816
1817         return (1);
1818 }
1819
1820 static void
1821 db_show_lockmgr(const struct lock_object *lock)
1822 {
1823         struct thread *td;
1824         const struct lock *lk;
1825
1826         lk = (const struct lock *)lock;
1827
1828         db_printf(" state: ");
1829         if (lk->lk_lock == LK_UNLOCKED)
1830                 db_printf("UNLOCKED\n");
1831         else if (lk->lk_lock & LK_SHARE)
1832                 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1833         else {
1834                 td = lockmgr_xholder(lk);
1835                 if (td == (struct thread *)LK_KERNPROC)
1836                         db_printf("XLOCK: LK_KERNPROC\n");
1837                 else
1838                         db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1839                             td->td_tid, td->td_proc->p_pid,
1840                             td->td_proc->p_comm);
1841                 if (lockmgr_recursed(lk))
1842                         db_printf(" recursed: %d\n", lk->lk_recurse);
1843         }
1844         db_printf(" waiters: ");
1845         switch (lk->lk_lock & LK_ALL_WAITERS) {
1846         case LK_SHARED_WAITERS:
1847                 db_printf("shared\n");
1848                 break;
1849         case LK_EXCLUSIVE_WAITERS:
1850                 db_printf("exclusive\n");
1851                 break;
1852         case LK_ALL_WAITERS:
1853                 db_printf("shared and exclusive\n");
1854                 break;
1855         default:
1856                 db_printf("none\n");
1857         }
1858         db_printf(" spinners: ");
1859         if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1860                 db_printf("exclusive\n");
1861         else
1862                 db_printf("none\n");
1863 }
1864 #endif