/* FreeBSD: sys/kern/kern_lock.c (stable/13) */
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/lockstat.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define SQ_EXCLUSIVE_QUEUE      0
#define SQ_SHARED_QUEUE         1

#ifndef INVARIANTS
#define _lockmgr_assert(lk, what, file, line)
#endif

#define TD_SLOCKS_INC(td)       ((td)->td_lk_slocks++)
#define TD_SLOCKS_DEC(td)       ((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define STACK_PRINT(lk)
#define STACK_SAVE(lk)
#define STACK_ZERO(lk)
#else
#define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
#define STACK_SAVE(lk)  stack_save(&(lk)->lk_stack)
#define STACK_ZERO(lk)  stack_zero(&(lk)->lk_stack)
#endif

#define LOCK_LOG2(lk, string, arg1, arg2)                               \
        if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
                CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define LOCK_LOG3(lk, string, arg1, arg2, arg3)                         \
        if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
                CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define GIANT_DECLARE                                                   \
        int _i = 0;                                                     \
        WITNESS_SAVE_DECL(Giant)
#define GIANT_RESTORE() do {                                            \
        if (__predict_false(_i > 0)) {                                  \
                while (_i--)                                            \
                        mtx_lock(&Giant);                               \
                WITNESS_RESTORE(&Giant.lock_object, Giant);             \
        }                                                               \
} while (0)
#define GIANT_SAVE() do {                                               \
        if (__predict_false(mtx_owned(&Giant))) {                       \
                WITNESS_SAVE(&Giant.lock_object, Giant);                \
                while (mtx_owned(&Giant)) {                             \
                        _i++;                                           \
                        mtx_unlock(&Giant);                             \
                }                                                       \
        }                                                               \
} while (0)

static bool __always_inline
LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
{

        if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
            LK_SHARE)
                return (true);
        if (fp || (!(x & LK_SHARE)))
                return (false);
        if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
            (curthread->td_pflags & TDP_DEADLKTREAT))
                return (true);
        return (false);
}
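
/*
 * Editor's note (illustrative sketch, not part of the original file):
 * the deadlock-avoidance cases above matter when a thread that already
 * holds a shared lockmgr lock acquires it shared again while an
 * exclusive waiter is queued.  Without the exception the second shared
 * request would queue behind the writer, which in turn waits for the
 * first shared hold to drain: a deadlock.
 *
 *      lockmgr(&lk, LK_SHARED, NULL);  (td_lk_slocks becomes nonzero)
 *        ... a writer blocks on lk, setting LK_EXCLUSIVE_WAITERS ...
 *      lockmgr(&lk, LK_SHARED, NULL);  (still granted, via the
 *                                       td_lk_slocks test above)
 *
 * Passing LK_NODDLKTREAT disables this exception for callers that know
 * they cannot deadlock this way.
 */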

#define LK_TRYOP(x)                                                     \
        ((x) & LK_NOWAIT)

#define LK_CAN_WITNESS(x)                                               \
        (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define LK_TRYWIT(x)                                                    \
        (LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define lockmgr_disowned(lk)                                            \
        (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define lockmgr_xlocked_v(v)                                            \
        (((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

#define lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))
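
/*
 * Editor's note (sketch; see sys/lockmgr.h for the authoritative
 * definitions): lk_lock packs the entire lock state into one word.
 * When LK_SHARE is set, the upper bits carry the sharer count
 * (LK_SHARERS()/LK_SHARERS_LOCK()); otherwise they carry the owning
 * thread pointer (LK_HOLDER()), or LK_KERNPROC for a disowned lock.
 * The low bits hold LK_SHARE, the waiter bits and
 * LK_EXCLUSIVE_SPINNERS, which is why the macros above mask the flag
 * bits off before comparing against curthread.
 */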

static void     assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void     db_show_lockmgr(const struct lock_object *lock);
#endif
static void     lock_lockmgr(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int      owner_lockmgr(const struct lock_object *lock,
                    struct thread **owner);
#endif
static uintptr_t unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
        .lc_name = "lockmgr",
        .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
        .lc_assert = assert_lockmgr,
#ifdef DDB
        .lc_ddb_show = db_show_lockmgr,
#endif
        .lc_lock = lock_lockmgr,
        .lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_lockmgr,
#endif
};

static __read_mostly bool lk_adaptive = true;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
    0, "");
#define lockmgr_delay  locks_delay
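
/*
 * Editor's note: the adaptive spinning fast path can be toggled at
 * runtime, e.g. "sysctl debug.lockmgr.adaptive_spinning=0" makes
 * contended acquisitions go straight to the sleepqueue paths below.
 */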

struct lockmgr_wait {
        const char *iwmesg;
        int ipri;
        int itimo;
};

static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
    int flags, bool fp);
static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);

static void
lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
{
        struct lock_class *class;

        if (flags & LK_INTERLOCK) {
                class = LOCK_CLASS(ilk);
                class->lc_unlock(ilk);
        }

        if (__predict_false(wakeup_swapper))
                kick_proc0();
}

static void
lockmgr_note_shared_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

        LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
            waittime, file, line, LOCKSTAT_READER);
        LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
        WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
        TD_LOCKS_INC(curthread);
        TD_SLOCKS_INC(curthread);
        STACK_SAVE(lk);
}

static void
lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
{

        WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
        LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
        TD_LOCKS_DEC(curthread);
        TD_SLOCKS_DEC(curthread);
}

static void
lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

        LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
            waittime, file, line, LOCKSTAT_WRITER);
        LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
        WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
            line);
        TD_LOCKS_INC(curthread);
        STACK_SAVE(lk);
}

static void
lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
{

        if (LK_HOLDER(lockmgr_read_value(lk)) != LK_KERNPROC) {
                WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
                TD_LOCKS_DEC(curthread);
        }
        LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
            line);
}

static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
        uintptr_t x;

        x = lockmgr_read_value(lk);
        return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * This assumes the sleepq lock is held on entry and returns with it
 * unheld.  It also assumes the generic interlock is sane and has been
 * checked by the caller.  If LK_INTERLOCK is specified, the interlock
 * is not reacquired after the sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
        GIANT_DECLARE;
        struct lock_class *class;
        int catch, error;

        class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
        catch = pri & PCATCH;
        pri &= PRIMASK;
        error = 0;

        LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
            (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

        if (flags & LK_INTERLOCK)
                class->lc_unlock(ilk);
        if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
                lk->lk_exslpfail++;
        GIANT_SAVE();
        sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
            SLEEPQ_INTERRUPTIBLE : 0), queue);
        if ((flags & LK_TIMELOCK) && timo)
                sleepq_set_timeout(&lk->lock_object, timo);

        /*
         * Choose the sleep variant matching the timeout and catch flags.
         */
        if ((flags & LK_TIMELOCK) && timo && catch)
                error = sleepq_timedwait_sig(&lk->lock_object, pri);
        else if ((flags & LK_TIMELOCK) && timo)
                error = sleepq_timedwait(&lk->lock_object, pri);
        else if (catch)
                error = sleepq_wait_sig(&lk->lock_object, pri);
        else
                sleepq_wait(&lk->lock_object, pri);
        GIANT_RESTORE();
        if ((flags & LK_SLEEPFAIL) && error == 0)
                error = ENOLCK;

        return (error);
}
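
/*
 * Editor's illustrative sketch (hypothetical caller, not part of the
 * original file): the pri/timo plumbing above lets a caller request an
 * interruptible, bounded sleep, roughly:
 *
 *      error = lockmgr_args(&lk, LK_EXCLUSIVE | LK_TIMELOCK, NULL,
 *          "foolk", PVFS | PCATCH, 5 * hz);
 *
 * PCATCH selects the _sig variants above (EINTR/ERESTART on a signal),
 * LK_TIMELOCK with a nonzero timeout selects the timed variants
 * (EWOULDBLOCK on expiry), and LK_SLEEPFAIL converts a sleep that would
 * otherwise succeed into ENOLCK so the caller re-evaluates instead of
 * taking the lock.
 */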

static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
        uintptr_t v, x, orig_x;
        u_int realexslp;
        int queue, wakeup_swapper;

        wakeup_swapper = 0;
        for (;;) {
                x = lockmgr_read_value(lk);
                if (lockmgr_sunlock_try(lk, &x))
                        break;

                /*
                 * We should have a sharer with waiters, so enter the hard
                 * path in order to handle wakeups correctly.
                 */
                sleepq_lock(&lk->lock_object);
                orig_x = lockmgr_read_value(lk);
retry_sleepq:
                x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
                v = LK_UNLOCKED;

                /*
                 * If the lock has exclusive waiters, give them preference in
                 * order to avoid deadlock with shared runners-up.
                 * If interruptible sleeps left the exclusive queue empty,
                 * avoid starvation of the threads sleeping on the shared
                 * queue by giving them precedence and cleaning up the
                 * exclusive waiters bit anyway.
                 * Note that the lk_exslpfail count may overstate the real
                 * number of waiters with the LK_SLEEPFAIL flag on, because
                 * such waiters may also use interruptible sleeps; treat
                 * lk_exslpfail as an upper bound only.
                 */
                realexslp = sleepq_sleepcnt(&lk->lock_object,
                    SQ_EXCLUSIVE_QUEUE);
                if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
                        if (lk->lk_exslpfail < realexslp) {
                                lk->lk_exslpfail = 0;
                                queue = SQ_EXCLUSIVE_QUEUE;
                                v |= (x & LK_SHARED_WAITERS);
                        } else {
                                lk->lk_exslpfail = 0;
                                LOCK_LOG2(lk,
                                    "%s: %p has only LK_SLEEPFAIL sleepers",
                                    __func__, lk);
                                LOCK_LOG2(lk,
                            "%s: %p waking up threads on the exclusive queue",
                                    __func__, lk);
                                wakeup_swapper =
                                    sleepq_broadcast(&lk->lock_object,
                                    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
                                queue = SQ_SHARED_QUEUE;
                        }
                } else {
                        /*
                         * Exclusive waiters sleeping with LK_SLEEPFAIL on
                         * and using interruptible sleeps/timeout may have
                         * left spurious lk_exslpfail counts behind, so
                         * clean them up anyway.
                         */
                        lk->lk_exslpfail = 0;
                        queue = SQ_SHARED_QUEUE;
                }

                if (lockmgr_sunlock_try(lk, &orig_x)) {
                        sleepq_release(&lk->lock_object);
                        break;
                }

                x |= LK_SHARERS_LOCK(1);
                if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
                        orig_x = x;
                        goto retry_sleepq;
                }
                LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
                    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
                    "exclusive");
                wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
                    0, queue);
                sleepq_release(&lk->lock_object);
                break;
        }

        LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
        return (wakeup_swapper);
}

static void
assert_lockmgr(const struct lock_object *lock, int what)
{

        panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, uintptr_t how)
{

        panic("lockmgr locks do not support sleep interlocking");
}

static uintptr_t
unlock_lockmgr(struct lock_object *lock)
{

        panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

        panic("lockmgr locks do not support owner inquiring");
}
#endif

void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
        int iflags;

        MPASS((flags & ~LK_INIT_MASK) == 0);
        ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
            ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
            &lk->lk_lock));

        iflags = LO_SLEEPABLE | LO_UPGRADABLE;
        if (flags & LK_CANRECURSE)
                iflags |= LO_RECURSABLE;
        if ((flags & LK_NODUP) == 0)
                iflags |= LO_DUPOK;
        if (flags & LK_NOPROFILE)
                iflags |= LO_NOPROFILE;
        if ((flags & LK_NOWITNESS) == 0)
                iflags |= LO_WITNESS;
        if (flags & LK_QUIET)
                iflags |= LO_QUIET;
        if (flags & LK_IS_VNODE)
                iflags |= LO_IS_VNODE;
        if (flags & LK_NEW)
                iflags |= LO_NEW;
        iflags |= flags & LK_NOSHARE;

        lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
        lk->lk_lock = LK_UNLOCKED;
        lk->lk_recurse = 0;
        lk->lk_exslpfail = 0;
        lk->lk_timo = timo;
        lk->lk_pri = pri;
        STACK_ZERO(lk);
}
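
/*
 * Editor's illustrative sketch (hypothetical names, not part of the
 * original file): a minimal lockmgr lifecycle using the flags
 * translated above.
 *
 *      struct lock foo_lock;
 *
 *      lockinit(&foo_lock, PVFS, "foolk", 0, LK_CANRECURSE);
 *      lockmgr(&foo_lock, LK_EXCLUSIVE, NULL);
 *      ...
 *      lockmgr(&foo_lock, LK_RELEASE, NULL);
 *      lockdestroy(&foo_lock);
 *
 * LK_CANRECURSE maps to LO_RECURSABLE; LK_NOSHARE makes __lockmgr_args()
 * below treat every shared request as exclusive.
 */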

/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

        lockmgr_assert(lk, KA_XLOCKED);
        lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockdisableshare(struct lock *lk)
{

        lockmgr_assert(lk, KA_XLOCKED);
        lk->lock_object.lo_flags |= LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

        lockmgr_assert(lk, KA_XLOCKED);
        lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

        lockmgr_assert(lk, KA_XLOCKED);
        lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}

void
lockdestroy(struct lock *lk)
{

        KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
        KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
        KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
        lock_destroy(&lk->lock_object);
}

static bool __always_inline
lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
{

        /*
         * If no other thread holds an exclusive lock and no exclusive
         * waiter is present, bump the count of sharers.  Since we have
         * to preserve the state of the waiter flags, if we fail to
         * acquire the shared lock loop back and retry.
         */
        while (LK_CAN_SHARE(*xp, flags, fp)) {
                if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
                    *xp + LK_ONE_SHARER)) {
                        return (true);
                }
        }
        return (false);
}

static bool __always_inline
lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
{

        for (;;) {
                if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
                        if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
                            *xp - LK_ONE_SHARER))
                                return (true);
                        continue;
                }
                break;
        }
        return (false);
}
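
/*
 * Editor's note on the loops above: atomic_fcmpset_*_ptr() reloads *xp
 * with the current value of lk_lock whenever the compare-and-set
 * fails, so the retry loops never re-read the lock word explicitly.
 * The generic shape of the idiom:
 *
 *      x = lockmgr_read_value(lk);
 *      for (;;) {
 *              if (<cannot make progress from x>)
 *                      break;
 *              if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, <new value>))
 *                      return (true);
 *              (x now holds the fresh lock word; loop and retry)
 *      }
 */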

static bool
lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
    int flags)
{
        struct thread *owner;
        uintptr_t x;

        x = *xp;
        MPASS(x != LK_UNLOCKED);
        owner = (struct thread *)LK_HOLDER(x);
        for (;;) {
                MPASS(owner != curthread);
                if (owner == (struct thread *)LK_KERNPROC)
                        return (false);
                if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
                        return (false);
                if (owner == NULL)
                        return (false);
                if (!TD_IS_RUNNING(owner))
                        return (false);
                if ((x & LK_ALL_WAITERS) != 0)
                        return (false);
                lock_delay(lda);
                x = lockmgr_read_value(lk);
                if (LK_CAN_SHARE(x, flags, false)) {
                        *xp = x;
                        return (true);
                }
                owner = (struct thread *)LK_HOLDER(x);
        }
}

static __noinline int
lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
        uintptr_t tid, x;
        int error = 0;
        const char *iwmesg;
        int ipri, itimo;

#ifdef KDTRACE_HOOKS
        uint64_t sleep_time = 0;
#endif
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        struct lock_delay_arg lda;

        if (SCHEDULER_STOPPED())
                goto out;

        tid = (uintptr_t)curthread;

        if (LK_CAN_WITNESS(flags))
                WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
                    file, line, flags & LK_INTERLOCK ? ilk : NULL);
        x = lockmgr_read_value(lk);
        lock_delay_arg_init(&lda, &lockmgr_delay);
        if (!lk_adaptive)
                flags &= ~LK_ADAPTIVE;
        /*
         * The lock may already be held in exclusive mode by curthread;
         * fail with EDEADLK instead of deadlocking.
         */
        if (LK_HOLDER(x) == tid) {
                LOCK_LOG2(lk,
                    "%s: %p already held in exclusive mode",
                    __func__, lk);
                error = EDEADLK;
                goto out;
        }

        for (;;) {
                if (lockmgr_slock_try(lk, &x, flags, false))
                        break;

                lock_profile_obtain_lock_failed(&lk->lock_object, false,
                    &contested, &waittime);

                if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
                        if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
                                continue;
                }

#ifdef HWPMC_HOOKS
                PMC_SOFT_CALL( , , lock, failed);
#endif

                /*
                 * If the operation must not sleep, just give up
                 * and return.
                 */
                if (LK_TRYOP(flags)) {
                        LOCK_LOG2(lk, "%s: %p fails the try operation",
                            __func__, lk);
                        error = EBUSY;
                        break;
                }

                /*
                 * Acquire the sleepqueue chain lock because we will
                 * probably need to manipulate the waiter flags.
                 */
                sleepq_lock(&lk->lock_object);
                x = lockmgr_read_value(lk);
retry_sleepq:

                /*
                 * If the lock can be acquired in shared mode, try
                 * again.
                 */
                if (LK_CAN_SHARE(x, flags, false)) {
                        sleepq_release(&lk->lock_object);
                        continue;
                }

                /*
                 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
                 * loop back and retry.
                 */
                if ((x & LK_SHARED_WAITERS) == 0) {
                        if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
                            x | LK_SHARED_WAITERS)) {
                                goto retry_sleepq;
                        }
                        LOCK_LOG2(lk, "%s: %p set shared waiters flag",
                            __func__, lk);
                }

                if (lwa == NULL) {
                        iwmesg = lk->lock_object.lo_name;
                        ipri = lk->lk_pri;
                        itimo = lk->lk_timo;
                } else {
                        iwmesg = lwa->iwmesg;
                        ipri = lwa->ipri;
                        itimo = lwa->itimo;
                }

                /*
                 * Since we have been unable to acquire the shared lock
                 * and the shared waiters flag is set, we will sleep.
                 */
#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs(&lk->lock_object);
#endif
                error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
                    SQ_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs(&lk->lock_object);
#endif
                flags &= ~LK_INTERLOCK;
                if (error) {
                        LOCK_LOG3(lk,
                            "%s: interrupted sleep for %p with %d",
                            __func__, lk, error);
                        break;
                }
                LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
                    __func__, lk);
                x = lockmgr_read_value(lk);
        }
        if (error == 0) {
#ifdef KDTRACE_HOOKS
                if (sleep_time != 0)
                        LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
                            LOCKSTAT_READER, (x & LK_SHARE) == 0,
                            (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#endif
#ifdef LOCK_PROFILING
                lockmgr_note_shared_acquire(lk, contested, waittime,
                    file, line, flags);
#else
                lockmgr_note_shared_acquire(lk, 0, 0, file, line,
                    flags);
#endif
        }

out:
        lockmgr_exit(flags, ilk, 0);
        return (error);
}
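
/*
 * Editor's illustrative sketch (hypothetical caller): LK_NOWAIT turns
 * the blocking points above into an immediate EBUSY, yielding a
 * conventional try-lock pattern:
 *
 *      if (lockmgr(&lk, LK_SHARED | LK_NOWAIT, NULL) == 0) {
 *              ... read side ...
 *              lockmgr(&lk, LK_RELEASE, NULL);
 *      } else {
 *              ... fall back without sleeping ...
 *      }
 */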

static bool
lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
{
        struct thread *owner;
        uintptr_t x;

        x = *xp;
        MPASS(x != LK_UNLOCKED);
        owner = (struct thread *)LK_HOLDER(x);
        for (;;) {
                MPASS(owner != curthread);
                if (owner == NULL)
                        return (false);
                if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
                        return (false);
                if (owner == (struct thread *)LK_KERNPROC)
                        return (false);
                if (!TD_IS_RUNNING(owner))
                        return (false);
                if ((x & LK_ALL_WAITERS) != 0)
                        return (false);
                lock_delay(lda);
                x = lockmgr_read_value(lk);
                if (x == LK_UNLOCKED) {
                        *xp = x;
                        return (true);
                }
                owner = (struct thread *)LK_HOLDER(x);
        }
}

static __noinline int
lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
        struct lock_class *class;
        uintptr_t tid, x, v;
        int error = 0;
        const char *iwmesg;
        int ipri, itimo;

#ifdef KDTRACE_HOOKS
        uint64_t sleep_time = 0;
#endif
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        struct lock_delay_arg lda;

        if (SCHEDULER_STOPPED())
                goto out;

        tid = (uintptr_t)curthread;

        if (LK_CAN_WITNESS(flags))
                WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
                    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
                    ilk : NULL);

        /*
         * If curthread already holds the lock and this one is
         * allowed to recurse, simply recurse on it.
         */
        if (lockmgr_xlocked(lk)) {
                if ((flags & LK_CANRECURSE) == 0 &&
                    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
                        /*
                         * For a try operation, give up and return
                         * instead of panicking.
                         */
                        if (LK_TRYOP(flags)) {
                                LOCK_LOG2(lk,
                                    "%s: %p fails the try operation",
                                    __func__, lk);
                                error = EBUSY;
                                goto out;
                        }
                        if (flags & LK_INTERLOCK) {
                                class = LOCK_CLASS(ilk);
                                class->lc_unlock(ilk);
                        }
                        STACK_PRINT(lk);
                        panic("%s: recursing on non recursive lockmgr %p "
                            "@ %s:%d\n", __func__, lk, file, line);
                }
                atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
                lk->lk_recurse++;
                LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
                LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
                    lk->lk_recurse, file, line);
                WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
                    LK_TRYWIT(flags), file, line);
                TD_LOCKS_INC(curthread);
                goto out;
        }

        x = LK_UNLOCKED;
        lock_delay_arg_init(&lda, &lockmgr_delay);
        if (!lk_adaptive)
                flags &= ~LK_ADAPTIVE;
        for (;;) {
                if (x == LK_UNLOCKED) {
                        if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
                                break;
                        continue;
                }

                lock_profile_obtain_lock_failed(&lk->lock_object, false,
                    &contested, &waittime);

                if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
                        if (lockmgr_xlock_adaptive(&lda, lk, &x))
                                continue;
                }
#ifdef HWPMC_HOOKS
                PMC_SOFT_CALL( , , lock, failed);
#endif

                /*
                 * If the operation must not sleep, just give up
                 * and return.
                 */
                if (LK_TRYOP(flags)) {
                        LOCK_LOG2(lk, "%s: %p fails the try operation",
                            __func__, lk);
                        error = EBUSY;
                        break;
                }

                /*
                 * Acquire the sleepqueue chain lock because we will
                 * probably need to manipulate the waiter flags.
                 */
                sleepq_lock(&lk->lock_object);
                x = lockmgr_read_value(lk);
retry_sleepq:

                /*
                 * If the lock has been released while we spun on
                 * the sleepqueue chain lock, just try again.
                 */
                if (x == LK_UNLOCKED) {
                        sleepq_release(&lk->lock_object);
                        continue;
                }

                /*
                 * The lock can be in the state where there is a
                 * pending queue of waiters, but still no owner.
                 * This happens when the lock is contested and an
                 * owner is going to claim the lock.
                 * If curthread is the one successfully acquiring it
                 * claim lock ownership and return, preserving waiters
                 * flags.
                 */
                v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
                if ((x & ~v) == LK_UNLOCKED) {
                        v &= ~LK_EXCLUSIVE_SPINNERS;
                        if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
                            tid | v)) {
                                sleepq_release(&lk->lock_object);
                                LOCK_LOG2(lk,
                                    "%s: %p claimed by a new writer",
                                    __func__, lk);
                                break;
                        }
                        goto retry_sleepq;
                }

                /*
                 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
                 * fail, loop back and retry.
                 */
                if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
                        if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
                            x | LK_EXCLUSIVE_WAITERS)) {
                                goto retry_sleepq;
                        }
                        LOCK_LOG2(lk, "%s: %p set excl waiters flag",
                            __func__, lk);
                }

                if (lwa == NULL) {
                        iwmesg = lk->lock_object.lo_name;
                        ipri = lk->lk_pri;
                        itimo = lk->lk_timo;
                } else {
                        iwmesg = lwa->iwmesg;
                        ipri = lwa->ipri;
                        itimo = lwa->itimo;
                }

                /*
                 * Since we have been unable to acquire the exclusive
                 * lock and the exclusive waiters flag is set, we will
                 * sleep.
                 */
#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs(&lk->lock_object);
#endif
                error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
                    SQ_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs(&lk->lock_object);
#endif
                flags &= ~LK_INTERLOCK;
                if (error) {
                        LOCK_LOG3(lk,
                            "%s: interrupted sleep for %p with %d",
                            __func__, lk, error);
                        break;
                }
                LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
                    __func__, lk);
                x = lockmgr_read_value(lk);
        }
        if (error == 0) {
#ifdef KDTRACE_HOOKS
                if (sleep_time != 0)
                        LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
                            LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
                            (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#endif
#ifdef LOCK_PROFILING
                lockmgr_note_exclusive_acquire(lk, contested, waittime,
                    file, line, flags);
#else
                lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
                    flags);
#endif
        }

out:
        lockmgr_exit(flags, ilk, 0);
        return (error);
}
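
/*
 * Editor's illustrative sketch: exclusive recursion is only permitted
 * when the lock was initialized with LK_CANRECURSE or the caller
 * passes LK_CANRECURSE on the acquisition; otherwise the check above
 * panics.  Hypothetical example:
 *
 *      lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *      lockmgr(&lk, LK_EXCLUSIVE | LK_CANRECURSE, NULL);
 *      lockmgr(&lk, LK_RELEASE, NULL);  (unrecurses)
 *      lockmgr(&lk, LK_RELEASE, NULL);  (releases for real)
 */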

static __noinline int
lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
        uintptr_t tid, v, setv;
        int error = 0;
        int op;

        if (SCHEDULER_STOPPED())
                goto out;

        tid = (uintptr_t)curthread;

        _lockmgr_assert(lk, KA_SLOCKED, file, line);

        op = flags & LK_TYPE_MASK;
        v = lockmgr_read_value(lk);
        for (;;) {
                if (LK_SHARERS(v) > 1) {
                        if (op == LK_TRYUPGRADE) {
                                LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
                                    __func__, lk);
                                error = EBUSY;
                                goto out;
                        }
                        if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &v,
                            v - LK_ONE_SHARER)) {
                                lockmgr_note_shared_release(lk, file, line);
                                goto out_xlock;
                        }
                        continue;
                }
                MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));

                setv = tid;
                setv |= (v & LK_ALL_WAITERS);

                /*
                 * Try to switch from one shared lock to an exclusive one.
                 * We need to preserve waiters flags during the operation.
                 */
                if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
                        LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
                            line);
                        WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
                            LK_TRYWIT(flags), file, line);
                        LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
                        TD_SLOCKS_DEC(curthread);
                        goto out;
                }
        }

out_xlock:
        error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
        flags &= ~LK_INTERLOCK;
out:
        lockmgr_exit(flags, ilk, 0);
        return (error);
}
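
/*
 * Editor's illustrative sketch of the two upgrade flavours handled
 * above: LK_UPGRADE may drop the shared hold and sleep in
 * lockmgr_xlock_hard(), so the caller must assume the protected state
 * changed across the call; LK_TRYUPGRADE never sleeps and fails with
 * EBUSY while other sharers exist, keeping the shared hold intact.
 *
 *      lockmgr(&lk, LK_SHARED, NULL);
 *      if (lockmgr(&lk, LK_TRYUPGRADE, NULL) != 0) {
 *              lockmgr(&lk, LK_RELEASE, NULL);
 *              lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *              (re-validate any state cached while shared)
 *      }
 */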

int
lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
        struct lock_class *class;
        uintptr_t x, tid;
        u_int op;
        bool locked;

        if (SCHEDULER_STOPPED())
                return (0);

        op = flags & LK_TYPE_MASK;
        locked = false;
        switch (op) {
        case LK_SHARED:
                if (LK_CAN_WITNESS(flags))
                        WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
                            file, line, flags & LK_INTERLOCK ? ilk : NULL);
                if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
                        break;
                x = lockmgr_read_value(lk);
                if (lockmgr_slock_try(lk, &x, flags, true)) {
                        lockmgr_note_shared_acquire(lk, 0, 0,
                            file, line, flags);
                        locked = true;
                } else {
                        return (lockmgr_slock_hard(lk, flags, ilk, file, line,
                            NULL));
                }
                break;
        case LK_EXCLUSIVE:
                if (LK_CAN_WITNESS(flags))
                        WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
                            LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
                            ilk : NULL);
                tid = (uintptr_t)curthread;
                if (lockmgr_read_value(lk) == LK_UNLOCKED &&
                    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
                        lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
                            flags);
                        locked = true;
                } else {
                        return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
                            NULL));
                }
                break;
        case LK_UPGRADE:
        case LK_TRYUPGRADE:
                return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
        default:
                break;
        }
        if (__predict_true(locked)) {
                if (__predict_false(flags & LK_INTERLOCK)) {
                        class = LOCK_CLASS(ilk);
                        class->lc_unlock(ilk);
                }
                return (0);
        } else {
                return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
                    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
        }
}
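
/*
 * Editor's illustrative sketch (hypothetical softc; per lockmgr(9) the
 * interlock is typically a mutex): LK_INTERLOCK atomically trades a
 * held interlock for the lockmgr lock.  The interlock is dropped
 * whether or not the acquisition succeeds and is not reacquired.
 *
 *      mtx_lock(&sc->sc_mtx);
 *      ... examine state protected by sc_mtx ...
 *      lockmgr(&sc->sc_lock, LK_EXCLUSIVE | LK_INTERLOCK, &sc->sc_mtx);
 *      (sc_mtx is unlocked here, sc_lock is held)
 */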

static __noinline int
lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
        int wakeup_swapper = 0;

        if (SCHEDULER_STOPPED())
                goto out;

        wakeup_swapper = wakeupshlk(lk, file, line);

out:
        lockmgr_exit(flags, ilk, wakeup_swapper);
        return (0);
}

static __noinline int
lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
        uintptr_t tid, v;
        int wakeup_swapper = 0;
        u_int realexslp;
        int queue;

        if (SCHEDULER_STOPPED())
                goto out;

        tid = (uintptr_t)curthread;

        /*
         * As a first option, treat the lock as if it has no
         * waiters.
         * Fix up the tid variable if the lock has been disowned.
         */
        if (LK_HOLDER(x) == LK_KERNPROC)
                tid = LK_KERNPROC;

        /*
         * The lock is held in exclusive mode.
         * If the lock is recursed also, then unrecurse it.
         */
        if (lockmgr_recursed_v(x)) {
                LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
                lk->lk_recurse--;
                if (lk->lk_recurse == 0)
                        atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
                goto out;
        }
        if (tid != LK_KERNPROC)
                LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
                    LOCKSTAT_WRITER);

        if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
                goto out;

        sleepq_lock(&lk->lock_object);
        x = lockmgr_read_value(lk);
        v = LK_UNLOCKED;

        /*
         * If the lock has exclusive waiters, give them
         * preference in order to avoid deadlock with
         * shared runners-up.
         * If interruptible sleeps left the exclusive queue
         * empty, avoid starvation of the threads sleeping
         * on the shared queue by giving them precedence
         * and cleaning up the exclusive waiters bit anyway.
         * Note that the lk_exslpfail count may overstate the
         * real number of waiters with the LK_SLEEPFAIL flag
         * on, because such waiters may also use interruptible
         * sleeps; treat lk_exslpfail as an upper bound only.
         */
        MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
        realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
        if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
                if (lk->lk_exslpfail < realexslp) {
                        lk->lk_exslpfail = 0;
                        queue = SQ_EXCLUSIVE_QUEUE;
                        v |= (x & LK_SHARED_WAITERS);
                } else {
                        lk->lk_exslpfail = 0;
                        LOCK_LOG2(lk,
                            "%s: %p has only LK_SLEEPFAIL sleepers",
                            __func__, lk);
                        LOCK_LOG2(lk,
                            "%s: %p waking up threads on the exclusive queue",
                            __func__, lk);
                        wakeup_swapper = sleepq_broadcast(&lk->lock_object,
                            SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
                        queue = SQ_SHARED_QUEUE;
                }
        } else {
                /*
                 * Exclusive waiters sleeping with LK_SLEEPFAIL
                 * on and using interruptible sleeps/timeout
                 * may have left spurious lk_exslpfail counts
                 * behind, so clean them up anyway.
                 */
                lk->lk_exslpfail = 0;
                queue = SQ_SHARED_QUEUE;
        }

        LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
            __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
            "exclusive");
        atomic_store_rel_ptr(&lk->lk_lock, v);
        wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
        sleepq_release(&lk->lock_object);

out:
        lockmgr_exit(flags, ilk, wakeup_swapper);
        return (0);
}

/*
 * Lightweight entry points for common operations.
 *
 * Functionality is similar to sx locks, in that none of the additional lockmgr
 * features are supported. To be clear, these are NOT supported:
 * 1. shared locking disablement
 * 2. returning with an error after sleep
 * 3. unlocking the interlock
 *
 * If in doubt, use lockmgr_lock_flags.
 */
int
lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
{
        uintptr_t x;

        MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
        MPASS((flags & LK_INTERLOCK) == 0);
        MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);

        if (LK_CAN_WITNESS(flags))
                WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
                    file, line, NULL);
        x = lockmgr_read_value(lk);
        if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
                lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
                return (0);
        }

        return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
}

int
lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
{
        uintptr_t tid;

        MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
        MPASS((flags & LK_INTERLOCK) == 0);

        if (LK_CAN_WITNESS(flags))
                WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
                    LOP_EXCLUSIVE, file, line, NULL);
        tid = (uintptr_t)curthread;
        if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
                lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
                    flags);
                return (0);
        }

        return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
}

int
lockmgr_unlock(struct lock *lk)
{
        uintptr_t x, tid;
        const char *file;
        int line;

        file = __FILE__;
        line = __LINE__;

        _lockmgr_assert(lk, KA_LOCKED, file, line);
        x = lockmgr_read_value(lk);
        if (__predict_true((x & LK_SHARE) != 0)) {
                lockmgr_note_shared_release(lk, file, line);
                if (lockmgr_sunlock_try(lk, &x)) {
                        LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
                            LOCKSTAT_READER);
                } else {
                        return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
                }
        } else {
                tid = (uintptr_t)curthread;
                lockmgr_note_exclusive_release(lk, file, line);
                if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
                        LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
                            LOCKSTAT_WRITER);
                } else {
                        return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
                }
        }
        return (0);
}
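
/*
 * Editor's illustrative sketch: the fast-path entry points above in
 * use.  No interlock and no custom sleep behaviour are possible here;
 * contention falls through to the hard paths.
 *
 *      lockmgr_slock(&lk, LK_SHARED, LOCK_FILE, LOCK_LINE);
 *      ... read side ...
 *      lockmgr_unlock(&lk);
 */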

int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
        GIANT_DECLARE;
        struct lockmgr_wait lwa;
        struct lock_class *class;
        const char *iwmesg;
        uintptr_t tid, v, x;
        u_int op, realexslp;
        int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif

        if (SCHEDULER_STOPPED())
                return (0);

        error = 0;
        tid = (uintptr_t)curthread;
        op = (flags & LK_TYPE_MASK);
        iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
        ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
        itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

        lwa.iwmesg = iwmesg;
        lwa.ipri = ipri;
        lwa.itimo = itimo;

        MPASS((flags & ~LK_TOTAL_MASK) == 0);
        KASSERT((op & (op - 1)) == 0,
            ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
        KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
            (op != LK_DOWNGRADE && op != LK_RELEASE),
            ("%s: Invalid flags for the requested operation @ %s:%d",
            __func__, file, line));
        KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
            ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
            __func__, file, line));
        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
            lk->lock_object.lo_name, file, line));

        class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;

        if (lk->lock_object.lo_flags & LK_NOSHARE) {
                switch (op) {
                case LK_SHARED:
                        op = LK_EXCLUSIVE;
                        break;
                case LK_UPGRADE:
                case LK_TRYUPGRADE:
                case LK_DOWNGRADE:
                        _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
                            file, line);
                        if (flags & LK_INTERLOCK)
                                class->lc_unlock(ilk);
                        return (0);
                }
        }

        wakeup_swapper = 0;
        switch (op) {
        case LK_SHARED:
                return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
                break;
        case LK_UPGRADE:
        case LK_TRYUPGRADE:
                return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
                break;
        case LK_EXCLUSIVE:
                return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
                break;
        case LK_DOWNGRADE:
                _lockmgr_assert(lk, KA_XLOCKED, file, line);
                WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

                /*
                 * Panic if the lock is recursed.
                 */
                if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
                        if (flags & LK_INTERLOCK)
                                class->lc_unlock(ilk);
                        panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
                            __func__, iwmesg, file, line);
                }
                TD_SLOCKS_INC(curthread);

                /*
                 * In order to preserve waiters flags, just spin.
                 */
                for (;;) {
                        x = lockmgr_read_value(lk);
                        MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
                        x &= LK_ALL_WAITERS;
                        if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
                            LK_SHARERS_LOCK(1) | x))
                                break;
                        cpu_spinwait();
                }
                LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
                LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
                break;
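        /*
         * Editor's illustrative sketch: a downgrade keeps the lock
         * continuously held while moving from writer to reader, which
         * is why only the waiter bits need preserving above:
         *
         *      lockmgr(&lk, LK_EXCLUSIVE, NULL);
         *      ... modify state ...
         *      lockmgr(&lk, LK_DOWNGRADE, NULL);
         *      ... read state with no writer able to intervene ...
         *      lockmgr(&lk, LK_RELEASE, NULL);
         *
         * Downgrading a recursed lock panics, as checked above.
         */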
1401         case LK_RELEASE:
1402                 _lockmgr_assert(lk, KA_LOCKED, file, line);
1403                 x = lockmgr_read_value(lk);
1404
1405                 if (__predict_true(x & LK_SHARE) != 0) {
1406                         lockmgr_note_shared_release(lk, file, line);
1407                         return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1408                 } else {
1409                         lockmgr_note_exclusive_release(lk, file, line);
1410                         return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
1411                 }
1412                 break;
1413         case LK_DRAIN:
1414                 if (LK_CAN_WITNESS(flags))
1415                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1416                             LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1417                             ilk : NULL);
1418
1419                 /*
1420                  * Trying to drain a lock we already own will result in a
1421                  * deadlock.
1422                  */
1423                 if (lockmgr_xlocked(lk)) {
1424                         if (flags & LK_INTERLOCK)
1425                                 class->lc_unlock(ilk);
1426                         panic("%s: draining %s with the lock held @ %s:%d\n",
1427                             __func__, iwmesg, file, line);
1428                 }
1429
1430                 for (;;) {
1431                         if (lk->lk_lock == LK_UNLOCKED &&
1432                             atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1433                                 break;
1434
1435 #ifdef HWPMC_HOOKS
1436                         PMC_SOFT_CALL( , , lock, failed);
1437 #endif
1438                         lock_profile_obtain_lock_failed(&lk->lock_object, false,
1439                             &contested, &waittime);
1440
1441                         /*
1442                          * If the caller requested a non-blocking (try)
1443                          * operation, just give up and return.
1444                          */
1445                         if (LK_TRYOP(flags)) {
1446                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
1447                                     __func__, lk);
1448                                 error = EBUSY;
1449                                 break;
1450                         }
1451
1452                         /*
1453                          * Acquire the sleepqueue chain lock because we
1454                          * probably will need to manipulate the waiters flags.
1455                          */
1456                         sleepq_lock(&lk->lock_object);
1457                         x = lockmgr_read_value(lk);
1458
1459                         /*
1460                          * If the lock has been released while we spun on
1461                          * the sleepqueue chain lock, just try again.
1462                          */
1463                         if (x == LK_UNLOCKED) {
1464                                 sleepq_release(&lk->lock_object);
1465                                 continue;
1466                         }
1467
1468                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1469                         if ((x & ~v) == LK_UNLOCKED) {
1470                                 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1471
1472                                  /*
1473                                  * If interruptible sleeps left the exclusive
1474                                  * queue empty, avoid starvation of the
1475                                  * threads sleeping on the shared queue by
1476                                  * giving them precedence and cleaning up the
1477                                  * exclusive waiters bit anyway.
1478                                  * Please note that the lk_exslpfail count
1479                                  * may overstate the real number of waiters
1480                                  * with the LK_SLEEPFAIL flag set, because
1481                                  * that flag may also be used together with
1482                                  * interruptible sleeps, so lk_exslpfail
1483                                  * should be treated as an upper bound,
1484                                  * including the edge cases.
1485                                  */
1486                                 if (v & LK_EXCLUSIVE_WAITERS) {
1487                                         queue = SQ_EXCLUSIVE_QUEUE;
1488                                         v &= ~LK_EXCLUSIVE_WAITERS;
1489                                 } else {
1490                                         /*
1491                                          * Exclusive waiters sleeping with
1492                                          * LK_SLEEPFAIL set and using
1493                                          * interruptible sleeps/timeouts may
1494                                          * have left spurious lk_exslpfail
1495                                          * counts behind, so clean up anyway.
1496                                          */
1497                                         MPASS(v & LK_SHARED_WAITERS);
1498                                         lk->lk_exslpfail = 0;
1499                                         queue = SQ_SHARED_QUEUE;
1500                                         v &= ~LK_SHARED_WAITERS;
1501                                 }
1502                                 if (queue == SQ_EXCLUSIVE_QUEUE) {
1503                                         realexslp =
1504                                             sleepq_sleepcnt(&lk->lock_object,
1505                                             SQ_EXCLUSIVE_QUEUE);
1506                                         if (lk->lk_exslpfail >= realexslp) {
1507                                                 lk->lk_exslpfail = 0;
1508                                                 queue = SQ_SHARED_QUEUE;
1509                                                 v &= ~LK_SHARED_WAITERS;
1510                                                 if (realexslp != 0) {
1511                                                         LOCK_LOG2(lk,
1512                                         "%s: %p has only LK_SLEEPFAIL sleepers",
1513                                                             __func__, lk);
1514                                                         LOCK_LOG2(lk,
1515                         "%s: %p waking up threads on the exclusive queue",
1516                                                             __func__, lk);
1517                                                         wakeup_swapper =
1518                                                             sleepq_broadcast(
1519                                                             &lk->lock_object,
1520                                                             SLEEPQ_LK, 0,
1521                                                             SQ_EXCLUSIVE_QUEUE);
1522                                                 }
1523                                         } else
1524                                                 lk->lk_exslpfail = 0;
1525                                 }
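                                /*
                                 * Worked example with hypothetical numbers:
                                 * if realexslp == 3 and lk_exslpfail >= 3,
                                 * every exclusive sleeper carries
                                 * LK_SLEEPFAIL and would fail its sleep
                                 * anyway, so the wakeup preference moves to
                                 * the shared queue.  A stale lk_exslpfail
                                 * of, say, 4 left behind by interrupted
                                 * sleepers takes the same path, which is
                                 * why the count is only an upper bound.
                                 */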
1526                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1527                                         sleepq_release(&lk->lock_object);
1528                                         continue;
1529                                 }
1530                                 LOCK_LOG3(lk,
1531                                 "%s: %p waking up all threads on the %s queue",
1532                                     __func__, lk, queue == SQ_SHARED_QUEUE ?
1533                                     "shared" : "exclusive");
1534                                 wakeup_swapper |= sleepq_broadcast(
1535                                     &lk->lock_object, SLEEPQ_LK, 0, queue);
1536
1537                                 /*
1538                                  * If shared waiters have been woken up, we
1539                                  * need to wait for one of them to acquire
1540                                  * the lock before setting the exclusive
1541                                  * waiters bit, in order to avoid a deadlock.
1542                                  */
1543                                 if (queue == SQ_SHARED_QUEUE) {
1544                                         for (v = lk->lk_lock;
1545                                             (v & LK_SHARE) && !LK_SHARERS(v);
1546                                             v = lk->lk_lock)
1547                                                 cpu_spinwait();
1548                                 }
1549                         }
1550
1551                         /*
1552                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1553                          * fail, loop back and retry.
1554                          */
1555                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1556                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1557                                     x | LK_EXCLUSIVE_WAITERS)) {
1558                                         sleepq_release(&lk->lock_object);
1559                                         continue;
1560                                 }
1561                                 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1562                                     __func__, lk);
1563                         }
1564
1565                         /*
1566                          * Since we have been unable to acquire the
1567                          * exclusive lock and the exclusive waiters flag
1568                          * is set, we will sleep.
1569                          */
1570                         if (flags & LK_INTERLOCK) {
1571                                 class->lc_unlock(ilk);
1572                                 flags &= ~LK_INTERLOCK;
1573                         }
1574                         GIANT_SAVE();
1575                         sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1576                             SQ_EXCLUSIVE_QUEUE);
1577                         sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1578                         GIANT_RESTORE();
1579                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1580                             __func__, lk);
1581                 }
1582
1583                 if (error == 0) {
1584                         lock_profile_obtain_lock_success(&lk->lock_object,
1585                             false, contested, waittime, file, line);
1586                         LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1587                             lk->lk_recurse, file, line);
1588                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1589                             LK_TRYWIT(flags), file, line);
1590                         TD_LOCKS_INC(curthread);
1591                         STACK_SAVE(lk);
1592                 }
1593                 break;
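        /*
         * Illustrative sketch (not part of this switch): LK_DRAIN is
         * typically used when tearing an object down, so that the caller
         * ends up owning the lock exclusively with no queued waiters, e.g.:
         *
         *      lockmgr(&lk, LK_DRAIN | LK_INTERLOCK, &ilk);
         *
         * where the lock 'lk' and the interlock 'ilk' are hypothetical.
         */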
1594         default:
1595                 if (flags & LK_INTERLOCK)
1596                         class->lc_unlock(ilk);
1597                 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1598         }
1599
1600         if (flags & LK_INTERLOCK)
1601                 class->lc_unlock(ilk);
1602         if (wakeup_swapper)
1603                 kick_proc0();
1604
1605         return (error);
1606 }
1607
1608 void
1609 _lockmgr_disown(struct lock *lk, const char *file, int line)
1610 {
1611         uintptr_t tid, x;
1612
1613         if (SCHEDULER_STOPPED())
1614                 return;
1615
1616         tid = (uintptr_t)curthread;
1617         _lockmgr_assert(lk, KA_XLOCKED, file, line);
1618
1619         /*
1620          * Panic if the lock is recursed.
1621          */
1622         if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1623                 panic("%s: disown a recursed lockmgr @ %s:%d\n",
1624                     __func__, file, line);
1625
1626         /*
1627          * If the owner is already LK_KERNPROC, just skip the whole operation.
1628          */
1629         if (LK_HOLDER(lk->lk_lock) != tid)
1630                 return;
1631         lock_profile_release_lock(&lk->lock_object, false);
1632         LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1633         LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1634         WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1635         TD_LOCKS_DEC(curthread);
1636         STACK_SAVE(lk);
1637
1638         /*
1639          * In order to preserve the waiters flags, just spin.
1640          */
1641         for (;;) {
1642                 x = lockmgr_read_value(lk);
1643                 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1644                 x &= LK_ALL_WAITERS;
1645                 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1646                     LK_KERNPROC | x))
1647                         return;
1648                 cpu_spinwait();
1649         }
1650 }
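/*
 * Illustrative sketch, assuming the lockmgr_disown() wrapper macro from
 * <sys/lockmgr.h>: a thread that must return while the lock stays
 * exclusively held can hand ownership to the kernel:
 *
 *      lockmgr(&bp->b_lock, LK_EXCLUSIVE, NULL);
 *      lockmgr_disown(&bp->b_lock);
 *
 * where 'bp' is a hypothetical buffer; any thread may later release the
 * lock on its behalf.
 */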
1651
1652 void
1653 lockmgr_printinfo(const struct lock *lk)
1654 {
1655         struct thread *td;
1656         uintptr_t x;
1657
1658         if (lk->lk_lock == LK_UNLOCKED)
1659                 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1660         else if (lk->lk_lock & LK_SHARE)
1661                 printf("lock type %s: SHARED (count %ju)\n",
1662                     lk->lock_object.lo_name,
1663                     (uintmax_t)LK_SHARERS(lk->lk_lock));
1664         else {
1665                 td = lockmgr_xholder(lk);
1666                 if (td == (struct thread *)LK_KERNPROC)
1667                         printf("lock type %s: EXCL by KERNPROC\n",
1668                             lk->lock_object.lo_name);
1669                 else
1670                         printf("lock type %s: EXCL by thread %p "
1671                             "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1672                             td, td->td_proc->p_pid, td->td_proc->p_comm,
1673                             td->td_tid);
1674         }
1675
1676         x = lk->lk_lock;
1677         if (x & LK_EXCLUSIVE_WAITERS)
1678                 printf(" with exclusive waiters pending\n");
1679         if (x & LK_SHARED_WAITERS)
1680                 printf(" with shared waiters pending\n");
1681         if (x & LK_EXCLUSIVE_SPINNERS)
1682                 printf(" with exclusive spinners pending\n");
1683
1684         STACK_PRINT(lk);
1685 }
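/*
 * Example output (illustrative): for a lock named "bufwait" held shared by
 * two threads with an exclusive waiter queued, the above prints roughly:
 *
 *      lock type bufwait: SHARED (count 2)
 *       with exclusive waiters pending
 */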
1686
1687 int
1688 lockstatus(const struct lock *lk)
1689 {
1690         uintptr_t v, x;
1691         int ret;
1692
1693         ret = LK_SHARED;
1694         x = lockmgr_read_value(lk);
1695         v = LK_HOLDER(x);
1696
1697         if ((x & LK_SHARE) == 0) {
1698                 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1699                         ret = LK_EXCLUSIVE;
1700                 else
1701                         ret = LK_EXCLOTHER;
1702         } else if (x == LK_UNLOCKED)
1703                 ret = 0;
1704
1705         return (ret);
1706 }
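/*
 * Illustrative sketch of how a caller might interpret the result, with a
 * hypothetical lock 'lk':
 *
 *      switch (lockstatus(&lk)) {
 *      case LK_EXCLUSIVE:      held exclusively by curthread or KERNPROC
 *      case LK_EXCLOTHER:      held exclusively by some other thread
 *      case LK_SHARED:         held shared
 *      case 0:                 unlocked
 *      }
 */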
1707
1708 #ifdef INVARIANT_SUPPORT
1709
1710 FEATURE(invariant_support,
1711     "Support for modules compiled with INVARIANTS option");
1712
1713 #ifndef INVARIANTS
1714 #undef  _lockmgr_assert
1715 #endif
1716
1717 void
1718 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1719 {
1720         int slocked = 0;
1721
1722         if (SCHEDULER_STOPPED())
1723                 return;
1724         switch (what) {
1725         case KA_SLOCKED:
1726         case KA_SLOCKED | KA_NOTRECURSED:
1727         case KA_SLOCKED | KA_RECURSED:
1728                 slocked = 1;
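                /* FALLTHROUGH */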
1729         case KA_LOCKED:
1730         case KA_LOCKED | KA_NOTRECURSED:
1731         case KA_LOCKED | KA_RECURSED:
1732 #ifdef WITNESS
1733
1734                 /*
1735                  * We cannot trust WITNESS if the lock is held in exclusive
1736                  * mode and a call to lockmgr_disown() happened.
1737                  * Work around this by skipping the check if the lock is
1738                  * held in exclusive mode, even for the KA_LOCKED case.
1739                  */
1740                 if (slocked || (lk->lk_lock & LK_SHARE)) {
1741                         witness_assert(&lk->lock_object, what, file, line);
1742                         break;
1743                 }
1744 #endif
1745                 if (lk->lk_lock == LK_UNLOCKED ||
1746                     ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1747                     (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1748                         panic("Lock %s not %slocked @ %s:%d\n",
1749                             lk->lock_object.lo_name, slocked ? "share" : "",
1750                             file, line);
1751
1752                 if ((lk->lk_lock & LK_SHARE) == 0) {
1753                         if (lockmgr_recursed(lk)) {
1754                                 if (what & KA_NOTRECURSED)
1755                                         panic("Lock %s recursed @ %s:%d\n",
1756                                             lk->lock_object.lo_name, file,
1757                                             line);
1758                         } else if (what & KA_RECURSED)
1759                                 panic("Lock %s not recursed @ %s:%d\n",
1760                                     lk->lock_object.lo_name, file, line);
1761                 }
1762                 break;
1763         case KA_XLOCKED:
1764         case KA_XLOCKED | KA_NOTRECURSED:
1765         case KA_XLOCKED | KA_RECURSED:
1766                 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1767                         panic("Lock %s not exclusively locked @ %s:%d\n",
1768                             lk->lock_object.lo_name, file, line);
1769                 if (lockmgr_recursed(lk)) {
1770                         if (what & KA_NOTRECURSED)
1771                                 panic("Lock %s recursed @ %s:%d\n",
1772                                     lk->lock_object.lo_name, file, line);
1773                 } else if (what & KA_RECURSED)
1774                         panic("Lock %s not recursed @ %s:%d\n",
1775                             lk->lock_object.lo_name, file, line);
1776                 break;
1777         case KA_UNLOCKED:
1778                 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1779                         panic("Lock %s exclusively locked @ %s:%d\n",
1780                             lk->lock_object.lo_name, file, line);
1781                 break;
1782         default:
1783                 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1784                     line);
1785         }
1786 }
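/*
 * Illustrative sketch, assuming the lockmgr_assert() wrapper macro from
 * <sys/lockmgr.h> under INVARIANTS: code paths document their locking
 * contract with calls such as:
 *
 *      lockmgr_assert(&lk, KA_XLOCKED);
 *      lockmgr_assert(&lk, KA_LOCKED | KA_NOTRECURSED);
 *
 * where 'lk' is hypothetical; a violated assertion panics with one of the
 * messages above.
 */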
1787 #endif
1788
1789 #ifdef DDB
1790 int
1791 lockmgr_chain(struct thread *td, struct thread **ownerp)
1792 {
1793         const struct lock *lk;
1794
1795         lk = td->td_wchan;
1796
1797         if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1798                 return (0);
1799         db_printf("blocked on lockmgr %s ", lk->lock_object.lo_name);
1800         if (lk->lk_lock & LK_SHARE)
1801                 db_printf("SHARED (count %ju)\n",
1802                     (uintmax_t)LK_SHARERS(lk->lk_lock));
1803         else
1804                 db_printf("EXCL\n");
1805         *ownerp = lockmgr_xholder(lk);
1806
1807         return (1);
1808 }
1809
1810 static void
1811 db_show_lockmgr(const struct lock_object *lock)
1812 {
1813         struct thread *td;
1814         const struct lock *lk;
1815
1816         lk = (const struct lock *)lock;
1817
1818         db_printf(" state: ");
1819         if (lk->lk_lock == LK_UNLOCKED)
1820                 db_printf("UNLOCKED\n");
1821         else if (lk->lk_lock & LK_SHARE)
1822                 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1823         else {
1824                 td = lockmgr_xholder(lk);
1825                 if (td == (struct thread *)LK_KERNPROC)
1826                         db_printf("XLOCK: LK_KERNPROC\n");
1827                 else
1828                         db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1829                             td->td_tid, td->td_proc->p_pid,
1830                             td->td_proc->p_comm);
1831                 if (lockmgr_recursed(lk))
1832                         db_printf(" recursed: %d\n", lk->lk_recurse);
1833         }
1834         db_printf(" waiters: ");
1835         switch (lk->lk_lock & LK_ALL_WAITERS) {
1836         case LK_SHARED_WAITERS:
1837                 db_printf("shared\n");
1838                 break;
1839         case LK_EXCLUSIVE_WAITERS:
1840                 db_printf("exclusive\n");
1841                 break;
1842         case LK_ALL_WAITERS:
1843                 db_printf("shared and exclusive\n");
1844                 break;
1845         default:
1846                 db_printf("none\n");
1847         }
1848         db_printf(" spinners: ");
1849         if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1850                 db_printf("exclusive\n");
1851         else
1852                 db_printf("none\n");
1853 }
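/*
 * Illustrative note: db_show_lockmgr() is reached from the DDB prompt via
 * the generic lock printer, e.g. "show lock <address>", which dispatches
 * through the lock class to this function.
 */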
1854 #endif