]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/kern/kern_lock.c
ath10k: ath11k: add specific LinuxKPI support
[FreeBSD/FreeBSD.git] / sys / kern / kern_lock.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice(s), this list of conditions and the following disclaimer as
12  *    the first lines of this file unmodified other than the possible
13  *    addition of one or more copyright notices.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice(s), this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28  * DAMAGE.
29  */
30
31 #include "opt_ddb.h"
32 #include "opt_hwpmc_hooks.h"
33
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36
37 #include <sys/param.h>
38 #include <sys/kdb.h>
39 #include <sys/ktr.h>
40 #include <sys/lock.h>
41 #include <sys/lock_profile.h>
42 #include <sys/lockmgr.h>
43 #include <sys/lockstat.h>
44 #include <sys/mutex.h>
45 #include <sys/proc.h>
46 #include <sys/sleepqueue.h>
47 #ifdef DEBUG_LOCKS
48 #include <sys/stack.h>
49 #endif
50 #include <sys/sysctl.h>
51 #include <sys/systm.h>
52
53 #include <machine/cpu.h>
54
55 #ifdef DDB
56 #include <ddb/ddb.h>
57 #endif
58
59 #ifdef HWPMC_HOOKS
60 #include <sys/pmckern.h>
61 PMC_SOFT_DECLARE( , , lock, failed);
62 #endif
63
64 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
65     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
66
67 #define SQ_EXCLUSIVE_QUEUE      0
68 #define SQ_SHARED_QUEUE         1
69
70 #ifndef INVARIANTS
71 #define _lockmgr_assert(lk, what, file, line)
72 #endif
73
74 #define TD_SLOCKS_INC(td)       ((td)->td_lk_slocks++)
75 #define TD_SLOCKS_DEC(td)       ((td)->td_lk_slocks--)
76
77 #ifndef DEBUG_LOCKS
78 #define STACK_PRINT(lk)
79 #define STACK_SAVE(lk)
80 #define STACK_ZERO(lk)
81 #else
82 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
83 #define STACK_SAVE(lk)  stack_save(&(lk)->lk_stack)
84 #define STACK_ZERO(lk)  stack_zero(&(lk)->lk_stack)
85 #endif
86
87 #define LOCK_LOG2(lk, string, arg1, arg2)                               \
88         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
89                 CTR2(KTR_LOCK, (string), (arg1), (arg2))
90 #define LOCK_LOG3(lk, string, arg1, arg2, arg3)                         \
91         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
92                 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
93
94 #define GIANT_DECLARE                                                   \
95         int _i = 0;                                                     \
96         WITNESS_SAVE_DECL(Giant)
97 #define GIANT_RESTORE() do {                                            \
98         if (__predict_false(_i > 0)) {                                  \
99                 while (_i--)                                            \
100                         mtx_lock(&Giant);                               \
101                 WITNESS_RESTORE(&Giant.lock_object, Giant);             \
102         }                                                               \
103 } while (0)
104 #define GIANT_SAVE() do {                                               \
105         if (__predict_false(mtx_owned(&Giant))) {                       \
106                 WITNESS_SAVE(&Giant.lock_object, Giant);                \
107                 while (mtx_owned(&Giant)) {                             \
108                         _i++;                                           \
109                         mtx_unlock(&Giant);                             \
110                 }                                                       \
111         }                                                               \
112 } while (0)
113
114 static bool __always_inline
115 LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
116 {
117
118         if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
119             LK_SHARE)
120                 return (true);
121         if (fp || (!(x & LK_SHARE)))
122                 return (false);
123         if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
124             (curthread->td_pflags & TDP_DEADLKTREAT))
125                 return (true);
126         return (false);
127 }
128
129 #define LK_TRYOP(x)                                                     \
130         ((x) & LK_NOWAIT)
131
132 #define LK_CAN_WITNESS(x)                                               \
133         (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
134 #define LK_TRYWIT(x)                                                    \
135         (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
136
137 #define lockmgr_disowned(lk)                                            \
138         (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
139
140 #define lockmgr_xlocked_v(v)                                            \
141         (((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
142
143 #define lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))
144
145 static void     assert_lockmgr(const struct lock_object *lock, int how);
146 #ifdef DDB
147 static void     db_show_lockmgr(const struct lock_object *lock);
148 #endif
149 static void     lock_lockmgr(struct lock_object *lock, uintptr_t how);
150 #ifdef KDTRACE_HOOKS
151 static int      owner_lockmgr(const struct lock_object *lock,
152                     struct thread **owner);
153 #endif
154 static uintptr_t unlock_lockmgr(struct lock_object *lock);
155
156 struct lock_class lock_class_lockmgr = {
157         .lc_name = "lockmgr",
158         .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
159         .lc_assert = assert_lockmgr,
160 #ifdef DDB
161         .lc_ddb_show = db_show_lockmgr,
162 #endif
163         .lc_lock = lock_lockmgr,
164         .lc_unlock = unlock_lockmgr,
165 #ifdef KDTRACE_HOOKS
166         .lc_owner = owner_lockmgr,
167 #endif
168 };
169
170 static __read_mostly bool lk_adaptive = true;
171 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
172 SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
173     0, "");
174 #define lockmgr_delay  locks_delay
175
176 struct lockmgr_wait {
177         const char *iwmesg;
178         int ipri;
179         int itimo;
180 };
181
182 static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
183     int flags, bool fp);
184 static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);
185
186 static void
187 lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
188 {
189         struct lock_class *class;
190
191         if (flags & LK_INTERLOCK) {
192                 class = LOCK_CLASS(ilk);
193                 class->lc_unlock(ilk);
194         }
195
196         if (__predict_false(wakeup_swapper))
197                 kick_proc0();
198 }
199
200 static void
201 lockmgr_note_shared_acquire(struct lock *lk, int contested,
202     uint64_t waittime, const char *file, int line, int flags)
203 {
204
205         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
206             waittime, file, line, LOCKSTAT_READER);
207         LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
208         WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
209         TD_LOCKS_INC(curthread);
210         TD_SLOCKS_INC(curthread);
211         STACK_SAVE(lk);
212 }
213
214 static void
215 lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
216 {
217
218         WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
219         LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
220         TD_LOCKS_DEC(curthread);
221         TD_SLOCKS_DEC(curthread);
222 }
223
224 static void
225 lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
226     uint64_t waittime, const char *file, int line, int flags)
227 {
228
229         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
230             waittime, file, line, LOCKSTAT_WRITER);
231         LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
232         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
233             line);
234         TD_LOCKS_INC(curthread);
235         STACK_SAVE(lk);
236 }
237
238 static void
239 lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
240 {
241
242         if (LK_HOLDER(lockmgr_read_value(lk)) != LK_KERNPROC) {
243                 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
244                 TD_LOCKS_DEC(curthread);
245         }
246         LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
247             line);
248 }
249
250 static __inline struct thread *
251 lockmgr_xholder(const struct lock *lk)
252 {
253         uintptr_t x;
254
255         x = lockmgr_read_value(lk);
256         return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
257 }
258
259 /*
260  * It assumes sleepq_lock held and returns with this one unheld.
261  * It also assumes the generic interlock is sane and previously checked.
262  * If LK_INTERLOCK is specified the interlock is not reacquired after the
263  * sleep.
264  */
265 static __inline int
266 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
267     const char *wmesg, int pri, int timo, int queue)
268 {
269         GIANT_DECLARE;
270         struct lock_class *class;
271         int catch, error;
272
273         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
274         catch = pri & PCATCH;
275         pri &= PRIMASK;
276         error = 0;
277
278         LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
279             (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
280
281         if (flags & LK_INTERLOCK)
282                 class->lc_unlock(ilk);
283         if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
284                 lk->lk_exslpfail++;
285         GIANT_SAVE();
286         sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
287             SLEEPQ_INTERRUPTIBLE : 0), queue);
288         if ((flags & LK_TIMELOCK) && timo)
289                 sleepq_set_timeout(&lk->lock_object, timo);
290
291         /*
292          * Decisional switch for real sleeping.
293          */
294         if ((flags & LK_TIMELOCK) && timo && catch)
295                 error = sleepq_timedwait_sig(&lk->lock_object, pri);
296         else if ((flags & LK_TIMELOCK) && timo)
297                 error = sleepq_timedwait(&lk->lock_object, pri);
298         else if (catch)
299                 error = sleepq_wait_sig(&lk->lock_object, pri);
300         else
301                 sleepq_wait(&lk->lock_object, pri);
302         GIANT_RESTORE();
303         if ((flags & LK_SLEEPFAIL) && error == 0)
304                 error = ENOLCK;
305
306         return (error);
307 }
308
309 static __inline int
310 wakeupshlk(struct lock *lk, const char *file, int line)
311 {
312         uintptr_t v, x, orig_x;
313         u_int realexslp;
314         int queue, wakeup_swapper;
315
316         wakeup_swapper = 0;
317         for (;;) {
318                 x = lockmgr_read_value(lk);
319                 if (lockmgr_sunlock_try(lk, &x))
320                         break;
321
322                 /*
323                  * We should have a sharer with waiters, so enter the hard
324                  * path in order to handle wakeups correctly.
325                  */
326                 sleepq_lock(&lk->lock_object);
327                 orig_x = lockmgr_read_value(lk);
328 retry_sleepq:
329                 x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
330                 v = LK_UNLOCKED;
331
332                 /*
333                  * If the lock has exclusive waiters, give them preference in
334                  * order to avoid deadlock with shared runners up.
335                  * If interruptible sleeps left the exclusive queue empty
336                  * avoid a starvation for the threads sleeping on the shared
337                  * queue by giving them precedence and cleaning up the
338                  * exclusive waiters bit anyway.
339                  * Please note that lk_exslpfail count may be lying about
340                  * the real number of waiters with the LK_SLEEPFAIL flag on
341                  * because they may be used in conjunction with interruptible
342                  * sleeps so lk_exslpfail might be considered an 'upper limit'
343                  * bound, including the edge cases.
344                  */
345                 realexslp = sleepq_sleepcnt(&lk->lock_object,
346                     SQ_EXCLUSIVE_QUEUE);
347                 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
348                         if (lk->lk_exslpfail < realexslp) {
349                                 lk->lk_exslpfail = 0;
350                                 queue = SQ_EXCLUSIVE_QUEUE;
351                                 v |= (x & LK_SHARED_WAITERS);
352                         } else {
353                                 lk->lk_exslpfail = 0;
354                                 LOCK_LOG2(lk,
355                                     "%s: %p has only LK_SLEEPFAIL sleepers",
356                                     __func__, lk);
357                                 LOCK_LOG2(lk,
358                             "%s: %p waking up threads on the exclusive queue",
359                                     __func__, lk);
360                                 wakeup_swapper =
361                                     sleepq_broadcast(&lk->lock_object,
362                                     SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
363                                 queue = SQ_SHARED_QUEUE;
364                         }
365                                 
366                 } else {
367                         /*
368                          * Exclusive waiters sleeping with LK_SLEEPFAIL on
369                          * and using interruptible sleeps/timeout may have
370                          * left spourious lk_exslpfail counts on, so clean
371                          * it up anyway.
372                          */
373                         lk->lk_exslpfail = 0;
374                         queue = SQ_SHARED_QUEUE;
375                 }
376
377                 if (lockmgr_sunlock_try(lk, &orig_x)) {
378                         sleepq_release(&lk->lock_object);
379                         break;
380                 }
381
382                 x |= LK_SHARERS_LOCK(1);
383                 if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
384                         orig_x = x;
385                         goto retry_sleepq;
386                 }
387                 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
388                     __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
389                     "exclusive");
390                 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
391                     0, queue);
392                 sleepq_release(&lk->lock_object);
393                 break;
394         }
395
396         LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
397         return (wakeup_swapper);
398 }
399
400 static void
401 assert_lockmgr(const struct lock_object *lock, int what)
402 {
403
404         panic("lockmgr locks do not support assertions");
405 }
406
407 static void
408 lock_lockmgr(struct lock_object *lock, uintptr_t how)
409 {
410
411         panic("lockmgr locks do not support sleep interlocking");
412 }
413
414 static uintptr_t
415 unlock_lockmgr(struct lock_object *lock)
416 {
417
418         panic("lockmgr locks do not support sleep interlocking");
419 }
420
421 #ifdef KDTRACE_HOOKS
422 static int
423 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
424 {
425
426         panic("lockmgr locks do not support owner inquiring");
427 }
428 #endif
429
430 void
431 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
432 {
433         int iflags;
434
435         MPASS((flags & ~LK_INIT_MASK) == 0);
436         ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
437             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
438             &lk->lk_lock));
439
440         iflags = LO_SLEEPABLE | LO_UPGRADABLE;
441         if (flags & LK_CANRECURSE)
442                 iflags |= LO_RECURSABLE;
443         if ((flags & LK_NODUP) == 0)
444                 iflags |= LO_DUPOK;
445         if (flags & LK_NOPROFILE)
446                 iflags |= LO_NOPROFILE;
447         if ((flags & LK_NOWITNESS) == 0)
448                 iflags |= LO_WITNESS;
449         if (flags & LK_QUIET)
450                 iflags |= LO_QUIET;
451         if (flags & LK_IS_VNODE)
452                 iflags |= LO_IS_VNODE;
453         if (flags & LK_NEW)
454                 iflags |= LO_NEW;
455         iflags |= flags & LK_NOSHARE;
456
457         lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
458         lk->lk_lock = LK_UNLOCKED;
459         lk->lk_recurse = 0;
460         lk->lk_exslpfail = 0;
461         lk->lk_timo = timo;
462         lk->lk_pri = pri;
463         STACK_ZERO(lk);
464 }
465
466 /*
467  * XXX: Gross hacks to manipulate external lock flags after
468  * initialization.  Used for certain vnode and buf locks.
469  */
470 void
471 lockallowshare(struct lock *lk)
472 {
473
474         lockmgr_assert(lk, KA_XLOCKED);
475         lk->lock_object.lo_flags &= ~LK_NOSHARE;
476 }
477
478 void
479 lockdisableshare(struct lock *lk)
480 {
481
482         lockmgr_assert(lk, KA_XLOCKED);
483         lk->lock_object.lo_flags |= LK_NOSHARE;
484 }
485
486 void
487 lockallowrecurse(struct lock *lk)
488 {
489
490         lockmgr_assert(lk, KA_XLOCKED);
491         lk->lock_object.lo_flags |= LO_RECURSABLE;
492 }
493
494 void
495 lockdisablerecurse(struct lock *lk)
496 {
497
498         lockmgr_assert(lk, KA_XLOCKED);
499         lk->lock_object.lo_flags &= ~LO_RECURSABLE;
500 }
501
502 void
503 lockdestroy(struct lock *lk)
504 {
505
506         KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
507         KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
508         KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
509         lock_destroy(&lk->lock_object);
510 }
511
512 static bool __always_inline
513 lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
514 {
515
516         /*
517          * If no other thread has an exclusive lock, or
518          * no exclusive waiter is present, bump the count of
519          * sharers.  Since we have to preserve the state of
520          * waiters, if we fail to acquire the shared lock
521          * loop back and retry.
522          */
523         while (LK_CAN_SHARE(*xp, flags, fp)) {
524                 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
525                     *xp + LK_ONE_SHARER)) {
526                         return (true);
527                 }
528         }
529         return (false);
530 }
531
532 static bool __always_inline
533 lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
534 {
535
536         for (;;) {
537                 if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
538                         if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
539                             *xp - LK_ONE_SHARER))
540                                 return (true);
541                         continue;
542                 }
543                 break;
544         }
545         return (false);
546 }
547
548 static bool
549 lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
550     int flags)
551 {
552         struct thread *owner;
553         uintptr_t x;
554
555         x = *xp;
556         MPASS(x != LK_UNLOCKED);
557         owner = (struct thread *)LK_HOLDER(x);
558         for (;;) {
559                 MPASS(owner != curthread);
560                 if (owner == (struct thread *)LK_KERNPROC)
561                         return (false);
562                 if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
563                         return (false);
564                 if (owner == NULL)
565                         return (false);
566                 if (!TD_IS_RUNNING(owner))
567                         return (false);
568                 if ((x & LK_ALL_WAITERS) != 0)
569                         return (false);
570                 lock_delay(lda);
571                 x = lockmgr_read_value(lk);
572                 if (LK_CAN_SHARE(x, flags, false)) {
573                         *xp = x;
574                         return (true);
575                 }
576                 owner = (struct thread *)LK_HOLDER(x);
577         }
578 }
579
580 static __noinline int
581 lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
582     const char *file, int line, struct lockmgr_wait *lwa)
583 {
584         uintptr_t tid, x;
585         int error = 0;
586         const char *iwmesg;
587         int ipri, itimo;
588
589 #ifdef KDTRACE_HOOKS
590         uint64_t sleep_time = 0;
591 #endif
592 #ifdef LOCK_PROFILING
593         uint64_t waittime = 0;
594         int contested = 0;
595 #endif
596         struct lock_delay_arg lda;
597
598         if (SCHEDULER_STOPPED())
599                 goto out;
600
601         tid = (uintptr_t)curthread;
602
603         if (LK_CAN_WITNESS(flags))
604                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
605                     file, line, flags & LK_INTERLOCK ? ilk : NULL);
606         x = lockmgr_read_value(lk);
607         lock_delay_arg_init(&lda, &lockmgr_delay);
608         if (!lk_adaptive)
609                 flags &= ~LK_ADAPTIVE;
610         /*
611          * The lock may already be locked exclusive by curthread,
612          * avoid deadlock.
613          */
614         if (LK_HOLDER(x) == tid) {
615                 LOCK_LOG2(lk,
616                     "%s: %p already held in exclusive mode",
617                     __func__, lk);
618                 error = EDEADLK;
619                 goto out;
620         }
621
622         for (;;) {
623                 if (lockmgr_slock_try(lk, &x, flags, false))
624                         break;
625
626                 lock_profile_obtain_lock_failed(&lk->lock_object, false,
627                     &contested, &waittime);
628
629                 if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
630                         if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
631                                 continue;
632                 }
633
634 #ifdef HWPMC_HOOKS
635                 PMC_SOFT_CALL( , , lock, failed);
636 #endif
637
638                 /*
639                  * If the lock is expected to not sleep just give up
640                  * and return.
641                  */
642                 if (LK_TRYOP(flags)) {
643                         LOCK_LOG2(lk, "%s: %p fails the try operation",
644                             __func__, lk);
645                         error = EBUSY;
646                         break;
647                 }
648
649                 /*
650                  * Acquire the sleepqueue chain lock because we
651                  * probabilly will need to manipulate waiters flags.
652                  */
653                 sleepq_lock(&lk->lock_object);
654                 x = lockmgr_read_value(lk);
655 retry_sleepq:
656
657                 /*
658                  * if the lock can be acquired in shared mode, try
659                  * again.
660                  */
661                 if (LK_CAN_SHARE(x, flags, false)) {
662                         sleepq_release(&lk->lock_object);
663                         continue;
664                 }
665
666                 /*
667                  * Try to set the LK_SHARED_WAITERS flag.  If we fail,
668                  * loop back and retry.
669                  */
670                 if ((x & LK_SHARED_WAITERS) == 0) {
671                         if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
672                             x | LK_SHARED_WAITERS)) {
673                                 goto retry_sleepq;
674                         }
675                         LOCK_LOG2(lk, "%s: %p set shared waiters flag",
676                             __func__, lk);
677                 }
678
679                 if (lwa == NULL) {
680                         iwmesg = lk->lock_object.lo_name;
681                         ipri = lk->lk_pri;
682                         itimo = lk->lk_timo;
683                 } else {
684                         iwmesg = lwa->iwmesg;
685                         ipri = lwa->ipri;
686                         itimo = lwa->itimo;
687                 }
688
689                 /*
690                  * As far as we have been unable to acquire the
691                  * shared lock and the shared waiters flag is set,
692                  * we will sleep.
693                  */
694 #ifdef KDTRACE_HOOKS
695                 sleep_time -= lockstat_nsecs(&lk->lock_object);
696 #endif
697                 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
698                     SQ_SHARED_QUEUE);
699 #ifdef KDTRACE_HOOKS
700                 sleep_time += lockstat_nsecs(&lk->lock_object);
701 #endif
702                 flags &= ~LK_INTERLOCK;
703                 if (error) {
704                         LOCK_LOG3(lk,
705                             "%s: interrupted sleep for %p with %d",
706                             __func__, lk, error);
707                         break;
708                 }
709                 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
710                     __func__, lk);
711                 x = lockmgr_read_value(lk);
712         }
713         if (error == 0) {
714 #ifdef KDTRACE_HOOKS
715                 if (sleep_time != 0)
716                         LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
717                             LOCKSTAT_READER, (x & LK_SHARE) == 0,
718                             (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
719 #endif
720 #ifdef LOCK_PROFILING
721                 lockmgr_note_shared_acquire(lk, contested, waittime,
722                     file, line, flags);
723 #else
724                 lockmgr_note_shared_acquire(lk, 0, 0, file, line,
725                     flags);
726 #endif
727         }
728
729 out:
730         lockmgr_exit(flags, ilk, 0);
731         return (error);
732 }
733
734 static bool
735 lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
736 {
737         struct thread *owner;
738         uintptr_t x;
739
740         x = *xp;
741         MPASS(x != LK_UNLOCKED);
742         owner = (struct thread *)LK_HOLDER(x);
743         for (;;) {
744                 MPASS(owner != curthread);
745                 if (owner == NULL)
746                         return (false);
747                 if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
748                         return (false);
749                 if (owner == (struct thread *)LK_KERNPROC)
750                         return (false);
751                 if (!TD_IS_RUNNING(owner))
752                         return (false);
753                 if ((x & LK_ALL_WAITERS) != 0)
754                         return (false);
755                 lock_delay(lda);
756                 x = lockmgr_read_value(lk);
757                 if (x == LK_UNLOCKED) {
758                         *xp = x;
759                         return (true);
760                 }
761                 owner = (struct thread *)LK_HOLDER(x);
762         }
763 }
764
765 static __noinline int
766 lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
767     const char *file, int line, struct lockmgr_wait *lwa)
768 {
769         struct lock_class *class;
770         uintptr_t tid, x, v;
771         int error = 0;
772         const char *iwmesg;
773         int ipri, itimo;
774
775 #ifdef KDTRACE_HOOKS
776         uint64_t sleep_time = 0;
777 #endif
778 #ifdef LOCK_PROFILING
779         uint64_t waittime = 0;
780         int contested = 0;
781 #endif
782         struct lock_delay_arg lda;
783
784         if (SCHEDULER_STOPPED())
785                 goto out;
786
787         tid = (uintptr_t)curthread;
788
789         if (LK_CAN_WITNESS(flags))
790                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
791                     LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
792                     ilk : NULL);
793
794         /*
795          * If curthread already holds the lock and this one is
796          * allowed to recurse, simply recurse on it.
797          */
798         if (lockmgr_xlocked(lk)) {
799                 if ((flags & LK_CANRECURSE) == 0 &&
800                     (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
801                         /*
802                          * If the lock is expected to not panic just
803                          * give up and return.
804                          */
805                         if (LK_TRYOP(flags)) {
806                                 LOCK_LOG2(lk,
807                                     "%s: %p fails the try operation",
808                                     __func__, lk);
809                                 error = EBUSY;
810                                 goto out;
811                         }
812                         if (flags & LK_INTERLOCK) {
813                                 class = LOCK_CLASS(ilk);
814                                 class->lc_unlock(ilk);
815                         }
816                         STACK_PRINT(lk);
817                         panic("%s: recursing on non recursive lockmgr %p "
818                             "@ %s:%d\n", __func__, lk, file, line);
819                 }
820                 atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
821                 lk->lk_recurse++;
822                 LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
823                 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
824                     lk->lk_recurse, file, line);
825                 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
826                     LK_TRYWIT(flags), file, line);
827                 TD_LOCKS_INC(curthread);
828                 goto out;
829         }
830
831         x = LK_UNLOCKED;
832         lock_delay_arg_init(&lda, &lockmgr_delay);
833         if (!lk_adaptive)
834                 flags &= ~LK_ADAPTIVE;
835         for (;;) {
836                 if (x == LK_UNLOCKED) {
837                         if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
838                                 break;
839                         continue;
840                 }
841
842                 lock_profile_obtain_lock_failed(&lk->lock_object, false,
843                     &contested, &waittime);
844
845                 if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
846                         if (lockmgr_xlock_adaptive(&lda, lk, &x))
847                                 continue;
848                 }
849 #ifdef HWPMC_HOOKS
850                 PMC_SOFT_CALL( , , lock, failed);
851 #endif
852
853                 /*
854                  * If the lock is expected to not sleep just give up
855                  * and return.
856                  */
857                 if (LK_TRYOP(flags)) {
858                         LOCK_LOG2(lk, "%s: %p fails the try operation",
859                             __func__, lk);
860                         error = EBUSY;
861                         break;
862                 }
863
864                 /*
865                  * Acquire the sleepqueue chain lock because we
866                  * probabilly will need to manipulate waiters flags.
867                  */
868                 sleepq_lock(&lk->lock_object);
869                 x = lockmgr_read_value(lk);
870 retry_sleepq:
871
872                 /*
873                  * if the lock has been released while we spun on
874                  * the sleepqueue chain lock just try again.
875                  */
876                 if (x == LK_UNLOCKED) {
877                         sleepq_release(&lk->lock_object);
878                         continue;
879                 }
880
881                 /*
882                  * The lock can be in the state where there is a
883                  * pending queue of waiters, but still no owner.
884                  * This happens when the lock is contested and an
885                  * owner is going to claim the lock.
886                  * If curthread is the one successfully acquiring it
887                  * claim lock ownership and return, preserving waiters
888                  * flags.
889                  */
890                 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
891                 if ((x & ~v) == LK_UNLOCKED) {
892                         v &= ~LK_EXCLUSIVE_SPINNERS;
893                         if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
894                             tid | v)) {
895                                 sleepq_release(&lk->lock_object);
896                                 LOCK_LOG2(lk,
897                                     "%s: %p claimed by a new writer",
898                                     __func__, lk);
899                                 break;
900                         }
901                         goto retry_sleepq;
902                 }
903
904                 /*
905                  * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
906                  * fail, loop back and retry.
907                  */
908                 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
909                         if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
910                             x | LK_EXCLUSIVE_WAITERS)) {
911                                 goto retry_sleepq;
912                         }
913                         LOCK_LOG2(lk, "%s: %p set excl waiters flag",
914                             __func__, lk);
915                 }
916
917                 if (lwa == NULL) {
918                         iwmesg = lk->lock_object.lo_name;
919                         ipri = lk->lk_pri;
920                         itimo = lk->lk_timo;
921                 } else {
922                         iwmesg = lwa->iwmesg;
923                         ipri = lwa->ipri;
924                         itimo = lwa->itimo;
925                 }
926
927                 /*
928                  * As far as we have been unable to acquire the
929                  * exclusive lock and the exclusive waiters flag
930                  * is set, we will sleep.
931                  */
932 #ifdef KDTRACE_HOOKS
933                 sleep_time -= lockstat_nsecs(&lk->lock_object);
934 #endif
935                 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
936                     SQ_EXCLUSIVE_QUEUE);
937 #ifdef KDTRACE_HOOKS
938                 sleep_time += lockstat_nsecs(&lk->lock_object);
939 #endif
940                 flags &= ~LK_INTERLOCK;
941                 if (error) {
942                         LOCK_LOG3(lk,
943                             "%s: interrupted sleep for %p with %d",
944                             __func__, lk, error);
945                         break;
946                 }
947                 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
948                     __func__, lk);
949                 x = lockmgr_read_value(lk);
950         }
951         if (error == 0) {
952 #ifdef KDTRACE_HOOKS
953                 if (sleep_time != 0)
954                         LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
955                             LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
956                             (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
957 #endif
958 #ifdef LOCK_PROFILING
959                 lockmgr_note_exclusive_acquire(lk, contested, waittime,
960                     file, line, flags);
961 #else
962                 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
963                     flags);
964 #endif
965         }
966
967 out:
968         lockmgr_exit(flags, ilk, 0);
969         return (error);
970 }
971
972 static __noinline int
973 lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
974     const char *file, int line, struct lockmgr_wait *lwa)
975 {
976         uintptr_t tid, v, setv;
977         int error = 0;
978         int op;
979
980         if (SCHEDULER_STOPPED())
981                 goto out;
982
983         tid = (uintptr_t)curthread;
984
985         _lockmgr_assert(lk, KA_SLOCKED, file, line);
986
987         op = flags & LK_TYPE_MASK;
988         v = lockmgr_read_value(lk);
989         for (;;) {
990                 if (LK_SHARERS(v) > 1) {
991                         if (op == LK_TRYUPGRADE) {
992                                 LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
993                                     __func__, lk);
994                                 error = EBUSY;
995                                 goto out;
996                         }
997                         if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &v,
998                             v - LK_ONE_SHARER)) {
999                                 lockmgr_note_shared_release(lk, file, line);
1000                                 goto out_xlock;
1001                         }
1002                         continue;
1003                 }
1004                 MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));
1005
1006                 setv = tid;
1007                 setv |= (v & LK_ALL_WAITERS);
1008
1009                 /*
1010                  * Try to switch from one shared lock to an exclusive one.
1011                  * We need to preserve waiters flags during the operation.
1012                  */
1013                 if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
1014                         LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
1015                             line);
1016                         WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
1017                             LK_TRYWIT(flags), file, line);
1018                         LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
1019                         TD_SLOCKS_DEC(curthread);
1020                         goto out;
1021                 }
1022         }
1023
1024 out_xlock:
1025         error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
1026         flags &= ~LK_INTERLOCK;
1027 out:
1028         lockmgr_exit(flags, ilk, 0);
1029         return (error);
1030 }
1031
1032 int
1033 lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
1034     const char *file, int line)
1035 {
1036         struct lock_class *class;
1037         uintptr_t x, tid;
1038         u_int op;
1039         bool locked;
1040
1041         if (SCHEDULER_STOPPED())
1042                 return (0);
1043
1044         op = flags & LK_TYPE_MASK;
1045         locked = false;
1046         switch (op) {
1047         case LK_SHARED:
1048                 if (LK_CAN_WITNESS(flags))
1049                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1050                             file, line, flags & LK_INTERLOCK ? ilk : NULL);
1051                 if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
1052                         break;
1053                 x = lockmgr_read_value(lk);
1054                 if (lockmgr_slock_try(lk, &x, flags, true)) {
1055                         lockmgr_note_shared_acquire(lk, 0, 0,
1056                             file, line, flags);
1057                         locked = true;
1058                 } else {
1059                         return (lockmgr_slock_hard(lk, flags, ilk, file, line,
1060                             NULL));
1061                 }
1062                 break;
1063         case LK_EXCLUSIVE:
1064                 if (LK_CAN_WITNESS(flags))
1065                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1066                             LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1067                             ilk : NULL);
1068                 tid = (uintptr_t)curthread;
1069                 if (lockmgr_read_value(lk) == LK_UNLOCKED &&
1070                     atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1071                         lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1072                             flags);
1073                         locked = true;
1074                 } else {
1075                         return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
1076                             NULL));
1077                 }
1078                 break;
1079         case LK_UPGRADE:
1080         case LK_TRYUPGRADE:
1081                 return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
1082         default:
1083                 break;
1084         }
1085         if (__predict_true(locked)) {
1086                 if (__predict_false(flags & LK_INTERLOCK)) {
1087                         class = LOCK_CLASS(ilk);
1088                         class->lc_unlock(ilk);
1089                 }
1090                 return (0);
1091         } else {
1092                 return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
1093                     LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
1094         }
1095 }
1096
1097 static __noinline int
1098 lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1099     const char *file, int line)
1100
1101 {
1102         int wakeup_swapper = 0;
1103
1104         if (SCHEDULER_STOPPED())
1105                 goto out;
1106
1107         wakeup_swapper = wakeupshlk(lk, file, line);
1108
1109 out:
1110         lockmgr_exit(flags, ilk, wakeup_swapper);
1111         return (0);
1112 }
1113
1114 static __noinline int
1115 lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1116     const char *file, int line)
1117 {
1118         uintptr_t tid, v;
1119         int wakeup_swapper = 0;
1120         u_int realexslp;
1121         int queue;
1122
1123         if (SCHEDULER_STOPPED())
1124                 goto out;
1125
1126         tid = (uintptr_t)curthread;
1127
1128         /*
1129          * As first option, treact the lock as if it has not
1130          * any waiter.
1131          * Fix-up the tid var if the lock has been disowned.
1132          */
1133         if (LK_HOLDER(x) == LK_KERNPROC)
1134                 tid = LK_KERNPROC;
1135
1136         /*
1137          * The lock is held in exclusive mode.
1138          * If the lock is recursed also, then unrecurse it.
1139          */
1140         if (lockmgr_recursed_v(x)) {
1141                 LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
1142                 lk->lk_recurse--;
1143                 if (lk->lk_recurse == 0)
1144                         atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
1145                 goto out;
1146         }
1147         if (tid != LK_KERNPROC)
1148                 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
1149                     LOCKSTAT_WRITER);
1150
1151         if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
1152                 goto out;
1153
1154         sleepq_lock(&lk->lock_object);
1155         x = lockmgr_read_value(lk);
1156         v = LK_UNLOCKED;
1157
1158         /*
1159          * If the lock has exclusive waiters, give them
1160          * preference in order to avoid deadlock with
1161          * shared runners up.
1162          * If interruptible sleeps left the exclusive queue
1163          * empty avoid a starvation for the threads sleeping
1164          * on the shared queue by giving them precedence
1165          * and cleaning up the exclusive waiters bit anyway.
1166          * Please note that lk_exslpfail count may be lying
1167          * about the real number of waiters with the
1168          * LK_SLEEPFAIL flag on because they may be used in
1169          * conjunction with interruptible sleeps so
1170          * lk_exslpfail might be considered an 'upper limit'
1171          * bound, including the edge cases.
1172          */
1173         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1174         realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
1175         if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1176                 if (lk->lk_exslpfail < realexslp) {
1177                         lk->lk_exslpfail = 0;
1178                         queue = SQ_EXCLUSIVE_QUEUE;
1179                         v |= (x & LK_SHARED_WAITERS);
1180                 } else {
1181                         lk->lk_exslpfail = 0;
1182                         LOCK_LOG2(lk,
1183                             "%s: %p has only LK_SLEEPFAIL sleepers",
1184                             __func__, lk);
1185                         LOCK_LOG2(lk,
1186                             "%s: %p waking up threads on the exclusive queue",
1187                             __func__, lk);
1188                         wakeup_swapper = sleepq_broadcast(&lk->lock_object,
1189                             SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1190                         queue = SQ_SHARED_QUEUE;
1191                 }
1192         } else {
1193                 /*
1194                  * Exclusive waiters sleeping with LK_SLEEPFAIL
1195                  * on and using interruptible sleeps/timeout
1196                  * may have left spourious lk_exslpfail counts
1197                  * on, so clean it up anyway.
1198                  */
1199                 lk->lk_exslpfail = 0;
1200                 queue = SQ_SHARED_QUEUE;
1201         }
1202
1203         LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
1204             __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1205             "exclusive");
1206         atomic_store_rel_ptr(&lk->lk_lock, v);
1207         wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
1208         sleepq_release(&lk->lock_object);
1209
1210 out:
1211         lockmgr_exit(flags, ilk, wakeup_swapper);
1212         return (0);
1213 }
1214
1215 /*
1216  * Lightweight entry points for common operations.
1217  *
1218  * Functionality is similar to sx locks, in that none of the additional lockmgr
1219  * features are supported. To be clear, these are NOT supported:
1220  * 1. shared locking disablement
1221  * 2. returning with an error after sleep
1222  * 3. unlocking the interlock
1223  *
1224  * If in doubt, use lockmgr_lock_flags.
1225  */
1226 int
1227 lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
1228 {
1229         uintptr_t x;
1230
1231         MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
1232         MPASS((flags & LK_INTERLOCK) == 0);
1233         MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
1234
1235         if (LK_CAN_WITNESS(flags))
1236                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1237                     file, line, NULL);
1238         x = lockmgr_read_value(lk);
1239         if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
1240                 lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
1241                 return (0);
1242         }
1243
1244         return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1245 }
1246
1247 int
1248 lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
1249 {
1250         uintptr_t tid;
1251
1252         MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
1253         MPASS((flags & LK_INTERLOCK) == 0);
1254
1255         if (LK_CAN_WITNESS(flags))
1256                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1257                     LOP_EXCLUSIVE, file, line, NULL);
1258         tid = (uintptr_t)curthread;
1259         if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1260                 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1261                     flags);
1262                 return (0);
1263         }
1264
1265         return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1266 }
1267
1268 int
1269 lockmgr_unlock(struct lock *lk)
1270 {
1271         uintptr_t x, tid;
1272         const char *file;
1273         int line;
1274
1275         file = __FILE__;
1276         line = __LINE__;
1277
1278         _lockmgr_assert(lk, KA_LOCKED, file, line);
1279         x = lockmgr_read_value(lk);
1280         if (__predict_true(x & LK_SHARE) != 0) {
1281                 lockmgr_note_shared_release(lk, file, line);
1282                 if (lockmgr_sunlock_try(lk, &x)) {
1283                         LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
1284                 } else {
1285                         return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1286                 }
1287         } else {
1288                 tid = (uintptr_t)curthread;
1289                 lockmgr_note_exclusive_release(lk, file, line);
1290                 if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1291                         LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,LOCKSTAT_WRITER);
1292                 } else {
1293                         return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1294                 }
1295         }
1296         return (0);
1297 }
1298
1299 int
1300 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
1301     const char *wmesg, int pri, int timo, const char *file, int line)
1302 {
1303         GIANT_DECLARE;
1304         struct lockmgr_wait lwa;
1305         struct lock_class *class;
1306         const char *iwmesg;
1307         uintptr_t tid, v, x;
1308         u_int op, realexslp;
1309         int error, ipri, itimo, queue, wakeup_swapper;
1310 #ifdef LOCK_PROFILING
1311         uint64_t waittime = 0;
1312         int contested = 0;
1313 #endif
1314
1315         if (SCHEDULER_STOPPED())
1316                 return (0);
1317
1318         error = 0;
1319         tid = (uintptr_t)curthread;
1320         op = (flags & LK_TYPE_MASK);
1321         iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
1322         ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
1323         itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
1324
1325         lwa.iwmesg = iwmesg;
1326         lwa.ipri = ipri;
1327         lwa.itimo = itimo;
1328
1329         MPASS((flags & ~LK_TOTAL_MASK) == 0);
1330         KASSERT((op & (op - 1)) == 0,
1331             ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
1332         KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
1333             (op != LK_DOWNGRADE && op != LK_RELEASE),
1334             ("%s: Invalid flags in regard of the operation desired @ %s:%d",
1335             __func__, file, line));
1336         KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
1337             ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
1338             __func__, file, line));
1339         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
1340             ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
1341             lk->lock_object.lo_name, file, line));
1342
1343         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
1344
1345         if (lk->lock_object.lo_flags & LK_NOSHARE) {
1346                 switch (op) {
1347                 case LK_SHARED:
1348                         op = LK_EXCLUSIVE;
1349                         break;
1350                 case LK_UPGRADE:
1351                 case LK_TRYUPGRADE:
1352                 case LK_DOWNGRADE:
1353                         _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
1354                             file, line);
1355                         if (flags & LK_INTERLOCK)
1356                                 class->lc_unlock(ilk);
1357                         return (0);
1358                 }
1359         }
1360
1361         wakeup_swapper = 0;
1362         switch (op) {
1363         case LK_SHARED:
1364                 return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
1365                 break;
1366         case LK_UPGRADE:
1367         case LK_TRYUPGRADE:
1368                 return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
1369                 break;
1370         case LK_EXCLUSIVE:
1371                 return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
1372                 break;
1373         case LK_DOWNGRADE:
1374                 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1375                 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
1376
1377                 /*
1378                  * Panic if the lock is recursed.
1379                  */
1380                 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1381                         if (flags & LK_INTERLOCK)
1382                                 class->lc_unlock(ilk);
1383                         panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
1384                             __func__, iwmesg, file, line);
1385                 }
1386                 TD_SLOCKS_INC(curthread);
1387
1388                 /*
1389                  * In order to preserve waiters flags, just spin.
1390                  */
1391                 for (;;) {
1392                         x = lockmgr_read_value(lk);
1393                         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1394                         x &= LK_ALL_WAITERS;
1395                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1396                             LK_SHARERS_LOCK(1) | x))
1397                                 break;
1398                         cpu_spinwait();
1399                 }
1400                 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
1401                 LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
1402                 break;
1403         case LK_RELEASE:
1404                 _lockmgr_assert(lk, KA_LOCKED, file, line);
1405                 x = lockmgr_read_value(lk);
1406
1407                 if (__predict_true(x & LK_SHARE) != 0) {
1408                         lockmgr_note_shared_release(lk, file, line);
1409                         return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1410                 } else {
1411                         lockmgr_note_exclusive_release(lk, file, line);
1412                         return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
1413                 }
1414                 break;
1415         case LK_DRAIN:
1416                 if (LK_CAN_WITNESS(flags))
1417                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1418                             LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1419                             ilk : NULL);
1420
1421                 /*
1422                  * Trying to drain a lock we already own will result in a
1423                  * deadlock.
1424                  */
1425                 if (lockmgr_xlocked(lk)) {
1426                         if (flags & LK_INTERLOCK)
1427                                 class->lc_unlock(ilk);
1428                         panic("%s: draining %s with the lock held @ %s:%d\n",
1429                             __func__, iwmesg, file, line);
1430                 }
1431
1432                 for (;;) {
1433                         if (lk->lk_lock == LK_UNLOCKED &&
1434                             atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1435                                 break;
1436
1437 #ifdef HWPMC_HOOKS
1438                         PMC_SOFT_CALL( , , lock, failed);
1439 #endif
1440                         lock_profile_obtain_lock_failed(&lk->lock_object, false,
1441                             &contested, &waittime);
1442
1443                         /*
1444                          * If the lock is expected to not sleep just give up
1445                          * and return.
1446                          */
1447                         if (LK_TRYOP(flags)) {
1448                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
1449                                     __func__, lk);
1450                                 error = EBUSY;
1451                                 break;
1452                         }
1453
1454                         /*
1455                          * Acquire the sleepqueue chain lock because we
1456                          * probabilly will need to manipulate waiters flags.
1457                          */
1458                         sleepq_lock(&lk->lock_object);
1459                         x = lockmgr_read_value(lk);
1460
1461                         /*
1462                          * if the lock has been released while we spun on
1463                          * the sleepqueue chain lock just try again.
1464                          */
1465                         if (x == LK_UNLOCKED) {
1466                                 sleepq_release(&lk->lock_object);
1467                                 continue;
1468                         }
1469
1470                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1471                         if ((x & ~v) == LK_UNLOCKED) {
1472                                 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1473
1474                                 /*
1475                                  * If interruptible sleeps left the exclusive
1476                                  * queue empty avoid a starvation for the
1477                                  * threads sleeping on the shared queue by
1478                                  * giving them precedence and cleaning up the
1479                                  * exclusive waiters bit anyway.
1480                                  * Please note that lk_exslpfail count may be
1481                                  * lying about the real number of waiters with
1482                                  * the LK_SLEEPFAIL flag on because they may
1483                                  * be used in conjunction with interruptible
1484                                  * sleeps so lk_exslpfail might be considered
1485                                  * an 'upper limit' bound, including the edge
1486                                  * cases.
1487                                  */
1488                                 if (v & LK_EXCLUSIVE_WAITERS) {
1489                                         queue = SQ_EXCLUSIVE_QUEUE;
1490                                         v &= ~LK_EXCLUSIVE_WAITERS;
1491                                 } else {
1492                                         /*
1493                                          * Exclusive waiters sleeping with
1494                                          * LK_SLEEPFAIL on and using
1495                                          * interruptible sleeps/timeout may
1496                                          * have left spourious lk_exslpfail
1497                                          * counts on, so clean it up anyway.
1498                                          */
1499                                         MPASS(v & LK_SHARED_WAITERS);
1500                                         lk->lk_exslpfail = 0;
1501                                         queue = SQ_SHARED_QUEUE;
1502                                         v &= ~LK_SHARED_WAITERS;
1503                                 }
1504                                 if (queue == SQ_EXCLUSIVE_QUEUE) {
1505                                         realexslp =
1506                                             sleepq_sleepcnt(&lk->lock_object,
1507                                             SQ_EXCLUSIVE_QUEUE);
1508                                         if (lk->lk_exslpfail >= realexslp) {
1509                                                 lk->lk_exslpfail = 0;
1510                                                 queue = SQ_SHARED_QUEUE;
1511                                                 v &= ~LK_SHARED_WAITERS;
1512                                                 if (realexslp != 0) {
1513                                                         LOCK_LOG2(lk,
1514                                         "%s: %p has only LK_SLEEPFAIL sleepers",
1515                                                             __func__, lk);
1516                                                         LOCK_LOG2(lk,
1517                         "%s: %p waking up threads on the exclusive queue",
1518                                                             __func__, lk);
1519                                                         wakeup_swapper =
1520                                                             sleepq_broadcast(
1521                                                             &lk->lock_object,
1522                                                             SLEEPQ_LK, 0,
1523                                                             SQ_EXCLUSIVE_QUEUE);
1524                                                 }
1525                                         } else
1526                                                 lk->lk_exslpfail = 0;
1527                                 }
1528                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1529                                         sleepq_release(&lk->lock_object);
1530                                         continue;
1531                                 }
1532                                 LOCK_LOG3(lk,
1533                                 "%s: %p waking up all threads on the %s queue",
1534                                     __func__, lk, queue == SQ_SHARED_QUEUE ?
1535                                     "shared" : "exclusive");
1536                                 wakeup_swapper |= sleepq_broadcast(
1537                                     &lk->lock_object, SLEEPQ_LK, 0, queue);
1538
1539                                 /*
1540                                  * If shared waiters have been woken up we need
1541                                  * to wait for one of them to acquire the lock
1542                                  * before to set the exclusive waiters in
1543                                  * order to avoid a deadlock.
1544                                  */
1545                                 if (queue == SQ_SHARED_QUEUE) {
1546                                         for (v = lk->lk_lock;
1547                                             (v & LK_SHARE) && !LK_SHARERS(v);
1548                                             v = lk->lk_lock)
1549                                                 cpu_spinwait();
1550                                 }
1551                         }
1552
1553                         /*
1554                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1555                          * fail, loop back and retry.
1556                          */
1557                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1558                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1559                                     x | LK_EXCLUSIVE_WAITERS)) {
1560                                         sleepq_release(&lk->lock_object);
1561                                         continue;
1562                                 }
1563                                 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1564                                     __func__, lk);
1565                         }
1566
1567                         /*
1568                          * As far as we have been unable to acquire the
1569                          * exclusive lock and the exclusive waiters flag
1570                          * is set, we will sleep.
1571                          */
1572                         if (flags & LK_INTERLOCK) {
1573                                 class->lc_unlock(ilk);
1574                                 flags &= ~LK_INTERLOCK;
1575                         }
1576                         GIANT_SAVE();
1577                         sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1578                             SQ_EXCLUSIVE_QUEUE);
1579                         sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1580                         GIANT_RESTORE();
1581                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1582                             __func__, lk);
1583                 }
1584
1585                 if (error == 0) {
1586                         lock_profile_obtain_lock_success(&lk->lock_object,
1587                             false, contested, waittime, file, line);
1588                         LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1589                             lk->lk_recurse, file, line);
1590                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1591                             LK_TRYWIT(flags), file, line);
1592                         TD_LOCKS_INC(curthread);
1593                         STACK_SAVE(lk);
1594                 }
1595                 break;
1596         default:
1597                 if (flags & LK_INTERLOCK)
1598                         class->lc_unlock(ilk);
1599                 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1600         }
1601
1602         if (flags & LK_INTERLOCK)
1603                 class->lc_unlock(ilk);
1604         if (wakeup_swapper)
1605                 kick_proc0();
1606
1607         return (error);
1608 }
1609
1610 void
1611 _lockmgr_disown(struct lock *lk, const char *file, int line)
1612 {
1613         uintptr_t tid, x;
1614
1615         if (SCHEDULER_STOPPED())
1616                 return;
1617
1618         tid = (uintptr_t)curthread;
1619         _lockmgr_assert(lk, KA_XLOCKED, file, line);
1620
1621         /*
1622          * Panic if the lock is recursed.
1623          */
1624         if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1625                 panic("%s: disown a recursed lockmgr @ %s:%d\n",
1626                     __func__,  file, line);
1627
1628         /*
1629          * If the owner is already LK_KERNPROC just skip the whole operation.
1630          */
1631         if (LK_HOLDER(lk->lk_lock) != tid)
1632                 return;
1633         lock_profile_release_lock(&lk->lock_object, false);
1634         LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1635         LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1636         WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1637         TD_LOCKS_DEC(curthread);
1638         STACK_SAVE(lk);
1639
1640         /*
1641          * In order to preserve waiters flags, just spin.
1642          */
1643         for (;;) {
1644                 x = lockmgr_read_value(lk);
1645                 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1646                 x &= LK_ALL_WAITERS;
1647                 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1648                     LK_KERNPROC | x))
1649                         return;
1650                 cpu_spinwait();
1651         }
1652 }
1653
1654 void
1655 lockmgr_printinfo(const struct lock *lk)
1656 {
1657         struct thread *td;
1658         uintptr_t x;
1659
1660         if (lk->lk_lock == LK_UNLOCKED)
1661                 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1662         else if (lk->lk_lock & LK_SHARE)
1663                 printf("lock type %s: SHARED (count %ju)\n",
1664                     lk->lock_object.lo_name,
1665                     (uintmax_t)LK_SHARERS(lk->lk_lock));
1666         else {
1667                 td = lockmgr_xholder(lk);
1668                 if (td == (struct thread *)LK_KERNPROC)
1669                         printf("lock type %s: EXCL by KERNPROC\n",
1670                             lk->lock_object.lo_name);
1671                 else
1672                         printf("lock type %s: EXCL by thread %p "
1673                             "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1674                             td, td->td_proc->p_pid, td->td_proc->p_comm,
1675                             td->td_tid);
1676         }
1677
1678         x = lk->lk_lock;
1679         if (x & LK_EXCLUSIVE_WAITERS)
1680                 printf(" with exclusive waiters pending\n");
1681         if (x & LK_SHARED_WAITERS)
1682                 printf(" with shared waiters pending\n");
1683         if (x & LK_EXCLUSIVE_SPINNERS)
1684                 printf(" with exclusive spinners pending\n");
1685
1686         STACK_PRINT(lk);
1687 }
1688
1689 int
1690 lockstatus(const struct lock *lk)
1691 {
1692         uintptr_t v, x;
1693         int ret;
1694
1695         ret = LK_SHARED;
1696         x = lockmgr_read_value(lk);
1697         v = LK_HOLDER(x);
1698
1699         if ((x & LK_SHARE) == 0) {
1700                 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1701                         ret = LK_EXCLUSIVE;
1702                 else
1703                         ret = LK_EXCLOTHER;
1704         } else if (x == LK_UNLOCKED)
1705                 ret = 0;
1706
1707         return (ret);
1708 }
1709
1710 #ifdef INVARIANT_SUPPORT
1711
1712 FEATURE(invariant_support,
1713     "Support for modules compiled with INVARIANTS option");
1714
1715 #ifndef INVARIANTS
1716 #undef  _lockmgr_assert
1717 #endif
1718
1719 void
1720 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1721 {
1722         int slocked = 0;
1723
1724         if (SCHEDULER_STOPPED())
1725                 return;
1726         switch (what) {
1727         case KA_SLOCKED:
1728         case KA_SLOCKED | KA_NOTRECURSED:
1729         case KA_SLOCKED | KA_RECURSED:
1730                 slocked = 1;
1731         case KA_LOCKED:
1732         case KA_LOCKED | KA_NOTRECURSED:
1733         case KA_LOCKED | KA_RECURSED:
1734 #ifdef WITNESS
1735
1736                 /*
1737                  * We cannot trust WITNESS if the lock is held in exclusive
1738                  * mode and a call to lockmgr_disown() happened.
1739                  * Workaround this skipping the check if the lock is held in
1740                  * exclusive mode even for the KA_LOCKED case.
1741                  */
1742                 if (slocked || (lk->lk_lock & LK_SHARE)) {
1743                         witness_assert(&lk->lock_object, what, file, line);
1744                         break;
1745                 }
1746 #endif
1747                 if (lk->lk_lock == LK_UNLOCKED ||
1748                     ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1749                     (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1750                         panic("Lock %s not %slocked @ %s:%d\n",
1751                             lk->lock_object.lo_name, slocked ? "share" : "",
1752                             file, line);
1753
1754                 if ((lk->lk_lock & LK_SHARE) == 0) {
1755                         if (lockmgr_recursed(lk)) {
1756                                 if (what & KA_NOTRECURSED)
1757                                         panic("Lock %s recursed @ %s:%d\n",
1758                                             lk->lock_object.lo_name, file,
1759                                             line);
1760                         } else if (what & KA_RECURSED)
1761                                 panic("Lock %s not recursed @ %s:%d\n",
1762                                     lk->lock_object.lo_name, file, line);
1763                 }
1764                 break;
1765         case KA_XLOCKED:
1766         case KA_XLOCKED | KA_NOTRECURSED:
1767         case KA_XLOCKED | KA_RECURSED:
1768                 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1769                         panic("Lock %s not exclusively locked @ %s:%d\n",
1770                             lk->lock_object.lo_name, file, line);
1771                 if (lockmgr_recursed(lk)) {
1772                         if (what & KA_NOTRECURSED)
1773                                 panic("Lock %s recursed @ %s:%d\n",
1774                                     lk->lock_object.lo_name, file, line);
1775                 } else if (what & KA_RECURSED)
1776                         panic("Lock %s not recursed @ %s:%d\n",
1777                             lk->lock_object.lo_name, file, line);
1778                 break;
1779         case KA_UNLOCKED:
1780                 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1781                         panic("Lock %s exclusively locked @ %s:%d\n",
1782                             lk->lock_object.lo_name, file, line);
1783                 break;
1784         default:
1785                 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1786                     line);
1787         }
1788 }
1789 #endif
1790
1791 #ifdef DDB
1792 int
1793 lockmgr_chain(struct thread *td, struct thread **ownerp)
1794 {
1795         const struct lock *lk;
1796
1797         lk = td->td_wchan;
1798
1799         if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1800                 return (0);
1801         db_printf("blocked on lockmgr %s", lk->lock_object.lo_name);
1802         if (lk->lk_lock & LK_SHARE)
1803                 db_printf("SHARED (count %ju)\n",
1804                     (uintmax_t)LK_SHARERS(lk->lk_lock));
1805         else
1806                 db_printf("EXCL\n");
1807         *ownerp = lockmgr_xholder(lk);
1808
1809         return (1);
1810 }
1811
1812 static void
1813 db_show_lockmgr(const struct lock_object *lock)
1814 {
1815         struct thread *td;
1816         const struct lock *lk;
1817
1818         lk = (const struct lock *)lock;
1819
1820         db_printf(" state: ");
1821         if (lk->lk_lock == LK_UNLOCKED)
1822                 db_printf("UNLOCKED\n");
1823         else if (lk->lk_lock & LK_SHARE)
1824                 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1825         else {
1826                 td = lockmgr_xholder(lk);
1827                 if (td == (struct thread *)LK_KERNPROC)
1828                         db_printf("XLOCK: LK_KERNPROC\n");
1829                 else
1830                         db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1831                             td->td_tid, td->td_proc->p_pid,
1832                             td->td_proc->p_comm);
1833                 if (lockmgr_recursed(lk))
1834                         db_printf(" recursed: %d\n", lk->lk_recurse);
1835         }
1836         db_printf(" waiters: ");
1837         switch (lk->lk_lock & LK_ALL_WAITERS) {
1838         case LK_SHARED_WAITERS:
1839                 db_printf("shared\n");
1840                 break;
1841         case LK_EXCLUSIVE_WAITERS:
1842                 db_printf("exclusive\n");
1843                 break;
1844         case LK_ALL_WAITERS:
1845                 db_printf("shared and exclusive\n");
1846                 break;
1847         default:
1848                 db_printf("none\n");
1849         }
1850         db_printf(" spinners: ");
1851         if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1852                 db_printf("exclusive\n");
1853         else
1854                 db_printf("none\n");
1855 }
1856 #endif