/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include "opt_ddb.h"
#include "opt_hwpmc_hooks.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/lockmgr.h>
#include <sys/lockstat.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#ifdef DEBUG_LOCKS
#include <sys/stack.h>
#endif
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <machine/cpu.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
#endif

CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
    ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));

#define SQ_EXCLUSIVE_QUEUE      0
#define SQ_SHARED_QUEUE         1

#ifndef INVARIANTS
#define _lockmgr_assert(lk, what, file, line)
#endif

#define TD_SLOCKS_INC(td)       ((td)->td_lk_slocks++)
#define TD_SLOCKS_DEC(td)       ((td)->td_lk_slocks--)

#ifndef DEBUG_LOCKS
#define STACK_PRINT(lk)
#define STACK_SAVE(lk)
#define STACK_ZERO(lk)
#else
#define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
#define STACK_SAVE(lk)  stack_save(&(lk)->lk_stack)
#define STACK_ZERO(lk)  stack_zero(&(lk)->lk_stack)
#endif

#define LOCK_LOG2(lk, string, arg1, arg2)                               \
        if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
                CTR2(KTR_LOCK, (string), (arg1), (arg2))
#define LOCK_LOG3(lk, string, arg1, arg2, arg3)                         \
        if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
                CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))

#define GIANT_DECLARE                                                   \
        int _i = 0;                                                     \
        WITNESS_SAVE_DECL(Giant)
#define GIANT_RESTORE() do {                                            \
        if (__predict_false(_i > 0)) {                                  \
                while (_i--)                                            \
                        mtx_lock(&Giant);                               \
                WITNESS_RESTORE(&Giant.lock_object, Giant);             \
        }                                                               \
} while (0)
#define GIANT_SAVE() do {                                               \
        if (__predict_false(mtx_owned(&Giant))) {                       \
                WITNESS_SAVE(&Giant.lock_object, Giant);                \
                while (mtx_owned(&Giant)) {                             \
                        _i++;                                           \
                        mtx_unlock(&Giant);                             \
                }                                                       \
        }                                                               \
} while (0)
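
/*
 * Editor's note, a usage sketch rather than new API: sleeplk() below is
 * the consumer of these macros.  The intended bracketing, with Giant
 * possibly held recursively on entry, is:
 *
 *	GIANT_DECLARE;
 *	...
 *	GIANT_SAVE();		(fully drop Giant, remembering the depth)
 *	sleepq_wait(...);
 *	GIANT_RESTORE();	(reacquire Giant to the saved depth)
 */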

static bool __always_inline
LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
{

        if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
            LK_SHARE)
                return (true);
        if (fp || (!(x & LK_SHARE)))
                return (false);
        if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
            (curthread->td_pflags & TDP_DEADLKTREAT))
                return (true);
        return (false);
}
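
/*
 * Worked example (editor's note): with x == LK_SHARERS_LOCK(2), i.e. two
 * sharers and no waiter bits set, the first test succeeds and a new sharer
 * is admitted.  With LK_EXCLUSIVE_WAITERS additionally set, the fast path
 * (fp) and plain callers are refused, but a thread that already holds
 * shared lockmgr locks (td_lk_slocks != 0 and no LK_NODDLKTREAT) or one
 * running with TDP_DEADLKTREAT may still share, trading writer fairness
 * for deadlock avoidance.
 */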

#define LK_TRYOP(x)                                                     \
        ((x) & LK_NOWAIT)

#define LK_CAN_WITNESS(x)                                               \
        (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
#define LK_TRYWIT(x)                                                    \
        (LK_TRYOP(x) ? LOP_TRYLOCK : 0)

#define lockmgr_disowned(lk)                                            \
        (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)

#define lockmgr_xlocked_v(v)                                            \
        (((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)

#define lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))

static void     assert_lockmgr(const struct lock_object *lock, int how);
#ifdef DDB
static void     db_show_lockmgr(const struct lock_object *lock);
#endif
static void     lock_lockmgr(struct lock_object *lock, uintptr_t how);
#ifdef KDTRACE_HOOKS
static int      owner_lockmgr(const struct lock_object *lock,
                    struct thread **owner);
#endif
static uintptr_t unlock_lockmgr(struct lock_object *lock);

struct lock_class lock_class_lockmgr = {
        .lc_name = "lockmgr",
        .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
        .lc_assert = assert_lockmgr,
#ifdef DDB
        .lc_ddb_show = db_show_lockmgr,
#endif
        .lc_lock = lock_lockmgr,
        .lc_unlock = unlock_lockmgr,
#ifdef KDTRACE_HOOKS
        .lc_owner = owner_lockmgr,
#endif
};

static __read_mostly bool lk_adaptive = true;
static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
    0, "");
#define lockmgr_delay  locks_delay

struct lockmgr_wait {
        const char *iwmesg;
        int ipri;
        int itimo;
};

static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
    int flags, bool fp);
static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);

static void
lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
{
        struct lock_class *class;

        if (flags & LK_INTERLOCK) {
                class = LOCK_CLASS(ilk);
                class->lc_unlock(ilk);
        }

        if (__predict_false(wakeup_swapper))
                kick_proc0();
}

static void
lockmgr_note_shared_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

        LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
            waittime, file, line, LOCKSTAT_READER);
        LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
        WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
        TD_LOCKS_INC(curthread);
        TD_SLOCKS_INC(curthread);
        STACK_SAVE(lk);
}

static void
lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
{

        WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
        LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
        TD_LOCKS_DEC(curthread);
        TD_SLOCKS_DEC(curthread);
}

static void
lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
    uint64_t waittime, const char *file, int line, int flags)
{

        LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
            waittime, file, line, LOCKSTAT_WRITER);
        LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
        WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
            line);
        TD_LOCKS_INC(curthread);
        STACK_SAVE(lk);
}

static void
lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
{

        if (LK_HOLDER(lockmgr_read_value(lk)) != LK_KERNPROC) {
                WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
                TD_LOCKS_DEC(curthread);
        }
        LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
            line);
}

static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
        uintptr_t x;

        x = lockmgr_read_value(lk);
        return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
}

/*
 * Assumes the sleepq_lock is held on entry and returns with it unheld.
 * It also assumes the generic interlock is sane and previously checked.
 * If LK_INTERLOCK is specified, the interlock is not reacquired after the
 * sleep.
 */
static __inline int
sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, int queue)
{
        GIANT_DECLARE;
        struct lock_class *class;
        int catch, error;

        class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
        catch = pri & PCATCH;
        pri &= PRIMASK;
        error = 0;

        LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
            (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");

        if (flags & LK_INTERLOCK)
                class->lc_unlock(ilk);
        if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0)
                lk->lk_exslpfail++;
        GIANT_SAVE();
        sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
            SLEEPQ_INTERRUPTIBLE : 0), queue);
        if ((flags & LK_TIMELOCK) && timo)
                sleepq_set_timeout(&lk->lock_object, timo);

        /*
         * Decisional switch for real sleeping.
         */
        if ((flags & LK_TIMELOCK) && timo && catch)
                error = sleepq_timedwait_sig(&lk->lock_object, pri);
        else if ((flags & LK_TIMELOCK) && timo)
                error = sleepq_timedwait(&lk->lock_object, pri);
        else if (catch)
                error = sleepq_wait_sig(&lk->lock_object, pri);
        else
                sleepq_wait(&lk->lock_object, pri);
        GIANT_RESTORE();
        if ((flags & LK_SLEEPFAIL) && error == 0)
                error = ENOLCK;

        return (error);
}
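
/*
 * Editor's note, summarizing the switch above: the sleep primitive is
 * selected from the (LK_TIMELOCK && timo, PCATCH) combination roughly as
 *
 *	timeout && catch -> sleepq_timedwait_sig()
 *	timeout          -> sleepq_timedwait()
 *	catch            -> sleepq_wait_sig()
 *	neither          -> sleepq_wait()
 *
 * and LK_SLEEPFAIL converts an otherwise successful sleep into ENOLCK so
 * the caller knows to fail the lock operation.
 */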

static __inline int
wakeupshlk(struct lock *lk, const char *file, int line)
{
        uintptr_t v, x, orig_x;
        u_int realexslp;
        int queue, wakeup_swapper;

        wakeup_swapper = 0;
        for (;;) {
                x = lockmgr_read_value(lk);
                if (lockmgr_sunlock_try(lk, &x))
                        break;

                /*
                 * We should have a sharer with waiters, so enter the hard
                 * path in order to handle wakeups correctly.
                 */
                sleepq_lock(&lk->lock_object);
                orig_x = lockmgr_read_value(lk);
retry_sleepq:
                x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
                v = LK_UNLOCKED;

                /*
                 * If the lock has exclusive waiters, give them preference in
                 * order to avoid deadlock with shared runners up.
                 * If interruptible sleeps left the exclusive queue empty,
                 * avoid starvation of the threads sleeping on the shared
                 * queue by giving them precedence and cleaning up the
                 * exclusive waiters bit anyway.
                 * Please note that the lk_exslpfail count may be lying about
                 * the real number of waiters with the LK_SLEEPFAIL flag on
                 * because they may be used in conjunction with interruptible
                 * sleeps, so lk_exslpfail should be considered an upper
                 * bound, including the edge cases.
                 */
                realexslp = sleepq_sleepcnt(&lk->lock_object,
                    SQ_EXCLUSIVE_QUEUE);
                if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
                        if (lk->lk_exslpfail < realexslp) {
                                lk->lk_exslpfail = 0;
                                queue = SQ_EXCLUSIVE_QUEUE;
                                v |= (x & LK_SHARED_WAITERS);
                        } else {
                                lk->lk_exslpfail = 0;
                                LOCK_LOG2(lk,
                                    "%s: %p has only LK_SLEEPFAIL sleepers",
                                    __func__, lk);
                                LOCK_LOG2(lk,
                            "%s: %p waking up threads on the exclusive queue",
                                    __func__, lk);
                                wakeup_swapper =
                                    sleepq_broadcast(&lk->lock_object,
                                    SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
                                queue = SQ_SHARED_QUEUE;
                        }
                } else {

                        /*
                         * Exclusive waiters sleeping with LK_SLEEPFAIL on
                         * and using interruptible sleeps/timeout may have
                         * left spurious lk_exslpfail counts on, so clean
                         * it up anyway.
                         */
                        lk->lk_exslpfail = 0;
                        queue = SQ_SHARED_QUEUE;
                }

                if (lockmgr_sunlock_try(lk, &orig_x)) {
                        sleepq_release(&lk->lock_object);
                        break;
                }

                x |= LK_SHARERS_LOCK(1);
                if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
                        orig_x = x;
                        goto retry_sleepq;
                }
                LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
                    __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
                    "exclusive");
                wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
                    0, queue);
                sleepq_release(&lk->lock_object);
                break;
        }

        LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
        return (wakeup_swapper);
}

static void
assert_lockmgr(const struct lock_object *lock, int what)
{

        panic("lockmgr locks do not support assertions");
}

static void
lock_lockmgr(struct lock_object *lock, uintptr_t how)
{

        panic("lockmgr locks do not support sleep interlocking");
}

static uintptr_t
unlock_lockmgr(struct lock_object *lock)
{

        panic("lockmgr locks do not support sleep interlocking");
}

#ifdef KDTRACE_HOOKS
static int
owner_lockmgr(const struct lock_object *lock, struct thread **owner)
{

        panic("lockmgr locks do not support owner inquiring");
}
#endif

void
lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
{
        int iflags;

        MPASS((flags & ~LK_INIT_MASK) == 0);
        ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
            ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
            &lk->lk_lock));

        iflags = LO_SLEEPABLE | LO_UPGRADABLE;
        if (flags & LK_CANRECURSE)
                iflags |= LO_RECURSABLE;
        if ((flags & LK_NODUP) == 0)
                iflags |= LO_DUPOK;
        if (flags & LK_NOPROFILE)
                iflags |= LO_NOPROFILE;
        if ((flags & LK_NOWITNESS) == 0)
                iflags |= LO_WITNESS;
        if (flags & LK_QUIET)
                iflags |= LO_QUIET;
        if (flags & LK_IS_VNODE)
                iflags |= LO_IS_VNODE;
        if (flags & LK_NEW)
                iflags |= LO_NEW;
        iflags |= flags & LK_NOSHARE;

        lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
        lk->lk_lock = LK_UNLOCKED;
        lk->lk_recurse = 0;
        lk->lk_exslpfail = 0;
        lk->lk_timo = timo;
        lk->lk_pri = pri;
        STACK_ZERO(lk);
}
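
/*
 * Usage sketch (editor's note; the caller and the "examplelk" name are
 * hypothetical, the APIs are the ones declared in <sys/lockmgr.h>):
 *
 *	struct lock lk;
 *
 *	lockinit(&lk, PRIBIO, "examplelk", 0, LK_CANRECURSE);
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	...
 *	lockmgr(&lk, LK_RELEASE, NULL);
 *	lockdestroy(&lk);
 */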

/*
 * XXX: Gross hacks to manipulate external lock flags after
 * initialization.  Used for certain vnode and buf locks.
 */
void
lockallowshare(struct lock *lk)
{

        lockmgr_assert(lk, KA_XLOCKED);
        lk->lock_object.lo_flags &= ~LK_NOSHARE;
}

void
lockdisableshare(struct lock *lk)
{

        lockmgr_assert(lk, KA_XLOCKED);
        lk->lock_object.lo_flags |= LK_NOSHARE;
}

void
lockallowrecurse(struct lock *lk)
{

        lockmgr_assert(lk, KA_XLOCKED);
        lk->lock_object.lo_flags |= LO_RECURSABLE;
}

void
lockdisablerecurse(struct lock *lk)
{

        lockmgr_assert(lk, KA_XLOCKED);
        lk->lock_object.lo_flags &= ~LO_RECURSABLE;
}
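
/*
 * Example (editor's sketch, hypothetical caller): since these helpers
 * assert KA_XLOCKED, the lock must be held exclusively while its flags
 * are flipped, e.g.:
 *
 *	lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	lockallowrecurse(&lk);
 *	lockmgr(&lk, LK_RELEASE, NULL);
 */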

void
lockdestroy(struct lock *lk)
{

        KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
        KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
        KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
        lock_destroy(&lk->lock_object);
}
static bool __always_inline
lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
{

        /*
         * If no other thread holds an exclusive lock and no exclusive
         * waiter is present, bump the count of sharers.  Since we have
         * to preserve the state of waiters, if we fail to acquire the
         * shared lock loop back and retry.
         */
        while (LK_CAN_SHARE(*xp, flags, fp)) {
                if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
                    *xp + LK_ONE_SHARER)) {
                        return (true);
                }
        }
        return (false);
}
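
/*
 * Editor's note on the retry idiom above (used throughout this file):
 * atomic_fcmpset_*() writes the freshly observed lock word back into
 * *xp on failure, so each iteration re-evaluates LK_CAN_SHARE() against
 * current state without an explicit reload.  Expanded, the loop is
 * equivalent to:
 *
 *	for (v = lockmgr_read_value(lk); LK_CAN_SHARE(v, flags, fp);) {
 *		if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &v,
 *		    v + LK_ONE_SHARER))
 *			return (true);
 *	}
 *	return (false);
 */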

static bool __always_inline
lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
{

        for (;;) {
                if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
                        if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
                            *xp - LK_ONE_SHARER))
                                return (true);
                        continue;
                }
                break;
        }
        return (false);
}

static bool
lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
    int flags)
{
        struct thread *owner;
        uintptr_t x;

        x = *xp;
        MPASS(x != LK_UNLOCKED);
        owner = (struct thread *)LK_HOLDER(x);
        for (;;) {
                MPASS(owner != curthread);
                if (owner == (struct thread *)LK_KERNPROC)
                        return (false);
                if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
                        return (false);
                if (owner == NULL)
                        return (false);
                if (!TD_IS_RUNNING(owner))
                        return (false);
                if ((x & LK_ALL_WAITERS) != 0)
                        return (false);
                lock_delay(lda);
                x = lockmgr_read_value(lk);
                if (LK_CAN_SHARE(x, flags, false)) {
                        *xp = x;
                        return (true);
                }
                owner = (struct thread *)LK_HOLDER(x);
        }
}

static __noinline int
lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
        uintptr_t tid, x;
        int error = 0;
        const char *iwmesg;
        int ipri, itimo;

#ifdef KDTRACE_HOOKS
        uint64_t sleep_time = 0;
#endif
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        struct lock_delay_arg lda;

        if (KERNEL_PANICKED())
                goto out;

        tid = (uintptr_t)curthread;

        if (LK_CAN_WITNESS(flags))
                WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
                    file, line, flags & LK_INTERLOCK ? ilk : NULL);
        lock_delay_arg_init(&lda, &lockmgr_delay);
        if (!lk_adaptive)
                flags &= ~LK_ADAPTIVE;
        x = lockmgr_read_value(lk);
        /*
         * The lock may already be locked exclusive by curthread,
         * avoid deadlock.
         */
        if (LK_HOLDER(x) == tid) {
                LOCK_LOG2(lk,
                    "%s: %p already held in exclusive mode",
                    __func__, lk);
                error = EDEADLK;
                goto out;
        }

        for (;;) {
                if (lockmgr_slock_try(lk, &x, flags, false))
                        break;

                if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
                        if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
                                continue;
                }

#ifdef HWPMC_HOOKS
                PMC_SOFT_CALL( , , lock, failed);
#endif
                lock_profile_obtain_lock_failed(&lk->lock_object,
                    &contested, &waittime);

                /*
                 * If the caller requested not to sleep (LK_NOWAIT), just
                 * give up and return.
                 */
                if (LK_TRYOP(flags)) {
                        LOCK_LOG2(lk, "%s: %p fails the try operation",
                            __func__, lk);
                        error = EBUSY;
                        break;
                }

                /*
                 * Acquire the sleepqueue chain lock because we
                 * probably will need to manipulate waiters flags.
                 */
                sleepq_lock(&lk->lock_object);
                x = lockmgr_read_value(lk);
retry_sleepq:

                /*
                 * If the lock can be acquired in shared mode, try
                 * again.
                 */
                if (LK_CAN_SHARE(x, flags, false)) {
                        sleepq_release(&lk->lock_object);
                        continue;
                }

                /*
                 * Try to set the LK_SHARED_WAITERS flag.  If we fail,
                 * loop back and retry.
                 */
                if ((x & LK_SHARED_WAITERS) == 0) {
                        if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
                            x | LK_SHARED_WAITERS)) {
                                goto retry_sleepq;
                        }
                        LOCK_LOG2(lk, "%s: %p set shared waiters flag",
                            __func__, lk);
                }

                if (lwa == NULL) {
                        iwmesg = lk->lock_object.lo_name;
                        ipri = lk->lk_pri;
                        itimo = lk->lk_timo;
                } else {
                        iwmesg = lwa->iwmesg;
                        ipri = lwa->ipri;
                        itimo = lwa->itimo;
                }

                /*
                 * Since we have been unable to acquire the shared lock
                 * and the shared waiters flag is set, we will sleep.
                 */
#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs(&lk->lock_object);
#endif
                error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
                    SQ_SHARED_QUEUE);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs(&lk->lock_object);
#endif
                flags &= ~LK_INTERLOCK;
                if (error) {
                        LOCK_LOG3(lk,
                            "%s: interrupted sleep for %p with %d",
                            __func__, lk, error);
                        break;
                }
                LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
                    __func__, lk);
                x = lockmgr_read_value(lk);
        }
        if (error == 0) {
#ifdef KDTRACE_HOOKS
                if (sleep_time != 0)
                        LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
                            LOCKSTAT_READER, (x & LK_SHARE) == 0,
                            (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#endif
#ifdef LOCK_PROFILING
                lockmgr_note_shared_acquire(lk, contested, waittime,
                    file, line, flags);
#else
                lockmgr_note_shared_acquire(lk, 0, 0, file, line,
                    flags);
#endif
        }

out:
        lockmgr_exit(flags, ilk, 0);
        return (error);
}

static bool
lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
{
        struct thread *owner;
        uintptr_t x;

        x = *xp;
        MPASS(x != LK_UNLOCKED);
        owner = (struct thread *)LK_HOLDER(x);
        for (;;) {
                MPASS(owner != curthread);
                if (owner == NULL)
                        return (false);
                if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
                        return (false);
                if (owner == (struct thread *)LK_KERNPROC)
                        return (false);
                if (!TD_IS_RUNNING(owner))
                        return (false);
                if ((x & LK_ALL_WAITERS) != 0)
                        return (false);
                lock_delay(lda);
                x = lockmgr_read_value(lk);
                if (x == LK_UNLOCKED) {
                        *xp = x;
                        return (true);
                }
                owner = (struct thread *)LK_HOLDER(x);
        }
}

static __noinline int
lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
        struct lock_class *class;
        uintptr_t tid, x, v;
        int error = 0;
        const char *iwmesg;
        int ipri, itimo;

#ifdef KDTRACE_HOOKS
        uint64_t sleep_time = 0;
#endif
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif
        struct lock_delay_arg lda;

        if (KERNEL_PANICKED())
                goto out;

        tid = (uintptr_t)curthread;

        if (LK_CAN_WITNESS(flags))
                WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
                    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
                    ilk : NULL);

        /*
         * If curthread already holds the lock and this one is
         * allowed to recurse, simply recurse on it.
         */
        if (lockmgr_xlocked(lk)) {
                if ((flags & LK_CANRECURSE) == 0 &&
                    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
                        /*
                         * If this is a try operation, just give up and
                         * return instead of panicking.
                         */
                        if (LK_TRYOP(flags)) {
                                LOCK_LOG2(lk,
                                    "%s: %p fails the try operation",
                                    __func__, lk);
                                error = EBUSY;
                                goto out;
                        }
                        if (flags & LK_INTERLOCK) {
                                class = LOCK_CLASS(ilk);
                                class->lc_unlock(ilk);
                        }
                        STACK_PRINT(lk);
                        panic("%s: recursing on non recursive lockmgr %p "
                            "@ %s:%d\n", __func__, lk, file, line);
                }
                atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
                lk->lk_recurse++;
                LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
                LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
                    lk->lk_recurse, file, line);
                WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
                    LK_TRYWIT(flags), file, line);
                TD_LOCKS_INC(curthread);
                goto out;
        }

        x = LK_UNLOCKED;
        lock_delay_arg_init(&lda, &lockmgr_delay);
        if (!lk_adaptive)
                flags &= ~LK_ADAPTIVE;
        for (;;) {
                if (x == LK_UNLOCKED) {
                        if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
                                break;
                        continue;
                }
                if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
                        if (lockmgr_xlock_adaptive(&lda, lk, &x))
                                continue;
                }
#ifdef HWPMC_HOOKS
                PMC_SOFT_CALL( , , lock, failed);
#endif
                lock_profile_obtain_lock_failed(&lk->lock_object,
                    &contested, &waittime);

                /*
                 * If the caller requested not to sleep (LK_NOWAIT), just
                 * give up and return.
                 */
                if (LK_TRYOP(flags)) {
                        LOCK_LOG2(lk, "%s: %p fails the try operation",
                            __func__, lk);
                        error = EBUSY;
                        break;
                }

                /*
                 * Acquire the sleepqueue chain lock because we
                 * probably will need to manipulate waiters flags.
                 */
                sleepq_lock(&lk->lock_object);
                x = lockmgr_read_value(lk);
retry_sleepq:

                /*
                 * If the lock has been released while we spun on
                 * the sleepqueue chain lock, just try again.
                 */
                if (x == LK_UNLOCKED) {
                        sleepq_release(&lk->lock_object);
                        continue;
                }

                /*
                 * The lock can be in the state where there is a
                 * pending queue of waiters, but still no owner.
                 * This happens when the lock is contested and an
                 * owner is going to claim the lock.
                 * If curthread is the one successfully acquiring it,
                 * claim lock ownership and return, preserving waiters
                 * flags.
                 */
                v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
                if ((x & ~v) == LK_UNLOCKED) {
                        v &= ~LK_EXCLUSIVE_SPINNERS;
                        if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
                            tid | v)) {
                                sleepq_release(&lk->lock_object);
                                LOCK_LOG2(lk,
                                    "%s: %p claimed by a new writer",
                                    __func__, lk);
                                break;
                        }
                        goto retry_sleepq;
                }

                /*
                 * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
                 * fail, loop back and retry.
                 */
                if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
                        if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
                            x | LK_EXCLUSIVE_WAITERS)) {
                                goto retry_sleepq;
                        }
                        LOCK_LOG2(lk, "%s: %p set excl waiters flag",
                            __func__, lk);
                }

                if (lwa == NULL) {
                        iwmesg = lk->lock_object.lo_name;
                        ipri = lk->lk_pri;
                        itimo = lk->lk_timo;
                } else {
                        iwmesg = lwa->iwmesg;
                        ipri = lwa->ipri;
                        itimo = lwa->itimo;
                }

                /*
                 * Since we have been unable to acquire the exclusive
                 * lock and the exclusive waiters flag is set, we will
                 * sleep.
                 */
#ifdef KDTRACE_HOOKS
                sleep_time -= lockstat_nsecs(&lk->lock_object);
#endif
                error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
                    SQ_EXCLUSIVE_QUEUE);
#ifdef KDTRACE_HOOKS
                sleep_time += lockstat_nsecs(&lk->lock_object);
#endif
                flags &= ~LK_INTERLOCK;
                if (error) {
                        LOCK_LOG3(lk,
                            "%s: interrupted sleep for %p with %d",
                            __func__, lk, error);
                        break;
                }
                LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
                    __func__, lk);
                x = lockmgr_read_value(lk);
        }
        if (error == 0) {
#ifdef KDTRACE_HOOKS
                if (sleep_time != 0)
                        LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
                            LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
                            (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
#endif
#ifdef LOCK_PROFILING
                lockmgr_note_exclusive_acquire(lk, contested, waittime,
                    file, line, flags);
#else
                lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
                    flags);
#endif
        }

out:
        lockmgr_exit(flags, ilk, 0);
        return (error);
}

static __noinline int
lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line, struct lockmgr_wait *lwa)
{
        uintptr_t tid, v, setv;
        int error = 0;
        int op;

        if (KERNEL_PANICKED())
                goto out;

        tid = (uintptr_t)curthread;

        _lockmgr_assert(lk, KA_SLOCKED, file, line);

        op = flags & LK_TYPE_MASK;
        v = lockmgr_read_value(lk);
        for (;;) {
                if (LK_SHARERS_LOCK(v) > 1) {
                        if (op == LK_TRYUPGRADE) {
                                LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
                                    __func__, lk);
                                error = EBUSY;
                                goto out;
                        }
                        if (lockmgr_sunlock_try(lk, &v)) {
                                lockmgr_note_shared_release(lk, file, line);
                                goto out_xlock;
                        }
                }
                MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));

                setv = tid;
                setv |= (v & LK_ALL_WAITERS);

                /*
                 * Try to switch from one shared lock to an exclusive one.
                 * We need to preserve waiters flags during the operation.
                 */
                if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
                        LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
                            line);
                        WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
                            LK_TRYWIT(flags), file, line);
                        LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
                        TD_SLOCKS_DEC(curthread);
                        goto out;
                }
        }

out_xlock:
        error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
        flags &= ~LK_INTERLOCK;
out:
        lockmgr_exit(flags, ilk, 0);
        return (error);
}
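
/*
 * Usage sketch (editor's note; the caller code is hypothetical): holders
 * of a shared lock may attempt a non-sleeping upgrade and fall back to a
 * full drop-and-relock cycle, after which any state validated under the
 * shared lock must be revalidated:
 *
 *	if (lockmgr(&lk, LK_TRYUPGRADE, NULL) == EBUSY) {
 *		lockmgr(&lk, LK_RELEASE, NULL);
 *		lockmgr(&lk, LK_EXCLUSIVE, NULL);
 *	}
 */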

int
lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
        struct lock_class *class;
        uintptr_t x, tid;
        u_int op;
        bool locked;

        if (KERNEL_PANICKED())
                return (0);

        op = flags & LK_TYPE_MASK;
        locked = false;
        switch (op) {
        case LK_SHARED:
                if (LK_CAN_WITNESS(flags))
                        WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
                            file, line, flags & LK_INTERLOCK ? ilk : NULL);
                if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
                        break;
                x = lockmgr_read_value(lk);
                if (lockmgr_slock_try(lk, &x, flags, true)) {
                        lockmgr_note_shared_acquire(lk, 0, 0,
                            file, line, flags);
                        locked = true;
                } else {
                        return (lockmgr_slock_hard(lk, flags, ilk, file, line,
                            NULL));
                }
                break;
        case LK_EXCLUSIVE:
                if (LK_CAN_WITNESS(flags))
                        WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
                            LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
                            ilk : NULL);
                tid = (uintptr_t)curthread;
                if (lockmgr_read_value(lk) == LK_UNLOCKED &&
                    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
                        lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
                            flags);
                        locked = true;
                } else {
                        return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
                            NULL));
                }
                break;
        case LK_UPGRADE:
        case LK_TRYUPGRADE:
                return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
        default:
                break;
        }
        if (__predict_true(locked)) {
                if (__predict_false(flags & LK_INTERLOCK)) {
                        class = LOCK_CLASS(ilk);
                        class->lc_unlock(ilk);
                }
                return (0);
        } else {
                return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
                    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
        }
}

static __noinline int
lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
        int wakeup_swapper = 0;

        if (KERNEL_PANICKED())
                goto out;

        wakeup_swapper = wakeupshlk(lk, file, line);

out:
        lockmgr_exit(flags, ilk, wakeup_swapper);
        return (0);
}

static __noinline int
lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{
        uintptr_t tid, v;
        int wakeup_swapper = 0;
        u_int realexslp;
        int queue;

        if (KERNEL_PANICKED())
                goto out;

        tid = (uintptr_t)curthread;

        /*
         * As a first option, treat the lock as if it has no
         * waiters.
         * Fix up the tid variable if the lock has been disowned.
         */
        if (LK_HOLDER(x) == LK_KERNPROC)
                tid = LK_KERNPROC;

        /*
         * The lock is held in exclusive mode.
         * If the lock is recursed also, then unrecurse it.
         */
        if (lockmgr_recursed_v(x)) {
                LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
                lk->lk_recurse--;
                if (lk->lk_recurse == 0)
                        atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
                goto out;
        }
        if (tid != LK_KERNPROC)
                LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
                    LOCKSTAT_WRITER);

        if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
                goto out;

        sleepq_lock(&lk->lock_object);
        x = lockmgr_read_value(lk);
        v = LK_UNLOCKED;

        /*
         * If the lock has exclusive waiters, give them
         * preference in order to avoid deadlock with
         * shared runners up.
         * If interruptible sleeps left the exclusive queue
         * empty, avoid starvation of the threads sleeping
         * on the shared queue by giving them precedence
         * and cleaning up the exclusive waiters bit anyway.
         * Please note that the lk_exslpfail count may be
         * lying about the real number of waiters with the
         * LK_SLEEPFAIL flag on because they may be used in
         * conjunction with interruptible sleeps, so
         * lk_exslpfail should be considered an upper
         * bound, including the edge cases.
         */
        MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
        realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
        if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
                if (lk->lk_exslpfail < realexslp) {
                        lk->lk_exslpfail = 0;
                        queue = SQ_EXCLUSIVE_QUEUE;
                        v |= (x & LK_SHARED_WAITERS);
                } else {
                        lk->lk_exslpfail = 0;
                        LOCK_LOG2(lk,
                            "%s: %p has only LK_SLEEPFAIL sleepers",
                            __func__, lk);
                        LOCK_LOG2(lk,
                            "%s: %p waking up threads on the exclusive queue",
                            __func__, lk);
                        wakeup_swapper = sleepq_broadcast(&lk->lock_object,
                            SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
                        queue = SQ_SHARED_QUEUE;
                }
        } else {

                /*
                 * Exclusive waiters sleeping with LK_SLEEPFAIL
                 * on and using interruptible sleeps/timeout
                 * may have left spurious lk_exslpfail counts
                 * on, so clean it up anyway.
                 */
                lk->lk_exslpfail = 0;
                queue = SQ_SHARED_QUEUE;
        }

        LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
            __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
            "exclusive");
        atomic_store_rel_ptr(&lk->lk_lock, v);
        wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
        sleepq_release(&lk->lock_object);

out:
        lockmgr_exit(flags, ilk, wakeup_swapper);
        return (0);
}

/*
 * Lightweight entry points for common operations.
 *
 * Functionality is similar to sx locks, in that none of the additional lockmgr
 * features are supported. To be clear, these are NOT supported:
 * 1. shared locking disablement
 * 2. returning with an error after sleep
 * 3. unlocking the interlock
 *
 * If in doubt, use lockmgr_lock_flags.  See also the usage sketch after
 * lockmgr_unlock() below.
 */
int
lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
{
        uintptr_t x;

        MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
        MPASS((flags & LK_INTERLOCK) == 0);
        MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);

        if (LK_CAN_WITNESS(flags))
                WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
                    file, line, NULL);
        x = lockmgr_read_value(lk);
        if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
                lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
                return (0);
        }

        return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
}

int
lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
{
        uintptr_t tid;

        MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
        MPASS((flags & LK_INTERLOCK) == 0);

        if (LK_CAN_WITNESS(flags))
                WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
                    LOP_EXCLUSIVE, file, line, NULL);
        tid = (uintptr_t)curthread;
        if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
                lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
                    flags);
                return (0);
        }

        return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
}

int
lockmgr_unlock(struct lock *lk)
{
        uintptr_t x, tid;
        const char *file;
        int line;

        file = __FILE__;
        line = __LINE__;

        _lockmgr_assert(lk, KA_LOCKED, file, line);
        x = lockmgr_read_value(lk);
        if (__predict_true((x & LK_SHARE) != 0)) {
                lockmgr_note_shared_release(lk, file, line);
                if (lockmgr_sunlock_try(lk, &x)) {
                        LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
                } else {
                        return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
                }
        } else {
                tid = (uintptr_t)curthread;
                lockmgr_note_exclusive_release(lk, file, line);
                if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
                        LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
                } else {
                        return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
                }
        }
        return (0);
}
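
/*
 * Usage sketch for the lightweight entry points above (editor's note;
 * the caller is hypothetical and no interlock may be involved):
 *
 *	if (lockmgr_slock(&lk, LK_SHARED, __FILE__, __LINE__) == 0) {
 *		... read-only access to the protected state ...
 *		lockmgr_unlock(&lk);
 *	}
 */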

int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *wmesg, int pri, int timo, const char *file, int line)
{
        GIANT_DECLARE;
        struct lockmgr_wait lwa;
        struct lock_class *class;
        const char *iwmesg;
        uintptr_t tid, v, x;
        u_int op, realexslp;
        int error, ipri, itimo, queue, wakeup_swapper;
#ifdef LOCK_PROFILING
        uint64_t waittime = 0;
        int contested = 0;
#endif

        if (KERNEL_PANICKED())
                return (0);

        error = 0;
        tid = (uintptr_t)curthread;
        op = (flags & LK_TYPE_MASK);
        iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
        ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
        itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;

        lwa.iwmesg = iwmesg;
        lwa.ipri = ipri;
        lwa.itimo = itimo;

        MPASS((flags & ~LK_TOTAL_MASK) == 0);
        KASSERT((op & (op - 1)) == 0,
            ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
        KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
            (op != LK_DOWNGRADE && op != LK_RELEASE),
            ("%s: Invalid flags in regard of the operation desired @ %s:%d",
            __func__, file, line));
        KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
            ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
            __func__, file, line));
        KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
            ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
            lk->lock_object.lo_name, file, line));

        class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;

        if (lk->lock_object.lo_flags & LK_NOSHARE) {
                switch (op) {
                case LK_SHARED:
                        op = LK_EXCLUSIVE;
                        break;
                case LK_UPGRADE:
                case LK_TRYUPGRADE:
                case LK_DOWNGRADE:
                        _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
                            file, line);
                        if (flags & LK_INTERLOCK)
                                class->lc_unlock(ilk);
                        return (0);
                }
        }

        wakeup_swapper = 0;
        switch (op) {
        case LK_SHARED:
                return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
                break;
        case LK_UPGRADE:
        case LK_TRYUPGRADE:
                return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
                break;
        case LK_EXCLUSIVE:
                return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
                break;
        case LK_DOWNGRADE:
                _lockmgr_assert(lk, KA_XLOCKED, file, line);
                WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);

                /*
                 * Panic if the lock is recursed.
                 */
                if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
                        if (flags & LK_INTERLOCK)
                                class->lc_unlock(ilk);
                        panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
                            __func__, iwmesg, file, line);
                }
                TD_SLOCKS_INC(curthread);

                /*
                 * In order to preserve waiters flags, just spin.
                 */
                for (;;) {
                        x = lockmgr_read_value(lk);
                        MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
                        x &= LK_ALL_WAITERS;
                        if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
                            LK_SHARERS_LOCK(1) | x))
                                break;
                        cpu_spinwait();
                }
                LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
                LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
                break;
        case LK_RELEASE:
                _lockmgr_assert(lk, KA_LOCKED, file, line);
                x = lockmgr_read_value(lk);

                if (__predict_true((x & LK_SHARE) != 0)) {
                        lockmgr_note_shared_release(lk, file, line);
                        return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
                } else {
                        lockmgr_note_exclusive_release(lk, file, line);
                        return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
                }
                break;
        case LK_DRAIN:
                if (LK_CAN_WITNESS(flags))
                        WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
                            LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
                            ilk : NULL);

                /*
                 * Trying to drain a lock we already own will result in a
                 * deadlock.
                 */
                if (lockmgr_xlocked(lk)) {
                        if (flags & LK_INTERLOCK)
                                class->lc_unlock(ilk);
                        panic("%s: draining %s with the lock held @ %s:%d\n",
                            __func__, iwmesg, file, line);
                }

                for (;;) {
                        if (lk->lk_lock == LK_UNLOCKED &&
                            atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
                                break;

#ifdef HWPMC_HOOKS
                        PMC_SOFT_CALL( , , lock, failed);
#endif
                        lock_profile_obtain_lock_failed(&lk->lock_object,
                            &contested, &waittime);

1440                         /*
1441                          * If the lock is expected not to sleep, just give
1442                          * up and return.
1443                          */
1444                         if (LK_TRYOP(flags)) {
1445                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
1446                                     __func__, lk);
1447                                 error = EBUSY;
1448                                 break;
1449                         }
1450
1451                         /*
1452                          * Acquire the sleepqueue chain lock because we
1453                          * probably will need to manipulate waiters flags.
1454                          */
1455                         sleepq_lock(&lk->lock_object);
1456                         x = lockmgr_read_value(lk);
1457
1458                         /*
1459                          * If the lock has been released while we spun on
1460                          * the sleepqueue chain lock, just try again.
1461                          */
1462                         if (x == LK_UNLOCKED) {
1463                                 sleepq_release(&lk->lock_object);
1464                                 continue;
1465                         }
1466
1467                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1468                         if ((x & ~v) == LK_UNLOCKED) {
1469                                 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1470
1471                                 /*
1472                                  * If interruptible sleeps left the exclusive
1473                                  * queue empty, avoid starvation of the
1474                                  * threads sleeping on the shared queue by
1475                                  * giving them precedence and cleaning up the
1476                                  * exclusive waiters bit anyway.
1477                                  * Please note that the lk_exslpfail count
1478                                  * may overstate the real number of waiters
1479                                  * with the LK_SLEEPFAIL flag set, because
1480                                  * such waiters may also be using
1481                                  * interruptible sleeps; treat lk_exslpfail
1482                                  * as an upper bound, edge cases
1483                                  * included.
1484                                  */
1485                                 if (v & LK_EXCLUSIVE_WAITERS) {
1486                                         queue = SQ_EXCLUSIVE_QUEUE;
1487                                         v &= ~LK_EXCLUSIVE_WAITERS;
1488                                 } else {
1489
1490                                         /*
1491                                          * Exclusive waiters sleeping with
1492                                          * LK_SLEEPFAIL on and using
1493                                          * interruptible sleeps/timeouts may
1494                                          * have left spurious lk_exslpfail
1495                                          * counts behind, so clean them up anyway.
1496                                          */
1497                                         MPASS(v & LK_SHARED_WAITERS);
1498                                         lk->lk_exslpfail = 0;
1499                                         queue = SQ_SHARED_QUEUE;
1500                                         v &= ~LK_SHARED_WAITERS;
1501                                 }
1502                                 if (queue == SQ_EXCLUSIVE_QUEUE) {
1503                                         realexslp =
1504                                             sleepq_sleepcnt(&lk->lock_object,
1505                                             SQ_EXCLUSIVE_QUEUE);
1506                                         if (lk->lk_exslpfail >= realexslp) {
1507                                                 lk->lk_exslpfail = 0;
1508                                                 queue = SQ_SHARED_QUEUE;
1509                                                 v &= ~LK_SHARED_WAITERS;
1510                                                 if (realexslp != 0) {
1511                                                         LOCK_LOG2(lk,
1512                                         "%s: %p has only LK_SLEEPFAIL sleepers",
1513                                                             __func__, lk);
1514                                                         LOCK_LOG2(lk,
1515                         "%s: %p waking up threads on the exclusive queue",
1516                                                             __func__, lk);
1517                                                         wakeup_swapper =
1518                                                             sleepq_broadcast(
1519                                                             &lk->lock_object,
1520                                                             SLEEPQ_LK, 0,
1521                                                             SQ_EXCLUSIVE_QUEUE);
1522                                                 }
1523                                         } else
1524                                                 lk->lk_exslpfail = 0;
1525                                 }
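                                     /*
                                      * Worked example for the check above
                                      * (illustrative): if sleepq_sleepcnt()
                                      * reports realexslp == 3 and
                                      * lk_exslpfail >= 3, every exclusive
                                      * sleeper is assumed to be a
                                      * LK_SLEEPFAIL one; they were woken
                                      * above only to fail their sleep, and
                                      * the lock is handed to the shared
                                      * queue instead.
                                      */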
1526                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1527                                         sleepq_release(&lk->lock_object);
1528                                         continue;
1529                                 }
1530                                 LOCK_LOG3(lk,
1531                                 "%s: %p waking up all threads on the %s queue",
1532                                     __func__, lk, queue == SQ_SHARED_QUEUE ?
1533                                     "shared" : "exclusive");
1534                                 wakeup_swapper |= sleepq_broadcast(
1535                                     &lk->lock_object, SLEEPQ_LK, 0, queue);
1536
1537                                 /*
1538                                  * If shared waiters have been woken up, we
1539                                  * need to wait for one of them to acquire
1540                                  * the lock before setting the exclusive
1541                                  * waiters flag, in order to avoid a deadlock.
1542                                  */
1543                                 if (queue == SQ_SHARED_QUEUE) {
1544                                         for (v = lk->lk_lock;
1545                                             (v & LK_SHARE) && !LK_SHARERS(v);
1546                                             v = lk->lk_lock)
1547                                                 cpu_spinwait();
1548                                 }
1549                         }
1550
1551                         /*
1552                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1553                          * fail, loop back and retry.
1554                          */
1555                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1556                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1557                                     x | LK_EXCLUSIVE_WAITERS)) {
1558                                         sleepq_release(&lk->lock_object);
1559                                         continue;
1560                                 }
1561                                 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1562                                     __func__, lk);
1563                         }
1564
1565                         /*
1566                          * Since we have been unable to acquire the
1567                          * exclusive lock and the exclusive waiters flag
1568                          * is set, we will sleep.
1569                          */
1570                         if (flags & LK_INTERLOCK) {
1571                                 class->lc_unlock(ilk);
1572                                 flags &= ~LK_INTERLOCK;
1573                         }
1574                         GIANT_SAVE();
1575                         sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1576                             SQ_EXCLUSIVE_QUEUE);
1577                         sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1578                         GIANT_RESTORE();
1579                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1580                             __func__, lk);
1581                 }
1582
1583                 if (error == 0) {
1584                         lock_profile_obtain_lock_success(&lk->lock_object,
1585                             contested, waittime, file, line);
1586                         LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1587                             lk->lk_recurse, file, line);
1588                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1589                             LK_TRYWIT(flags), file, line);
1590                         TD_LOCKS_INC(curthread);
1591                         STACK_SAVE(lk);
1592                 }
1593                 break;
1594         default:
1595                 if (flags & LK_INTERLOCK)
1596                         class->lc_unlock(ilk);
1597                 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1598         }
1599
1600         if (flags & LK_INTERLOCK)
1601                 class->lc_unlock(ilk);
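             /*
              * A nonzero wakeup_swapper means one of the sleepq_broadcast()
              * calls above made a swapped-out thread runnable, so proc0 (the
              * swapper) must be kicked to bring the thread back in.
              */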
1602         if (wakeup_swapper)
1603                 kick_proc0();
1604
1605         return (error);
1606 }
1607
1608 void
1609 _lockmgr_disown(struct lock *lk, const char *file, int line)
1610 {
1611         uintptr_t tid, x;
1612
1613         if (SCHEDULER_STOPPED())
1614                 return;
1615
1616         tid = (uintptr_t)curthread;
1617         _lockmgr_assert(lk, KA_XLOCKED, file, line);
1618
1619         /*
1620          * Panic if the lock is recursed.
1621          */
1622         if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1623                 panic("%s: disown a recursed lockmgr @ %s:%d\n",
1624                     __func__, file, line);
1625
1626         /*
1627          * If the owner is already LK_KERNPROC, just skip the whole operation.
1628          */
1629         if (LK_HOLDER(lk->lk_lock) != tid)
1630                 return;
1631         lock_profile_release_lock(&lk->lock_object);
1632         LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1633         LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1634         WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1635         TD_LOCKS_DEC(curthread);
1636         STACK_SAVE(lk);
1637
1638         /*
1639          * In order to preserve waiters flags, just spin.
1640          */
1641         for (;;) {
1642                 x = lockmgr_read_value(lk);
1643                 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1644                 x &= LK_ALL_WAITERS;
1645                 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1646                     LK_KERNPROC | x))
1647                         return;
1648                 cpu_spinwait();
1649         }
1650 }
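     /*
      * Usage note (a hedged sketch): disowning hands an exclusively held
      * lock over to LK_KERNPROC so that it can later be released from a
      * different context; the buffer cache's BUF_KERNPROC() is a typical
      * consumer.
      *      lockmgr(&lk, LK_EXCLUSIVE, NULL);
      *      lockmgr_disown(&lk);    (ownership passes to LK_KERNPROC)
      */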
1651
1652 void
1653 lockmgr_printinfo(const struct lock *lk)
1654 {
1655         struct thread *td;
1656         uintptr_t x;
1657
1658         if (lk->lk_lock == LK_UNLOCKED)
1659                 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1660         else if (lk->lk_lock & LK_SHARE)
1661                 printf("lock type %s: SHARED (count %ju)\n",
1662                     lk->lock_object.lo_name,
1663                     (uintmax_t)LK_SHARERS(lk->lk_lock));
1664         else {
1665                 td = lockmgr_xholder(lk);
1666                 if (td == (struct thread *)LK_KERNPROC)
1667                         printf("lock type %s: EXCL by KERNPROC\n",
1668                             lk->lock_object.lo_name);
1669                 else
1670                         printf("lock type %s: EXCL by thread %p "
1671                             "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1672                             td, td->td_proc->p_pid, td->td_proc->p_comm,
1673                             td->td_tid);
1674         }
1675
1676         x = lk->lk_lock;
1677         if (x & LK_EXCLUSIVE_WAITERS)
1678                 printf(" with exclusive waiters pending\n");
1679         if (x & LK_SHARED_WAITERS)
1680                 printf(" with shared waiters pending\n");
1681         if (x & LK_EXCLUSIVE_SPINNERS)
1682                 printf(" with exclusive spinners pending\n");
1683
1684         STACK_PRINT(lk);
1685 }
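     /*
      * Example output (illustrative; the lock name, pointer and ids below
      * are hypothetical):
      *      lock type bufwait: EXCL by thread 0xfffff80003123000 (pid 5, bufdaemon, tid 100042)
      *       with exclusive waiters pending
      */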
1686
1687 int
1688 lockstatus(const struct lock *lk)
1689 {
1690         uintptr_t v, x;
1691         int ret;
1692
1693         ret = LK_SHARED;
1694         x = lockmgr_read_value(lk);
1695         v = LK_HOLDER(x);
1696
1697         if ((x & LK_SHARE) == 0) {
1698                 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1699                         ret = LK_EXCLUSIVE;
1700                 else
1701                         ret = LK_EXCLOTHER;
1702         } else if (x == LK_UNLOCKED)
1703                 ret = 0;
1704
1705         return (ret);
1706 }
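     /*
      * Callers compare the return value against LK_EXCLUSIVE, LK_SHARED,
      * LK_EXCLOTHER or 0; e.g. a vnode-layer consumer might assert
      * exclusive ownership with a sketch like (hypothetical caller):
      *      if (lockstatus(vp->v_vnlock) != LK_EXCLUSIVE)
      *              panic("vnode lock not exclusively owned");
      */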
1707
1708 #ifdef INVARIANT_SUPPORT
1709
1710 FEATURE(invariant_support,
1711     "Support for modules compiled with INVARIANTS option");
1712
1713 #ifndef INVARIANTS
1714 #undef  _lockmgr_assert
1715 #endif
1716
1717 void
1718 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1719 {
1720         int slocked = 0;
1721
1722         if (KERNEL_PANICKED())
1723                 return;
1724         switch (what) {
1725         case KA_SLOCKED:
1726         case KA_SLOCKED | KA_NOTRECURSED:
1727         case KA_SLOCKED | KA_RECURSED:
1728                 slocked = 1;
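                     /* FALLTHROUGH: KA_SLOCKED is validated via the KA_LOCKED path. */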
1729         case KA_LOCKED:
1730         case KA_LOCKED | KA_NOTRECURSED:
1731         case KA_LOCKED | KA_RECURSED:
1732 #ifdef WITNESS
1733
1734                 /*
1735                  * We cannot trust WITNESS if the lock is held in exclusive
1736                  * mode and a call to lockmgr_disown() happened.
1737                  * Work around this by skipping the check if the lock is
1738                  * held in exclusive mode, even for the KA_LOCKED case.
1739                  */
1740                 if (slocked || (lk->lk_lock & LK_SHARE)) {
1741                         witness_assert(&lk->lock_object, what, file, line);
1742                         break;
1743                 }
1744 #endif
1745                 if (lk->lk_lock == LK_UNLOCKED ||
1746                     ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1747                     (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1748                         panic("Lock %s not %slocked @ %s:%d\n",
1749                             lk->lock_object.lo_name, slocked ? "share" : "",
1750                             file, line);
1751
1752                 if ((lk->lk_lock & LK_SHARE) == 0) {
1753                         if (lockmgr_recursed(lk)) {
1754                                 if (what & KA_NOTRECURSED)
1755                                         panic("Lock %s recursed @ %s:%d\n",
1756                                             lk->lock_object.lo_name, file,
1757                                             line);
1758                         } else if (what & KA_RECURSED)
1759                                 panic("Lock %s not recursed @ %s:%d\n",
1760                                     lk->lock_object.lo_name, file, line);
1761                 }
1762                 break;
1763         case KA_XLOCKED:
1764         case KA_XLOCKED | KA_NOTRECURSED:
1765         case KA_XLOCKED | KA_RECURSED:
1766                 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1767                         panic("Lock %s not exclusively locked @ %s:%d\n",
1768                             lk->lock_object.lo_name, file, line);
1769                 if (lockmgr_recursed(lk)) {
1770                         if (what & KA_NOTRECURSED)
1771                                 panic("Lock %s recursed @ %s:%d\n",
1772                                     lk->lock_object.lo_name, file, line);
1773                 } else if (what & KA_RECURSED)
1774                         panic("Lock %s not recursed @ %s:%d\n",
1775                             lk->lock_object.lo_name, file, line);
1776                 break;
1777         case KA_UNLOCKED:
1778                 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1779                         panic("Lock %s exclusively locked @ %s:%d\n",
1780                             lk->lock_object.lo_name, file, line);
1781                 break;
1782         default:
1783                 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1784                     line);
1785         }
1786 }
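     /*
      * Typical use (sketch): the lockmgr_assert() wrapper supplies the
      * file/line pair, e.g. a subsystem verifying it holds the lock
      * exclusively:
      *      lockmgr_assert(&lk, KA_XLOCKED);
      */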
1787 #endif
1788
1789 #ifdef DDB
1790 int
1791 lockmgr_chain(struct thread *td, struct thread **ownerp)
1792 {
1793         const struct lock *lk;
1794
1795         lk = td->td_wchan;
1796
1797         if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1798                 return (0);
1799         db_printf("blocked on lockmgr %s ", lk->lock_object.lo_name);
1800         if (lk->lk_lock & LK_SHARE)
1801                 db_printf("SHARED (count %ju)\n",
1802                     (uintmax_t)LK_SHARERS(lk->lk_lock));
1803         else
1804                 db_printf("EXCL\n");
1805         *ownerp = lockmgr_xholder(lk);
1806
1807         return (1);
1808 }
1809
1810 static void
1811 db_show_lockmgr(const struct lock_object *lock)
1812 {
1813         struct thread *td;
1814         const struct lock *lk;
1815
1816         lk = (const struct lock *)lock;
1817
1818         db_printf(" state: ");
1819         if (lk->lk_lock == LK_UNLOCKED)
1820                 db_printf("UNLOCKED\n");
1821         else if (lk->lk_lock & LK_SHARE)
1822                 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1823         else {
1824                 td = lockmgr_xholder(lk);
1825                 if (td == (struct thread *)LK_KERNPROC)
1826                         db_printf("XLOCK: LK_KERNPROC\n");
1827                 else
1828                         db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1829                             td->td_tid, td->td_proc->p_pid,
1830                             td->td_proc->p_comm);
1831                 if (lockmgr_recursed(lk))
1832                         db_printf(" recursed: %d\n", lk->lk_recurse);
1833         }
1834         db_printf(" waiters: ");
1835         switch (lk->lk_lock & LK_ALL_WAITERS) {
1836         case LK_SHARED_WAITERS:
1837                 db_printf("shared\n");
1838                 break;
1839         case LK_EXCLUSIVE_WAITERS:
1840                 db_printf("exclusive\n");
1841                 break;
1842         case LK_ALL_WAITERS:
1843                 db_printf("shared and exclusive\n");
1844                 break;
1845         default:
1846                 db_printf("none\n");
1847         }
1848         db_printf(" spinners: ");
1849         if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1850                 db_printf("exclusive\n");
1851         else
1852                 db_printf("none\n");
1853 }
1854 #endif