[FreeBSD/FreeBSD.git] / sys / kern / kern_lock.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2008 Attilio Rao <attilio@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice(s), this list of conditions and the following disclaimer as
12  *    the first lines of this file unmodified other than the possible
13  *    addition of one or more copyright notices.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice(s), this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
19  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
20  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
21  * DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
22  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
23  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
24  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
28  * DAMAGE.
29  */
30
31 #include "opt_ddb.h"
32 #include "opt_hwpmc_hooks.h"
33
34 #include <sys/param.h>
35 #include <sys/kdb.h>
36 #include <sys/ktr.h>
37 #include <sys/limits.h>
38 #include <sys/lock.h>
39 #include <sys/lock_profile.h>
40 #include <sys/lockmgr.h>
41 #include <sys/lockstat.h>
42 #include <sys/mutex.h>
43 #include <sys/proc.h>
44 #include <sys/sleepqueue.h>
45 #ifdef DEBUG_LOCKS
46 #include <sys/stack.h>
47 #endif
48 #include <sys/sysctl.h>
49 #include <sys/systm.h>
50
51 #include <machine/cpu.h>
52
53 #ifdef DDB
54 #include <ddb/ddb.h>
55 #endif
56
57 #ifdef HWPMC_HOOKS
58 #include <sys/pmckern.h>
59 PMC_SOFT_DECLARE( , , lock, failed);
60 #endif
61
62 /*
63  * Hack. There should be prio_t or similar so that this is not necessary.
64  */
65 _Static_assert((PRILASTFLAG * 2) - 1 <= USHRT_MAX,
66     "prio flags wont fit in u_short pri in struct lock");
67
68 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
69     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
70
71 #define SQ_EXCLUSIVE_QUEUE      0
72 #define SQ_SHARED_QUEUE         1
73
74 #ifndef INVARIANTS
75 #define _lockmgr_assert(lk, what, file, line)
76 #endif
77
78 #define TD_SLOCKS_INC(td)       ((td)->td_lk_slocks++)
79 #define TD_SLOCKS_DEC(td)       ((td)->td_lk_slocks--)
80
81 #ifndef DEBUG_LOCKS
82 #define STACK_PRINT(lk)
83 #define STACK_SAVE(lk)
84 #define STACK_ZERO(lk)
85 #else
86 #define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack)
87 #define STACK_SAVE(lk)  stack_save(&(lk)->lk_stack)
88 #define STACK_ZERO(lk)  stack_zero(&(lk)->lk_stack)
89 #endif
90
91 #define LOCK_LOG2(lk, string, arg1, arg2)                               \
92         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
93                 CTR2(KTR_LOCK, (string), (arg1), (arg2))
94 #define LOCK_LOG3(lk, string, arg1, arg2, arg3)                         \
95         if (LOCK_LOG_TEST(&(lk)->lock_object, 0))                       \
96                 CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3))
97
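/*
 * Helpers to fully drop Giant (including any recursion) before a lockmgr
 * sleep and to reacquire it the same number of times afterwards.  The local
 * counter declared by GIANT_DECLARE records how many times GIANT_SAVE()
 * unlocked Giant so that GIANT_RESTORE() can rebuild the recursion.
 */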
98 #define GIANT_DECLARE                                                   \
99         int _i = 0;                                                     \
100         WITNESS_SAVE_DECL(Giant)
101 #define GIANT_RESTORE() do {                                            \
102         if (__predict_false(_i > 0)) {                                  \
103                 while (_i--)                                            \
104                         mtx_lock(&Giant);                               \
105                 WITNESS_RESTORE(&Giant.lock_object, Giant);             \
106         }                                                               \
107 } while (0)
108 #define GIANT_SAVE() do {                                               \
109         if (__predict_false(mtx_owned(&Giant))) {                       \
110                 WITNESS_SAVE(&Giant.lock_object, Giant);                \
111                 while (mtx_owned(&Giant)) {                             \
112                         _i++;                                           \
113                         mtx_unlock(&Giant);                             \
114                 }                                                       \
115         }                                                               \
116 } while (0)
117
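/*
 * Decide whether a new shared acquire is currently permitted.  The common
 * case succeeds when the lock is already shared and has no exclusive
 * waiters or spinners.  On the slow path (fp == false) sharing may still be
 * granted for deadlock avoidance when the caller already holds shared locks
 * (unless LK_NODDLKTREAT was passed) or has TDP_DEADLKTREAT set.
 */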
118 static bool __always_inline
119 LK_CAN_SHARE(uintptr_t x, int flags, bool fp)
120 {
121
122         if ((x & (LK_SHARE | LK_EXCLUSIVE_WAITERS | LK_EXCLUSIVE_SPINNERS)) ==
123             LK_SHARE)
124                 return (true);
125         if (fp || (!(x & LK_SHARE)))
126                 return (false);
127         if ((curthread->td_lk_slocks != 0 && !(flags & LK_NODDLKTREAT)) ||
128             (curthread->td_pflags & TDP_DEADLKTREAT))
129                 return (true);
130         return (false);
131 }
132
133 #define LK_TRYOP(x)                                                     \
134         ((x) & LK_NOWAIT)
135
136 #define LK_CAN_WITNESS(x)                                               \
137         (((x) & LK_NOWITNESS) == 0 && !LK_TRYOP(x))
138 #define LK_TRYWIT(x)                                                    \
139         (LK_TRYOP(x) ? LOP_TRYLOCK : 0)
140
141 #define lockmgr_disowned(lk)                                            \
142         (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
143
144 #define lockmgr_xlocked_v(v)                                            \
145         (((v) & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
146
147 #define lockmgr_xlocked(lk) lockmgr_xlocked_v(lockmgr_read_value(lk))
148
149 static void     assert_lockmgr(const struct lock_object *lock, int how);
150 #ifdef DDB
151 static void     db_show_lockmgr(const struct lock_object *lock);
152 #endif
153 static void     lock_lockmgr(struct lock_object *lock, uintptr_t how);
154 #ifdef KDTRACE_HOOKS
155 static int      owner_lockmgr(const struct lock_object *lock,
156                     struct thread **owner);
157 #endif
158 static uintptr_t unlock_lockmgr(struct lock_object *lock);
159
160 struct lock_class lock_class_lockmgr = {
161         .lc_name = "lockmgr",
162         .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE,
163         .lc_assert = assert_lockmgr,
164 #ifdef DDB
165         .lc_ddb_show = db_show_lockmgr,
166 #endif
167         .lc_lock = lock_lockmgr,
168         .lc_unlock = unlock_lockmgr,
169 #ifdef KDTRACE_HOOKS
170         .lc_owner = owner_lockmgr,
171 #endif
172 };
173
174 static __read_mostly bool lk_adaptive = true;
175 static SYSCTL_NODE(_debug, OID_AUTO, lockmgr, CTLFLAG_RD, NULL, "lockmgr debugging");
176 SYSCTL_BOOL(_debug_lockmgr, OID_AUTO, adaptive_spinning, CTLFLAG_RW, &lk_adaptive,
177     0, "");
178 #define lockmgr_delay  locks_delay
179
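/*
 * Per-call sleep parameters (wait message, priority and timeout) passed
 * down from __lockmgr_args() to the hard-path functions; a NULL pointer
 * means the defaults recorded at lockinit() time are used.
 */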
180 struct lockmgr_wait {
181         const char *iwmesg;
182         int ipri;
183         int itimo;
184 };
185
186 static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
187     int flags, bool fp);
188 static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp);
189
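/*
 * Common exit path: drop the interlock if LK_INTERLOCK was passed and kick
 * proc0 (the swapper) when a wakeup made a swapped-out thread runnable.
 */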
190 static void
191 lockmgr_exit(u_int flags, struct lock_object *ilk, int wakeup_swapper)
192 {
193         struct lock_class *class;
194
195         if (flags & LK_INTERLOCK) {
196                 class = LOCK_CLASS(ilk);
197                 class->lc_unlock(ilk);
198         }
199
200         if (__predict_false(wakeup_swapper))
201                 kick_proc0();
202 }
203
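/*
 * Bookkeeping helpers invoked on successful acquire/release: they feed
 * lockstat/lock profiling, KTR logging, WITNESS and the per-thread lock
 * counters.
 */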
204 static void
205 lockmgr_note_shared_acquire(struct lock *lk, int contested,
206     uint64_t waittime, const char *file, int line, int flags)
207 {
208
209         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
210             waittime, file, line, LOCKSTAT_READER);
211         LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
212         WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
213         TD_LOCKS_INC(curthread);
214         TD_SLOCKS_INC(curthread);
215         STACK_SAVE(lk);
216 }
217
218 static void
219 lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
220 {
221
222         WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
223         LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
224         TD_LOCKS_DEC(curthread);
225         TD_SLOCKS_DEC(curthread);
226 }
227
228 static void
229 lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
230     uint64_t waittime, const char *file, int line, int flags)
231 {
232
233         LOCKSTAT_PROFILE_OBTAIN_RWLOCK_SUCCESS(lockmgr__acquire, lk, contested,
234             waittime, file, line, LOCKSTAT_WRITER);
235         LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
236         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
237             line);
238         TD_LOCKS_INC(curthread);
239         STACK_SAVE(lk);
240 }
241
242 static void
243 lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
244 {
245
246         if (LK_HOLDER(lockmgr_read_value(lk)) != LK_KERNPROC) {
247                 WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
248                 TD_LOCKS_DEC(curthread);
249         }
250         LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
251             line);
252 }
253
254 static __inline struct thread *
255 lockmgr_xholder(const struct lock *lk)
256 {
257         uintptr_t x;
258
259         x = lockmgr_read_value(lk);
260         return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x));
261 }
262
263 /*
264  * Assumes the sleepqueue chain lock is held and returns with it released.
265  * It also assumes the generic interlock is sane and was previously checked.
266  * If LK_INTERLOCK is specified the interlock is not reacquired after the
267  * sleep.
268  */
269 static __inline int
270 sleeplk(struct lock *lk, u_int flags, struct lock_object *ilk,
271     const char *wmesg, int pri, int timo, int queue)
272 {
273         GIANT_DECLARE;
274         struct lock_class *class;
275         int catch, error;
276
277         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
278         catch = pri & PCATCH;
279         pri &= PRIMASK;
280         error = 0;
281
282         LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk,
283             (queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
284
285         if (flags & LK_INTERLOCK)
286                 class->lc_unlock(ilk);
287         if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0) {
288                 if (lk->lk_exslpfail < USHRT_MAX)
289                         lk->lk_exslpfail++;
290         }
291         GIANT_SAVE();
292         sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ?
293             SLEEPQ_INTERRUPTIBLE : 0), queue);
294         if ((flags & LK_TIMELOCK) && timo)
295                 sleepq_set_timeout(&lk->lock_object, timo);
296
297         /*
298          * Pick the sleep variant based on the timeout and PCATCH settings.
299          */
300         if ((flags & LK_TIMELOCK) && timo && catch)
301                 error = sleepq_timedwait_sig(&lk->lock_object, pri);
302         else if ((flags & LK_TIMELOCK) && timo)
303                 error = sleepq_timedwait(&lk->lock_object, pri);
304         else if (catch)
305                 error = sleepq_wait_sig(&lk->lock_object, pri);
306         else
307                 sleepq_wait(&lk->lock_object, pri);
308         GIANT_RESTORE();
309         if ((flags & LK_SLEEPFAIL) && error == 0)
310                 error = ENOLCK;
311
312         return (error);
313 }
314
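/*
 * Release a shared lock reference.  The fast path simply drops one sharer;
 * if waiters are present the sleepqueue chain lock is taken and the
 * appropriate queue is woken up.  Returns non-zero when proc0 (the swapper)
 * needs to be kicked.
 */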
315 static __inline int
316 wakeupshlk(struct lock *lk, const char *file, int line)
317 {
318         uintptr_t v, x, orig_x;
319         u_int realexslp;
320         int queue, wakeup_swapper;
321
322         wakeup_swapper = 0;
323         for (;;) {
324                 x = lockmgr_read_value(lk);
325                 if (lockmgr_sunlock_try(lk, &x))
326                         break;
327
328                 /*
329                  * We should have a sharer with waiters, so enter the hard
330                  * path in order to handle wakeups correctly.
331                  */
332                 sleepq_lock(&lk->lock_object);
333                 orig_x = lockmgr_read_value(lk);
334 retry_sleepq:
335                 x = orig_x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
336                 v = LK_UNLOCKED;
337
338                 /*
339                  * If the lock has exclusive waiters, give them preference in
340                  * order to avoid deadlock with threads racing to share it.
341                  * If interruptible sleeps left the exclusive queue empty,
342                  * avoid starving the threads sleeping on the shared queue by
343                  * giving them precedence and clearing the exclusive waiters
344                  * bit anyway.
345                  * Note that the lk_exslpfail count may overstate the real
346                  * number of waiters with the LK_SLEEPFAIL flag set, because
347                  * they may also be using interruptible sleeps, so lk_exslpfail
348                  * should be treated as an upper bound, including the edge
349                  * cases.
350                  */
351                 realexslp = sleepq_sleepcnt(&lk->lock_object,
352                     SQ_EXCLUSIVE_QUEUE);
353                 if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
354                         if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
355                                 lk->lk_exslpfail = 0;
356                                 queue = SQ_EXCLUSIVE_QUEUE;
357                                 v |= (x & LK_SHARED_WAITERS);
358                         } else {
359                                 lk->lk_exslpfail = 0;
360                                 LOCK_LOG2(lk,
361                                     "%s: %p has only LK_SLEEPFAIL sleepers",
362                                     __func__, lk);
363                                 LOCK_LOG2(lk,
364                             "%s: %p waking up threads on the exclusive queue",
365                                     __func__, lk);
366                                 wakeup_swapper =
367                                     sleepq_broadcast(&lk->lock_object,
368                                     SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
369                                 queue = SQ_SHARED_QUEUE;
370                         }
371                 } else {
372                         /*
373                          * Exclusive waiters sleeping with LK_SLEEPFAIL on
374                          * and using interruptible sleeps/timeout may have
375                          * left a spurious lk_exslpfail count behind, so
376                          * clean it up anyway.
377                          */
378                         lk->lk_exslpfail = 0;
379                         queue = SQ_SHARED_QUEUE;
380                 }
381
382                 if (lockmgr_sunlock_try(lk, &orig_x)) {
383                         sleepq_release(&lk->lock_object);
384                         break;
385                 }
386
387                 x |= LK_SHARERS_LOCK(1);
388                 if (!atomic_fcmpset_rel_ptr(&lk->lk_lock, &x, v)) {
389                         orig_x = x;
390                         goto retry_sleepq;
391                 }
392                 LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
393                     __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
394                     "exclusive");
395                 wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK,
396                     0, queue);
397                 sleepq_release(&lk->lock_object);
398                 break;
399         }
400
401         LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
402         return (wakeup_swapper);
403 }
404
405 static void
406 assert_lockmgr(const struct lock_object *lock, int what)
407 {
408
409         panic("lockmgr locks do not support assertions");
410 }
411
412 static void
413 lock_lockmgr(struct lock_object *lock, uintptr_t how)
414 {
415
416         panic("lockmgr locks do not support sleep interlocking");
417 }
418
419 static uintptr_t
420 unlock_lockmgr(struct lock_object *lock)
421 {
422
423         panic("lockmgr locks do not support sleep interlocking");
424 }
425
426 #ifdef KDTRACE_HOOKS
427 static int
428 owner_lockmgr(const struct lock_object *lock, struct thread **owner)
429 {
430
431         panic("lockmgr locks do not support owner inquiring");
432 }
433 #endif
434
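/*
 * Initialize a lockmgr lock, translating the LK_* init flags into the
 * corresponding lock_object LO_* flags and recording the default priority
 * and timeout used for sleeps.
 */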
435 void
436 lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
437 {
438         int iflags;
439
440         MPASS((flags & ~LK_INIT_MASK) == 0);
441         ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
442             ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
443             &lk->lk_lock));
444
445         iflags = LO_SLEEPABLE | LO_UPGRADABLE;
446         if (flags & LK_CANRECURSE)
447                 iflags |= LO_RECURSABLE;
448         if ((flags & LK_NODUP) == 0)
449                 iflags |= LO_DUPOK;
450         if (flags & LK_NOPROFILE)
451                 iflags |= LO_NOPROFILE;
452         if ((flags & LK_NOWITNESS) == 0)
453                 iflags |= LO_WITNESS;
454         if (flags & LK_QUIET)
455                 iflags |= LO_QUIET;
456         if (flags & LK_IS_VNODE)
457                 iflags |= LO_IS_VNODE;
458         if (flags & LK_NEW)
459                 iflags |= LO_NEW;
460         iflags |= flags & LK_NOSHARE;
461
462         lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags);
463         lk->lk_lock = LK_UNLOCKED;
464         lk->lk_recurse = 0;
465         lk->lk_exslpfail = 0;
466         lk->lk_timo = timo;
467         lk->lk_pri = pri;
468         STACK_ZERO(lk);
469 }
470
471 /*
472  * XXX: Gross hacks to manipulate external lock flags after
473  * initialization.  Used for certain vnode and buf locks.
474  */
475 void
476 lockallowshare(struct lock *lk)
477 {
478
479         lockmgr_assert(lk, KA_XLOCKED);
480         lk->lock_object.lo_flags &= ~LK_NOSHARE;
481 }
482
483 void
484 lockdisableshare(struct lock *lk)
485 {
486
487         lockmgr_assert(lk, KA_XLOCKED);
488         lk->lock_object.lo_flags |= LK_NOSHARE;
489 }
490
491 void
492 lockallowrecurse(struct lock *lk)
493 {
494
495         lockmgr_assert(lk, KA_XLOCKED);
496         lk->lock_object.lo_flags |= LO_RECURSABLE;
497 }
498
499 void
500 lockdisablerecurse(struct lock *lk)
501 {
502
503         lockmgr_assert(lk, KA_XLOCKED);
504         lk->lock_object.lo_flags &= ~LO_RECURSABLE;
505 }
506
507 void
508 lockdestroy(struct lock *lk)
509 {
510
511         KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held"));
512         KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed"));
513         KASSERT(lk->lk_exslpfail == 0, ("lockmgr still exclusive waiters"));
514         lock_destroy(&lk->lock_object);
515 }
516
517 static bool __always_inline
518 lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags, bool fp)
519 {
520
521         /*
522          * If the lock is not held exclusively and sharing is
523          * currently permitted (see LK_CAN_SHARE()), bump the
524          * count of sharers.  Since we have to preserve the state
525          * of the waiters flags, loop back and retry if we fail
526          * to acquire the shared lock.
527          */
528         while (LK_CAN_SHARE(*xp, flags, fp)) {
529                 if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
530                     *xp + LK_ONE_SHARER)) {
531                         return (true);
532                 }
533         }
534         return (false);
535 }
536
537 static bool __always_inline
538 lockmgr_sunlock_try(struct lock *lk, uintptr_t *xp)
539 {
540
541         for (;;) {
542                 if (LK_SHARERS(*xp) > 1 || !(*xp & LK_ALL_WAITERS)) {
543                         if (atomic_fcmpset_rel_ptr(&lk->lk_lock, xp,
544                             *xp - LK_ONE_SHARER))
545                                 return (true);
546                         continue;
547                 }
548                 break;
549         }
550         return (false);
551 }
552
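/*
 * Adaptive spinning for shared acquires: briefly spin while the current
 * exclusive owner is running on another CPU, bailing out if the lock is
 * disowned, already shared, ownerless, or has queued waiters.  Returns true
 * once the lock looks shareable again, with *xp refreshed.
 */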
553 static bool
554 lockmgr_slock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp,
555     int flags)
556 {
557         struct thread *owner;
558         uintptr_t x;
559
560         x = *xp;
561         MPASS(x != LK_UNLOCKED);
562         owner = (struct thread *)LK_HOLDER(x);
563         for (;;) {
564                 MPASS(owner != curthread);
565                 if (owner == (struct thread *)LK_KERNPROC)
566                         return (false);
567                 if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
568                         return (false);
569                 if (owner == NULL)
570                         return (false);
571                 if (!TD_IS_RUNNING(owner))
572                         return (false);
573                 if ((x & LK_ALL_WAITERS) != 0)
574                         return (false);
575                 lock_delay(lda);
576                 x = lockmgr_read_value(lk);
577                 if (LK_CAN_SHARE(x, flags, false)) {
578                         *xp = x;
579                         return (true);
580                 }
581                 owner = (struct thread *)LK_HOLDER(x);
582         }
583 }
584
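/*
 * Slow path for shared acquires: handles adaptive spinning, try operations,
 * setting the shared waiters flag and sleeping on the shared queue until
 * the lock can be shared.
 */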
585 static __noinline int
586 lockmgr_slock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
587     const char *file, int line, struct lockmgr_wait *lwa)
588 {
589         uintptr_t tid, x;
590         int error = 0;
591         const char *iwmesg;
592         int ipri, itimo;
593
594 #ifdef KDTRACE_HOOKS
595         uint64_t sleep_time = 0;
596 #endif
597 #ifdef LOCK_PROFILING
598         uint64_t waittime = 0;
599         int contested = 0;
600 #endif
601         struct lock_delay_arg lda;
602
603         if (SCHEDULER_STOPPED())
604                 goto out;
605
606         tid = (uintptr_t)curthread;
607
608         if (LK_CAN_WITNESS(flags))
609                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
610                     file, line, flags & LK_INTERLOCK ? ilk : NULL);
611         x = lockmgr_read_value(lk);
612         lock_delay_arg_init(&lda, &lockmgr_delay);
613         if (!lk_adaptive)
614                 flags &= ~LK_ADAPTIVE;
615         /*
616          * The lock may already be locked exclusive by curthread,
617          * avoid deadlock.
618          */
619         if (LK_HOLDER(x) == tid) {
620                 LOCK_LOG2(lk,
621                     "%s: %p already held in exclusive mode",
622                     __func__, lk);
623                 error = EDEADLK;
624                 goto out;
625         }
626
627         for (;;) {
628                 if (lockmgr_slock_try(lk, &x, flags, false))
629                         break;
630
631                 lock_profile_obtain_lock_failed(&lk->lock_object, false,
632                     &contested, &waittime);
633
634                 if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
635                         if (lockmgr_slock_adaptive(&lda, lk, &x, flags))
636                                 continue;
637                 }
638
639 #ifdef HWPMC_HOOKS
640                 PMC_SOFT_CALL( , , lock, failed);
641 #endif
642
643                 /*
644                  * If the operation is not allowed to sleep, just give
645                  * up and return.
646                  */
647                 if (LK_TRYOP(flags)) {
648                         LOCK_LOG2(lk, "%s: %p fails the try operation",
649                             __func__, lk);
650                         error = EBUSY;
651                         break;
652                 }
653
654                 /*
655                  * Acquire the sleepqueue chain lock because we
656                  * probably will need to manipulate the waiters flags.
657                  */
658                 sleepq_lock(&lk->lock_object);
659                 x = lockmgr_read_value(lk);
660 retry_sleepq:
661
662                 /*
663                  * if the lock can be acquired in shared mode, try
664                  * again.
665                  */
666                 if (LK_CAN_SHARE(x, flags, false)) {
667                         sleepq_release(&lk->lock_object);
668                         continue;
669                 }
670
671                 /*
672                  * Try to set the LK_SHARED_WAITERS flag.  If we fail,
673                  * loop back and retry.
674                  */
675                 if ((x & LK_SHARED_WAITERS) == 0) {
676                         if (!atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
677                             x | LK_SHARED_WAITERS)) {
678                                 goto retry_sleepq;
679                         }
680                         LOCK_LOG2(lk, "%s: %p set shared waiters flag",
681                             __func__, lk);
682                 }
683
684                 if (lwa == NULL) {
685                         iwmesg = lk->lock_object.lo_name;
686                         ipri = lk->lk_pri;
687                         itimo = lk->lk_timo;
688                 } else {
689                         iwmesg = lwa->iwmesg;
690                         ipri = lwa->ipri;
691                         itimo = lwa->itimo;
692                 }
693
694                 /*
695                  * Since we have been unable to acquire the
696                  * shared lock and the shared waiters flag is set,
697                  * we will sleep.
698                  */
699 #ifdef KDTRACE_HOOKS
700                 sleep_time -= lockstat_nsecs(&lk->lock_object);
701 #endif
702                 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
703                     SQ_SHARED_QUEUE);
704 #ifdef KDTRACE_HOOKS
705                 sleep_time += lockstat_nsecs(&lk->lock_object);
706 #endif
707                 flags &= ~LK_INTERLOCK;
708                 if (error) {
709                         LOCK_LOG3(lk,
710                             "%s: interrupted sleep for %p with %d",
711                             __func__, lk, error);
712                         break;
713                 }
714                 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
715                     __func__, lk);
716                 x = lockmgr_read_value(lk);
717         }
718         if (error == 0) {
719 #ifdef KDTRACE_HOOKS
720                 if (sleep_time != 0)
721                         LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
722                             LOCKSTAT_READER, (x & LK_SHARE) == 0,
723                             (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
724 #endif
725 #ifdef LOCK_PROFILING
726                 lockmgr_note_shared_acquire(lk, contested, waittime,
727                     file, line, flags);
728 #else
729                 lockmgr_note_shared_acquire(lk, 0, 0, file, line,
730                     flags);
731 #endif
732         }
733
734 out:
735         lockmgr_exit(flags, ilk, 0);
736         return (error);
737 }
738
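/*
 * Adaptive spinning for exclusive acquires: spin while the owner is running
 * on another CPU and no waiters are queued, returning true once the lock is
 * observed unlocked so the caller can retry the cmpset.
 */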
739 static bool
740 lockmgr_xlock_adaptive(struct lock_delay_arg *lda, struct lock *lk, uintptr_t *xp)
741 {
742         struct thread *owner;
743         uintptr_t x;
744
745         x = *xp;
746         MPASS(x != LK_UNLOCKED);
747         owner = (struct thread *)LK_HOLDER(x);
748         for (;;) {
749                 MPASS(owner != curthread);
750                 if (owner == NULL)
751                         return (false);
752                 if ((x & LK_SHARE) && LK_SHARERS(x) > 0)
753                         return (false);
754                 if (owner == (struct thread *)LK_KERNPROC)
755                         return (false);
756                 if (!TD_IS_RUNNING(owner))
757                         return (false);
758                 if ((x & LK_ALL_WAITERS) != 0)
759                         return (false);
760                 lock_delay(lda);
761                 x = lockmgr_read_value(lk);
762                 if (x == LK_UNLOCKED) {
763                         *xp = x;
764                         return (true);
765                 }
766                 owner = (struct thread *)LK_HOLDER(x);
767         }
768 }
769
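/*
 * Slow path for exclusive acquires: handles recursion, adaptive spinning,
 * try operations, setting the exclusive waiters flag and sleeping on the
 * exclusive queue until the lock can be claimed.
 */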
770 static __noinline int
771 lockmgr_xlock_hard(struct lock *lk, u_int flags, struct lock_object *ilk,
772     const char *file, int line, struct lockmgr_wait *lwa)
773 {
774         struct lock_class *class;
775         uintptr_t tid, x, v;
776         int error = 0;
777         const char *iwmesg;
778         int ipri, itimo;
779
780 #ifdef KDTRACE_HOOKS
781         uint64_t sleep_time = 0;
782 #endif
783 #ifdef LOCK_PROFILING
784         uint64_t waittime = 0;
785         int contested = 0;
786 #endif
787         struct lock_delay_arg lda;
788
789         if (SCHEDULER_STOPPED())
790                 goto out;
791
792         tid = (uintptr_t)curthread;
793
794         if (LK_CAN_WITNESS(flags))
795                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
796                     LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
797                     ilk : NULL);
798
799         /*
800          * If curthread already holds the lock and this one is
801          * allowed to recurse, simply recurse on it.
802          */
803         if (lockmgr_xlocked(lk)) {
804                 if ((flags & LK_CANRECURSE) == 0 &&
805                     (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
806                         /*
807                          * If this is a try operation, just give up and
808                          * return instead of panicking.
809                          */
810                         if (LK_TRYOP(flags)) {
811                                 LOCK_LOG2(lk,
812                                     "%s: %p fails the try operation",
813                                     __func__, lk);
814                                 error = EBUSY;
815                                 goto out;
816                         }
817                         if (flags & LK_INTERLOCK) {
818                                 class = LOCK_CLASS(ilk);
819                                 class->lc_unlock(ilk);
820                         }
821                         STACK_PRINT(lk);
822                         panic("%s: recursing on non recursive lockmgr %p "
823                             "@ %s:%d\n", __func__, lk, file, line);
824                 }
825                 atomic_set_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
826                 lk->lk_recurse++;
827                 LOCK_LOG2(lk, "%s: %p recursing", __func__, lk);
828                 LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
829                     lk->lk_recurse, file, line);
830                 WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
831                     LK_TRYWIT(flags), file, line);
832                 TD_LOCKS_INC(curthread);
833                 goto out;
834         }
835
836         x = LK_UNLOCKED;
837         lock_delay_arg_init(&lda, &lockmgr_delay);
838         if (!lk_adaptive)
839                 flags &= ~LK_ADAPTIVE;
840         for (;;) {
841                 if (x == LK_UNLOCKED) {
842                         if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x, tid))
843                                 break;
844                         continue;
845                 }
846
847                 lock_profile_obtain_lock_failed(&lk->lock_object, false,
848                     &contested, &waittime);
849
850                 if ((flags & (LK_ADAPTIVE | LK_INTERLOCK)) == LK_ADAPTIVE) {
851                         if (lockmgr_xlock_adaptive(&lda, lk, &x))
852                                 continue;
853                 }
854 #ifdef HWPMC_HOOKS
855                 PMC_SOFT_CALL( , , lock, failed);
856 #endif
857
858                 /*
859                  * If the operation is not allowed to sleep, just give
860                  * up and return.
861                  */
862                 if (LK_TRYOP(flags)) {
863                         LOCK_LOG2(lk, "%s: %p fails the try operation",
864                             __func__, lk);
865                         error = EBUSY;
866                         break;
867                 }
868
869                 /*
870                  * Acquire the sleepqueue chain lock because we
871                  * probably will need to manipulate the waiters flags.
872                  */
873                 sleepq_lock(&lk->lock_object);
874                 x = lockmgr_read_value(lk);
875 retry_sleepq:
876
877                 /*
878                  * if the lock has been released while we spun on
879                  * the sleepqueue chain lock just try again.
880                  */
881                 if (x == LK_UNLOCKED) {
882                         sleepq_release(&lk->lock_object);
883                         continue;
884                 }
885
886                 /*
887                  * The lock can be in the state where there is a
888                  * pending queue of waiters, but still no owner.
889                  * This happens when the lock is contested and an
890                  * owner is going to claim the lock.
891                  * If curthread is the thread that successfully
892                  * acquires it, claim lock ownership and return,
893                  * preserving the waiters flags.
894                  */
895                 v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
896                 if ((x & ~v) == LK_UNLOCKED) {
897                         v &= ~LK_EXCLUSIVE_SPINNERS;
898                         if (atomic_fcmpset_acq_ptr(&lk->lk_lock, &x,
899                             tid | v)) {
900                                 sleepq_release(&lk->lock_object);
901                                 LOCK_LOG2(lk,
902                                     "%s: %p claimed by a new writer",
903                                     __func__, lk);
904                                 break;
905                         }
906                         goto retry_sleepq;
907                 }
908
909                 /*
910                  * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
911                  * fail, loop back and retry.
912                  */
913                 if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
914                         if (!atomic_fcmpset_ptr(&lk->lk_lock, &x,
915                             x | LK_EXCLUSIVE_WAITERS)) {
916                                 goto retry_sleepq;
917                         }
918                         LOCK_LOG2(lk, "%s: %p set excl waiters flag",
919                             __func__, lk);
920                 }
921
922                 if (lwa == NULL) {
923                         iwmesg = lk->lock_object.lo_name;
924                         ipri = lk->lk_pri;
925                         itimo = lk->lk_timo;
926                 } else {
927                         iwmesg = lwa->iwmesg;
928                         ipri = lwa->ipri;
929                         itimo = lwa->itimo;
930                 }
931
932                 /*
933                  * Since we have been unable to acquire the
934                  * exclusive lock and the exclusive waiters flag
935                  * is set, we will sleep.
936                  */
937 #ifdef KDTRACE_HOOKS
938                 sleep_time -= lockstat_nsecs(&lk->lock_object);
939 #endif
940                 error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo,
941                     SQ_EXCLUSIVE_QUEUE);
942 #ifdef KDTRACE_HOOKS
943                 sleep_time += lockstat_nsecs(&lk->lock_object);
944 #endif
945                 flags &= ~LK_INTERLOCK;
946                 if (error) {
947                         LOCK_LOG3(lk,
948                             "%s: interrupted sleep for %p with %d",
949                             __func__, lk, error);
950                         break;
951                 }
952                 LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
953                     __func__, lk);
954                 x = lockmgr_read_value(lk);
955         }
956         if (error == 0) {
957 #ifdef KDTRACE_HOOKS
958                 if (sleep_time != 0)
959                         LOCKSTAT_RECORD4(lockmgr__block, lk, sleep_time,
960                             LOCKSTAT_WRITER, (x & LK_SHARE) == 0,
961                             (x & LK_SHARE) == 0 ? 0 : LK_SHARERS(x));
962 #endif
963 #ifdef LOCK_PROFILING
964                 lockmgr_note_exclusive_acquire(lk, contested, waittime,
965                     file, line, flags);
966 #else
967                 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
968                     flags);
969 #endif
970         }
971
972 out:
973         lockmgr_exit(flags, ilk, 0);
974         return (error);
975 }
976
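/*
 * Upgrade a shared lock to an exclusive one.  With a single sharer the
 * upgrade is done in place while preserving the waiters flags; with
 * multiple sharers LK_UPGRADE drops the shared lock and falls back to a
 * full exclusive acquire, while LK_TRYUPGRADE fails with EBUSY.
 */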
977 static __noinline int
978 lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
979     const char *file, int line, struct lockmgr_wait *lwa)
980 {
981         uintptr_t tid, v, setv;
982         int error = 0;
983         int op;
984
985         if (SCHEDULER_STOPPED())
986                 goto out;
987
988         tid = (uintptr_t)curthread;
989
990         _lockmgr_assert(lk, KA_SLOCKED, file, line);
991
992         op = flags & LK_TYPE_MASK;
993         v = lockmgr_read_value(lk);
994         for (;;) {
995                 if (LK_SHARERS(v) > 1) {
996                         if (op == LK_TRYUPGRADE) {
997                                 LOCK_LOG2(lk, "%s: %p failed the nowait upgrade",
998                                     __func__, lk);
999                                 error = EBUSY;
1000                                 goto out;
1001                         }
1002                         if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &v,
1003                             v - LK_ONE_SHARER)) {
1004                                 lockmgr_note_shared_release(lk, file, line);
1005                                 goto out_xlock;
1006                         }
1007                         continue;
1008                 }
1009                 MPASS((v & ~LK_ALL_WAITERS) == LK_SHARERS_LOCK(1));
1010
1011                 setv = tid;
1012                 setv |= (v & LK_ALL_WAITERS);
1013
1014                 /*
1015                  * Try to switch from one shared lock to an exclusive one.
1016                  * We need to preserve waiters flags during the operation.
1017                  */
1018                 if (atomic_fcmpset_ptr(&lk->lk_lock, &v, setv)) {
1019                         LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
1020                             line);
1021                         WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
1022                             LK_TRYWIT(flags), file, line);
1023                         LOCKSTAT_RECORD0(lockmgr__upgrade, lk);
1024                         TD_SLOCKS_DEC(curthread);
1025                         goto out;
1026                 }
1027         }
1028
1029 out_xlock:
1030         error = lockmgr_xlock_hard(lk, flags, ilk, file, line, lwa);
1031         flags &= ~LK_INTERLOCK;
1032 out:
1033         lockmgr_exit(flags, ilk, 0);
1034         return (error);
1035 }
1036
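/*
 * Flags-based entry point: attempts the uncontended fast path for shared
 * and exclusive requests and punts to the corresponding hard function (or
 * to __lockmgr_args() for anything else) when the request cannot be
 * satisfied immediately.
 */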
1037 int
1038 lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
1039     const char *file, int line)
1040 {
1041         struct lock_class *class;
1042         uintptr_t x, tid;
1043         u_int op;
1044         bool locked;
1045
1046         if (SCHEDULER_STOPPED())
1047                 return (0);
1048
1049         op = flags & LK_TYPE_MASK;
1050         locked = false;
1051         switch (op) {
1052         case LK_SHARED:
1053                 if (LK_CAN_WITNESS(flags))
1054                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1055                             file, line, flags & LK_INTERLOCK ? ilk : NULL);
1056                 if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
1057                         break;
1058                 x = lockmgr_read_value(lk);
1059                 if (lockmgr_slock_try(lk, &x, flags, true)) {
1060                         lockmgr_note_shared_acquire(lk, 0, 0,
1061                             file, line, flags);
1062                         locked = true;
1063                 } else {
1064                         return (lockmgr_slock_hard(lk, flags, ilk, file, line,
1065                             NULL));
1066                 }
1067                 break;
1068         case LK_EXCLUSIVE:
1069                 if (LK_CAN_WITNESS(flags))
1070                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1071                             LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1072                             ilk : NULL);
1073                 tid = (uintptr_t)curthread;
1074                 if (lockmgr_read_value(lk) == LK_UNLOCKED &&
1075                     atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1076                         lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1077                             flags);
1078                         locked = true;
1079                 } else {
1080                         return (lockmgr_xlock_hard(lk, flags, ilk, file, line,
1081                             NULL));
1082                 }
1083                 break;
1084         case LK_UPGRADE:
1085         case LK_TRYUPGRADE:
1086                 return (lockmgr_upgrade(lk, flags, ilk, file, line, NULL));
1087         default:
1088                 break;
1089         }
1090         if (__predict_true(locked)) {
1091                 if (__predict_false(flags & LK_INTERLOCK)) {
1092                         class = LOCK_CLASS(ilk);
1093                         class->lc_unlock(ilk);
1094                 }
1095                 return (0);
1096         } else {
1097                 return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
1098                     LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
1099         }
1100 }
1101
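/*
 * Slow path for releasing a shared lock: defers to wakeupshlk() to wake any
 * waiters, then finishes up via lockmgr_exit().
 */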
1102 static __noinline int
1103 lockmgr_sunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1104     const char *file, int line)
1105
1106 {
1107         int wakeup_swapper = 0;
1108
1109         if (SCHEDULER_STOPPED())
1110                 goto out;
1111
1112         wakeup_swapper = wakeupshlk(lk, file, line);
1113
1114 out:
1115         lockmgr_exit(flags, ilk, wakeup_swapper);
1116         return (0);
1117 }
1118
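/*
 * Slow path for releasing an exclusive lock: handles recursion and disowned
 * (LK_KERNPROC) locks, then picks the queue to wake (exclusive waiters are
 * preferred) and releases the lock, keeping the shared waiters bit set when
 * only the exclusive queue is woken.
 */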
1119 static __noinline int
1120 lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_object *ilk,
1121     const char *file, int line)
1122 {
1123         uintptr_t tid, v;
1124         int wakeup_swapper = 0;
1125         u_int realexslp;
1126         int queue;
1127
1128         if (SCHEDULER_STOPPED())
1129                 goto out;
1130
1131         tid = (uintptr_t)curthread;
1132
1133         /*
1134          * As a first option, treat the lock as if it has no
1135          * waiters.
1136          * Fix up the tid var if the lock has been disowned.
1137          */
1138         if (LK_HOLDER(x) == LK_KERNPROC)
1139                 tid = LK_KERNPROC;
1140
1141         /*
1142          * The lock is held in exclusive mode.
1143          * If the lock is recursed also, then unrecurse it.
1144          */
1145         if (lockmgr_recursed_v(x)) {
1146                 LOCK_LOG2(lk, "%s: %p unrecursing", __func__, lk);
1147                 lk->lk_recurse--;
1148                 if (lk->lk_recurse == 0)
1149                         atomic_clear_ptr(&lk->lk_lock, LK_WRITER_RECURSED);
1150                 goto out;
1151         }
1152         if (tid != LK_KERNPROC)
1153                 LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,
1154                     LOCKSTAT_WRITER);
1155
1156         if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
1157                 goto out;
1158
1159         sleepq_lock(&lk->lock_object);
1160         x = lockmgr_read_value(lk);
1161         v = LK_UNLOCKED;
1162
1163         /*
1164          * If the lock has exclusive waiters, give them
1165          * preference in order to avoid deadlock with threads
1166          * racing to share it.
1167          * If interruptible sleeps left the exclusive queue
1168          * empty, avoid starving the threads sleeping on the
1169          * shared queue by giving them precedence and clearing
1170          * the exclusive waiters bit anyway.
1171          * Note that the lk_exslpfail count may overstate the
1172          * real number of waiters with the LK_SLEEPFAIL flag
1173          * set, because such waiters may also be using
1174          * interruptible sleeps, so lk_exslpfail should be
1175          * treated as an upper bound, including the edge
1176          * cases.
1177          */
1178         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1179         realexslp = sleepq_sleepcnt(&lk->lock_object, SQ_EXCLUSIVE_QUEUE);
1180         if ((x & LK_EXCLUSIVE_WAITERS) != 0 && realexslp != 0) {
1181                 if (lk->lk_exslpfail != USHRT_MAX && lk->lk_exslpfail < realexslp) {
1182                         lk->lk_exslpfail = 0;
1183                         queue = SQ_EXCLUSIVE_QUEUE;
1184                         v |= (x & LK_SHARED_WAITERS);
1185                 } else {
1186                         lk->lk_exslpfail = 0;
1187                         LOCK_LOG2(lk,
1188                             "%s: %p has only LK_SLEEPFAIL sleepers",
1189                             __func__, lk);
1190                         LOCK_LOG2(lk,
1191                             "%s: %p waking up threads on the exclusive queue",
1192                             __func__, lk);
1193                         wakeup_swapper = sleepq_broadcast(&lk->lock_object,
1194                             SLEEPQ_LK, 0, SQ_EXCLUSIVE_QUEUE);
1195                         queue = SQ_SHARED_QUEUE;
1196                 }
1197         } else {
1198                 /*
1199                  * Exclusive waiters sleeping with LK_SLEEPFAIL
1200                  * on and using interruptible sleeps/timeout
1201                  * may have left a spurious lk_exslpfail count
1202                  * behind, so clean it up anyway.
1203                  */
1204                 lk->lk_exslpfail = 0;
1205                 queue = SQ_SHARED_QUEUE;
1206         }
1207
1208         LOCK_LOG3(lk, "%s: %p waking up threads on the %s queue",
1209             __func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
1210             "exclusive");
1211         atomic_store_rel_ptr(&lk->lk_lock, v);
1212         wakeup_swapper |= sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
1213         sleepq_release(&lk->lock_object);
1214
1215 out:
1216         lockmgr_exit(flags, ilk, wakeup_swapper);
1217         return (0);
1218 }
1219
1220 /*
1221  * Lightweight entry points for common operations.
1222  *
1223  * Functionality is similar to sx locks, in that none of the additional lockmgr
1224  * features are supported. To be clear, these are NOT supported:
1225  * 1. shared locking disablement
1226  * 2. returning with an error after sleep
1227  * 3. unlocking the interlock
1228  *
1229  * If in doubt, use lockmgr_lock_flags.
1230  */
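/*
 * A minimal usage sketch (illustrative only; the 'foo' structure and its
 * field names are hypothetical and not part of this file).  Most callers
 * go through the lockmgr(9) macro interface declared in <sys/lockmgr.h>:
 *
 *	struct foo {
 *		struct lock	f_lock;
 *		int		f_count;
 *	};
 *
 *	lockinit(&fp->f_lock, PVFS, "foolk", 0, 0);
 *
 *	lockmgr(&fp->f_lock, LK_EXCLUSIVE, NULL);
 *	fp->f_count++;
 *	lockmgr(&fp->f_lock, LK_RELEASE, NULL);
 */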
1231 int
1232 lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)
1233 {
1234         uintptr_t x;
1235
1236         MPASS((flags & LK_TYPE_MASK) == LK_SHARED);
1237         MPASS((flags & LK_INTERLOCK) == 0);
1238         MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0);
1239
1240         if (LK_CAN_WITNESS(flags))
1241                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
1242                     file, line, NULL);
1243         x = lockmgr_read_value(lk);
1244         if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) {
1245                 lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags);
1246                 return (0);
1247         }
1248
1249         return (lockmgr_slock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1250 }
1251
1252 int
1253 lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line)
1254 {
1255         uintptr_t tid;
1256
1257         MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE);
1258         MPASS((flags & LK_INTERLOCK) == 0);
1259
1260         if (LK_CAN_WITNESS(flags))
1261                 WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1262                     LOP_EXCLUSIVE, file, line, NULL);
1263         tid = (uintptr_t)curthread;
1264         if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
1265                 lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
1266                     flags);
1267                 return (0);
1268         }
1269
1270         return (lockmgr_xlock_hard(lk, flags | LK_ADAPTIVE, NULL, file, line, NULL));
1271 }
1272
1273 int
1274 lockmgr_unlock(struct lock *lk)
1275 {
1276         uintptr_t x, tid;
1277         const char *file;
1278         int line;
1279
1280         file = __FILE__;
1281         line = __LINE__;
1282
1283         _lockmgr_assert(lk, KA_LOCKED, file, line);
1284         x = lockmgr_read_value(lk);
1285         if (__predict_true(x & LK_SHARE) != 0) {
1286                 lockmgr_note_shared_release(lk, file, line);
1287                 if (lockmgr_sunlock_try(lk, &x)) {
1288                         LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
1289                 } else {
1290                         return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1291                 }
1292         } else {
1293                 tid = (uintptr_t)curthread;
1294                 lockmgr_note_exclusive_release(lk, file, line);
1295                 if (x == tid && atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
1296                         LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk,LOCKSTAT_WRITER);
1297                 } else {
1298                         return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line));
1299                 }
1300         }
1301         return (0);
1302 }
1303
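/*
 * Full-featured entry point implementing all lockmgr operations (shared,
 * exclusive, upgrade, downgrade, release and drain), honouring per-call
 * wmesg/priority/timeout overrides and the optional interlock.
 */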
1304 int
1305 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
1306     const char *wmesg, int pri, int timo, const char *file, int line)
1307 {
1308         GIANT_DECLARE;
1309         struct lockmgr_wait lwa;
1310         struct lock_class *class;
1311         const char *iwmesg;
1312         uintptr_t tid, v, x;
1313         u_int op, realexslp;
1314         int error, ipri, itimo, queue, wakeup_swapper;
1315 #ifdef LOCK_PROFILING
1316         uint64_t waittime = 0;
1317         int contested = 0;
1318 #endif
1319
1320         if (SCHEDULER_STOPPED())
1321                 return (0);
1322
1323         error = 0;
1324         tid = (uintptr_t)curthread;
1325         op = (flags & LK_TYPE_MASK);
1326         iwmesg = (wmesg == LK_WMESG_DEFAULT) ? lk->lock_object.lo_name : wmesg;
1327         ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri;
1328         itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo;
1329
1330         lwa.iwmesg = iwmesg;
1331         lwa.ipri = ipri;
1332         lwa.itimo = itimo;
1333
1334         MPASS((flags & ~LK_TOTAL_MASK) == 0);
1335         KASSERT((op & (op - 1)) == 0,
1336             ("%s: Invalid requested operation @ %s:%d", __func__, file, line));
1337         KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 ||
1338             (op != LK_DOWNGRADE && op != LK_RELEASE),
1339             ("%s: Invalid flags in regard of the operation desired @ %s:%d",
1340             __func__, file, line));
1341         KASSERT((flags & LK_INTERLOCK) == 0 || ilk != NULL,
1342             ("%s: LK_INTERLOCK passed without valid interlock @ %s:%d",
1343             __func__, file, line));
1344         KASSERT(kdb_active != 0 || !TD_IS_IDLETHREAD(curthread),
1345             ("%s: idle thread %p on lockmgr %s @ %s:%d", __func__, curthread,
1346             lk->lock_object.lo_name, file, line));
1347
1348         class = (flags & LK_INTERLOCK) ? LOCK_CLASS(ilk) : NULL;
1349
1350         if (lk->lock_object.lo_flags & LK_NOSHARE) {
1351                 switch (op) {
1352                 case LK_SHARED:
1353                         op = LK_EXCLUSIVE;
1354                         break;
1355                 case LK_UPGRADE:
1356                 case LK_TRYUPGRADE:
1357                 case LK_DOWNGRADE:
1358                         _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED,
1359                             file, line);
1360                         if (flags & LK_INTERLOCK)
1361                                 class->lc_unlock(ilk);
1362                         return (0);
1363                 }
1364         }
1365
1366         wakeup_swapper = 0;
1367         switch (op) {
1368         case LK_SHARED:
1369                 return (lockmgr_slock_hard(lk, flags, ilk, file, line, &lwa));
1370                 break;
1371         case LK_UPGRADE:
1372         case LK_TRYUPGRADE:
1373                 return (lockmgr_upgrade(lk, flags, ilk, file, line, &lwa));
1374                 break;
1375         case LK_EXCLUSIVE:
1376                 return (lockmgr_xlock_hard(lk, flags, ilk, file, line, &lwa));
1377                 break;
1378         case LK_DOWNGRADE:
1379                 _lockmgr_assert(lk, KA_XLOCKED, file, line);
1380                 WITNESS_DOWNGRADE(&lk->lock_object, 0, file, line);
1381
1382                 /*
1383                  * Panic if the lock is recursed.
1384                  */
1385                 if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) {
1386                         if (flags & LK_INTERLOCK)
1387                                 class->lc_unlock(ilk);
1388                         panic("%s: downgrade a recursed lockmgr %s @ %s:%d\n",
1389                             __func__, iwmesg, file, line);
1390                 }
1391                 TD_SLOCKS_INC(curthread);
1392
1393                 /*
1394                  * In order to preserve waiters flags, just spin.
1395                  */
1396                 for (;;) {
1397                         x = lockmgr_read_value(lk);
1398                         MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1399                         x &= LK_ALL_WAITERS;
1400                         if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1401                             LK_SHARERS_LOCK(1) | x))
1402                                 break;
1403                         cpu_spinwait();
1404                 }
1405                 LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line);
1406                 LOCKSTAT_RECORD0(lockmgr__downgrade, lk);
1407                 break;
1408         case LK_RELEASE:
1409                 _lockmgr_assert(lk, KA_LOCKED, file, line);
1410                 x = lockmgr_read_value(lk);
1411
1412                 if (__predict_true(x & LK_SHARE) != 0) {
1413                         lockmgr_note_shared_release(lk, file, line);
1414                         return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
1415                 } else {
1416                         lockmgr_note_exclusive_release(lk, file, line);
1417                         return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
1418                 }
1419                 break;
1420         case LK_DRAIN:
1421                 if (LK_CAN_WITNESS(flags))
1422                         WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
1423                             LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
1424                             ilk : NULL);
1425
1426                 /*
1427                  * Trying to drain a lock we already own will result in a
1428                  * deadlock.
1429                  */
1430                 if (lockmgr_xlocked(lk)) {
1431                         if (flags & LK_INTERLOCK)
1432                                 class->lc_unlock(ilk);
1433                         panic("%s: draining %s with the lock held @ %s:%d\n",
1434                             __func__, iwmesg, file, line);
1435                 }
1436
1437                 for (;;) {
1438                         if (lk->lk_lock == LK_UNLOCKED &&
1439                             atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
1440                                 break;
1441
1442 #ifdef HWPMC_HOOKS
1443                         PMC_SOFT_CALL( , , lock, failed);
1444 #endif
1445                         lock_profile_obtain_lock_failed(&lk->lock_object, false,
1446                             &contested, &waittime);
1447
1448                         /*
1449                          * If the caller asked not to sleep (LK_NOWAIT), just
1450                          * give up and return.
1451                          */
1452                         if (LK_TRYOP(flags)) {
1453                                 LOCK_LOG2(lk, "%s: %p fails the try operation",
1454                                     __func__, lk);
1455                                 error = EBUSY;
1456                                 break;
1457                         }
1458
1459                         /*
1460                          * Acquire the sleepqueue chain lock because we
1461                          * probably will need to manipulate the waiters flags.
1462                          */
1463                         sleepq_lock(&lk->lock_object);
1464                         x = lockmgr_read_value(lk);
1465
1466                         /*
1467                          * If the lock has been released while we spun on
1468                          * the sleepqueue chain lock, just try again.
1469                          */
1470                         if (x == LK_UNLOCKED) {
1471                                 sleepq_release(&lk->lock_object);
1472                                 continue;
1473                         }
1474
1475                         v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
1476                         if ((x & ~v) == LK_UNLOCKED) {
1477                                 v = (x & ~LK_EXCLUSIVE_SPINNERS);
1478
1479                                 /*
1480                                  * If interruptible sleeps left the exclusive
1481                                  * queue empty, avoid starving the threads
1482                                  * sleeping on the shared queue by giving them
1483                                  * precedence and clearing the exclusive
1484                                  * waiters bit anyway.
1485                                  * Please note that the lk_exslpfail count may
1486                                  * be lying about the real number of waiters
1487                                  * with the LK_SLEEPFAIL flag on, because such
1488                                  * waiters may also be using interruptible
1489                                  * sleeps, so lk_exslpfail should be treated
1490                                  * as an upper bound only, including the
1491                                  * edge cases.
1492                                  */
1493                                 if (v & LK_EXCLUSIVE_WAITERS) {
1494                                         queue = SQ_EXCLUSIVE_QUEUE;
1495                                         v &= ~LK_EXCLUSIVE_WAITERS;
1496                                 } else {
1497                                         /*
1498                                          * Exclusive waiters sleeping with
1499                                          * LK_SLEEPFAIL on and using
1500                                          * interruptible sleeps/timeouts may
1501                                          * have left a spurious lk_exslpfail
1502                                          * count behind, so clean it up anyway.
1503                                          */
1504                                         MPASS(v & LK_SHARED_WAITERS);
1505                                         lk->lk_exslpfail = 0;
1506                                         queue = SQ_SHARED_QUEUE;
1507                                         v &= ~LK_SHARED_WAITERS;
1508                                 }
1509                                 if (queue == SQ_EXCLUSIVE_QUEUE) {
1510                                         realexslp =
1511                                             sleepq_sleepcnt(&lk->lock_object,
1512                                             SQ_EXCLUSIVE_QUEUE);
1513                                         if (lk->lk_exslpfail >= realexslp) {
1514                                                 lk->lk_exslpfail = 0;
1515                                                 queue = SQ_SHARED_QUEUE;
1516                                                 v &= ~LK_SHARED_WAITERS;
1517                                                 if (realexslp != 0) {
1518                                                         LOCK_LOG2(lk,
1519                                         "%s: %p has only LK_SLEEPFAIL sleepers",
1520                                                             __func__, lk);
1521                                                         LOCK_LOG2(lk,
1522                         "%s: %p waking up threads on the exclusive queue",
1523                                                             __func__, lk);
1524                                                         wakeup_swapper =
1525                                                             sleepq_broadcast(
1526                                                             &lk->lock_object,
1527                                                             SLEEPQ_LK, 0,
1528                                                             SQ_EXCLUSIVE_QUEUE);
1529                                                 }
1530                                         } else
1531                                                 lk->lk_exslpfail = 0;
1532                                 }
1533                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
1534                                         sleepq_release(&lk->lock_object);
1535                                         continue;
1536                                 }
1537                                 LOCK_LOG3(lk,
1538                                 "%s: %p waking up all threads on the %s queue",
1539                                     __func__, lk, queue == SQ_SHARED_QUEUE ?
1540                                     "shared" : "exclusive");
1541                                 wakeup_swapper |= sleepq_broadcast(
1542                                     &lk->lock_object, SLEEPQ_LK, 0, queue);
1543
1544                                 /*
1545                                  * If shared waiters have been woken up, we need
1546                                  * to wait for one of them to acquire the lock
1547                                  * before setting the exclusive waiters flag, in
1548                                  * order to avoid a deadlock.
1549                                  */
1550                                 if (queue == SQ_SHARED_QUEUE) {
1551                                         for (v = lk->lk_lock;
1552                                             (v & LK_SHARE) && !LK_SHARERS(v);
1553                                             v = lk->lk_lock)
1554                                                 cpu_spinwait();
1555                                 }
1556                         }
1557
1558                         /*
1559                          * Try to set the LK_EXCLUSIVE_WAITERS flag.  If we
1560                          * fail, loop back and retry.
1561                          */
1562                         if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
1563                                 if (!atomic_cmpset_ptr(&lk->lk_lock, x,
1564                                     x | LK_EXCLUSIVE_WAITERS)) {
1565                                         sleepq_release(&lk->lock_object);
1566                                         continue;
1567                                 }
1568                                 LOCK_LOG2(lk, "%s: %p set drain waiters flag",
1569                                     __func__, lk);
1570                         }
1571
1572                         /*
1573                          * As long as we have been unable to acquire the
1574                          * exclusive lock and the exclusive waiters flag
1575                          * is set, we will sleep.
1576                          */
1577                         if (flags & LK_INTERLOCK) {
1578                                 class->lc_unlock(ilk);
1579                                 flags &= ~LK_INTERLOCK;
1580                         }
1581                         GIANT_SAVE();
1582                         sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
1583                             SQ_EXCLUSIVE_QUEUE);
1584                         sleepq_wait(&lk->lock_object, ipri & PRIMASK);
1585                         GIANT_RESTORE();
1586                         LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
1587                             __func__, lk);
1588                 }
1589
1590                 if (error == 0) {
1591                         lock_profile_obtain_lock_success(&lk->lock_object,
1592                             false, contested, waittime, file, line);
1593                         LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
1594                             lk->lk_recurse, file, line);
1595                         WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
1596                             LK_TRYWIT(flags), file, line);
1597                         TD_LOCKS_INC(curthread);
1598                         STACK_SAVE(lk);
1599                 }
1600                 break;
1601         default:
1602                 if (flags & LK_INTERLOCK)
1603                         class->lc_unlock(ilk);
1604                 panic("%s: unknown lockmgr request 0x%x\n", __func__, op);
1605         }
1606
1607         if (flags & LK_INTERLOCK)
1608                 class->lc_unlock(ilk);
1609         if (wakeup_swapper)
1610                 kick_proc0();
1611
1612         return (error);
1613 }
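
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * consumer of the LK_EXCLUSIVE, LK_DOWNGRADE and LK_RELEASE requests
 * dispatched above.  The helper name and the data it would protect are
 * made up; only the lockmgr(9) calls themselves are real API, and the
 * lock is assumed to have been set up earlier with lockinit(9).
 */
static void __unused
example_update_then_read(struct lock *lk)
{
        /* Acquire exclusively; this may sleep on the lock's wait channel. */
        lockmgr(lk, LK_EXCLUSIVE, NULL);
        /* ... modify the protected data ... */

        /*
         * Downgrade to shared mode.  As implemented above, the owner tid is
         * swapped for a single-sharer count in one atomic step, carrying the
         * waiters flags across unchanged.
         */
        lockmgr(lk, LK_DOWNGRADE, NULL);
        /* ... continue with read-only access ... */

        lockmgr(lk, LK_RELEASE, NULL);
}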
1614
1615 void
1616 _lockmgr_disown(struct lock *lk, const char *file, int line)
1617 {
1618         uintptr_t tid, x;
1619
1620         if (SCHEDULER_STOPPED())
1621                 return;
1622
1623         tid = (uintptr_t)curthread;
1624         _lockmgr_assert(lk, KA_XLOCKED, file, line);
1625
1626         /*
1627          * Panic if the lock is recursed.
1628          */
1629         if (lockmgr_xlocked(lk) && lockmgr_recursed(lk))
1630                 panic("%s: disown a recursed lockmgr @ %s:%d\n",
1631                     __func__, file, line);
1632
1633         /*
1634          * If the owner is already LK_KERNPROC, just skip the whole operation.
1635          */
1636         if (LK_HOLDER(lk->lk_lock) != tid)
1637                 return;
1638         lock_profile_release_lock(&lk->lock_object, false);
1639         LOCKSTAT_RECORD1(lockmgr__disown, lk, LOCKSTAT_WRITER);
1640         LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
1641         WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
1642         TD_LOCKS_DEC(curthread);
1643         STACK_SAVE(lk);
1644
1645         /*
1646          * In order to preserve waiters flags, just spin.
1647          */
1648         for (;;) {
1649                 x = lockmgr_read_value(lk);
1650                 MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
1651                 x &= LK_ALL_WAITERS;
1652                 if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
1653                     LK_KERNPROC | x))
1654                         return;
1655                 cpu_spinwait();
1656         }
1657 }
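
/*
 * Illustrative sketch, not part of the original file: the usual disown
 * pattern, loosely modelled on how the buffer cache hands a buffer lock to
 * the kernel for asynchronous I/O.  After disowning, the recorded owner is
 * LK_KERNPROC, so a different context may later issue the LK_RELEASE.  The
 * helper name is hypothetical, and lockmgr_disown() is assumed to be the
 * convenience wrapper for the _lockmgr_disown() routine above.
 */
static void __unused
example_hand_off(struct lock *lk)
{
        lockmgr(lk, LK_EXCLUSIVE, NULL);
        /* ... hand the protected object to another context ... */
        lockmgr_disown(lk);
        /*
         * curthread no longer owns the lock; whoever finishes the work is
         * expected to call lockmgr(lk, LK_RELEASE, NULL) on our behalf.
         */
}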
1658
1659 void
1660 lockmgr_printinfo(const struct lock *lk)
1661 {
1662         struct thread *td;
1663         uintptr_t x;
1664
1665         if (lk->lk_lock == LK_UNLOCKED)
1666                 printf("lock type %s: UNLOCKED\n", lk->lock_object.lo_name);
1667         else if (lk->lk_lock & LK_SHARE)
1668                 printf("lock type %s: SHARED (count %ju)\n",
1669                     lk->lock_object.lo_name,
1670                     (uintmax_t)LK_SHARERS(lk->lk_lock));
1671         else {
1672                 td = lockmgr_xholder(lk);
1673                 if (td == (struct thread *)LK_KERNPROC)
1674                         printf("lock type %s: EXCL by KERNPROC\n",
1675                             lk->lock_object.lo_name);
1676                 else
1677                         printf("lock type %s: EXCL by thread %p "
1678                             "(pid %d, %s, tid %d)\n", lk->lock_object.lo_name,
1679                             td, td->td_proc->p_pid, td->td_proc->p_comm,
1680                             td->td_tid);
1681         }
1682
1683         x = lk->lk_lock;
1684         if (x & LK_EXCLUSIVE_WAITERS)
1685                 printf(" with exclusive waiters pending\n");
1686         if (x & LK_SHARED_WAITERS)
1687                 printf(" with shared waiters pending\n");
1688         if (x & LK_EXCLUSIVE_SPINNERS)
1689                 printf(" with exclusive spinners pending\n");
1690
1691         STACK_PRINT(lk);
1692 }
1693
1694 int
1695 lockstatus(const struct lock *lk)
1696 {
1697         uintptr_t v, x;
1698         int ret;
1699
1700         ret = LK_SHARED;
1701         x = lockmgr_read_value(lk);
1702         v = LK_HOLDER(x);
1703
1704         if ((x & LK_SHARE) == 0) {
1705                 if (v == (uintptr_t)curthread || v == LK_KERNPROC)
1706                         ret = LK_EXCLUSIVE;
1707                 else
1708                         ret = LK_EXCLOTHER;
1709         } else if (x == LK_UNLOCKED)
1710                 ret = 0;
1711
1712         return (ret);
1713 }
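
/*
 * Illustrative sketch, not part of the original file: how a caller
 * typically interprets lockstatus().  The wrapper below is hypothetical;
 * the return values LK_EXCLUSIVE, LK_SHARED, LK_EXCLOTHER and 0 are the
 * real ones produced above.
 */
static int __unused
example_owns_exclusively(const struct lock *lk)
{
        switch (lockstatus(lk)) {
        case LK_EXCLUSIVE:
                /* Held exclusively by curthread, or disowned to KERNPROC. */
                return (1);
        case LK_EXCLOTHER:
                /* Held exclusively by some other thread. */
        case LK_SHARED:
                /* Held in shared mode; individual owners are not tracked. */
        default:
                /* 0: not held at all. */
                return (0);
        }
}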
1714
1715 #ifdef INVARIANT_SUPPORT
1716
1717 FEATURE(invariant_support,
1718     "Support for modules compiled with INVARIANTS option");
1719
1720 #ifndef INVARIANTS
1721 #undef  _lockmgr_assert
1722 #endif
1723
1724 void
1725 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line)
1726 {
1727         int slocked = 0;
1728
1729         if (SCHEDULER_STOPPED())
1730                 return;
1731         switch (what) {
1732         case KA_SLOCKED:
1733         case KA_SLOCKED | KA_NOTRECURSED:
1734         case KA_SLOCKED | KA_RECURSED:
1735                 slocked = 1;    /* FALLTHROUGH */
1736         case KA_LOCKED:
1737         case KA_LOCKED | KA_NOTRECURSED:
1738         case KA_LOCKED | KA_RECURSED:
1739 #ifdef WITNESS
1740
1741                 /*
1742                  * We cannot trust WITNESS if the lock is held in exclusive
1743                  * mode and a call to lockmgr_disown() happened.
1744                  * Work around this by skipping the check if the lock is held
1745                  * in exclusive mode, even for the KA_LOCKED case.
1746                  */
1747                 if (slocked || (lk->lk_lock & LK_SHARE)) {
1748                         witness_assert(&lk->lock_object, what, file, line);
1749                         break;
1750                 }
1751 #endif
1752                 if (lk->lk_lock == LK_UNLOCKED ||
1753                     ((lk->lk_lock & LK_SHARE) == 0 && (slocked ||
1754                     (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)))))
1755                         panic("Lock %s not %slocked @ %s:%d\n",
1756                             lk->lock_object.lo_name, slocked ? "share" : "",
1757                             file, line);
1758
1759                 if ((lk->lk_lock & LK_SHARE) == 0) {
1760                         if (lockmgr_recursed(lk)) {
1761                                 if (what & KA_NOTRECURSED)
1762                                         panic("Lock %s recursed @ %s:%d\n",
1763                                             lk->lock_object.lo_name, file,
1764                                             line);
1765                         } else if (what & KA_RECURSED)
1766                                 panic("Lock %s not recursed @ %s:%d\n",
1767                                     lk->lock_object.lo_name, file, line);
1768                 }
1769                 break;
1770         case KA_XLOCKED:
1771         case KA_XLOCKED | KA_NOTRECURSED:
1772         case KA_XLOCKED | KA_RECURSED:
1773                 if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))
1774                         panic("Lock %s not exclusively locked @ %s:%d\n",
1775                             lk->lock_object.lo_name, file, line);
1776                 if (lockmgr_recursed(lk)) {
1777                         if (what & KA_NOTRECURSED)
1778                                 panic("Lock %s recursed @ %s:%d\n",
1779                                     lk->lock_object.lo_name, file, line);
1780                 } else if (what & KA_RECURSED)
1781                         panic("Lock %s not recursed @ %s:%d\n",
1782                             lk->lock_object.lo_name, file, line);
1783                 break;
1784         case KA_UNLOCKED:
1785                 if (lockmgr_xlocked(lk) || lockmgr_disowned(lk))
1786                         panic("Lock %s exclusively locked @ %s:%d\n",
1787                             lk->lock_object.lo_name, file, line);
1788                 break;
1789         default:
1790                 panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file,
1791                     line);
1792         }
1793 }
1794 #endif
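
/*
 * Illustrative sketch, not part of the original file: a consumer asserting
 * its locking protocol with the KA_* flags handled above.  This assumes an
 * INVARIANTS build and the lockmgr_assert() convenience wrapper from
 * <sys/lockmgr.h>; the helper and the state it guards are hypothetical.
 */
static void __unused
example_requires_xlock(struct lock *lk)
{
        /* The caller must hold the lock exclusively and without recursion. */
        lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED);
        /* ... safe to modify the protected state here ... */
}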
1795
1796 #ifdef DDB
1797 int
1798 lockmgr_chain(struct thread *td, struct thread **ownerp)
1799 {
1800         const struct lock *lk;
1801
1802         lk = td->td_wchan;
1803
1804         if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr)
1805                 return (0);
1806         db_printf("blocked on lockmgr %s ", lk->lock_object.lo_name);
1807         if (lk->lk_lock & LK_SHARE)
1808                 db_printf("SHARED (count %ju)\n",
1809                     (uintmax_t)LK_SHARERS(lk->lk_lock));
1810         else
1811                 db_printf("EXCL\n");
1812         *ownerp = lockmgr_xholder(lk);
1813
1814         return (1);
1815 }
1816
1817 static void
1818 db_show_lockmgr(const struct lock_object *lock)
1819 {
1820         struct thread *td;
1821         const struct lock *lk;
1822
1823         lk = (const struct lock *)lock;
1824
1825         db_printf(" state: ");
1826         if (lk->lk_lock == LK_UNLOCKED)
1827                 db_printf("UNLOCKED\n");
1828         else if (lk->lk_lock & LK_SHARE)
1829                 db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
1830         else {
1831                 td = lockmgr_xholder(lk);
1832                 if (td == (struct thread *)LK_KERNPROC)
1833                         db_printf("XLOCK: LK_KERNPROC\n");
1834                 else
1835                         db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
1836                             td->td_tid, td->td_proc->p_pid,
1837                             td->td_proc->p_comm);
1838                 if (lockmgr_recursed(lk))
1839                         db_printf(" recursed: %d\n", lk->lk_recurse);
1840         }
1841         db_printf(" waiters: ");
1842         switch (lk->lk_lock & LK_ALL_WAITERS) {
1843         case LK_SHARED_WAITERS:
1844                 db_printf("shared\n");
1845                 break;
1846         case LK_EXCLUSIVE_WAITERS:
1847                 db_printf("exclusive\n");
1848                 break;
1849         case LK_ALL_WAITERS:
1850                 db_printf("shared and exclusive\n");
1851                 break;
1852         default:
1853                 db_printf("none\n");
1854         }
1855         db_printf(" spinners: ");
1856         if (lk->lk_lock & LK_EXCLUSIVE_SPINNERS)
1857                 db_printf("exclusive\n");
1858         else
1859                 db_printf("none\n");
1860 }
1861 #endif